/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth L2CAP core. */
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/crc16.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
44 #define LE_FLOWCTL_MAX_CREDITS 65535
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
50 static LIST_HEAD(chan_list);
51 static DEFINE_RWLOCK(chan_list_lock);
53 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
54 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
56 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
57 u8 code, u8 ident, u16 dlen, void *data);
58 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
60 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
61 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
63 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
64 struct sk_buff_head *skbs, u8 event);
66 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
68 if (link_type == LE_LINK) {
69 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
70 return BDADDR_LE_PUBLIC;
72 return BDADDR_LE_RANDOM;
78 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
80 return bdaddr_type(hcon->type, hcon->src_type);
83 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
85 return bdaddr_type(hcon->type, hcon->dst_type);
/* ---- L2CAP channels ---- */
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
95 list_for_each_entry(c, &conn->chan_l, list) {
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 struct l2cap_chan *c;
107 list_for_each_entry(c, &conn->chan_l, list) {
114 /* Find channel with given SCID.
115 * Returns locked channel. */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
119 struct l2cap_chan *c;
121 mutex_lock(&conn->chan_lock);
122 c = __l2cap_get_chan_by_scid(conn, cid);
125 mutex_unlock(&conn->chan_lock);
130 /* Find channel with given DCID.
131 * Returns locked channel.
133 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
136 struct l2cap_chan *c;
138 mutex_lock(&conn->chan_lock);
139 c = __l2cap_get_chan_by_dcid(conn, cid);
142 mutex_unlock(&conn->chan_lock);
147 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
150 struct l2cap_chan *c;
152 list_for_each_entry(c, &conn->chan_l, list) {
153 if (c->ident == ident)
159 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
162 struct l2cap_chan *c;
164 mutex_lock(&conn->chan_lock);
165 c = __l2cap_get_chan_by_ident(conn, ident);
168 mutex_unlock(&conn->chan_lock);
173 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
175 struct l2cap_chan *c;
177 list_for_each_entry(c, &chan_list, global_l) {
178 if (c->sport == psm && !bacmp(&c->src, src))
184 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
188 write_lock(&chan_list_lock);
190 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
203 for (p = 0x1001; p < 0x1100; p += 2)
204 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
205 chan->psm = cpu_to_le16(p);
206 chan->sport = cpu_to_le16(p);
213 write_unlock(&chan_list_lock);
216 EXPORT_SYMBOL_GPL(l2cap_add_psm);
218 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
220 write_lock(&chan_list_lock);
222 /* Override the defaults (which are for conn-oriented) */
223 chan->omtu = L2CAP_DEFAULT_MTU;
224 chan->chan_type = L2CAP_CHAN_FIXED;
228 write_unlock(&chan_list_lock);
233 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
237 if (conn->hcon->type == LE_LINK)
238 dyn_end = L2CAP_CID_LE_DYN_END;
240 dyn_end = L2CAP_CID_DYN_END;
242 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
243 if (!__l2cap_get_chan_by_scid(conn, cid))
250 static void l2cap_state_change(struct l2cap_chan *chan, int state)
252 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
253 state_to_string(state));
256 chan->ops->state_change(chan, state, 0);
259 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
263 chan->ops->state_change(chan, chan->state, err);
266 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
268 chan->ops->state_change(chan, chan->state, err);
271 static void __set_retrans_timer(struct l2cap_chan *chan)
273 if (!delayed_work_pending(&chan->monitor_timer) &&
274 chan->retrans_timeout) {
275 l2cap_set_timer(chan, &chan->retrans_timer,
276 msecs_to_jiffies(chan->retrans_timeout));
280 static void __set_monitor_timer(struct l2cap_chan *chan)
282 __clear_retrans_timer(chan);
283 if (chan->monitor_timeout) {
284 l2cap_set_timer(chan, &chan->monitor_timer,
285 msecs_to_jiffies(chan->monitor_timeout));
289 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
294 skb_queue_walk(head, skb) {
295 if (bt_cb(skb)->l2cap.txseq == seq)
/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocation.
 */
313 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
315 size_t alloc_size, i;
317 /* Allocated size is a power of 2 to map sequence numbers
318 * (which may be up to 14 bits) in to a smaller array that is
319 * sized for the negotiated ERTM transmit windows.
321 alloc_size = roundup_pow_of_two(size);
323 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
327 seq_list->mask = alloc_size - 1;
328 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
329 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
330 for (i = 0; i < alloc_size; i++)
331 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
336 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
338 kfree(seq_list->list);
341 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
344 /* Constant-time check for list membership */
345 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
348 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
350 u16 seq = seq_list->head;
351 u16 mask = seq_list->mask;
353 seq_list->head = seq_list->list[seq & mask];
354 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
356 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
357 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
358 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
364 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
368 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
371 for (i = 0; i <= seq_list->mask; i++)
372 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
378 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
380 u16 mask = seq_list->mask;
382 /* All appends happen in constant time */
384 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
387 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
388 seq_list->head = seq;
390 seq_list->list[seq_list->tail & mask] = seq;
392 seq_list->tail = seq;
393 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: close the channel with an
 * errno chosen from its current state and drop the reference taken when
 * the timer was armed.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
429 struct l2cap_chan *l2cap_chan_create(void)
431 struct l2cap_chan *chan;
433 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
437 skb_queue_head_init(&chan->tx_q);
438 skb_queue_head_init(&chan->srej_q);
439 mutex_init(&chan->lock);
441 /* Set default lock nesting level */
442 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
444 write_lock(&chan_list_lock);
445 list_add(&chan->global_l, &chan_list);
446 write_unlock(&chan_list_lock);
448 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
450 chan->state = BT_OPEN;
452 kref_init(&chan->kref);
454 /* This flag is cleared in l2cap_chan_ready() */
455 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
457 BT_DBG("chan %p", chan);
461 EXPORT_SYMBOL_GPL(l2cap_chan_create);
463 static void l2cap_chan_destroy(struct kref *kref)
465 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
467 BT_DBG("chan %p", chan);
469 write_lock(&chan_list_lock);
470 list_del(&chan->global_l);
471 write_unlock(&chan_list_lock);
476 void l2cap_chan_hold(struct l2cap_chan *c)
478 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
483 void l2cap_chan_put(struct l2cap_chan *c)
485 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
487 kref_put(&c->kref, l2cap_chan_destroy);
489 EXPORT_SYMBOL_GPL(l2cap_chan_put);
491 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
493 chan->fcs = L2CAP_FCS_CRC16;
494 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
495 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
496 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
497 chan->remote_max_tx = chan->max_tx;
498 chan->remote_tx_win = chan->tx_win;
499 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
500 chan->sec_level = BT_SECURITY_LOW;
501 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
502 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
503 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
505 chan->conf_state = 0;
506 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
508 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
510 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
512 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
515 chan->sdu_last_frag = NULL;
517 chan->tx_credits = 0;
518 chan->rx_credits = le_max_credits;
519 chan->mps = min_t(u16, chan->imtu, le_default_mps);
521 skb_queue_head_init(&chan->tx_q);
524 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
526 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
527 __le16_to_cpu(chan->psm), chan->dcid);
529 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
533 switch (chan->chan_type) {
534 case L2CAP_CHAN_CONN_ORIENTED:
535 /* Alloc CID for connection-oriented socket */
536 chan->scid = l2cap_alloc_cid(conn);
537 if (conn->hcon->type == ACL_LINK)
538 chan->omtu = L2CAP_DEFAULT_MTU;
541 case L2CAP_CHAN_CONN_LESS:
542 /* Connectionless socket */
543 chan->scid = L2CAP_CID_CONN_LESS;
544 chan->dcid = L2CAP_CID_CONN_LESS;
545 chan->omtu = L2CAP_DEFAULT_MTU;
548 case L2CAP_CHAN_FIXED:
549 /* Caller will set CID and CID specific MTU values */
553 /* Raw socket can send/recv signalling messages only */
554 chan->scid = L2CAP_CID_SIGNALING;
555 chan->dcid = L2CAP_CID_SIGNALING;
556 chan->omtu = L2CAP_DEFAULT_MTU;
559 chan->local_id = L2CAP_BESTEFFORT_ID;
560 chan->local_stype = L2CAP_SERV_BESTEFFORT;
561 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
562 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
563 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
564 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
566 l2cap_chan_hold(chan);
568 /* Only keep a reference for fixed channels if they requested it */
569 if (chan->chan_type != L2CAP_CHAN_FIXED ||
570 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
571 hci_conn_hold(conn->hcon);
573 list_add(&chan->list, &conn->chan_l);
576 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
578 mutex_lock(&conn->chan_lock);
579 __l2cap_chan_add(conn, chan);
580 mutex_unlock(&conn->chan_lock);
583 void l2cap_chan_del(struct l2cap_chan *chan, int err)
585 struct l2cap_conn *conn = chan->conn;
587 __clear_chan_timer(chan);
589 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
590 state_to_string(chan->state));
592 chan->ops->teardown(chan, err);
595 struct amp_mgr *mgr = conn->hcon->amp_mgr;
596 /* Delete from channel list */
597 list_del(&chan->list);
599 l2cap_chan_put(chan);
603 /* Reference was only held for non-fixed channels or
604 * fixed channels that explicitly requested it using the
605 * FLAG_HOLD_HCI_CONN flag.
607 if (chan->chan_type != L2CAP_CHAN_FIXED ||
608 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
609 hci_conn_drop(conn->hcon);
611 if (mgr && mgr->bredr_chan == chan)
612 mgr->bredr_chan = NULL;
615 if (chan->hs_hchan) {
616 struct hci_chan *hs_hchan = chan->hs_hchan;
618 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
619 amp_disconnect_logical_link(hs_hchan);
622 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
626 case L2CAP_MODE_BASIC:
629 case L2CAP_MODE_LE_FLOWCTL:
630 skb_queue_purge(&chan->tx_q);
633 case L2CAP_MODE_ERTM:
634 __clear_retrans_timer(chan);
635 __clear_monitor_timer(chan);
636 __clear_ack_timer(chan);
638 skb_queue_purge(&chan->srej_q);
640 l2cap_seq_list_free(&chan->srej_list);
641 l2cap_seq_list_free(&chan->retrans_list);
645 case L2CAP_MODE_STREAMING:
646 skb_queue_purge(&chan->tx_q);
652 EXPORT_SYMBOL_GPL(l2cap_chan_del);
654 static void l2cap_conn_update_id_addr(struct work_struct *work)
656 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
657 id_addr_update_work);
658 struct hci_conn *hcon = conn->hcon;
659 struct l2cap_chan *chan;
661 mutex_lock(&conn->chan_lock);
663 list_for_each_entry(chan, &conn->chan_l, list) {
664 l2cap_chan_lock(chan);
665 bacpy(&chan->dst, &hcon->dst);
666 chan->dst_type = bdaddr_dst_type(hcon);
667 l2cap_chan_unlock(chan);
670 mutex_unlock(&conn->chan_lock);
673 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
675 struct l2cap_conn *conn = chan->conn;
676 struct l2cap_le_conn_rsp rsp;
679 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
680 result = L2CAP_CR_AUTHORIZATION;
682 result = L2CAP_CR_BAD_PSM;
684 l2cap_state_change(chan, BT_DISCONN);
686 rsp.dcid = cpu_to_le16(chan->scid);
687 rsp.mtu = cpu_to_le16(chan->imtu);
688 rsp.mps = cpu_to_le16(chan->mps);
689 rsp.credits = cpu_to_le16(chan->rx_credits);
690 rsp.result = cpu_to_le16(result);
692 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
696 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
698 struct l2cap_conn *conn = chan->conn;
699 struct l2cap_conn_rsp rsp;
702 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
703 result = L2CAP_CR_SEC_BLOCK;
705 result = L2CAP_CR_BAD_PSM;
707 l2cap_state_change(chan, BT_DISCONN);
709 rsp.scid = cpu_to_le16(chan->dcid);
710 rsp.dcid = cpu_to_le16(chan->scid);
711 rsp.result = cpu_to_le16(result);
712 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
714 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
717 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
719 struct l2cap_conn *conn = chan->conn;
721 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
723 switch (chan->state) {
725 chan->ops->teardown(chan, 0);
730 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
731 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
732 l2cap_send_disconn_req(chan, reason);
734 l2cap_chan_del(chan, reason);
738 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
739 if (conn->hcon->type == ACL_LINK)
740 l2cap_chan_connect_reject(chan);
741 else if (conn->hcon->type == LE_LINK)
742 l2cap_chan_le_connect_reject(chan);
745 l2cap_chan_del(chan, reason);
750 l2cap_chan_del(chan, reason);
754 chan->ops->teardown(chan, 0);
758 EXPORT_SYMBOL(l2cap_chan_close);
760 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
762 switch (chan->chan_type) {
764 switch (chan->sec_level) {
765 case BT_SECURITY_HIGH:
766 case BT_SECURITY_FIPS:
767 return HCI_AT_DEDICATED_BONDING_MITM;
768 case BT_SECURITY_MEDIUM:
769 return HCI_AT_DEDICATED_BONDING;
771 return HCI_AT_NO_BONDING;
774 case L2CAP_CHAN_CONN_LESS:
775 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
776 if (chan->sec_level == BT_SECURITY_LOW)
777 chan->sec_level = BT_SECURITY_SDP;
779 if (chan->sec_level == BT_SECURITY_HIGH ||
780 chan->sec_level == BT_SECURITY_FIPS)
781 return HCI_AT_NO_BONDING_MITM;
783 return HCI_AT_NO_BONDING;
785 case L2CAP_CHAN_CONN_ORIENTED:
786 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
787 if (chan->sec_level == BT_SECURITY_LOW)
788 chan->sec_level = BT_SECURITY_SDP;
790 if (chan->sec_level == BT_SECURITY_HIGH ||
791 chan->sec_level == BT_SECURITY_FIPS)
792 return HCI_AT_NO_BONDING_MITM;
794 return HCI_AT_NO_BONDING;
798 switch (chan->sec_level) {
799 case BT_SECURITY_HIGH:
800 case BT_SECURITY_FIPS:
801 return HCI_AT_GENERAL_BONDING_MITM;
802 case BT_SECURITY_MEDIUM:
803 return HCI_AT_GENERAL_BONDING;
805 return HCI_AT_NO_BONDING;
/* Service level security */
812 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
814 struct l2cap_conn *conn = chan->conn;
817 if (conn->hcon->type == LE_LINK)
818 return smp_conn_security(conn->hcon, chan->sec_level);
820 auth_type = l2cap_get_auth_type(chan);
822 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
826 static u8 l2cap_get_ident(struct l2cap_conn *conn)
830 /* Get next available identificator.
831 * 1 - 128 are used by kernel.
832 * 129 - 199 are reserved.
833 * 200 - 254 are used by utilities like l2ping, etc.
836 mutex_lock(&conn->ident_lock);
838 if (++conn->tx_ident > 128)
843 mutex_unlock(&conn->ident_lock);
848 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
851 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
854 BT_DBG("code 0x%2.2x", code);
859 /* Use NO_FLUSH if supported or we have an LE link (which does
860 * not support auto-flushing packets) */
861 if (lmp_no_flush_capable(conn->hcon->hdev) ||
862 conn->hcon->type == LE_LINK)
863 flags = ACL_START_NO_FLUSH;
867 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
868 skb->priority = HCI_PRIO_MAX;
870 hci_send_acl(conn->hchan, skb, flags);
873 static bool __chan_is_moving(struct l2cap_chan *chan)
875 return chan->move_state != L2CAP_MOVE_STABLE &&
876 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
879 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
881 struct hci_conn *hcon = chan->conn->hcon;
884 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
887 if (chan->hs_hcon && !__chan_is_moving(chan)) {
889 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
896 /* Use NO_FLUSH for LE links (where this is the only option) or
897 * if the BR/EDR link supports it and flushing has not been
898 * explicitly requested (through FLAG_FLUSHABLE).
900 if (hcon->type == LE_LINK ||
901 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
902 lmp_no_flush_capable(hcon->hdev)))
903 flags = ACL_START_NO_FLUSH;
907 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
908 hci_send_acl(chan->conn->hchan, skb, flags);
911 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
913 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
914 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
916 if (enh & L2CAP_CTRL_FRAME_TYPE) {
919 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
920 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
927 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
928 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
935 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
937 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
938 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
940 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
943 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
944 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
951 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
952 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
959 static inline void __unpack_control(struct l2cap_chan *chan,
962 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
963 __unpack_extended_control(get_unaligned_le32(skb->data),
965 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
967 __unpack_enhanced_control(get_unaligned_le16(skb->data),
969 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
973 static u32 __pack_extended_control(struct l2cap_ctrl *control)
977 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
978 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
980 if (control->sframe) {
981 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
982 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
983 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
985 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
986 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
992 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
996 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
997 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
999 if (control->sframe) {
1000 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1001 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1002 packed |= L2CAP_CTRL_FRAME_TYPE;
1004 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1005 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1011 static inline void __pack_control(struct l2cap_chan *chan,
1012 struct l2cap_ctrl *control,
1013 struct sk_buff *skb)
1015 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1016 put_unaligned_le32(__pack_extended_control(control),
1017 skb->data + L2CAP_HDR_SIZE);
1019 put_unaligned_le16(__pack_enhanced_control(control),
1020 skb->data + L2CAP_HDR_SIZE);
1024 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1026 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1027 return L2CAP_EXT_HDR_SIZE;
1029 return L2CAP_ENH_HDR_SIZE;
1032 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1035 struct sk_buff *skb;
1036 struct l2cap_hdr *lh;
1037 int hlen = __ertm_hdr_size(chan);
1039 if (chan->fcs == L2CAP_FCS_CRC16)
1040 hlen += L2CAP_FCS_SIZE;
1042 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1045 return ERR_PTR(-ENOMEM);
1047 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1048 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1049 lh->cid = cpu_to_le16(chan->dcid);
1051 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1052 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1054 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1056 if (chan->fcs == L2CAP_FCS_CRC16) {
1057 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1058 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1061 skb->priority = HCI_PRIO_MAX;
1065 static void l2cap_send_sframe(struct l2cap_chan *chan,
1066 struct l2cap_ctrl *control)
1068 struct sk_buff *skb;
1071 BT_DBG("chan %p, control %p", chan, control);
1073 if (!control->sframe)
1076 if (__chan_is_moving(chan))
1079 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1083 if (control->super == L2CAP_SUPER_RR)
1084 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1085 else if (control->super == L2CAP_SUPER_RNR)
1086 set_bit(CONN_RNR_SENT, &chan->conn_state);
1088 if (control->super != L2CAP_SUPER_SREJ) {
1089 chan->last_acked_seq = control->reqseq;
1090 __clear_ack_timer(chan);
1093 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1094 control->final, control->poll, control->super);
1096 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1097 control_field = __pack_extended_control(control);
1099 control_field = __pack_enhanced_control(control);
1101 skb = l2cap_create_sframe_pdu(chan, control_field);
1103 l2cap_do_send(chan, skb);
1106 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1108 struct l2cap_ctrl control;
1110 BT_DBG("chan %p, poll %d", chan, poll);
1112 memset(&control, 0, sizeof(control));
1114 control.poll = poll;
1116 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1117 control.super = L2CAP_SUPER_RNR;
1119 control.super = L2CAP_SUPER_RR;
1121 control.reqseq = chan->buffer_seq;
1122 l2cap_send_sframe(chan, &control);
1125 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1127 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1130 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1133 static bool __amp_capable(struct l2cap_chan *chan)
1135 struct l2cap_conn *conn = chan->conn;
1136 struct hci_dev *hdev;
1137 bool amp_available = false;
1139 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1142 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1145 read_lock(&hci_dev_list_lock);
1146 list_for_each_entry(hdev, &hci_dev_list, list) {
1147 if (hdev->amp_type != AMP_TYPE_BREDR &&
1148 test_bit(HCI_UP, &hdev->flags)) {
1149 amp_available = true;
1153 read_unlock(&hci_dev_list_lock);
1155 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1156 return amp_available;
1161 static bool l2cap_check_efs(struct l2cap_chan *chan)
1163 /* Check EFS parameters */
1167 void l2cap_send_conn_req(struct l2cap_chan *chan)
1169 struct l2cap_conn *conn = chan->conn;
1170 struct l2cap_conn_req req;
1172 req.scid = cpu_to_le16(chan->scid);
1173 req.psm = chan->psm;
1175 chan->ident = l2cap_get_ident(conn);
1177 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1179 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1182 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1184 struct l2cap_create_chan_req req;
1185 req.scid = cpu_to_le16(chan->scid);
1186 req.psm = chan->psm;
1187 req.amp_id = amp_id;
1189 chan->ident = l2cap_get_ident(chan->conn);
1191 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1195 static void l2cap_move_setup(struct l2cap_chan *chan)
1197 struct sk_buff *skb;
1199 BT_DBG("chan %p", chan);
1201 if (chan->mode != L2CAP_MODE_ERTM)
1204 __clear_retrans_timer(chan);
1205 __clear_monitor_timer(chan);
1206 __clear_ack_timer(chan);
1208 chan->retry_count = 0;
1209 skb_queue_walk(&chan->tx_q, skb) {
1210 if (bt_cb(skb)->l2cap.retries)
1211 bt_cb(skb)->l2cap.retries = 1;
1216 chan->expected_tx_seq = chan->buffer_seq;
1218 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1219 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1220 l2cap_seq_list_clear(&chan->retrans_list);
1221 l2cap_seq_list_clear(&chan->srej_list);
1222 skb_queue_purge(&chan->srej_q);
1224 chan->tx_state = L2CAP_TX_STATE_XMIT;
1225 chan->rx_state = L2CAP_RX_STATE_MOVE;
1227 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1230 static void l2cap_move_done(struct l2cap_chan *chan)
1232 u8 move_role = chan->move_role;
1233 BT_DBG("chan %p", chan);
1235 chan->move_state = L2CAP_MOVE_STABLE;
1236 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1238 if (chan->mode != L2CAP_MODE_ERTM)
1241 switch (move_role) {
1242 case L2CAP_MOVE_ROLE_INITIATOR:
1243 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1244 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1246 case L2CAP_MOVE_ROLE_RESPONDER:
1247 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1252 static void l2cap_chan_ready(struct l2cap_chan *chan)
1254 /* The channel may have already been flagged as connected in
1255 * case of receiving data before the L2CAP info req/rsp
1256 * procedure is complete.
1258 if (chan->state == BT_CONNECTED)
1261 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1262 chan->conf_state = 0;
1263 __clear_chan_timer(chan);
1265 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1266 chan->ops->suspend(chan);
1268 chan->state = BT_CONNECTED;
1270 chan->ops->ready(chan);
1273 static void l2cap_le_connect(struct l2cap_chan *chan)
1275 struct l2cap_conn *conn = chan->conn;
1276 struct l2cap_le_conn_req req;
1278 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1281 req.psm = chan->psm;
1282 req.scid = cpu_to_le16(chan->scid);
1283 req.mtu = cpu_to_le16(chan->imtu);
1284 req.mps = cpu_to_le16(chan->mps);
1285 req.credits = cpu_to_le16(chan->rx_credits);
1287 chan->ident = l2cap_get_ident(conn);
1289 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1293 static void l2cap_le_start(struct l2cap_chan *chan)
1295 struct l2cap_conn *conn = chan->conn;
1297 if (!smp_conn_security(conn->hcon, chan->sec_level))
1301 l2cap_chan_ready(chan);
1305 if (chan->state == BT_CONNECT)
1306 l2cap_le_connect(chan);
1309 static void l2cap_start_connection(struct l2cap_chan *chan)
1311 if (__amp_capable(chan)) {
1312 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1313 a2mp_discover_amp(chan);
1314 } else if (chan->conn->hcon->type == LE_LINK) {
1315 l2cap_le_start(chan);
1317 l2cap_send_conn_req(chan);
1321 static void l2cap_request_info(struct l2cap_conn *conn)
1323 struct l2cap_info_req req;
1325 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1328 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1330 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1331 conn->info_ident = l2cap_get_ident(conn);
1333 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1335 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1339 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1341 /* The minimum encryption key size needs to be enforced by the
1342 * host stack before establishing any L2CAP connections. The
1343 * specification in theory allows a minimum of 1, but to align
1344 * BR/EDR and LE transports, a minimum of 7 is chosen.
1346 * This check might also be called for unencrypted connections
1347 * that have no key size requirements. Ensure that the link is
1348 * actually encrypted before enforcing a key size.
1350 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1351 hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE);
1354 static void l2cap_do_start(struct l2cap_chan *chan)
1356 struct l2cap_conn *conn = chan->conn;
1358 if (conn->hcon->type == LE_LINK) {
1359 l2cap_le_start(chan);
1363 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1364 l2cap_request_info(conn);
1368 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1371 if (!l2cap_chan_check_security(chan, true) ||
1372 !__l2cap_no_conn_pending(chan))
1375 if (l2cap_check_enc_key_size(conn->hcon))
1376 l2cap_start_connection(chan);
1378 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
1381 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1383 u32 local_feat_mask = l2cap_feat_mask;
1385 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1388 case L2CAP_MODE_ERTM:
1389 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1390 case L2CAP_MODE_STREAMING:
1391 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1397 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1399 struct l2cap_conn *conn = chan->conn;
1400 struct l2cap_disconn_req req;
1405 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1406 __clear_retrans_timer(chan);
1407 __clear_monitor_timer(chan);
1408 __clear_ack_timer(chan);
1411 if (chan->scid == L2CAP_CID_A2MP) {
1412 l2cap_state_change(chan, BT_DISCONN);
1416 req.dcid = cpu_to_le16(chan->dcid);
1417 req.scid = cpu_to_le16(chan->scid);
1418 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1421 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1424 /* ---- L2CAP connections ---- */
1425 static void l2cap_conn_start(struct l2cap_conn *conn)
1427 struct l2cap_chan *chan, *tmp;
1429 BT_DBG("conn %p", conn);
1431 mutex_lock(&conn->chan_lock);
1433 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1434 l2cap_chan_lock(chan);
1436 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1437 l2cap_chan_ready(chan);
1438 l2cap_chan_unlock(chan);
1442 if (chan->state == BT_CONNECT) {
1443 if (!l2cap_chan_check_security(chan, true) ||
1444 !__l2cap_no_conn_pending(chan)) {
1445 l2cap_chan_unlock(chan);
1449 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1450 && test_bit(CONF_STATE2_DEVICE,
1451 &chan->conf_state)) {
1452 l2cap_chan_close(chan, ECONNRESET);
1453 l2cap_chan_unlock(chan);
1457 if (l2cap_check_enc_key_size(conn->hcon))
1458 l2cap_start_connection(chan);
1460 l2cap_chan_close(chan, ECONNREFUSED);
1462 } else if (chan->state == BT_CONNECT2) {
1463 struct l2cap_conn_rsp rsp;
1465 rsp.scid = cpu_to_le16(chan->dcid);
1466 rsp.dcid = cpu_to_le16(chan->scid);
1468 if (l2cap_chan_check_security(chan, false)) {
1469 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1470 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1471 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1472 chan->ops->defer(chan);
1475 l2cap_state_change(chan, BT_CONFIG);
1476 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1477 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1480 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1481 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1484 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1487 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1488 rsp.result != L2CAP_CR_SUCCESS) {
1489 l2cap_chan_unlock(chan);
1493 set_bit(CONF_REQ_SENT, &chan->conf_state);
1494 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1495 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1496 chan->num_conf_req++;
1499 l2cap_chan_unlock(chan);
1502 mutex_unlock(&conn->chan_lock);
1505 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1507 struct hci_conn *hcon = conn->hcon;
1508 struct hci_dev *hdev = hcon->hdev;
1510 BT_DBG("%s conn %p", hdev->name, conn);
1512 /* For outgoing pairing which doesn't necessarily have an
1513 * associated socket (e.g. mgmt_pair_device).
1516 smp_conn_security(hcon, hcon->pending_sec_level);
1518 /* For LE slave connections, make sure the connection interval
1519 * is in the range of the minium and maximum interval that has
1520 * been configured for this connection. If not, then trigger
1521 * the connection update procedure.
1523 if (hcon->role == HCI_ROLE_SLAVE &&
1524 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1525 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1526 struct l2cap_conn_param_update_req req;
1528 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1529 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1530 req.latency = cpu_to_le16(hcon->le_conn_latency);
1531 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1533 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1534 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1538 static void l2cap_conn_ready(struct l2cap_conn *conn)
1540 struct l2cap_chan *chan;
1541 struct hci_conn *hcon = conn->hcon;
1543 BT_DBG("conn %p", conn);
1545 if (hcon->type == ACL_LINK)
1546 l2cap_request_info(conn);
1548 mutex_lock(&conn->chan_lock);
1550 list_for_each_entry(chan, &conn->chan_l, list) {
1552 l2cap_chan_lock(chan);
1554 if (chan->scid == L2CAP_CID_A2MP) {
1555 l2cap_chan_unlock(chan);
1559 if (hcon->type == LE_LINK) {
1560 l2cap_le_start(chan);
1561 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1562 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1563 l2cap_chan_ready(chan);
1564 } else if (chan->state == BT_CONNECT) {
1565 l2cap_do_start(chan);
1568 l2cap_chan_unlock(chan);
1571 mutex_unlock(&conn->chan_lock);
1573 if (hcon->type == LE_LINK)
1574 l2cap_le_conn_ready(conn);
1576 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1579 /* Notify sockets that we cannot guarantee reliability anymore */
1580 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1582 struct l2cap_chan *chan;
1584 BT_DBG("conn %p", conn);
1586 mutex_lock(&conn->chan_lock);
1588 list_for_each_entry(chan, &conn->chan_l, list) {
1589 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1590 l2cap_chan_set_err(chan, err);
1593 mutex_unlock(&conn->chan_lock);
1596 static void l2cap_info_timeout(struct work_struct *work)
1598 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1601 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1602 conn->info_ident = 0;
1604 l2cap_conn_start(conn);
1609 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1610 * callback is called during registration. The ->remove callback is called
1611 * during unregistration.
1612 * An l2cap_user object can either be explicitly unregistered or when the
1613 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1614 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1615 * External modules must own a reference to the l2cap_conn object if they intend
1616 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1617 * any time if they don't.
1620 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1622 struct hci_dev *hdev = conn->hcon->hdev;
1625 /* We need to check whether l2cap_conn is registered. If it is not, we
1626 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1627 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1628 * relies on the parent hci_conn object to be locked. This itself relies
1629 * on the hci_dev object to be locked. So we must lock the hci device
1634 if (!list_empty(&user->list)) {
1639 /* conn->hchan is NULL after l2cap_conn_del() was called */
1645 ret = user->probe(conn, user);
1649 list_add(&user->list, &conn->users);
1653 hci_dev_unlock(hdev);
1656 EXPORT_SYMBOL(l2cap_register_user);
1658 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1660 struct hci_dev *hdev = conn->hcon->hdev;
1664 if (list_empty(&user->list))
1667 list_del_init(&user->list);
1668 user->remove(conn, user);
1671 hci_dev_unlock(hdev);
1673 EXPORT_SYMBOL(l2cap_unregister_user);
1675 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1677 struct l2cap_user *user;
1679 while (!list_empty(&conn->users)) {
1680 user = list_first_entry(&conn->users, struct l2cap_user, list);
1681 list_del_init(&user->list);
1682 user->remove(conn, user);
1686 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1688 struct l2cap_conn *conn = hcon->l2cap_data;
1689 struct l2cap_chan *chan, *l;
1694 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1696 kfree_skb(conn->rx_skb);
1698 skb_queue_purge(&conn->pending_rx);
1700 /* We can not call flush_work(&conn->pending_rx_work) here since we
1701 * might block if we are running on a worker from the same workqueue
1702 * pending_rx_work is waiting on.
1704 if (work_pending(&conn->pending_rx_work))
1705 cancel_work_sync(&conn->pending_rx_work);
1707 if (work_pending(&conn->id_addr_update_work))
1708 cancel_work_sync(&conn->id_addr_update_work);
1710 l2cap_unregister_all_users(conn);
1712 /* Force the connection to be immediately dropped */
1713 hcon->disc_timeout = 0;
1715 mutex_lock(&conn->chan_lock);
1718 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1719 l2cap_chan_hold(chan);
1720 l2cap_chan_lock(chan);
1722 l2cap_chan_del(chan, err);
1724 chan->ops->close(chan);
1726 l2cap_chan_unlock(chan);
1727 l2cap_chan_put(chan);
1730 mutex_unlock(&conn->chan_lock);
1732 hci_chan_del(conn->hchan);
1734 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1735 cancel_delayed_work_sync(&conn->info_timer);
1737 hcon->l2cap_data = NULL;
1739 l2cap_conn_put(conn);
1742 static void l2cap_conn_free(struct kref *ref)
1744 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1746 hci_conn_put(conn->hcon);
1750 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1752 kref_get(&conn->ref);
1755 EXPORT_SYMBOL(l2cap_conn_get);
1757 void l2cap_conn_put(struct l2cap_conn *conn)
1759 kref_put(&conn->ref, l2cap_conn_free);
1761 EXPORT_SYMBOL(l2cap_conn_put);
1763 /* ---- Socket interface ---- */
1765 /* Find socket with psm and source / destination bdaddr.
1766 * Returns closest match.
1768 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1773 struct l2cap_chan *c, *c1 = NULL;
1775 read_lock(&chan_list_lock);
1777 list_for_each_entry(c, &chan_list, global_l) {
1778 if (state && c->state != state)
1781 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1784 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1787 if (c->psm == psm) {
1788 int src_match, dst_match;
1789 int src_any, dst_any;
1792 src_match = !bacmp(&c->src, src);
1793 dst_match = !bacmp(&c->dst, dst);
1794 if (src_match && dst_match) {
1796 read_unlock(&chan_list_lock);
1801 src_any = !bacmp(&c->src, BDADDR_ANY);
1802 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1803 if ((src_match && dst_any) || (src_any && dst_match) ||
1804 (src_any && dst_any))
1810 l2cap_chan_hold(c1);
1812 read_unlock(&chan_list_lock);
1817 static void l2cap_monitor_timeout(struct work_struct *work)
1819 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1820 monitor_timer.work);
1822 BT_DBG("chan %p", chan);
1824 l2cap_chan_lock(chan);
1827 l2cap_chan_unlock(chan);
1828 l2cap_chan_put(chan);
1832 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1834 l2cap_chan_unlock(chan);
1835 l2cap_chan_put(chan);
1838 static void l2cap_retrans_timeout(struct work_struct *work)
1840 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1841 retrans_timer.work);
1843 BT_DBG("chan %p", chan);
1845 l2cap_chan_lock(chan);
1848 l2cap_chan_unlock(chan);
1849 l2cap_chan_put(chan);
1853 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1854 l2cap_chan_unlock(chan);
1855 l2cap_chan_put(chan);
1858 static void l2cap_streaming_send(struct l2cap_chan *chan,
1859 struct sk_buff_head *skbs)
1861 struct sk_buff *skb;
1862 struct l2cap_ctrl *control;
1864 BT_DBG("chan %p, skbs %p", chan, skbs);
1866 if (__chan_is_moving(chan))
1869 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1871 while (!skb_queue_empty(&chan->tx_q)) {
1873 skb = skb_dequeue(&chan->tx_q);
1875 bt_cb(skb)->l2cap.retries = 1;
1876 control = &bt_cb(skb)->l2cap;
1878 control->reqseq = 0;
1879 control->txseq = chan->next_tx_seq;
1881 __pack_control(chan, control, skb);
1883 if (chan->fcs == L2CAP_FCS_CRC16) {
1884 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1885 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1888 l2cap_do_send(chan, skb);
1890 BT_DBG("Sent txseq %u", control->txseq);
1892 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1893 chan->frames_sent++;
1897 static int l2cap_ertm_send(struct l2cap_chan *chan)
1899 struct sk_buff *skb, *tx_skb;
1900 struct l2cap_ctrl *control;
1903 BT_DBG("chan %p", chan);
1905 if (chan->state != BT_CONNECTED)
1908 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1911 if (__chan_is_moving(chan))
1914 while (chan->tx_send_head &&
1915 chan->unacked_frames < chan->remote_tx_win &&
1916 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1918 skb = chan->tx_send_head;
1920 bt_cb(skb)->l2cap.retries = 1;
1921 control = &bt_cb(skb)->l2cap;
1923 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1926 control->reqseq = chan->buffer_seq;
1927 chan->last_acked_seq = chan->buffer_seq;
1928 control->txseq = chan->next_tx_seq;
1930 __pack_control(chan, control, skb);
1932 if (chan->fcs == L2CAP_FCS_CRC16) {
1933 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1934 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1937 /* Clone after data has been modified. Data is assumed to be
1938 read-only (for locking purposes) on cloned sk_buffs.
1940 tx_skb = skb_clone(skb, GFP_KERNEL);
1945 __set_retrans_timer(chan);
1947 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1948 chan->unacked_frames++;
1949 chan->frames_sent++;
1952 if (skb_queue_is_last(&chan->tx_q, skb))
1953 chan->tx_send_head = NULL;
1955 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1957 l2cap_do_send(chan, tx_skb);
1958 BT_DBG("Sent txseq %u", control->txseq);
1961 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1962 chan->unacked_frames, skb_queue_len(&chan->tx_q));
1967 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1969 struct l2cap_ctrl control;
1970 struct sk_buff *skb;
1971 struct sk_buff *tx_skb;
1974 BT_DBG("chan %p", chan);
1976 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1979 if (__chan_is_moving(chan))
1982 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1983 seq = l2cap_seq_list_pop(&chan->retrans_list);
1985 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1987 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1992 bt_cb(skb)->l2cap.retries++;
1993 control = bt_cb(skb)->l2cap;
1995 if (chan->max_tx != 0 &&
1996 bt_cb(skb)->l2cap.retries > chan->max_tx) {
1997 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1998 l2cap_send_disconn_req(chan, ECONNRESET);
1999 l2cap_seq_list_clear(&chan->retrans_list);
2003 control.reqseq = chan->buffer_seq;
2004 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2009 if (skb_cloned(skb)) {
2010 /* Cloned sk_buffs are read-only, so we need a
2013 tx_skb = skb_copy(skb, GFP_KERNEL);
2015 tx_skb = skb_clone(skb, GFP_KERNEL);
2019 l2cap_seq_list_clear(&chan->retrans_list);
2023 /* Update skb contents */
2024 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2025 put_unaligned_le32(__pack_extended_control(&control),
2026 tx_skb->data + L2CAP_HDR_SIZE);
2028 put_unaligned_le16(__pack_enhanced_control(&control),
2029 tx_skb->data + L2CAP_HDR_SIZE);
2033 if (chan->fcs == L2CAP_FCS_CRC16) {
2034 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2035 tx_skb->len - L2CAP_FCS_SIZE);
2036 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2040 l2cap_do_send(chan, tx_skb);
2042 BT_DBG("Resent txseq %d", control.txseq);
2044 chan->last_acked_seq = chan->buffer_seq;
2048 static void l2cap_retransmit(struct l2cap_chan *chan,
2049 struct l2cap_ctrl *control)
2051 BT_DBG("chan %p, control %p", chan, control);
2053 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2054 l2cap_ertm_resend(chan);
2057 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2058 struct l2cap_ctrl *control)
2060 struct sk_buff *skb;
2062 BT_DBG("chan %p, control %p", chan, control);
2065 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2067 l2cap_seq_list_clear(&chan->retrans_list);
2069 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2072 if (chan->unacked_frames) {
2073 skb_queue_walk(&chan->tx_q, skb) {
2074 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2075 skb == chan->tx_send_head)
2079 skb_queue_walk_from(&chan->tx_q, skb) {
2080 if (skb == chan->tx_send_head)
2083 l2cap_seq_list_append(&chan->retrans_list,
2084 bt_cb(skb)->l2cap.txseq);
2087 l2cap_ertm_resend(chan);
2091 static void l2cap_send_ack(struct l2cap_chan *chan)
2093 struct l2cap_ctrl control;
2094 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2095 chan->last_acked_seq);
2098 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2099 chan, chan->last_acked_seq, chan->buffer_seq);
2101 memset(&control, 0, sizeof(control));
2104 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2105 chan->rx_state == L2CAP_RX_STATE_RECV) {
2106 __clear_ack_timer(chan);
2107 control.super = L2CAP_SUPER_RNR;
2108 control.reqseq = chan->buffer_seq;
2109 l2cap_send_sframe(chan, &control);
2111 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2112 l2cap_ertm_send(chan);
2113 /* If any i-frames were sent, they included an ack */
2114 if (chan->buffer_seq == chan->last_acked_seq)
2118 /* Ack now if the window is 3/4ths full.
2119 * Calculate without mul or div
2121 threshold = chan->ack_win;
2122 threshold += threshold << 1;
2125 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2128 if (frames_to_ack >= threshold) {
2129 __clear_ack_timer(chan);
2130 control.super = L2CAP_SUPER_RR;
2131 control.reqseq = chan->buffer_seq;
2132 l2cap_send_sframe(chan, &control);
2137 __set_ack_timer(chan);
2141 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2142 struct msghdr *msg, int len,
2143 int count, struct sk_buff *skb)
2145 struct l2cap_conn *conn = chan->conn;
2146 struct sk_buff **frag;
2149 if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
2155 /* Continuation fragments (no L2CAP header) */
2156 frag = &skb_shinfo(skb)->frag_list;
2158 struct sk_buff *tmp;
2160 count = min_t(unsigned int, conn->mtu, len);
2162 tmp = chan->ops->alloc_skb(chan, 0, count,
2163 msg->msg_flags & MSG_DONTWAIT);
2165 return PTR_ERR(tmp);
2169 if (copy_from_iter(skb_put(*frag, count), count,
2170 &msg->msg_iter) != count)
2176 skb->len += (*frag)->len;
2177 skb->data_len += (*frag)->len;
2179 frag = &(*frag)->next;
2185 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2186 struct msghdr *msg, size_t len)
2188 struct l2cap_conn *conn = chan->conn;
2189 struct sk_buff *skb;
2190 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2191 struct l2cap_hdr *lh;
2193 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2194 __le16_to_cpu(chan->psm), len);
2196 count = min_t(unsigned int, (conn->mtu - hlen), len);
2198 skb = chan->ops->alloc_skb(chan, hlen, count,
2199 msg->msg_flags & MSG_DONTWAIT);
2203 /* Create L2CAP header */
2204 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2205 lh->cid = cpu_to_le16(chan->dcid);
2206 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2207 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2209 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2210 if (unlikely(err < 0)) {
2212 return ERR_PTR(err);
2217 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2218 struct msghdr *msg, size_t len)
2220 struct l2cap_conn *conn = chan->conn;
2221 struct sk_buff *skb;
2223 struct l2cap_hdr *lh;
2225 BT_DBG("chan %p len %zu", chan, len);
2227 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2229 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2230 msg->msg_flags & MSG_DONTWAIT);
2234 /* Create L2CAP header */
2235 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2236 lh->cid = cpu_to_le16(chan->dcid);
2237 lh->len = cpu_to_le16(len);
2239 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2240 if (unlikely(err < 0)) {
2242 return ERR_PTR(err);
2247 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2248 struct msghdr *msg, size_t len,
2251 struct l2cap_conn *conn = chan->conn;
2252 struct sk_buff *skb;
2253 int err, count, hlen;
2254 struct l2cap_hdr *lh;
2256 BT_DBG("chan %p len %zu", chan, len);
2259 return ERR_PTR(-ENOTCONN);
2261 hlen = __ertm_hdr_size(chan);
2264 hlen += L2CAP_SDULEN_SIZE;
2266 if (chan->fcs == L2CAP_FCS_CRC16)
2267 hlen += L2CAP_FCS_SIZE;
2269 count = min_t(unsigned int, (conn->mtu - hlen), len);
2271 skb = chan->ops->alloc_skb(chan, hlen, count,
2272 msg->msg_flags & MSG_DONTWAIT);
2276 /* Create L2CAP header */
2277 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2278 lh->cid = cpu_to_le16(chan->dcid);
2279 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2281 /* Control header is populated later */
2282 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2283 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2285 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2288 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2290 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2291 if (unlikely(err < 0)) {
2293 return ERR_PTR(err);
2296 bt_cb(skb)->l2cap.fcs = chan->fcs;
2297 bt_cb(skb)->l2cap.retries = 0;
2301 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2302 struct sk_buff_head *seg_queue,
2303 struct msghdr *msg, size_t len)
2305 struct sk_buff *skb;
2310 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2312 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2313 * so fragmented skbs are not used. The HCI layer's handling
2314 * of fragmented skbs is not compatible with ERTM's queueing.
2317 /* PDU size is derived from the HCI MTU */
2318 pdu_len = chan->conn->mtu;
2320 /* Constrain PDU size for BR/EDR connections */
2322 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2324 /* Adjust for largest possible L2CAP overhead. */
2326 pdu_len -= L2CAP_FCS_SIZE;
2328 pdu_len -= __ertm_hdr_size(chan);
2330 /* Remote device may have requested smaller PDUs */
2331 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2333 if (len <= pdu_len) {
2334 sar = L2CAP_SAR_UNSEGMENTED;
2338 sar = L2CAP_SAR_START;
2343 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2346 __skb_queue_purge(seg_queue);
2347 return PTR_ERR(skb);
2350 bt_cb(skb)->l2cap.sar = sar;
2351 __skb_queue_tail(seg_queue, skb);
2357 if (len <= pdu_len) {
2358 sar = L2CAP_SAR_END;
2361 sar = L2CAP_SAR_CONTINUE;
2368 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2370 size_t len, u16 sdulen)
2372 struct l2cap_conn *conn = chan->conn;
2373 struct sk_buff *skb;
2374 int err, count, hlen;
2375 struct l2cap_hdr *lh;
2377 BT_DBG("chan %p len %zu", chan, len);
2380 return ERR_PTR(-ENOTCONN);
2382 hlen = L2CAP_HDR_SIZE;
2385 hlen += L2CAP_SDULEN_SIZE;
2387 count = min_t(unsigned int, (conn->mtu - hlen), len);
2389 skb = chan->ops->alloc_skb(chan, hlen, count,
2390 msg->msg_flags & MSG_DONTWAIT);
2394 /* Create L2CAP header */
2395 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2396 lh->cid = cpu_to_le16(chan->dcid);
2397 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2400 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2402 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2403 if (unlikely(err < 0)) {
2405 return ERR_PTR(err);
2411 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2412 struct sk_buff_head *seg_queue,
2413 struct msghdr *msg, size_t len)
2415 struct sk_buff *skb;
2419 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2422 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2428 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2430 __skb_queue_purge(seg_queue);
2431 return PTR_ERR(skb);
2434 __skb_queue_tail(seg_queue, skb);
2440 pdu_len += L2CAP_SDULEN_SIZE;
2447 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2449 struct sk_buff *skb;
2451 struct sk_buff_head seg_queue;
2456 /* Connectionless channel */
2457 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2458 skb = l2cap_create_connless_pdu(chan, msg, len);
2460 return PTR_ERR(skb);
2462 /* Channel lock is released before requesting new skb and then
2463 * reacquired thus we need to recheck channel state.
2465 if (chan->state != BT_CONNECTED) {
2470 l2cap_do_send(chan, skb);
2474 switch (chan->mode) {
2475 case L2CAP_MODE_LE_FLOWCTL:
2476 /* Check outgoing MTU */
2477 if (len > chan->omtu)
2480 if (!chan->tx_credits)
2483 __skb_queue_head_init(&seg_queue);
2485 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2487 if (chan->state != BT_CONNECTED) {
2488 __skb_queue_purge(&seg_queue);
2495 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2497 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2498 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2502 if (!chan->tx_credits)
2503 chan->ops->suspend(chan);
2509 case L2CAP_MODE_BASIC:
2510 /* Check outgoing MTU */
2511 if (len > chan->omtu)
2514 /* Create a basic PDU */
2515 skb = l2cap_create_basic_pdu(chan, msg, len);
2517 return PTR_ERR(skb);
2519 /* Channel lock is released before requesting new skb and then
2520 * reacquired thus we need to recheck channel state.
2522 if (chan->state != BT_CONNECTED) {
2527 l2cap_do_send(chan, skb);
2531 case L2CAP_MODE_ERTM:
2532 case L2CAP_MODE_STREAMING:
2533 /* Check outgoing MTU */
2534 if (len > chan->omtu) {
2539 __skb_queue_head_init(&seg_queue);
2541 /* Do segmentation before calling in to the state machine,
2542 * since it's possible to block while waiting for memory
2545 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2547 /* The channel could have been closed while segmenting,
2548 * check that it is still connected.
2550 if (chan->state != BT_CONNECTED) {
2551 __skb_queue_purge(&seg_queue);
2558 if (chan->mode == L2CAP_MODE_ERTM)
2559 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2561 l2cap_streaming_send(chan, &seg_queue);
2565 /* If the skbs were not queued for sending, they'll still be in
2566 * seg_queue and need to be purged.
2568 __skb_queue_purge(&seg_queue);
2572 BT_DBG("bad state %1.1x", chan->mode);
2578 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2580 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2582 struct l2cap_ctrl control;
2585 BT_DBG("chan %p, txseq %u", chan, txseq);
2587 memset(&control, 0, sizeof(control));
2589 control.super = L2CAP_SUPER_SREJ;
2591 for (seq = chan->expected_tx_seq; seq != txseq;
2592 seq = __next_seq(chan, seq)) {
2593 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2594 control.reqseq = seq;
2595 l2cap_send_sframe(chan, &control);
2596 l2cap_seq_list_append(&chan->srej_list, seq);
2600 chan->expected_tx_seq = __next_seq(chan, txseq);
2603 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2605 struct l2cap_ctrl control;
2607 BT_DBG("chan %p", chan);
2609 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2612 memset(&control, 0, sizeof(control));
2614 control.super = L2CAP_SUPER_SREJ;
2615 control.reqseq = chan->srej_list.tail;
2616 l2cap_send_sframe(chan, &control);
2619 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2621 struct l2cap_ctrl control;
2625 BT_DBG("chan %p, txseq %u", chan, txseq);
2627 memset(&control, 0, sizeof(control));
2629 control.super = L2CAP_SUPER_SREJ;
2631 /* Capture initial list head to allow only one pass through the list. */
2632 initial_head = chan->srej_list.head;
2635 seq = l2cap_seq_list_pop(&chan->srej_list);
2636 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2639 control.reqseq = seq;
2640 l2cap_send_sframe(chan, &control);
2641 l2cap_seq_list_append(&chan->srej_list, seq);
2642 } while (chan->srej_list.head != initial_head);
2645 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2647 struct sk_buff *acked_skb;
2650 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2652 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2655 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2656 chan->expected_ack_seq, chan->unacked_frames);
2658 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2659 ackseq = __next_seq(chan, ackseq)) {
2661 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2663 skb_unlink(acked_skb, &chan->tx_q);
2664 kfree_skb(acked_skb);
2665 chan->unacked_frames--;
2669 chan->expected_ack_seq = reqseq;
2671 if (chan->unacked_frames == 0)
2672 __clear_retrans_timer(chan);
2674 BT_DBG("unacked_frames %u", chan->unacked_frames);
2677 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2679 BT_DBG("chan %p", chan);
2681 chan->expected_tx_seq = chan->buffer_seq;
2682 l2cap_seq_list_clear(&chan->srej_list);
2683 skb_queue_purge(&chan->srej_q);
2684 chan->rx_state = L2CAP_RX_STATE_RECV;
2687 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2688 struct l2cap_ctrl *control,
2689 struct sk_buff_head *skbs, u8 event)
2691 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2695 case L2CAP_EV_DATA_REQUEST:
2696 if (chan->tx_send_head == NULL)
2697 chan->tx_send_head = skb_peek(skbs);
2699 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2700 l2cap_ertm_send(chan);
2702 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2703 BT_DBG("Enter LOCAL_BUSY");
2704 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2706 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2707 /* The SREJ_SENT state must be aborted if we are to
2708 * enter the LOCAL_BUSY state.
2710 l2cap_abort_rx_srej_sent(chan);
2713 l2cap_send_ack(chan);
2716 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2717 BT_DBG("Exit LOCAL_BUSY");
2718 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2720 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2721 struct l2cap_ctrl local_control;
2723 memset(&local_control, 0, sizeof(local_control));
2724 local_control.sframe = 1;
2725 local_control.super = L2CAP_SUPER_RR;
2726 local_control.poll = 1;
2727 local_control.reqseq = chan->buffer_seq;
2728 l2cap_send_sframe(chan, &local_control);
2730 chan->retry_count = 1;
2731 __set_monitor_timer(chan);
2732 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2735 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2736 l2cap_process_reqseq(chan, control->reqseq);
2738 case L2CAP_EV_EXPLICIT_POLL:
2739 l2cap_send_rr_or_rnr(chan, 1);
2740 chan->retry_count = 1;
2741 __set_monitor_timer(chan);
2742 __clear_ack_timer(chan);
2743 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2745 case L2CAP_EV_RETRANS_TO:
2746 l2cap_send_rr_or_rnr(chan, 1);
2747 chan->retry_count = 1;
2748 __set_monitor_timer(chan);
2749 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2751 case L2CAP_EV_RECV_FBIT:
2752 /* Nothing to process */
2759 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2760 struct l2cap_ctrl *control,
2761 struct sk_buff_head *skbs, u8 event)
2763 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2767 case L2CAP_EV_DATA_REQUEST:
2768 if (chan->tx_send_head == NULL)
2769 chan->tx_send_head = skb_peek(skbs);
2770 /* Queue data, but don't send. */
2771 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2773 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2774 BT_DBG("Enter LOCAL_BUSY");
2775 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2777 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2778 /* The SREJ_SENT state must be aborted if we are to
2779 * enter the LOCAL_BUSY state.
2781 l2cap_abort_rx_srej_sent(chan);
2784 l2cap_send_ack(chan);
2787 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2788 BT_DBG("Exit LOCAL_BUSY");
2789 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2791 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2792 struct l2cap_ctrl local_control;
2793 memset(&local_control, 0, sizeof(local_control));
2794 local_control.sframe = 1;
2795 local_control.super = L2CAP_SUPER_RR;
2796 local_control.poll = 1;
2797 local_control.reqseq = chan->buffer_seq;
2798 l2cap_send_sframe(chan, &local_control);
2800 chan->retry_count = 1;
2801 __set_monitor_timer(chan);
2802 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2805 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2806 l2cap_process_reqseq(chan, control->reqseq);
2810 case L2CAP_EV_RECV_FBIT:
2811 if (control && control->final) {
2812 __clear_monitor_timer(chan);
2813 if (chan->unacked_frames > 0)
2814 __set_retrans_timer(chan);
2815 chan->retry_count = 0;
2816 chan->tx_state = L2CAP_TX_STATE_XMIT;
2817 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2820 case L2CAP_EV_EXPLICIT_POLL:
2823 case L2CAP_EV_MONITOR_TO:
2824 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2825 l2cap_send_rr_or_rnr(chan, 1);
2826 __set_monitor_timer(chan);
2827 chan->retry_count++;
2829 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch a transmit-side event to the handler for the channel's
 * current ERTM TX state (XMIT or WAIT_F).
 */
2837 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2838 struct sk_buff_head *skbs, u8 event)
2840 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2841 chan, control, skbs, event, chan->tx_state);
2843 switch (chan->tx_state) {
2844 case L2CAP_TX_STATE_XMIT:
2845 l2cap_tx_state_xmit(chan, control, skbs, event);
2847 case L2CAP_TX_STATE_WAIT_F:
2848 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed a received reqseq (acknowledgement) plus F-bit into the TX state
 * machine; no data is queued (skbs is NULL).
 */
2856 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2857 struct l2cap_ctrl *control)
2859 BT_DBG("chan %p, control %p", chan, control);
2860 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed only the F-bit of a received frame into the TX state machine. */
2863 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2864 struct l2cap_ctrl *control)
2866 BT_DBG("chan %p, control %p", chan, control);
2867 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2870 /* Copy frame to all raw sockets on that connection */
2871 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2873 struct sk_buff *nskb;
2874 struct l2cap_chan *chan;
2876 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under chan_lock; each matching raw
 * channel receives its own clone of the frame.
 */
2878 mutex_lock(&conn->chan_lock);
2880 list_for_each_entry(chan, &conn->chan_l, list) {
2881 if (chan->chan_type != L2CAP_CHAN_RAW)
2884 /* Don't send frame to the channel it came from */
2885 if (bt_cb(skb)->l2cap.chan == chan)
2888 nskb = skb_clone(skb, GFP_KERNEL);
2891 if (chan->ops->recv(chan, nskb))
2895 mutex_unlock(&conn->chan_lock);
2898 /* ---- L2CAP signalling commands ---- */
/* Allocate and build a signalling command PDU: basic L2CAP header plus
 * command header plus dlen bytes of payload.  Payload beyond the
 * connection MTU is carried in continuation fragments chained on
 * frag_list (fragments carry no L2CAP header).  The destination CID is
 * the LE or BR/EDR signalling channel depending on the link type.
 * NOTE(review): the failure/return paths are elided in this listing;
 * presumably NULL is returned when allocation fails or the MTU is too
 * small -- confirm against the full source.
 */
2899 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2900 u8 ident, u16 dlen, void *data)
2902 struct sk_buff *skb, **frag;
2903 struct l2cap_cmd_hdr *cmd;
2904 struct l2cap_hdr *lh;
2907 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2908 conn, code, ident, dlen);
2910 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2913 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2914 count = min_t(unsigned int, conn->mtu, len);
2916 skb = bt_skb_alloc(count, GFP_KERNEL);
2920 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2921 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2923 if (conn->hcon->type == LE_LINK)
2924 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2926 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2928 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2931 cmd->len = cpu_to_le16(dlen);
/* First fragment: whatever payload fits after the two headers. */
2934 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2935 memcpy(skb_put(skb, count), data, count);
2941 /* Continuation fragments (no L2CAP header) */
2942 frag = &skb_shinfo(skb)->frag_list;
2944 count = min_t(unsigned int, conn->mtu, len);
2946 *frag = bt_skb_alloc(count, GFP_KERNEL);
2950 memcpy(skb_put(*frag, count), data, count);
2955 frag = &(*frag)->next;
/* Decode one configuration option at *ptr, storing its type and payload
 * length in *type/*olen.  1-, 2- and 4-byte payloads are returned by
 * value in *val; larger payloads are returned as a pointer to the raw
 * option bytes cast into *val.  Returns the encoded size consumed
 * (option header plus payload), which callers subtract from the
 * remaining length.
 */
2965 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2968 struct l2cap_conf_opt *opt = *ptr;
2971 len = L2CAP_CONF_OPT_SIZE + opt->len;
2979 *val = *((u8 *) opt->val);
2983 *val = get_unaligned_le16(opt->val);
2987 *val = get_unaligned_le32(opt->val);
2991 *val = (unsigned long) opt->val;
2995 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr and advance
 * the cursor past it.  The size argument is the space remaining in the
 * output buffer; the bounds check below makes an option that would not
 * fit a silent no-op instead of a buffer overflow.
 */
2999 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3001 struct l2cap_conf_opt *opt = *ptr;
3003 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3005 if (size < L2CAP_CONF_OPT_SIZE + len)
/* 1/2/4-byte values are stored inline; anything else is copied from the
 * buffer that val points at.
 */
3013 *((u8 *) opt->val) = val;
3017 put_unaligned_le16(val, opt->val);
3021 put_unaligned_le32(val, opt->val);
3025 memcpy(opt->val, (void *) val, len);
3029 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification option built from the channel's
 * local QoS parameters.  ERTM channels advertise their negotiated
 * service type with default access latency and flush timeout; streaming
 * channels always advertise best-effort service.
 */
3032 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3034 struct l2cap_conf_efs efs;
3036 switch (chan->mode) {
3037 case L2CAP_MODE_ERTM:
3038 efs.id = chan->local_id;
3039 efs.stype = chan->local_stype;
3040 efs.msdu = cpu_to_le16(chan->local_msdu);
3041 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3042 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3043 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3046 case L2CAP_MODE_STREAMING:
3048 efs.stype = L2CAP_SERV_BESTEFFORT;
3049 efs.msdu = cpu_to_le16(chan->local_msdu);
3050 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3059 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3060 (unsigned long) &efs, size);
/* Delayed-work handler for the ERTM acknowledgement timer: if received
 * frames are still unacknowledged, send an RR (or RNR while busy).  The
 * final put balances a channel reference -- presumably taken when this
 * work was scheduled; confirm against the timer-arm helpers.
 */
3063 static void l2cap_ack_timeout(struct work_struct *work)
3065 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3069 BT_DBG("chan %p", chan);
3071 l2cap_chan_lock(chan);
3073 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3074 chan->last_acked_seq);
3077 l2cap_send_rr_or_rnr(chan, 0);
3079 l2cap_chan_unlock(chan);
3080 l2cap_chan_put(chan);
/* Initialise a channel's ERTM/streaming state: reset all sequence
 * counters, initialise queues, timers and the SREJ/retransmit sequence
 * lists.  For non-ERTM modes only the common state is reset.  Returns 0
 * on success or a negative errno from the sequence-list allocations.
 */
3083 int l2cap_ertm_init(struct l2cap_chan *chan)
3087 chan->next_tx_seq = 0;
3088 chan->expected_tx_seq = 0;
3089 chan->expected_ack_seq = 0;
3090 chan->unacked_frames = 0;
3091 chan->buffer_seq = 0;
3092 chan->frames_sent = 0;
3093 chan->last_acked_seq = 0;
3095 chan->sdu_last_frag = NULL;
3098 skb_queue_head_init(&chan->tx_q);
3100 chan->local_amp_id = AMP_ID_BREDR;
3101 chan->move_id = AMP_ID_BREDR;
3102 chan->move_state = L2CAP_MOVE_STABLE;
3103 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3105 if (chan->mode != L2CAP_MODE_ERTM)
3108 chan->rx_state = L2CAP_RX_STATE_RECV;
3109 chan->tx_state = L2CAP_TX_STATE_XMIT;
3111 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3112 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3113 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3115 skb_queue_head_init(&chan->srej_q);
3117 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
/* If the retransmit list cannot be allocated, undo the srej_list
 * allocation before returning the error.
 */
3121 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3123 l2cap_seq_list_free(&chan->srej_list);
/* Keep the requested ERTM/streaming mode if the remote advertises
 * support for it in its feature mask; otherwise fall back to basic
 * mode.
 */
3128 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3131 case L2CAP_MODE_STREAMING:
3132 case L2CAP_MODE_ERTM:
3133 if (l2cap_mode_supported(mode, remote_feat_mask))
3137 return L2CAP_MODE_BASIC;
/* Extended window size is usable only when both sides do: we expose the
 * A2MP fixed channel locally and the remote advertises EXT_WINDOW.
 */
3141 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3143 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3144 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
/* Extended flow specification requires local A2MP support and the
 * remote's EXT_FLOW feature bit.
 */
3147 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3149 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3150 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
/* Fill in the RFC option's retransmission and monitor timeouts.  On an
 * AMP link they are derived from the controller's best-effort flush
 * timeout (clamped to the 16-bit field); otherwise the spec defaults
 * are used.
 */
3153 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3154 struct l2cap_conf_rfc *rfc)
3156 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3157 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3159 /* Class 1 devices have must have ERTM timeouts
3160 * exceeding the Link Supervision Timeout. The
3161 * default Link Supervision Timeout for AMP
3162 * controllers is 10 seconds.
3164 * Class 1 devices use 0xffffffff for their
3165 * best-effort flush timeout, so the clamping logic
3166 * will result in a timeout that meets the above
3167 * requirement. ERTM timeouts are 16-bit values, so
3168 * the maximum timeout is 65.535 seconds.
3171 /* Convert timeout to milliseconds and round */
3172 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3174 /* This is the recommended formula for class 2 devices
3175 * that start ERTM timers when packets are sent to the
3178 ertm_to = 3 * ertm_to + 500;
3180 if (ertm_to > 0xffff)
3183 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3184 rfc->monitor_timeout = rfc->retrans_timeout;
3186 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3187 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Decide the transmit window: enable extended control fields when the
 * requested window exceeds the default and both peers support extended
 * windows; otherwise clamp to the standard 63-frame window.  ack_win
 * starts out equal to tx_win and is narrowed later during negotiation.
 */
3191 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3193 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3194 __l2cap_ews_supported(chan->conn)) {
3195 /* use extended control field */
3196 set_bit(FLAG_EXT_CTRL, &chan->flags);
3197 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3199 chan->tx_win = min_t(u16, chan->tx_win,
3200 L2CAP_DEFAULT_TX_WINDOW);
3201 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3203 chan->ack_win = chan->tx_win;
/* Build an outgoing Configuration Request for the channel into data
 * (data_size bytes).  Options are appended with endptr - ptr as the
 * remaining space, so an over-full request is truncated rather than
 * overflowed.  Returns the number of bytes used (return statement is
 * elided from this listing).
 */
3206 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3208 struct l2cap_conf_req *req = data;
3209 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3210 void *ptr = req->data;
3211 void *endptr = data + data_size;
3214 BT_DBG("chan %p", chan);
/* Mode selection happens only on the first request of a negotiation. */
3216 if (chan->num_conf_req || chan->num_conf_rsp)
3219 switch (chan->mode) {
3220 case L2CAP_MODE_STREAMING:
3221 case L2CAP_MODE_ERTM:
3222 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3225 if (__l2cap_efs_supported(chan->conn))
3226 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3230 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only advertise the MTU when it differs from the default. */
3235 if (chan->imtu != L2CAP_DEFAULT_MTU)
3236 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3238 switch (chan->mode) {
3239 case L2CAP_MODE_BASIC:
/* A basic-mode RFC option is only needed when the peer knows about
 * retransmission/streaming modes at all.
 */
3243 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3244 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3247 rfc.mode = L2CAP_MODE_BASIC;
3249 rfc.max_transmit = 0;
3250 rfc.retrans_timeout = 0;
3251 rfc.monitor_timeout = 0;
3252 rfc.max_pdu_size = 0;
3254 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3255 (unsigned long) &rfc, endptr - ptr);
3258 case L2CAP_MODE_ERTM:
3259 rfc.mode = L2CAP_MODE_ERTM;
3260 rfc.max_transmit = chan->max_tx;
3262 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU size is capped by what fits in the connection MTU once the
 * extended header, SDU length and FCS are accounted for.
 */
3264 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3265 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3267 rfc.max_pdu_size = cpu_to_le16(size);
3269 l2cap_txwin_setup(chan);
3271 rfc.txwin_size = min_t(u16, chan->tx_win,
3272 L2CAP_DEFAULT_TX_WINDOW);
3274 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3275 (unsigned long) &rfc, endptr - ptr);
3277 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3278 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3280 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3281 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3282 chan->tx_win, endptr - ptr);
/* Request FCS omission when we don't want it and the peer supports
 * the FCS option.
 */
3284 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3285 if (chan->fcs == L2CAP_FCS_NONE ||
3286 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3287 chan->fcs = L2CAP_FCS_NONE;
3288 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3289 chan->fcs, endptr - ptr);
3293 case L2CAP_MODE_STREAMING:
3294 l2cap_txwin_setup(chan);
3295 rfc.mode = L2CAP_MODE_STREAMING;
3297 rfc.max_transmit = 0;
3298 rfc.retrans_timeout = 0;
3299 rfc.monitor_timeout = 0;
3301 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3302 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3304 rfc.max_pdu_size = cpu_to_le16(size);
3306 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3307 (unsigned long) &rfc, endptr - ptr);
3309 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3310 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3312 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3313 if (chan->fcs == L2CAP_FCS_NONE ||
3314 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3315 chan->fcs = L2CAP_FCS_NONE;
3316 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3317 chan->fcs, endptr - ptr);
3322 req->dcid = cpu_to_le16(chan->dcid);
3323 req->flags = cpu_to_le16(0);
/* Parse the accumulated Configuration Request stored in chan->conf_req
 * and build the Configuration Response into data (data_size bytes).
 * Unknown non-hint options are echoed back with result UNKNOWN; an
 * incompatible mode or EFS service type yields -ECONNREFUSED.  Returns
 * the response length (return statement elided from this listing).
 */
3328 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3330 struct l2cap_conf_rsp *rsp = data;
3331 void *ptr = rsp->data;
3332 void *endptr = data + data_size;
3333 void *req = chan->conf_req;
3334 int len = chan->conf_len;
3335 int type, hint, olen;
3337 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3338 struct l2cap_conf_efs efs;
3340 u16 mtu = L2CAP_DEFAULT_MTU;
3341 u16 result = L2CAP_CONF_SUCCESS;
3344 BT_DBG("chan %p", chan);
/* First pass: decode every option the peer sent. */
3346 while (len >= L2CAP_CONF_OPT_SIZE) {
3347 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3351 hint = type & L2CAP_CONF_HINT;
3352 type &= L2CAP_CONF_MASK;
3355 case L2CAP_CONF_MTU:
3361 case L2CAP_CONF_FLUSH_TO:
3364 chan->flush_to = val;
3367 case L2CAP_CONF_QOS:
3370 case L2CAP_CONF_RFC:
3371 if (olen != sizeof(rfc))
3373 memcpy(&rfc, (void *) val, olen);
3376 case L2CAP_CONF_FCS:
3379 if (val == L2CAP_FCS_NONE)
3380 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3383 case L2CAP_CONF_EFS:
3384 if (olen != sizeof(efs))
3387 memcpy(&efs, (void *) val, olen);
/* Extended window sizes are only valid when A2MP is available. */
3390 case L2CAP_CONF_EWS:
3393 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3394 return -ECONNREFUSED;
3395 set_bit(FLAG_EXT_CTRL, &chan->flags);
3396 set_bit(CONF_EWS_RECV, &chan->conf_state);
3397 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3398 chan->remote_tx_win = val;
/* Unknown mandatory options are echoed back so the peer learns what
 * we could not understand.
 */
3404 result = L2CAP_CONF_UNKNOWN;
3405 *((u8 *) ptr++) = type;
/* Mode fallback is only permitted early in the negotiation. */
3410 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3413 switch (chan->mode) {
3414 case L2CAP_MODE_STREAMING:
3415 case L2CAP_MODE_ERTM:
3416 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3417 chan->mode = l2cap_select_mode(rfc.mode,
3418 chan->conn->feat_mask);
3423 if (__l2cap_efs_supported(chan->conn))
3424 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3426 return -ECONNREFUSED;
3429 if (chan->mode != rfc.mode)
3430 return -ECONNREFUSED;
3436 if (chan->mode != rfc.mode) {
3437 result = L2CAP_CONF_UNACCEPT;
3438 rfc.mode = chan->mode;
3440 if (chan->num_conf_rsp == 1)
3441 return -ECONNREFUSED;
3443 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3444 (unsigned long) &rfc, endptr - ptr);
3447 if (result == L2CAP_CONF_SUCCESS) {
3448 /* Configure output options and let the other side know
3449 * which ones we don't like. */
3451 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3452 result = L2CAP_CONF_UNACCEPT;
3455 set_bit(CONF_MTU_DONE, &chan->conf_state);
3457 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
/* EFS service types must agree unless one side is NOTRAFIC. */
3460 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3461 efs.stype != L2CAP_SERV_NOTRAFIC &&
3462 efs.stype != chan->local_stype) {
3464 result = L2CAP_CONF_UNACCEPT;
3466 if (chan->num_conf_req >= 1)
3467 return -ECONNREFUSED;
3469 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3471 (unsigned long) &efs, endptr - ptr);
3473 /* Send PENDING Conf Rsp */
3474 result = L2CAP_CONF_PENDING;
3475 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3480 case L2CAP_MODE_BASIC:
3481 chan->fcs = L2CAP_FCS_NONE;
3482 set_bit(CONF_MODE_DONE, &chan->conf_state);
3485 case L2CAP_MODE_ERTM:
3486 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3487 chan->remote_tx_win = rfc.txwin_size;
3489 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3491 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's PDU size to what our connection MTU can carry. */
3493 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3494 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3495 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3496 rfc.max_pdu_size = cpu_to_le16(size);
3497 chan->remote_mps = size;
3499 __l2cap_set_ertm_timeouts(chan, &rfc);
3501 set_bit(CONF_MODE_DONE, &chan->conf_state);
3503 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3504 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3506 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3507 chan->remote_id = efs.id;
3508 chan->remote_stype = efs.stype;
3509 chan->remote_msdu = le16_to_cpu(efs.msdu);
3510 chan->remote_flush_to =
3511 le32_to_cpu(efs.flush_to);
3512 chan->remote_acc_lat =
3513 le32_to_cpu(efs.acc_lat);
3514 chan->remote_sdu_itime =
3515 le32_to_cpu(efs.sdu_itime);
3516 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3518 (unsigned long) &efs, endptr - ptr);
3522 case L2CAP_MODE_STREAMING:
3523 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3524 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3525 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3526 rfc.max_pdu_size = cpu_to_le16(size);
3527 chan->remote_mps = size;
3529 set_bit(CONF_MODE_DONE, &chan->conf_state);
3531 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3532 (unsigned long) &rfc, endptr - ptr);
3537 result = L2CAP_CONF_UNACCEPT;
3539 memset(&rfc, 0, sizeof(rfc));
3540 rfc.mode = chan->mode;
3543 if (result == L2CAP_CONF_SUCCESS)
3544 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3546 rsp->scid = cpu_to_le16(chan->dcid);
3547 rsp->result = cpu_to_le16(result);
3548 rsp->flags = cpu_to_le16(0);
/* Parse a Configuration Response (rsp, len bytes) and build a follow-up
 * Configuration Request into data (size bytes), adjusting channel state
 * to the values the peer accepted or counter-proposed.  *result carries
 * the response result in and may be updated.  Returns the new request's
 * length, or -ECONNREFUSED on an unacceptable mode or EFS service type.
 */
3553 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3554 void *data, size_t size, u16 *result)
3556 struct l2cap_conf_req *req = data;
3557 void *ptr = req->data;
3558 void *endptr = data + size;
3561 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3562 struct l2cap_conf_efs efs;
3564 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3566 while (len >= L2CAP_CONF_OPT_SIZE) {
3567 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3572 case L2CAP_CONF_MTU:
/* The peer may not push our incoming MTU below the spec minimum. */
3575 if (val < L2CAP_DEFAULT_MIN_MTU) {
3576 *result = L2CAP_CONF_UNACCEPT;
3577 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3580 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3584 case L2CAP_CONF_FLUSH_TO:
3587 chan->flush_to = val;
3588 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3589 chan->flush_to, endptr - ptr);
3592 case L2CAP_CONF_RFC:
3593 if (olen != sizeof(rfc))
3595 memcpy(&rfc, (void *)val, olen);
/* State-2 devices cannot renegotiate the mode. */
3596 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3597 rfc.mode != chan->mode)
3598 return -ECONNREFUSED;
3600 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3601 (unsigned long) &rfc, endptr - ptr);
3604 case L2CAP_CONF_EWS:
3607 chan->ack_win = min_t(u16, val, chan->ack_win);
3608 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3609 chan->tx_win, endptr - ptr);
3612 case L2CAP_CONF_EFS:
3613 if (olen != sizeof(efs))
3615 memcpy(&efs, (void *)val, olen);
3616 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3617 efs.stype != L2CAP_SERV_NOTRAFIC &&
3618 efs.stype != chan->local_stype)
3619 return -ECONNREFUSED;
3620 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3621 (unsigned long) &efs, endptr - ptr);
3624 case L2CAP_CONF_FCS:
3627 if (*result == L2CAP_CONF_PENDING)
3628 if (val == L2CAP_FCS_NONE)
3629 set_bit(CONF_RECV_NO_FCS,
/* A peer stuck in basic mode cannot be argued into another mode. */
3635 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3636 return -ECONNREFUSED;
3638 chan->mode = rfc.mode;
/* On success/pending, commit the negotiated ERTM/streaming values. */
3640 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3642 case L2CAP_MODE_ERTM:
3643 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3644 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3645 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3646 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3647 chan->ack_win = min_t(u16, chan->ack_win,
3650 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3651 chan->local_msdu = le16_to_cpu(efs.msdu);
3652 chan->local_sdu_itime =
3653 le32_to_cpu(efs.sdu_itime);
3654 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3655 chan->local_flush_to =
3656 le32_to_cpu(efs.flush_to);
3660 case L2CAP_MODE_STREAMING:
3661 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3665 req->dcid = cpu_to_le16(chan->dcid);
3666 req->flags = cpu_to_le16(0);
/* Build a minimal Configuration Response header (no options) with the
 * given result and flags.  Returns the response length (return
 * statement elided from this listing).
 */
3671 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3672 u16 result, u16 flags)
3674 struct l2cap_conf_rsp *rsp = data;
3675 void *ptr = rsp->data;
3677 BT_DBG("chan %p", chan);
3679 rsp->scid = cpu_to_le16(chan->dcid);
3680 rsp->result = cpu_to_le16(result);
3681 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE Credit Based Connection Response for a channel
 * whose acceptance was postponed (e.g. pending authorization), using
 * the ident saved from the original request.
 */
3686 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3688 struct l2cap_le_conn_rsp rsp;
3689 struct l2cap_conn *conn = chan->conn;
3691 BT_DBG("chan %p", chan);
3693 rsp.dcid = cpu_to_le16(chan->scid);
3694 rsp.mtu = cpu_to_le16(chan->imtu);
3695 rsp.mps = cpu_to_le16(chan->mps);
3696 rsp.credits = cpu_to_le16(chan->rx_credits);
3697 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3699 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred BR/EDR Connection (or Create Channel) Response for
 * a previously deferred channel, then kick off configuration by sending
 * the first Configuration Request if one has not been sent already.
 */
3703 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3705 struct l2cap_conn_rsp rsp;
3706 struct l2cap_conn *conn = chan->conn;
3710 rsp.scid = cpu_to_le16(chan->dcid);
3711 rsp.dcid = cpu_to_le16(chan->scid);
3712 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3713 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3716 rsp_code = L2CAP_CREATE_CHAN_RSP;
3718 rsp_code = L2CAP_CONN_RSP;
3720 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3722 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only send the config request once per channel. */
3724 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3727 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3728 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3729 chan->num_conf_req++;
/* Extract RFC (and extended window) parameters from a successful
 * Configuration Response and commit them to the channel.  Sane defaults
 * are preloaded in case a misbehaving remote omitted the options.
 */
3732 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3736 /* Use sane default values in case a misbehaving remote device
3737 * did not send an RFC or extended window size option.
3739 u16 txwin_ext = chan->ack_win;
3740 struct l2cap_conf_rfc rfc = {
3742 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3743 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3744 .max_pdu_size = cpu_to_le16(chan->imtu),
3745 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3748 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming channels carry RFC parameters. */
3750 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3753 while (len >= L2CAP_CONF_OPT_SIZE) {
3754 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3759 case L2CAP_CONF_RFC:
3760 if (olen != sizeof(rfc))
3762 memcpy(&rfc, (void *)val, olen);
3764 case L2CAP_CONF_EWS:
3773 case L2CAP_MODE_ERTM:
3774 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3775 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3776 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3777 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3778 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3780 chan->ack_win = min_t(u16, chan->ack_win,
3783 case L2CAP_MODE_STREAMING:
3784 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request (matched by ident), treat feature discovery as
 * done and proceed with connection startup anyway.
 */
3788 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3789 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3792 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3794 if (cmd_len < sizeof(*rej))
3797 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3800 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3801 cmd->ident == conn->info_ident) {
3802 cancel_delayed_work(&conn->info_timer);
3804 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3805 conn->info_ident = 0;
3807 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: locate a listening channel for
 * the PSM, run security checks, create the new child channel, send the
 * Connection Response (possibly PENDING), and start feature discovery
 * and configuration as appropriate.  Returns the new channel or NULL.
 */
3813 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3814 struct l2cap_cmd_hdr *cmd,
3815 u8 *data, u8 rsp_code, u8 amp_id)
3817 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3818 struct l2cap_conn_rsp rsp;
3819 struct l2cap_chan *chan = NULL, *pchan;
3820 int result, status = L2CAP_CS_NO_INFO;
3822 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3823 __le16 psm = req->psm;
3825 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3827 /* Check if we have socket listening on psm */
3828 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3829 &conn->hcon->dst, ACL_LINK);
3831 result = L2CAP_CR_BAD_PSM;
3835 mutex_lock(&conn->chan_lock);
3836 l2cap_chan_lock(pchan);
3838 /* Check if the ACL is secure enough (if not SDP) */
3839 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3840 !hci_conn_check_link_mode(conn->hcon)) {
3841 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3842 result = L2CAP_CR_SEC_BLOCK;
3846 result = L2CAP_CR_NO_MEM;
3848 /* Check if we already have channel with that dcid */
3849 if (__l2cap_get_chan_by_dcid(conn, scid))
3852 chan = pchan->ops->new_connection(pchan);
3856 /* For certain devices (ex: HID mouse), support for authentication,
3857 * pairing and bonding is optional. For such devices, inorder to avoid
3858 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3859 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3861 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3863 bacpy(&chan->src, &conn->hcon->src);
3864 bacpy(&chan->dst, &conn->hcon->dst);
3865 chan->src_type = bdaddr_src_type(conn->hcon);
3866 chan->dst_type = bdaddr_dst_type(conn->hcon);
3869 chan->local_amp_id = amp_id;
3871 __l2cap_chan_add(conn, chan);
3875 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
/* Save the request ident so a deferred response can reuse it. */
3877 chan->ident = cmd->ident;
3879 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3880 if (l2cap_chan_check_security(chan, false)) {
3881 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3882 l2cap_state_change(chan, BT_CONNECT2);
3883 result = L2CAP_CR_PEND;
3884 status = L2CAP_CS_AUTHOR_PEND;
3885 chan->ops->defer(chan);
3887 /* Force pending result for AMP controllers.
3888 * The connection will succeed after the
3889 * physical link is up.
3891 if (amp_id == AMP_ID_BREDR) {
3892 l2cap_state_change(chan, BT_CONFIG);
3893 result = L2CAP_CR_SUCCESS;
3895 l2cap_state_change(chan, BT_CONNECT2);
3896 result = L2CAP_CR_PEND;
3898 status = L2CAP_CS_NO_INFO;
3901 l2cap_state_change(chan, BT_CONNECT2);
3902 result = L2CAP_CR_PEND;
3903 status = L2CAP_CS_AUTHEN_PEND;
3906 l2cap_state_change(chan, BT_CONNECT2);
3907 result = L2CAP_CR_PEND;
3908 status = L2CAP_CS_NO_INFO;
3912 l2cap_chan_unlock(pchan);
3913 mutex_unlock(&conn->chan_lock);
3914 l2cap_chan_put(pchan);
3917 rsp.scid = cpu_to_le16(scid);
3918 rsp.dcid = cpu_to_le16(dcid);
3919 rsp.result = cpu_to_le16(result);
3920 rsp.status = cpu_to_le16(status);
3921 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* If features are still unknown, fire off an Information Request. */
3923 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3924 struct l2cap_info_req info;
3925 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3927 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3928 conn->info_ident = l2cap_get_ident(conn);
3930 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3932 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3933 sizeof(info), &info);
/* On immediate success, start configuration right away. */
3936 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3937 result == L2CAP_CR_SUCCESS) {
3939 set_bit(CONF_REQ_SENT, &chan->conf_state);
3940 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3941 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3942 chan->num_conf_req++;
/* Signalling handler for L2CAP_CONN_REQ: validate the length, notify
 * the management layer of the device connection (once), then delegate
 * channel creation to l2cap_connect().
 */
3948 static int l2cap_connect_req(struct l2cap_conn *conn,
3949 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3951 struct hci_dev *hdev = conn->hcon->hdev;
3952 struct hci_conn *hcon = conn->hcon;
3954 if (cmd_len < sizeof(struct l2cap_conn_req))
3958 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3959 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3960 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3961 hci_dev_unlock(hdev);
3963 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection/Create Channel Response: find the channel by
 * scid (or by request ident while still pending), then either move to
 * BT_CONFIG and send the first Configuration Request, mark the channel
 * as still pending, or tear it down on refusal.
 */
3967 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3968 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3971 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3972 u16 scid, dcid, result, status;
3973 struct l2cap_chan *chan;
3977 if (cmd_len < sizeof(*rsp))
3980 scid = __le16_to_cpu(rsp->scid);
3981 dcid = __le16_to_cpu(rsp->dcid);
3982 result = __le16_to_cpu(rsp->result);
3983 status = __le16_to_cpu(rsp->status);
3985 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3986 dcid, scid, result, status);
3988 mutex_lock(&conn->chan_lock);
3991 chan = __l2cap_get_chan_by_scid(conn, scid);
/* A zero scid means the response identifies the channel by ident. */
3997 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4006 l2cap_chan_lock(chan);
4009 case L2CAP_CR_SUCCESS:
4010 l2cap_state_change(chan, BT_CONFIG);
4013 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4015 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4018 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4019 l2cap_build_conf_req(chan, req, sizeof(req)), req);
4020 chan->num_conf_req++;
4024 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4028 l2cap_chan_del(chan, ECONNREFUSED);
4032 l2cap_chan_unlock(chan);
4035 mutex_unlock(&conn->chan_lock);
/* Apply the negotiated FCS setting: FCS only applies to ERTM/streaming
 * modes, and defaults to CRC16 unless both sides agreed to drop it.
 */
4040 static inline void set_default_fcs(struct l2cap_chan *chan)
4042 /* FCS is enabled only in ERTM or streaming mode, if one or both
4045 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4046 chan->fcs = L2CAP_FCS_NONE;
4047 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4048 chan->fcs = L2CAP_FCS_CRC16;
/* Clear the local pending flag and send a SUCCESS Configuration
 * Response for a channel that had deferred its answer during EFS
 * negotiation.
 */
4051 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4052 u8 ident, u16 flags)
4054 struct l2cap_conn *conn = chan->conn;
4056 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4059 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4060 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4062 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4063 l2cap_build_conf_rsp(chan, data,
4064 L2CAP_CONF_SUCCESS, flags), data);
/* Send a Command Reject with reason "invalid CID" carrying the
 * offending source/destination CIDs.
 */
4067 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4070 struct l2cap_cmd_rej_cid rej;
4072 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4073 rej.scid = __cpu_to_le16(scid);
4074 rej.dcid = __cpu_to_le16(dcid);
4076 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configuration Request.  Requests may arrive in
 * several fragments (continuation flag); they are accumulated in
 * chan->conf_req until complete, then parsed and answered.  When both
 * directions are configured the channel becomes ready (initialising
 * ERTM state first where needed).
 */
4079 static inline int l2cap_config_req(struct l2cap_conn *conn,
4080 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4083 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4086 struct l2cap_chan *chan;
4089 if (cmd_len < sizeof(*req))
4092 dcid = __le16_to_cpu(req->dcid);
4093 flags = __le16_to_cpu(req->flags);
4095 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4097 chan = l2cap_get_chan_by_scid(conn, dcid);
4099 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Configuration is only legal in the config/connect phases. */
4103 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4104 chan->state != BT_CONNECTED) {
4105 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4110 /* Reject if config buffer is too small. */
4111 len = cmd_len - sizeof(*req);
4112 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4113 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4114 l2cap_build_conf_rsp(chan, rsp,
4115 L2CAP_CONF_REJECT, flags), rsp);
4120 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4121 chan->conf_len += len;
4123 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4124 /* Incomplete config. Send empty response. */
4125 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4126 l2cap_build_conf_rsp(chan, rsp,
4127 L2CAP_CONF_SUCCESS, flags), rsp);
4131 /* Complete config. */
4132 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4134 l2cap_send_disconn_req(chan, ECONNRESET);
4138 chan->ident = cmd->ident;
4139 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4140 chan->num_conf_rsp++;
4142 /* Reset config buffer. */
4145 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finalise FCS, init ERTM, go ready. */
4148 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4149 set_default_fcs(chan);
4151 if (chan->mode == L2CAP_MODE_ERTM ||
4152 chan->mode == L2CAP_MODE_STREAMING)
4153 err = l2cap_ertm_init(chan);
4156 l2cap_send_disconn_req(chan, -err);
4158 l2cap_chan_ready(chan);
4163 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4165 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4166 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4167 chan->num_conf_req++;
4170 /* Got Conf Rsp PENDING from remote side and assume we sent
4171 Conf Rsp PENDING in the code above */
4172 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4173 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4175 /* check compatibility */
4177 /* Send rsp for BR/EDR channel */
4179 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4181 chan->ident = cmd->ident;
4185 l2cap_chan_unlock(chan);
/* Handle an incoming Configuration Response.  SUCCESS commits the
 * negotiated RFC parameters; PENDING may trigger an EFS response or an
 * AMP logical link; UNACCEPT re-negotiates up to a bounded number of
 * attempts; anything else tears the channel down.
 */
4189 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4190 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4193 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4194 u16 scid, flags, result;
4195 struct l2cap_chan *chan;
4196 int len = cmd_len - sizeof(*rsp);
4199 if (cmd_len < sizeof(*rsp))
4202 scid = __le16_to_cpu(rsp->scid);
4203 flags = __le16_to_cpu(rsp->flags);
4204 result = __le16_to_cpu(rsp->result);
4206 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4209 chan = l2cap_get_chan_by_scid(conn, scid);
4214 case L2CAP_CONF_SUCCESS:
4215 l2cap_conf_rfc_get(chan, rsp->data, len);
4216 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4219 case L2CAP_CONF_PENDING:
4220 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4222 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4225 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4226 buf, sizeof(buf), &result);
4228 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR channels answer immediately; AMP channels wait for the
 * logical link before responding.
 */
4232 if (!chan->hs_hcon) {
4233 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4236 if (l2cap_check_efs(chan)) {
4237 amp_create_logical_link(chan);
4238 chan->ident = cmd->ident;
4244 case L2CAP_CONF_UNACCEPT:
4245 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Bound the counter-proposal so it fits the request buffer. */
4248 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4249 l2cap_send_disconn_req(chan, ECONNRESET);
4253 /* throw out any old stored conf requests */
4254 result = L2CAP_CONF_SUCCESS;
4255 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4256 req, sizeof(req), &result);
4258 l2cap_send_disconn_req(chan, ECONNRESET);
4262 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4263 L2CAP_CONF_REQ, len, req);
4264 chan->num_conf_req++;
4265 if (result != L2CAP_CONF_SUCCESS)
4271 l2cap_chan_set_err(chan, ECONNRESET);
4273 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4274 l2cap_send_disconn_req(chan, ECONNRESET);
4278 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4281 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4283 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4284 set_default_fcs(chan);
4286 if (chan->mode == L2CAP_MODE_ERTM ||
4287 chan->mode == L2CAP_MODE_STREAMING)
4288 err = l2cap_ertm_init(chan);
4291 l2cap_send_disconn_req(chan, -err);
4293 l2cap_chan_ready(chan);
4297 l2cap_chan_unlock(chan);
/* Handle an incoming L2CAP Disconnection Request (BR/EDR or LE).
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes, some
 * lines (braces, returns, u16 dcid/scid declaration) elided.  Bytes left
 * untouched.
 *
 * Validates the request, echoes our CID pair back in the response, then
 * shuts down and deletes the channel.  Returns 0 or -EPROTO (elided).
 */
4301 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4302 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4305 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4306 struct l2cap_disconn_rsp rsp;
4308 struct l2cap_chan *chan;
/* Fixed-size command: anything else is a protocol violation */
4310 if (cmd_len != sizeof(*req))
4313 scid = __le16_to_cpu(req->scid);
4314 dcid = __le16_to_cpu(req->dcid);
4316 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4318 mutex_lock(&conn->chan_lock);
/* Peer's dcid is our scid; unknown CID gets a Command Reject instead */
4320 chan = __l2cap_get_chan_by_scid(conn, dcid);
4322 mutex_unlock(&conn->chan_lock);
4323 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
/* Hold a ref so the channel survives until we drop our lock below */
4327 l2cap_chan_hold(chan);
4328 l2cap_chan_lock(chan);
4330 rsp.dcid = cpu_to_le16(chan->scid);
4331 rsp.scid = cpu_to_le16(chan->dcid);
4332 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4334 chan->ops->set_shutdown(chan);
4336 l2cap_chan_del(chan, ECONNRESET);
4338 chan->ops->close(chan);
4340 l2cap_chan_unlock(chan);
4341 l2cap_chan_put(chan);
4343 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Disconnection Response.
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces,
 * returns and the u16 dcid/scid declaration are elided.  Bytes untouched.
 *
 * Completes a disconnect we initiated: only acts when the channel is in
 * BT_DISCONN (i.e. we actually sent a Disconnect Request).
 */
4348 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4349 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4352 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4354 struct l2cap_chan *chan;
4356 if (cmd_len != sizeof(*rsp))
4359 scid = __le16_to_cpu(rsp->scid);
4360 dcid = __le16_to_cpu(rsp->dcid);
4362 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4364 mutex_lock(&conn->chan_lock);
4366 chan = __l2cap_get_chan_by_scid(conn, scid);
4368 mutex_unlock(&conn->chan_lock);
4372 l2cap_chan_hold(chan);
4373 l2cap_chan_lock(chan);
/* Unsolicited response (we never sent a request): ignore it */
4375 if (chan->state != BT_DISCONN) {
4376 l2cap_chan_unlock(chan);
4377 l2cap_chan_put(chan);
4378 mutex_unlock(&conn->chan_lock);
/* err == 0: clean, expected teardown */
4382 l2cap_chan_del(chan, 0);
4384 chan->ops->close(chan);
4386 l2cap_chan_unlock(chan);
4387 l2cap_chan_put(chan);
4389 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Information Request.
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces,
 * returns, the local buf[] declarations and some feature-mask lines are
 * elided.  Bytes untouched.
 *
 * Answers feature-mask and fixed-channel queries; any other type gets an
 * L2CAP_IR_NOTSUPP response.
 */
4394 static inline int l2cap_information_req(struct l2cap_conn *conn,
4395 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4398 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4401 if (cmd_len != sizeof(*req))
4404 type = __le16_to_cpu(req->type);
4406 BT_DBG("type 0x%4.4x", type);
4408 if (type == L2CAP_IT_FEAT_MASK) {
/* Start from the static mask and add runtime-conditional features */
4410 u32 feat_mask = l2cap_feat_mask;
4411 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4412 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4413 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4415 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended flow spec / window only advertised when A2MP is enabled */
4417 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4418 feat_mask |= L2CAP_FEAT_EXT_FLOW
4419 | L2CAP_FEAT_EXT_WINDOW;
/* rsp->data may be unaligned inside buf[], hence put_unaligned */
4421 put_unaligned_le32(feat_mask, rsp->data)
4422 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4424 } else if (type == L2CAP_IT_FIXED_CHAN) {
4426 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4428 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4429 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel bitmap: first octet is ours, rest reserved zero */
4430 rsp->data[0] = conn->local_fixed_chan;
4431 memset(rsp->data + 1, 0, 7);
4432 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4435 struct l2cap_info_rsp rsp;
4436 rsp.type = cpu_to_le16(type);
4437 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4438 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming L2CAP Information Response.
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces,
 * returns and the switch(type) header are elided.  Bytes untouched.
 *
 * Drives the two-step discovery: feature mask first, then (if supported)
 * the fixed-channel map; finally kicks pending connections via
 * l2cap_conn_start().
 */
4445 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4446 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4449 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4452 if (cmd_len < sizeof(*rsp))
4455 type = __le16_to_cpu(rsp->type);
4456 result = __le16_to_cpu(rsp->result);
4458 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4460 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4461 if (cmd->ident != conn->info_ident ||
4462 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4465 cancel_delayed_work(&conn->info_timer);
/* Peer rejected the query: mark discovery done and proceed anyway */
4467 if (result != L2CAP_IR_SUCCESS) {
4468 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4469 conn->info_ident = 0;
4471 l2cap_conn_start(conn);
4477 case L2CAP_IT_FEAT_MASK:
4478 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Chain a fixed-channel query only if the peer advertises support */
4480 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4481 struct l2cap_info_req req;
4482 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4484 conn->info_ident = l2cap_get_ident(conn);
4486 l2cap_send_cmd(conn, conn->info_ident,
4487 L2CAP_INFO_REQ, sizeof(req), &req);
4489 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4490 conn->info_ident = 0;
4492 l2cap_conn_start(conn);
4496 case L2CAP_IT_FIXED_CHAN:
4497 conn->remote_fixed_chan = rsp->data[0];
4498 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4499 conn->info_ident = 0;
4501 l2cap_conn_start(conn);
/* Handle an incoming L2CAP Create Channel Request (AMP).
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces,
 * returns, error labels and the psm/scid declarations are elided.  Bytes
 * untouched.
 *
 * amp_id 0 (AMP_ID_BREDR) falls back to a normal BR/EDR connect; any
 * other id must name an active AMP controller, otherwise the request is
 * answered with L2CAP_CR_BAD_AMP.
 */
4508 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4509 struct l2cap_cmd_hdr *cmd,
4510 u16 cmd_len, void *data)
4512 struct l2cap_create_chan_req *req = data;
4513 struct l2cap_create_chan_rsp rsp;
4514 struct l2cap_chan *chan;
4515 struct hci_dev *hdev;
4518 if (cmd_len != sizeof(*req))
/* A2MP must be enabled locally for Create Channel to be meaningful */
4521 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4524 psm = le16_to_cpu(req->psm);
4525 scid = le16_to_cpu(req->scid);
4527 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4529 /* For controller id 0 make BR/EDR connection */
4530 if (req->amp_id == AMP_ID_BREDR) {
4531 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4536 /* Validate AMP controller id */
/* hci_dev_get takes a device reference; released on the elided paths */
4537 hdev = hci_dev_get(req->amp_id);
4541 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4546 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4549 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4550 struct hci_conn *hs_hcon;
/* The AMP link must already exist toward this peer */
4552 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4556 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4561 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4563 mgr->bredr_chan = chan;
4564 chan->hs_hcon = hs_hcon;
/* FCS is not used on AMP links; MTU follows the AMP block size */
4565 chan->fcs = L2CAP_FCS_NONE;
4566 conn->mtu = hdev->block_mtu;
/* Error path: reject with BAD_AMP */
4575 rsp.scid = cpu_to_le16(scid);
4576 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4577 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4579 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4585 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4587 struct l2cap_move_chan_req req;
4590 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4592 ident = l2cap_get_ident(chan->conn);
4593 chan->ident = ident;
4595 req.icid = cpu_to_le16(chan->scid);
4596 req.dest_amp_id = dest_amp_id;
4598 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4601 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4604 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4606 struct l2cap_move_chan_rsp rsp;
4608 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4610 rsp.icid = cpu_to_le16(chan->dcid);
4611 rsp.result = cpu_to_le16(result);
4613 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4617 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4619 struct l2cap_move_chan_cfm cfm;
4621 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4623 chan->ident = l2cap_get_ident(chan->conn);
4625 cfm.icid = cpu_to_le16(chan->scid);
4626 cfm.result = cpu_to_le16(result);
4628 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4631 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4634 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4636 struct l2cap_move_chan_cfm cfm;
4638 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4640 cfm.icid = cpu_to_le16(icid);
4641 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4643 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4647 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4650 struct l2cap_move_chan_cfm_rsp rsp;
4652 BT_DBG("icid 0x%4.4x", icid);
4654 rsp.icid = cpu_to_le16(icid);
4655 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4658 static void __release_logical_link(struct l2cap_chan *chan)
4660 chan->hs_hchan = NULL;
4661 chan->hs_hcon = NULL;
4663 /* Placeholder - release the logical link */
/* React to a failed AMP logical-link setup.
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces,
 * breaks and returns are elided.  Bytes untouched.
 *
 * A failure before the channel is connected aborts channel creation;
 * after connection it unwinds an in-progress move according to our role.
 */
4666 static void l2cap_logical_fail(struct l2cap_chan *chan)
4668 /* Logical link setup failed */
4669 if (chan->state != BT_CONNECTED) {
4670 /* Create channel failure, disconnect */
4671 l2cap_send_disconn_req(chan, ECONNRESET);
4675 switch (chan->move_role) {
4676 case L2CAP_MOVE_ROLE_RESPONDER:
4677 l2cap_move_done(chan);
/* Tell the initiator we cannot host the channel on this controller */
4678 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4680 case L2CAP_MOVE_ROLE_INITIATOR:
4681 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4682 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4683 /* Remote has only sent pending or
4684 * success responses, clean up
4686 l2cap_move_done(chan);
4689 /* Other amp move states imply that the move
4690 * has already aborted
4692 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Finish channel creation once the AMP logical link is up.
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces
 * and the int err declaration are elided.  Bytes untouched.
 *
 * Attaches the new hci_chan, sends the deferred EFS Configure Response
 * (using the ident saved earlier), and if configuration is complete
 * initializes ERTM and marks the channel ready.
 */
4697 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4698 struct hci_chan *hchan)
4700 struct l2cap_conf_rsp rsp;
4702 chan->hs_hchan = hchan;
4703 chan->hs_hcon->l2cap_data = chan->conn;
/* chan->ident was stashed when the config response was deferred */
4705 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4707 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4710 set_default_fcs(chan);
4712 err = l2cap_ertm_init(chan);
4714 l2cap_send_disconn_req(chan, -err);
4716 l2cap_chan_ready(chan);
/* Advance the channel-move state machine when the logical link comes up.
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces,
 * breaks and the default label are elided.  Bytes untouched.
 */
4720 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4721 struct hci_chan *hchan)
4723 chan->hs_hcon = hchan->conn;
4724 chan->hs_hcon->l2cap_data = chan->conn;
4726 BT_DBG("move_state %d", chan->move_state);
4728 switch (chan->move_state) {
4729 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4730 /* Move confirm will be sent after a success
4731 * response is received
4733 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4735 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Local receiver busy: park the move until reads resume */
4736 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4737 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4738 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4739 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4740 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4741 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4742 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4743 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4747 /* Move was not in expected state, free the channel */
4748 __release_logical_link(chan);
4750 chan->move_state = L2CAP_MOVE_STABLE;
4754 /* Call with chan locked */
/* Completion callback for AMP logical-link setup.
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces,
 * the status parameter line and returns are elided.  Bytes untouched.
 *
 * Non-zero status fails the create/move; otherwise dispatch to the
 * create-finish path (channel not yet connected) or move-finish path.
 */
4755 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4758 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4761 l2cap_logical_fail(chan);
4762 __release_logical_link(chan);
4766 if (chan->state != BT_CONNECTED) {
4767 /* Ignore logical link if channel is on BR/EDR */
4768 if (chan->local_amp_id != AMP_ID_BREDR)
4769 l2cap_logical_finish_create(chan, hchan);
4771 l2cap_logical_finish_move(chan, hchan);
/* Initiate a channel move as the local initiator.
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces
 * and an early return are elided.  Bytes untouched.
 *
 * From BR/EDR, only moves when policy prefers AMP (physical link setup is
 * still a placeholder); from an AMP controller, moves straight back to
 * BR/EDR by sending a Move Channel Request with dest id 0.
 */
4775 void l2cap_move_start(struct l2cap_chan *chan)
4777 BT_DBG("chan %p", chan);
4779 if (chan->local_amp_id == AMP_ID_BREDR) {
4780 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4782 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4783 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4784 /* Placeholder - start physical link setup */
4786 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4787 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4789 l2cap_move_setup(chan);
/* Destination 0 == AMP_ID_BREDR: request a move back to BR/EDR */
4790 l2cap_send_move_chan_req(chan, 0);
/* Complete AMP channel creation after physical-link setup resolves.
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces,
 * returns and the u8 buf[128] declaration are elided.  Bytes untouched.
 *
 * Outgoing (BT_CONNECT): either proceed with Create Channel on the AMP
 * or fall back to a plain BR/EDR Connect Request.  Incoming: send the
 * deferred Create Channel Response and, on success, start configuration.
 */
4794 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4795 u8 local_amp_id, u8 remote_amp_id)
4797 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4798 local_amp_id, remote_amp_id);
/* AMP links never use an FCS */
4800 chan->fcs = L2CAP_FCS_NONE;
4802 /* Outgoing channel on AMP */
4803 if (chan->state == BT_CONNECT) {
4804 if (result == L2CAP_CR_SUCCESS) {
4805 chan->local_amp_id = local_amp_id;
4806 l2cap_send_create_chan_req(chan, remote_amp_id);
4808 /* Revert to BR/EDR connect */
4809 l2cap_send_conn_req(chan);
4815 /* Incoming channel on AMP */
4816 if (__l2cap_no_conn_pending(chan)) {
4817 struct l2cap_conn_rsp rsp;
4819 rsp.scid = cpu_to_le16(chan->dcid);
4820 rsp.dcid = cpu_to_le16(chan->scid);
4822 if (result == L2CAP_CR_SUCCESS) {
4823 /* Send successful response */
4824 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4825 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4827 /* Send negative response */
4828 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4829 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* chan->ident still holds the original Create Channel Request's ident */
4832 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4835 if (result == L2CAP_CR_SUCCESS) {
4836 l2cap_state_change(chan, BT_CONFIG);
4837 set_bit(CONF_REQ_SENT, &chan->conf_state);
4838 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4840 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4841 chan->num_conf_req++;
4846 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4849 l2cap_move_setup(chan);
4850 chan->move_id = local_amp_id;
4851 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4853 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: answer a move once the physical link result is known.
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces
 * and the result-check branch are elided; hchan stays NULL because the
 * lookup is still a placeholder, so the BT_CONNECTED branch is
 * effectively dead until that is implemented.  Bytes untouched.
 */
4856 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4858 struct hci_chan *hchan = NULL;
4860 /* Placeholder - get hci_chan for logical link */
4863 if (hchan->state == BT_CONNECTED) {
4864 /* Logical link is ready to go */
4865 chan->hs_hcon = hchan->conn;
4866 chan->hs_hcon->l2cap_data = chan->conn;
4867 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4868 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4870 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4872 /* Wait for logical link to be ready */
4873 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4876 /* Logical link not available */
4877 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4881 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4883 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4885 if (result == -EINVAL)
4886 rsp_result = L2CAP_MR_BAD_ID;
4888 rsp_result = L2CAP_MR_NOT_ALLOWED;
4890 l2cap_send_move_chan_rsp(chan, rsp_result);
4893 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4894 chan->move_state = L2CAP_MOVE_STABLE;
4896 /* Restart data transmission */
4897 l2cap_ertm_send(chan);
4900 /* Invoke with locked chan */
/* Completion callback for AMP physical-link setup.
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces,
 * breaks and an early return are elided.  Bytes untouched.
 *
 * Dispatches to create (channel not yet connected), cancel (failure), or
 * the role-specific move continuation.
 */
4901 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4903 u8 local_amp_id = chan->local_amp_id;
4904 u8 remote_amp_id = chan->remote_amp_id;
4906 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4907 chan, result, local_amp_id, remote_amp_id);
/* Channel already going away: nothing to do */
4909 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
4912 if (chan->state != BT_CONNECTED) {
4913 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4914 } else if (result != L2CAP_MR_SUCCESS) {
4915 l2cap_do_move_cancel(chan, result);
4917 switch (chan->move_role) {
4918 case L2CAP_MOVE_ROLE_INITIATOR:
4919 l2cap_do_move_initiate(chan, local_amp_id,
4922 case L2CAP_MOVE_ROLE_RESPONDER:
4923 l2cap_do_move_respond(chan, result);
/* Unknown role: treat as failure */
4926 l2cap_do_move_cancel(chan, result);
/* Handle an incoming L2CAP Move Channel Request.
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces,
 * returns, the u16 icid declaration and some error-path lines are elided.
 * Bytes untouched.
 *
 * Validates the request (A2MP enabled, known ICID, movable mode, valid
 * destination controller), detects move collisions by bd_addr
 * comparison, then either accepts immediately (move to BR/EDR) or defers
 * with L2CAP_MR_PEND while the AMP physical link is prepared.
 */
4932 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4933 struct l2cap_cmd_hdr *cmd,
4934 u16 cmd_len, void *data)
4936 struct l2cap_move_chan_req *req = data;
4937 struct l2cap_move_chan_rsp rsp;
4938 struct l2cap_chan *chan;
4940 u16 result = L2CAP_MR_NOT_ALLOWED;
4942 if (cmd_len != sizeof(*req))
4945 icid = le16_to_cpu(req->icid);
4947 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4949 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
/* ICID names the channel by the peer's CID, i.e. our dcid */
4952 chan = l2cap_get_chan_by_dcid(conn, icid);
4954 rsp.icid = cpu_to_le16(icid);
4955 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4956 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
/* Remember the ident so the response can be sent later if deferred */
4961 chan->ident = cmd->ident;
/* Fixed channels, BR/EDR-only policy, and basic-mode channels can't move */
4963 if (chan->scid < L2CAP_CID_DYN_START ||
4964 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4965 (chan->mode != L2CAP_MODE_ERTM &&
4966 chan->mode != L2CAP_MODE_STREAMING)) {
4967 result = L2CAP_MR_NOT_ALLOWED;
4968 goto send_move_response;
4971 if (chan->local_amp_id == req->dest_amp_id) {
4972 result = L2CAP_MR_SAME_ID;
4973 goto send_move_response;
4976 if (req->dest_amp_id != AMP_ID_BREDR) {
4977 struct hci_dev *hdev;
4978 hdev = hci_dev_get(req->dest_amp_id);
4979 if (!hdev || hdev->dev_type != HCI_AMP ||
4980 !test_bit(HCI_UP, &hdev->flags)) {
4984 result = L2CAP_MR_BAD_ID;
4985 goto send_move_response;
4990 /* Detect a move collision. Only send a collision response
4991 * if this side has "lost", otherwise proceed with the move.
4992 * The winner has the larger bd_addr.
4994 if ((__chan_is_moving(chan) ||
4995 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4996 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4997 result = L2CAP_MR_COLLISION;
4998 goto send_move_response;
5001 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5002 l2cap_move_setup(chan);
5003 chan->move_id = req->dest_amp_id;
5006 if (req->dest_amp_id == AMP_ID_BREDR) {
5007 /* Moving to BR/EDR */
5008 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5009 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5010 result = L2CAP_MR_PEND;
5012 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5013 result = L2CAP_MR_SUCCESS;
5016 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5017 /* Placeholder - uncomment when amp functions are available */
5018 /*amp_accept_physical(chan, req->dest_amp_id);*/
5019 result = L2CAP_MR_PEND;
5023 l2cap_send_move_chan_rsp(chan, result);
5025 l2cap_chan_unlock(chan);
/* Continue the initiator's move after a SUCCESS/PEND Move Channel Rsp.
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces,
 * breaks and the default case header are elided.  Bytes untouched.
 */
5030 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5032 struct l2cap_chan *chan;
5033 struct hci_chan *hchan = NULL;
5035 chan = l2cap_get_chan_by_scid(conn, icid);
/* No such channel: spec still requires an (unconfirmed) confirmation */
5037 l2cap_send_move_chan_cfm_icid(conn, icid);
5041 __clear_chan_timer(chan);
/* PEND extends the guard timer while the peer finishes its side */
5042 if (result == L2CAP_MR_PEND)
5043 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5045 switch (chan->move_state) {
5046 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5047 /* Move confirm will be sent when logical link
5050 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5052 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5053 if (result == L2CAP_MR_PEND) {
5055 } else if (test_bit(CONN_LOCAL_BUSY,
5056 &chan->conn_state)) {
5057 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5059 /* Logical link is up or moving to BR/EDR,
5062 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5063 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5066 case L2CAP_MOVE_WAIT_RSP:
5068 if (result == L2CAP_MR_SUCCESS) {
5069 /* Remote is ready, send confirm immediately
5070 * after logical link is ready
5072 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5074 /* Both logical link and move success
5075 * are required to confirm
5077 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5080 /* Placeholder - get hci_chan for logical link */
/* hchan stays NULL until the placeholder is implemented */
5082 /* Logical link not available */
5083 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5087 /* If the logical link is not yet connected, do not
5088 * send confirmation.
5090 if (hchan->state != BT_CONNECTED)
5093 /* Logical link is already ready to go */
5095 chan->hs_hcon = hchan->conn;
5096 chan->hs_hcon->l2cap_data = chan->conn;
5098 if (result == L2CAP_MR_SUCCESS) {
5099 /* Can confirm now */
5100 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5102 /* Now only need move success
5105 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5108 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5111 /* Any other amp move state means the move failed. */
5112 chan->move_id = chan->local_amp_id;
5113 l2cap_move_done(chan);
5114 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5117 l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response (any result other than
 * SUCCESS/PEND).
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces
 * and returns are elided.  Bytes untouched.
 *
 * On COLLISION we switch to the responder role and let the peer drive;
 * otherwise the move is unwound.  Either way an UNCONFIRMED confirmation
 * is sent, per the move-channel signalling rules.
 */
5120 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5123 struct l2cap_chan *chan;
/* Response is matched by ident, not CID */
5125 chan = l2cap_get_chan_by_ident(conn, ident);
5127 /* Could not locate channel, icid is best guess */
5128 l2cap_send_move_chan_cfm_icid(conn, icid);
5132 __clear_chan_timer(chan);
5134 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5135 if (result == L2CAP_MR_COLLISION) {
5136 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5138 /* Cleanup - cancel move */
5139 chan->move_id = chan->local_amp_id;
5140 l2cap_move_done(chan);
5144 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5146 l2cap_chan_unlock(chan);
5149 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5150 struct l2cap_cmd_hdr *cmd,
5151 u16 cmd_len, void *data)
5153 struct l2cap_move_chan_rsp *rsp = data;
5156 if (cmd_len != sizeof(*rsp))
5159 icid = le16_to_cpu(rsp->icid);
5160 result = le16_to_cpu(rsp->result);
5162 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5164 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5165 l2cap_move_continue(conn, icid, result);
5167 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming L2CAP Move Channel Confirmation (responder side).
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces,
 * returns and the u16 icid/result declaration are elided.  Bytes
 * untouched.
 *
 * CONFIRMED commits the channel to the new controller (releasing the
 * logical link when landing on BR/EDR); UNCONFIRMED rolls back.  A
 * confirmation response is sent in every case, even for unknown ICIDs.
 */
5172 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5173 struct l2cap_cmd_hdr *cmd,
5174 u16 cmd_len, void *data)
5176 struct l2cap_move_chan_cfm *cfm = data;
5177 struct l2cap_chan *chan;
5180 if (cmd_len != sizeof(*cfm))
5183 icid = le16_to_cpu(cfm->icid);
5184 result = le16_to_cpu(cfm->result);
5186 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5188 chan = l2cap_get_chan_by_dcid(conn, icid);
5190 /* Spec requires a response even if the icid was not found */
5191 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5195 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5196 if (result == L2CAP_MC_CONFIRMED) {
5197 chan->local_amp_id = chan->move_id;
5198 if (chan->local_amp_id == AMP_ID_BREDR)
5199 __release_logical_link(chan);
/* UNCONFIRMED: revert move_id so the channel stays where it was */
5201 chan->move_id = chan->local_amp_id;
5204 l2cap_move_done(chan);
5207 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5209 l2cap_chan_unlock(chan);
5214 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5215 struct l2cap_cmd_hdr *cmd,
5216 u16 cmd_len, void *data)
5218 struct l2cap_move_chan_cfm_rsp *rsp = data;
5219 struct l2cap_chan *chan;
5222 if (cmd_len != sizeof(*rsp))
5225 icid = le16_to_cpu(rsp->icid);
5227 BT_DBG("icid 0x%4.4x", icid);
5229 chan = l2cap_get_chan_by_scid(conn, icid);
5233 __clear_chan_timer(chan);
5235 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5236 chan->local_amp_id = chan->move_id;
5238 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5239 __release_logical_link(chan);
5241 l2cap_move_done(chan);
5244 l2cap_chan_unlock(chan);
/* Handle an LE Connection Parameter Update Request.
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces,
 * returns and the accepted-parameters branch header are elided.  Bytes
 * untouched.
 *
 * Only valid when we are the LE central (master); validates the proposed
 * parameters, responds accept/reject, and on accept applies the update
 * and records it for the management interface.
 */
5249 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5250 struct l2cap_cmd_hdr *cmd,
5251 u16 cmd_len, u8 *data)
5253 struct hci_conn *hcon = conn->hcon;
5254 struct l2cap_conn_param_update_req *req;
5255 struct l2cap_conn_param_update_rsp rsp;
5256 u16 min, max, latency, to_multiplier;
/* Peripherals must not receive this request; reject at link level */
5259 if (hcon->role != HCI_ROLE_MASTER)
5262 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5265 req = (struct l2cap_conn_param_update_req *) data;
5266 min = __le16_to_cpu(req->min);
5267 max = __le16_to_cpu(req->max);
5268 latency = __le16_to_cpu(req->latency);
5269 to_multiplier = __le16_to_cpu(req->to_multiplier);
5271 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5272 min, max, latency, to_multiplier);
5274 memset(&rsp, 0, sizeof(rsp));
5276 err = hci_check_conn_params(min, max, latency, to_multiplier);
5278 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5280 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5282 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the update to the controller and notify mgmt */
5288 store_hint = hci_le_conn_update(hcon, min, max, latency,
5290 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5291 store_hint, min, max, latency,
/* Handle an LE Credit Based Connection Response.
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces,
 * returns, the switch(result) header and some assignments (dcid/mtu) are
 * elided.  Bytes untouched.
 *
 * On success, validates the peer-assigned dcid (range and uniqueness)
 * and brings the channel up; on authentication/encryption errors it
 * raises the security level and retries via SMP (unless we already have
 * MITM protection, in which case retrying cannot help).
 */
5299 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5300 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5303 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5304 struct hci_conn *hcon = conn->hcon;
5305 u16 dcid, mtu, mps, credits, result;
5306 struct l2cap_chan *chan;
5309 if (cmd_len < sizeof(*rsp))
5312 dcid = __le16_to_cpu(rsp->dcid);
5313 mtu = __le16_to_cpu(rsp->mtu);
5314 mps = __le16_to_cpu(rsp->mps);
5315 credits = __le16_to_cpu(rsp->credits);
5316 result = __le16_to_cpu(rsp->result);
/* 23 is the minimum legal LE CoC MTU/MPS; dcid must be LE-dynamic */
5318 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23 ||
5319 dcid < L2CAP_CID_DYN_START ||
5320 dcid > L2CAP_CID_LE_DYN_END))
5323 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5324 dcid, mtu, mps, credits, result);
5326 mutex_lock(&conn->chan_lock);
/* Our request is matched by ident, the channel has no dcid yet */
5328 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5336 l2cap_chan_lock(chan);
5339 case L2CAP_CR_SUCCESS:
/* Peer must not hand out a dcid already in use on this conn */
5340 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5348 chan->remote_mps = mps;
5349 chan->tx_credits = credits;
5350 l2cap_chan_ready(chan);
5353 case L2CAP_CR_AUTHENTICATION:
5354 case L2CAP_CR_ENCRYPTION:
5355 /* If we already have MITM protection we can't do
5358 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5359 l2cap_chan_del(chan, ECONNREFUSED);
5363 sec_level = hcon->sec_level + 1;
5364 if (chan->sec_level < sec_level)
5365 chan->sec_level = sec_level;
5367 /* We'll need to send a new Connect Request */
5368 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5370 smp_conn_security(hcon, chan->sec_level);
5374 l2cap_chan_del(chan, ECONNREFUSED);
5378 l2cap_chan_unlock(chan);
5381 mutex_unlock(&conn->chan_lock);
/* Dispatch one BR/EDR signalling command to its handler.
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces,
 * break statements and the int err declaration are elided.  Bytes
 * untouched.
 *
 * Handlers that ignore their own errors here (e.g. config/disconnect
 * responses) do so deliberately: only request-type failures should
 * trigger a Command Reject from the caller.
 */
5386 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5387 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5392 switch (cmd->code) {
5393 case L2CAP_COMMAND_REJ:
5394 l2cap_command_rej(conn, cmd, cmd_len, data);
5397 case L2CAP_CONN_REQ:
5398 err = l2cap_connect_req(conn, cmd, cmd_len, data);
/* Connect and Create Channel responses share one handler */
5401 case L2CAP_CONN_RSP:
5402 case L2CAP_CREATE_CHAN_RSP:
5403 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5406 case L2CAP_CONF_REQ:
5407 err = l2cap_config_req(conn, cmd, cmd_len, data);
5410 case L2CAP_CONF_RSP:
5411 l2cap_config_rsp(conn, cmd, cmd_len, data);
5414 case L2CAP_DISCONN_REQ:
5415 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5418 case L2CAP_DISCONN_RSP:
5419 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
/* Echo is answered inline: payload bounced back verbatim */
5422 case L2CAP_ECHO_REQ:
5423 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5426 case L2CAP_ECHO_RSP:
5429 case L2CAP_INFO_REQ:
5430 err = l2cap_information_req(conn, cmd, cmd_len, data);
5433 case L2CAP_INFO_RSP:
5434 l2cap_information_rsp(conn, cmd, cmd_len, data);
5437 case L2CAP_CREATE_CHAN_REQ:
5438 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5441 case L2CAP_MOVE_CHAN_REQ:
5442 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5445 case L2CAP_MOVE_CHAN_RSP:
5446 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5449 case L2CAP_MOVE_CHAN_CFM:
5450 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5453 case L2CAP_MOVE_CHAN_CFM_RSP:
5454 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5458 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an LE Credit Based Connection Request.
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces,
 * returns, labels, the psm/result/dcid declarations and some assignment
 * lines are elided.  Bytes untouched.
 *
 * Finds a listening channel for the PSM, enforces security and CID
 * validity, creates and registers the new channel, then either defers
 * to userspace (FLAG_DEFER_SETUP, result L2CAP_CR_PEND as an internal
 * "no response yet" marker) or answers immediately.
 */
5466 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5467 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5470 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5471 struct l2cap_le_conn_rsp rsp;
5472 struct l2cap_chan *chan, *pchan;
5473 u16 dcid, scid, credits, mtu, mps;
5477 if (cmd_len != sizeof(*req))
5480 scid = __le16_to_cpu(req->scid);
5481 mtu = __le16_to_cpu(req->mtu);
5482 mps = __le16_to_cpu(req->mps);
/* 23 is the minimum legal LE CoC MTU/MPS */
5487 if (mtu < 23 || mps < 23)
5490 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5493 /* Check if we have socket listening on psm */
5494 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5495 &conn->hcon->dst, LE_LINK);
5497 result = L2CAP_CR_BAD_PSM;
5502 mutex_lock(&conn->chan_lock);
5503 l2cap_chan_lock(pchan);
/* Link security must satisfy the listener's requirement */
5505 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5507 result = L2CAP_CR_AUTHENTICATION;
5509 goto response_unlock;
5512 /* Check for valid dynamic CID range */
5513 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5514 result = L2CAP_CR_INVALID_SCID;
5516 goto response_unlock;
5519 /* Check if we already have channel with that dcid */
5520 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5521 result = L2CAP_CR_SCID_IN_USE;
5523 goto response_unlock;
5526 chan = pchan->ops->new_connection(pchan);
5528 result = L2CAP_CR_NO_MEM;
5529 goto response_unlock;
5532 l2cap_le_flowctl_init(chan);
5534 bacpy(&chan->src, &conn->hcon->src);
5535 bacpy(&chan->dst, &conn->hcon->dst);
5536 chan->src_type = bdaddr_src_type(conn->hcon);
5537 chan->dst_type = bdaddr_dst_type(conn->hcon);
5541 chan->remote_mps = mps;
5542 chan->tx_credits = __le16_to_cpu(req->credits);
5544 __l2cap_chan_add(conn, chan);
/* Snapshot now; rsp fields are filled after the locks are dropped */
5546 credits = chan->rx_credits;
5548 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5550 chan->ident = cmd->ident;
5552 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5553 l2cap_state_change(chan, BT_CONNECT2);
5554 /* The following result value is actually not defined
5555 * for LE CoC but we use it to let the function know
5556 * that it should bail out after doing its cleanup
5557 * instead of sending a response.
5559 result = L2CAP_CR_PEND;
5560 chan->ops->defer(chan);
5562 l2cap_chan_ready(chan);
5563 result = L2CAP_CR_SUCCESS;
5567 l2cap_chan_unlock(pchan);
5568 mutex_unlock(&conn->chan_lock);
5569 l2cap_chan_put(pchan);
5571 if (result == L2CAP_CR_PEND)
5576 rsp.mtu = cpu_to_le16(chan->imtu);
5577 rsp.mps = cpu_to_le16(chan->mps);
5583 rsp.dcid = cpu_to_le16(dcid);
5584 rsp.credits = cpu_to_le16(credits);
5585 rsp.result = cpu_to_le16(result);
5587 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet.
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces
 * and returns are elided.  Bytes untouched.
 *
 * Adds the peer's credits (guarding against overflowing the 16-bit
 * credit count) and flushes any frames queued while we were out of
 * credits.
 */
5592 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5593 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5596 struct l2cap_le_credits *pkt;
5597 struct l2cap_chan *chan;
5598 u16 cid, credits, max_credits;
5600 if (cmd_len != sizeof(*pkt))
5603 pkt = (struct l2cap_le_credits *) data;
5604 cid = __le16_to_cpu(pkt->cid);
5605 credits = __le16_to_cpu(pkt->credits);
5607 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5609 chan = l2cap_get_chan_by_dcid(conn, cid);
/* Total credits must never exceed LE_FLOWCTL_MAX_CREDITS (65535) */
5613 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5614 if (credits > max_credits) {
5615 BT_ERR("LE credits overflow");
5616 l2cap_send_disconn_req(chan, ECONNRESET);
5617 l2cap_chan_unlock(chan);
5619 /* Return 0 so that we don't trigger an unnecessary
5620 * command reject packet.
5625 chan->tx_credits += credits;
/* Drain frames that were waiting for credits */
5627 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5628 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5632 if (chan->tx_credits)
5633 chan->ops->resume(chan);
5635 l2cap_chan_unlock(chan);
/* Handle an LE Command Reject.
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces
 * and returns are elided.  Bytes untouched.
 *
 * The peer rejected a command we sent; if the ident matches a pending
 * channel request, tear that channel down with ECONNREFUSED.
 */
5640 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5641 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5644 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5645 struct l2cap_chan *chan;
5647 if (cmd_len < sizeof(*rej))
5650 mutex_lock(&conn->chan_lock);
5652 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5656 l2cap_chan_lock(chan);
5657 l2cap_chan_del(chan, ECONNREFUSED);
5658 l2cap_chan_unlock(chan);
5661 mutex_unlock(&conn->chan_lock);
/* Dispatch one LE signalling command to its handler.
 *
 * NOTE(review): mangled extraction -- stray line-number prefixes; braces,
 * break statements and the int err declaration are elided.  Bytes
 * untouched.
 */
5665 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5666 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5671 switch (cmd->code) {
5672 case L2CAP_COMMAND_REJ:
5673 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5676 case L2CAP_CONN_PARAM_UPDATE_REQ:
5677 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
/* Update responses carry nothing we act on */
5680 case L2CAP_CONN_PARAM_UPDATE_RSP:
5683 case L2CAP_LE_CONN_RSP:
5684 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5687 case L2CAP_LE_CONN_REQ:
5688 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5691 case L2CAP_LE_CREDITS:
5692 err = l2cap_le_credits(conn, cmd, cmd_len, data);
/* Disconnect req/rsp share the BR/EDR handlers */
5695 case L2CAP_DISCONN_REQ:
5696 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5699 case L2CAP_DISCONN_RSP:
5700 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5704 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an incoming frame on the LE signaling channel.  LE carries one
 * command per frame: validate the link type, pull the command header,
 * check the advertised length against the remaining payload and require a
 * non-zero ident, then dispatch.  On handler error, send a Command Reject
 * (Not Understood) back with the same ident.
 */
5712 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5713 struct sk_buff *skb)
5715 struct hci_conn *hcon = conn->hcon;
5716 struct l2cap_cmd_hdr *cmd;
5720 if (hcon->type != LE_LINK)
5723 if (skb->len < L2CAP_CMD_HDR_SIZE)
5726 cmd = (void *) skb->data;
5727 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5729 len = le16_to_cpu(cmd->len);
5731 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* Exactly one command per LE frame: length must equal what is left. */
5733 if (len != skb->len || !cmd->ident) {
5734 BT_DBG("corrupted command");
5738 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5740 struct l2cap_cmd_rej_unk rej;
/* NOTE(review): the message text ("Wrong link type") looks misleading
 * here — err is a generic handler failure, not a link-type check.
 */
5742 BT_ERR("Wrong link type (%d)", err);
5744 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5745 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process an incoming frame on the BR/EDR signaling channel.  Unlike LE,
 * a single ACL frame may carry several signaling commands back to back:
 * walk the buffer, copying each command header and dispatching its
 * payload.  Malformed commands abort the walk; a handler error triggers a
 * Command Reject (Not Understood) with the failing command's ident.
 */
5753 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5754 struct sk_buff *skb)
5756 struct hci_conn *hcon = conn->hcon;
5757 u8 *data = skb->data;
5759 struct l2cap_cmd_hdr cmd;
/* Raw (L2CAP_CHAN_RAW) sockets get a copy of every signaling frame. */
5762 l2cap_raw_recv(conn, skb);
5764 if (hcon->type != ACL_LINK)
5767 while (len >= L2CAP_CMD_HDR_SIZE) {
5769 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5770 data += L2CAP_CMD_HDR_SIZE;
5771 len -= L2CAP_CMD_HDR_SIZE;
5773 cmd_len = le16_to_cpu(cmd.len);
5775 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* Advertised length must fit in what remains of the frame. */
5778 if (cmd_len > len || !cmd.ident) {
5779 BT_DBG("corrupted command");
5783 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5785 struct l2cap_cmd_rej_unk rej;
/* NOTE(review): misleading message text, as in the LE path. */
5787 BT_ERR("Wrong link type (%d)", err);
5789 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5790 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the frame check sequence of a received ERTM/streaming frame.
 * When CRC16 FCS is enabled, trim the trailing FCS off the skb, read it,
 * and recompute crc16 over the L2CAP header plus payload.
 * NOTE(review): skb->data - hdr_size assumes the (enhanced or extended)
 * header bytes still immediately precede skb->data after the caller's
 * skb_pull — confirm against the caller.
 */
5802 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5804 u16 our_fcs, rcv_fcs;
/* Extended control fields imply the larger extended header. */
5807 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5808 hdr_size = L2CAP_EXT_HDR_SIZE;
5810 hdr_size = L2CAP_ENH_HDR_SIZE;
5812 if (chan->fcs == L2CAP_FCS_CRC16) {
5813 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
/* After the trim, the FCS bytes sit just past the new skb->len. */
5814 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5815 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5817 if (our_fcs != rcv_fcs)
/* Answer an ERTM poll: send RNR if we are locally busy, otherwise flush
 * pending I-frames (which carry the F-bit), and fall back to an RR
 * S-frame if no I-frame consumed the F-bit.
 */
5823 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5825 struct l2cap_ctrl control;
5827 BT_DBG("chan %p", chan);
5829 memset(&control, 0, sizeof(control));
5832 control.reqseq = chan->buffer_seq;
/* The response to a poll must carry the F-bit. */
5833 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5835 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5836 control.super = L2CAP_SUPER_RNR;
5837 l2cap_send_sframe(chan, &control);
/* Remote just cleared busy: restart retransmission timing if frames
 * are still unacknowledged.
 */
5840 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5841 chan->unacked_frames > 0)
5842 __set_retrans_timer(chan);
5844 /* Send pending iframes */
5845 l2cap_ertm_send(chan);
5847 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5848 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5849 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5852 control.super = L2CAP_SUPER_RR;
5853 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list, tracking the tail in *last_frag so
 * appends stay O(1), and update the parent skb's length accounting.
 */
5857 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5858 struct sk_buff **last_frag)
5860 /* skb->len reflects data in skb as well as all fragments
5861 * skb->data_len reflects only data in fragments
/* First fragment starts the frag_list; later ones chain off the tail. */
5863 if (!skb_has_frag_list(skb))
5864 skb_shinfo(skb)->frag_list = new_frag;
5866 new_frag->next = NULL;
5868 (*last_frag)->next = new_frag;
5869 *last_frag = new_frag;
5871 skb->len += new_frag->len;
5872 skb->data_len += new_frag->len;
5873 skb->truesize += new_frag->truesize;
/* Reassemble ERTM/streaming PDUs into SDUs according to the SAR bits in
 * the control field.  Unsegmented PDUs go straight to chan->ops->recv();
 * START stashes the SDU length and first fragment, CONTINUE/END append
 * fragments via append_skb_frag(), and a completed SDU is delivered.
 * NOTE(review): elided listing — failure cleanup (the kfree_skb path at
 * the bottom) and some branch edges are not fully visible.
 */
5876 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5877 struct l2cap_ctrl *control)
5881 switch (control->sar) {
5882 case L2CAP_SAR_UNSEGMENTED:
5886 err = chan->ops->recv(chan, skb);
5889 case L2CAP_SAR_START:
/* First fragment carries the total SDU length up front. */
5893 chan->sdu_len = get_unaligned_le16(skb->data);
5894 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* SDU larger than our MTU is a protocol violation. */
5896 if (chan->sdu_len > chan->imtu) {
/* A START fragment should not already contain the whole SDU. */
5901 if (skb->len >= chan->sdu_len)
5905 chan->sdu_last_frag = skb;
5911 case L2CAP_SAR_CONTINUE:
5915 append_skb_frag(chan->sdu, skb,
5916 &chan->sdu_last_frag);
/* A CONTINUE fragment must not complete (or overflow) the SDU. */
5919 if (chan->sdu->len >= chan->sdu_len)
5929 append_skb_frag(chan->sdu, skb,
5930 &chan->sdu_last_frag);
/* END fragment must make the SDU exactly the announced length. */
5933 if (chan->sdu->len != chan->sdu_len)
5936 err = chan->ops->recv(chan, chan->sdu)
5939 /* Reassembly complete */
5941 chan->sdu_last_frag = NULL;
/* Error path: drop the partially assembled SDU. */
5949 kfree_skb(chan->sdu);
5951 chan->sdu_last_frag = NULL;
/* Re-segment queued outbound data after the connection MTU changed
 * (called from the channel-move paths).  Body elided in this extract.
 */
5958 static int l2cap_resegment(struct l2cap_chan *chan)
/* Signal local-busy transitions (receive buffer full / drained) into the
 * ERTM TX state machine.  No-op for non-ERTM channels.
 */
5964 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5968 if (chan->mode != L2CAP_MODE_ERTM)
5971 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5972 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ hold queue: deliver consecutively-sequenced I-frames to
 * reassembly until the next gap (or local busy).  Once the queue is fully
 * drained, return to the normal RECV state and ack the peer.
 */
5975 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5978 /* Pass sequential frames to l2cap_reassemble_sdu()
5979 * until a gap is encountered.
5982 BT_DBG("chan %p", chan);
5984 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5985 struct sk_buff *skb;
5986 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5987 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5989 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5994 skb_unlink(skb, &chan->srej_q);
5995 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5996 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6001 if (skb_queue_empty(&chan->srej_q)) {
6002 chan->rx_state = L2CAP_RX_STATE_RECV;
6003 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: validate reqseq, find the requested
 * frame in the TX queue, enforce the retry limit, then retransmit —
 * honouring the P/F bits and the CONN_SREJ_ACT bookkeeping that prevents
 * retransmitting the same frame twice for one poll/final exchange.
 */
6009 static void l2cap_handle_srej(struct l2cap_chan *chan,
6010 struct l2cap_ctrl *control)
6012 struct sk_buff *skb;
6014 BT_DBG("chan %p, control %p", chan, control);
/* An SREJ for the next (unsent) seq is invalid — disconnect. */
6016 if (control->reqseq == chan->next_tx_seq) {
6017 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6018 l2cap_send_disconn_req(chan, ECONNRESET);
6022 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6025 BT_DBG("Seq %d not available for retransmission",
/* max_tx == 0 means "infinite retries" in ERTM. */
6030 if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6031 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6032 l2cap_send_disconn_req(chan, ECONNRESET);
6036 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6038 if (control->poll) {
6039 l2cap_pass_to_tx(chan, control);
6041 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6042 l2cap_retransmit(chan, control);
6043 l2cap_ertm_send(chan);
6045 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6046 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6047 chan->srej_save_reqseq = control->reqseq;
6050 l2cap_pass_to_tx_fbit(chan, control);
6052 if (control->final) {
/* Only skip the retransmit if this F-bit answers the SREJ we
 * already acted on for the same sequence number.
 */
6053 if (chan->srej_save_reqseq != control->reqseq ||
6054 !test_and_clear_bit(CONN_SREJ_ACT,
6056 l2cap_retransmit(chan, control);
6058 l2cap_retransmit(chan, control);
6059 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6060 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6061 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: validate reqseq and retry limit, then
 * retransmit everything from reqseq onward.  CONN_REJ_ACT suppresses a
 * duplicate full retransmission when the F-bit answers a poll we sent
 * after already acting on the REJ.
 */
6067 static void l2cap_handle_rej(struct l2cap_chan *chan,
6068 struct l2cap_ctrl *control)
6070 struct sk_buff *skb;
6072 BT_DBG("chan %p, control %p", chan, control);
/* REJ of the next (unsent) seq is invalid — disconnect. */
6074 if (control->reqseq == chan->next_tx_seq) {
6075 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6076 l2cap_send_disconn_req(chan, ECONNRESET);
6080 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6082 if (chan->max_tx && skb &&
6083 bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6084 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6085 l2cap_send_disconn_req(chan, ECONNRESET);
6089 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6091 l2cap_pass_to_tx(chan, control);
6093 if (control->final) {
6094 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6095 l2cap_retransmit_all(chan, control);
6097 l2cap_retransmit_all(chan, control);
6098 l2cap_ertm_send(chan);
6099 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6100 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify a received I-frame's txseq relative to the RX window:
 * EXPECTED, DUPLICATE, UNEXPECTED (gap → needs SREJ), the SREJ-specific
 * variants while in SREJ_SENT state, or INVALID / INVALID_IGNORE for
 * out-of-window sequence numbers.  The IGNORE variant exists for the
 * "double poll" wrap-around case explained inline.
 */
6104 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6106 BT_DBG("chan %p, txseq %d", chan, txseq);
6108 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6109 chan->expected_tx_seq);
/* Extra checks only apply while SREJs are outstanding. */
6111 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6112 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6114 /* See notes below regarding "double poll" and
6117 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6118 BT_DBG("Invalid/Ignore - after SREJ");
6119 return L2CAP_TXSEQ_INVALID_IGNORE;
6121 BT_DBG("Invalid - in window after SREJ sent");
6122 return L2CAP_TXSEQ_INVALID;
/* Head of the SREJ list is the retransmission we want next. */
6126 if (chan->srej_list.head == txseq) {
6127 BT_DBG("Expected SREJ");
6128 return L2CAP_TXSEQ_EXPECTED_SREJ;
6131 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6132 BT_DBG("Duplicate SREJ - txseq already stored");
6133 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6136 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6137 BT_DBG("Unexpected SREJ - not requested");
6138 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6142 if (chan->expected_tx_seq == txseq) {
6143 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6145 BT_DBG("Invalid - txseq outside tx window");
6146 return L2CAP_TXSEQ_INVALID;
6149 return L2CAP_TXSEQ_EXPECTED;
/* txseq earlier than expected_tx_seq ⇒ already received. */
6153 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6154 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6155 BT_DBG("Duplicate - expected_tx_seq later than txseq")
6156 return L2CAP_TXSEQ_DUPLICATE;
6159 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6160 /* A source of invalid packets is a "double poll" condition,
6161 * where delays cause us to send multiple poll packets. If
6162 * the remote stack receives and processes both polls,
6163 * sequence numbers can wrap around in such a way that a
6164 * resent frame has a sequence number that looks like new data
6165 * with a sequence gap. This would trigger an erroneous SREJ
6168 * Fortunately, this is impossible with a tx window that's
6169 * less than half of the maximum sequence number, which allows
6170 * invalid frames to be safely ignored.
6172 * With tx window sizes greater than half of the tx window
6173 * maximum, the frame is invalid and cannot be ignored. This
6174 * causes a disconnect.
6177 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6178 BT_DBG("Invalid/Ignore - txseq outside tx window");
6179 return L2CAP_TXSEQ_INVALID_IGNORE;
6181 BT_DBG("Invalid - txseq outside tx window");
6182 return L2CAP_TXSEQ_INVALID;
6185 BT_DBG("Unexpected - txseq indicates missing frames");
6186 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM RX state machine, RECV (normal) state.  Handles incoming I-frames
 * (by txseq classification) and RR/RNR/REJ/SREJ S-frames.  Frames that
 * are consumed (reassembled or queued on srej_q) set skb_in_use; anything
 * else is freed at the bottom.
 * NOTE(review): elided listing — the frees, breaks and final return are
 * not all visible here.
 */
6190 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6191 struct l2cap_ctrl *control,
6192 struct sk_buff *skb, u8 event)
6195 bool skb_in_use = false;
6197 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6201 case L2CAP_EV_RECV_IFRAME:
6202 switch (l2cap_classify_txseq(chan, control->txseq)) {
6203 case L2CAP_TXSEQ_EXPECTED:
6204 l2cap_pass_to_tx(chan, control);
/* While locally busy we drop even in-order frames; the peer
 * will retransmit once we clear busy.
 */
6206 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6207 BT_DBG("Busy, discarding expected seq %d",
6212 chan->expected_tx_seq = __next_seq(chan,
6215 chan->buffer_seq = chan->expected_tx_seq;
6218 err = l2cap_reassemble_sdu(chan, skb, control);
6222 if (control->final) {
6223 if (!test_and_clear_bit(CONN_REJ_ACT,
6224 &chan->conn_state)) {
6226 l2cap_retransmit_all(chan, control);
6227 l2cap_ertm_send(chan);
6231 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6232 l2cap_send_ack(chan);
6234 case L2CAP_TXSEQ_UNEXPECTED:
6235 l2cap_pass_to_tx(chan, control);
6237 /* Can't issue SREJ frames in the local busy state.
6238 * Drop this frame, it will be seen as missing
6239 * when local busy is exited.
6241 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6242 BT_DBG("Busy, discarding unexpected seq %d",
6247 /* There was a gap in the sequence, so an SREJ
6248 * must be sent for each missing frame. The
6249 * current frame is stored for later use.
6251 skb_queue_tail(&chan->srej_q, skb);
6253 BT_DBG("Queued %p (queue len %d)", skb,
6254 skb_queue_len(&chan->srej_q));
6256 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6257 l2cap_seq_list_clear(&chan->srej_list);
6258 l2cap_send_srej(chan, control->txseq);
6260 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6262 case L2CAP_TXSEQ_DUPLICATE:
6263 l2cap_pass_to_tx(chan, control);
6265 case L2CAP_TXSEQ_INVALID_IGNORE:
6267 case L2CAP_TXSEQ_INVALID:
6269 l2cap_send_disconn_req(chan, ECONNRESET);
6273 case L2CAP_EV_RECV_RR:
6274 l2cap_pass_to_tx(chan, control);
6275 if (control->final) {
6276 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Don't retransmit while an AMP channel move is in progress. */
6278 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6279 !__chan_is_moving(chan)) {
6281 l2cap_retransmit_all(chan, control);
6284 l2cap_ertm_send(chan);
6285 } else if (control->poll) {
6286 l2cap_send_i_or_rr_or_rnr(chan);
6288 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6289 &chan->conn_state) &&
6290 chan->unacked_frames)
6291 __set_retrans_timer(chan);
6293 l2cap_ertm_send(chan);
6296 case L2CAP_EV_RECV_RNR:
6297 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6298 l2cap_pass_to_tx(chan, control);
6299 if (control && control->poll) {
6300 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6301 l2cap_send_rr_or_rnr(chan, 0);
/* Remote is busy: stop retransmitting until it recovers. */
6303 __clear_retrans_timer(chan);
6304 l2cap_seq_list_clear(&chan->retrans_list);
6306 case L2CAP_EV_RECV_REJ:
6307 l2cap_handle_rej(chan, control);
6309 case L2CAP_EV_RECV_SREJ:
6310 l2cap_handle_srej(chan, control);
/* Free any skb that no branch above took ownership of. */
6316 if (skb && !skb_in_use) {
6317 BT_DBG("Freeing %p", skb);
/* ERTM RX state machine, SREJ_SENT state: we have outstanding selective
 * rejects.  Incoming I-frames are either the retransmission at the head
 * of the SREJ list (drain the hold queue), new in-order data (queued for
 * later), a new gap (more SREJs), or duplicates.  S-frames are handled
 * much like in RECV, with SREJ-tail resends on polls.
 * NOTE(review): elided listing — skb_in_use assignments and breaks are
 * not all visible.
 */
6324 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6325 struct l2cap_ctrl *control,
6326 struct sk_buff *skb, u8 event)
6329 u16 txseq = control->txseq;
6330 bool skb_in_use = false;
6332 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6336 case L2CAP_EV_RECV_IFRAME:
6337 switch (l2cap_classify_txseq(chan, txseq)) {
6338 case L2CAP_TXSEQ_EXPECTED:
6339 /* Keep frame for reassembly later */
6340 l2cap_pass_to_tx(chan, control);
6341 skb_queue_tail(&chan->srej_q, skb);
6343 BT_DBG("Queued %p (queue len %d)", skb,
6344 skb_queue_len(&chan->srej_q));
6346 chan->expected_tx_seq = __next_seq(chan, txseq);
6348 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* This is the retransmission we asked for — retire it. */
6349 l2cap_seq_list_pop(&chan->srej_list);
6351 l2cap_pass_to_tx(chan, control);
6352 skb_queue_tail(&chan->srej_q, skb);
6354 BT_DBG("Queued %p (queue len %d)", skb,
6355 skb_queue_len(&chan->srej_q));
/* Deliver whatever has now become contiguous. */
6357 err = l2cap_rx_queued_iframes(chan);
6362 case L2CAP_TXSEQ_UNEXPECTED:
6363 /* Got a frame that can't be reassembled yet.
6364 * Save it for later, and send SREJs to cover
6365 * the missing frames.
6367 skb_queue_tail(&chan->srej_q, skb);
6369 BT_DBG("Queued %p (queue len %d)", skb,
6370 skb_queue_len(&chan->srej_q));
6372 l2cap_pass_to_tx(chan, control);
6373 l2cap_send_srej(chan, control->txseq);
6375 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6376 /* This frame was requested with an SREJ, but
6377 * some expected retransmitted frames are
6378 * missing. Request retransmission of missing
6381 skb_queue_tail(&chan->srej_q, skb);
6383 BT_DBG("Queued %p (queue len %d)", skb,
6384 skb_queue_len(&chan->srej_q));
6386 l2cap_pass_to_tx(chan, control);
6387 l2cap_send_srej_list(chan, control->txseq);
6389 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6390 /* We've already queued this frame. Drop this copy. */
6391 l2cap_pass_to_tx(chan, control);
6393 case L2CAP_TXSEQ_DUPLICATE:
6394 /* Expecting a later sequence number, so this frame
6395 * was already received. Ignore it completely.
6398 case L2CAP_TXSEQ_INVALID_IGNORE:
6400 case L2CAP_TXSEQ_INVALID:
6402 l2cap_send_disconn_req(chan, ECONNRESET);
6406 case L2CAP_EV_RECV_RR:
6407 l2cap_pass_to_tx(chan, control);
6408 if (control->final) {
6409 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6411 if (!test_and_clear_bit(CONN_REJ_ACT,
6412 &chan->conn_state)) {
6414 l2cap_retransmit_all(chan, control);
6417 l2cap_ertm_send(chan);
6418 } else if (control->poll) {
6419 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6420 &chan->conn_state) &&
6421 chan->unacked_frames) {
6422 __set_retrans_timer(chan);
/* Answer the poll by re-requesting the newest missing frame. */
6425 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6426 l2cap_send_srej_tail(chan);
6428 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6429 &chan->conn_state) &&
6430 chan->unacked_frames)
6431 __set_retrans_timer(chan);
6433 l2cap_send_ack(chan);
6436 case L2CAP_EV_RECV_RNR:
6437 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6438 l2cap_pass_to_tx(chan, control);
6439 if (control->poll) {
6440 l2cap_send_srej_tail(chan);
6442 struct l2cap_ctrl rr_control;
6443 memset(&rr_control, 0, sizeof(rr_control));
6444 rr_control.sframe = 1;
6445 rr_control.super = L2CAP_SUPER_RR;
6446 rr_control.reqseq = chan->buffer_seq;
6447 l2cap_send_sframe(chan, &rr_control);
6451 case L2CAP_EV_RECV_REJ:
6452 l2cap_handle_rej(chan, control);
6454 case L2CAP_EV_RECV_SREJ:
6455 l2cap_handle_srej(chan, control);
/* Free any skb that no branch above took ownership of. */
6459 if (skb && !skb_in_use) {
6460 BT_DBG("Freeing %p", skb);
/* Complete an AMP channel move: return to the RECV state, pick the MTU of
 * the link the channel now lives on (AMP block MTU vs BR/EDR ACL MTU),
 * and resegment queued outbound data for the new MTU.
 */
6467 static int l2cap_finish_move(struct l2cap_chan *chan)
6469 BT_DBG("chan %p", chan);
6471 chan->rx_state = L2CAP_RX_STATE_RECV;
6474 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6476 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6478 return l2cap_resegment(chan);
/* RX state while waiting for a P-bit after a channel move: on the poll,
 * ack up to reqseq, rewind the TX queue/next_tx_seq to the peer's
 * expectation, finish the move, answer with the F-bit, and finally feed
 * the triggering frame through the normal RECV handling.
 */
6481 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6482 struct l2cap_ctrl *control,
6483 struct sk_buff *skb, u8 event)
6487 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6493 l2cap_process_reqseq(chan, control->reqseq);
6495 if (!skb_queue_empty(&chan->tx_q))
6496 chan->tx_send_head = skb_peek(&chan->tx_q);
6498 chan->tx_send_head = NULL;
6500 /* Rewind next_tx_seq to the point expected
6503 chan->next_tx_seq = control->reqseq;
6504 chan->unacked_frames = 0;
6506 err = l2cap_finish_move(chan);
6510 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6511 l2cap_send_i_or_rr_or_rnr(chan);
/* The poll itself was an S-frame; only re-dispatch S-frame events. */
6513 if (event == L2CAP_EV_RECV_IFRAME)
6516 return l2cap_rx_state_recv(chan, control, NULL, event);
/* RX state while waiting for an F-bit after a channel move: ignore frames
 * until the final bit arrives, then clear remote busy, rewind the TX side
 * to reqseq, adopt the new link's MTU, resegment, and process the frame
 * through the normal RECV handling.
 */
6519 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6520 struct l2cap_ctrl *control,
6521 struct sk_buff *skb, u8 event)
6525 if (!control->final)
6528 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6530 chan->rx_state = L2CAP_RX_STATE_RECV;
6531 l2cap_process_reqseq(chan, control->reqseq);
6533 if (!skb_queue_empty(&chan->tx_q))
6534 chan->tx_send_head = skb_peek(&chan->tx_q);
6536 chan->tx_send_head = NULL;
6538 /* Rewind next_tx_seq to the point expected
6541 chan->next_tx_seq = control->reqseq;
6542 chan->unacked_frames = 0;
/* Choose MTU from the link the channel ended up on (AMP vs ACL). */
6545 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6547 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6549 err = l2cap_resegment(chan);
6552 err = l2cap_rx_state_recv(chan, control, skb, event);
/* A reqseq is valid iff it acknowledges a frame in [expected_ack_seq,
 * next_tx_seq] — i.e. something sent but not yet acked (modulo-window
 * arithmetic via __seq_offset).
 */
6557 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6559 /* Make sure reqseq is for a packet that has been sent but not acked */
6562 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6563 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM RX dispatcher: validate the frame's reqseq, then route
 * the event to the handler for the channel's current RX state.  An
 * invalid reqseq is a protocol error and disconnects the channel.
 */
6566 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6567 struct sk_buff *skb, u8 event)
6571 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6572 control, skb, event, chan->rx_state);
6574 if (__valid_reqseq(chan, control->reqseq)) {
6575 switch (chan->rx_state) {
6576 case L2CAP_RX_STATE_RECV:
6577 err = l2cap_rx_state_recv(chan, control, skb, event);
6579 case L2CAP_RX_STATE_SREJ_SENT:
6580 err = l2cap_rx_state_srej_sent(chan, control, skb,
6583 case L2CAP_RX_STATE_WAIT_P:
6584 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6586 case L2CAP_RX_STATE_WAIT_F:
6587 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6594 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6595 control->reqseq, chan->next_tx_seq,
6596 chan->expected_ack_seq);
6597 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: only exactly-expected txseqs are reassembled;
 * any other frame (and any partial SDU in progress) is dropped — no
 * retransmission in streaming mode.  The ack/expected counters always
 * advance to follow the received txseq.
 */
6603 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6604 struct sk_buff *skb)
6608 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6611 if (l2cap_classify_txseq(chan, control->txseq) ==
6612 L2CAP_TXSEQ_EXPECTED) {
6613 l2cap_pass_to_tx(chan, control);
6615 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6616 __next_seq(chan, chan->buffer_seq));
6618 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6620 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence frame: abandon any partially reassembled SDU. */
6623 kfree_skb(chan->sdu);
6626 chan->sdu_last_frag = NULL;
6630 BT_DBG("Freeing %p", skb);
/* Track the peer's txseq even for dropped frames. */
6635 chan->last_acked_seq = control->txseq;
6636 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM/streaming data frames: unpack the control field,
 * verify FCS, sanity-check the payload length against the MPS, then
 * route I-frames to l2cap_rx()/l2cap_stream_rx() and S-frames (via the
 * super-field → event table) to l2cap_rx().  Invalid F/P-bit combinations
 * are silently dropped per the state tables.
 */
6641 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6643 struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
6647 __unpack_control(chan, skb);
6652 * We can just drop the corrupted I-frame here.
6653 * Receiver will miss it and start proper recovery
6654 * procedures and ask for retransmission.
6656 if (l2cap_check_fcs(chan, skb))
/* Payload length excludes the SDU-length prefix and the FCS. */
6659 if (!control->sframe && control->sar == L2CAP_SAR_START)
6660 len -= L2CAP_SDULEN_SIZE;
6662 if (chan->fcs == L2CAP_FCS_CRC16)
6663 len -= L2CAP_FCS_SIZE;
6665 if (len > chan->mps) {
6666 l2cap_send_disconn_req(chan, ECONNRESET);
6670 if (!control->sframe) {
6673 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6674 control->sar, control->reqseq, control->final,
6677 /* Validate F-bit - F=0 always valid, F=1 only
6678 * valid in TX WAIT_F
6680 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6683 if (chan->mode != L2CAP_MODE_STREAMING) {
6684 event = L2CAP_EV_RECV_IFRAME;
6685 err = l2cap_rx(chan, control, skb, event);
6687 err = l2cap_stream_rx(chan, control, skb);
6691 l2cap_send_disconn_req(chan, ECONNRESET);
/* Maps the 2-bit S-field of an S-frame to an RX event. */
6693 const u8 rx_func_to_event[4] = {
6694 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6695 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6698 /* Only I-frames are expected in streaming mode */
6699 if (chan->mode == L2CAP_MODE_STREAMING)
6702 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6703 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; anything left over is a violation. */
6707 BT_ERR("Trailing bytes: %d in sframe", len);
6708 l2cap_send_disconn_req(chan, ECONNRESET);
6712 /* Validate F and P bits */
6713 if (control->final && (control->poll ||
6714 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6717 event = rx_func_to_event[control->super];
6718 if (l2cap_rx(chan, control, skb, event))
6719 l2cap_send_disconn_req(chan, ECONNRESET);
/* Return receive credits to the peer on an LE flow-control channel, but
 * only once our remaining credits drop below half the configured maximum
 * (le_max_credits) — this batches credit updates instead of sending one
 * per received PDU.
 */
6729 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6731 struct l2cap_conn *conn = chan->conn;
6732 struct l2cap_le_credits pkt;
6735 /* We return more credits to the sender only after the amount of
6736 * credits falls below half of the initial amount.
6738 if (chan->rx_credits >= (le_max_credits + 1) / 2)
/* Top the peer back up to the full le_max_credits. */
6741 return_credits = le_max_credits - chan->rx_credits;
6743 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6745 chan->rx_credits += return_credits;
6747 pkt.cid = cpu_to_le16(chan->scid);
6748 pkt.credits = cpu_to_le16(return_credits);
6750 chan->ident = l2cap_get_ident(conn);
6752 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Receive one PDU on an LE credit-based flow-control channel: consume a
 * credit (data with no credits is a protocol error), possibly return
 * credits, then reassemble the SDU — the first PDU of an SDU carries a
 * 16-bit SDU length prefix, later PDUs are appended until the announced
 * length is reached.  Over-length SDUs/PDUs tear the data down.
 * NOTE(review): elided listing — some branch edges and frees between the
 * shown statements are not visible.
 */
6755 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6759 if (!chan->rx_credits) {
6760 BT_ERR("No credits to receive LE L2CAP data");
6761 l2cap_send_disconn_req(chan, ECONNRESET);
6765 if (chan->imtu < skb->len) {
6766 BT_ERR("Too big LE L2CAP PDU");
6771 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6773 l2cap_chan_le_send_credits(chan);
/* First PDU of a new SDU: read the SDU length prefix. */
6780 sdu_len = get_unaligned_le16(skb->data);
6781 skb_pull(skb, L2CAP_SDULEN_SIZE);
6783 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6784 sdu_len, skb->len, chan->imtu);
6786 if (sdu_len > chan->imtu) {
6787 BT_ERR("Too big LE L2CAP SDU length received");
6792 if (skb->len > sdu_len) {
6793 BT_ERR("Too much LE L2CAP data received");
/* Whole SDU in one PDU: deliver immediately, no reassembly state. */
6798 if (skb->len == sdu_len)
6799 return chan->ops->recv(chan, skb);
6802 chan->sdu_len = sdu_len;
6803 chan->sdu_last_frag = skb;
6805 /* Detect if remote is not able to use the selected MPS */
6806 if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
6807 u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
6809 /* Adjust the number of credits */
6810 BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
6811 chan->mps = mps_len;
6812 l2cap_chan_le_send_credits(chan);
6818 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6819 chan->sdu->len, skb->len, chan->sdu_len);
6821 if (chan->sdu->len + skb->len > chan->sdu_len) {
6822 BT_ERR("Too much LE L2CAP data received");
6827 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6830 if (chan->sdu->len == chan->sdu_len) {
6831 err = chan->ops->recv(chan, chan->sdu);
6834 chan->sdu_last_frag = NULL;
/* Error path: discard the partial SDU. */
6842 kfree_skb(chan->sdu);
6844 chan->sdu_last_frag = NULL;
6848 /* We can't return an error here since we took care of the skb
6849 * freeing internally. An error return would cause the caller to
6850 * do a double-free of the skb.
/* Route an incoming data frame to the channel identified by cid and hand
 * it to the mode-specific receive path (LE flow control, basic, or
 * ERTM/streaming).  Unknown CIDs (except the A2MP case) drop the packet.
 */
6855 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6856 struct sk_buff *skb)
6858 struct l2cap_chan *chan;
6860 chan = l2cap_get_chan_by_scid(conn, cid);
/* A2MP channel is created lazily on first inbound data. */
6862 if (cid == L2CAP_CID_A2MP) {
6863 chan = a2mp_channel_create(conn, skb);
6869 l2cap_chan_lock(chan);
6871 BT_DBG("unknown cid 0x%4.4x", cid);
6872 /* Drop packet and return */
6878 BT_DBG("chan %p, len %d", chan, skb->len);
6880 /* If we receive data on a fixed channel before the info req/rsp
6881 * procdure is done simply assume that the channel is supported
6882 * and mark it as ready.
6884 if (chan->chan_type == L2CAP_CHAN_FIXED)
6885 l2cap_chan_ready(chan);
6887 if (chan->state != BT_CONNECTED)
6890 switch (chan->mode) {
6891 case L2CAP_MODE_LE_FLOWCTL:
6892 if (l2cap_le_data_rcv(chan, skb) < 0)
6897 case L2CAP_MODE_BASIC:
6898 /* If socket recv buffers overflows we drop data here
6899 * which is *bad* because L2CAP has to be reliable.
6900 * But we don't have any other choice. L2CAP doesn't
6901 * provide flow control mechanism. */
6903 if (chan->imtu < skb->len) {
6904 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6908 if (!chan->ops->recv(chan, skb))
6912 case L2CAP_MODE_ERTM:
6913 case L2CAP_MODE_STREAMING:
6914 l2cap_data_rcv(chan, skb);
6918 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6926 l2cap_chan_unlock(chan);
/* Deliver a connectionless (UCD) frame: look up a global channel bound to
 * the given PSM and addresses, validate state and MTU, stash the sender's
 * address and PSM in the skb control block (for recvmsg's msg_name), and
 * hand the skb up.  The global lookup takes a channel ref that is dropped
 * on every exit path.
 */
6929 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6930 struct sk_buff *skb)
6932 struct hci_conn *hcon = conn->hcon;
6933 struct l2cap_chan *chan;
/* Connectionless data only exists on BR/EDR. */
6935 if (hcon->type != ACL_LINK)
6938 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6943 BT_DBG("chan %p, len %d", chan, skb->len);
6945 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6948 if (chan->imtu < skb->len)
6951 /* Store remote BD_ADDR and PSM for msg_name */
6952 bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
6953 bt_cb(skb)->l2cap.psm = psm;
6955 if (!chan->ops->recv(chan, skb)) {
6956 l2cap_chan_put(chan);
6961 l2cap_chan_put(chan);
/* Demultiplex a reassembled L2CAP frame by CID: signaling (BR/EDR or LE),
 * connectionless, or per-channel data.  Frames arriving before the HCI
 * connection is fully up are parked on conn->pending_rx and replayed by
 * process_pending_rx().  Data from blacklisted LE peers is dropped.
 */
6966 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6968 struct l2cap_hdr *lh = (void *) skb->data;
6969 struct hci_conn *hcon = conn->hcon;
6973 if (hcon->state != BT_CONNECTED) {
6974 BT_DBG("queueing pending rx skb");
6975 skb_queue_tail(&conn->pending_rx, skb);
6979 skb_pull(skb, L2CAP_HDR_SIZE);
6980 cid = __le16_to_cpu(lh->cid);
6981 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload exactly. */
6983 if (len != skb->len) {
6988 /* Since we can't actively block incoming LE connections we must
6989 * at least ensure that we ignore incoming data from them.
6991 if (hcon->type == LE_LINK &&
6992 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6993 bdaddr_dst_type(hcon))) {
6998 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7001 case L2CAP_CID_SIGNALING:
7002 l2cap_sig_channel(conn, skb);
7005 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a PSM right after the header. */
7006 psm = get_unaligned((__le16 *) skb->data);
7007 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7008 l2cap_conless_channel(conn, psm, skb);
7011 case L2CAP_CID_LE_SIGNALING:
7012 l2cap_le_sig_channel(conn, skb);
7016 l2cap_data_channel(conn, cid, skb);
/* Workqueue handler: replay frames that arrived before the HCI connection
 * reached BT_CONNECTED (queued by l2cap_recv_frame above).
 */
7021 static void process_pending_rx(struct work_struct *work)
7023 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7025 struct sk_buff *skb;
7029 while ((skb = skb_dequeue(&conn->pending_rx)))
7030 l2cap_recv_frame(conn, skb);
/* Create (or return) the l2cap_conn attached to an HCI connection:
 * allocate the conn, create its HCI channel, pick the MTU by link type,
 * advertise the locally supported fixed channels, and initialise locks,
 * lists, timers and work items.  Takes a ref on the hci_conn.
 */
7033 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7035 struct l2cap_conn *conn = hcon->l2cap_data;
7036 struct hci_chan *hchan;
7041 hchan = hci_chan_create(hcon);
7045 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan we just created. */
7047 hci_chan_del(hchan);
7051 kref_init(&conn->ref);
7052 hcon->l2cap_data = conn;
7053 conn->hcon = hci_conn_get(hcon);
7054 conn->hchan = hchan;
7056 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* MTU: LE links may have a dedicated le_mtu; else fall back to ACL. */
7058 switch (hcon->type) {
7060 if (hcon->hdev->le_mtu) {
7061 conn->mtu = hcon->hdev->le_mtu;
7066 conn->mtu = hcon->hdev->acl_mtu;
7070 conn->feat_mask = 0;
7072 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7074 if (hcon->type == ACL_LINK &&
7075 hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7076 conn->local_fixed_chan |= L2CAP_FC_A2MP;
/* BR/EDR security manager channel, if SMP over BR/EDR is possible. */
7078 if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7079 (bredr_sc_enabled(hcon->hdev) ||
7080 hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7081 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7083 mutex_init(&conn->ident_lock);
7084 mutex_init(&conn->chan_lock);
7086 INIT_LIST_HEAD(&conn->chan_l);
7087 INIT_LIST_HEAD(&conn->users);
7089 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7091 skb_queue_head_init(&conn->pending_rx);
7092 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7093 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7095 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Validate a PSM for the destination address type: LE PSMs fit in one
 * octet (<= 0x00ff); BR/EDR PSMs must have an odd low octet and an even
 * low bit in the upper octet, per the L2CAP spec.
 */
7100 static bool is_valid_psm(u16 psm, u8 dst_type) {
7104 if (bdaddr_type_is_le(dst_type))
7105 return (psm <= 0x00ff);
7107 /* PSM must be odd and lsb of upper byte must be 0 */
7108 return ((psm & 0x0101) == 0x0001);
7111 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7112 bdaddr_t *dst, u8 dst_type)
7114 struct l2cap_conn *conn;
7115 struct hci_conn *hcon;
7116 struct hci_dev *hdev;
7119 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7120 dst_type, __le16_to_cpu(psm));
7122 hdev = hci_get_route(dst, &chan->src);
7124 return -EHOSTUNREACH;
7128 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7129 chan->chan_type != L2CAP_CHAN_RAW) {
7134 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7139 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7144 switch (chan->mode) {
7145 case L2CAP_MODE_BASIC:
7147 case L2CAP_MODE_LE_FLOWCTL:
7148 l2cap_le_flowctl_init(chan);
7150 case L2CAP_MODE_ERTM:
7151 case L2CAP_MODE_STREAMING:
7160 switch (chan->state) {
7164 /* Already connecting */
7169 /* Already connected */
7183 /* Set destination address and psm */
7184 bacpy(&chan->dst, dst);
7185 chan->dst_type = dst_type;
7190 if (bdaddr_type_is_le(dst_type)) {
7193 /* Convert from L2CAP channel address type to HCI address type
7195 if (dst_type == BDADDR_LE_PUBLIC)
7196 dst_type = ADDR_LE_DEV_PUBLIC;
7198 dst_type = ADDR_LE_DEV_RANDOM;
7200 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7201 role = HCI_ROLE_SLAVE;
7203 role = HCI_ROLE_MASTER;
7205 hcon = hci_connect_le_scan(hdev, dst, dst_type,
7207 HCI_LE_CONN_TIMEOUT,
7210 u8 auth_type = l2cap_get_auth_type(chan);
7211 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7215 err = PTR_ERR(hcon);
7219 conn = l2cap_conn_add(hcon);
7221 hci_conn_drop(hcon);
7226 mutex_lock(&conn->chan_lock);
7227 l2cap_chan_lock(chan);
7229 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7230 hci_conn_drop(hcon);
7235 /* Update source addr of the socket */
7236 bacpy(&chan->src, &hcon->src);
7237 chan->src_type = bdaddr_src_type(hcon);
7239 __l2cap_chan_add(conn, chan);
7241 /* l2cap_chan_add takes its own ref so we can drop this one */
7242 hci_conn_drop(hcon);
7244 l2cap_state_change(chan, BT_CONNECT);
7245 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7247 /* Release chan->sport so that it can be reused by other
7248 * sockets (as it's only used for listening sockets).
7250 write_lock(&chan_list_lock);
7252 write_unlock(&chan_list_lock);
7254 if (hcon->state == BT_CONNECTED) {
7255 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7256 __clear_chan_timer(chan);
7257 if (l2cap_chan_check_security(chan, true))
7258 l2cap_state_change(chan, BT_CONNECTED);
7260 l2cap_do_start(chan);
7266 l2cap_chan_unlock(chan);
7267 mutex_unlock(&conn->chan_lock);
7269 hci_dev_unlock(hdev);
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
/* ---- L2CAP interface with lower layer (HCI) ---- */
7277 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7279 int exact = 0, lm1 = 0, lm2 = 0;
7280 struct l2cap_chan *c;
7282 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7284 /* Find listening sockets and check their link_mode */
7285 read_lock(&chan_list_lock);
7286 list_for_each_entry(c, &chan_list, global_l) {
7287 if (c->state != BT_LISTEN)
7290 if (!bacmp(&c->src, &hdev->bdaddr)) {
7291 lm1 |= HCI_LM_ACCEPT;
7292 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7293 lm1 |= HCI_LM_MASTER;
7295 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7296 lm2 |= HCI_LM_ACCEPT;
7297 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7298 lm2 |= HCI_LM_MASTER;
7301 read_unlock(&chan_list_lock);
7303 return exact ? lm1 : lm2;
7306 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7307 * from an existing channel in the list or from the beginning of the
7308 * global list (by passing NULL as first parameter).
7310 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7311 struct hci_conn *hcon)
7313 u8 src_type = bdaddr_src_type(hcon);
7315 read_lock(&chan_list_lock);
7318 c = list_next_entry(c, global_l);
7320 c = list_entry(chan_list.next, typeof(*c), global_l);
7322 list_for_each_entry_from(c, &chan_list, global_l) {
7323 if (c->chan_type != L2CAP_CHAN_FIXED)
7325 if (c->state != BT_LISTEN)
7327 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7329 if (src_type != c->src_type)
7333 read_unlock(&chan_list_lock);
7337 read_unlock(&chan_list_lock);
7342 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7344 struct hci_dev *hdev = hcon->hdev;
7345 struct l2cap_conn *conn;
7346 struct l2cap_chan *pchan;
7349 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7352 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7355 l2cap_conn_del(hcon, bt_to_errno(status));
7359 conn = l2cap_conn_add(hcon);
7363 dst_type = bdaddr_dst_type(hcon);
7365 /* If device is blocked, do not create channels for it */
7366 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7369 /* Find fixed channels and notify them of the new connection. We
7370 * use multiple individual lookups, continuing each time where
7371 * we left off, because the list lock would prevent calling the
7372 * potentially sleeping l2cap_chan_lock() function.
7374 pchan = l2cap_global_fixed_chan(NULL, hcon);
7376 struct l2cap_chan *chan, *next;
7378 /* Client fixed channels should override server ones */
7379 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7382 l2cap_chan_lock(pchan);
7383 chan = pchan->ops->new_connection(pchan);
7385 bacpy(&chan->src, &hcon->src);
7386 bacpy(&chan->dst, &hcon->dst);
7387 chan->src_type = bdaddr_src_type(hcon);
7388 chan->dst_type = dst_type;
7390 __l2cap_chan_add(conn, chan);
7393 l2cap_chan_unlock(pchan);
7395 next = l2cap_global_fixed_chan(pchan, hcon);
7396 l2cap_chan_put(pchan);
7400 l2cap_conn_ready(conn);
7403 int l2cap_disconn_ind(struct hci_conn *hcon)
7405 struct l2cap_conn *conn = hcon->l2cap_data;
7407 BT_DBG("hcon %p", hcon);
7410 return HCI_ERROR_REMOTE_USER_TERM;
7411 return conn->disc_reason;
7414 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7416 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7419 BT_DBG("hcon %p reason %d", hcon, reason);
7421 l2cap_conn_del(hcon, bt_to_errno(reason));
7424 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7426 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7429 if (encrypt == 0x00) {
7430 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7431 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7432 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7433 chan->sec_level == BT_SECURITY_FIPS)
7434 l2cap_chan_close(chan, ECONNREFUSED);
7436 if (chan->sec_level == BT_SECURITY_MEDIUM)
7437 __clear_chan_timer(chan);
7441 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7443 struct l2cap_conn *conn = hcon->l2cap_data;
7444 struct l2cap_chan *chan;
7449 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7451 mutex_lock(&conn->chan_lock);
7453 list_for_each_entry(chan, &conn->chan_l, list) {
7454 l2cap_chan_lock(chan);
7456 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7457 state_to_string(chan->state));
7459 if (chan->scid == L2CAP_CID_A2MP) {
7460 l2cap_chan_unlock(chan);
7464 if (!status && encrypt)
7465 chan->sec_level = hcon->sec_level;
7467 if (!__l2cap_no_conn_pending(chan)) {
7468 l2cap_chan_unlock(chan);
7472 if (!status && (chan->state == BT_CONNECTED ||
7473 chan->state == BT_CONFIG)) {
7474 chan->ops->resume(chan);
7475 l2cap_check_encryption(chan, encrypt);
7476 l2cap_chan_unlock(chan);
7480 if (chan->state == BT_CONNECT) {
7481 if (!status && l2cap_check_enc_key_size(hcon))
7482 l2cap_start_connection(chan);
7484 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7485 } else if (chan->state == BT_CONNECT2 &&
7486 chan->mode != L2CAP_MODE_LE_FLOWCTL) {
7487 struct l2cap_conn_rsp rsp;
7490 if (!status && l2cap_check_enc_key_size(hcon)) {
7491 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7492 res = L2CAP_CR_PEND;
7493 stat = L2CAP_CS_AUTHOR_PEND;
7494 chan->ops->defer(chan);
7496 l2cap_state_change(chan, BT_CONFIG);
7497 res = L2CAP_CR_SUCCESS;
7498 stat = L2CAP_CS_NO_INFO;
7501 l2cap_state_change(chan, BT_DISCONN);
7502 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7503 res = L2CAP_CR_SEC_BLOCK;
7504 stat = L2CAP_CS_NO_INFO;
7507 rsp.scid = cpu_to_le16(chan->dcid);
7508 rsp.dcid = cpu_to_le16(chan->scid);
7509 rsp.result = cpu_to_le16(res);
7510 rsp.status = cpu_to_le16(stat);
7511 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
7514 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7515 res == L2CAP_CR_SUCCESS) {
7517 set_bit(CONF_REQ_SENT, &chan->conf_state);
7518 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7520 l2cap_build_conf_req(chan, buf, sizeof(buf)),
7522 chan->num_conf_req++;
7526 l2cap_chan_unlock(chan);
7529 mutex_unlock(&conn->chan_lock);
7532 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7534 struct l2cap_conn *conn = hcon->l2cap_data;
7535 struct l2cap_hdr *hdr;
7538 /* For AMP controller do not create l2cap conn */
7539 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7543 conn = l2cap_conn_add(hcon);
7548 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7552 case ACL_START_NO_FLUSH:
7555 BT_ERR("Unexpected start frame (len %d)", skb->len);
7556 kfree_skb(conn->rx_skb);
7557 conn->rx_skb = NULL;
7559 l2cap_conn_unreliable(conn, ECOMM);
7562 /* Start fragment always begin with Basic L2CAP header */
7563 if (skb->len < L2CAP_HDR_SIZE) {
7564 BT_ERR("Frame is too short (len %d)", skb->len);
7565 l2cap_conn_unreliable(conn, ECOMM);
7569 hdr = (struct l2cap_hdr *) skb->data;
7570 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7572 if (len == skb->len) {
7573 /* Complete frame received */
7574 l2cap_recv_frame(conn, skb);
7578 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7580 if (skb->len > len) {
7581 BT_ERR("Frame is too long (len %d, expected len %d)",
7583 l2cap_conn_unreliable(conn, ECOMM);
7587 /* Allocate skb for the complete frame (with header) */
7588 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7592 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7594 conn->rx_len = len - skb->len;
7598 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7600 if (!conn->rx_len) {
7601 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7602 l2cap_conn_unreliable(conn, ECOMM);
7606 if (skb->len > conn->rx_len) {
7607 BT_ERR("Fragment is too long (len %d, expected %d)",
7608 skb->len, conn->rx_len);
7609 kfree_skb(conn->rx_skb);
7610 conn->rx_skb = NULL;
7612 l2cap_conn_unreliable(conn, ECOMM);
7616 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7618 conn->rx_len -= skb->len;
7620 if (!conn->rx_len) {
7621 /* Complete frame received. l2cap_recv_frame
7622 * takes ownership of the skb so set the global
7623 * rx_skb pointer to NULL first.
7625 struct sk_buff *rx_skb = conn->rx_skb;
7626 conn->rx_skb = NULL;
7627 l2cap_recv_frame(conn, rx_skb);
7636 static struct hci_cb l2cap_cb = {
7638 .connect_cfm = l2cap_connect_cfm,
7639 .disconn_cfm = l2cap_disconn_cfm,
7640 .security_cfm = l2cap_security_cfm,
7643 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7645 struct l2cap_chan *c;
7647 read_lock(&chan_list_lock);
7649 list_for_each_entry(c, &chan_list, global_l) {
7650 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7651 &c->src, c->src_type, &c->dst, c->dst_type,
7652 c->state, __le16_to_cpu(c->psm),
7653 c->scid, c->dcid, c->imtu, c->omtu,
7654 c->sec_level, c->mode);
7657 read_unlock(&chan_list_lock);
7662 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7664 return single_open(file, l2cap_debugfs_show, inode->i_private);
7667 static const struct file_operations l2cap_debugfs_fops = {
7668 .open = l2cap_debugfs_open,
7670 .llseek = seq_lseek,
7671 .release = single_release,
7674 static struct dentry *l2cap_debugfs;
7676 int __init l2cap_init(void)
7680 err = l2cap_init_sockets();
7684 hci_register_cb(&l2cap_cb);
7686 if (IS_ERR_OR_NULL(bt_debugfs))
7689 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7690 NULL, &l2cap_debugfs_fops);
7692 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7694 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7700 void l2cap_exit(void)
7702 debugfs_remove(l2cap_debugfs);
7703 hci_unregister_cb(&l2cap_cb);
7704 l2cap_cleanup_sockets();
7707 module_param(disable_ertm, bool, 0644);
7708 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");