2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
/* Upper bound on credits granted on an LE/ECRED flow-control channel
 * (credit field is 16 bits).
 */
#define LE_FLOWCTL_MAX_CREDITS 65535

/* Locally supported L2CAP features (fixed channels + unicast
 * connectionless data) advertised in the information response.
 */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;

/* Global list of every registered channel; guarded by chan_list_lock. */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for the signalling helpers and ERTM state
 * machine defined later in this file.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
struct sk_buff_head *skbs, u8 event);
static void l2cap_retrans_timeout(struct work_struct *work);
static void l2cap_monitor_timeout(struct work_struct *work);
static void l2cap_ack_timeout(struct work_struct *work);
68 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
70 if (link_type == LE_LINK) {
71 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
72 return BDADDR_LE_PUBLIC;
74 return BDADDR_LE_RANDOM;
80 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
82 return bdaddr_type(hcon->type, hcon->src_type);
85 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
87 return bdaddr_type(hcon->type, hcon->dst_type);
90 /* ---- L2CAP channels ---- */
92 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
97 list_for_each_entry(c, &conn->chan_l, list) {
104 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
107 struct l2cap_chan *c;
109 list_for_each_entry(c, &conn->chan_l, list) {
116 /* Find channel with given SCID.
117 * Returns a reference locked channel.
119 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
122 struct l2cap_chan *c;
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_scid(conn, cid);
127 /* Only lock if chan reference is not 0 */
128 c = l2cap_chan_hold_unless_zero(c);
132 mutex_unlock(&conn->chan_lock);
137 /* Find channel with given DCID.
138 * Returns a reference locked channel.
140 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
143 struct l2cap_chan *c;
145 mutex_lock(&conn->chan_lock);
146 c = __l2cap_get_chan_by_dcid(conn, cid);
148 /* Only lock if chan reference is not 0 */
149 c = l2cap_chan_hold_unless_zero(c);
153 mutex_unlock(&conn->chan_lock);
158 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
161 struct l2cap_chan *c;
163 list_for_each_entry(c, &conn->chan_l, list) {
164 if (c->ident == ident)
170 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
173 struct l2cap_chan *c;
175 mutex_lock(&conn->chan_lock);
176 c = __l2cap_get_chan_by_ident(conn, ident);
178 /* Only lock if chan reference is not 0 */
179 c = l2cap_chan_hold_unless_zero(c);
183 mutex_unlock(&conn->chan_lock);
188 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
191 struct l2cap_chan *c;
193 list_for_each_entry(c, &chan_list, global_l) {
194 if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
197 if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
200 if (c->sport == psm && !bacmp(&c->src, src))
206 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
210 write_lock(&chan_list_lock);
212 if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
222 u16 p, start, end, incr;
224 if (chan->src_type == BDADDR_BREDR) {
225 start = L2CAP_PSM_DYN_START;
226 end = L2CAP_PSM_AUTO_END;
229 start = L2CAP_PSM_LE_DYN_START;
230 end = L2CAP_PSM_LE_DYN_END;
235 for (p = start; p <= end; p += incr)
236 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
238 chan->psm = cpu_to_le16(p);
239 chan->sport = cpu_to_le16(p);
246 write_unlock(&chan_list_lock);
249 EXPORT_SYMBOL_GPL(l2cap_add_psm);
251 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
253 write_lock(&chan_list_lock);
255 /* Override the defaults (which are for conn-oriented) */
256 chan->omtu = L2CAP_DEFAULT_MTU;
257 chan->chan_type = L2CAP_CHAN_FIXED;
261 write_unlock(&chan_list_lock);
266 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
270 if (conn->hcon->type == LE_LINK)
271 dyn_end = L2CAP_CID_LE_DYN_END;
273 dyn_end = L2CAP_CID_DYN_END;
275 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
276 if (!__l2cap_get_chan_by_scid(conn, cid))
283 static void l2cap_state_change(struct l2cap_chan *chan, int state)
285 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
286 state_to_string(state));
289 chan->ops->state_change(chan, state, 0);
292 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
296 chan->ops->state_change(chan, chan->state, err);
299 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
301 chan->ops->state_change(chan, chan->state, err);
304 static void __set_retrans_timer(struct l2cap_chan *chan)
306 if (!delayed_work_pending(&chan->monitor_timer) &&
307 chan->retrans_timeout) {
308 l2cap_set_timer(chan, &chan->retrans_timer,
309 msecs_to_jiffies(chan->retrans_timeout));
313 static void __set_monitor_timer(struct l2cap_chan *chan)
315 __clear_retrans_timer(chan);
316 if (chan->monitor_timeout) {
317 l2cap_set_timer(chan, &chan->monitor_timer,
318 msecs_to_jiffies(chan->monitor_timeout));
322 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
327 skb_queue_walk(head, skb) {
328 if (bt_cb(skb)->l2cap.txseq == seq)
335 /* ---- L2CAP sequence number lists ---- */
337 /* For ERTM, ordered lists of sequence numbers must be tracked for
338 * SREJ requests that are received and for frames that are to be
339 * retransmitted. These seq_list functions implement a singly-linked
340 * list in an array, where membership in the list can also be checked
341 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocations.
346 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
348 size_t alloc_size, i;
350 /* Allocated size is a power of 2 to map sequence numbers
351 * (which may be up to 14 bits) in to a smaller array that is
352 * sized for the negotiated ERTM transmit windows.
354 alloc_size = roundup_pow_of_two(size);
356 seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
360 seq_list->mask = alloc_size - 1;
361 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
363 for (i = 0; i < alloc_size; i++)
364 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
369 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
371 kfree(seq_list->list);
374 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
377 /* Constant-time check for list membership */
378 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
381 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
383 u16 seq = seq_list->head;
384 u16 mask = seq_list->mask;
386 seq_list->head = seq_list->list[seq & mask];
387 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
389 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
390 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
391 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
397 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
401 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
404 for (i = 0; i <= seq_list->mask; i++)
405 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
407 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
408 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
411 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
413 u16 mask = seq_list->mask;
415 /* All appends happen in constant time */
417 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
420 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
421 seq_list->head = seq;
423 seq_list->list[seq_list->tail & mask] = seq;
425 seq_list->tail = seq;
426 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed work run when a channel timer expires: close the channel with
 * an error that reflects the state it timed out in.
 * (Restored the ETIMEDOUT default branch lost in extraction.)
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
462 struct l2cap_chan *l2cap_chan_create(void)
464 struct l2cap_chan *chan;
466 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
470 skb_queue_head_init(&chan->tx_q);
471 skb_queue_head_init(&chan->srej_q);
472 mutex_init(&chan->lock);
474 /* Set default lock nesting level */
475 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
477 write_lock(&chan_list_lock);
478 list_add(&chan->global_l, &chan_list);
479 write_unlock(&chan_list_lock);
481 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
482 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
483 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
484 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
486 chan->state = BT_OPEN;
488 kref_init(&chan->kref);
490 /* This flag is cleared in l2cap_chan_ready() */
491 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
493 BT_DBG("chan %p", chan);
497 EXPORT_SYMBOL_GPL(l2cap_chan_create);
499 static void l2cap_chan_destroy(struct kref *kref)
501 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
503 BT_DBG("chan %p", chan);
505 write_lock(&chan_list_lock);
506 list_del(&chan->global_l);
507 write_unlock(&chan_list_lock);
512 void l2cap_chan_hold(struct l2cap_chan *c)
514 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
519 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
521 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
523 if (!kref_get_unless_zero(&c->kref))
529 void l2cap_chan_put(struct l2cap_chan *c)
531 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
533 kref_put(&c->kref, l2cap_chan_destroy);
535 EXPORT_SYMBOL_GPL(l2cap_chan_put);
537 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
539 chan->fcs = L2CAP_FCS_CRC16;
540 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
541 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
542 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
543 chan->remote_max_tx = chan->max_tx;
544 chan->remote_tx_win = chan->tx_win;
545 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
546 chan->sec_level = BT_SECURITY_LOW;
547 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
548 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
549 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
551 chan->conf_state = 0;
552 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
554 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
556 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
558 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
561 chan->sdu_last_frag = NULL;
563 chan->tx_credits = tx_credits;
564 /* Derive MPS from connection MTU to stop HCI fragmentation */
565 chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
566 /* Give enough credits for a full packet */
567 chan->rx_credits = (chan->imtu / chan->mps) + 1;
569 skb_queue_head_init(&chan->tx_q);
572 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
574 l2cap_le_flowctl_init(chan, tx_credits);
576 /* L2CAP implementations shall support a minimum MPS of 64 octets */
577 if (chan->mps < L2CAP_ECRED_MIN_MPS) {
578 chan->mps = L2CAP_ECRED_MIN_MPS;
579 chan->rx_credits = (chan->imtu / chan->mps) + 1;
583 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
585 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
586 __le16_to_cpu(chan->psm), chan->dcid);
588 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
592 switch (chan->chan_type) {
593 case L2CAP_CHAN_CONN_ORIENTED:
594 /* Alloc CID for connection-oriented socket */
595 chan->scid = l2cap_alloc_cid(conn);
596 if (conn->hcon->type == ACL_LINK)
597 chan->omtu = L2CAP_DEFAULT_MTU;
600 case L2CAP_CHAN_CONN_LESS:
601 /* Connectionless socket */
602 chan->scid = L2CAP_CID_CONN_LESS;
603 chan->dcid = L2CAP_CID_CONN_LESS;
604 chan->omtu = L2CAP_DEFAULT_MTU;
607 case L2CAP_CHAN_FIXED:
608 /* Caller will set CID and CID specific MTU values */
612 /* Raw socket can send/recv signalling messages only */
613 chan->scid = L2CAP_CID_SIGNALING;
614 chan->dcid = L2CAP_CID_SIGNALING;
615 chan->omtu = L2CAP_DEFAULT_MTU;
618 chan->local_id = L2CAP_BESTEFFORT_ID;
619 chan->local_stype = L2CAP_SERV_BESTEFFORT;
620 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
621 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
622 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
623 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
625 l2cap_chan_hold(chan);
627 /* Only keep a reference for fixed channels if they requested it */
628 if (chan->chan_type != L2CAP_CHAN_FIXED ||
629 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
630 hci_conn_hold(conn->hcon);
632 list_add(&chan->list, &conn->chan_l);
635 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
637 mutex_lock(&conn->chan_lock);
638 __l2cap_chan_add(conn, chan);
639 mutex_unlock(&conn->chan_lock);
642 void l2cap_chan_del(struct l2cap_chan *chan, int err)
644 struct l2cap_conn *conn = chan->conn;
646 __clear_chan_timer(chan);
648 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
649 state_to_string(chan->state));
651 chan->ops->teardown(chan, err);
654 struct amp_mgr *mgr = conn->hcon->amp_mgr;
655 /* Delete from channel list */
656 list_del(&chan->list);
658 l2cap_chan_put(chan);
662 /* Reference was only held for non-fixed channels or
663 * fixed channels that explicitly requested it using the
664 * FLAG_HOLD_HCI_CONN flag.
666 if (chan->chan_type != L2CAP_CHAN_FIXED ||
667 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
668 hci_conn_drop(conn->hcon);
670 if (mgr && mgr->bredr_chan == chan)
671 mgr->bredr_chan = NULL;
674 if (chan->hs_hchan) {
675 struct hci_chan *hs_hchan = chan->hs_hchan;
677 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
678 amp_disconnect_logical_link(hs_hchan);
681 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
684 switch (chan->mode) {
685 case L2CAP_MODE_BASIC:
688 case L2CAP_MODE_LE_FLOWCTL:
689 case L2CAP_MODE_EXT_FLOWCTL:
690 skb_queue_purge(&chan->tx_q);
693 case L2CAP_MODE_ERTM:
694 __clear_retrans_timer(chan);
695 __clear_monitor_timer(chan);
696 __clear_ack_timer(chan);
698 skb_queue_purge(&chan->srej_q);
700 l2cap_seq_list_free(&chan->srej_list);
701 l2cap_seq_list_free(&chan->retrans_list);
704 case L2CAP_MODE_STREAMING:
705 skb_queue_purge(&chan->tx_q);
709 EXPORT_SYMBOL_GPL(l2cap_chan_del);
711 static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
712 l2cap_chan_func_t func, void *data)
714 struct l2cap_chan *chan, *l;
716 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
717 if (chan->ident == id)
722 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
725 struct l2cap_chan *chan;
727 list_for_each_entry(chan, &conn->chan_l, list) {
732 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
738 mutex_lock(&conn->chan_lock);
739 __l2cap_chan_list(conn, func, data);
740 mutex_unlock(&conn->chan_lock);
743 EXPORT_SYMBOL_GPL(l2cap_chan_list);
745 static void l2cap_conn_update_id_addr(struct work_struct *work)
747 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
748 id_addr_update_work);
749 struct hci_conn *hcon = conn->hcon;
750 struct l2cap_chan *chan;
752 mutex_lock(&conn->chan_lock);
754 list_for_each_entry(chan, &conn->chan_l, list) {
755 l2cap_chan_lock(chan);
756 bacpy(&chan->dst, &hcon->dst);
757 chan->dst_type = bdaddr_dst_type(hcon);
758 l2cap_chan_unlock(chan);
761 mutex_unlock(&conn->chan_lock);
764 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
766 struct l2cap_conn *conn = chan->conn;
767 struct l2cap_le_conn_rsp rsp;
770 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
771 result = L2CAP_CR_LE_AUTHORIZATION;
773 result = L2CAP_CR_LE_BAD_PSM;
775 l2cap_state_change(chan, BT_DISCONN);
777 rsp.dcid = cpu_to_le16(chan->scid);
778 rsp.mtu = cpu_to_le16(chan->imtu);
779 rsp.mps = cpu_to_le16(chan->mps);
780 rsp.credits = cpu_to_le16(chan->rx_credits);
781 rsp.result = cpu_to_le16(result);
783 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
787 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
789 l2cap_state_change(chan, BT_DISCONN);
791 __l2cap_ecred_conn_rsp_defer(chan);
794 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
796 struct l2cap_conn *conn = chan->conn;
797 struct l2cap_conn_rsp rsp;
800 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
801 result = L2CAP_CR_SEC_BLOCK;
803 result = L2CAP_CR_BAD_PSM;
805 l2cap_state_change(chan, BT_DISCONN);
807 rsp.scid = cpu_to_le16(chan->dcid);
808 rsp.dcid = cpu_to_le16(chan->scid);
809 rsp.result = cpu_to_le16(result);
810 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
812 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
815 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
817 struct l2cap_conn *conn = chan->conn;
819 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
821 switch (chan->state) {
823 chan->ops->teardown(chan, 0);
828 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
829 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
830 l2cap_send_disconn_req(chan, reason);
832 l2cap_chan_del(chan, reason);
836 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
837 if (conn->hcon->type == ACL_LINK)
838 l2cap_chan_connect_reject(chan);
839 else if (conn->hcon->type == LE_LINK) {
840 switch (chan->mode) {
841 case L2CAP_MODE_LE_FLOWCTL:
842 l2cap_chan_le_connect_reject(chan);
844 case L2CAP_MODE_EXT_FLOWCTL:
845 l2cap_chan_ecred_connect_reject(chan);
851 l2cap_chan_del(chan, reason);
856 l2cap_chan_del(chan, reason);
860 chan->ops->teardown(chan, 0);
864 EXPORT_SYMBOL(l2cap_chan_close);
866 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
868 switch (chan->chan_type) {
870 switch (chan->sec_level) {
871 case BT_SECURITY_HIGH:
872 case BT_SECURITY_FIPS:
873 return HCI_AT_DEDICATED_BONDING_MITM;
874 case BT_SECURITY_MEDIUM:
875 return HCI_AT_DEDICATED_BONDING;
877 return HCI_AT_NO_BONDING;
880 case L2CAP_CHAN_CONN_LESS:
881 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
882 if (chan->sec_level == BT_SECURITY_LOW)
883 chan->sec_level = BT_SECURITY_SDP;
885 if (chan->sec_level == BT_SECURITY_HIGH ||
886 chan->sec_level == BT_SECURITY_FIPS)
887 return HCI_AT_NO_BONDING_MITM;
889 return HCI_AT_NO_BONDING;
891 case L2CAP_CHAN_CONN_ORIENTED:
892 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
893 if (chan->sec_level == BT_SECURITY_LOW)
894 chan->sec_level = BT_SECURITY_SDP;
896 if (chan->sec_level == BT_SECURITY_HIGH ||
897 chan->sec_level == BT_SECURITY_FIPS)
898 return HCI_AT_NO_BONDING_MITM;
900 return HCI_AT_NO_BONDING;
905 switch (chan->sec_level) {
906 case BT_SECURITY_HIGH:
907 case BT_SECURITY_FIPS:
908 return HCI_AT_GENERAL_BONDING_MITM;
909 case BT_SECURITY_MEDIUM:
910 return HCI_AT_GENERAL_BONDING;
912 return HCI_AT_NO_BONDING;
918 /* Service level security */
919 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
921 struct l2cap_conn *conn = chan->conn;
924 if (conn->hcon->type == LE_LINK)
925 return smp_conn_security(conn->hcon, chan->sec_level);
927 auth_type = l2cap_get_auth_type(chan);
929 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
933 static u8 l2cap_get_ident(struct l2cap_conn *conn)
937 /* Get next available identificator.
938 * 1 - 128 are used by kernel.
939 * 129 - 199 are reserved.
940 * 200 - 254 are used by utilities like l2ping, etc.
943 mutex_lock(&conn->ident_lock);
945 if (++conn->tx_ident > 128)
950 mutex_unlock(&conn->ident_lock);
955 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
958 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
961 BT_DBG("code 0x%2.2x", code);
966 /* Use NO_FLUSH if supported or we have an LE link (which does
967 * not support auto-flushing packets) */
968 if (lmp_no_flush_capable(conn->hcon->hdev) ||
969 conn->hcon->type == LE_LINK)
970 flags = ACL_START_NO_FLUSH;
974 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
975 skb->priority = HCI_PRIO_MAX;
977 hci_send_acl(conn->hchan, skb, flags);
980 static bool __chan_is_moving(struct l2cap_chan *chan)
982 return chan->move_state != L2CAP_MOVE_STABLE &&
983 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
986 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
988 struct hci_conn *hcon = chan->conn->hcon;
991 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
994 if (chan->hs_hcon && !__chan_is_moving(chan)) {
996 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
1003 /* Use NO_FLUSH for LE links (where this is the only option) or
1004 * if the BR/EDR link supports it and flushing has not been
1005 * explicitly requested (through FLAG_FLUSHABLE).
1007 if (hcon->type == LE_LINK ||
1008 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
1009 lmp_no_flush_capable(hcon->hdev)))
1010 flags = ACL_START_NO_FLUSH;
1014 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1015 hci_send_acl(chan->conn->hchan, skb, flags);
1018 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1020 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1021 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1023 if (enh & L2CAP_CTRL_FRAME_TYPE) {
1025 control->sframe = 1;
1026 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1027 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1033 control->sframe = 0;
1034 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1035 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1042 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1044 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1045 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1047 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1049 control->sframe = 1;
1050 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1051 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1057 control->sframe = 0;
1058 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1059 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1066 static inline void __unpack_control(struct l2cap_chan *chan,
1067 struct sk_buff *skb)
1069 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1070 __unpack_extended_control(get_unaligned_le32(skb->data),
1071 &bt_cb(skb)->l2cap);
1072 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1074 __unpack_enhanced_control(get_unaligned_le16(skb->data),
1075 &bt_cb(skb)->l2cap);
1076 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1080 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1084 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1085 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1087 if (control->sframe) {
1088 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1089 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1090 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1092 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1093 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1099 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1103 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1104 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1106 if (control->sframe) {
1107 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1108 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1109 packed |= L2CAP_CTRL_FRAME_TYPE;
1111 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1112 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1118 static inline void __pack_control(struct l2cap_chan *chan,
1119 struct l2cap_ctrl *control,
1120 struct sk_buff *skb)
1122 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1123 put_unaligned_le32(__pack_extended_control(control),
1124 skb->data + L2CAP_HDR_SIZE);
1126 put_unaligned_le16(__pack_enhanced_control(control),
1127 skb->data + L2CAP_HDR_SIZE);
1131 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1133 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1134 return L2CAP_EXT_HDR_SIZE;
1136 return L2CAP_ENH_HDR_SIZE;
1139 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1142 struct sk_buff *skb;
1143 struct l2cap_hdr *lh;
1144 int hlen = __ertm_hdr_size(chan);
1146 if (chan->fcs == L2CAP_FCS_CRC16)
1147 hlen += L2CAP_FCS_SIZE;
1149 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1152 return ERR_PTR(-ENOMEM);
1154 lh = skb_put(skb, L2CAP_HDR_SIZE);
1155 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1156 lh->cid = cpu_to_le16(chan->dcid);
1158 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1159 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1161 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1163 if (chan->fcs == L2CAP_FCS_CRC16) {
1164 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1165 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1168 skb->priority = HCI_PRIO_MAX;
1172 static void l2cap_send_sframe(struct l2cap_chan *chan,
1173 struct l2cap_ctrl *control)
1175 struct sk_buff *skb;
1178 BT_DBG("chan %p, control %p", chan, control);
1180 if (!control->sframe)
1183 if (__chan_is_moving(chan))
1186 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1190 if (control->super == L2CAP_SUPER_RR)
1191 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1192 else if (control->super == L2CAP_SUPER_RNR)
1193 set_bit(CONN_RNR_SENT, &chan->conn_state);
1195 if (control->super != L2CAP_SUPER_SREJ) {
1196 chan->last_acked_seq = control->reqseq;
1197 __clear_ack_timer(chan);
1200 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1201 control->final, control->poll, control->super);
1203 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1204 control_field = __pack_extended_control(control);
1206 control_field = __pack_enhanced_control(control);
1208 skb = l2cap_create_sframe_pdu(chan, control_field);
1210 l2cap_do_send(chan, skb);
1213 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1215 struct l2cap_ctrl control;
1217 BT_DBG("chan %p, poll %d", chan, poll);
1219 memset(&control, 0, sizeof(control));
1221 control.poll = poll;
1223 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1224 control.super = L2CAP_SUPER_RNR;
1226 control.super = L2CAP_SUPER_RR;
1228 control.reqseq = chan->buffer_seq;
1229 l2cap_send_sframe(chan, &control);
1232 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1234 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1237 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1240 static bool __amp_capable(struct l2cap_chan *chan)
1242 struct l2cap_conn *conn = chan->conn;
1243 struct hci_dev *hdev;
1244 bool amp_available = false;
1246 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1249 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1252 read_lock(&hci_dev_list_lock);
1253 list_for_each_entry(hdev, &hci_dev_list, list) {
1254 if (hdev->amp_type != AMP_TYPE_BREDR &&
1255 test_bit(HCI_UP, &hdev->flags)) {
1256 amp_available = true;
1260 read_unlock(&hci_dev_list_lock);
1262 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1263 return amp_available;
1268 static bool l2cap_check_efs(struct l2cap_chan *chan)
1270 /* Check EFS parameters */
1274 void l2cap_send_conn_req(struct l2cap_chan *chan)
1276 struct l2cap_conn *conn = chan->conn;
1277 struct l2cap_conn_req req;
1279 req.scid = cpu_to_le16(chan->scid);
1280 req.psm = chan->psm;
1282 chan->ident = l2cap_get_ident(conn);
1284 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1286 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1289 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1291 struct l2cap_create_chan_req req;
1292 req.scid = cpu_to_le16(chan->scid);
1293 req.psm = chan->psm;
1294 req.amp_id = amp_id;
1296 chan->ident = l2cap_get_ident(chan->conn);
1298 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1302 static void l2cap_move_setup(struct l2cap_chan *chan)
1304 struct sk_buff *skb;
1306 BT_DBG("chan %p", chan);
1308 if (chan->mode != L2CAP_MODE_ERTM)
1311 __clear_retrans_timer(chan);
1312 __clear_monitor_timer(chan);
1313 __clear_ack_timer(chan);
1315 chan->retry_count = 0;
1316 skb_queue_walk(&chan->tx_q, skb) {
1317 if (bt_cb(skb)->l2cap.retries)
1318 bt_cb(skb)->l2cap.retries = 1;
1323 chan->expected_tx_seq = chan->buffer_seq;
1325 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1326 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1327 l2cap_seq_list_clear(&chan->retrans_list);
1328 l2cap_seq_list_clear(&chan->srej_list);
1329 skb_queue_purge(&chan->srej_q);
1331 chan->tx_state = L2CAP_TX_STATE_XMIT;
1332 chan->rx_state = L2CAP_RX_STATE_MOVE;
1334 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1337 static void l2cap_move_done(struct l2cap_chan *chan)
1339 u8 move_role = chan->move_role;
1340 BT_DBG("chan %p", chan);
1342 chan->move_state = L2CAP_MOVE_STABLE;
1343 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1345 if (chan->mode != L2CAP_MODE_ERTM)
1348 switch (move_role) {
1349 case L2CAP_MOVE_ROLE_INITIATOR:
1350 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1351 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1353 case L2CAP_MOVE_ROLE_RESPONDER:
1354 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1359 static void l2cap_chan_ready(struct l2cap_chan *chan)
1361 /* The channel may have already been flagged as connected in
1362 * case of receiving data before the L2CAP info req/rsp
1363 * procedure is complete.
1365 if (chan->state == BT_CONNECTED)
1368 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1369 chan->conf_state = 0;
1370 __clear_chan_timer(chan);
1372 switch (chan->mode) {
1373 case L2CAP_MODE_LE_FLOWCTL:
1374 case L2CAP_MODE_EXT_FLOWCTL:
1375 if (!chan->tx_credits)
1376 chan->ops->suspend(chan);
1380 chan->state = BT_CONNECTED;
1382 chan->ops->ready(chan);
/* Send an LE credit-based connection request for @chan. Guarded by
 * FLAG_LE_CONN_REQ_SENT so the request is only ever sent once per
 * channel.
 */
1385 static void l2cap_le_connect(struct l2cap_chan *chan)
1387 struct l2cap_conn *conn = chan->conn;
1388 struct l2cap_le_conn_req req;
1390 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
/* Advertise an MTU bounded by the connection's link MTU. */
1394 chan->imtu = chan->conn->mtu;
1396 l2cap_le_flowctl_init(chan, 0);
1398 memset(&req, 0, sizeof(req));
1399 req.psm = chan->psm;
1400 req.scid = cpu_to_le16(chan->scid);
1401 req.mtu = cpu_to_le16(chan->imtu);
1402 req.mps = cpu_to_le16(chan->mps);
1403 req.credits = cpu_to_le16(chan->rx_credits);
/* Remember the ident so the response can be matched to this channel. */
1405 chan->ident = l2cap_get_ident(conn);
1407 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
/* Scratch state used while building an enhanced credit-based (ECRED)
 * connection request that may aggregate several deferred channels.
 * NOTE(review): further members (pdu with scid[] array, count, pid) are
 * referenced by the ECRED helpers below but not visible in this extract.
 */
1411 struct l2cap_ecred_conn_data {
1413 struct l2cap_ecred_conn_req req;
1416 struct l2cap_chan *chan;
/* Channel-list iterator callback: fold additional deferred channels into
 * an ECRED connection request being built for conn->chan. Only channels
 * with the same peer PID and PSM, in EXT_FLOWCTL mode and still in
 * BT_CONNECT with no ident assigned, are included.
 */
1421 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1423 struct l2cap_ecred_conn_data *conn = data;
/* Skip the channel the request is being built for. */
1426 if (chan == conn->chan)
1429 if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1432 pid = chan->ops->get_peer_pid(chan);
1434 /* Only add deferred channels with the same PID/PSM */
1435 if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1436 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1439 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1442 l2cap_ecred_init(chan, 0);
1444 /* Set the same ident so we can match on the rsp */
1445 chan->ident = conn->chan->ident;
1447 /* Include all channels deferred */
1448 conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
/* Send an enhanced credit-based (ECRED) connection request for @chan,
 * batching in any compatible deferred channels via
 * l2cap_ecred_defer_connect(). Deferred channels themselves exit early
 * here; FLAG_ECRED_CONN_REQ_SENT prevents duplicate requests.
 */
1453 static void l2cap_ecred_connect(struct l2cap_chan *chan)
1455 struct l2cap_conn *conn = chan->conn;
1456 struct l2cap_ecred_conn_data data;
1458 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1461 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1464 l2cap_ecred_init(chan, 0);
1466 memset(&data, 0, sizeof(data));
1467 data.pdu.req.psm = chan->psm;
1468 data.pdu.req.mtu = cpu_to_le16(chan->imtu);
1469 data.pdu.req.mps = cpu_to_le16(chan->mps);
1470 data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1471 data.pdu.scid[0] = cpu_to_le16(chan->scid);
1473 chan->ident = l2cap_get_ident(conn);
1477 data.pid = chan->ops->get_peer_pid(chan);
/* Pull in all other deferred channels sharing this PID/PSM. */
1479 __l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
/* Request length scales with the number of scids collected. */
1481 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1482 sizeof(data.pdu.req) + data.count * sizeof(__le16),
/* Kick off an LE channel once link security is satisfied: fixed/other
 * channels are marked ready, while connection-oriented channels in
 * BT_CONNECT issue the appropriate (ECRED or LE) connect request.
 */
1486 static void l2cap_le_start(struct l2cap_chan *chan)
1488 struct l2cap_conn *conn = chan->conn;
/* Wait until SMP has brought the link to the required security level. */
1490 if (!smp_conn_security(conn->hcon, chan->sec_level))
1494 l2cap_chan_ready(chan);
1498 if (chan->state == BT_CONNECT) {
1499 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1500 l2cap_ecred_connect(chan);
1502 l2cap_le_connect(chan);
/* Route a connection attempt to the right transport-specific path:
 * AMP discovery for AMP-capable channels, the LE path for LE links,
 * otherwise a classic BR/EDR connect request.
 */
1506 static void l2cap_start_connection(struct l2cap_chan *chan)
1508 if (__amp_capable(chan)) {
1509 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1510 a2mp_discover_amp(chan);
1511 } else if (chan->conn->hcon->type == LE_LINK) {
1512 l2cap_le_start(chan);
1514 l2cap_send_conn_req(chan);
/* Send a one-shot L2CAP information request (feature mask) on @conn and
 * arm the info timer; subsequent calls are no-ops once
 * L2CAP_INFO_FEAT_MASK_REQ_SENT is set.
 */
1518 static void l2cap_request_info(struct l2cap_conn *conn)
1520 struct l2cap_info_req req;
1522 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1525 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK)
1527 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1528 conn->info_ident = l2cap_get_ident(conn);
/* Fall back to l2cap_info_timeout() if the peer never answers. */
1530 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1532 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Return true if the link either is not encrypted (no key-size
 * requirement applies) or its encryption key meets the minimum size;
 * FIPS security forces a 16-byte key (assignment not visible in this
 * extract).
 */
1536 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1538 /* The minimum encryption key size needs to be enforced by the
1539 * host stack before establishing any L2CAP connections. The
1540 * specification in theory allows a minimum of 1, but to align
1541 * BR/EDR and LE transports, a minimum of 7 is chosen.
1543 * This check might also be called for unencrypted connections
1544 * that have no key size requirements. Ensure that the link is
1545 * actually encrypted before enforcing a key size.
1547 int min_key_size = hcon->hdev->min_enc_key_size;
1549 /* On FIPS security level, key size must be 16 bytes */
1550 if (hcon->sec_level == BT_SECURITY_FIPS)
1553 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1554 hcon->enc_key_size >= min_key_size);
/* Drive channel establishment on an existing link. LE links go through
 * l2cap_le_start(); BR/EDR waits for the feature-mask exchange, then
 * starts the connection only if security checks and the encryption key
 * size pass — otherwise the disconnect timer is armed.
 */
1557 static void l2cap_do_start(struct l2cap_chan *chan)
1559 struct l2cap_conn *conn = chan->conn;
1561 if (conn->hcon->type == LE_LINK) {
1562 l2cap_le_start(chan);
/* Feature mask must be known before a BR/EDR connect request. */
1566 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1567 l2cap_request_info(conn);
1571 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1574 if (!l2cap_chan_check_security(chan, true) ||
1575 !__l2cap_no_conn_pending(chan))
1578 if (l2cap_check_enc_key_size(conn->hcon))
1579 l2cap_start_connection(chan);
/* Weak encryption key: let the disconnect timer tear the link down. */
1581 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Return non-zero when @mode (ERTM or streaming) is supported by both
 * the local feature mask and the remote @feat_mask. Other modes fall
 * through to the default case (not visible in this extract).
 */
1584 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1586 u32 local_feat_mask = l2cap_feat_mask;
1588 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1591 case L2CAP_MODE_ERTM:
1592 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1593 case L2CAP_MODE_STREAMING:
1594 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Tear a channel down with a disconnect request, moving the channel to
 * BT_DISCONN with @err recorded for the owner. ERTM timers are stopped
 * first; A2MP channels skip the signalling PDU since they have no
 * conventional disconnect exchange.
 */
1600 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1602 struct l2cap_conn *conn = chan->conn;
1603 struct l2cap_disconn_req req;
1608 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1609 __clear_retrans_timer(chan);
1610 __clear_monitor_timer(chan);
1611 __clear_ack_timer(chan);
1614 if (chan->scid == L2CAP_CID_A2MP) {
1615 l2cap_state_change(chan, BT_DISCONN);
1619 req.dcid = cpu_to_le16(chan->dcid);
1620 req.scid = cpu_to_le16(chan->scid);
1621 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1624 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1627 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn (under chan_lock) and advance its state
 * machine: non-connection-oriented channels become ready immediately;
 * BT_CONNECT channels are (re)started or closed depending on security,
 * mode support and key size; BT_CONNECT2 channels get their pending
 * connect response sent, possibly followed by the first config request.
 */
1628 static void l2cap_conn_start(struct l2cap_conn *conn)
1630 struct l2cap_chan *chan, *tmp;
1632 BT_DBG("conn %p", conn);
1634 mutex_lock(&conn->chan_lock);
1636 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1637 l2cap_chan_lock(chan);
1639 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1640 l2cap_chan_ready(chan);
1641 l2cap_chan_unlock(chan);
1645 if (chan->state == BT_CONNECT) {
1646 if (!l2cap_chan_check_security(chan, true) ||
1647 !__l2cap_no_conn_pending(chan)) {
1648 l2cap_chan_unlock(chan);
/* Device-mandated mode the peer cannot do: give up on this channel. */
1652 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1653 && test_bit(CONF_STATE2_DEVICE,
1654 &chan->conf_state)) {
1655 l2cap_chan_close(chan, ECONNRESET);
1656 l2cap_chan_unlock(chan);
1660 if (l2cap_check_enc_key_size(conn->hcon))
1661 l2cap_start_connection(chan);
1663 l2cap_chan_close(chan, ECONNREFUSED);
1665 } else if (chan->state == BT_CONNECT2) {
1666 struct l2cap_conn_rsp rsp;
1668 rsp.scid = cpu_to_le16(chan->dcid);
1669 rsp.dcid = cpu_to_le16(chan->scid);
1671 if (l2cap_chan_check_security(chan, false)) {
/* Socket owner asked to defer: report PENDING and let it decide. */
1672 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1673 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1674 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1675 chan->ops->defer(chan);
1678 l2cap_state_change(chan, BT_CONFIG);
1679 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1680 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1683 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1684 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1687 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only a successful response that hasn't been configured yet needs
 * a config request.
 */
1690 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1691 rsp.result != L2CAP_CR_SUCCESS) {
1692 l2cap_chan_unlock(chan);
1696 set_bit(CONF_REQ_SENT, &chan->conf_state);
1697 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1698 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1699 chan->num_conf_req++;
1702 l2cap_chan_unlock(chan);
1705 mutex_unlock(&conn->chan_lock);
/* Post-connect work for an LE link: ensure pending pairing security is
 * requested, and as peripheral, request a connection-parameter update
 * when the current interval is outside the configured min/max range.
 */
1708 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1710 struct hci_conn *hcon = conn->hcon;
1711 struct hci_dev *hdev = hcon->hdev;
1713 BT_DBG("%s conn %p", hdev->name, conn);
1715 /* For outgoing pairing which doesn't necessarily have an
1716 * associated socket (e.g. mgmt_pair_device).
1719 smp_conn_security(hcon, hcon->pending_sec_level);
1721 /* For LE peripheral connections, make sure the connection interval
1722 * is in the range of the minimum and maximum interval that has
1723 * been configured for this connection. If not, then trigger
1724 * the connection update procedure.
1726 if (hcon->role == HCI_ROLE_SLAVE &&
1727 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1728 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1729 struct l2cap_conn_param_update_req req;
1731 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1732 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1733 req.latency = cpu_to_le16(hcon->le_conn_latency);
1734 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1736 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1737 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
/* Called when the underlying link becomes usable: request the feature
 * mask on ACL links, advance each existing channel (LE start, ready, or
 * BR/EDR do_start as appropriate), run LE-specific setup, and finally
 * release any rx PDUs queued while the connection was pending.
 */
1741 static void l2cap_conn_ready(struct l2cap_conn *conn)
1743 struct l2cap_chan *chan;
1744 struct hci_conn *hcon = conn->hcon;
1746 BT_DBG("conn %p", conn);
1748 if (hcon->type == ACL_LINK)
1749 l2cap_request_info(conn);
1751 mutex_lock(&conn->chan_lock);
1753 list_for_each_entry(chan, &conn->chan_l, list) {
1755 l2cap_chan_lock(chan);
/* A2MP channels have their own state machine; skip them here. */
1757 if (chan->scid == L2CAP_CID_A2MP) {
1758 l2cap_chan_unlock(chan);
1762 if (hcon->type == LE_LINK) {
1763 l2cap_le_start(chan);
1764 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1765 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1766 l2cap_chan_ready(chan);
1767 } else if (chan->state == BT_CONNECT) {
1768 l2cap_do_start(chan);
1771 l2cap_chan_unlock(chan);
1774 mutex_unlock(&conn->chan_lock);
1776 if (hcon->type == LE_LINK)
1777 l2cap_le_conn_ready(conn);
/* Process any PDUs that arrived before the connection was ready. */
1779 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1782 /* Notify sockets that we cannot guarantee reliability anymore */
1783 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1785 struct l2cap_chan *chan;
1787 BT_DBG("conn %p", conn);
1789 mutex_lock(&conn->chan_lock);
/* Only channels that demanded reliable delivery get the error. */
1791 list_for_each_entry(chan, &conn->chan_l, list) {
1792 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1793 l2cap_chan_set_err(chan, err);
1796 mutex_unlock(&conn->chan_lock);
/* Info-request timer expiry: treat the feature-mask exchange as done
 * (peer never answered) and restart channel establishment.
 */
1799 static void l2cap_info_timeout(struct work_struct *work)
1801 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1804 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1805 conn->info_ident = 0;
1807 l2cap_conn_start(conn);
1812 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1813 * callback is called during registration. The ->remove callback is called
1814 * during unregistration.
1815 * An l2cap_user object can either be explicitly unregistered or when the
1816 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1817 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1818 * External modules must own a reference to the l2cap_conn object if they intend
1819 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1820 * any time if they don't.
/* Register an external l2cap_user on @conn under the hci_dev lock,
 * invoking the user's ->probe() callback. Fails if the user is already
 * registered or the connection has been torn down (conn->hchan NULL).
 */
1823 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1825 struct hci_dev *hdev = conn->hcon->hdev;
1828 /* We need to check whether l2cap_conn is registered. If it is not, we
1829 * must not register the l2cap_user. l2cap_conn_del() unregisters
1830 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1831 * relies on the parent hci_conn object to be locked. This itself relies
1832 * on the hci_dev object to be locked. So we must lock the hci device
/* Non-empty list means this user object is already registered. */
1837 if (!list_empty(&user->list)) {
1842 /* conn->hchan is NULL after l2cap_conn_del() was called */
1848 ret = user->probe(conn, user);
1852 list_add(&user->list, &conn->users);
1856 hci_dev_unlock(hdev);
1859 EXPORT_SYMBOL(l2cap_register_user);
/* Unregister a previously registered l2cap_user under the hci_dev lock,
 * invoking its ->remove() callback. A user that is not on the list is
 * silently ignored.
 */
1861 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1863 struct hci_dev *hdev = conn->hcon->hdev;
1867 if (list_empty(&user->list))
/* list_del_init() leaves the node re-registerable afterwards. */
1870 list_del_init(&user->list);
1871 user->remove(conn, user);
1874 hci_dev_unlock(hdev);
1876 EXPORT_SYMBOL(l2cap_unregister_user);
/* Remove every registered l2cap_user from @conn, calling each user's
 * ->remove() callback. Caller is expected to hold the locking described
 * in l2cap_register_user().
 */
1878 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1880 struct l2cap_user *user;
1882 while (!list_empty(&conn->users)) {
1883 user = list_first_entry(&conn->users, struct l2cap_user, list);
1884 list_del_init(&user->list);
1885 user->remove(conn, user);
/* Tear down the L2CAP connection attached to @hcon, propagating @err to
 * every channel: flush pending rx work, unregister users, delete and
 * close all channels, drop the HCI channel and finally release the
 * conn reference.
 */
1889 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1891 struct l2cap_conn *conn = hcon->l2cap_data;
1892 struct l2cap_chan *chan, *l;
1897 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1899 kfree_skb(conn->rx_skb);
1901 skb_queue_purge(&conn->pending_rx);
1903 /* We can not call flush_work(&conn->pending_rx_work) here since we
1904 * might block if we are running on a worker from the same workqueue
1905 * pending_rx_work is waiting on.
1907 if (work_pending(&conn->pending_rx_work))
1908 cancel_work_sync(&conn->pending_rx_work);
1910 if (work_pending(&conn->id_addr_update_work))
1911 cancel_work_sync(&conn->id_addr_update_work);
1913 l2cap_unregister_all_users(conn);
1915 /* Force the connection to be immediately dropped */
1916 hcon->disc_timeout = 0;
1918 mutex_lock(&conn->chan_lock);
/* Hold each channel across del/close so it can't vanish under us. */
1921 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1922 l2cap_chan_hold(chan);
1923 l2cap_chan_lock(chan);
1925 l2cap_chan_del(chan, err);
1927 chan->ops->close(chan);
1929 l2cap_chan_unlock(chan);
1930 l2cap_chan_put(chan);
1933 mutex_unlock(&conn->chan_lock);
1935 hci_chan_del(conn->hchan);
1937 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1938 cancel_delayed_work_sync(&conn->info_timer);
1940 hcon->l2cap_data = NULL;
1942 l2cap_conn_put(conn);
/* kref release callback: drop the hci_conn reference held by the
 * l2cap_conn (the conn object itself is freed in lines not visible in
 * this extract).
 */
1945 static void l2cap_conn_free(struct kref *ref)
1947 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1949 hci_conn_put(conn->hcon);
/* Take a reference on @conn and return it (return statement not visible
 * in this extract).
 */
1953 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1955 kref_get(&conn->ref);
/* Drop a reference on @conn, freeing it via l2cap_conn_free() when the
 * last reference goes away.
 */
1960 void l2cap_conn_put(struct l2cap_conn *conn)
1962 kref_put(&conn->ref, l2cap_conn_free);
1964 EXPORT_SYMBOL(l2cap_conn_put);
1966 /* ---- Socket interface ---- */
1968 /* Find socket with psm and source / destination bdaddr.
1969 * Returns closest match.
/* Look up a global (listening) channel matching @psm, @state and the
 * link type, preferring an exact src/dst address match; otherwise the
 * best wildcard (BDADDR_ANY) match found is returned. The returned
 * channel carries a reference taken via l2cap_chan_hold_unless_zero().
 */
1971 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1976 struct l2cap_chan *c, *tmp, *c1 = NULL;
1978 read_lock(&chan_list_lock);
1980 list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
1981 if (state && c->state != state)
/* Source address type must match the transport of the link. */
1984 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1987 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1990 if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
1991 int src_match, dst_match;
1992 int src_any, dst_any;
1995 src_match = !bacmp(&c->src, src);
1996 dst_match = !bacmp(&c->dst, dst);
/* Exact match wins immediately. */
1997 if (src_match && dst_match) {
1998 if (!l2cap_chan_hold_unless_zero(c))
2001 read_unlock(&chan_list_lock);
/* Otherwise remember the closest wildcard candidate. */
2006 src_any = !bacmp(&c->src, BDADDR_ANY);
2007 dst_any = !bacmp(&c->dst, BDADDR_ANY);
2008 if ((src_match && dst_any) || (src_any && dst_match) ||
2009 (src_any && dst_any))
2015 c1 = l2cap_chan_hold_unless_zero(c1);
2017 read_unlock(&chan_list_lock);
/* ERTM monitor timer expiry: feed L2CAP_EV_MONITOR_TO into the tx state
 * machine under the channel lock, dropping the timer's channel reference
 * on both the early-exit and the normal path.
 */
2022 static void l2cap_monitor_timeout(struct work_struct *work)
2024 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2025 monitor_timer.work);
2027 BT_DBG("chan %p", chan);
2029 l2cap_chan_lock(chan);
/* Early-exit path (condition not visible in this extract). */
2032 l2cap_chan_unlock(chan);
2033 l2cap_chan_put(chan);
2037 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2039 l2cap_chan_unlock(chan);
2040 l2cap_chan_put(chan);
/* ERTM retransmission timer expiry: feed L2CAP_EV_RETRANS_TO into the
 * tx state machine under the channel lock, releasing the timer's channel
 * reference on both paths.
 */
2045 static void l2cap_retrans_timeout(struct work_struct *work)
2046 retrans_timer.work);
2048 BT_DBG("chan %p", chan);
2050 l2cap_chan_lock(chan);
/* Early-exit path (condition not visible in this extract). */
2053 l2cap_chan_unlock(chan);
2054 l2cap_chan_put(chan);
2058 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2059 l2cap_chan_unlock(chan);
2060 l2cap_chan_put(chan);
/* Transmit every queued frame in streaming mode: append @skbs to the tx
 * queue, then drain it, stamping each frame with the next tx sequence
 * number, packing the control field and appending an FCS when CRC16 is
 * in use. Streaming mode never retransmits, so frames are sent at most
 * once.
 */
2063 static void l2cap_streaming_send(struct l2cap_chan *chan,
2064 struct sk_buff_head *skbs)
2066 struct sk_buff *skb;
2067 struct l2cap_ctrl *control;
2069 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Don't transmit while a channel move is in progress. */
2071 if (__chan_is_moving(chan))
2074 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2076 while (!skb_queue_empty(&chan->tx_q)) {
2078 skb = skb_dequeue(&chan->tx_q);
2080 bt_cb(skb)->l2cap.retries = 1;
2081 control = &bt_cb(skb)->l2cap;
2083 control->reqseq = 0;
2084 control->txseq = chan->next_tx_seq;
2086 __pack_control(chan, control, skb);
2088 if (chan->fcs == L2CAP_FCS_CRC16) {
2089 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2090 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2093 l2cap_do_send(chan, skb);
2095 BT_DBG("Sent txseq %u", control->txseq);
2097 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2098 chan->frames_sent++;
/* Transmit pending ERTM I-frames while the tx window has room: stamp
 * each frame with req/tx sequence numbers, pack the control field,
 * append the FCS when enabled, and send a clone so the original stays
 * queued for potential retransmission. Returns the number of frames
 * sent (return not visible in this extract).
 */
2102 static int l2cap_ertm_send(struct l2cap_chan *chan)
2104 struct sk_buff *skb, *tx_skb;
2105 struct l2cap_ctrl *control;
2108 BT_DBG("chan %p", chan);
2110 if (chan->state != BT_CONNECTED)
2113 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2116 if (__chan_is_moving(chan))
/* Stop when the queue is empty, the remote window is full, or the
 * tx state machine leaves XMIT.
 */
2119 while (chan->tx_send_head &&
2120 chan->unacked_frames < chan->remote_tx_win &&
2121 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2123 skb = chan->tx_send_head;
2125 bt_cb(skb)->l2cap.retries = 1;
2126 control = &bt_cb(skb)->l2cap;
2128 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Piggyback an ack for everything received so far. */
2131 control->reqseq = chan->buffer_seq;
2132 chan->last_acked_seq = chan->buffer_seq;
2133 control->txseq = chan->next_tx_seq;
2135 __pack_control(chan, control, skb);
2137 if (chan->fcs == L2CAP_FCS_CRC16) {
2138 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2139 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2142 /* Clone after data has been modified. Data is assumed to be
2143 read-only (for locking purposes) on cloned sk_buffs.
2145 tx_skb = skb_clone(skb, GFP_KERNEL);
2150 __set_retrans_timer(chan);
2152 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2153 chan->unacked_frames++;
2154 chan->frames_sent++;
2157 if (skb_queue_is_last(&chan->tx_q, skb))
2158 chan->tx_send_head = NULL;
2160 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2162 l2cap_do_send(chan, tx_skb);
2163 BT_DBG("Sent txseq %u", control->txseq);
2166 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2167 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list.
 * Frames that exceed max_tx trigger a disconnect; cloned skbs are
 * copied before their control field and FCS are rewritten, since
 * cloned data must be treated as read-only.
 */
2172 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2174 struct l2cap_ctrl control;
2175 struct sk_buff *skb;
2176 struct sk_buff *tx_skb;
2179 BT_DBG("chan %p", chan);
2181 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2184 if (__chan_is_moving(chan))
2187 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2188 seq = l2cap_seq_list_pop(&chan->retrans_list);
2190 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2192 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2197 bt_cb(skb)->l2cap.retries++;
2198 control = bt_cb(skb)->l2cap;
/* Retry budget exhausted: give up and tear the channel down. */
2200 if (chan->max_tx != 0 &&
2201 bt_cb(skb)->l2cap.retries > chan->max_tx) {
2202 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2203 l2cap_send_disconn_req(chan, ECONNRESET);
2204 l2cap_seq_list_clear(&chan->retrans_list);
2208 control.reqseq = chan->buffer_seq;
2209 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2214 if (skb_cloned(skb)) {
2215 /* Cloned sk_buffs are read-only, so we need a
2218 tx_skb = skb_copy(skb, GFP_KERNEL);
2220 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failure: abandon this resend pass. */
2224 l2cap_seq_list_clear(&chan->retrans_list);
2228 /* Update skb contents */
2229 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2230 put_unaligned_le32(__pack_extended_control(&control),
2231 tx_skb->data + L2CAP_HDR_SIZE);
2233 put_unaligned_le16(__pack_enhanced_control(&control),
2234 tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute the FCS over the updated frame contents. */
2238 if (chan->fcs == L2CAP_FCS_CRC16) {
2239 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2240 tx_skb->len - L2CAP_FCS_SIZE);
2241 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2245 l2cap_do_send(chan, tx_skb);
2247 BT_DBG("Resent txseq %d", control.txseq);
2249 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame identified by control->reqseq by queueing
 * it on the retrans list and running the resend machinery.
 */
2253 static void l2cap_retransmit(struct l2cap_chan *chan,
2254 struct l2cap_ctrl *control)
2256 BT_DBG("chan %p, control %p", chan, control);
2258 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2259 l2cap_ertm_resend(chan);
/* Retransmit every unacked frame starting at control->reqseq: rebuild
 * the retrans list from the tx queue (stopping at tx_send_head, which
 * marks the first never-sent frame) and run the resend machinery.
 */
2262 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2263 struct l2cap_ctrl *control)
2265 struct sk_buff *skb;
2267 BT_DBG("chan %p, control %p", chan, control);
2270 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2272 l2cap_seq_list_clear(&chan->retrans_list);
2274 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2277 if (chan->unacked_frames) {
/* Find the frame matching reqseq (or stop at the unsent head). */
2278 skb_queue_walk(&chan->tx_q, skb) {
2279 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2280 skb == chan->tx_send_head)
/* Queue everything from there up to the unsent head. */
2284 skb_queue_walk_from(&chan->tx_q, skb) {
2285 if (skb == chan->tx_send_head)
2288 l2cap_seq_list_append(&chan->retrans_list,
2289 bt_cb(skb)->l2cap.txseq);
2292 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames: send RNR while locally busy, piggyback
 * the ack on pending I-frames when possible, send an explicit RR once
 * roughly 3/4 of the ack window is outstanding, and otherwise just
 * (re)arm the ack timer.
 */
2296 static void l2cap_send_ack(struct l2cap_chan *chan)
2298 struct l2cap_ctrl control;
2299 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2300 chan->last_acked_seq);
2303 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2304 chan, chan->last_acked_seq, chan->buffer_seq);
2306 memset(&control, 0, sizeof(control));
2309 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2310 chan->rx_state == L2CAP_RX_STATE_RECV) {
2311 __clear_ack_timer(chan);
2312 control.super = L2CAP_SUPER_RNR;
2313 control.reqseq = chan->buffer_seq;
2314 l2cap_send_sframe(chan, &control);
2316 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2317 l2cap_ertm_send(chan);
2318 /* If any i-frames were sent, they included an ack */
2319 if (chan->buffer_seq == chan->last_acked_seq)
2323 /* Ack now if the window is 3/4ths full.
2324 * Calculate without mul or div
2326 threshold = chan->ack_win;
/* threshold = ack_win * 3 (later halved — division not visible here). */
2327 threshold += threshold << 1;
2330 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2333 if (frames_to_ack >= threshold) {
2334 __clear_ack_timer(chan);
2335 control.super = L2CAP_SUPER_RR;
2336 control.reqseq = chan->buffer_seq;
2337 l2cap_send_sframe(chan, &control);
2342 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb head, and the remainder is spread over
 * continuation fragments sized to the connection MTU, linked via the
 * frag_list. Returns 0 on success or a negative errno.
 */
2346 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2347 struct msghdr *msg, int len,
2348 int count, struct sk_buff *skb)
2350 struct l2cap_conn *conn = chan->conn;
2351 struct sk_buff **frag;
2354 if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2360 /* Continuation fragments (no L2CAP header) */
2361 frag = &skb_shinfo(skb)->frag_list;
2363 struct sk_buff *tmp;
2365 count = min_t(unsigned int, conn->mtu, len);
2367 tmp = chan->ops->alloc_skb(chan, 0, count,
2368 msg->msg_flags & MSG_DONTWAIT);
2370 return PTR_ERR(tmp);
2374 if (!copy_from_iter_full(skb_put(*frag, count), count,
/* Account the fragment against the parent skb's totals. */
2381 skb->len += (*frag)->len;
2382 skb->data_len += (*frag)->len;
2384 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU from user data: L2CAP header
 * with the destination CID, the channel PSM, then the payload copied
 * from @msg. Returns the skb or an ERR_PTR on failure.
 */
2390 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2391 struct msghdr *msg, size_t len)
2393 struct l2cap_conn *conn = chan->conn;
2394 struct sk_buff *skb;
2395 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2396 struct l2cap_hdr *lh;
2398 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2399 __le16_to_cpu(chan->psm), len);
/* Head of the skb carries at most one connection-MTU of payload. */
2401 count = min_t(unsigned int, (conn->mtu - hlen), len);
2403 skb = chan->ops->alloc_skb(chan, hlen, count,
2404 msg->msg_flags & MSG_DONTWAIT);
2408 /* Create L2CAP header */
2409 lh = skb_put(skb, L2CAP_HDR_SIZE);
2410 lh->cid = cpu_to_le16(chan->dcid);
2411 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2412 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2414 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2415 if (unlikely(err < 0)) {
2417 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU from user data: plain L2CAP header
 * followed by the payload copied from @msg. Returns the skb or an
 * ERR_PTR on failure.
 */
2422 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2423 struct msghdr *msg, size_t len)
2425 struct l2cap_conn *conn = chan->conn;
2426 struct sk_buff *skb;
2428 struct l2cap_hdr *lh;
2430 BT_DBG("chan %p len %zu", chan, len);
2432 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2434 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2435 msg->msg_flags & MSG_DONTWAIT);
2439 /* Create L2CAP header */
2440 lh = skb_put(skb, L2CAP_HDR_SIZE);
2441 lh->cid = cpu_to_le16(chan->dcid);
2442 lh->len = cpu_to_le16(len);
2444 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2445 if (unlikely(err < 0)) {
2447 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, placeholder
 * control field (filled in at transmit time), optional SDU length for
 * the first segment, the payload, and room reserved for the FCS.
 * Returns the skb or an ERR_PTR on failure.
 */
2452 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2453 struct msghdr *msg, size_t len,
2456 struct l2cap_conn *conn = chan->conn;
2457 struct sk_buff *skb;
2458 int err, count, hlen;
2459 struct l2cap_hdr *lh;
2461 BT_DBG("chan %p len %zu", chan, len);
2464 return ERR_PTR(-ENOTCONN);
2466 hlen = __ertm_hdr_size(chan);
/* sdulen != 0 marks a SAR start segment carrying the SDU length. */
2469 hlen += L2CAP_SDULEN_SIZE;
2471 if (chan->fcs == L2CAP_FCS_CRC16)
2472 hlen += L2CAP_FCS_SIZE;
2474 count = min_t(unsigned int, (conn->mtu - hlen), len);
2476 skb = chan->ops->alloc_skb(chan, hlen, count,
2477 msg->msg_flags & MSG_DONTWAIT);
2481 /* Create L2CAP header */
2482 lh = skb_put(skb, L2CAP_HDR_SIZE);
2483 lh->cid = cpu_to_le16(chan->dcid);
2484 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2486 /* Control header is populated later */
2487 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2488 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2490 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2493 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2495 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2496 if (unlikely(err < 0)) {
2498 return ERR_PTR(err);
2501 bt_cb(skb)->l2cap.fcs = chan->fcs;
2502 bt_cb(skb)->l2cap.retries = 0;
/* Segment an SDU from @msg into ERTM/streaming I-frame PDUs on
 * @seg_queue, applying SAR marking (UNSEGMENTED, or START/CONTINUE/END).
 * PDU size is bounded by the HCI MTU, the BR/EDR payload cap, ERTM
 * header/FCS overhead and the remote MPS. Returns 0 or a negative
 * errno, purging the queue on allocation failure.
 */
2506 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2507 struct sk_buff_head *seg_queue,
2508 struct msghdr *msg, size_t len)
2510 struct sk_buff *skb;
2515 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2517 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2518 * so fragmented skbs are not used. The HCI layer's handling
2519 * of fragmented skbs is not compatible with ERTM's queueing.
2522 /* PDU size is derived from the HCI MTU */
2523 pdu_len = chan->conn->mtu;
2525 /* Constrain PDU size for BR/EDR connections */
2527 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2529 /* Adjust for largest possible L2CAP overhead. */
2531 pdu_len -= L2CAP_FCS_SIZE;
2533 pdu_len -= __ertm_hdr_size(chan);
2535 /* Remote device may have requested smaller PDUs */
2536 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2538 if (len <= pdu_len) {
2539 sar = L2CAP_SAR_UNSEGMENTED;
2543 sar = L2CAP_SAR_START;
2548 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2551 __skb_queue_purge(seg_queue);
2552 return PTR_ERR(skb);
2555 bt_cb(skb)->l2cap.sar = sar;
2556 __skb_queue_tail(seg_queue, skb);
/* Last remaining chunk becomes the END segment. */
2562 if (len <= pdu_len) {
2563 sar = L2CAP_SAR_END;
2566 sar = L2CAP_SAR_CONTINUE;
/* Build an LE flow-control (K-frame) PDU: L2CAP header, optional SDU
 * length on the first segment, then the payload copied from @msg.
 * Returns the skb or an ERR_PTR on failure.
 */
2573 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2575 size_t len, u16 sdulen)
2577 struct l2cap_conn *conn = chan->conn;
2578 struct sk_buff *skb;
2579 int err, count, hlen;
2580 struct l2cap_hdr *lh;
2582 BT_DBG("chan %p len %zu", chan, len);
2585 return ERR_PTR(-ENOTCONN);
2587 hlen = L2CAP_HDR_SIZE;
/* sdulen != 0 marks the first segment carrying the SDU length. */
2590 hlen += L2CAP_SDULEN_SIZE;
2592 count = min_t(unsigned int, (conn->mtu - hlen), len);
2594 skb = chan->ops->alloc_skb(chan, hlen, count,
2595 msg->msg_flags & MSG_DONTWAIT);
2599 /* Create L2CAP header */
2600 lh = skb_put(skb, L2CAP_HDR_SIZE);
2601 lh->cid = cpu_to_le16(chan->dcid);
2602 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2605 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2607 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2608 if (unlikely(err < 0)) {
2610 return ERR_PTR(err);
/* Segment an SDU from @msg into LE flow-control PDUs on @seg_queue.
 * The first PDU reserves space for the SDU length field; subsequent
 * PDUs reclaim that space. Returns 0 or a negative errno, purging the
 * queue on allocation failure.
 */
2616 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2617 struct sk_buff_head *seg_queue,
2618 struct msghdr *msg, size_t len)
2620 struct sk_buff *skb;
2624 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
/* First segment: leave room for the 16-bit SDU length. */
2627 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2633 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2635 __skb_queue_purge(seg_queue);
2636 return PTR_ERR(skb);
2639 __skb_queue_tail(seg_queue, skb);
/* Later segments carry no SDU length, so they can be a bit larger. */
2645 pdu_len += L2CAP_SDULEN_SIZE;
/* Drain the tx queue of an LE flow-control channel, sending one queued
 * PDU per available credit until either credits or queued frames run
 * out.
 */
2652 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2656 BT_DBG("chan %p", chan);
2658 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2659 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2664 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2665 skb_queue_len(&chan->tx_q));
/* Entry point for sending user data on a channel. Dispatches by
 * channel type/mode: connectionless PDU, LE/extended flow control
 * (segment, queue, send by credits), basic mode (single PDU), or
 * ERTM/streaming (segment then hand to the tx state machine). Enforces
 * the outgoing MTU in every connection-oriented mode. Returns 0/len or
 * a negative errno (per-branch returns not all visible in this extract).
 */
2668 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2670 struct sk_buff *skb;
2672 struct sk_buff_head seg_queue;
2677 /* Connectionless channel */
2678 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2679 skb = l2cap_create_connless_pdu(chan, msg, len);
2681 return PTR_ERR(skb);
2683 l2cap_do_send(chan, skb);
2687 switch (chan->mode) {
2688 case L2CAP_MODE_LE_FLOWCTL:
2689 case L2CAP_MODE_EXT_FLOWCTL:
2690 /* Check outgoing MTU */
2691 if (len > chan->omtu)
2694 __skb_queue_head_init(&seg_queue);
2696 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
/* Channel may have gone down while segmenting (blocking alloc). */
2698 if (chan->state != BT_CONNECTED) {
2699 __skb_queue_purge(&seg_queue);
2706 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2708 l2cap_le_flowctl_send(chan);
2710 if (!chan->tx_credits)
2711 chan->ops->suspend(chan);
2717 case L2CAP_MODE_BASIC:
2718 /* Check outgoing MTU */
2719 if (len > chan->omtu)
2722 /* Create a basic PDU */
2723 skb = l2cap_create_basic_pdu(chan, msg, len);
2725 return PTR_ERR(skb);
2727 l2cap_do_send(chan, skb);
2731 case L2CAP_MODE_ERTM:
2732 case L2CAP_MODE_STREAMING:
2733 /* Check outgoing MTU */
2734 if (len > chan->omtu) {
2739 __skb_queue_head_init(&seg_queue);
2741 /* Do segmentation before calling in to the state machine,
2742 * since it's possible to block while waiting for memory
2745 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2750 if (chan->mode == L2CAP_MODE_ERTM)
2751 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST)
2753 l2cap_streaming_send(chan, &seg_queue);
2757 /* If the skbs were not queued for sending, they'll still be in
2758 * seg_queue and need to be purged.
2760 __skb_queue_purge(&seg_queue);
2764 BT_DBG("bad state %1.1x", chan->mode);
/* Send SREJ S-frames for every missing sequence number between
 * expected_tx_seq and @txseq that is not already buffered in srej_q,
 * recording each requested seq on the srej_list, then advance
 * expected_tx_seq past @txseq.
 */
2772 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2774 struct l2cap_ctrl control;
2777 BT_DBG("chan %p, txseq %u", chan, txseq);
2779 memset(&control, 0, sizeof(control));
2781 control.super = L2CAP_SUPER_SREJ;
2783 for (seq = chan->expected_tx_seq; seq != txseq;
2784 seq = __next_seq(chan, seq)) {
2785 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2786 control.reqseq = seq;
2787 l2cap_send_sframe(chan, &control);
2788 l2cap_seq_list_append(&chan->srej_list, seq);
2792 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) sequence
 * number on the srej_list; a no-op when the list is empty.
 */
2795 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2797 struct l2cap_ctrl control;
2799 BT_DBG("chan %p", chan);
2801 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2804 memset(&control, 0, sizeof(control));
2806 control.super = L2CAP_SUPER_SREJ;
2807 control.reqseq = chan->srej_list.tail;
2808 l2cap_send_sframe(chan, &control);
/* Re-send SREJ S-frames for every outstanding sequence number on the
 * srej_list except @txseq. Entries are popped and re-appended, so the
 * captured initial head bounds the walk to a single pass.
 */
2811 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2813 struct l2cap_ctrl control;
2817 BT_DBG("chan %p, txseq %u", chan, txseq);
2819 memset(&control, 0, sizeof(control));
2821 control.super = L2CAP_SUPER_SREJ;
2823 /* Capture initial list head to allow only one pass through the list. */
2824 initial_head = chan->srej_list.head;
2827 seq = l2cap_seq_list_pop(&chan->srej_list);
2828 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2831 control.reqseq = seq;
2832 l2cap_send_sframe(chan, &control);
2833 l2cap_seq_list_append(&chan->srej_list, seq);
2834 } while (chan->srej_list.head != initial_head);
/* Process a received ack (@reqseq): free every tx-queue frame the peer
 * has acknowledged, update expected_ack_seq/unacked_frames, and stop
 * the retransmission timer once everything outstanding is acked.
 */
2837 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2839 struct sk_buff *acked_skb;
2842 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing outstanding, or the ack carries no new information. */
2844 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2847 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2848 chan->expected_ack_seq, chan->unacked_frames);
2850 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2851 ackseq = __next_seq(chan, ackseq)) {
2853 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2855 skb_unlink(acked_skb, &chan->tx_q);
2856 kfree_skb(acked_skb);
2857 chan->unacked_frames--;
2861 chan->expected_ack_seq = reqseq;
2863 if (chan->unacked_frames == 0)
2864 __clear_retrans_timer(chan);
2866 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abort the SREJ_SENT receive state: discard all out-of-order buffered
 * frames and pending SREJ bookkeeping, rewind expected_tx_seq to the
 * last in-order frame (buffer_seq), and return to the normal RECV state.
 */
2869 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2871 BT_DBG("chan %p", chan);
2873 chan->expected_tx_seq = chan->buffer_seq;
2874 l2cap_seq_list_clear(&chan->srej_list);
2875 skb_queue_purge(&chan->srej_q);
2876 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM transmit-side state machine handler for the XMIT state.
 * Dispatches on @event; @skbs carries queued PDUs for DATA_REQUEST,
 * @control carries the received frame's fields for RECV_* events.
 * Several events (EXPLICIT_POLL, RETRANS_TO) transition to WAIT_F after
 * sending an RR/RNR with the poll bit set.
 */
2879 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2880 struct l2cap_ctrl *control,
2881 struct sk_buff_head *skbs, u8 event)
2883 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2887 case L2CAP_EV_DATA_REQUEST:
/* Remember where unsent data begins before splicing onto tx_q */
2888 if (chan->tx_send_head == NULL)
2889 chan->tx_send_head = skb_peek(skbs);
2891 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2892 l2cap_ertm_send(chan);
2894 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2895 BT_DBG("Enter LOCAL_BUSY");
2896 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2898 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2899 /* The SREJ_SENT state must be aborted if we are to
2900 * enter the LOCAL_BUSY state.
2902 l2cap_abort_rx_srej_sent(chan);
/* l2cap_send_ack() will emit an RNR while CONN_LOCAL_BUSY is set */
2905 l2cap_send_ack(chan);
2908 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2909 BT_DBG("Exit LOCAL_BUSY");
2910 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we told the peer we were busy, poll it now with RR(P=1) */
2912 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2913 struct l2cap_ctrl local_control;
2915 memset(&local_control, 0, sizeof(local_control));
2916 local_control.sframe = 1;
2917 local_control.super = L2CAP_SUPER_RR;
2918 local_control.poll = 1;
2919 local_control.reqseq = chan->buffer_seq;
2920 l2cap_send_sframe(chan, &local_control);
2922 chan->retry_count = 1;
2923 __set_monitor_timer(chan);
2924 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2927 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2928 l2cap_process_reqseq(chan, control->reqseq);
2930 case L2CAP_EV_EXPLICIT_POLL:
2931 l2cap_send_rr_or_rnr(chan, 1);
2932 chan->retry_count = 1;
2933 __set_monitor_timer(chan);
2934 __clear_ack_timer(chan);
2935 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2937 case L2CAP_EV_RETRANS_TO:
/* Retransmission timeout: poll the peer and wait for F-bit */
2938 l2cap_send_rr_or_rnr(chan, 1);
2939 chan->retry_count = 1;
2940 __set_monitor_timer(chan);
2941 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2943 case L2CAP_EV_RECV_FBIT:
2944 /* Nothing to process */
/* ERTM transmit-side state machine handler for the WAIT_F state (waiting
 * for a frame with the Final bit after we sent a poll).  Data is queued
 * but not transmitted until the F-bit arrives; the monitor timer drives
 * retries, disconnecting after max_tx expirations.
 */
2951 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2952 struct l2cap_ctrl *control,
2953 struct sk_buff_head *skbs, u8 event)
2955 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2959 case L2CAP_EV_DATA_REQUEST:
2960 if (chan->tx_send_head == NULL)
2961 chan->tx_send_head = skb_peek(skbs);
2962 /* Queue data, but don't send. */
2963 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2965 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2966 BT_DBG("Enter LOCAL_BUSY");
2967 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2969 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2970 /* The SREJ_SENT state must be aborted if we are to
2971 * enter the LOCAL_BUSY state.
2973 l2cap_abort_rx_srej_sent(chan);
2976 l2cap_send_ack(chan);
2979 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2980 BT_DBG("Exit LOCAL_BUSY");
2981 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* Mirror of the XMIT-state handling: re-poll if an RNR went out */
2983 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2984 struct l2cap_ctrl local_control;
2985 memset(&local_control, 0, sizeof(local_control));
2986 local_control.sframe = 1;
2987 local_control.super = L2CAP_SUPER_RR;
2988 local_control.poll = 1;
2989 local_control.reqseq = chan->buffer_seq;
2990 l2cap_send_sframe(chan, &local_control);
2992 chan->retry_count = 1;
2993 __set_monitor_timer(chan);
2994 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2997 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2998 l2cap_process_reqseq(chan, control->reqseq);
3001 case L2CAP_EV_RECV_FBIT:
/* Final bit received: poll answered, return to XMIT */
3002 if (control && control->final) {
3003 __clear_monitor_timer(chan);
3004 if (chan->unacked_frames > 0)
3005 __set_retrans_timer(chan);
3006 chan->retry_count = 0;
3007 chan->tx_state = L2CAP_TX_STATE_XMIT;
3008 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
3011 case L2CAP_EV_EXPLICIT_POLL:
/* Already polling — nothing more to do (body elided/empty) */
3014 case L2CAP_EV_MONITOR_TO:
/* max_tx == 0 means retry forever; otherwise give up after max_tx */
3015 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3016 l2cap_send_rr_or_rnr(chan, 1);
3017 __set_monitor_timer(chan);
3018 chan->retry_count++;
3020 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Top-level ERTM transmit event dispatcher: routes @event to the handler
 * for the channel's current tx_state (XMIT or WAIT_F).
 */
3028 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3029 struct sk_buff_head *skbs, u8 event)
3031 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3032 chan, control, skbs, event, chan->tx_state);
3034 switch (chan->tx_state) {
3035 case L2CAP_TX_STATE_XMIT:
3036 l2cap_tx_state_xmit(chan, control, skbs, event);
3038 case L2CAP_TX_STATE_WAIT_F:
3039 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed a received frame's ReqSeq (and F-bit) into the tx state machine. */
3047 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3048 struct l2cap_ctrl *control)
3050 BT_DBG("chan %p, control %p", chan, control);
3051 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed only a received frame's Final bit into the tx state machine. */
3054 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3055 struct l2cap_ctrl *control)
3057 BT_DBG("chan %p, control %p", chan, control);
3058 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3061 /* Copy frame to all raw sockets on that connection */
3062 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3064 struct sk_buff *nskb;
3065 struct l2cap_chan *chan;
3067 BT_DBG("conn %p", conn);
/* chan_lock protects the connection's channel list during iteration */
3069 mutex_lock(&conn->chan_lock);
3071 list_for_each_entry(chan, &conn->chan_l, list) {
/* Only raw (e.g. monitor/HID-raw style) channels get a copy */
3072 if (chan->chan_type != L2CAP_CHAN_RAW)
3075 /* Don't send frame to the channel it came from */
3076 if (bt_cb(skb)->l2cap.chan == chan)
/* Each recipient gets its own clone; drop it if recv() refuses it.
 * A NULL check on nskb is presumably in an elided line — confirm.
 */
3079 nskb = skb_clone(skb, GFP_KERNEL);
3082 if (chan->ops->recv(chan, nskb))
3086 mutex_unlock(&conn->chan_lock);
3089 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill an skb holding one L2CAP signalling command:
 * basic L2CAP header (CID = signalling channel, LE or BR/EDR as
 * appropriate) + command header + @dlen bytes of @data.  Payloads larger
 * than the connection MTU are carried in continuation fragments chained
 * on frag_list.  Returns the skb, or NULL-ish error path on failure
 * (error handling lines elided in this view).
 */
3090 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3091 u8 ident, u16 dlen, void *data)
3093 struct sk_buff *skb, **frag;
3094 struct l2cap_cmd_hdr *cmd;
3095 struct l2cap_hdr *lh;
3098 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3099 conn, code, ident, dlen);
/* MTU must at least hold the two fixed headers */
3101 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3104 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3105 count = min_t(unsigned int, conn->mtu, len);
3107 skb = bt_skb_alloc(count, GFP_KERNEL);
3111 lh = skb_put(skb, L2CAP_HDR_SIZE);
3112 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use the LE signalling CID, BR/EDR the classic one */
3114 if (conn->hcon->type == LE_LINK)
3115 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3117 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3119 cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3122 cmd->len = cpu_to_le16(dlen);
/* First fragment carries whatever data fits after the headers */
3125 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3126 skb_put_data(skb, data, count);
3132 /* Continuation fragments (no L2CAP header) */
3133 frag = &skb_shinfo(skb)->frag_list;
3135 count = min_t(unsigned int, conn->mtu, len);
3137 *frag = bt_skb_alloc(count, GFP_KERNEL);
3141 skb_put_data(*frag, data, count);
3146 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: report its type/length and the
 * value widened into *val (1/2/4-byte values read unaligned; larger
 * options are returned as a pointer cast into the unsigned long).
 * Returns the total option length consumed (header + payload).
 */
3156 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3159 struct l2cap_conf_opt *opt = *ptr;
3162 len = L2CAP_CONF_OPT_SIZE + opt->len;
/* 1-byte value */
3170 *val = *((u8 *) opt->val);
/* 2-byte little-endian value, possibly unaligned */
3174 *val = get_unaligned_le16(opt->val);
/* 4-byte little-endian value, possibly unaligned */
3178 *val = get_unaligned_le32(opt->val);
/* Variable-size option: hand back a pointer to the raw bytes */
3182 *val = (unsigned long) opt->val;
3186 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option to the buffer at *ptr, advancing *ptr
 * past it.  @size is the remaining room; options that do not fit are
 * silently skipped (buffer-overflow guard).  1/2/4-byte values are stored
 * little-endian; anything else treats @val as a pointer and memcpys @len
 * bytes.
 */
3190 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3192 struct l2cap_conf_opt *opt = *ptr;
3194 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
/* Refuse to write past the caller's buffer */
3196 if (size < L2CAP_CONF_OPT_SIZE + len)
3204 *((u8 *) opt->val) = val;
3208 put_unaligned_le16(val, opt->val);
3212 put_unaligned_le32(val, opt->val);
/* val is actually a pointer for variable-size payloads */
3216 memcpy(opt->val, (void *) val, len);
3220 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification (EFS) option from the channel's
 * local QoS parameters and append it via l2cap_add_conf_opt().  Field
 * content depends on the channel mode (ERTM vs Streaming); other modes
 * are presumably handled by an elided default branch — confirm in full
 * source.
 */
3223 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3225 struct l2cap_conf_efs efs;
3227 switch (chan->mode) {
3228 case L2CAP_MODE_ERTM:
3229 efs.id = chan->local_id;
3230 efs.stype = chan->local_stype;
3231 efs.msdu = cpu_to_le16(chan->local_msdu);
3232 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3233 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3234 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3237 case L2CAP_MODE_STREAMING:
/* Streaming uses best-effort service type */
3239 efs.stype = L2CAP_SERV_BESTEFFORT;
3240 efs.msdu = cpu_to_le16(chan->local_msdu);
3241 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3250 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3251 (unsigned long) &efs, size);
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * have not been acknowledged yet (buffer_seq has advanced past
 * last_acked_seq), send an RR/RNR ack.  Drops the channel reference taken
 * when the work was scheduled.
 */
3254 static void l2cap_ack_timeout(struct work_struct *work)
3256 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3260 BT_DBG("chan %p", chan);
3262 l2cap_chan_lock(chan);
/* Number of in-order frames received since our last ack */
3264 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3265 chan->last_acked_seq);
3268 l2cap_send_rr_or_rnr(chan, 0);
3270 l2cap_chan_unlock(chan);
3271 l2cap_chan_put(chan);
/* Initialise per-channel ERTM/streaming state: reset all sequence
 * counters, the tx queue and AMP move state.  For ERTM proper, also set
 * the rx/tx state machines to their initial states and allocate the SREJ
 * and retransmission sequence lists (freeing the first if the second
 * allocation fails).  Returns 0 on success or a negative errno.
 */
3274 int l2cap_ertm_init(struct l2cap_chan *chan)
3278 chan->next_tx_seq = 0;
3279 chan->expected_tx_seq = 0;
3280 chan->expected_ack_seq = 0;
3281 chan->unacked_frames = 0;
3282 chan->buffer_seq = 0;
3283 chan->frames_sent = 0;
3284 chan->last_acked_seq = 0;
3286 chan->sdu_last_frag = NULL;
3289 skb_queue_head_init(&chan->tx_q);
/* Channel starts on the BR/EDR controller with no AMP move in progress */
3291 chan->local_amp_id = AMP_ID_BREDR;
3292 chan->move_id = AMP_ID_BREDR;
3293 chan->move_state = L2CAP_MOVE_STABLE;
3294 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the ERTM retransmission machinery */
3296 if (chan->mode != L2CAP_MODE_ERTM)
3299 chan->rx_state = L2CAP_RX_STATE_RECV;
3300 chan->tx_state = L2CAP_TX_STATE_XMIT;
3302 skb_queue_head_init(&chan->srej_q);
3304 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3308 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Unwind the first list if the second allocation failed */
3310 l2cap_seq_list_free(&chan->srej_list);
/* Keep ERTM/Streaming only if the remote's feature mask supports it;
 * otherwise fall back to Basic mode.
 */
3315 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3318 case L2CAP_MODE_STREAMING:
3319 case L2CAP_MODE_ERTM:
3320 if (l2cap_mode_supported(mode, remote_feat_mask))
3324 return L2CAP_MODE_BASIC;
/* Extended Window Size usable only with a local A2MP fixed channel and a
 * remote that advertises the extended-window feature.
 */
3328 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3330 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3331 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
/* Extended Flow Spec usable only with a local A2MP fixed channel and a
 * remote that advertises the extended-flow feature.
 */
3334 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3336 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3337 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
/* Fill in the RFC option's retransmission and monitor timeouts.  On an
 * AMP link they are derived from the controller's best-effort flush
 * timeout (converted to ms, scaled 3x+500, clamped to 16 bits); on plain
 * BR/EDR the spec defaults are used.
 */
3340 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3341 struct l2cap_conf_rfc *rfc)
3343 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3344 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3346 /* Class 1 devices have must have ERTM timeouts
3347 * exceeding the Link Supervision Timeout. The
3348 * default Link Supervision Timeout for AMP
3349 * controllers is 10 seconds.
3351 * Class 1 devices use 0xffffffff for their
3352 * best-effort flush timeout, so the clamping logic
3353 * will result in a timeout that meets the above
3354 * requirement. ERTM timeouts are 16-bit values, so
3355 * the maximum timeout is 65.535 seconds.
3358 /* Convert timeout to milliseconds and round */
3359 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3361 /* This is the recommended formula for class 2 devices
3362 * that start ERTM timers when packets are sent to the
3365 ertm_to = 3 * ertm_to + 500;
/* Clamp into the 16-bit field (capping line elided here) */
3367 if (ertm_to > 0xffff)
3370 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3371 rfc->monitor_timeout = rfc->retrans_timeout;
/* Non-AMP: spec-recommended default timeouts */
3373 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3374 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Decide the transmit window: enable the extended control field when the
 * requested window exceeds the default and both sides support EWS,
 * otherwise clamp to the standard window.  ack_win starts equal to
 * tx_win and may shrink later during configuration.
 */
3378 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3380 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3381 __l2cap_ews_supported(chan->conn)) {
3382 /* use extended control field */
3383 set_bit(FLAG_EXT_CTRL, &chan->flags);
3384 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3386 chan->tx_win = min_t(u16, chan->tx_win,
3387 L2CAP_DEFAULT_TX_WINDOW);
3388 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3390 chan->ack_win = chan->tx_win;
/* Auto-select the incoming MTU from the largest ACL packet type the
 * controller allows.  Checks packet types from smallest to largest; the
 * HCI_xDHy bits are *disable* bits, so a set bit means that packet type
 * is NOT available and the current MTU stands.  The actual imtu
 * assignments per packet type are elided in this view.
 */
3393 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3395 struct hci_conn *conn = chan->conn->hcon;
/* Floor: the minimum MTU every L2CAP implementation must accept */
3397 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3399 /* The 2-DH1 packet has between 2 and 56 information bytes
3400 * (including the 2-byte payload header)
3402 if (!(conn->pkt_type & HCI_2DH1))
3405 /* The 3-DH1 packet has between 2 and 85 information bytes
3406 * (including the 2-byte payload header)
3408 if (!(conn->pkt_type & HCI_3DH1))
3411 /* The 2-DH3 packet has between 2 and 369 information bytes
3412 * (including the 2-byte payload header)
3414 if (!(conn->pkt_type & HCI_2DH3))
3417 /* The 3-DH3 packet has between 2 and 554 information bytes
3418 * (including the 2-byte payload header)
3420 if (!(conn->pkt_type & HCI_3DH3))
3423 /* The 2-DH5 packet has between 2 and 681 information bytes
3424 * (including the 2-byte payload header)
3426 if (!(conn->pkt_type & HCI_2DH5))
3429 /* The 3-DH5 packet has between 2 and 1023 information bytes
3430 * (including the 2-byte payload header)
3432 if (!(conn->pkt_type & HCI_3DH5))
/* Build an outgoing Configuration Request into @data (capacity
 * @data_size).  Emits MTU, RFC (mode-specific), and optionally EFS, EWS
 * and FCS options depending on channel mode and negotiated features.
 * Every option append is bounded by endptr - ptr so the request can
 * never overrun the caller's buffer.  Returns the built request length
 * (return expression elided in this view).
 */
3436 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3438 struct l2cap_conf_req *req = data;
3439 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3440 void *ptr = req->data;
3441 void *endptr = data + data_size;
3444 BT_DBG("chan %p", chan);
/* Only (re)select the mode on the very first request */
3446 if (chan->num_conf_req || chan->num_conf_rsp)
3449 switch (chan->mode) {
3450 case L2CAP_MODE_STREAMING:
3451 case L2CAP_MODE_ERTM:
/* State-2 devices keep their configured mode unconditionally */
3452 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3455 if (__l2cap_efs_supported(chan->conn))
3456 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3460 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3465 if (chan->imtu != L2CAP_DEFAULT_MTU) {
/* imtu of 0 presumably triggers auto-selection — branch lines elided */
3467 l2cap_mtu_auto(chan);
3468 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3472 switch (chan->mode) {
3473 case L2CAP_MODE_BASIC:
/* Peers without ERTM/streaming don't need an RFC option at all */
3477 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3478 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3481 rfc.mode = L2CAP_MODE_BASIC;
3483 rfc.max_transmit = 0;
3484 rfc.retrans_timeout = 0;
3485 rfc.monitor_timeout = 0;
3486 rfc.max_pdu_size = 0;
3488 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3489 (unsigned long) &rfc, endptr - ptr);
3492 case L2CAP_MODE_ERTM:
3493 rfc.mode = L2CAP_MODE_ERTM;
3494 rfc.max_transmit = chan->max_tx;
3496 __l2cap_set_ertm_timeouts(chan, &rfc);
/* MPS bounded by connection MTU minus the largest possible overhead */
3498 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3499 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3501 rfc.max_pdu_size = cpu_to_le16(size);
3503 l2cap_txwin_setup(chan);
3505 rfc.txwin_size = min_t(u16, chan->tx_win,
3506 L2CAP_DEFAULT_TX_WINDOW);
3508 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3509 (unsigned long) &rfc, endptr - ptr);
3511 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3512 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
/* Full window advertised via EWS when extended control is in use */
3514 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3515 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3516 chan->tx_win, endptr - ptr);
3518 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3519 if (chan->fcs == L2CAP_FCS_NONE ||
3520 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3521 chan->fcs = L2CAP_FCS_NONE;
3522 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3523 chan->fcs, endptr - ptr);
3527 case L2CAP_MODE_STREAMING:
3528 l2cap_txwin_setup(chan);
3529 rfc.mode = L2CAP_MODE_STREAMING;
/* Streaming has no retransmissions, so these RFC fields are zero */
3531 rfc.max_transmit = 0;
3532 rfc.retrans_timeout = 0;
3533 rfc.monitor_timeout = 0;
3535 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3536 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3538 rfc.max_pdu_size = cpu_to_le16(size);
3540 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3541 (unsigned long) &rfc, endptr - ptr);
3543 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3544 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3546 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3547 if (chan->fcs == L2CAP_FCS_NONE ||
3548 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3549 chan->fcs = L2CAP_FCS_NONE;
3550 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3551 chan->fcs, endptr - ptr);
3556 req->dcid = cpu_to_le16(chan->dcid);
3557 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configuration Request (chan->conf_req /
 * conf_len) and build our Configuration Response into @data (capacity
 * @data_size).  First pass decodes each option (MTU, flush timeout, RFC,
 * FCS, EFS, EWS); unknown non-hint options are echoed back with result
 * UNKNOWN.  Then the requested mode is reconciled with ours, output
 * options are generated, and the result (SUCCESS / UNACCEPT / PENDING)
 * is filled in.  Returns the response length or -ECONNREFUSED when
 * negotiation cannot proceed.
 */
3562 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3564 struct l2cap_conf_rsp *rsp = data;
3565 void *ptr = rsp->data;
3566 void *endptr = data + data_size;
3567 void *req = chan->conf_req;
3568 int len = chan->conf_len;
3569 int type, hint, olen;
3571 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3572 struct l2cap_conf_efs efs;
3574 u16 mtu = L2CAP_DEFAULT_MTU;
3575 u16 result = L2CAP_CONF_SUCCESS;
3578 BT_DBG("chan %p", chan);
/* --- option decoding pass --- */
3580 while (len >= L2CAP_CONF_OPT_SIZE) {
3581 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint bit: unknown hints may be ignored instead of rejected */
3585 hint = type & L2CAP_CONF_HINT;
3586 type &= L2CAP_CONF_MASK;
3589 case L2CAP_CONF_MTU:
3595 case L2CAP_CONF_FLUSH_TO:
3598 chan->flush_to = val;
3601 case L2CAP_CONF_QOS:
3604 case L2CAP_CONF_RFC:
/* Length check before copying guards the stack rfc struct */
3605 if (olen != sizeof(rfc))
3607 memcpy(&rfc, (void *) val, olen);
3610 case L2CAP_CONF_FCS:
3613 if (val == L2CAP_FCS_NONE)
3614 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3617 case L2CAP_CONF_EFS:
3618 if (olen != sizeof(efs))
3621 memcpy(&efs, (void *) val, olen);
3624 case L2CAP_CONF_EWS:
/* EWS requires A2MP support on our side */
3627 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3628 return -ECONNREFUSED;
3629 set_bit(FLAG_EXT_CTRL, &chan->flags);
3630 set_bit(CONF_EWS_RECV, &chan->conf_state);
3631 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3632 chan->remote_tx_win = val;
/* Unknown non-hint option: echo it back and flag the response */
3638 result = L2CAP_CONF_UNKNOWN;
3639 l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
/* --- mode reconciliation (first exchange only) --- */
3644 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3647 switch (chan->mode) {
3648 case L2CAP_MODE_STREAMING:
3649 case L2CAP_MODE_ERTM:
3650 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3651 chan->mode = l2cap_select_mode(rfc.mode,
3652 chan->conn->feat_mask);
3657 if (__l2cap_efs_supported(chan->conn))
3658 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3660 return -ECONNREFUSED;
3663 if (chan->mode != rfc.mode)
3664 return -ECONNREFUSED;
/* Peer asked for a different mode: counter-propose ours once */
3670 if (chan->mode != rfc.mode) {
3671 result = L2CAP_CONF_UNACCEPT;
3672 rfc.mode = chan->mode;
3674 if (chan->num_conf_rsp == 1)
3675 return -ECONNREFUSED;
3677 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3678 (unsigned long) &rfc, endptr - ptr);
/* --- output option generation --- */
3681 if (result == L2CAP_CONF_SUCCESS) {
3682 /* Configure output options and let the other side know
3683 * which ones we don't like. */
3685 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3686 result = L2CAP_CONF_UNACCEPT;
3689 set_bit(CONF_MTU_DONE, &chan->conf_state);
3691 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
/* EFS service type must match ours unless either side is NOTRAFIC */
3694 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3695 efs.stype != L2CAP_SERV_NOTRAFIC &&
3696 efs.stype != chan->local_stype) {
3698 result = L2CAP_CONF_UNACCEPT;
3700 if (chan->num_conf_req >= 1)
3701 return -ECONNREFUSED;
3703 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3705 (unsigned long) &efs, endptr - ptr);
3707 /* Send PENDING Conf Rsp */
3708 result = L2CAP_CONF_PENDING;
3709 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3714 case L2CAP_MODE_BASIC:
3715 chan->fcs = L2CAP_FCS_NONE;
3716 set_bit(CONF_MODE_DONE, &chan->conf_state);
3719 case L2CAP_MODE_ERTM:
/* EWS (if received) already supplied the remote window */
3720 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3721 chan->remote_tx_win = rfc.txwin_size;
3723 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3725 chan->remote_max_tx = rfc.max_transmit;
3727 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3728 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3729 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3730 rfc.max_pdu_size = cpu_to_le16(size);
3731 chan->remote_mps = size;
3733 __l2cap_set_ertm_timeouts(chan, &rfc);
3735 set_bit(CONF_MODE_DONE, &chan->conf_state);
3737 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3738 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
/* Accept and store the peer's EFS parameters (guard partly elided) */
3741 test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3742 chan->remote_id = efs.id;
3743 chan->remote_stype = efs.stype;
3744 chan->remote_msdu = le16_to_cpu(efs.msdu);
3745 chan->remote_flush_to =
3746 le32_to_cpu(efs.flush_to);
3747 chan->remote_acc_lat =
3748 le32_to_cpu(efs.acc_lat);
3749 chan->remote_sdu_itime =
3750 le32_to_cpu(efs.sdu_itime);
3751 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3753 (unsigned long) &efs, endptr - ptr);
3757 case L2CAP_MODE_STREAMING:
3758 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3759 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3760 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3761 rfc.max_pdu_size = cpu_to_le16(size);
3762 chan->remote_mps = size;
3764 set_bit(CONF_MODE_DONE, &chan->conf_state);
3766 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3767 (unsigned long) &rfc, endptr - ptr);
/* Unsupported mode: reject with a zeroed RFC carrying our mode */
3772 result = L2CAP_CONF_UNACCEPT;
3774 memset(&rfc, 0, sizeof(rfc));
3775 rfc.mode = chan->mode;
3778 if (result == L2CAP_CONF_SUCCESS)
3779 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3781 rsp->scid = cpu_to_le16(chan->dcid);
3782 rsp->result = cpu_to_le16(result);
3783 rsp->flags = cpu_to_le16(0);
/* Parse the peer's Configuration Response (@rsp, @len) and build the
 * follow-up Configuration Request into @data (capacity @size).  Options
 * the peer adjusted (MTU, flush timeout, RFC, EWS, EFS, FCS) are either
 * adopted or countered; *result may be downgraded to UNACCEPT for a
 * too-small MTU.  On SUCCESS/PENDING the negotiated ERTM or streaming
 * parameters are committed to the channel.  Returns the request length
 * or -ECONNREFUSED.
 */
3788 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3789 void *data, size_t size, u16 *result)
3791 struct l2cap_conf_req *req = data;
3792 void *ptr = req->data;
3793 void *endptr = data + size;
3796 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3797 struct l2cap_conf_efs efs;
3799 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3801 while (len >= L2CAP_CONF_OPT_SIZE) {
3802 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3807 case L2CAP_CONF_MTU:
/* Counter a below-minimum MTU with our minimum */
3810 if (val < L2CAP_DEFAULT_MIN_MTU) {
3811 *result = L2CAP_CONF_UNACCEPT;
3812 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3815 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3819 case L2CAP_CONF_FLUSH_TO:
3822 chan->flush_to = val;
3823 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3824 chan->flush_to, endptr - ptr);
3827 case L2CAP_CONF_RFC:
/* Length check guards the stack rfc struct */
3828 if (olen != sizeof(rfc))
3830 memcpy(&rfc, (void *)val, olen);
/* State-2 devices may not change mode mid-negotiation */
3831 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3832 rfc.mode != chan->mode)
3833 return -ECONNREFUSED;
3835 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3836 (unsigned long) &rfc, endptr - ptr);
3839 case L2CAP_CONF_EWS:
/* Window can only shrink relative to what we offered */
3842 chan->ack_win = min_t(u16, val, chan->ack_win);
3843 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3844 chan->tx_win, endptr - ptr);
3847 case L2CAP_CONF_EFS:
3848 if (olen != sizeof(efs))
3850 memcpy(&efs, (void *)val, olen);
3851 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3852 efs.stype != L2CAP_SERV_NOTRAFIC &&
3853 efs.stype != chan->local_stype)
3854 return -ECONNREFUSED;
3855 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3856 (unsigned long) &efs, endptr - ptr);
3859 case L2CAP_CONF_FCS:
3862 if (*result == L2CAP_CONF_PENDING)
3863 if (val == L2CAP_FCS_NONE)
3864 set_bit(CONF_RECV_NO_FCS,
/* Basic mode can't be renegotiated to something else */
3870 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3871 return -ECONNREFUSED;
3873 chan->mode = rfc.mode;
/* Commit negotiated parameters once the exchange is (near) final */
3875 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3877 case L2CAP_MODE_ERTM:
3878 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3879 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3880 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3881 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3882 chan->ack_win = min_t(u16, chan->ack_win,
3885 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3886 chan->local_msdu = le16_to_cpu(efs.msdu);
3887 chan->local_sdu_itime =
3888 le32_to_cpu(efs.sdu_itime);
3889 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3890 chan->local_flush_to =
3891 le32_to_cpu(efs.flush_to);
3895 case L2CAP_MODE_STREAMING:
3896 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3900 req->dcid = cpu_to_le16(chan->dcid);
3901 req->flags = cpu_to_le16(0);
/* Build a minimal Configuration Response (no options) with the given
 * @result and continuation @flags.  Returns the response length (return
 * expression elided in this view).
 */
3906 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3907 u16 result, u16 flags)
3909 struct l2cap_conf_rsp *rsp = data;
3910 void *ptr = rsp->data;
3912 BT_DBG("chan %p", chan);
/* Response identifies the peer's channel by our dcid (their scid) */
3914 rsp->scid = cpu_to_le16(chan->dcid);
3915 rsp->result = cpu_to_le16(result);
3916 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE Credit Based Connection Response for a channel
 * that was accepted after DEFER_SETUP, advertising our scid, MTU, MPS
 * and initial credits with result SUCCESS.
 */
3921 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3923 struct l2cap_le_conn_rsp rsp;
3924 struct l2cap_conn *conn = chan->conn;
3926 BT_DBG("chan %p", chan);
3928 rsp.dcid = cpu_to_le16(chan->scid);
3929 rsp.mtu = cpu_to_le16(chan->imtu);
3930 rsp.mps = cpu_to_le16(chan->mps);
3931 rsp.credits = cpu_to_le16(chan->rx_credits);
3932 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
/* Reuse the ident of the original request so the peer can match it */
3934 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Per-channel callback used while verifying an enhanced-credit (ECRED)
 * connection group: @data points to an int result.  Channels still
 * pending acceptance are counted; anything neither connected nor pending
 * marks the group refused.  Skips once a failure was recorded or the
 * response was already sent.
 */
3938 static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
3942 if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
3945 switch (chan->state) {
3947 /* If channel still pending accept add to result */
3953 /* If not connected or pending accept it has been refused */
3954 *result = -ECONNREFUSED;
/* Scratch structure for assembling an ECRED Connection Response: the
 * fixed response header followed by up to L2CAP_ECRED_MAX_CID dcid
 * slots (a count field is presumably declared in an elided line).
 */
3959 struct l2cap_ecred_rsp_data {
3961 struct l2cap_ecred_conn_rsp rsp;
3962 __le16 scid[L2CAP_ECRED_MAX_CID];
/* Per-channel callback that appends a channel's scid to the shared ECRED
 * response being built in @data, clearing its ident so only one response
 * goes out.  Channels excluded by a non-success result are deleted with
 * ECONNRESET.
 */
3967 static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
3969 struct l2cap_ecred_rsp_data *rsp = data;
3971 if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
3974 /* Reset ident so only one response is sent */
3977 /* Include all channels pending with the same ident */
3978 if (!rsp->pdu.rsp.result)
3979 rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid);
3981 l2cap_chan_del(chan, ECONNRESET);
/* Send the deferred Enhanced Credit Based Connection Response for every
 * channel created by the same request ident: verify all are ready,
 * downgrade the result to AUTHORIZATION failure if any was refused, then
 * collect the dcids and send one combined response.
 */
3984 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3986 struct l2cap_conn *conn = chan->conn;
3987 struct l2cap_ecred_rsp_data data;
3988 u16 id = chan->ident;
3994 BT_DBG("chan %p id %d", chan, id);
3996 memset(&data, 0, sizeof(data));
/* All channels of the request share our advertised MTU/MPS/credits */
3998 data.pdu.rsp.mtu = cpu_to_le16(chan->imtu);
3999 data.pdu.rsp.mps = cpu_to_le16(chan->mps);
4000 data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
4001 data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
4003 /* Verify that all channels are ready */
4004 __l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);
/* Any refused channel turns the whole response into a failure */
4010 data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);
4012 /* Build response */
4013 __l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);
4015 l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
4016 sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
/* Send the deferred BR/EDR Connection (or Create Channel) Response for a
 * channel accepted after DEFER_SETUP, then kick off configuration by
 * sending our first Configuration Request if one was not already sent.
 */
4020 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
4022 struct l2cap_conn_rsp rsp;
4023 struct l2cap_conn *conn = chan->conn;
4027 rsp.scid = cpu_to_le16(chan->dcid);
4028 rsp.dcid = cpu_to_le16(chan->scid);
4029 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4030 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* AMP channels answer with CREATE_CHAN_RSP instead of CONN_RSP
 * (the distinguishing condition is elided in this view)
 */
4033 rsp_code = L2CAP_CREATE_CHAN_RSP;
4035 rsp_code = L2CAP_CONN_RSP;
4037 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
4039 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only the first caller past this test sends the config request */
4041 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4044 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4045 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4046 chan->num_conf_req++;
/* Extract final RFC (and extended-window) parameters from a peer's
 * Configuration Response and commit them to the channel.  Sane spec
 * defaults are pre-loaded so a response missing those options still
 * yields a working configuration.  No-op for Basic mode channels.
 */
4049 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
4053 /* Use sane default values in case a misbehaving remote device
4054 * did not send an RFC or extended window size option.
4056 u16 txwin_ext = chan->ack_win;
4057 struct l2cap_conf_rfc rfc = {
4059 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
4060 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
4061 .max_pdu_size = cpu_to_le16(chan->imtu),
4062 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
4065 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and Streaming channels carry RFC parameters */
4067 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
4070 while (len >= L2CAP_CONF_OPT_SIZE) {
4071 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
4076 case L2CAP_CONF_RFC:
/* Length check guards the stack rfc struct */
4077 if (olen != sizeof(rfc))
4079 memcpy(&rfc, (void *)val, olen);
4081 case L2CAP_CONF_EWS:
/* EWS handling lines elided in this view */
4090 case L2CAP_MODE_ERTM:
4091 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
4092 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
4093 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Extended control uses the EWS value, otherwise the RFC window */
4094 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4095 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
4097 chan->ack_win = min_t(u16, chan->ack_win,
4100 case L2CAP_MODE_STREAMING:
4101 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject from the peer.  If it rejects our outstanding
 * Information Request (matching ident), treat the feature-mask exchange
 * as done and start the pending channels anyway.
 */
4105 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4106 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4109 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Malformed (too short) reject: ignore */
4111 if (cmd_len < sizeof(*rej))
4114 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4117 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4118 cmd->ident == conn->info_ident) {
4119 cancel_delayed_work(&conn->info_timer);
4121 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4122 conn->info_ident = 0;
4124 l2cap_conn_start(conn);
/* Handle an incoming BR/EDR Connection Request: find a listening channel
 * for the PSM, run security and CID-validity checks, create the child
 * channel, decide the result/status (success, pending authorization or
 * authentication, or a refusal code), send the response, and if needed
 * kick off the information exchange and first Configuration Request.
 * Returns the new channel or NULL on refusal.
 */
4130 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
4131 struct l2cap_cmd_hdr *cmd,
4132 u8 *data, u8 rsp_code, u8 amp_id)
4134 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
4135 struct l2cap_conn_rsp rsp;
4136 struct l2cap_chan *chan = NULL, *pchan;
4137 int result, status = L2CAP_CS_NO_INFO;
4139 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
4140 __le16 psm = req->psm;
4142 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
4144 /* Check if we have socket listening on psm */
4145 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
4146 &conn->hcon->dst, ACL_LINK);
4148 result = L2CAP_CR_BAD_PSM;
4152 mutex_lock(&conn->chan_lock);
4153 l2cap_chan_lock(pchan);
4155 /* Check if the ACL is secure enough (if not SDP) */
4156 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
4157 !hci_conn_check_link_mode(conn->hcon)) {
4158 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
4159 result = L2CAP_CR_SEC_BLOCK;
/* Default refusal until a channel is successfully created */
4163 result = L2CAP_CR_NO_MEM;
4165 /* Check for valid dynamic CID range (as per Erratum 3253) */
4166 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
4167 result = L2CAP_CR_INVALID_SCID;
4171 /* Check if we already have channel with that dcid */
4172 if (__l2cap_get_chan_by_dcid(conn, scid)) {
4173 result = L2CAP_CR_SCID_IN_USE;
4177 chan = pchan->ops->new_connection(pchan);
4181 /* For certain devices (ex: HID mouse), support for authentication,
4182 * pairing and bonding is optional. For such devices, inorder to avoid
4183 * the ACL alive for too long after L2CAP disconnection, reset the ACL
4184 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4186 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4188 bacpy(&chan->src, &conn->hcon->src);
4189 bacpy(&chan->dst, &conn->hcon->dst);
4190 chan->src_type = bdaddr_src_type(conn->hcon);
4191 chan->dst_type = bdaddr_dst_type(conn->hcon);
4194 chan->local_amp_id = amp_id;
4196 __l2cap_chan_add(conn, chan);
4200 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4202 chan->ident = cmd->ident;
4204 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4205 if (l2cap_chan_check_security(chan, false)) {
/* Deferred setup: userspace must accept before we proceed */
4206 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4207 l2cap_state_change(chan, BT_CONNECT2);
4208 result = L2CAP_CR_PEND;
4209 status = L2CAP_CS_AUTHOR_PEND;
4210 chan->ops->defer(chan);
4212 /* Force pending result for AMP controllers.
4213 * The connection will succeed after the
4214 * physical link is up.
4216 if (amp_id == AMP_ID_BREDR) {
4217 l2cap_state_change(chan, BT_CONFIG);
4218 result = L2CAP_CR_SUCCESS;
4220 l2cap_state_change(chan, BT_CONNECT2);
4221 result = L2CAP_CR_PEND;
4223 status = L2CAP_CS_NO_INFO;
/* Security not yet satisfied: wait for authentication */
4226 l2cap_state_change(chan, BT_CONNECT2);
4227 result = L2CAP_CR_PEND;
4228 status = L2CAP_CS_AUTHEN_PEND;
/* Feature mask exchange still in flight: answer pending */
4231 l2cap_state_change(chan, BT_CONNECT2);
4232 result = L2CAP_CR_PEND;
4233 status = L2CAP_CS_NO_INFO;
4237 l2cap_chan_unlock(pchan);
4238 mutex_unlock(&conn->chan_lock);
4239 l2cap_chan_put(pchan);
4242 rsp.scid = cpu_to_le16(scid);
4243 rsp.dcid = cpu_to_le16(dcid);
4244 rsp.result = cpu_to_le16(result);
4245 rsp.status = cpu_to_le16(status);
4246 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Start the feature-mask info exchange if we haven't yet */
4248 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4249 struct l2cap_info_req info;
4250 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4252 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4253 conn->info_ident = l2cap_get_ident(conn);
4255 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4257 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4258 sizeof(info), &info);
/* Immediately-successful connections proceed to configuration */
4261 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4262 result == L2CAP_CR_SUCCESS) {
4264 set_bit(CONF_REQ_SENT, &chan->conf_state);
4265 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4266 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4267 chan->num_conf_req++;
/* Handle an incoming L2CAP Connection Request PDU on a BR/EDR link.
 * Validates the command length, reports the connection to the mgmt
 * layer once per hci_conn, then delegates channel setup to
 * l2cap_connect() with a standard L2CAP_CONN_RSP response code.
 * NOTE(review): interleaved lines are missing from this view (e.g. the
 * hci_dev_lock() matching the unlock below); verify against full source.
 */
4273 static int l2cap_connect_req(struct l2cap_conn *conn,
4274 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4276 struct hci_dev *hdev = conn->hcon->hdev;
4277 struct hci_conn *hcon = conn->hcon;
/* Reject PDUs shorter than a full connect request */
4279 if (cmd_len < sizeof(struct l2cap_conn_req))
/* Notify mgmt only on the first L2CAP connect for this hcon;
 * test_and_set_bit makes the notification one-shot.
 */
4283 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4284 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4285 mgmt_device_connected(hdev, hcon, NULL, 0);
4286 hci_dev_unlock(hdev);
/* amp_id 0 => respond as a plain BR/EDR connection */
4288 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response / Create Channel Response PDU.
 * Looks up the local channel (by scid for success/failure, by command
 * ident while still pending), then either advances it to BT_CONFIG and
 * sends the first Configure Request, keeps it pending, or tears it down
 * with ECONNREFUSED.
 */
4292 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4293 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4296 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4297 u16 scid, dcid, result, status;
4298 struct l2cap_chan *chan;
4302 if (cmd_len < sizeof(*rsp))
4305 scid = __le16_to_cpu(rsp->scid);
4306 dcid = __le16_to_cpu(rsp->dcid);
4307 result = __le16_to_cpu(rsp->result);
4308 status = __le16_to_cpu(rsp->status);
/* A successful response must carry a dcid in the dynamic CID range */
4310 if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
4311 dcid > L2CAP_CID_DYN_END))
4314 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4315 dcid, scid, result, status);
4317 mutex_lock(&conn->chan_lock);
/* Prefer lookup by source CID; fall back to the pending command ident */
4320 chan = __l2cap_get_chan_by_scid(conn, scid);
4326 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
/* Take a reference only if the channel is not already being freed */
4333 chan = l2cap_chan_hold_unless_zero(chan);
4341 l2cap_chan_lock(chan);
4344 case L2CAP_CR_SUCCESS:
/* Guard against a duplicate dcid already in use on this conn */
4345 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
4350 l2cap_state_change(chan, BT_CONFIG);
4353 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Send the initial Configure Request exactly once */
4355 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4358 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4359 l2cap_build_conf_req(chan, req, sizeof(req)), req);
4360 chan->num_conf_req++;
/* Pending result: remember that connect is still outstanding */
4364 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result: remote refused the connection */
4368 l2cap_chan_del(chan, ECONNREFUSED);
4372 l2cap_chan_unlock(chan);
4373 l2cap_chan_put(chan);
4376 mutex_unlock(&conn->chan_lock);
/* Choose the channel's FCS setting once configuration is complete:
 * no FCS outside ERTM/streaming mode, CRC16 otherwise unless the peer
 * negotiated FCS off (CONF_RECV_NO_FCS).
 */
4381 static inline void set_default_fcs(struct l2cap_chan *chan)
4383 /* FCS is enabled only in ERTM or streaming mode, if one or both
4386 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4387 chan->fcs = L2CAP_FCS_NONE;
4388 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4389 chan->fcs = L2CAP_FCS_CRC16;
/* Send a successful Configure Response (EFS path), clearing the local
 * pending-config flag and marking our half of configuration done.
 */
4392 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4393 u8 ident, u16 flags)
4395 struct l2cap_conn *conn = chan->conn;
4397 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4400 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4401 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
/* Build the response in caller-provided 'data' and transmit it */
4403 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4404 l2cap_build_conf_rsp(chan, data,
4405 L2CAP_CONF_SUCCESS, flags), data);
/* Send a Command Reject PDU with reason "invalid CID", echoing the
 * offending source/destination CIDs back to the peer.
 */
4408 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4411 struct l2cap_cmd_rej_cid rej;
4413 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4414 rej.scid = __cpu_to_le16(scid);
4415 rej.dcid = __cpu_to_le16(dcid);
4417 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle a Configure Request PDU. Accumulates (possibly fragmented)
 * option data into chan->conf_req, parses the complete set, replies
 * with a Configure Response, and — once both directions are configured —
 * initializes ERTM state and marks the channel ready. Also sends our own
 * Configure Request if we have not yet done so.
 */
4420 static inline int l2cap_config_req(struct l2cap_conn *conn,
4421 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4424 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4427 struct l2cap_chan *chan;
4430 if (cmd_len < sizeof(*req))
4433 dcid = __le16_to_cpu(req->dcid);
4434 flags = __le16_to_cpu(req->flags);
4436 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
/* The peer's dcid is our scid; reject if no such channel */
4438 chan = l2cap_get_chan_by_scid(conn, dcid);
4440 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Configuration is only legal in these states */
4444 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4445 chan->state != BT_CONNECTED) {
4446 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4451 /* Reject if config buffer is too small. */
4452 len = cmd_len - sizeof(*req);
4453 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4454 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4455 l2cap_build_conf_rsp(chan, rsp,
4456 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the accumulated request options */
4461 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4462 chan->conf_len += len;
4464 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4465 /* Incomplete config. Send empty response. */
4466 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4467 l2cap_build_conf_rsp(chan, rsp,
4468 L2CAP_CONF_SUCCESS, flags), rsp);
4472 /* Complete config. */
4473 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
/* Parse failure: abort the channel */
4475 l2cap_send_disconn_req(chan, ECONNRESET);
4479 chan->ident = cmd->ident;
4480 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4481 if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
4482 chan->num_conf_rsp++;
4484 /* Reset config buffer. */
4487 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both sides configured: finalize FCS and start ERTM if needed */
4490 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4491 set_default_fcs(chan);
4493 if (chan->mode == L2CAP_MODE_ERTM ||
4494 chan->mode == L2CAP_MODE_STREAMING)
4495 err = l2cap_ertm_init(chan);
4498 l2cap_send_disconn_req(chan, -err);
4500 l2cap_chan_ready(chan);
/* Make sure our own Configure Request goes out exactly once */
4505 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4507 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4508 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4509 chan->num_conf_req++;
4512 /* Got Conf Rsp PENDING from remote side and assume we sent
4513 Conf Rsp PENDING in the code above */
4514 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4515 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4517 /* check compatibility */
4519 /* Send rsp for BR/EDR channel */
4521 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4523 chan->ident = cmd->ident;
4527 l2cap_chan_unlock(chan);
4528 l2cap_chan_put(chan);
/* Handle a Configure Response PDU. Dispatches on the result code:
 * success records the negotiated RFC options, pending may trigger an
 * EFS response or AMP logical-link creation, unknown/unacceptable
 * retries with a corrected Configure Request (bounded by
 * L2CAP_CONF_MAX_CONF_RSP), and anything else aborts the channel.
 */
4532 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4533 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4536 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4537 u16 scid, flags, result;
4538 struct l2cap_chan *chan;
4539 int len = cmd_len - sizeof(*rsp);
4542 if (cmd_len < sizeof(*rsp))
4545 scid = __le16_to_cpu(rsp->scid);
4546 flags = __le16_to_cpu(rsp->flags);
4547 result = __le16_to_cpu(rsp->result);
4549 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4552 chan = l2cap_get_chan_by_scid(conn, scid);
4557 case L2CAP_CONF_SUCCESS:
/* Extract the RFC option the peer accepted */
4558 l2cap_conf_rfc_get(chan, rsp->data, len);
4559 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4562 case L2CAP_CONF_PENDING:
4563 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4565 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4568 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4569 buf, sizeof(buf), &result);
4571 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR (no high-speed hcon): answer immediately; otherwise
 * an AMP logical link must come up first.
 */
4575 if (!chan->hs_hcon) {
4576 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4579 if (l2cap_check_efs(chan)) {
4580 amp_create_logical_link(chan);
4581 chan->ident = cmd->ident;
4587 case L2CAP_CONF_UNKNOWN:
4588 case L2CAP_CONF_UNACCEPT:
4589 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Peer's rejected-options payload must fit our request buffer */
4592 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4593 l2cap_send_disconn_req(chan, ECONNRESET);
4597 /* throw out any old stored conf requests */
4598 result = L2CAP_CONF_SUCCESS;
4599 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4600 req, sizeof(req), &result);
4602 l2cap_send_disconn_req(chan, ECONNRESET);
/* Retry configuration with adjusted options */
4606 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4607 L2CAP_CONF_REQ, len, req);
4608 chan->num_conf_req++;
4609 if (result != L2CAP_CONF_SUCCESS)
/* Default: fatal result — flag error and disconnect */
4616 l2cap_chan_set_err(chan, ECONNRESET);
4618 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4619 l2cap_send_disconn_req(chan, ECONNRESET);
/* More response fragments to come; wait for the final one */
4623 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4626 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4628 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4629 set_default_fcs(chan);
4631 if (chan->mode == L2CAP_MODE_ERTM ||
4632 chan->mode == L2CAP_MODE_STREAMING)
4633 err = l2cap_ertm_init(chan);
4636 l2cap_send_disconn_req(chan, -err);
4638 l2cap_chan_ready(chan);
4642 l2cap_chan_unlock(chan);
4643 l2cap_chan_put(chan);
/* Handle a Disconnection Request PDU: acknowledge with a Disconnection
 * Response, shut the socket down, and remove the channel with
 * ECONNRESET. Note the lock dance: the chan lock is dropped and
 * re-taken under conn->chan_lock to respect lock ordering before
 * l2cap_chan_del().
 */
4647 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4648 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4651 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4652 struct l2cap_disconn_rsp rsp;
4654 struct l2cap_chan *chan;
/* Disconnect requests have a fixed size — exact match required */
4656 if (cmd_len != sizeof(*req))
4659 scid = __le16_to_cpu(req->scid);
4660 dcid = __le16_to_cpu(req->dcid);
4662 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid names our local channel */
4664 chan = l2cap_get_chan_by_scid(conn, dcid);
4666 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
/* Echo our CIDs back (swapped perspective) in the response */
4670 rsp.dcid = cpu_to_le16(chan->scid);
4671 rsp.scid = cpu_to_le16(chan->dcid);
4672 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4674 chan->ops->set_shutdown(chan);
/* Re-acquire chan lock under chan_lock for correct lock ordering */
4676 l2cap_chan_unlock(chan);
4677 mutex_lock(&conn->chan_lock);
4678 l2cap_chan_lock(chan);
4679 l2cap_chan_del(chan, ECONNRESET);
4680 mutex_unlock(&conn->chan_lock);
4682 chan->ops->close(chan);
4684 l2cap_chan_unlock(chan);
4685 l2cap_chan_put(chan);
/* Handle a Disconnection Response PDU: if the channel is in BT_DISCONN
 * (i.e. we initiated the disconnect), remove and close it. Uses the
 * same unlock/relock sequence as l2cap_disconnect_req() to take
 * conn->chan_lock before l2cap_chan_del().
 */
4690 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4691 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4694 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4696 struct l2cap_chan *chan;
4698 if (cmd_len != sizeof(*rsp))
4701 scid = __le16_to_cpu(rsp->scid);
4702 dcid = __le16_to_cpu(rsp->dcid);
4704 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4706 chan = l2cap_get_chan_by_scid(conn, scid);
/* Ignore stale responses for channels not disconnecting */
4711 if (chan->state != BT_DISCONN) {
4712 l2cap_chan_unlock(chan);
4713 l2cap_chan_put(chan);
/* Re-acquire chan lock under chan_lock for correct lock ordering */
4717 l2cap_chan_unlock(chan);
4718 mutex_lock(&conn->chan_lock);
4719 l2cap_chan_lock(chan);
/* err 0: clean, expected disconnect */
4720 l2cap_chan_del(chan, 0);
4721 mutex_unlock(&conn->chan_lock);
4723 chan->ops->close(chan);
4725 l2cap_chan_unlock(chan);
4726 l2cap_chan_put(chan);
/* Handle an Information Request PDU. Answers three cases:
 * feature mask (advertises ERTM/streaming plus AMP extensions when
 * A2MP is a local fixed channel), fixed-channel map, or NOTSUPP for
 * any other type.
 */
4731 static inline int l2cap_information_req(struct l2cap_conn *conn,
4732 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4735 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4738 if (cmd_len != sizeof(*req))
4741 type = __le16_to_cpu(req->type);
4743 BT_DBG("type 0x%4.4x", type);
4745 if (type == L2CAP_IT_FEAT_MASK) {
4747 u32 feat_mask = l2cap_feat_mask;
4748 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4749 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4750 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Always advertise ERTM and streaming on top of the base mask */
4752 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* AMP-capable hosts additionally offer extended flow spec/window */
4754 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4755 feat_mask |= L2CAP_FEAT_EXT_FLOW
4756 | L2CAP_FEAT_EXT_WINDOW;
4758 put_unaligned_le32(feat_mask, rsp->data);
4759 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4761 } else if (type == L2CAP_IT_FIXED_CHAN) {
4763 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4765 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4766 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Fixed-channel map is 8 octets: first holds our local map */
4767 rsp->data[0] = conn->local_fixed_chan;
4768 memset(rsp->data + 1, 0, 7);
4769 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
/* Unknown info type: reply NOTSUPP with no payload */
4772 struct l2cap_info_rsp rsp;
4773 rsp.type = cpu_to_le16(type);
4774 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4775 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an Information Response PDU. Only accepted while an info
 * exchange we started is outstanding (ident must match, feature-mask
 * exchange not yet done). Records the peer's feature mask and, if it
 * supports fixed channels, chains a fixed-channel query; otherwise
 * marks the exchange done and kicks pending connections.
 */
4782 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4783 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4786 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4789 if (cmd_len < sizeof(*rsp))
4792 type = __le16_to_cpu(rsp->type);
4793 result = __le16_to_cpu(rsp->result);
4795 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4797 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4798 if (cmd->ident != conn->info_ident ||
4799 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
/* Stop the info-request timeout; we got an answer */
4802 cancel_delayed_work(&conn->info_timer);
4804 if (result != L2CAP_IR_SUCCESS) {
4805 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4806 conn->info_ident = 0;
/* Proceed with queued channel setups despite the failure */
4808 l2cap_conn_start(conn);
4814 case L2CAP_IT_FEAT_MASK:
4815 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask for its fixed-channel map */
4817 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4818 struct l2cap_info_req req;
4819 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4821 conn->info_ident = l2cap_get_ident(conn);
4823 l2cap_send_cmd(conn, conn->info_ident,
4824 L2CAP_INFO_REQ, sizeof(req), &req);
4826 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4827 conn->info_ident = 0;
4829 l2cap_conn_start(conn);
4833 case L2CAP_IT_FIXED_CHAN:
4834 conn->remote_fixed_chan = rsp->data[0];
4835 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4836 conn->info_ident = 0;
4838 l2cap_conn_start(conn);
/* Handle a Create Channel Request PDU (AMP). amp_id 0 falls back to a
 * plain BR/EDR connect; otherwise the requested AMP controller is
 * validated and, if an AMP link to the peer exists, the new channel is
 * wired to it. Invalid controllers get an L2CAP_CR_BAD_AMP response.
 */
4845 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4846 struct l2cap_cmd_hdr *cmd,
4847 u16 cmd_len, void *data)
4849 struct l2cap_create_chan_req *req = data;
4850 struct l2cap_create_chan_rsp rsp;
4851 struct l2cap_chan *chan;
4852 struct hci_dev *hdev;
4855 if (cmd_len != sizeof(*req))
/* Only meaningful if we advertise the A2MP fixed channel */
4858 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4861 psm = le16_to_cpu(req->psm);
4862 scid = le16_to_cpu(req->scid);
4864 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4866 /* For controller id 0 make BR/EDR connection */
4867 if (req->amp_id == AMP_ID_BREDR) {
4868 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4873 /* Validate AMP controller id */
4874 hdev = hci_dev_get(req->amp_id);
/* Controller must exist, be an AMP device, and be powered up */
4878 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4883 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4886 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4887 struct hci_conn *hs_hcon;
/* Look up the existing AMP physical link to this peer */
4889 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4893 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4898 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4900 mgr->bredr_chan = chan;
4901 chan->hs_hcon = hs_hcon;
/* AMP data path: no L2CAP FCS, MTU bounded by controller blocks */
4902 chan->fcs = L2CAP_FCS_NONE;
4903 conn->mtu = hdev->block_mtu;
/* Error path: reject the create request as bad AMP controller */
4912 rsp.scid = cpu_to_le16(scid);
4913 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4914 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4916 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send a Move Channel Request for this channel toward dest_amp_id and
 * arm the move timeout. Records the ident so the response can be
 * matched back to the channel.
 */
4922 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4924 struct l2cap_move_chan_req req;
4927 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4929 ident = l2cap_get_ident(chan->conn);
4930 chan->ident = ident;
/* The initiator's CID is the moved channel's identifier (icid) */
4932 req.icid = cpu_to_le16(chan->scid);
4933 req.dest_amp_id = dest_amp_id;
4935 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4938 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send a Move Channel Response with the given result, reusing the
 * ident stored from the peer's request.
 */
4941 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4943 struct l2cap_move_chan_rsp rsp;
4945 BT_DBG("chan %p, result 0x%4.4x", chan, result);
/* Responder side: icid is the peer's CID, i.e. our dcid */
4947 rsp.icid = cpu_to_le16(chan->dcid);
4948 rsp.result = cpu_to_le16(result);
4950 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send a Move Channel Confirmation with a fresh ident and arm the
 * move timeout while waiting for the confirmation response.
 */
4954 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4956 struct l2cap_move_chan_cfm cfm;
4958 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4960 chan->ident = l2cap_get_ident(chan->conn);
/* Initiator side: icid is our own CID */
4962 cfm.icid = cpu_to_le16(chan->scid);
4963 cfm.result = cpu_to_le16(result);
4965 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4968 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirmation for a bare icid —
 * used when no matching channel could be found for a move response.
 */
4971 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4973 struct l2cap_move_chan_cfm cfm;
4975 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4977 cfm.icid = cpu_to_le16(icid);
4978 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4980 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Acknowledge a Move Channel Confirmation with a Confirmation Response
 * echoing the icid, using the confirmer's ident.
 */
4984 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4987 struct l2cap_move_chan_cfm_rsp rsp;
4989 BT_DBG("icid 0x%4.4x", icid);
4991 rsp.icid = cpu_to_le16(icid);
4992 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Detach the channel from its high-speed (AMP) logical link by
 * clearing the hchan/hcon pointers. Actual link release is still a
 * placeholder (AMP support incomplete in this code).
 */
4995 static void __release_logical_link(struct l2cap_chan *chan)
4997 chan->hs_hchan = NULL;
4998 chan->hs_hcon = NULL;
5000 /* Placeholder - release the logical link */
/* React to a failed AMP logical-link setup. If the channel was never
 * connected the whole channel is torn down; otherwise the in-flight
 * move is unwound according to this side's move role.
 */
5003 static void l2cap_logical_fail(struct l2cap_chan *chan)
5005 /* Logical link setup failed */
5006 if (chan->state != BT_CONNECTED) {
5007 /* Create channel failure, disconnect */
5008 l2cap_send_disconn_req(chan, ECONNRESET);
5012 switch (chan->move_role) {
5013 case L2CAP_MOVE_ROLE_RESPONDER:
/* Tell the initiator we cannot complete the move */
5014 l2cap_move_done(chan);
5015 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
5017 case L2CAP_MOVE_ROLE_INITIATOR:
5018 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
5019 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
5020 /* Remote has only sent pending or
5021 * success responses, clean up
5023 l2cap_move_done(chan);
5026 /* Other amp move states imply that the move
5027 * has already aborted
5029 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the AMP logical link is up: attach
 * the hchan, send the deferred EFS Configure Response, and — if the
 * inbound config already finished — initialize ERTM and mark ready.
 */
5034 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
5035 struct hci_chan *hchan)
5037 struct l2cap_conf_rsp rsp;
5039 chan->hs_hchan = hchan;
/* Route L2CAP data for the high-speed hcon through this conn */
5040 chan->hs_hcon->l2cap_data = chan->conn;
5042 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
5044 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
5047 set_default_fcs(chan);
5049 err = l2cap_ertm_init(chan);
5051 l2cap_send_disconn_req(chan, -err);
5053 l2cap_chan_ready(chan);
/* Advance an in-progress channel move after the AMP logical link comes
 * up, driven by the move state machine; unexpected states release the
 * link and return the move machinery to STABLE.
 */
5057 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
5058 struct hci_chan *hchan)
5060 chan->hs_hcon = hchan->conn;
5061 chan->hs_hcon->l2cap_data = chan->conn;
5063 BT_DBG("move_state %d", chan->move_state);
5065 switch (chan->move_state) {
5066 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5067 /* Move confirm will be sent after a success
5068 * response is received
5070 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5072 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* Local busy defers the confirm; otherwise it depends on role */
5073 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5074 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5075 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5076 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5077 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5078 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5079 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5080 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5084 /* Move was not in expected state, free the channel */
5085 __release_logical_link(chan);
5087 chan->move_state = L2CAP_MOVE_STABLE;
5091 /* Call with chan locked */
/* Entry point for AMP logical-link completion: on failure unwind via
 * l2cap_logical_fail() and drop the link; on success finish either the
 * channel-create path (not yet connected) or the move path.
 */
5092 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5095 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
5098 l2cap_logical_fail(chan);
5099 __release_logical_link(chan);
5103 if (chan->state != BT_CONNECTED) {
5104 /* Ignore logical link if channel is on BR/EDR */
5105 if (chan->local_amp_id != AMP_ID_BREDR)
5106 l2cap_logical_finish_create(chan, hchan);
5108 l2cap_logical_finish_move(chan, hchan);
/* Begin moving this channel to the other controller type. From BR/EDR
 * (only when channel policy prefers AMP) the physical link must be
 * prepared first; from AMP the move request is sent immediately with
 * destination 0 (back to BR/EDR).
 */
5112 void l2cap_move_start(struct l2cap_chan *chan)
5114 BT_DBG("chan %p", chan);
5116 if (chan->local_amp_id == AMP_ID_BREDR) {
/* Respect channel policy: do not move unless AMP is preferred */
5117 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5119 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5120 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5121 /* Placeholder - start physical link setup */
5123 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5124 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5126 l2cap_move_setup(chan);
/* Destination 0 == AMP_ID_BREDR: move back to BR/EDR */
5127 l2cap_send_move_chan_req(chan, 0);
/* Continue AMP channel creation after the physical link outcome is
 * known. Outgoing channels either proceed with a Create Channel
 * Request or fall back to a BR/EDR connect; incoming channels get a
 * Create Channel Response and, on success, the first Configure Request.
 */
5131 static void l2cap_do_create(struct l2cap_chan *chan, int result,
5132 u8 local_amp_id, u8 remote_amp_id)
5134 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
5135 local_amp_id, remote_amp_id);
/* AMP channels carry no L2CAP FCS */
5137 chan->fcs = L2CAP_FCS_NONE;
5139 /* Outgoing channel on AMP */
5140 if (chan->state == BT_CONNECT) {
5141 if (result == L2CAP_CR_SUCCESS) {
5142 chan->local_amp_id = local_amp_id;
5143 l2cap_send_create_chan_req(chan, remote_amp_id);
5145 /* Revert to BR/EDR connect */
5146 l2cap_send_conn_req(chan);
5152 /* Incoming channel on AMP */
5153 if (__l2cap_no_conn_pending(chan)) {
5154 struct l2cap_conn_rsp rsp;
5156 rsp.scid = cpu_to_le16(chan->dcid);
5157 rsp.dcid = cpu_to_le16(chan->scid);
5159 if (result == L2CAP_CR_SUCCESS) {
5160 /* Send successful response */
5161 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
5162 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5164 /* Send negative response */
5165 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
5166 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5169 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
5172 if (result == L2CAP_CR_SUCCESS) {
/* Kick off configuration for the accepted channel */
5173 l2cap_state_change(chan, BT_CONFIG);
5174 set_bit(CONF_REQ_SENT, &chan->conf_state);
5175 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
5177 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
5178 chan->num_conf_req++;
/* Initiator path: prepare the channel for moving, remember the target
 * controller, and send the Move Channel Request.
 */
5183 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5186 l2cap_move_setup(chan);
5187 chan->move_id = local_amp_id;
5188 chan->move_state = L2CAP_MOVE_WAIT_RSP;
5190 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder path: answer the peer's move request depending on whether
 * the AMP logical link is already connected. The hchan lookup is still
 * a placeholder, so hchan is NULL until AMP support is completed.
 */
5193 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
5195 struct hci_chan *hchan = NULL;
5197 /* Placeholder - get hci_chan for logical link */
5200 if (hchan->state == BT_CONNECTED) {
5201 /* Logical link is ready to go */
5202 chan->hs_hcon = hchan->conn;
5203 chan->hs_hcon->l2cap_data = chan->conn;
5204 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5205 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5207 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5209 /* Wait for logical link to be ready */
5210 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5213 /* Logical link not available */
5214 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort a channel move: as responder, send the appropriate negative
 * Move Channel Response; in all cases reset the move machinery to
 * STABLE and resume ERTM transmission.
 */
5218 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5220 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
/* -EINVAL maps to "bad controller id", everything else to refusal */
5222 if (result == -EINVAL)
5223 rsp_result = L2CAP_MR_BAD_ID;
5225 rsp_result = L2CAP_MR_NOT_ALLOWED;
5227 l2cap_send_move_chan_rsp(chan, rsp_result);
5230 chan->move_role = L2CAP_MOVE_ROLE_NONE;
5231 chan->move_state = L2CAP_MOVE_STABLE;
5233 /* Restart data transmission */
5234 l2cap_ertm_send(chan);
5237 /* Invoke with locked chan */
/* Physical-link completion entry point: routes to channel creation
 * (channel not yet connected), move cancellation (link failed), or the
 * role-specific move continuation.
 */
5238 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5240 u8 local_amp_id = chan->local_amp_id;
5241 u8 remote_amp_id = chan->remote_amp_id;
5243 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5244 chan, result, local_amp_id, remote_amp_id);
/* Nothing to do for channels already going away */
5246 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
5249 if (chan->state != BT_CONNECTED) {
5250 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5251 } else if (result != L2CAP_MR_SUCCESS) {
5252 l2cap_do_move_cancel(chan, result);
5254 switch (chan->move_role) {
5255 case L2CAP_MOVE_ROLE_INITIATOR:
5256 l2cap_do_move_initiate(chan, local_amp_id,
5259 case L2CAP_MOVE_ROLE_RESPONDER:
5260 l2cap_do_move_respond(chan, result);
5263 l2cap_do_move_cancel(chan, result);
/* Handle a Move Channel Request PDU. Validates that the channel is a
 * movable dynamic ERTM/streaming channel, the destination controller
 * exists, and no move collision is lost; then becomes move responder
 * and answers with success/pending or the proper error result.
 */
5269 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5270 struct l2cap_cmd_hdr *cmd,
5271 u16 cmd_len, void *data)
5273 struct l2cap_move_chan_req *req = data;
5274 struct l2cap_move_chan_rsp rsp;
5275 struct l2cap_chan *chan;
5277 u16 result = L2CAP_MR_NOT_ALLOWED;
5279 if (cmd_len != sizeof(*req))
5282 icid = le16_to_cpu(req->icid);
5284 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
/* Moves require local A2MP support */
5286 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
5289 chan = l2cap_get_chan_by_dcid(conn, icid);
/* Unknown icid: reply NOT_ALLOWED without a channel */
5291 rsp.icid = cpu_to_le16(icid);
5292 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5293 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5298 chan->ident = cmd->ident;
/* Only dynamic ERTM/streaming channels without a BR/EDR-only
 * policy may be moved.
 */
5300 if (chan->scid < L2CAP_CID_DYN_START ||
5301 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5302 (chan->mode != L2CAP_MODE_ERTM &&
5303 chan->mode != L2CAP_MODE_STREAMING)) {
5304 result = L2CAP_MR_NOT_ALLOWED;
5305 goto send_move_response;
/* Moving to the controller we are already on is an error */
5308 if (chan->local_amp_id == req->dest_amp_id) {
5309 result = L2CAP_MR_SAME_ID;
5310 goto send_move_response;
5313 if (req->dest_amp_id != AMP_ID_BREDR) {
5314 struct hci_dev *hdev;
5315 hdev = hci_dev_get(req->dest_amp_id);
/* Destination must be an AMP controller that is up */
5316 if (!hdev || hdev->dev_type != HCI_AMP ||
5317 !test_bit(HCI_UP, &hdev->flags)) {
5321 result = L2CAP_MR_BAD_ID;
5322 goto send_move_response;
5327 /* Detect a move collision. Only send a collision response
5328 * if this side has "lost", otherwise proceed with the move.
5329 * The winner has the larger bd_addr.
5331 if ((__chan_is_moving(chan) ||
5332 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5333 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5334 result = L2CAP_MR_COLLISION;
5335 goto send_move_response;
5338 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5339 l2cap_move_setup(chan);
5340 chan->move_id = req->dest_amp_id;
5342 if (req->dest_amp_id == AMP_ID_BREDR) {
5343 /* Moving to BR/EDR */
5344 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5345 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5346 result = L2CAP_MR_PEND;
5348 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5349 result = L2CAP_MR_SUCCESS;
/* Moving to AMP: physical link prep happens asynchronously */
5352 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5353 /* Placeholder - uncomment when amp functions are available */
5354 /*amp_accept_physical(chan, req->dest_amp_id);*/
5355 result = L2CAP_MR_PEND;
5359 l2cap_send_move_chan_rsp(chan, result);
5361 l2cap_chan_unlock(chan);
5362 l2cap_chan_put(chan);
/* Continue an initiator-side move after a success/pending Move Channel
 * Response, advancing the move state machine and — when the logical
 * link is already connected — sending the Move Channel Confirmation.
 * Any unexpected state cancels the move as UNCONFIRMED.
 */
5367 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5369 struct l2cap_chan *chan;
5370 struct hci_chan *hchan = NULL;
5372 chan = l2cap_get_chan_by_scid(conn, icid);
/* No channel for this icid: confirm UNCONFIRMED on best effort */
5374 l2cap_send_move_chan_cfm_icid(conn, icid);
5378 __clear_chan_timer(chan);
/* Pending responses extend the wait with the ERTX timeout */
5379 if (result == L2CAP_MR_PEND)
5380 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5382 switch (chan->move_state) {
5383 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5384 /* Move confirm will be sent when logical link
5387 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5389 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5390 if (result == L2CAP_MR_PEND) {
5392 } else if (test_bit(CONN_LOCAL_BUSY,
5393 &chan->conn_state)) {
5394 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5396 /* Logical link is up or moving to BR/EDR,
5399 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5400 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5403 case L2CAP_MOVE_WAIT_RSP:
5405 if (result == L2CAP_MR_SUCCESS) {
5406 /* Remote is ready, send confirm immediately
5407 * after logical link is ready
5409 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5411 /* Both logical link and move success
5412 * are required to confirm
5414 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5417 /* Placeholder - get hci_chan for logical link */
/* hchan stays NULL until AMP support is complete */
5419 /* Logical link not available */
5420 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5424 /* If the logical link is not yet connected, do not
5425 * send confirmation.
5427 if (hchan->state != BT_CONNECTED)
5430 /* Logical link is already ready to go */
5432 chan->hs_hcon = hchan->conn;
5433 chan->hs_hcon->l2cap_data = chan->conn;
5435 if (result == L2CAP_MR_SUCCESS) {
5436 /* Can confirm now */
5437 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5439 /* Now only need move success
5442 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5445 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
/* Fall-through default path for unexpected states */
5448 /* Any other amp move state means the move failed. */
5449 chan->move_id = chan->local_amp_id;
5450 l2cap_move_done(chan);
5451 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5454 l2cap_chan_unlock(chan);
5455 l2cap_chan_put(chan);
/* Handle a failed Move Channel Response. A collision demotes us to
 * responder (the peer's move proceeds); any other failure cancels the
 * move. In all cases an UNCONFIRMED confirmation is sent.
 */
5458 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5461 struct l2cap_chan *chan;
5463 chan = l2cap_get_chan_by_ident(conn, ident);
5465 /* Could not locate channel, icid is best guess */
5466 l2cap_send_move_chan_cfm_icid(conn, icid);
5470 __clear_chan_timer(chan);
5472 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5473 if (result == L2CAP_MR_COLLISION) {
/* Lost the collision: let the peer drive the move */
5474 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5476 /* Cleanup - cancel move */
5477 chan->move_id = chan->local_amp_id;
5478 l2cap_move_done(chan);
5482 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5484 l2cap_chan_unlock(chan);
5485 l2cap_chan_put(chan);
/* Handle a Move Channel Response PDU: success/pending continues the
 * move, any other result routes to the failure path.
 */
5488 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5489 struct l2cap_cmd_hdr *cmd,
5490 u16 cmd_len, void *data)
5492 struct l2cap_move_chan_rsp *rsp = data;
5495 if (cmd_len != sizeof(*rsp))
5498 icid = le16_to_cpu(rsp->icid);
5499 result = le16_to_cpu(rsp->result);
5501 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5503 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5504 l2cap_move_continue(conn, icid, result);
5506 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle a Move Channel Confirmation PDU (responder side). A confirmed
 * move commits the channel to the new controller (releasing the AMP
 * link when landing on BR/EDR); unconfirmed reverts it. A response is
 * always sent, even for an unknown icid, per the spec.
 */
5511 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5512 struct l2cap_cmd_hdr *cmd,
5513 u16 cmd_len, void *data)
5515 struct l2cap_move_chan_cfm *cfm = data;
5516 struct l2cap_chan *chan;
5519 if (cmd_len != sizeof(*cfm))
5522 icid = le16_to_cpu(cfm->icid);
5523 result = le16_to_cpu(cfm->result);
5525 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5527 chan = l2cap_get_chan_by_dcid(conn, icid);
5529 /* Spec requires a response even if the icid was not found */
5530 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5534 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5535 if (result == L2CAP_MC_CONFIRMED) {
/* Commit: switch the channel onto the moved-to controller */
5536 chan->local_amp_id = chan->move_id;
5537 if (chan->local_amp_id == AMP_ID_BREDR)
5538 __release_logical_link(chan);
/* Unconfirmed: stay on the current controller */
5540 chan->move_id = chan->local_amp_id;
5543 l2cap_move_done(chan);
5546 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5548 l2cap_chan_unlock(chan);
5549 l2cap_chan_put(chan);
/* Handle a Move Channel Confirmation Response PDU (initiator side):
 * commits the move if we were waiting on this response, releasing the
 * AMP logical link when the final destination is BR/EDR.
 */
5554 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5555 struct l2cap_cmd_hdr *cmd,
5556 u16 cmd_len, void *data)
5558 struct l2cap_move_chan_cfm_rsp *rsp = data;
5559 struct l2cap_chan *chan;
5562 if (cmd_len != sizeof(*rsp))
5565 icid = le16_to_cpu(rsp->icid);
5567 BT_DBG("icid 0x%4.4x", icid);
5569 chan = l2cap_get_chan_by_scid(conn, icid);
/* Stop the move timeout armed when the confirmation was sent */
5573 __clear_chan_timer(chan);
5575 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5576 chan->local_amp_id = chan->move_id;
5578 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5579 __release_logical_link(chan);
5581 l2cap_move_done(chan);
5584 l2cap_chan_unlock(chan);
5585 l2cap_chan_put(chan);
/* Handle an LE Connection Parameter Update Request PDU. Only valid on
 * the central (master) role. Parameters are validated against the
 * current max interval and hci_check_conn_params(); accepted values are
 * applied via hci_le_conn_update() and reported to mgmt.
 */
5590 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5591 struct l2cap_cmd_hdr *cmd,
5592 u16 cmd_len, u8 *data)
5594 struct hci_conn *hcon = conn->hcon;
5595 struct l2cap_conn_param_update_req *req;
5596 struct l2cap_conn_param_update_rsp rsp;
5597 u16 min, max, latency, to_multiplier;
/* Only the central may process a parameter update request */
5600 if (hcon->role != HCI_ROLE_MASTER)
5603 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5606 req = (struct l2cap_conn_param_update_req *) data;
5607 min = __le16_to_cpu(req->min);
5608 max = __le16_to_cpu(req->max);
5609 latency = __le16_to_cpu(req->latency);
5610 to_multiplier = __le16_to_cpu(req->to_multiplier);
5612 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5613 min, max, latency, to_multiplier);
5615 memset(&rsp, 0, sizeof(rsp));
/* Reject intervals above the connection's configured ceiling */
5617 if (max > hcon->le_conn_max_interval) {
5618 BT_DBG("requested connection interval exceeds current bounds.");
5621 err = hci_check_conn_params(min, max, latency, to_multiplier);
5625 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5627 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5629 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: apply at the link layer and persist via mgmt */
5635 store_hint = hci_le_conn_update(hcon, min, max, latency,
5637 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5638 store_hint, min, max, latency,
/* Handle an LE Credit Based Connection Response PDU. On success the
 * channel adopts the peer's MPS and initial credits and becomes ready.
 * Authentication/encryption errors trigger a security upgrade and a
 * later retried connect (unless MITM protection is already in place);
 * any other result deletes the channel.
 */
5646 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5647 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5650 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5651 struct hci_conn *hcon = conn->hcon;
5652 u16 dcid, mtu, mps, credits, result;
5653 struct l2cap_chan *chan;
5656 if (cmd_len < sizeof(*rsp))
5659 dcid = __le16_to_cpu(rsp->dcid);
5660 mtu = __le16_to_cpu(rsp->mtu);
5661 mps = __le16_to_cpu(rsp->mps);
5662 credits = __le16_to_cpu(rsp->credits);
5663 result = __le16_to_cpu(rsp->result);
/* Spec minimums: MTU and MPS >= 23; dcid in the LE dynamic range */
5665 if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5666 dcid < L2CAP_CID_DYN_START ||
5667 dcid > L2CAP_CID_LE_DYN_END))
5670 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5671 dcid, mtu, mps, credits, result);
5673 mutex_lock(&conn->chan_lock);
/* LE connects are matched by the signaling ident, not by CID */
5675 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5683 l2cap_chan_lock(chan);
5686 case L2CAP_CR_LE_SUCCESS:
/* Reject a dcid that is already bound to another channel */
5687 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5695 chan->remote_mps = mps;
5696 chan->tx_credits = credits;
5697 l2cap_chan_ready(chan);
5700 case L2CAP_CR_LE_AUTHENTICATION:
5701 case L2CAP_CR_LE_ENCRYPTION:
5702 /* If we already have MITM protection we can't do
5705 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5706 l2cap_chan_del(chan, ECONNREFUSED);
/* Bump required security one level and retry via SMP */
5710 sec_level = hcon->sec_level + 1;
5711 if (chan->sec_level < sec_level)
5712 chan->sec_level = sec_level;
5714 /* We'll need to send a new Connect Request */
5715 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5717 smp_conn_security(hcon, chan->sec_level);
/* Any other result: connection refused */
5721 l2cap_chan_del(chan, ECONNREFUSED);
5725 l2cap_chan_unlock(chan);
5728 mutex_unlock(&conn->chan_lock);
/* Dispatch one BR/EDR signaling command to its handler.
 * Handlers returning int propagate errors to the caller; void-style
 * handlers are invoked for responses that cannot fail here.  Unknown
 * opcodes are logged (and presumably rejected by the caller).
 */
5733 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5734 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5739 switch (cmd->code) {
5740 case L2CAP_COMMAND_REJ:
5741 l2cap_command_rej(conn, cmd, cmd_len, data);
5744 case L2CAP_CONN_REQ:
5745 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5748 case L2CAP_CONN_RSP:
5749 case L2CAP_CREATE_CHAN_RSP:
5750 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5753 case L2CAP_CONF_REQ:
5754 err = l2cap_config_req(conn, cmd, cmd_len, data);
5757 case L2CAP_CONF_RSP:
5758 l2cap_config_rsp(conn, cmd, cmd_len, data);
5761 case L2CAP_DISCONN_REQ:
5762 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5765 case L2CAP_DISCONN_RSP:
5766 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5769 case L2CAP_ECHO_REQ:
/* Echo simply mirrors the payload back with the same ident. */
5770 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5773 case L2CAP_ECHO_RSP:
5776 case L2CAP_INFO_REQ:
5777 err = l2cap_information_req(conn, cmd, cmd_len, data);
5780 case L2CAP_INFO_RSP:
5781 l2cap_information_rsp(conn, cmd, cmd_len, data);
5784 case L2CAP_CREATE_CHAN_REQ:
5785 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5788 case L2CAP_MOVE_CHAN_REQ:
5789 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5792 case L2CAP_MOVE_CHAN_RSP:
5793 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5796 case L2CAP_MOVE_CHAN_CFM:
5797 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5800 case L2CAP_MOVE_CHAN_CFM_RSP:
5801 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5805 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an incoming LE Credit Based Connection Request.
 * Validates PSM/SCID/MTU/MPS, finds a listening channel for the PSM,
 * checks security, creates the new channel and either defers setup
 * (DEFER_SETUP) or replies immediately with a connection response.
 * NOTE(review): this excerpt elides several lines of the original body.
 */
5813 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5814 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5817 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5818 struct l2cap_le_conn_rsp rsp;
5819 struct l2cap_chan *chan, *pchan;
5820 u16 dcid, scid, credits, mtu, mps;
5824 if (cmd_len != sizeof(*req))
5827 scid = __le16_to_cpu(req->scid);
5828 mtu = __le16_to_cpu(req->mtu);
5829 mps = __le16_to_cpu(req->mps);
/* Spec minimum for LE CoC: MTU and MPS must each be at least 23. */
5834 if (mtu < 23 || mps < 23)
5837 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5840 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
5843 * Valid range: 0x0001-0x00ff
5845 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
5847 if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
5848 result = L2CAP_CR_LE_BAD_PSM;
5853 /* Check if we have socket listening on psm */
5854 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5855 &conn->hcon->dst, LE_LINK);
5857 result = L2CAP_CR_LE_BAD_PSM;
5862 mutex_lock(&conn->chan_lock);
5863 l2cap_chan_lock(pchan);
/* Enforce the listener's security requirements before accepting. */
5865 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5867 result = L2CAP_CR_LE_AUTHENTICATION;
5869 goto response_unlock;
5872 /* Check for valid dynamic CID range */
5873 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5874 result = L2CAP_CR_LE_INVALID_SCID;
5876 goto response_unlock;
5879 /* Check if we already have channel with that dcid */
5880 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5881 result = L2CAP_CR_LE_SCID_IN_USE;
5883 goto response_unlock;
5886 chan = pchan->ops->new_connection(pchan);
5888 result = L2CAP_CR_LE_NO_MEM;
5889 goto response_unlock;
5892 bacpy(&chan->src, &conn->hcon->src);
5893 bacpy(&chan->dst, &conn->hcon->dst);
5894 chan->src_type = bdaddr_src_type(conn->hcon);
5895 chan->dst_type = bdaddr_dst_type(conn->hcon);
5899 chan->remote_mps = mps;
5901 __l2cap_chan_add(conn, chan);
5903 l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5906 credits = chan->rx_credits;
5908 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5910 chan->ident = cmd->ident;
5912 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5913 l2cap_state_change(chan, BT_CONNECT2);
5914 /* The following result value is actually not defined
5915 * for LE CoC but we use it to let the function know
5916 * that it should bail out after doing its cleanup
5917 * instead of sending a response.
5919 result = L2CAP_CR_PEND;
5920 chan->ops->defer(chan);
5922 l2cap_chan_ready(chan);
5923 result = L2CAP_CR_LE_SUCCESS;
5927 l2cap_chan_unlock(pchan);
5928 mutex_unlock(&conn->chan_lock);
5929 l2cap_chan_put(pchan);
/* Deferred setup: a response will be sent later by the owner. */
5931 if (result == L2CAP_CR_PEND)
5936 rsp.mtu = cpu_to_le16(chan->imtu);
5937 rsp.mps = cpu_to_le16(chan->mps);
5943 rsp.dcid = cpu_to_le16(dcid);
5944 rsp.credits = cpu_to_le16(credits);
5945 rsp.result = cpu_to_le16(result);
5947 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet: add the peer-granted
 * credits to the channel's TX budget and resume sending.  A credit
 * count that would overflow the allowed maximum is a protocol error
 * and disconnects the channel.
 */
5952 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5953 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5956 struct l2cap_le_credits *pkt;
5957 struct l2cap_chan *chan;
5958 u16 cid, credits, max_credits;
5960 if (cmd_len != sizeof(*pkt))
5963 pkt = (struct l2cap_le_credits *) data;
5964 cid = __le16_to_cpu(pkt->cid);
5965 credits = __le16_to_cpu(pkt->credits);
5967 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5969 chan = l2cap_get_chan_by_dcid(conn, cid);
/* The running total must never exceed LE_FLOWCTL_MAX_CREDITS. */
5973 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5974 if (credits > max_credits) {
5975 BT_ERR("LE credits overflow");
5976 l2cap_send_disconn_req(chan, ECONNRESET);
5978 /* Return 0 so that we don't trigger an unnecessary
5979 * command reject packet.
5984 chan->tx_credits += credits;
5986 /* Resume sending */
5987 l2cap_le_flowctl_send(chan);
5989 if (chan->tx_credits)
5990 chan->ops->resume(chan);
5993 l2cap_chan_unlock(chan);
5994 l2cap_chan_put(chan);
/* Handle an Enhanced Credit Based (ECRED) Connection Request, which can
 * open up to L2CAP_ECRED_MAX_CID channels at once.  Each requested SCID
 * is validated and answered with a DCID (or 0 on per-channel failure);
 * a single response carries the common MTU/MPS/credits and result.
 * NOTE(review): this excerpt elides several lines of the original body.
 */
5999 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
6000 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6003 struct l2cap_ecred_conn_req *req = (void *) data;
6005 struct l2cap_ecred_conn_rsp rsp;
6006 __le16 dcid[L2CAP_ECRED_MAX_CID];
6008 struct l2cap_chan *chan, *pchan;
/* The payload after the fixed header must be a whole number of SCIDs. */
6018 if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6019 result = L2CAP_CR_LE_INVALID_PARAMS;
6023 cmd_len -= sizeof(*req);
6024 num_scid = cmd_len / sizeof(u16);
6026 if (num_scid > ARRAY_SIZE(pdu.dcid)) {
6027 result = L2CAP_CR_LE_INVALID_PARAMS;
6031 mtu = __le16_to_cpu(req->mtu);
6032 mps = __le16_to_cpu(req->mps);
6034 if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
6035 result = L2CAP_CR_LE_UNACCEPT_PARAMS;
6041 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
6044 * Valid range: 0x0001-0x00ff
6046 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
6048 if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
6049 result = L2CAP_CR_LE_BAD_PSM;
6053 BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
6055 memset(&pdu, 0, sizeof(pdu));
6057 /* Check if we have socket listening on psm */
6058 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
6059 &conn->hcon->dst, LE_LINK);
6061 result = L2CAP_CR_LE_BAD_PSM;
6065 mutex_lock(&conn->chan_lock);
6066 l2cap_chan_lock(pchan);
6068 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
6070 result = L2CAP_CR_LE_AUTHENTICATION;
6074 result = L2CAP_CR_LE_SUCCESS;
/* Create one channel per requested SCID; a failed SCID leaves its
 * pdu.dcid slot at 0x0000 so the peer can tell which ones failed.
 */
6076 for (i = 0; i < num_scid; i++) {
6077 u16 scid = __le16_to_cpu(req->scid[i]);
6079 BT_DBG("scid[%d] 0x%4.4x", i, scid);
6081 pdu.dcid[i] = 0x0000;
6082 len += sizeof(*pdu.dcid);
6084 /* Check for valid dynamic CID range */
6085 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
6086 result = L2CAP_CR_LE_INVALID_SCID;
6090 /* Check if we already have channel with that dcid */
6091 if (__l2cap_get_chan_by_dcid(conn, scid)) {
6092 result = L2CAP_CR_LE_SCID_IN_USE;
6096 chan = pchan->ops->new_connection(pchan);
6098 result = L2CAP_CR_LE_NO_MEM;
6102 bacpy(&chan->src, &conn->hcon->src);
6103 bacpy(&chan->dst, &conn->hcon->dst);
6104 chan->src_type = bdaddr_src_type(conn->hcon);
6105 chan->dst_type = bdaddr_dst_type(conn->hcon);
6109 chan->remote_mps = mps;
6111 __l2cap_chan_add(conn, chan);
6113 l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
/* Fill the common response fields once, from the first channel. */
6116 if (!pdu.rsp.credits) {
6117 pdu.rsp.mtu = cpu_to_le16(chan->imtu);
6118 pdu.rsp.mps = cpu_to_le16(chan->mps);
6119 pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
6122 pdu.dcid[i] = cpu_to_le16(chan->scid);
6124 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
6126 chan->ident = cmd->ident;
6127 chan->mode = L2CAP_MODE_EXT_FLOWCTL;
6129 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6130 l2cap_state_change(chan, BT_CONNECT2);
6132 chan->ops->defer(chan);
6134 l2cap_chan_ready(chan);
6139 l2cap_chan_unlock(pchan);
6140 mutex_unlock(&conn->chan_lock);
6141 l2cap_chan_put(pchan);
6144 pdu.rsp.result = cpu_to_le16(result);
6149 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
6150 sizeof(pdu.rsp) + len, &pdu);
/* Handle an Enhanced Credit Based Connection Response.  Walks every
 * pending EXT_FLOWCTL channel matching the request ident, consumes one
 * DCID from the response per channel, and marks each ready, retries at
 * higher security, or deletes it depending on the result code.
 * NOTE(review): this excerpt elides several lines of the original body.
 */
6155 static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
6156 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6159 struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6160 struct hci_conn *hcon = conn->hcon;
6161 u16 mtu, mps, credits, result;
6162 struct l2cap_chan *chan, *tmp;
6163 int err = 0, sec_level;
6166 if (cmd_len < sizeof(*rsp))
6169 mtu = __le16_to_cpu(rsp->mtu);
6170 mps = __le16_to_cpu(rsp->mps);
6171 credits = __le16_to_cpu(rsp->credits);
6172 result = __le16_to_cpu(rsp->result);
6174 BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
6177 mutex_lock(&conn->chan_lock);
/* cmd_len now counts only the DCID array that follows the header. */
6179 cmd_len -= sizeof(*rsp);
6181 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6184 if (chan->ident != cmd->ident ||
6185 chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
6186 chan->state == BT_CONNECTED)
6189 l2cap_chan_lock(chan);
6191 /* Check that there is a dcid for each pending channel */
6192 if (cmd_len < sizeof(dcid)) {
6193 l2cap_chan_del(chan, ECONNREFUSED);
6194 l2cap_chan_unlock(chan);
6198 dcid = __le16_to_cpu(rsp->dcid[i++]);
6199 cmd_len -= sizeof(u16);
6201 BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
6203 /* Check if dcid is already in use */
6204 if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
6205 /* If a device receives a
6206 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
6207 * already-assigned Destination CID, then both the
6208 * original channel and the new channel shall be
6209 * immediately discarded and not used.
6211 l2cap_chan_del(chan, ECONNREFUSED);
6212 l2cap_chan_unlock(chan);
6213 chan = __l2cap_get_chan_by_dcid(conn, dcid);
6214 l2cap_chan_lock(chan);
6215 l2cap_chan_del(chan, ECONNRESET);
6216 l2cap_chan_unlock(chan);
6221 case L2CAP_CR_LE_AUTHENTICATION:
6222 case L2CAP_CR_LE_ENCRYPTION:
6223 /* If we already have MITM protection we can't do
6226 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
6227 l2cap_chan_del(chan, ECONNREFUSED);
6231 sec_level = hcon->sec_level + 1;
6232 if (chan->sec_level < sec_level)
6233 chan->sec_level = sec_level;
6235 /* We'll need to send a new Connect Request */
6236 clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
6238 smp_conn_security(hcon, chan->sec_level);
6241 case L2CAP_CR_LE_BAD_PSM:
6242 l2cap_chan_del(chan, ECONNREFUSED);
6246 /* If dcid was not set it means channels was refused */
6248 l2cap_chan_del(chan, ECONNREFUSED);
6255 chan->remote_mps = mps;
6256 chan->tx_credits = credits;
6257 l2cap_chan_ready(chan);
6261 l2cap_chan_unlock(chan);
6264 mutex_unlock(&conn->chan_lock);
/* Handle an ECRED Reconfigure Request: the peer proposes a new MTU/MPS
 * for one or more channels (identified by our DCIDs).  Per spec, an MTU
 * decrease on any included channel must be refused.
 *
 * Fix: the trailing-SCID-list length check used
 *     cmd_len - sizeof(*req) % sizeof(u16)
 * which, by operator precedence, evaluates as
 *     cmd_len - (sizeof(*req) % sizeof(u16))
 * and never validates the array length.  Parenthesized to match the
 * equivalent check in l2cap_ecred_conn_req().
 * NOTE(review): this excerpt elides several lines of the original body.
 */
6269 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6270 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6273 struct l2cap_ecred_reconf_req *req = (void *) data;
6274 struct l2cap_ecred_reconf_rsp rsp;
6275 u16 mtu, mps, result;
6276 struct l2cap_chan *chan;
/* Payload after the fixed header must be a whole number of SCIDs. */
6282 if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6283 result = L2CAP_CR_LE_INVALID_PARAMS;
6287 mtu = __le16_to_cpu(req->mtu);
6288 mps = __le16_to_cpu(req->mps);
6290 BT_DBG("mtu %u mps %u", mtu, mps);
6292 if (mtu < L2CAP_ECRED_MIN_MTU) {
6293 result = L2CAP_RECONF_INVALID_MTU;
6297 if (mps < L2CAP_ECRED_MIN_MPS) {
6298 result = L2CAP_RECONF_INVALID_MPS;
6302 cmd_len -= sizeof(*req);
6303 num_scid = cmd_len / sizeof(u16);
6304 result = L2CAP_RECONF_SUCCESS;
6306 for (i = 0; i < num_scid; i++) {
6309 scid = __le16_to_cpu(req->scid[i]);
6313 chan = __l2cap_get_chan_by_dcid(conn, scid);
6317 /* If the MTU value is decreased for any of the included
6318 * channels, then the receiver shall disconnect all
6319 * included channels.
6321 if (chan->omtu > mtu) {
6322 BT_ERR("chan %p decreased MTU %u -> %u", chan,
6324 result = L2CAP_RECONF_INVALID_MTU;
6328 chan->remote_mps = mps;
6332 rsp.result = cpu_to_le16(result);
6334 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
/* Handle an ECRED Reconfigure Response.  On an error result, all
 * channels matching the request ident are torn down.
 *
 * Fix: the debug trace printed the raw little-endian field
 * (rsp->result) instead of the already CPU-converted local `result`,
 * which logs the byte-swapped value on big-endian hosts (and is a
 * sparse __le16 endianness violation).
 * NOTE(review): this excerpt elides several lines of the original body.
 */
6340 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6341 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6344 struct l2cap_chan *chan, *tmp;
6345 struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6348 if (cmd_len < sizeof(*rsp))
6351 result = __le16_to_cpu(rsp->result);
6353 BT_DBG("result 0x%4.4x", result);
6358 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6359 if (chan->ident != cmd->ident)
6362 l2cap_chan_del(chan, ECONNRESET);
/* Handle an LE Command Reject: the peer rejected one of our signaling
 * commands, so find the pending channel by ident and fail it with
 * ECONNREFUSED.
 * NOTE(review): this excerpt elides a few lines of the original body.
 */
6368 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6369 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6372 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6373 struct l2cap_chan *chan;
6375 if (cmd_len < sizeof(*rej))
6378 mutex_lock(&conn->chan_lock);
6380 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
/* Take a reference only if the channel is not already being freed. */
6384 chan = l2cap_chan_hold_unless_zero(chan);
6388 l2cap_chan_lock(chan);
6389 l2cap_chan_del(chan, ECONNREFUSED);
6390 l2cap_chan_unlock(chan);
6391 l2cap_chan_put(chan);
6394 mutex_unlock(&conn->chan_lock);
/* Dispatch one LE signaling command to its handler.  Unknown opcodes
 * are logged (and presumably rejected by the caller).
 */
6398 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6399 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6404 switch (cmd->code) {
6405 case L2CAP_COMMAND_REJ:
6406 l2cap_le_command_rej(conn, cmd, cmd_len, data);
6409 case L2CAP_CONN_PARAM_UPDATE_REQ:
6410 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6413 case L2CAP_CONN_PARAM_UPDATE_RSP:
6416 case L2CAP_LE_CONN_RSP:
6417 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6420 case L2CAP_LE_CONN_REQ:
6421 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6424 case L2CAP_LE_CREDITS:
6425 err = l2cap_le_credits(conn, cmd, cmd_len, data);
6428 case L2CAP_ECRED_CONN_REQ:
6429 err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6432 case L2CAP_ECRED_CONN_RSP:
6433 err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6436 case L2CAP_ECRED_RECONF_REQ:
6437 err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6440 case L2CAP_ECRED_RECONF_RSP:
6441 err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6444 case L2CAP_DISCONN_REQ:
6445 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6448 case L2CAP_DISCONN_RSP:
6449 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6453 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse one inbound LE signaling PDU: exactly one command per PDU on
 * the LE signaling channel.  Malformed or failing commands are answered
 * with a Command Reject (Not Understood).
 * NOTE(review): this excerpt elides a few lines of the original body.
 */
6461 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
6462 struct sk_buff *skb)
6464 struct hci_conn *hcon = conn->hcon;
6465 struct l2cap_cmd_hdr *cmd;
6469 if (hcon->type != LE_LINK)
6472 if (skb->len < L2CAP_CMD_HDR_SIZE)
6475 cmd = (void *) skb->data;
6476 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6478 len = le16_to_cpu(cmd->len);
6480 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* LE signaling carries a single command; its length must cover the
 * rest of the PDU exactly, and ident 0 is reserved.
 */
6482 if (len != skb->len || !cmd->ident) {
6483 BT_DBG("corrupted command");
6487 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
6489 struct l2cap_cmd_rej_unk rej;
6491 BT_ERR("Wrong link type (%d)", err);
6493 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6494 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Send a Command Reject (Not Understood) for the given request ident. */
6502 static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
6504 struct l2cap_cmd_rej_unk rej;
6506 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6507 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Parse an inbound BR/EDR signaling PDU, which may contain several
 * commands back to back.  Each command is dispatched in turn; corrupted
 * or failing commands trigger a Command Reject.
 * NOTE(review): this excerpt elides a few lines of the original body.
 */
6510 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
6511 struct sk_buff *skb)
6513 struct hci_conn *hcon = conn->hcon;
6514 struct l2cap_cmd_hdr *cmd;
/* Give raw sockets a copy of the signaling traffic first. */
6517 l2cap_raw_recv(conn, skb);
6519 if (hcon->type != ACL_LINK)
6522 while (skb->len >= L2CAP_CMD_HDR_SIZE) {
6525 cmd = (void *) skb->data;
6526 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6528 len = le16_to_cpu(cmd->len);
6530 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
6533 if (len > skb->len || !cmd->ident) {
6534 BT_DBG("corrupted command");
6535 l2cap_sig_send_rej(conn, cmd->ident);
/* Skip the bogus payload (clamped to what is present) and go on. */
6536 skb_pull(skb, len > skb->len ? skb->len : len);
6540 err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
6542 BT_ERR("Wrong link type (%d)", err);
6543 l2cap_sig_send_rej(conn, cmd->ident);
/* Trailing bytes smaller than a command header: reject with ident 0. */
6550 BT_DBG("corrupted command");
6551 l2cap_sig_send_rej(conn, 0);
/* Verify (and strip) the CRC16 FCS trailer of an ERTM/streaming frame.
 * The CRC covers the L2CAP header (enhanced or extended, depending on
 * FLAG_EXT_CTRL) plus the payload.
 * NOTE(review): this excerpt elides a few lines of the original body.
 */
6558 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
6560 u16 our_fcs, rcv_fcs;
6563 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6564 hdr_size = L2CAP_EXT_HDR_SIZE;
6566 hdr_size = L2CAP_ENH_HDR_SIZE;
6568 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off first; the two FCS bytes are then read from just
 * past the (shortened) data, which is still within the buffer.
 */
6569 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6570 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6571 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6573 if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) by sending whatever carries the F-bit back:
 * RNR if we are locally busy, otherwise pending I-frames, or a plain RR
 * if nothing else conveyed the F-bit.
 */
6579 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
6581 struct l2cap_ctrl control;
6583 BT_DBG("chan %p", chan);
6585 memset(&control, 0, sizeof(control));
6588 control.reqseq = chan->buffer_seq;
6589 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6591 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6592 control.super = L2CAP_SUPER_RNR;
6593 l2cap_send_sframe(chan, &control);
/* Peer just left busy state: restart retransmission timing if frames
 * are still unacknowledged.
 */
6596 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
6597 chan->unacked_frames > 0)
6598 __set_retrans_timer(chan);
6600 /* Send pending iframes */
6601 l2cap_ertm_send(chan);
6603 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
6604 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
6605 /* F-bit wasn't sent in an s-frame or i-frame yet, so
6608 control.super = L2CAP_SUPER_RR;
6609 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list, maintaining *last_frag as the
 * tail pointer and keeping skb's aggregate length/truesize accounting
 * consistent with the added fragment.
 */
6613 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
6614 struct sk_buff **last_frag)
6616 /* skb->len reflects data in skb as well as all fragments
6617 * skb->data_len reflects only data in fragments
6619 if (!skb_has_frag_list(skb))
6620 skb_shinfo(skb)->frag_list = new_frag;
6622 new_frag->next = NULL;
6624 (*last_frag)->next = new_frag;
6625 *last_frag = new_frag;
6627 skb->len += new_frag->len;
6628 skb->data_len += new_frag->len;
6629 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from segmented ERTM I-frames according to the SAR
 * bits: unsegmented frames are delivered directly; start/continue/end
 * frames are accumulated on chan->sdu via append_skb_frag() and
 * delivered once complete.  On error paths the partial SDU is freed.
 * NOTE(review): this excerpt elides several lines of the original body.
 */
6632 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
6633 struct l2cap_ctrl *control)
6637 switch (control->sar) {
6638 case L2CAP_SAR_UNSEGMENTED:
6642 err = chan->ops->recv(chan, skb);
6645 case L2CAP_SAR_START:
/* The first segment carries a 2-byte total-SDU-length prefix. */
6649 if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
6652 chan->sdu_len = get_unaligned_le16(skb->data);
6653 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* An SDU larger than our MTU is a protocol violation. */
6655 if (chan->sdu_len > chan->imtu) {
6660 if (skb->len >= chan->sdu_len)
6664 chan->sdu_last_frag = skb;
6670 case L2CAP_SAR_CONTINUE:
6674 append_skb_frag(chan->sdu, skb,
6675 &chan->sdu_last_frag);
6678 if (chan->sdu->len >= chan->sdu_len)
6688 append_skb_frag(chan->sdu, skb,
6689 &chan->sdu_last_frag);
/* The final segment must complete the SDU to exactly sdu_len. */
6692 if (chan->sdu->len != chan->sdu_len)
6695 err = chan->ops->recv(chan, chan->sdu);
6698 /* Reassembly complete */
6700 chan->sdu_last_frag = NULL;
/* Error path: drop any partially assembled SDU. */
6708 kfree_skb(chan->sdu);
6710 chan->sdu_last_frag = NULL;
/* Placeholder for re-segmentation after a channel move; body elided in
 * this excerpt (appears to be a stub — TODO confirm against full file).
 */
6717 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy transition into the ERTM transmit state machine.
 * Only meaningful in ERTM mode; other modes ignore busy signalling.
 */
6723 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6727 if (chan->mode != L2CAP_MODE_ERTM)
6730 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6731 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver queued I-frames in sequence order to
 * l2cap_reassemble_sdu() until a gap is hit or local busy sets in.
 * Returns to RECV state and acks once the queue is fully drained.
 * NOTE(review): this excerpt elides a few lines of the original body.
 */
6734 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6737 /* Pass sequential frames to l2cap_reassemble_sdu()
6738 * until a gap is encountered.
6741 BT_DBG("chan %p", chan);
6743 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6744 struct sk_buff *skb;
6745 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6746 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6748 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6753 skb_unlink(skb, &chan->srej_q);
6754 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6755 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6760 if (skb_queue_empty(&chan->srej_q)) {
6761 chan->rx_state = L2CAP_RX_STATE_RECV;
6762 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: retransmit the single requested
 * frame, honouring poll/final bits and the retry limit.  Invalid
 * reqseq or exceeding max_tx disconnects the channel.
 * NOTE(review): this excerpt elides a few lines of the original body.
 */
6768 static void l2cap_handle_srej(struct l2cap_chan *chan,
6769 struct l2cap_ctrl *control)
6771 struct sk_buff *skb;
6773 BT_DBG("chan %p, control %p", chan, control);
/* An SREJ for the next (not yet sent) sequence number is invalid. */
6775 if (control->reqseq == chan->next_tx_seq) {
6776 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6777 l2cap_send_disconn_req(chan, ECONNRESET);
6781 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6784 BT_DBG("Seq %d not available for retransmission",
6789 if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6790 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6791 l2cap_send_disconn_req(chan, ECONNRESET);
6795 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6797 if (control->poll) {
6798 l2cap_pass_to_tx(chan, control);
/* Poll demands a final-bit reply; retransmit and flush pending. */
6800 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6801 l2cap_retransmit(chan, control);
6802 l2cap_ertm_send(chan);
6804 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6805 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6806 chan->srej_save_reqseq = control->reqseq;
6809 l2cap_pass_to_tx_fbit(chan, control);
6811 if (control->final) {
/* Only retransmit if this final doesn't match a saved SREJ ack. */
6812 if (chan->srej_save_reqseq != control->reqseq ||
6813 !test_and_clear_bit(CONN_SREJ_ACT,
6815 l2cap_retransmit(chan, control);
6817 l2cap_retransmit(chan, control);
6818 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6819 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6820 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: retransmit everything from reqseq
 * onward, subject to the retry limit.  Invalid reqseq or exceeding
 * max_tx disconnects the channel.
 * NOTE(review): this excerpt elides a few lines of the original body.
 */
6826 static void l2cap_handle_rej(struct l2cap_chan *chan,
6827 struct l2cap_ctrl *control)
6829 struct sk_buff *skb;
6831 BT_DBG("chan %p, control %p", chan, control);
/* A REJ for the next (not yet sent) sequence number is invalid. */
6833 if (control->reqseq == chan->next_tx_seq) {
6834 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6835 l2cap_send_disconn_req(chan, ECONNRESET);
6839 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6841 if (chan->max_tx && skb &&
6842 bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6843 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6844 l2cap_send_disconn_req(chan, ECONNRESET);
6848 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6850 l2cap_pass_to_tx(chan, control);
6852 if (control->final) {
/* A final that doesn't clear REJ_ACT still requires retransmission. */
6853 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6854 l2cap_retransmit_all(chan, control);
6856 l2cap_retransmit_all(chan, control);
6857 l2cap_ertm_send(chan);
6858 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6859 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's TxSeq relative to the receive window:
 * expected, duplicate, unexpected (gap → SREJ), or invalid.  In the
 * SREJ-sent state, additionally distinguishes expected/duplicate/
 * unexpected SREJ retransmissions.  Returns one of the L2CAP_TXSEQ_*
 * classification codes.
 */
6863 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6865 BT_DBG("chan %p, txseq %d", chan, txseq);
6867 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6868 chan->expected_tx_seq);
6870 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6871 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6873 /* See notes below regarding "double poll" and
6876 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6877 BT_DBG("Invalid/Ignore - after SREJ");
6878 return L2CAP_TXSEQ_INVALID_IGNORE;
6880 BT_DBG("Invalid - in window after SREJ sent");
6881 return L2CAP_TXSEQ_INVALID;
6885 if (chan->srej_list.head == txseq) {
6886 BT_DBG("Expected SREJ");
6887 return L2CAP_TXSEQ_EXPECTED_SREJ;
6890 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6891 BT_DBG("Duplicate SREJ - txseq already stored");
6892 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6895 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6896 BT_DBG("Unexpected SREJ - not requested");
6897 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6901 if (chan->expected_tx_seq == txseq) {
6902 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6904 BT_DBG("Invalid - txseq outside tx window");
6905 return L2CAP_TXSEQ_INVALID;
6908 return L2CAP_TXSEQ_EXPECTED;
/* A txseq before expected_tx_seq (modulo the window) was already
 * received: a duplicate.
 */
6912 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6913 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6914 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6915 return L2CAP_TXSEQ_DUPLICATE;
6918 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6919 /* A source of invalid packets is a "double poll" condition,
6920 * where delays cause us to send multiple poll packets. If
6921 * the remote stack receives and processes both polls,
6922 * sequence numbers can wrap around in such a way that a
6923 * resent frame has a sequence number that looks like new data
6924 * with a sequence gap. This would trigger an erroneous SREJ
6927 * Fortunately, this is impossible with a tx window that's
6928 * less than half of the maximum sequence number, which allows
6929 * invalid frames to be safely ignored.
6931 * With tx window sizes greater than half of the tx window
6932 * maximum, the frame is invalid and cannot be ignored. This
6933 * causes a disconnect.
6936 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6937 BT_DBG("Invalid/Ignore - txseq outside tx window");
6938 return L2CAP_TXSEQ_INVALID_IGNORE;
6940 BT_DBG("Invalid - txseq outside tx window");
6941 return L2CAP_TXSEQ_INVALID;
6944 BT_DBG("Unexpected - txseq indicates missing frames");
6945 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive state machine, RECV state: process an incoming I-frame
 * or S-frame event.  Expected I-frames are reassembled and acked;
 * sequence gaps transition to SREJ_SENT; RR/REJ/SREJ/RNR drive the
 * transmit side.  Frames not queued for later (skb_in_use) are freed
 * before returning.
 * NOTE(review): this excerpt elides several lines of the original body.
 */
6949 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6950 struct l2cap_ctrl *control,
6951 struct sk_buff *skb, u8 event)
6953 struct l2cap_ctrl local_control;
6955 bool skb_in_use = false;
6957 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6961 case L2CAP_EV_RECV_IFRAME:
6962 switch (l2cap_classify_txseq(chan, control->txseq)) {
6963 case L2CAP_TXSEQ_EXPECTED:
6964 l2cap_pass_to_tx(chan, control);
6966 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6967 BT_DBG("Busy, discarding expected seq %d",
6972 chan->expected_tx_seq = __next_seq(chan,
6975 chan->buffer_seq = chan->expected_tx_seq;
6978 /* l2cap_reassemble_sdu may free skb, hence invalidate
6979 * control, so make a copy in advance to use it after
6980 * l2cap_reassemble_sdu returns and to avoid the race
6981 * condition, for example:
6983 * The current thread calls:
6984 * l2cap_reassemble_sdu
6985 * chan->ops->recv == l2cap_sock_recv_cb
6986 * __sock_queue_rcv_skb
6987 * Another thread calls:
6991 * Then the current thread tries to access control, but
6992 * it was freed by skb_free_datagram.
6994 local_control = *control;
6995 err = l2cap_reassemble_sdu(chan, skb, control);
6999 if (local_control.final) {
7000 if (!test_and_clear_bit(CONN_REJ_ACT,
7001 &chan->conn_state)) {
7002 local_control.final = 0;
7003 l2cap_retransmit_all(chan, &local_control);
7004 l2cap_ertm_send(chan);
7008 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
7009 l2cap_send_ack(chan);
7011 case L2CAP_TXSEQ_UNEXPECTED:
7012 l2cap_pass_to_tx(chan, control);
7014 /* Can't issue SREJ frames in the local busy state.
7015 * Drop this frame, it will be seen as missing
7016 * when local busy is exited.
7018 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
7019 BT_DBG("Busy, discarding unexpected seq %d",
7024 /* There was a gap in the sequence, so an SREJ
7025 * must be sent for each missing frame. The
7026 * current frame is stored for later use.
7028 skb_queue_tail(&chan->srej_q, skb);
7030 BT_DBG("Queued %p (queue len %d)", skb,
7031 skb_queue_len(&chan->srej_q));
7033 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
7034 l2cap_seq_list_clear(&chan->srej_list);
7035 l2cap_send_srej(chan, control->txseq);
7037 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
7039 case L2CAP_TXSEQ_DUPLICATE:
7040 l2cap_pass_to_tx(chan, control);
7042 case L2CAP_TXSEQ_INVALID_IGNORE:
7044 case L2CAP_TXSEQ_INVALID:
7046 l2cap_send_disconn_req(chan, ECONNRESET);
7050 case L2CAP_EV_RECV_RR:
7051 l2cap_pass_to_tx(chan, control);
7052 if (control->final) {
7053 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7055 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
7056 !__chan_is_moving(chan)) {
7058 l2cap_retransmit_all(chan, control);
7061 l2cap_ertm_send(chan);
7062 } else if (control->poll) {
7063 l2cap_send_i_or_rr_or_rnr(chan);
7065 if (test_and_clear_bit(CONN_REMOTE_BUSY,
7066 &chan->conn_state) &&
7067 chan->unacked_frames)
7068 __set_retrans_timer(chan);
7070 l2cap_ertm_send(chan);
7073 case L2CAP_EV_RECV_RNR:
7074 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7075 l2cap_pass_to_tx(chan, control);
7076 if (control && control->poll) {
7077 set_bit(CONN_SEND_FBIT, &chan->conn_state);
7078 l2cap_send_rr_or_rnr(chan, 0);
7080 __clear_retrans_timer(chan);
7081 l2cap_seq_list_clear(&chan->retrans_list);
7083 case L2CAP_EV_RECV_REJ:
7084 l2cap_handle_rej(chan, control);
7086 case L2CAP_EV_RECV_SREJ:
7087 l2cap_handle_srej(chan, control);
/* Any frame not stored in a queue above must be released here. */
7093 if (skb && !skb_in_use) {
7094 BT_DBG("Freeing %p", skb);
/* ERTM receive state machine, SREJ_SENT state: collect out-of-order
 * frames into srej_q while awaiting SREJ retransmissions; when an
 * expected SREJ frame arrives, drain the queue in order via
 * l2cap_rx_queued_iframes().  Frames not queued (skb_in_use) are freed
 * before returning.
 * NOTE(review): this excerpt elides several lines of the original body.
 */
7101 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
7102 struct l2cap_ctrl *control,
7103 struct sk_buff *skb, u8 event)
7106 u16 txseq = control->txseq;
7107 bool skb_in_use = false;
7109 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7113 case L2CAP_EV_RECV_IFRAME:
7114 switch (l2cap_classify_txseq(chan, txseq)) {
7115 case L2CAP_TXSEQ_EXPECTED:
7116 /* Keep frame for reassembly later */
7117 l2cap_pass_to_tx(chan, control);
7118 skb_queue_tail(&chan->srej_q, skb);
7120 BT_DBG("Queued %p (queue len %d)", skb,
7121 skb_queue_len(&chan->srej_q));
7123 chan->expected_tx_seq = __next_seq(chan, txseq);
7125 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* This is the retransmission we asked for: drop it from the SREJ
 * list, queue it, and try to drain in-order frames.
 */
7126 l2cap_seq_list_pop(&chan->srej_list);
7128 l2cap_pass_to_tx(chan, control);
7129 skb_queue_tail(&chan->srej_q, skb);
7131 BT_DBG("Queued %p (queue len %d)", skb,
7132 skb_queue_len(&chan->srej_q));
7134 err = l2cap_rx_queued_iframes(chan);
7139 case L2CAP_TXSEQ_UNEXPECTED:
7140 /* Got a frame that can't be reassembled yet.
7141 * Save it for later, and send SREJs to cover
7142 * the missing frames.
7144 skb_queue_tail(&chan->srej_q, skb);
7146 BT_DBG("Queued %p (queue len %d)", skb,
7147 skb_queue_len(&chan->srej_q));
7149 l2cap_pass_to_tx(chan, control);
7150 l2cap_send_srej(chan, control->txseq);
7152 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
7153 /* This frame was requested with an SREJ, but
7154 * some expected retransmitted frames are
7155 * missing. Request retransmission of missing
7158 skb_queue_tail(&chan->srej_q, skb);
7160 BT_DBG("Queued %p (queue len %d)", skb,
7161 skb_queue_len(&chan->srej_q));
7163 l2cap_pass_to_tx(chan, control);
7164 l2cap_send_srej_list(chan, control->txseq);
7166 case L2CAP_TXSEQ_DUPLICATE_SREJ:
7167 /* We've already queued this frame. Drop this copy. */
7168 l2cap_pass_to_tx(chan, control);
7170 case L2CAP_TXSEQ_DUPLICATE:
7171 /* Expecting a later sequence number, so this frame
7172 * was already received. Ignore it completely.
7175 case L2CAP_TXSEQ_INVALID_IGNORE:
7177 case L2CAP_TXSEQ_INVALID:
7179 l2cap_send_disconn_req(chan, ECONNRESET);
7183 case L2CAP_EV_RECV_RR:
7184 l2cap_pass_to_tx(chan, control);
7185 if (control->final) {
7186 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7188 if (!test_and_clear_bit(CONN_REJ_ACT,
7189 &chan->conn_state)) {
7191 l2cap_retransmit_all(chan, control);
7194 l2cap_ertm_send(chan);
7195 } else if (control->poll) {
7196 if (test_and_clear_bit(CONN_REMOTE_BUSY,
7197 &chan->conn_state) &&
7198 chan->unacked_frames) {
7199 __set_retrans_timer(chan);
7202 set_bit(CONN_SEND_FBIT, &chan->conn_state);
7203 l2cap_send_srej_tail(chan);
7205 if (test_and_clear_bit(CONN_REMOTE_BUSY,
7206 &chan->conn_state) &&
7207 chan->unacked_frames)
7208 __set_retrans_timer(chan);
7210 l2cap_send_ack(chan);
7213 case L2CAP_EV_RECV_RNR:
7214 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7215 l2cap_pass_to_tx(chan, control);
7216 if (control->poll) {
7217 l2cap_send_srej_tail(chan);
7219 struct l2cap_ctrl rr_control;
7220 memset(&rr_control, 0, sizeof(rr_control));
7221 rr_control.sframe = 1;
7222 rr_control.super = L2CAP_SUPER_RR;
7223 rr_control.reqseq = chan->buffer_seq;
7224 l2cap_send_sframe(chan, &rr_control);
7228 case L2CAP_EV_RECV_REJ:
7229 l2cap_handle_rej(chan, control);
7231 case L2CAP_EV_RECV_SREJ:
7232 l2cap_handle_srej(chan, control);
/* Any frame not stored in srej_q above must be released here. */
7236 if (skb && !skb_in_use) {
7237 BT_DBG("Freeing %p", skb);
/* Complete an AMP channel move: return the RX state machine to RECV,
 * pick the MTU of whichever controller now carries the link (block_mtu
 * for the high-speed hcon, acl_mtu otherwise) and re-segment pending
 * TX data for the new MTU.
 */
7244 static int l2cap_finish_move(struct l2cap_chan *chan)
7246 BT_DBG("chan %p", chan);
7248 chan->rx_state = L2CAP_RX_STATE_RECV;
7251 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7253 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7255 return l2cap_resegment(chan);
/* RX handler for the WAIT_P state (waiting for a frame with the P bit
 * during a channel move): on poll, ack up to reqseq, rewind next_tx_seq,
 * finish the move, answer with an F-bit frame and then re-dispatch the
 * triggering event through the normal RECV-state handler.
 * NOTE(review): lines elided in this extract; comments only added.
 */
7258 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
7259 struct l2cap_ctrl *control,
7260 struct sk_buff *skb, u8 event)
7264 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7270 l2cap_process_reqseq(chan, control->reqseq);
7272 if (!skb_queue_empty(&chan->tx_q))
7273 chan->tx_send_head = skb_peek(&chan->tx_q);
7275 chan->tx_send_head = NULL;
7277 /* Rewind next_tx_seq to the point expected
7280 chan->next_tx_seq = control->reqseq;
7281 chan->unacked_frames = 0;
7283 err = l2cap_finish_move(chan);
7287 set_bit(CONN_SEND_FBIT, &chan->conn_state);
7288 l2cap_send_i_or_rr_or_rnr(chan);
7290 if (event == L2CAP_EV_RECV_IFRAME)
/* Re-run the event (without the skb) through the RECV-state handler */
7293 return l2cap_rx_state_recv(chan, control, NULL, event);
/* RX handler for the WAIT_F state (waiting for the F bit after our poll
 * during a channel move): ignore frames without F, otherwise resume RECV,
 * rewind the TX window to reqseq, adopt the new link MTU, re-segment and
 * process the frame through the RECV-state handler.
 * NOTE(review): lines elided in this extract; comments only added.
 */
7296 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
7297 struct l2cap_ctrl *control,
7298 struct sk_buff *skb, u8 event)
7302 if (!control->final)
7305 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7307 chan->rx_state = L2CAP_RX_STATE_RECV;
7308 l2cap_process_reqseq(chan, control->reqseq);
7310 if (!skb_queue_empty(&chan->tx_q))
7311 chan->tx_send_head = skb_peek(&chan->tx_q);
7313 chan->tx_send_head = NULL;
7315 /* Rewind next_tx_seq to the point expected
7318 chan->next_tx_seq = control->reqseq;
7319 chan->unacked_frames = 0;
/* block_mtu when the link moved to an AMP controller, acl_mtu otherwise */
7322 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7324 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7326 err = l2cap_resegment(chan);
7329 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Return true if reqseq acknowledges only frames inside the current
 * unacked TX window, i.e. it refers to a frame that has been sent but
 * not yet acknowledged. Sequence arithmetic is modulo the ERTM window.
 */
7334 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7336 /* Make sure reqseq is for a packet that has been sent but not acked */
7339 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7340 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM RX dispatcher: validate the frame's reqseq against the
 * unacked window, then route the event to the handler for the channel's
 * current rx_state. An invalid reqseq is a protocol violation and
 * disconnects the channel.
 */
7343 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7344 struct sk_buff *skb, u8 event)
7348 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7349 control, skb, event, chan->rx_state);
7351 if (__valid_reqseq(chan, control->reqseq)) {
7352 switch (chan->rx_state) {
7353 case L2CAP_RX_STATE_RECV:
7354 err = l2cap_rx_state_recv(chan, control, skb, event);
7356 case L2CAP_RX_STATE_SREJ_SENT:
7357 err = l2cap_rx_state_srej_sent(chan, control, skb,
7360 case L2CAP_RX_STATE_WAIT_P:
7361 err = l2cap_rx_state_wait_p(chan, control, skb, event);
7363 case L2CAP_RX_STATE_WAIT_F:
7364 err = l2cap_rx_state_wait_f(chan, control, skb, event);
7371 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
7372 control->reqseq, chan->next_tx_seq,
7373 chan->expected_ack_seq);
7374 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode RX: only in-sequence I-frames are reassembled; anything
 * else discards the partial SDU and resynchronizes on txseq. Unlike ERTM
 * there is no retransmission, so gaps are simply skipped over.
 */
7380 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7381 struct sk_buff *skb)
7383 /* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
7384 * the txseq field in advance to use it after l2cap_reassemble_sdu
7385 * returns and to avoid the race condition, for example:
7387 * The current thread calls:
7388 * l2cap_reassemble_sdu
7389 * chan->ops->recv == l2cap_sock_recv_cb
7390 * __sock_queue_rcv_skb
7391 * Another thread calls:
7395 * Then the current thread tries to access control, but it was freed by
7396 * skb_free_datagram.
7398 u16 txseq = control->txseq;
7400 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
7403 if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
7404 l2cap_pass_to_tx(chan, control);
7406 BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
7407 __next_seq(chan, chan->buffer_seq));
7409 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
7411 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence frame: drop any partially reassembled SDU */
7414 kfree_skb(chan->sdu);
7417 chan->sdu_last_frag = NULL;
7421 BT_DBG("Freeing %p", skb);
/* Resync receive bookkeeping on the txseq we actually saw */
7426 chan->last_acked_seq = txseq;
7427 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Entry point for ERTM/streaming data frames: unpack and validate the
 * control field, FCS and length, then dispatch I-frames and S-frames to
 * l2cap_rx()/l2cap_stream_rx(). Malformed frames that cannot be recovered
 * by retransmission disconnect the channel.
 * NOTE(review): lines elided in this extract; comments only added.
 */
7432 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7434 struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
7438 __unpack_control(chan, skb);
7443 * We can just drop the corrupted I-frame here.
7444 * Receiver will miss it and start proper recovery
7445 * procedures and ask for retransmission.
7447 if (l2cap_check_fcs(chan, skb))
/* Payload length excludes the SDU-length prefix and trailing FCS */
7450 if (!control->sframe && control->sar == L2CAP_SAR_START)
7451 len -= L2CAP_SDULEN_SIZE;
7453 if (chan->fcs == L2CAP_FCS_CRC16)
7454 len -= L2CAP_FCS_SIZE;
7456 if (len > chan->mps) {
7457 l2cap_send_disconn_req(chan, ECONNRESET);
/* Optional per-channel filter hook may consume/drop the frame */
7461 if (chan->ops->filter) {
7462 if (chan->ops->filter(chan, skb))
7466 if (!control->sframe) {
7469 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
7470 control->sar, control->reqseq, control->final,
7473 /* Validate F-bit - F=0 always valid, F=1 only
7474 * valid in TX WAIT_F
7476 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
7479 if (chan->mode != L2CAP_MODE_STREAMING) {
7480 event = L2CAP_EV_RECV_IFRAME;
7481 err = l2cap_rx(chan, control, skb, event);
7483 err = l2cap_stream_rx(chan, control, skb);
7487 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map S-frame super field to RX state-machine events */
7489 const u8 rx_func_to_event[4] = {
7490 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
7491 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
7494 /* Only I-frames are expected in streaming mode */
7495 if (chan->mode == L2CAP_MODE_STREAMING)
7498 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
7499 control->reqseq, control->final, control->poll,
7503 BT_ERR("Trailing bytes: %d in sframe", len);
7504 l2cap_send_disconn_req(chan, ECONNRESET);
7508 /* Validate F and P bits */
7509 if (control->final && (control->poll ||
7510 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
7513 event = rx_func_to_event[control->super];
7514 if (l2cap_rx(chan, control, skb, event))
7515 l2cap_send_disconn_req(chan, ECONNRESET);
/* Top up the peer's LE flow-control credits: keep the peer able to send
 * roughly imtu/mps + 1 PDUs. If the peer already holds at least that many
 * credits nothing is sent; otherwise the difference is granted via an
 * L2CAP_LE_CREDITS command.
 */
7525 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7527 struct l2cap_conn *conn = chan->conn;
7528 struct l2cap_le_credits pkt;
7531 return_credits = (chan->imtu / chan->mps) + 1;
7533 if (chan->rx_credits >= return_credits)
7536 return_credits -= chan->rx_credits;
7538 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7540 chan->rx_credits += return_credits;
7542 pkt.cid = cpu_to_le16(chan->scid);
7543 pkt.credits = cpu_to_le16(return_credits);
7545 chan->ident = l2cap_get_ident(conn);
7547 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Deliver a fully reassembled LE/ECRED SDU to the channel owner, then
 * replenish the peer's credits. Delivery happens first so credits are
 * only returned once the upper layer has accepted the data.
 */
7550 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7554 BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7556 /* Wait recv to confirm reception before updating the credits */
7557 err = chan->ops->recv(chan, skb);
7559 /* Update credits whenever an SDU is received */
7560 l2cap_chan_le_send_credits(chan);
/* Receive one LE/ECRED flow-control PDU: enforce credits and MTU, parse
 * the SDU-length prefix on the first fragment, reassemble multi-PDU SDUs
 * on chan->sdu and hand complete SDUs to l2cap_ecred_recv(). Errors free
 * any partial SDU internally, so the caller never double-frees.
 * NOTE(review): lines elided in this extract; comments only added.
 */
7565 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7569 if (!chan->rx_credits) {
7570 BT_ERR("No credits to receive LE L2CAP data");
7571 l2cap_send_disconn_req(chan, ECONNRESET);
7575 if (chan->imtu < skb->len) {
7576 BT_ERR("Too big LE L2CAP PDU");
7581 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
7583 /* Update if remote had run out of credits, this should only happens
7584 * if the remote is not using the entire MPS.
7586 if (!chan->rx_credits)
7587 l2cap_chan_le_send_credits(chan);
/* First fragment of an SDU carries a 2-byte SDU length prefix */
7594 sdu_len = get_unaligned_le16(skb->data);
7595 skb_pull(skb, L2CAP_SDULEN_SIZE);
7597 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
7598 sdu_len, skb->len, chan->imtu);
7600 if (sdu_len > chan->imtu) {
7601 BT_ERR("Too big LE L2CAP SDU length received");
7606 if (skb->len > sdu_len) {
7607 BT_ERR("Too much LE L2CAP data received");
7612 if (skb->len == sdu_len)
/* Single-PDU SDU: deliver immediately, no reassembly needed */
7613 return l2cap_ecred_recv(chan, skb);
7616 chan->sdu_len = sdu_len;
7617 chan->sdu_last_frag = skb;
7619 /* Detect if remote is not able to use the selected MPS */
7620 if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
7621 u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
7623 /* Adjust the number of credits */
7624 BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
7625 chan->mps = mps_len;
7626 l2cap_chan_le_send_credits(chan);
7632 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
7633 chan->sdu->len, skb->len, chan->sdu_len);
7635 if (chan->sdu->len + skb->len > chan->sdu_len) {
7636 BT_ERR("Too much LE L2CAP data received");
7641 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
7644 if (chan->sdu->len == chan->sdu_len) {
7645 err = l2cap_ecred_recv(chan, chan->sdu);
7648 chan->sdu_last_frag = NULL;
/* Error path: discard the partial SDU and reset reassembly state */
7656 kfree_skb(chan->sdu);
7658 chan->sdu_last_frag = NULL;
7662 /* We can't return an error here since we took care of the skb
7663 * freeing internally. An error return would cause the caller to
7664 * do a double-free of the skb.
/* Route an incoming data frame to the channel identified by its source
 * CID and dispatch on the channel mode (LE/ECRED flow control, basic,
 * ERTM/streaming). Unknown CIDs are dropped; A2MP gets a channel created
 * on demand. Holds a channel reference + lock for the duration.
 * NOTE(review): lines elided in this extract; comments only added.
 */
7669 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
7670 struct sk_buff *skb)
7672 struct l2cap_chan *chan;
7674 chan = l2cap_get_chan_by_scid(conn, cid);
7676 if (cid == L2CAP_CID_A2MP) {
7677 chan = a2mp_channel_create(conn, skb);
7683 l2cap_chan_hold(chan);
7684 l2cap_chan_lock(chan);
7686 BT_DBG("unknown cid 0x%4.4x", cid);
7687 /* Drop packet and return */
7693 BT_DBG("chan %p, len %d", chan, skb->len);
7695 /* If we receive data on a fixed channel before the info req/rsp
7696 * procedure is done simply assume that the channel is supported
7697 * and mark it as ready.
7699 if (chan->chan_type == L2CAP_CHAN_FIXED)
7700 l2cap_chan_ready(chan);
7702 if (chan->state != BT_CONNECTED)
7705 switch (chan->mode) {
7706 case L2CAP_MODE_LE_FLOWCTL:
7707 case L2CAP_MODE_EXT_FLOWCTL:
7708 if (l2cap_ecred_data_rcv(chan, skb) < 0)
7713 case L2CAP_MODE_BASIC:
7714 /* If socket recv buffers overflows we drop data here
7715 * which is *bad* because L2CAP has to be reliable.
7716 * But we don't have any other choice. L2CAP doesn't
7717 * provide flow control mechanism. */
7719 if (chan->imtu < skb->len) {
7720 BT_ERR("Dropping L2CAP data: receive buffer overflow");
7724 if (!chan->ops->recv(chan, skb))
7728 case L2CAP_MODE_ERTM:
7729 case L2CAP_MODE_STREAMING:
7730 l2cap_data_rcv(chan, skb);
7734 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7742 l2cap_chan_unlock(chan);
7743 l2cap_chan_put(chan);
/* Deliver a connectionless (UCD) frame to a listening channel matching
 * the PSM on this ACL link. Stores the remote address and PSM in the skb
 * control block so recvmsg() can report msg_name. Frames are dropped if
 * no suitable channel exists or the payload exceeds the channel imtu.
 */
7746 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7747 struct sk_buff *skb)
7749 struct hci_conn *hcon = conn->hcon;
7750 struct l2cap_chan *chan;
7752 if (hcon->type != ACL_LINK)
7755 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7760 BT_DBG("chan %p, len %d", chan, skb->len);
7762 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7765 if (chan->imtu < skb->len)
7768 /* Store remote BD_ADDR and PSM for msg_name */
7769 bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7770 bt_cb(skb)->l2cap.psm = psm;
7772 if (!chan->ops->recv(chan, skb)) {
7773 l2cap_chan_put(chan);
7778 l2cap_chan_put(chan);
/* Parse a complete L2CAP frame and dispatch by destination CID:
 * signaling, connectionless, LE signaling, or a data channel. Frames
 * arriving before the HCI link reaches BT_CONNECTED are parked on
 * conn->pending_rx and replayed later by process_pending_rx().
 */
7783 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7785 struct l2cap_hdr *lh = (void *) skb->data;
7786 struct hci_conn *hcon = conn->hcon;
7790 if (hcon->state != BT_CONNECTED) {
7791 BT_DBG("queueing pending rx skb");
7792 skb_queue_tail(&conn->pending_rx, skb);
7796 skb_pull(skb, L2CAP_HDR_SIZE);
7797 cid = __le16_to_cpu(lh->cid);
7798 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length exactly */
7800 if (len != skb->len) {
7805 /* Since we can't actively block incoming LE connections we must
7806 * at least ensure that we ignore incoming data from them.
7808 if (hcon->type == LE_LINK &&
7809 hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
7810 bdaddr_dst_type(hcon))) {
7815 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7818 case L2CAP_CID_SIGNALING:
7819 l2cap_sig_channel(conn, skb);
7822 case L2CAP_CID_CONN_LESS:
7823 psm = get_unaligned((__le16 *) skb->data);
7824 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7825 l2cap_conless_channel(conn, psm, skb);
7828 case L2CAP_CID_LE_SIGNALING:
7829 l2cap_le_sig_channel(conn, skb);
7833 l2cap_data_channel(conn, cid, skb);
/* Workqueue callback: drain frames queued on conn->pending_rx while the
 * link was still connecting and feed them through l2cap_recv_frame().
 */
7838 static void process_pending_rx(struct work_struct *work)
7840 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7842 struct sk_buff *skb;
7846 while ((skb = skb_dequeue(&conn->pending_rx)))
7847 l2cap_recv_frame(conn, skb);
/* Allocate and initialize the per-HCI-link L2CAP connection object:
 * create the HCI channel, pick the MTU from the controller (LE vs ACL),
 * advertise locally supported fixed channels, and set up locks, lists,
 * timers and work items. Returns the existing conn if already present
 * (hcon->l2cap_data checked on entry — line elided in this extract).
 */
7850 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7852 struct l2cap_conn *conn = hcon->l2cap_data;
7853 struct hci_chan *hchan;
7858 hchan = hci_chan_create(hcon);
7862 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
7864 hci_chan_del(hchan);
7868 kref_init(&conn->ref);
7869 hcon->l2cap_data = conn;
7870 conn->hcon = hci_conn_get(hcon);
7871 conn->hchan = hchan;
7873 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7875 switch (hcon->type) {
7877 if (hcon->hdev->le_mtu) {
7878 conn->mtu = hcon->hdev->le_mtu;
7883 conn->mtu = hcon->hdev->acl_mtu;
7887 conn->feat_mask = 0;
7889 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7891 if (hcon->type == ACL_LINK &&
7892 hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7893 conn->local_fixed_chan |= L2CAP_FC_A2MP;
7895 if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7896 (bredr_sc_enabled(hcon->hdev) ||
7897 hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7898 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7900 mutex_init(&conn->ident_lock);
7901 mutex_init(&conn->chan_lock);
7903 INIT_LIST_HEAD(&conn->chan_l);
7904 INIT_LIST_HEAD(&conn->users);
7906 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7908 skb_queue_head_init(&conn->pending_rx);
7909 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7910 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7912 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Validate a PSM for the destination address type: LE PSMs are a single
 * byte (<= 0x00ff); BR/EDR PSMs must have an odd low byte and a clear
 * LSB in the upper byte, per the L2CAP spec.
 */
7917 static bool is_valid_psm(u16 psm, u8 dst_type)
7922 if (bdaddr_type_is_le(dst_type))
7923 return (psm <= 0x00ff);
7925 /* PSM must be odd and lsb of upper byte must be 0 */
7926 return ((psm & 0x0101) == 0x0001);
/* Iterator context for l2cap_chan_list(): reference channel, owner PID
 * and a running count (count field elided in this extract).
 */
7929 struct l2cap_chan_data {
7930 struct l2cap_chan *chan;
/* Count deferred ECRED channels that belong to the same PID and PSM as
 * d->chan, excluding d->chan itself — used to cap simultaneous
 * connection attempts in l2cap_chan_connect().
 */
7935 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7937 struct l2cap_chan_data *d = data;
7940 if (chan == d->chan)
7943 if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7946 pid = chan->ops->get_peer_pid(chan);
7948 /* Only count deferred channels with the same PID/PSM */
7949 if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7950 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
/* Initiate an outgoing L2CAP channel connection to (dst, dst_type):
 * validate PSM/CID/mode combinations, create or reuse the HCI link
 * (LE direct/scan connect or ACL connect), attach the channel to the
 * l2cap_conn, and start the channel state machine. Returns 0 or a
 * negative errno.
 * NOTE(review): lines elided in this extract; comments only added.
 */
7956 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7957 bdaddr_t *dst, u8 dst_type)
7959 struct l2cap_conn *conn;
7960 struct hci_conn *hcon;
7961 struct hci_dev *hdev;
7964 BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
7965 dst, dst_type, __le16_to_cpu(psm), chan->mode);
7967 hdev = hci_get_route(dst, &chan->src, chan->src_type);
7969 return -EHOSTUNREACH;
/* Parameter sanity: raw channels may omit PSM/CID, others may not */
7973 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7974 chan->chan_type != L2CAP_CHAN_RAW) {
7979 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7984 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7989 switch (chan->mode) {
7990 case L2CAP_MODE_BASIC:
7992 case L2CAP_MODE_LE_FLOWCTL:
7994 case L2CAP_MODE_EXT_FLOWCTL:
7995 if (!enable_ecred) {
8000 case L2CAP_MODE_ERTM:
8001 case L2CAP_MODE_STREAMING:
8010 switch (chan->state) {
8014 /* Already connecting */
8019 /* Already connected */
8033 /* Set destination address and psm */
8034 bacpy(&chan->dst, dst);
8035 chan->dst_type = dst_type;
8040 if (bdaddr_type_is_le(dst_type)) {
8041 /* Convert from L2CAP channel address type to HCI address type
8043 if (dst_type == BDADDR_LE_PUBLIC)
8044 dst_type = ADDR_LE_DEV_PUBLIC;
8046 dst_type = ADDR_LE_DEV_RANDOM;
/* When advertising we can connect directly; otherwise go via scan */
8048 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8049 hcon = hci_connect_le(hdev, dst, dst_type, false,
8051 HCI_LE_CONN_TIMEOUT,
8054 hcon = hci_connect_le_scan(hdev, dst, dst_type,
8056 HCI_LE_CONN_TIMEOUT,
8057 CONN_REASON_L2CAP_CHAN);
8060 u8 auth_type = l2cap_get_auth_type(chan);
8061 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
8062 CONN_REASON_L2CAP_CHAN);
8066 err = PTR_ERR(hcon);
8070 conn = l2cap_conn_add(hcon);
8072 hci_conn_drop(hcon);
/* Cap the number of simultaneous deferred ECRED connects per PID */
8077 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
8078 struct l2cap_chan_data data;
8081 data.pid = chan->ops->get_peer_pid(chan);
8084 l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
8086 /* Check if there isn't too many channels being connected */
8087 if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
8088 hci_conn_drop(hcon);
8094 mutex_lock(&conn->chan_lock);
8095 l2cap_chan_lock(chan);
8097 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
8098 hci_conn_drop(hcon);
8103 /* Update source addr of the socket */
8104 bacpy(&chan->src, &hcon->src);
8105 chan->src_type = bdaddr_src_type(hcon);
8107 __l2cap_chan_add(conn, chan);
8109 /* l2cap_chan_add takes its own ref so we can drop this one */
8110 hci_conn_drop(hcon);
8112 l2cap_state_change(chan, BT_CONNECT);
8113 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
8115 /* Release chan->sport so that it can be reused by other
8116 * sockets (as it's only used for listening sockets).
8118 write_lock(&chan_list_lock);
8120 write_unlock(&chan_list_lock);
8122 if (hcon->state == BT_CONNECTED) {
8123 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
8124 __clear_chan_timer(chan);
8125 if (l2cap_chan_check_security(chan, true))
8126 l2cap_state_change(chan, BT_CONNECTED);
8128 l2cap_do_start(chan);
8134 l2cap_chan_unlock(chan);
8135 mutex_unlock(&conn->chan_lock);
8137 hci_dev_unlock(hdev);
8141 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
/* Send an ECRED reconfigure request advertising the channel's current
 * imtu/mps for its SCID; the response is matched via chan->ident.
 */
8143 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
8145 struct l2cap_conn *conn = chan->conn;
8147 struct l2cap_ecred_reconf_req req;
8151 pdu.req.mtu = cpu_to_le16(chan->imtu);
8152 pdu.req.mps = cpu_to_le16(chan->mps);
8153 pdu.scid = cpu_to_le16(chan->scid);
8155 chan->ident = l2cap_get_ident(conn);
8157 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
/* Public API: raise the channel MTU. Shrinking below the current imtu is
 * rejected (return on elided line); otherwise an ECRED reconfigure
 * request is issued to the peer.
 */
8161 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8163 if (chan->imtu > mtu)
8166 BT_DBG("chan %p mtu 0x%4.4x", chan, mtu)
8170 l2cap_ecred_reconfigure(chan);
8175 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection by
 * scanning listening channels. Channels bound to this adapter's address
 * take priority (lm1/exact) over wildcard BDADDR_ANY listeners (lm2);
 * FLAG_ROLE_SWITCH listeners additionally request the master role.
 */
8177 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8179 int exact = 0, lm1 = 0, lm2 = 0;
8180 struct l2cap_chan *c;
8182 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8184 /* Find listening sockets and check their link_mode */
8185 read_lock(&chan_list_lock);
8186 list_for_each_entry(c, &chan_list, global_l) {
8187 if (c->state != BT_LISTEN)
8190 if (!bacmp(&c->src, &hdev->bdaddr)) {
8191 lm1 |= HCI_LM_ACCEPT;
8192 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8193 lm1 |= HCI_LM_MASTER;
8195 } else if (!bacmp(&c->src, BDADDR_ANY)) {
8196 lm2 |= HCI_LM_ACCEPT;
8197 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8198 lm2 |= HCI_LM_MASTER;
8201 read_unlock(&chan_list_lock);
8203 return exact ? lm1 : lm2;
8206 /* Find the next fixed channel in BT_LISTEN state, continue iteration
8207 * from an existing channel in the list or from the beginning of the
8208 * global list (by passing NULL as first parameter).
/* Returns the matched channel with a reference held (hold_unless_zero),
 * or NULL when the list is exhausted. Caller must l2cap_chan_put().
 */
8210 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
8211 struct hci_conn *hcon)
8213 u8 src_type = bdaddr_src_type(hcon);
8215 read_lock(&chan_list_lock);
8218 c = list_next_entry(c, global_l);
8220 c = list_entry(chan_list.next, typeof(*c), global_l);
8222 list_for_each_entry_from(c, &chan_list, global_l) {
8223 if (c->chan_type != L2CAP_CHAN_FIXED)
8225 if (c->state != BT_LISTEN)
/* Match either the link's source address or a wildcard listener */
8227 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
8229 if (src_type != c->src_type)
8232 c = l2cap_chan_hold_unless_zero(c);
8233 read_unlock(&chan_list_lock);
8237 read_unlock(&chan_list_lock);
/* HCI callback on connection completion: on failure tear the l2cap_conn
 * down; on success create it, then walk all listening fixed channels and
 * spawn a per-link instance of each (client-created fixed channels take
 * precedence over server ones), finally signal the connection ready.
 * NOTE(review): lines elided in this extract; comments only added.
 */
8242 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
8244 struct hci_dev *hdev = hcon->hdev;
8245 struct l2cap_conn *conn;
8246 struct l2cap_chan *pchan;
8249 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8252 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
8255 l2cap_conn_del(hcon, bt_to_errno(status));
8259 conn = l2cap_conn_add(hcon);
8263 dst_type = bdaddr_dst_type(hcon);
8265 /* If device is blocked, do not create channels for it */
8266 if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
8269 /* Find fixed channels and notify them of the new connection. We
8270 * use multiple individual lookups, continuing each time where
8271 * we left off, because the list lock would prevent calling the
8272 * potentially sleeping l2cap_chan_lock() function.
8274 pchan = l2cap_global_fixed_chan(NULL, hcon);
8276 struct l2cap_chan *chan, *next;
8278 /* Client fixed channels should override server ones */
8279 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
8282 l2cap_chan_lock(pchan);
8283 chan = pchan->ops->new_connection(pchan);
8285 bacpy(&chan->src, &hcon->src);
8286 bacpy(&chan->dst, &hcon->dst);
8287 chan->src_type = bdaddr_src_type(hcon);
8288 chan->dst_type = dst_type;
8290 __l2cap_chan_add(conn, chan);
8293 l2cap_chan_unlock(pchan);
/* Advance before dropping our reference on the current pchan */
8295 next = l2cap_global_fixed_chan(pchan, hcon);
8296 l2cap_chan_put(pchan);
8300 l2cap_conn_ready(conn);
/* HCI callback: report the disconnect reason recorded for this link,
 * defaulting to remote-user-terminated when no l2cap_conn exists.
 */
8303 int l2cap_disconn_ind(struct hci_conn *hcon)
8305 struct l2cap_conn *conn = hcon->l2cap_data;
8307 BT_DBG("hcon %p", hcon);
8310 return HCI_ERROR_REMOTE_USER_TERM;
8311 return conn->disc_reason;
/* HCI callback: tear down the L2CAP connection when the ACL/LE link
 * goes away, translating the HCI reason to an errno.
 */
8314 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8316 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8319 BT_DBG("hcon %p reason %d", hcon, reason);
8321 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption arms a grace timer at MEDIUM security and closes
 * the channel outright at HIGH/FIPS; regaining it clears the timer.
 */
8324 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8326 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8329 if (encrypt == 0x00) {
8330 if (chan->sec_level == BT_SECURITY_MEDIUM) {
8331 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8332 } else if (chan->sec_level == BT_SECURITY_HIGH ||
8333 chan->sec_level == BT_SECURITY_FIPS)
8334 l2cap_chan_close(chan, ECONNREFUSED);
8336 if (chan->sec_level == BT_SECURITY_MEDIUM)
8337 __clear_chan_timer(chan);
/* HCI security/encryption-change callback: walk every channel on the
 * connection and advance its state machine — resume connected channels,
 * (re)start connecting ones when the key is acceptable, and answer
 * pending BT_CONNECT2 requests with success, authorization-pending or
 * security-block, sending the deferred CONN_RSP/CONF_REQ as needed.
 * NOTE(review): lines elided in this extract; comments only added.
 */
8341 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
8343 struct l2cap_conn *conn = hcon->l2cap_data;
8344 struct l2cap_chan *chan;
8349 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
8351 mutex_lock(&conn->chan_lock);
8353 list_for_each_entry(chan, &conn->chan_l, list) {
8354 l2cap_chan_lock(chan);
8356 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
8357 state_to_string(chan->state));
/* A2MP handles its own security; skip it here */
8359 if (chan->scid == L2CAP_CID_A2MP) {
8360 l2cap_chan_unlock(chan);
8364 if (!status && encrypt)
8365 chan->sec_level = hcon->sec_level;
8367 if (!__l2cap_no_conn_pending(chan)) {
8368 l2cap_chan_unlock(chan);
8372 if (!status && (chan->state == BT_CONNECTED ||
8373 chan->state == BT_CONFIG)) {
8374 chan->ops->resume(chan);
8375 l2cap_check_encryption(chan, encrypt);
8376 l2cap_chan_unlock(chan);
8380 if (chan->state == BT_CONNECT) {
/* Refuse to proceed over weak encryption keys */
8381 if (!status && l2cap_check_enc_key_size(hcon))
8382 l2cap_start_connection(chan);
8384 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8385 } else if (chan->state == BT_CONNECT2 &&
8386 !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
8387 chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
8388 struct l2cap_conn_rsp rsp;
8391 if (!status && l2cap_check_enc_key_size(hcon)) {
8392 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
8393 res = L2CAP_CR_PEND;
8394 stat = L2CAP_CS_AUTHOR_PEND;
8395 chan->ops->defer(chan);
8397 l2cap_state_change(chan, BT_CONFIG);
8398 res = L2CAP_CR_SUCCESS;
8399 stat = L2CAP_CS_NO_INFO;
8402 l2cap_state_change(chan, BT_DISCONN);
8403 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8404 res = L2CAP_CR_SEC_BLOCK;
8405 stat = L2CAP_CS_NO_INFO;
8408 rsp.scid = cpu_to_le16(chan->dcid);
8409 rsp.dcid = cpu_to_le16(chan->scid);
8410 rsp.result = cpu_to_le16(res);
8411 rsp.status = cpu_to_le16(stat);
8412 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
8415 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
8416 res == L2CAP_CR_SUCCESS) {
8418 set_bit(CONF_REQ_SENT, &chan->conf_state);
8419 l2cap_send_cmd(conn, l2cap_get_ident(conn),
8421 l2cap_build_conf_req(chan, buf, sizeof(buf)),
8423 chan->num_conf_req++;
8427 l2cap_chan_unlock(chan);
8430 mutex_unlock(&conn->chan_lock);
8433 /* Append fragment into frame respecting the maximum len of rx_skb */
/* Allocates conn->rx_skb on first call (sized `len`), copies at most
 * `len` bytes from the fragment, and decrements conn->rx_len by the
 * amount copied. Returns the number of bytes consumed (elided line) or
 * a negative errno on allocation failure.
 */
8434 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
8437 if (!conn->rx_skb) {
8438 /* Allocate skb for the complete frame (with header) */
8439 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
8446 /* Copy as much as the rx_skb can hold */
8447 len = min_t(u16, len, skb->len);
8448 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
8450 conn->rx_len -= len;
/* Complete the 2-byte L2CAP length field across ACL fragments, then size
 * conn->rx_skb for the full frame: reuse the current rx_skb when its
 * tailroom suffices, otherwise reallocate it to the exact expected
 * length via a fresh l2cap_recv_frag() call.
 */
8455 static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
8457 struct sk_buff *rx_skb;
8460 /* Append just enough to complete the header */
8461 len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);
8463 /* If header could not be read just continue */
8464 if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
8467 rx_skb = conn->rx_skb;
8468 len = get_unaligned_le16(rx_skb->data);
8470 /* Check if rx_skb has enough space to received all fragments */
8471 if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
8472 /* Update expected len */
8473 conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
8474 return L2CAP_LEN_SIZE;
8477 /* Reset conn->rx_skb since it will need to be reallocated in order to
8478 * fit all fragments.
8480 conn->rx_skb = NULL;
8482 /* Reallocates rx_skb using the exact expected length */
8483 len = l2cap_recv_frag(conn, rx_skb,
8484 len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
/* Discard any partially reassembled frame and reset recombination state
 * (kfree_skb(NULL) is a no-op, so this is safe when nothing is pending).
 */
8490 static void l2cap_recv_reset(struct l2cap_conn *conn)
8492 kfree_skb(conn->rx_skb);
8493 conn->rx_skb = NULL;
/* HCI ACL data entry point: recombine start/continuation fragments into
 * complete L2CAP frames and feed them to l2cap_recv_frame(). Length and
 * ordering violations mark the connection unreliable (ECOMM) and reset
 * the recombination buffer.
 * NOTE(review): lines elided in this extract; comments only added.
 */
8497 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
8499 struct l2cap_conn *conn = hcon->l2cap_data;
8502 /* For AMP controller do not create l2cap conn */
8503 if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
8507 conn = l2cap_conn_add(hcon);
8512 BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);
8516 case ACL_START_NO_FLUSH:
/* A new start frame while one is pending means we lost fragments */
8519 BT_ERR("Unexpected start frame (len %d)", skb->len);
8520 l2cap_recv_reset(conn);
8521 l2cap_conn_unreliable(conn, ECOMM);
8524 /* Start fragment may not contain the L2CAP length so just
8525 * copy the initial byte when that happens and use conn->mtu as
8528 if (skb->len < L2CAP_LEN_SIZE) {
8529 l2cap_recv_frag(conn, skb, conn->mtu);
8533 len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;
8535 if (len == skb->len) {
8536 /* Complete frame received */
8537 l2cap_recv_frame(conn, skb);
8541 BT_DBG("Start: total len %d, frag len %u", len, skb->len);
8543 if (skb->len > len) {
8544 BT_ERR("Frame is too long (len %u, expected len %d)",
8546 l2cap_conn_unreliable(conn, ECOMM);
8550 /* Append fragment into frame (with header) */
8551 if (l2cap_recv_frag(conn, skb, len) < 0)
8557 BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);
8559 if (!conn->rx_skb) {
8560 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
8561 l2cap_conn_unreliable(conn, ECOMM);
8565 /* Complete the L2CAP length if it has not been read */
8566 if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
8567 if (l2cap_recv_len(conn, skb) < 0) {
8568 l2cap_conn_unreliable(conn, ECOMM);
8572 /* Header still could not be read just continue */
8573 if (conn->rx_skb->len < L2CAP_LEN_SIZE)
8577 if (skb->len > conn->rx_len) {
8578 BT_ERR("Fragment is too long (len %u, expected %u)",
8579 skb->len, conn->rx_len);
8580 l2cap_recv_reset(conn);
8581 l2cap_conn_unreliable(conn, ECOMM);
8585 /* Append fragment into frame (with header) */
8586 l2cap_recv_frag(conn, skb, skb->len);
8588 if (!conn->rx_len) {
8589 /* Complete frame received. l2cap_recv_frame
8590 * takes ownership of the skb so set the global
8591 * rx_skb pointer to NULL first.
8593 struct sk_buff *rx_skb = conn->rx_skb;
8594 conn->rx_skb = NULL;
8595 l2cap_recv_frame(conn, rx_skb);
/* HCI callback registration: hooks L2CAP into the HCI core for
 * connect/disconnect completion and security (encryption) changes.
 */
8604 static struct hci_cb l2cap_cb = {
8606 .connect_cfm = l2cap_connect_cfm,
8607 .disconn_cfm = l2cap_disconn_cfm,
8608 .security_cfm = l2cap_security_cfm,
/* debugfs seq_file show: dump one line per global channel — addresses
 * with types, state, PSM, SCID/DCID, MTUs, security level and mode.
 */
8611 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8613 struct l2cap_chan *c;
8615 read_lock(&chan_list_lock);
8617 list_for_each_entry(c, &chan_list, global_l) {
8618 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8619 &c->src, c->src_type, &c->dst, c->dst_type,
8620 c->state, __le16_to_cpu(c->psm),
8621 c->scid, c->dcid, c->imtu, c->omtu,
8622 c->sec_level, c->mode);
8625 read_unlock(&chan_list_lock);
/* Generates l2cap_debugfs_fops from the show function above */
8630 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
8632 static struct dentry *l2cap_debugfs;
/* Subsystem init: register the L2CAP socket family, hook into HCI and,
 * when the bluetooth debugfs root exists, expose the channel dump file.
 */
8634 int __init l2cap_init(void)
8638 err = l2cap_init_sockets();
8642 hci_register_cb(&l2cap_cb);
8644 if (IS_ERR_OR_NULL(bt_debugfs))
8647 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8648 NULL, &l2cap_debugfs_fops);
/* Subsystem teardown: mirror of l2cap_init(), in reverse order */
8653 void l2cap_exit(void)
8655 debugfs_remove(l2cap_debugfs);
8656 hci_unregister_cb(&l2cap_cb);
8657 l2cap_cleanup_sockets();
/* Runtime-tunable module parameters (visible under /sys/module) */
8660 module_param(disable_ertm, bool, 0644);
8661 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
8663 module_param(enable_ecred, bool, 0644);
8664 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");