2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
45 #define LE_FLOWCTL_MAX_CREDITS 65535
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 struct sk_buff_head *skbs, u8 event);
64 static void l2cap_retrans_timeout(struct work_struct *work);
65 static void l2cap_monitor_timeout(struct work_struct *work);
66 static void l2cap_ack_timeout(struct work_struct *work);
/* Map an HCI link type + HCI address type to the corresponding BDADDR_*
 * address type exposed to sockets.
 * NOTE(review): gaps in the embedded numbering indicate elided lines here
 * (the BR/EDR fallback return and closing braces are not visible) - verify
 * against the complete file.
 */
68 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
70 if (link_type == LE_LINK) {
71 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
72 return BDADDR_LE_PUBLIC;
74 return BDADDR_LE_RANDOM;
/* Address type of the local (source) side of an HCI connection. */
80 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
82 return bdaddr_type(hcon->type, hcon->src_type);
/* Address type of the remote (destination) side of an HCI connection. */
85 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
87 return bdaddr_type(hcon->type, hcon->dst_type);
90 /* ---- L2CAP channels ---- */
/* Walk conn->chan_l looking for the channel with the given destination CID.
 * Caller must hold conn->chan_lock. Match/return lines are elided in this
 * chunk - TODO confirm against the full file.
 */
92 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
97 list_for_each_entry(c, &conn->chan_l, list) {
/* Same walk as above, keyed on the source CID. Caller holds chan_lock. */
104 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
107 struct l2cap_chan *c;
109 list_for_each_entry(c, &conn->chan_l, list) {
116 /* Find channel with given SCID.
117 * Returns a reference locked channel.
/* Locked/ref-taking wrapper: looks up by SCID under conn->chan_lock and
 * takes a reference only if the channel's refcount is still non-zero,
 * guarding against a concurrently-dying channel.
 */
119 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
122 struct l2cap_chan *c;
124 mutex_lock(&conn->chan_lock);
125 c = __l2cap_get_chan_by_scid(conn, cid);
127 /* Only lock if chan reference is not 0 */
128 c = l2cap_chan_hold_unless_zero(c);
132 mutex_unlock(&conn->chan_lock);
137 /* Find channel with given DCID.
138 * Returns a reference locked channel.
/* DCID counterpart of l2cap_get_chan_by_scid(); same locking/ref pattern. */
140 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
143 struct l2cap_chan *c;
145 mutex_lock(&conn->chan_lock);
146 c = __l2cap_get_chan_by_dcid(conn, cid);
148 /* Only lock if chan reference is not 0 */
149 c = l2cap_chan_hold_unless_zero(c);
153 mutex_unlock(&conn->chan_lock);
/* Lookup by signalling command identifier. Caller holds chan_lock. */
158 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
161 struct l2cap_chan *c;
163 list_for_each_entry(c, &conn->chan_l, list) {
164 if (c->ident == ident)
/* Locked/ref-taking wrapper for the ident lookup, mirroring the CID ones. */
170 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
173 struct l2cap_chan *c;
175 mutex_lock(&conn->chan_lock);
176 c = __l2cap_get_chan_by_ident(conn, ident);
178 /* Only lock if chan reference is not 0 */
179 c = l2cap_chan_hold_unless_zero(c);
183 mutex_unlock(&conn->chan_lock);
/* Search the global channel list for a channel bound to (psm, src addr),
 * keeping BR/EDR and LE namespaces separate. Caller holds chan_list_lock.
 */
188 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
191 struct l2cap_chan *c;
193 list_for_each_entry(c, &chan_list, global_l) {
194 if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
197 if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
200 if (c->sport == psm && !bacmp(&c->src, src))
206 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
210 write_lock(&chan_list_lock);
212 if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
222 u16 p, start, end, incr;
224 if (chan->src_type == BDADDR_BREDR) {
225 start = L2CAP_PSM_DYN_START;
226 end = L2CAP_PSM_AUTO_END;
229 start = L2CAP_PSM_LE_DYN_START;
230 end = L2CAP_PSM_LE_DYN_END;
235 for (p = start; p <= end; p += incr)
236 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
238 chan->psm = cpu_to_le16(p);
239 chan->sport = cpu_to_le16(p);
246 write_unlock(&chan_list_lock);
249 EXPORT_SYMBOL_GPL(l2cap_add_psm);
251 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
253 write_lock(&chan_list_lock);
255 /* Override the defaults (which are for conn-oriented) */
256 chan->omtu = L2CAP_DEFAULT_MTU;
257 chan->chan_type = L2CAP_CHAN_FIXED;
261 write_unlock(&chan_list_lock);
/* Allocate the first free dynamic source CID on @conn, using the LE or
 * BR/EDR dynamic range depending on the link type. Caller must hold
 * conn->chan_lock (lookup helper requires it). The exhausted-range return
 * is elided in this chunk.
 */
266 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
270 if (conn->hcon->type == LE_LINK)
271 dyn_end = L2CAP_CID_LE_DYN_END;
273 dyn_end = L2CAP_CID_DYN_END;
275 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
276 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Move the channel to @state via the ops->state_change callback (err 0). */
283 static void l2cap_state_change(struct l2cap_chan *chan, int state)
285 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
286 state_to_string(state));
289 chan->ops->state_change(chan, state, 0);
/* Change state and report an error in one callback invocation.
 * NOTE(review): the visible call passes chan->state rather than a new
 * state argument - the state-setting line appears elided in this chunk.
 */
292 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
296 chan->ops->state_change(chan, chan->state, err);
/* Report @err on the channel without changing its state. */
299 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
301 chan->ops->state_change(chan, chan->state, err);
/* (Re)arm the ERTM retransmission timer, but only while the monitor timer
 * is not pending - the two are mutually exclusive per the ERTM state
 * machine.
 */
304 static void __set_retrans_timer(struct l2cap_chan *chan)
306 if (!delayed_work_pending(&chan->monitor_timer) &&
307 chan->retrans_timeout) {
308 l2cap_set_timer(chan, &chan->retrans_timer,
309 msecs_to_jiffies(chan->retrans_timeout));
/* Switch from retransmission to monitor timing: stop the retrans timer
 * and arm the monitor timer if a timeout is configured.
 */
313 static void __set_monitor_timer(struct l2cap_chan *chan)
315 __clear_retrans_timer(chan);
316 if (chan->monitor_timeout) {
317 l2cap_set_timer(chan, &chan->monitor_timer,
318 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of @head for the skb whose ERTM txseq equals @seq. */
322 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
327 skb_queue_walk(head, skb) {
328 if (bt_cb(skb)->l2cap.txseq == seq)
335 /* ---- L2CAP sequence number lists ---- */
337 /* For ERTM, ordered lists of sequence numbers must be tracked for
338 * SREJ requests that are received and for frames that are to be
339 * retransmitted. These seq_list functions implement a singly-linked
340 * list in an array, where membership in the list can also be checked
341 * in constant time. Items can also be added to the tail of the list
342 * and removed from the head in constant time, without further memory
346 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
348 size_t alloc_size, i;
350 /* Allocated size is a power of 2 to map sequence numbers
351 * (which may be up to 14 bits) in to a smaller array that is
352 * sized for the negotiated ERTM transmit windows.
354 alloc_size = roundup_pow_of_two(size);
356 seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
360 seq_list->mask = alloc_size - 1;
361 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
363 for (i = 0; i < alloc_size; i++)
364 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
369 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
371 kfree(seq_list->list);
374 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
377 /* Constant-time check for list membership */
378 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
381 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
383 u16 seq = seq_list->head;
384 u16 mask = seq_list->mask;
386 seq_list->head = seq_list->list[seq & mask];
387 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
389 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
390 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
391 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
397 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
401 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
404 for (i = 0; i <= seq_list->mask; i++)
405 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
407 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
408 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
411 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
413 u16 mask = seq_list->mask;
415 /* All appends happen in constant time */
417 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
420 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
421 seq_list->head = seq;
423 seq_list->list[seq_list->tail & mask] = seq;
425 seq_list->tail = seq;
426 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for chan_timer: close the channel with an error
 * reason derived from its current state, then drop the reference that
 * __set_chan_timer() took when scheduling. Runs with conn->chan_lock and
 * the channel lock held; the default-reason branch is elided here.
 */
429 static void l2cap_chan_timeout(struct work_struct *work)
431 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
433 struct l2cap_conn *conn = chan->conn;
436 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
438 mutex_lock(&conn->chan_lock);
439 /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
440 * this work. No need to call l2cap_chan_hold(chan) here again.
442 l2cap_chan_lock(chan);
444 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
445 reason = ECONNREFUSED;
446 else if (chan->state == BT_CONNECT &&
447 chan->sec_level != BT_SECURITY_SDP)
448 reason = ECONNREFUSED;
452 l2cap_chan_close(chan, reason);
454 chan->ops->close(chan);
456 l2cap_chan_unlock(chan);
457 l2cap_chan_put(chan);
459 mutex_unlock(&conn->chan_lock);
/* Allocate and initialize a new channel: queues, lock, timers, refcount,
 * and registration on the global chan_list. Starts in BT_OPEN with
 * CONF_NOT_COMPLETE set. Returns the channel, or NULL on allocation
 * failure (that early return is elided in this chunk).
 */
462 struct l2cap_chan *l2cap_chan_create(void)
464 struct l2cap_chan *chan;
466 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
470 skb_queue_head_init(&chan->tx_q);
471 skb_queue_head_init(&chan->srej_q);
472 mutex_init(&chan->lock);
474 /* Set default lock nesting level */
475 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
477 write_lock(&chan_list_lock);
478 list_add(&chan->global_l, &chan_list);
479 write_unlock(&chan_list_lock);
481 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
482 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
483 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
484 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
486 chan->state = BT_OPEN;
488 kref_init(&chan->kref);
490 /* This flag is cleared in l2cap_chan_ready() */
491 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
493 BT_DBG("chan %p", chan);
497 EXPORT_SYMBOL_GPL(l2cap_chan_create);
/* kref release callback: unlink from the global list and free the channel
 * (the kfree is elided in this chunk - TODO confirm).
 */
499 static void l2cap_chan_destroy(struct kref *kref)
501 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
503 BT_DBG("chan %p", chan);
505 write_lock(&chan_list_lock);
506 list_del(&chan->global_l);
507 write_unlock(&chan_list_lock);
/* Take an unconditional reference (caller already owns one). */
512 void l2cap_chan_hold(struct l2cap_chan *c)
514 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
/* Take a reference only if the refcount has not hit zero; returns NULL
 * for a dying channel so lookup paths cannot resurrect it.
 */
519 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
521 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
523 if (!kref_get_unless_zero(&c->kref))
/* Drop a reference; last put runs l2cap_chan_destroy(). */
529 void l2cap_chan_put(struct l2cap_chan *c)
531 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
533 kref_put(&c->kref, l2cap_chan_destroy);
535 EXPORT_SYMBOL_GPL(l2cap_chan_put);
/* Reset a channel to the spec-default ERTM/security/flush parameters and
 * re-mark configuration as not complete. Used before (re)connecting.
 */
537 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
539 chan->fcs = L2CAP_FCS_CRC16;
540 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
541 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
542 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
543 chan->remote_max_tx = chan->max_tx;
544 chan->remote_tx_win = chan->tx_win;
545 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
546 chan->sec_level = BT_SECURITY_LOW;
547 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
548 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
549 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
551 chan->conf_state = 0;
552 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
554 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
556 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
558 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
561 chan->sdu_last_frag = NULL;
563 chan->tx_credits = tx_credits;
564 /* Derive MPS from connection MTU to stop HCI fragmentation */
565 chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
566 /* Give enough credits for a full packet */
567 chan->rx_credits = (chan->imtu / chan->mps) + 1;
569 skb_queue_head_init(&chan->tx_q);
572 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
574 l2cap_le_flowctl_init(chan, tx_credits);
576 /* L2CAP implementations shall support a minimum MPS of 64 octets */
577 if (chan->mps < L2CAP_ECRED_MIN_MPS) {
578 chan->mps = L2CAP_ECRED_MIN_MPS;
579 chan->rx_credits = (chan->imtu / chan->mps) + 1;
/* Attach @chan to @conn: assign CIDs/MTU per channel type, set best-effort
 * flow-spec defaults, take channel and (conditionally) hci_conn references,
 * and link into conn->chan_l. Caller must hold conn->chan_lock (see
 * l2cap_chan_add below). Several case-terminating break lines are elided
 * in this chunk.
 */
583 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
585 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
586 __le16_to_cpu(chan->psm), chan->dcid);
588 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
592 switch (chan->chan_type) {
593 case L2CAP_CHAN_CONN_ORIENTED:
594 /* Alloc CID for connection-oriented socket */
595 chan->scid = l2cap_alloc_cid(conn);
596 if (conn->hcon->type == ACL_LINK)
597 chan->omtu = L2CAP_DEFAULT_MTU;
600 case L2CAP_CHAN_CONN_LESS:
601 /* Connectionless socket */
602 chan->scid = L2CAP_CID_CONN_LESS;
603 chan->dcid = L2CAP_CID_CONN_LESS;
604 chan->omtu = L2CAP_DEFAULT_MTU;
607 case L2CAP_CHAN_FIXED:
608 /* Caller will set CID and CID specific MTU values */
612 /* Raw socket can send/recv signalling messages only */
613 chan->scid = L2CAP_CID_SIGNALING;
614 chan->dcid = L2CAP_CID_SIGNALING;
615 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort flow specification for the local endpoint. */
618 chan->local_id = L2CAP_BESTEFFORT_ID;
619 chan->local_stype = L2CAP_SERV_BESTEFFORT;
620 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
621 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
622 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
623 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
625 l2cap_chan_hold(chan);
627 /* Only keep a reference for fixed channels if they requested it */
628 if (chan->chan_type != L2CAP_CHAN_FIXED ||
629 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
630 hci_conn_hold(conn->hcon);
632 list_add(&chan->list, &conn->chan_l);
/* Public wrapper: performs the add under conn->chan_lock. */
635 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
637 mutex_lock(&conn->chan_lock);
638 __l2cap_chan_add(conn, chan);
639 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection and tear it down: cancel the channel
 * timer, notify the owner via ops->teardown, unlink and drop references,
 * release AMP state, and purge mode-specific queues/timers. The guard
 * around the conn-dependent section and several break lines are elided in
 * this chunk - verify against the full file.
 */
642 void l2cap_chan_del(struct l2cap_chan *chan, int err)
644 struct l2cap_conn *conn = chan->conn;
646 __clear_chan_timer(chan);
648 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
649 state_to_string(chan->state));
651 chan->ops->teardown(chan, err);
654 struct amp_mgr *mgr = conn->hcon->amp_mgr;
655 /* Delete from channel list */
656 list_del(&chan->list);
658 l2cap_chan_put(chan);
662 /* Reference was only held for non-fixed channels or
663 * fixed channels that explicitly requested it using the
664 * FLAG_HOLD_HCI_CONN flag.
666 if (chan->chan_type != L2CAP_CHAN_FIXED ||
667 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
668 hci_conn_drop(conn->hcon);
670 if (mgr && mgr->bredr_chan == chan)
671 mgr->bredr_chan = NULL;
/* Tear down a high-speed (AMP) logical link if one was in use. */
674 if (chan->hs_hchan) {
675 struct hci_chan *hs_hchan = chan->hs_hchan;
677 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
678 amp_disconnect_logical_link(hs_hchan);
681 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
/* Mode-specific cleanup of pending transmit/reassembly state. */
684 switch (chan->mode) {
685 case L2CAP_MODE_BASIC:
688 case L2CAP_MODE_LE_FLOWCTL:
689 case L2CAP_MODE_EXT_FLOWCTL:
690 skb_queue_purge(&chan->tx_q);
693 case L2CAP_MODE_ERTM:
694 __clear_retrans_timer(chan);
695 __clear_monitor_timer(chan);
696 __clear_ack_timer(chan);
698 skb_queue_purge(&chan->srej_q);
700 l2cap_seq_list_free(&chan->srej_list);
701 l2cap_seq_list_free(&chan->retrans_list);
704 case L2CAP_MODE_STREAMING:
705 skb_queue_purge(&chan->tx_q);
709 EXPORT_SYMBOL_GPL(l2cap_chan_del);
/* Invoke @func on every channel whose ident matches @id; the _safe walk
 * allows @func to remove the channel. Caller holds conn->chan_lock.
 */
711 static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
712 l2cap_chan_func_t func, void *data)
714 struct l2cap_chan *chan, *l;
716 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
717 if (chan->ident == id)
/* Invoke @func on every channel of the connection. Caller holds chan_lock. */
722 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
725 struct l2cap_chan *chan;
727 list_for_each_entry(chan, &conn->chan_l, list) {
/* Public wrapper: iterate the channel list under conn->chan_lock. */
732 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
738 mutex_lock(&conn->chan_lock);
739 __l2cap_chan_list(conn, func, data);
740 mutex_unlock(&conn->chan_lock);
743 EXPORT_SYMBOL_GPL(l2cap_chan_list);
/* Work handler run after an LE identity-address resolution: copy the
 * hci_conn's (possibly updated) destination address and type into every
 * channel on the connection.
 */
745 static void l2cap_conn_update_id_addr(struct work_struct *work)
747 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
748 id_addr_update_work);
749 struct hci_conn *hcon = conn->hcon;
750 struct l2cap_chan *chan;
752 mutex_lock(&conn->chan_lock);
754 list_for_each_entry(chan, &conn->chan_l, list) {
755 l2cap_chan_lock(chan);
756 bacpy(&chan->dst, &hcon->dst);
757 chan->dst_type = bdaddr_dst_type(hcon);
758 l2cap_chan_unlock(chan);
761 mutex_unlock(&conn->chan_lock);
/* Reject a pending LE credit-based connection request: pick the result
 * code (authorization pending vs. bad PSM), move to BT_DISCONN and send
 * the LE connect response.
 */
764 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
766 struct l2cap_conn *conn = chan->conn;
767 struct l2cap_le_conn_rsp rsp;
770 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
771 result = L2CAP_CR_LE_AUTHORIZATION;
773 result = L2CAP_CR_LE_BAD_PSM;
775 l2cap_state_change(chan, BT_DISCONN);
777 rsp.dcid = cpu_to_le16(chan->scid);
778 rsp.mtu = cpu_to_le16(chan->imtu);
779 rsp.mps = cpu_to_le16(chan->mps);
780 rsp.credits = cpu_to_le16(chan->rx_credits);
781 rsp.result = cpu_to_le16(result);
783 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* ECRED variant: the deferred-response helper builds and sends the reply. */
787 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
789 l2cap_state_change(chan, BT_DISCONN);
791 __l2cap_ecred_conn_rsp_defer(chan);
/* BR/EDR variant of the reject path; result depends on FLAG_DEFER_SETUP. */
794 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
796 struct l2cap_conn *conn = chan->conn;
797 struct l2cap_conn_rsp rsp;
800 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
801 result = L2CAP_CR_SEC_BLOCK;
803 result = L2CAP_CR_BAD_PSM;
805 l2cap_state_change(chan, BT_DISCONN);
807 rsp.scid = cpu_to_le16(chan->dcid);
808 rsp.dcid = cpu_to_le16(chan->scid);
809 rsp.result = cpu_to_le16(result);
810 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
812 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close a channel appropriately for its current state: send a disconnect
 * request for established conn-oriented channels, a reject for half-open
 * incoming ones, or simply delete the channel. The switch's state labels
 * and break lines are elided in this chunk - the visible branches appear
 * to correspond to BT_LISTEN / connected / BT_CONNECT2 / other states.
 */
815 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
817 struct l2cap_conn *conn = chan->conn;
819 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
821 switch (chan->state) {
823 chan->ops->teardown(chan, 0);
828 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
829 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
830 l2cap_send_disconn_req(chan, reason);
832 l2cap_chan_del(chan, reason);
836 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
837 if (conn->hcon->type == ACL_LINK)
838 l2cap_chan_connect_reject(chan);
839 else if (conn->hcon->type == LE_LINK) {
840 switch (chan->mode) {
841 case L2CAP_MODE_LE_FLOWCTL:
842 l2cap_chan_le_connect_reject(chan);
844 case L2CAP_MODE_EXT_FLOWCTL:
845 l2cap_chan_ecred_connect_reject(chan);
851 l2cap_chan_del(chan, reason);
856 l2cap_chan_del(chan, reason);
860 chan->ops->teardown(chan, 0);
864 EXPORT_SYMBOL(l2cap_chan_close);
/* Translate the channel type + requested security level into an HCI
 * authentication requirement. SDP-bound channels are special-cased to
 * never require bonding; raw channels get dedicated bonding. Some case
 * labels are elided in this chunk.
 */
866 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
868 switch (chan->chan_type) {
870 switch (chan->sec_level) {
871 case BT_SECURITY_HIGH:
872 case BT_SECURITY_FIPS:
873 return HCI_AT_DEDICATED_BONDING_MITM;
874 case BT_SECURITY_MEDIUM:
875 return HCI_AT_DEDICATED_BONDING;
877 return HCI_AT_NO_BONDING;
880 case L2CAP_CHAN_CONN_LESS:
881 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
882 if (chan->sec_level == BT_SECURITY_LOW)
883 chan->sec_level = BT_SECURITY_SDP;
885 if (chan->sec_level == BT_SECURITY_HIGH ||
886 chan->sec_level == BT_SECURITY_FIPS)
887 return HCI_AT_NO_BONDING_MITM;
889 return HCI_AT_NO_BONDING;
891 case L2CAP_CHAN_CONN_ORIENTED:
892 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
893 if (chan->sec_level == BT_SECURITY_LOW)
894 chan->sec_level = BT_SECURITY_SDP;
896 if (chan->sec_level == BT_SECURITY_HIGH ||
897 chan->sec_level == BT_SECURITY_FIPS)
898 return HCI_AT_NO_BONDING_MITM;
900 return HCI_AT_NO_BONDING;
/* Default: general bonding, MITM-protected for high/FIPS levels. */
905 switch (chan->sec_level) {
906 case BT_SECURITY_HIGH:
907 case BT_SECURITY_FIPS:
908 return HCI_AT_GENERAL_BONDING_MITM;
909 case BT_SECURITY_MEDIUM:
910 return HCI_AT_GENERAL_BONDING;
912 return HCI_AT_NO_BONDING;
918 /* Service level security */
/* Enforce the channel's security level on the underlying link: SMP for LE
 * links, hci_conn_security() with the derived auth type for BR/EDR.
 */
919 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
921 struct l2cap_conn *conn = chan->conn;
924 if (conn->hcon->type == LE_LINK)
925 return smp_conn_security(conn->hcon, chan->sec_level);
927 auth_type = l2cap_get_auth_type(chan);
929 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
/* Allocate the next signalling identifier under conn->ident_lock; wraps
 * within the kernel's 1-128 range (the wrap/return lines are elided here).
 */
933 static u8 l2cap_get_ident(struct l2cap_conn *conn)
937 /* Get next available identificator.
938 * 1 - 128 are used by kernel.
939 * 129 - 199 are reserved.
940 * 200 - 254 are used by utilities like l2ping, etc.
943 mutex_lock(&conn->ident_lock);
945 if (++conn->tx_ident > 128)
950 mutex_unlock(&conn->ident_lock);
/* Build and transmit a signalling command over the connection's HCI
 * channel at maximum priority; uses NO_FLUSH when the controller supports
 * it or the link is LE. The build-failure early return is elided here.
 */
955 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
958 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
961 BT_DBG("code 0x%2.2x", code);
966 /* Use NO_FLUSH if supported or we have an LE link (which does
967 * not support auto-flushing packets) */
968 if (lmp_no_flush_capable(conn->hcon->hdev) ||
969 conn->hcon->type == LE_LINK)
970 flags = ACL_START_NO_FLUSH;
974 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
975 skb->priority = HCI_PRIO_MAX;
977 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (anything other than the
 * stable or wait-prepare states).
 */
980 static bool __chan_is_moving(struct l2cap_chan *chan)
982 return chan->move_state != L2CAP_MOVE_STABLE &&
983 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data skb for the channel, routing to the high-speed (AMP)
 * link when one is active and not mid-move, otherwise over BR/EDR/LE ACL
 * with appropriate flush flags.
 */
986 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
988 struct hci_conn *hcon = chan->conn->hcon;
991 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
994 if (chan->hs_hcon && !__chan_is_moving(chan)) {
996 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
1003 /* Use NO_FLUSH for LE links (where this is the only option) or
1004 * if the BR/EDR link supports it and flushing has not been
1005 * explicitly requested (through FLAG_FLUSHABLE).
1007 if (hcon->type == LE_LINK ||
1008 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
1009 lmp_no_flush_capable(hcon->hdev)))
1010 flags = ACL_START_NO_FLUSH;
1014 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1015 hci_send_acl(chan->conn->hchan, skb, flags);
1018 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1020 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1021 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1023 if (enh & L2CAP_CTRL_FRAME_TYPE) {
1025 control->sframe = 1;
1026 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1027 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1033 control->sframe = 0;
1034 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1035 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1042 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1044 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1045 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1047 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1049 control->sframe = 1;
1050 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1051 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1057 control->sframe = 0;
1058 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1059 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1066 static inline void __unpack_control(struct l2cap_chan *chan,
1067 struct sk_buff *skb)
1069 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1070 __unpack_extended_control(get_unaligned_le32(skb->data),
1071 &bt_cb(skb)->l2cap);
1072 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1074 __unpack_enhanced_control(get_unaligned_le16(skb->data),
1075 &bt_cb(skb)->l2cap);
1076 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1080 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1084 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1085 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1087 if (control->sframe) {
1088 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1089 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1090 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1092 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1093 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1099 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1103 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1104 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1106 if (control->sframe) {
1107 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1108 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1109 packed |= L2CAP_CTRL_FRAME_TYPE;
1111 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1112 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1118 static inline void __pack_control(struct l2cap_chan *chan,
1119 struct l2cap_ctrl *control,
1120 struct sk_buff *skb)
1122 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1123 put_unaligned_le32(__pack_extended_control(control),
1124 skb->data + L2CAP_HDR_SIZE);
1126 put_unaligned_le16(__pack_enhanced_control(control),
1127 skb->data + L2CAP_HDR_SIZE);
1131 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1133 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1134 return L2CAP_EXT_HDR_SIZE;
1136 return L2CAP_ENH_HDR_SIZE;
1139 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1142 struct sk_buff *skb;
1143 struct l2cap_hdr *lh;
1144 int hlen = __ertm_hdr_size(chan);
1146 if (chan->fcs == L2CAP_FCS_CRC16)
1147 hlen += L2CAP_FCS_SIZE;
1149 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1152 return ERR_PTR(-ENOMEM);
1154 lh = skb_put(skb, L2CAP_HDR_SIZE);
1155 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1156 lh->cid = cpu_to_le16(chan->dcid);
1158 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1159 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1161 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1163 if (chan->fcs == L2CAP_FCS_CRC16) {
1164 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1165 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1168 skb->priority = HCI_PRIO_MAX;
1172 static void l2cap_send_sframe(struct l2cap_chan *chan,
1173 struct l2cap_ctrl *control)
1175 struct sk_buff *skb;
1178 BT_DBG("chan %p, control %p", chan, control);
1180 if (!control->sframe)
1183 if (__chan_is_moving(chan))
1186 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1190 if (control->super == L2CAP_SUPER_RR)
1191 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1192 else if (control->super == L2CAP_SUPER_RNR)
1193 set_bit(CONN_RNR_SENT, &chan->conn_state);
1195 if (control->super != L2CAP_SUPER_SREJ) {
1196 chan->last_acked_seq = control->reqseq;
1197 __clear_ack_timer(chan);
1200 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1201 control->final, control->poll, control->super);
1203 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1204 control_field = __pack_extended_control(control);
1206 control_field = __pack_enhanced_control(control);
1208 skb = l2cap_create_sframe_pdu(chan, control_field);
1210 l2cap_do_send(chan, skb);
1213 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1215 struct l2cap_ctrl control;
1217 BT_DBG("chan %p, poll %d", chan, poll);
1219 memset(&control, 0, sizeof(control));
1221 control.poll = poll;
1223 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1224 control.super = L2CAP_SUPER_RNR;
1226 control.super = L2CAP_SUPER_RR;
1228 control.reqseq = chan->buffer_seq;
1229 l2cap_send_sframe(chan, &control);
1232 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1234 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1237 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Whether this channel may be created on/moved to an AMP controller: both
 * sides must advertise the A2MP fixed channel, at least one powered
 * non-BR/EDR AMP controller must exist, and the channel policy must
 * prefer AMP. Early false returns are elided in this chunk.
 */
1240 static bool __amp_capable(struct l2cap_chan *chan)
1242 struct l2cap_conn *conn = chan->conn;
1243 struct hci_dev *hdev;
1244 bool amp_available = false;
1246 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1249 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1252 read_lock(&hci_dev_list_lock);
1253 list_for_each_entry(hdev, &hci_dev_list, list) {
1254 if (hdev->amp_type != AMP_TYPE_BREDR &&
1255 test_bit(HCI_UP, &hdev->flags)) {
1256 amp_available = true;
1260 read_unlock(&hci_dev_list_lock);
1262 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1263 return amp_available;
/* Validate EFS parameters; the actual checks are elided in this chunk. */
1268 static bool l2cap_check_efs(struct l2cap_chan *chan)
1270 /* Check EFS parameters */
/* Send an L2CAP Connection Request for this channel, recording the ident
 * for response matching and marking the connect as pending.
 */
1274 void l2cap_send_conn_req(struct l2cap_chan *chan)
1276 struct l2cap_conn *conn = chan->conn;
1277 struct l2cap_conn_req req;
1279 req.scid = cpu_to_le16(chan->scid);
1280 req.psm = chan->psm;
1282 chan->ident = l2cap_get_ident(conn);
1284 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1286 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1289 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1291 struct l2cap_create_chan_req req;
1292 req.scid = cpu_to_le16(chan->scid);
1293 req.psm = chan->psm;
1294 req.amp_id = amp_id;
1296 chan->ident = l2cap_get_ident(chan->conn);
1298 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1302 static void l2cap_move_setup(struct l2cap_chan *chan)
1304 struct sk_buff *skb;
1306 BT_DBG("chan %p", chan);
1308 if (chan->mode != L2CAP_MODE_ERTM)
1311 __clear_retrans_timer(chan);
1312 __clear_monitor_timer(chan);
1313 __clear_ack_timer(chan);
1315 chan->retry_count = 0;
1316 skb_queue_walk(&chan->tx_q, skb) {
1317 if (bt_cb(skb)->l2cap.retries)
1318 bt_cb(skb)->l2cap.retries = 1;
1323 chan->expected_tx_seq = chan->buffer_seq;
1325 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1326 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1327 l2cap_seq_list_clear(&chan->retrans_list);
1328 l2cap_seq_list_clear(&chan->srej_list);
1329 skb_queue_purge(&chan->srej_q);
1331 chan->tx_state = L2CAP_TX_STATE_XMIT;
1332 chan->rx_state = L2CAP_RX_STATE_MOVE;
1334 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP channel move: return to the stable state and, for ERTM,
 * resume the rx state machine per the role we played (initiator polls and
 * waits for F-bit; responder waits for P-bit).
 */
1337 static void l2cap_move_done(struct l2cap_chan *chan)
1339 u8 move_role = chan->move_role;
1340 BT_DBG("chan %p", chan);
1342 chan->move_state = L2CAP_MOVE_STABLE;
1343 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1345 if (chan->mode != L2CAP_MODE_ERTM)
1348 switch (move_role) {
1349 case L2CAP_MOVE_ROLE_INITIATOR:
1350 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1351 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1353 case L2CAP_MOVE_ROLE_RESPONDER:
1354 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Mark the channel connected and notify its owner; clears all conf flags
 * and the channel timer. LE/ECRED channels with no tx credits start
 * suspended until credits arrive.
 */
1359 static void l2cap_chan_ready(struct l2cap_chan *chan)
1361 /* The channel may have already been flagged as connected in
1362 * case of receiving data before the L2CAP info req/rsp
1363 * procedure is complete.
1365 if (chan->state == BT_CONNECTED)
1368 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1369 chan->conf_state = 0;
1370 __clear_chan_timer(chan);
1372 switch (chan->mode) {
1373 case L2CAP_MODE_LE_FLOWCTL:
1374 case L2CAP_MODE_EXT_FLOWCTL:
1375 if (!chan->tx_credits)
1376 chan->ops->suspend(chan);
1380 chan->state = BT_CONNECTED;
1382 chan->ops->ready(chan);
/* Send an LE credit-based connection request for @chan.  Sent at most
 * once per channel, guarded by FLAG_LE_CONN_REQ_SENT.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Default the incoming MTU to the link MTU when unset.
	 * NOTE(review): the !chan->imtu guard was reconstructed from the
	 * surrounding structure — confirm against the full file.
	 */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm = chan->psm;
	req.scid = cpu_to_le16(chan->scid);
	req.mtu = cpu_to_le16(chan->imtu);
	req.mps = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
/* Scratch state for building a single enhanced-credit-based (ECRED)
 * connection request that may carry several source CIDs at once.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req req;
		__le16 scid[5];		/* up to 5 CIDs per ECRED request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel initiating the request */
	struct pid *pid;		/* owner PID; only same-PID chans join */
	int count;			/* number of scid[] entries filled */
};
/* __l2cap_chan_list() callback: fold a deferred channel into the ECRED
 * connection request being built in @data, provided it belongs to the
 * same owner (PID) and targets the same PSM as the initiating channel.
 */
static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_conn_data *conn = data;
	struct pid *pid;

	/* Skip the channel that started the request */
	if (chan == conn->chan)
		return;

	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only add deferred channels with the same PID/PSM */
	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	/* Set the same ident so we can match on the rsp */
	chan->ident = conn->chan->ident;

	/* Include all channels deferred */
	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);

	conn->count++;
}
1452 static void l2cap_ecred_connect(struct l2cap_chan *chan)
1454 struct l2cap_conn *conn = chan->conn;
1455 struct l2cap_ecred_conn_data data;
1457 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1460 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1463 l2cap_ecred_init(chan, 0);
1465 memset(&data, 0, sizeof(data));
1466 data.pdu.req.psm = chan->psm;
1467 data.pdu.req.mtu = cpu_to_le16(chan->imtu);
1468 data.pdu.req.mps = cpu_to_le16(chan->mps);
1469 data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1470 data.pdu.scid[0] = cpu_to_le16(chan->scid);
1472 chan->ident = l2cap_get_ident(conn);
1473 data.pid = chan->ops->get_peer_pid(chan);
1477 data.pid = chan->ops->get_peer_pid(chan);
1479 __l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
1481 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1482 sizeof(data.pdu.req) + data.count * sizeof(__le16),
/* Drive LE channel setup once link security permits: channels without a
 * PSM (fixed channels) become ready immediately; otherwise send the
 * appropriate (ECRED or LE credit-based) connection request.
 */
static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	/* Wait for SMP to raise the link to the required security level */
	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT) {
		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
			l2cap_ecred_connect(chan);
		else
			l2cap_le_connect(chan);
	}
}
1506 static void l2cap_start_connection(struct l2cap_chan *chan)
1508 if (__amp_capable(chan)) {
1509 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1510 a2mp_discover_amp(chan);
1511 } else if (chan->conn->hcon->type == LE_LINK) {
1512 l2cap_le_start(chan);
1514 l2cap_send_conn_req(chan);
1518 static void l2cap_request_info(struct l2cap_conn *conn)
1520 struct l2cap_info_req req;
1522 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1525 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1527 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1528 conn->info_ident = l2cap_get_ident(conn);
1530 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1532 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Return true when the link's encryption key size is acceptable for
 * establishing L2CAP channels (or when the link is not encrypted at all).
 */
static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
{
	/* The minimum encryption key size needs to be enforced by the
	 * host stack before establishing any L2CAP connections. The
	 * specification in theory allows a minimum of 1, but to align
	 * BR/EDR and LE transports, a minimum of 7 is chosen.
	 *
	 * This check might also be called for unencrypted connections
	 * that have no key size requirements. Ensure that the link is
	 * actually encrypted before enforcing a key size.
	 */
	int min_key_size = hcon->hdev->min_enc_key_size;

	/* On FIPS security level, key size must be 16 bytes */
	if (hcon->sec_level == BT_SECURITY_FIPS)
		min_key_size = 16;

	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
		hcon->enc_key_size >= min_key_size);
}
/* Advance channel setup on @chan: LE links go through l2cap_le_start();
 * BR/EDR links first require the feature-mask exchange, channel security
 * and an adequate encryption key size before the connection request.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Feature mask unknown: ask for it; the response restarts setup */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	if (l2cap_check_enc_key_size(conn->hcon))
		l2cap_start_connection(chan);
	else
		/* Weak key: arm the disconnect timer instead of connecting */
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
/* Return non-zero when @mode (ERTM or streaming) is supported by both
 * the local stack and the remote feature mask @feat_mask.
 */
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local_feat_mask = l2cap_feat_mask;

	/* NOTE(review): the disable_ertm guard was reconstructed from the
	 * surrounding structure — confirm the module parameter exists.
	 */
	if (!disable_ertm)
		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
	case L2CAP_MODE_STREAMING:
		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
	default:
		return 0x00;
	}
}
/* Initiate disconnection of @chan with error @err: stop ERTM timers,
 * send an L2CAP disconnect request (A2MP channels change state only)
 * and move the channel to BT_DISCONN.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	/* A2MP channels have no disconnect request on the wire */
	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1627 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its connection state machine,
 * typically after the feature-mask exchange or a security event:
 * BT_CONNECT channels are (re)started or closed, BT_CONNECT2 channels
 * get their pending connection response and first configure request.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		/* Fixed/connectionless channels are ready as-is */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close channels that demand a mode neither side
			 * supports (CONF_STATE2_DEVICE means no fallback).
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Userspace must authorize first */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
/* LE link came up: kick off pending pairing and, as peripheral, request
 * a connection parameter update if the current interval is out of range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
/* The underlying link is ready: start setup on every channel attached
 * to @conn and release queued inbound PDUs for processing.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* BR/EDR needs the feature mask before channels can configure */
	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels are managed by the AMP manager, not here */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process PDUs that arrived before the connection was ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
/* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate @err to every channel on @conn that insists on reliable
 * delivery (FLAG_FORCE_RELIABLE).
 */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
/* info_timer expired: treat the feature-mask exchange as done so
 * channel setup can proceed without the peer's reply.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1812 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1813 * callback is called during registration. The ->remove callback is called
1814 * during unregistration.
1815 * An l2cap_user object can either be explicitly unregistered or when the
1816 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1817 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1818 * External modules must own a reference to the l2cap_conn object if they intend
1819 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1820 * any time if they don't.
/* Register @user on @conn, invoking user->probe() under hci_dev lock.
 * Returns 0 on success, -EINVAL if already registered, -ENODEV if the
 * connection is already being torn down, or probe()'s error.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
/* Unregister @user from @conn and invoke its ->remove() callback.
 * Safe to call for a user that was never (or already un-) registered.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (list_empty(&user->list))
		goto out_unlock;

	list_del_init(&user->list);
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1878 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1880 struct l2cap_user *user;
1882 while (!list_empty(&conn->users)) {
1883 user = list_first_entry(&conn->users, struct l2cap_user, list);
1884 list_del_init(&user->list);
1885 user->remove(conn, user);
/* Tear down the L2CAP layer on @hcon with error @err: cancel deferred
 * work, detach users, close every channel and drop the conn reference.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill every channel; hold each so it survives its own close */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* NULL hchan marks the conn as unregistered for l2cap_register_user */
	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
/* kref release callback: drop the hci_conn reference and free @conn */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
/* Take a reference on @conn; returns @conn so calls can be chained */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference on @conn; frees it via l2cap_conn_free() on last put */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1966 /* ---- Socket interface ---- */
1968 /* Find socket with psm and source / destination bdaddr.
1969 * Returns closest match.
/* Find a global channel matching @state/@psm and the address pair,
 * preferring an exact src/dst match and falling back to the closest
 * wildcard (BDADDR_ANY) match.  The returned channel is referenced via
 * l2cap_chan_hold_unless_zero(); may return NULL.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* Transport must match the channel's source address type */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
/* ERTM monitor timer: feed L2CAP_EV_MONITOR_TO into the TX state
 * machine; the timer owner's channel reference is dropped on exit.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel already detached from its connection: nothing to do */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
/* ERTM retransmission timer: feed L2CAP_EV_RETRANS_TO into the TX state
 * machine; the timer owner's channel reference is dropped on exit.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Channel already detached from its connection: nothing to do */
	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
/* Streaming mode TX: append @skbs to the transmit queue and push every
 * queued frame out immediately (no retransmission bookkeeping).
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Do not transmit while a channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
/* ERTM mode TX: transmit I-frames from tx_send_head while the remote TX
 * window has room.  Each frame is cloned so the original stays queued
 * for possible retransmission.  Returns the number of frames sent, or
 * a negative error when the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Piggyback an ack for everything received so far */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
/* Retransmit every sequence number currently queued on retrans_list.
 * Disconnects the channel when a frame exceeds the max_tx retry limit.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
/* Queue the single frame named by control->reqseq for retransmission
 * and flush the retransmit list immediately.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
/* Retransmit every unacked frame starting at control->reqseq (typically
 * in response to a REJ or a poll from the peer).
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll demands a final bit in our response */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Seek to the first frame to resend */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything up to (not including) tx_send_head */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
/* Acknowledge received I-frames: send RNR when locally busy, otherwise
 * try to piggyback the ack on outgoing data, send an RR once the ack
 * window is 3/4 full, or defer via the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Anything still unacked will be acked when the timer fires */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
/* Copy @len bytes of user data from @msg into @skb, allocating MTU-sized
 * continuation fragments on skb's frag_list as needed.  @count bytes go
 * into the head skb.  Returns bytes copied or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
					 &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Keep the head skb's accounting in sync with the fragment */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
/* Build a connectionless (G-frame) PDU: L2CAP header + PSM + payload
 * copied from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
/* Build a basic-mode (B-frame) PDU: L2CAP header + payload copied from
 * @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
/* Build an ERTM/streaming I-frame PDU: L2CAP header, placeholder control
 * field (filled at transmit time), optional SDU length (@sdulen != 0 for
 * the first segment) and payload from @msg.  Returns skb or ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
/* Segment an outgoing SDU of @len bytes into I-frame PDUs on @seg_queue,
 * tagging each with the proper SAR value.  Returns 0 or a negative errno
 * (the queue is purged on failure).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections.
	 * NOTE(review): the !chan->hs_hcon guard was reconstructed —
	 * confirm against the full file.
	 */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
/* Build an LE credit-based K-frame PDU: L2CAP header, optional SDU
 * length (@sdulen != 0 only on the first segment) and payload from @msg.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
/* Segment an outgoing SDU into LE credit-based PDUs on @seg_queue; the
 * first PDU carries the SDU length, later ones gain its 2 bytes of
 * payload room.  Returns 0 or a negative errno (queue purged on error).
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* Only the first PDU carries the SDU length field */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
/* Transmit queued LE credit-based frames while credits remain; one
 * credit is consumed per frame sent.
 */
static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
{
	int sent = 0;

	BT_DBG("chan %p", chan);

	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
		chan->tx_credits--;
		sent++;
	}

	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
	       skb_queue_len(&chan->tx_q));
}
/* Send @len bytes from @msg on @chan, dispatching on channel type and
 * mode (connectionless, LE/extended flow control, basic, ERTM or
 * streaming).  Returns the number of bytes queued/sent or a negative
 * errno (-EMSGSIZE when @len exceeds the outgoing MTU).
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have dropped while allocating memory */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: block writers until the peer grants more */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
/* Send an SREJ S-frame for every missing sequence number between the
 * next expected one and @txseq, tracking each request on srej_list.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Only request frames not already buffered out of order */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
/* Re-send an SREJ for the most recently requested (tail) sequence
 * number, if any requests are outstanding.
 */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
}
/* Re-send SREJs for every outstanding request on srej_list up to (but
 * not including) @txseq, rotating entries back so the list survives.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Rotate back onto the list for future re-requests */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
/* Process a received acknowledgement: free every transmitted frame up
 * to (but not including) @reqseq and stop the retransmission timer once
 * nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack repeats what we already know */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2869 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2871 BT_DBG("chan %p", chan);
2873 chan->expected_tx_seq = chan->buffer_seq;
2874 l2cap_seq_list_clear(&chan->srej_list);
2875 skb_queue_purge(&chan->srej_q);
2876 chan->rx_state = L2CAP_RX_STATE_RECV;
2879 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2880 struct l2cap_ctrl *control,
2881 struct sk_buff_head *skbs, u8 event)
2883 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2887 case L2CAP_EV_DATA_REQUEST:
2888 if (chan->tx_send_head == NULL)
2889 chan->tx_send_head = skb_peek(skbs);
2891 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2892 l2cap_ertm_send(chan);
2894 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2895 BT_DBG("Enter LOCAL_BUSY");
2896 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2898 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2899 /* The SREJ_SENT state must be aborted if we are to
2900 * enter the LOCAL_BUSY state.
2902 l2cap_abort_rx_srej_sent(chan);
2905 l2cap_send_ack(chan);
2908 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2909 BT_DBG("Exit LOCAL_BUSY");
2910 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2912 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2913 struct l2cap_ctrl local_control;
2915 memset(&local_control, 0, sizeof(local_control));
2916 local_control.sframe = 1;
2917 local_control.super = L2CAP_SUPER_RR;
2918 local_control.poll = 1;
2919 local_control.reqseq = chan->buffer_seq;
2920 l2cap_send_sframe(chan, &local_control);
2922 chan->retry_count = 1;
2923 __set_monitor_timer(chan);
2924 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2927 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2928 l2cap_process_reqseq(chan, control->reqseq);
2930 case L2CAP_EV_EXPLICIT_POLL:
2931 l2cap_send_rr_or_rnr(chan, 1);
2932 chan->retry_count = 1;
2933 __set_monitor_timer(chan);
2934 __clear_ack_timer(chan);
2935 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2937 case L2CAP_EV_RETRANS_TO:
2938 l2cap_send_rr_or_rnr(chan, 1);
2939 chan->retry_count = 1;
2940 __set_monitor_timer(chan);
2941 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2943 case L2CAP_EV_RECV_FBIT:
2944 /* Nothing to process */
2951 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2952 struct l2cap_ctrl *control,
2953 struct sk_buff_head *skbs, u8 event)
2955 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2959 case L2CAP_EV_DATA_REQUEST:
2960 if (chan->tx_send_head == NULL)
2961 chan->tx_send_head = skb_peek(skbs);
2962 /* Queue data, but don't send. */
2963 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2965 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2966 BT_DBG("Enter LOCAL_BUSY");
2967 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2969 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2970 /* The SREJ_SENT state must be aborted if we are to
2971 * enter the LOCAL_BUSY state.
2973 l2cap_abort_rx_srej_sent(chan);
2976 l2cap_send_ack(chan);
2979 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2980 BT_DBG("Exit LOCAL_BUSY");
2981 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2983 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2984 struct l2cap_ctrl local_control;
2985 memset(&local_control, 0, sizeof(local_control));
2986 local_control.sframe = 1;
2987 local_control.super = L2CAP_SUPER_RR;
2988 local_control.poll = 1;
2989 local_control.reqseq = chan->buffer_seq;
2990 l2cap_send_sframe(chan, &local_control);
2992 chan->retry_count = 1;
2993 __set_monitor_timer(chan);
2994 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2997 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2998 l2cap_process_reqseq(chan, control->reqseq);
3001 case L2CAP_EV_RECV_FBIT:
3002 if (control && control->final) {
3003 __clear_monitor_timer(chan);
3004 if (chan->unacked_frames > 0)
3005 __set_retrans_timer(chan);
3006 chan->retry_count = 0;
3007 chan->tx_state = L2CAP_TX_STATE_XMIT;
3008 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
3011 case L2CAP_EV_EXPLICIT_POLL:
3014 case L2CAP_EV_MONITOR_TO:
3015 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3016 l2cap_send_rr_or_rnr(chan, 1);
3017 __set_monitor_timer(chan);
3018 chan->retry_count++;
3020 l2cap_send_disconn_req(chan, ECONNABORTED);
3028 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3029 struct sk_buff_head *skbs, u8 event)
3031 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3032 chan, control, skbs, event, chan->tx_state);
3034 switch (chan->tx_state) {
3035 case L2CAP_TX_STATE_XMIT:
3036 l2cap_tx_state_xmit(chan, control, skbs, event);
3038 case L2CAP_TX_STATE_WAIT_F:
3039 l2cap_tx_state_wait_f(chan, control, skbs, event);
3047 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3048 struct l2cap_ctrl *control)
3050 BT_DBG("chan %p, control %p", chan, control);
3051 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
3054 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3055 struct l2cap_ctrl *control)
3057 BT_DBG("chan %p, control %p", chan, control);
3058 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3061 /* Copy frame to all raw sockets on that connection */
3062 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3064 struct sk_buff *nskb;
3065 struct l2cap_chan *chan;
3067 BT_DBG("conn %p", conn);
3069 mutex_lock(&conn->chan_lock);
3071 list_for_each_entry(chan, &conn->chan_l, list) {
3072 if (chan->chan_type != L2CAP_CHAN_RAW)
3075 /* Don't send frame to the channel it came from */
3076 if (bt_cb(skb)->l2cap.chan == chan)
3079 nskb = skb_clone(skb, GFP_KERNEL);
3082 if (chan->ops->recv(chan, nskb))
3086 mutex_unlock(&conn->chan_lock);
3089 /* ---- L2CAP signalling commands ---- */
3090 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3091 u8 ident, u16 dlen, void *data)
3093 struct sk_buff *skb, **frag;
3094 struct l2cap_cmd_hdr *cmd;
3095 struct l2cap_hdr *lh;
3098 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3099 conn, code, ident, dlen);
3101 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3104 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3105 count = min_t(unsigned int, conn->mtu, len);
3107 skb = bt_skb_alloc(count, GFP_KERNEL);
3111 lh = skb_put(skb, L2CAP_HDR_SIZE);
3112 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3114 if (conn->hcon->type == LE_LINK)
3115 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3117 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3119 cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3122 cmd->len = cpu_to_le16(dlen);
3125 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3126 skb_put_data(skb, data, count);
3132 /* Continuation fragments (no L2CAP header) */
3133 frag = &skb_shinfo(skb)->frag_list;
3135 count = min_t(unsigned int, conn->mtu, len);
3137 *frag = bt_skb_alloc(count, GFP_KERNEL);
3141 skb_put_data(*frag, data, count);
3146 frag = &(*frag)->next;
3156 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3159 struct l2cap_conf_opt *opt = *ptr;
3162 len = L2CAP_CONF_OPT_SIZE + opt->len;
3170 *val = *((u8 *) opt->val);
3174 *val = get_unaligned_le16(opt->val);
3178 *val = get_unaligned_le32(opt->val);
3182 *val = (unsigned long) opt->val;
3186 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3190 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3192 struct l2cap_conf_opt *opt = *ptr;
3194 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3196 if (size < L2CAP_CONF_OPT_SIZE + len)
3204 *((u8 *) opt->val) = val;
3208 put_unaligned_le16(val, opt->val);
3212 put_unaligned_le32(val, opt->val);
3216 memcpy(opt->val, (void *) val, len);
3220 *ptr += L2CAP_CONF_OPT_SIZE + len;
3223 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3225 struct l2cap_conf_efs efs;
3227 switch (chan->mode) {
3228 case L2CAP_MODE_ERTM:
3229 efs.id = chan->local_id;
3230 efs.stype = chan->local_stype;
3231 efs.msdu = cpu_to_le16(chan->local_msdu);
3232 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3233 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3234 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3237 case L2CAP_MODE_STREAMING:
3239 efs.stype = L2CAP_SERV_BESTEFFORT;
3240 efs.msdu = cpu_to_le16(chan->local_msdu);
3241 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3250 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3251 (unsigned long) &efs, size);
3254 static void l2cap_ack_timeout(struct work_struct *work)
3256 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3260 BT_DBG("chan %p", chan);
3262 l2cap_chan_lock(chan);
3264 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3265 chan->last_acked_seq);
3268 l2cap_send_rr_or_rnr(chan, 0);
3270 l2cap_chan_unlock(chan);
3271 l2cap_chan_put(chan);
3274 int l2cap_ertm_init(struct l2cap_chan *chan)
3278 chan->next_tx_seq = 0;
3279 chan->expected_tx_seq = 0;
3280 chan->expected_ack_seq = 0;
3281 chan->unacked_frames = 0;
3282 chan->buffer_seq = 0;
3283 chan->frames_sent = 0;
3284 chan->last_acked_seq = 0;
3286 chan->sdu_last_frag = NULL;
3289 skb_queue_head_init(&chan->tx_q);
3291 chan->local_amp_id = AMP_ID_BREDR;
3292 chan->move_id = AMP_ID_BREDR;
3293 chan->move_state = L2CAP_MOVE_STABLE;
3294 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3296 if (chan->mode != L2CAP_MODE_ERTM)
3299 chan->rx_state = L2CAP_RX_STATE_RECV;
3300 chan->tx_state = L2CAP_TX_STATE_XMIT;
3302 skb_queue_head_init(&chan->srej_q);
3304 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3308 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3310 l2cap_seq_list_free(&chan->srej_list);
3315 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3318 case L2CAP_MODE_STREAMING:
3319 case L2CAP_MODE_ERTM:
3320 if (l2cap_mode_supported(mode, remote_feat_mask))
3324 return L2CAP_MODE_BASIC;
3328 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3330 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3331 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3334 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3336 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3337 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3340 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3341 struct l2cap_conf_rfc *rfc)
3343 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3344 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3346 /* Class 1 devices have must have ERTM timeouts
3347 * exceeding the Link Supervision Timeout. The
3348 * default Link Supervision Timeout for AMP
3349 * controllers is 10 seconds.
3351 * Class 1 devices use 0xffffffff for their
3352 * best-effort flush timeout, so the clamping logic
3353 * will result in a timeout that meets the above
3354 * requirement. ERTM timeouts are 16-bit values, so
3355 * the maximum timeout is 65.535 seconds.
3358 /* Convert timeout to milliseconds and round */
3359 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3361 /* This is the recommended formula for class 2 devices
3362 * that start ERTM timers when packets are sent to the
3365 ertm_to = 3 * ertm_to + 500;
3367 if (ertm_to > 0xffff)
3370 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3371 rfc->monitor_timeout = rfc->retrans_timeout;
3373 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3374 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3378 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3380 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3381 __l2cap_ews_supported(chan->conn)) {
3382 /* use extended control field */
3383 set_bit(FLAG_EXT_CTRL, &chan->flags);
3384 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3386 chan->tx_win = min_t(u16, chan->tx_win,
3387 L2CAP_DEFAULT_TX_WINDOW);
3388 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3390 chan->ack_win = chan->tx_win;
3393 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3395 struct hci_conn *conn = chan->conn->hcon;
3397 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3399 /* The 2-DH1 packet has between 2 and 56 information bytes
3400 * (including the 2-byte payload header)
3402 if (!(conn->pkt_type & HCI_2DH1))
3405 /* The 3-DH1 packet has between 2 and 85 information bytes
3406 * (including the 2-byte payload header)
3408 if (!(conn->pkt_type & HCI_3DH1))
3411 /* The 2-DH3 packet has between 2 and 369 information bytes
3412 * (including the 2-byte payload header)
3414 if (!(conn->pkt_type & HCI_2DH3))
3417 /* The 3-DH3 packet has between 2 and 554 information bytes
3418 * (including the 2-byte payload header)
3420 if (!(conn->pkt_type & HCI_3DH3))
3423 /* The 2-DH5 packet has between 2 and 681 information bytes
3424 * (including the 2-byte payload header)
3426 if (!(conn->pkt_type & HCI_2DH5))
3429 /* The 3-DH5 packet has between 2 and 1023 information bytes
3430 * (including the 2-byte payload header)
3432 if (!(conn->pkt_type & HCI_3DH5))
3436 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3438 struct l2cap_conf_req *req = data;
3439 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3440 void *ptr = req->data;
3441 void *endptr = data + data_size;
3444 BT_DBG("chan %p", chan);
3446 if (chan->num_conf_req || chan->num_conf_rsp)
3449 switch (chan->mode) {
3450 case L2CAP_MODE_STREAMING:
3451 case L2CAP_MODE_ERTM:
3452 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3455 if (__l2cap_efs_supported(chan->conn))
3456 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3460 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3465 if (chan->imtu != L2CAP_DEFAULT_MTU) {
3467 l2cap_mtu_auto(chan);
3468 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3472 switch (chan->mode) {
3473 case L2CAP_MODE_BASIC:
3477 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3478 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3481 rfc.mode = L2CAP_MODE_BASIC;
3483 rfc.max_transmit = 0;
3484 rfc.retrans_timeout = 0;
3485 rfc.monitor_timeout = 0;
3486 rfc.max_pdu_size = 0;
3488 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3489 (unsigned long) &rfc, endptr - ptr);
3492 case L2CAP_MODE_ERTM:
3493 rfc.mode = L2CAP_MODE_ERTM;
3494 rfc.max_transmit = chan->max_tx;
3496 __l2cap_set_ertm_timeouts(chan, &rfc);
3498 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3499 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3501 rfc.max_pdu_size = cpu_to_le16(size);
3503 l2cap_txwin_setup(chan);
3505 rfc.txwin_size = min_t(u16, chan->tx_win,
3506 L2CAP_DEFAULT_TX_WINDOW);
3508 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3509 (unsigned long) &rfc, endptr - ptr);
3511 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3512 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3514 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3515 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3516 chan->tx_win, endptr - ptr);
3518 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3519 if (chan->fcs == L2CAP_FCS_NONE ||
3520 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3521 chan->fcs = L2CAP_FCS_NONE;
3522 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3523 chan->fcs, endptr - ptr);
3527 case L2CAP_MODE_STREAMING:
3528 l2cap_txwin_setup(chan);
3529 rfc.mode = L2CAP_MODE_STREAMING;
3531 rfc.max_transmit = 0;
3532 rfc.retrans_timeout = 0;
3533 rfc.monitor_timeout = 0;
3535 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3536 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3538 rfc.max_pdu_size = cpu_to_le16(size);
3540 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3541 (unsigned long) &rfc, endptr - ptr);
3543 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3544 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3546 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3547 if (chan->fcs == L2CAP_FCS_NONE ||
3548 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3549 chan->fcs = L2CAP_FCS_NONE;
3550 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3551 chan->fcs, endptr - ptr);
3556 req->dcid = cpu_to_le16(chan->dcid);
3557 req->flags = cpu_to_le16(0);
3562 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3564 struct l2cap_conf_rsp *rsp = data;
3565 void *ptr = rsp->data;
3566 void *endptr = data + data_size;
3567 void *req = chan->conf_req;
3568 int len = chan->conf_len;
3569 int type, hint, olen;
3571 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3572 struct l2cap_conf_efs efs;
3574 u16 mtu = L2CAP_DEFAULT_MTU;
3575 u16 result = L2CAP_CONF_SUCCESS;
3578 BT_DBG("chan %p", chan);
3580 while (len >= L2CAP_CONF_OPT_SIZE) {
3581 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3585 hint = type & L2CAP_CONF_HINT;
3586 type &= L2CAP_CONF_MASK;
3589 case L2CAP_CONF_MTU:
3595 case L2CAP_CONF_FLUSH_TO:
3598 chan->flush_to = val;
3601 case L2CAP_CONF_QOS:
3604 case L2CAP_CONF_RFC:
3605 if (olen != sizeof(rfc))
3607 memcpy(&rfc, (void *) val, olen);
3610 case L2CAP_CONF_FCS:
3613 if (val == L2CAP_FCS_NONE)
3614 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3617 case L2CAP_CONF_EFS:
3618 if (olen != sizeof(efs))
3621 memcpy(&efs, (void *) val, olen);
3624 case L2CAP_CONF_EWS:
3627 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3628 return -ECONNREFUSED;
3629 set_bit(FLAG_EXT_CTRL, &chan->flags);
3630 set_bit(CONF_EWS_RECV, &chan->conf_state);
3631 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3632 chan->remote_tx_win = val;
3638 result = L2CAP_CONF_UNKNOWN;
3639 l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
3644 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3647 switch (chan->mode) {
3648 case L2CAP_MODE_STREAMING:
3649 case L2CAP_MODE_ERTM:
3650 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3651 chan->mode = l2cap_select_mode(rfc.mode,
3652 chan->conn->feat_mask);
3657 if (__l2cap_efs_supported(chan->conn))
3658 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3660 return -ECONNREFUSED;
3663 if (chan->mode != rfc.mode)
3664 return -ECONNREFUSED;
3670 if (chan->mode != rfc.mode) {
3671 result = L2CAP_CONF_UNACCEPT;
3672 rfc.mode = chan->mode;
3674 if (chan->num_conf_rsp == 1)
3675 return -ECONNREFUSED;
3677 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3678 (unsigned long) &rfc, endptr - ptr);
3681 if (result == L2CAP_CONF_SUCCESS) {
3682 /* Configure output options and let the other side know
3683 * which ones we don't like. */
3685 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3686 result = L2CAP_CONF_UNACCEPT;
3689 set_bit(CONF_MTU_DONE, &chan->conf_state);
3691 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3694 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3695 efs.stype != L2CAP_SERV_NOTRAFIC &&
3696 efs.stype != chan->local_stype) {
3698 result = L2CAP_CONF_UNACCEPT;
3700 if (chan->num_conf_req >= 1)
3701 return -ECONNREFUSED;
3703 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3705 (unsigned long) &efs, endptr - ptr);
3707 /* Send PENDING Conf Rsp */
3708 result = L2CAP_CONF_PENDING;
3709 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3714 case L2CAP_MODE_BASIC:
3715 chan->fcs = L2CAP_FCS_NONE;
3716 set_bit(CONF_MODE_DONE, &chan->conf_state);
3719 case L2CAP_MODE_ERTM:
3720 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3721 chan->remote_tx_win = rfc.txwin_size;
3723 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3725 chan->remote_max_tx = rfc.max_transmit;
3727 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3728 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3729 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3730 rfc.max_pdu_size = cpu_to_le16(size);
3731 chan->remote_mps = size;
3733 __l2cap_set_ertm_timeouts(chan, &rfc);
3735 set_bit(CONF_MODE_DONE, &chan->conf_state);
3737 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3738 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3741 test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3742 chan->remote_id = efs.id;
3743 chan->remote_stype = efs.stype;
3744 chan->remote_msdu = le16_to_cpu(efs.msdu);
3745 chan->remote_flush_to =
3746 le32_to_cpu(efs.flush_to);
3747 chan->remote_acc_lat =
3748 le32_to_cpu(efs.acc_lat);
3749 chan->remote_sdu_itime =
3750 le32_to_cpu(efs.sdu_itime);
3751 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3753 (unsigned long) &efs, endptr - ptr);
3757 case L2CAP_MODE_STREAMING:
3758 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3759 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3760 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3761 rfc.max_pdu_size = cpu_to_le16(size);
3762 chan->remote_mps = size;
3764 set_bit(CONF_MODE_DONE, &chan->conf_state);
3766 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3767 (unsigned long) &rfc, endptr - ptr);
3772 result = L2CAP_CONF_UNACCEPT;
3774 memset(&rfc, 0, sizeof(rfc));
3775 rfc.mode = chan->mode;
3778 if (result == L2CAP_CONF_SUCCESS)
3779 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3781 rsp->scid = cpu_to_le16(chan->dcid);
3782 rsp->result = cpu_to_le16(result);
3783 rsp->flags = cpu_to_le16(0);
3788 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3789 void *data, size_t size, u16 *result)
3791 struct l2cap_conf_req *req = data;
3792 void *ptr = req->data;
3793 void *endptr = data + size;
3796 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3797 struct l2cap_conf_efs efs;
3799 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3801 while (len >= L2CAP_CONF_OPT_SIZE) {
3802 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3807 case L2CAP_CONF_MTU:
3810 if (val < L2CAP_DEFAULT_MIN_MTU) {
3811 *result = L2CAP_CONF_UNACCEPT;
3812 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3815 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3819 case L2CAP_CONF_FLUSH_TO:
3822 chan->flush_to = val;
3823 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3824 chan->flush_to, endptr - ptr);
3827 case L2CAP_CONF_RFC:
3828 if (olen != sizeof(rfc))
3830 memcpy(&rfc, (void *)val, olen);
3831 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3832 rfc.mode != chan->mode)
3833 return -ECONNREFUSED;
3835 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3836 (unsigned long) &rfc, endptr - ptr);
3839 case L2CAP_CONF_EWS:
3842 chan->ack_win = min_t(u16, val, chan->ack_win);
3843 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3844 chan->tx_win, endptr - ptr);
3847 case L2CAP_CONF_EFS:
3848 if (olen != sizeof(efs))
3850 memcpy(&efs, (void *)val, olen);
3851 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3852 efs.stype != L2CAP_SERV_NOTRAFIC &&
3853 efs.stype != chan->local_stype)
3854 return -ECONNREFUSED;
3855 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3856 (unsigned long) &efs, endptr - ptr);
3859 case L2CAP_CONF_FCS:
3862 if (*result == L2CAP_CONF_PENDING)
3863 if (val == L2CAP_FCS_NONE)
3864 set_bit(CONF_RECV_NO_FCS,
3870 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3871 return -ECONNREFUSED;
3873 chan->mode = rfc.mode;
3875 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3877 case L2CAP_MODE_ERTM:
3878 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3879 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3880 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3881 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3882 chan->ack_win = min_t(u16, chan->ack_win,
3885 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3886 chan->local_msdu = le16_to_cpu(efs.msdu);
3887 chan->local_sdu_itime =
3888 le32_to_cpu(efs.sdu_itime);
3889 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3890 chan->local_flush_to =
3891 le32_to_cpu(efs.flush_to);
3895 case L2CAP_MODE_STREAMING:
3896 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3900 req->dcid = cpu_to_le16(chan->dcid);
3901 req->flags = cpu_to_le16(0);
3906 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3907 u16 result, u16 flags)
3909 struct l2cap_conf_rsp *rsp = data;
3910 void *ptr = rsp->data;
3912 BT_DBG("chan %p", chan);
3914 rsp->scid = cpu_to_le16(chan->dcid);
3915 rsp->result = cpu_to_le16(result);
3916 rsp->flags = cpu_to_le16(flags);
3921 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3923 struct l2cap_le_conn_rsp rsp;
3924 struct l2cap_conn *conn = chan->conn;
3926 BT_DBG("chan %p", chan);
3928 rsp.dcid = cpu_to_le16(chan->scid);
3929 rsp.mtu = cpu_to_le16(chan->imtu);
3930 rsp.mps = cpu_to_le16(chan->mps);
3931 rsp.credits = cpu_to_le16(chan->rx_credits);
3932 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3934 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Iterator callback: count channels (sharing one ident) that are still
 * pending accept; flag refusal for any channel in another state.
 * @data points to an int accumulator.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	switch (chan->state) {
	case BT_CONNECT2:
		/* If channel still pending accept add to result */
		(*result)++;
		break;
	default:
		/* If not connected or pending accept it has been refused */
		*result = -ECONNREFUSED;
		break;
	}
}
3959 struct l2cap_ecred_rsp_data {
3961 struct l2cap_ecred_conn_rsp rsp;
3962 __le16 scid[L2CAP_ECRED_MAX_CID];
3967 static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
3969 struct l2cap_ecred_rsp_data *rsp = data;
3971 if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
3974 /* Reset ident so only one response is sent */
3977 /* Include all channels pending with the same ident */
3978 if (!rsp->pdu.rsp.result)
3979 rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid);
3981 l2cap_chan_del(chan, ECONNRESET);
3984 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3986 struct l2cap_conn *conn = chan->conn;
3987 struct l2cap_ecred_rsp_data data;
3988 u16 id = chan->ident;
3994 BT_DBG("chan %p id %d", chan, id);
3996 memset(&data, 0, sizeof(data));
3998 data.pdu.rsp.mtu = cpu_to_le16(chan->imtu);
3999 data.pdu.rsp.mps = cpu_to_le16(chan->mps);
4000 data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
4001 data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
4003 /* Verify that all channels are ready */
4004 __l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);
4010 data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);
4012 /* Build response */
4013 __l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);
4015 l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
4016 sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
4020 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
4022 struct l2cap_conn_rsp rsp;
4023 struct l2cap_conn *conn = chan->conn;
4027 rsp.scid = cpu_to_le16(chan->dcid);
4028 rsp.dcid = cpu_to_le16(chan->scid);
4029 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4030 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4033 rsp_code = L2CAP_CREATE_CHAN_RSP;
4035 rsp_code = L2CAP_CONN_RSP;
4037 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
4039 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
4041 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4044 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4045 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4046 chan->num_conf_req++;
4049 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
4053 /* Use sane default values in case a misbehaving remote device
4054 * did not send an RFC or extended window size option.
4056 u16 txwin_ext = chan->ack_win;
4057 struct l2cap_conf_rfc rfc = {
4059 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
4060 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
4061 .max_pdu_size = cpu_to_le16(chan->imtu),
4062 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
4065 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
4067 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
4070 while (len >= L2CAP_CONF_OPT_SIZE) {
4071 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
4076 case L2CAP_CONF_RFC:
4077 if (olen != sizeof(rfc))
4079 memcpy(&rfc, (void *)val, olen);
4081 case L2CAP_CONF_EWS:
4090 case L2CAP_MODE_ERTM:
4091 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
4092 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
4093 chan->mps = le16_to_cpu(rfc.max_pdu_size);
4094 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4095 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
4097 chan->ack_win = min_t(u16, chan->ack_win,
4100 case L2CAP_MODE_STREAMING:
4101 chan->mps = le16_to_cpu(rfc.max_pdu_size);
4105 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4106 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4109 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4111 if (cmd_len < sizeof(*rej))
4114 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4117 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4118 cmd->ident == conn->info_ident) {
4119 cancel_delayed_work(&conn->info_timer);
4121 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4122 conn->info_ident = 0;
4124 l2cap_conn_start(conn);
4130 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
4131 struct l2cap_cmd_hdr *cmd,
4132 u8 *data, u8 rsp_code, u8 amp_id)
4134 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
4135 struct l2cap_conn_rsp rsp;
4136 struct l2cap_chan *chan = NULL, *pchan;
4137 int result, status = L2CAP_CS_NO_INFO;
4139 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
4140 __le16 psm = req->psm;
4142 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
4144 /* Check if we have socket listening on psm */
4145 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
4146 &conn->hcon->dst, ACL_LINK);
4148 result = L2CAP_CR_BAD_PSM;
4152 mutex_lock(&conn->chan_lock);
4153 l2cap_chan_lock(pchan);
4155 /* Check if the ACL is secure enough (if not SDP) */
4156 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
4157 !hci_conn_check_link_mode(conn->hcon)) {
4158 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
4159 result = L2CAP_CR_SEC_BLOCK;
4163 result = L2CAP_CR_NO_MEM;
4165 /* Check for valid dynamic CID range (as per Erratum 3253) */
4166 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
4167 result = L2CAP_CR_INVALID_SCID;
4171 /* Check if we already have channel with that dcid */
4172 if (__l2cap_get_chan_by_dcid(conn, scid)) {
4173 result = L2CAP_CR_SCID_IN_USE;
4177 chan = pchan->ops->new_connection(pchan);
4181 /* For certain devices (ex: HID mouse), support for authentication,
4182 * pairing and bonding is optional. For such devices, inorder to avoid
4183 * the ACL alive for too long after L2CAP disconnection, reset the ACL
4184 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4186 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4188 bacpy(&chan->src, &conn->hcon->src);
4189 bacpy(&chan->dst, &conn->hcon->dst);
4190 chan->src_type = bdaddr_src_type(conn->hcon);
4191 chan->dst_type = bdaddr_dst_type(conn->hcon);
4194 chan->local_amp_id = amp_id;
4196 __l2cap_chan_add(conn, chan);
4200 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4202 chan->ident = cmd->ident;
4204 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4205 if (l2cap_chan_check_security(chan, false)) {
4206 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4207 l2cap_state_change(chan, BT_CONNECT2);
4208 result = L2CAP_CR_PEND;
4209 status = L2CAP_CS_AUTHOR_PEND;
4210 chan->ops->defer(chan);
4212 /* Force pending result for AMP controllers.
4213 * The connection will succeed after the
4214 * physical link is up.
4216 if (amp_id == AMP_ID_BREDR) {
4217 l2cap_state_change(chan, BT_CONFIG);
4218 result = L2CAP_CR_SUCCESS;
4220 l2cap_state_change(chan, BT_CONNECT2);
4221 result = L2CAP_CR_PEND;
4223 status = L2CAP_CS_NO_INFO;
4226 l2cap_state_change(chan, BT_CONNECT2);
4227 result = L2CAP_CR_PEND;
4228 status = L2CAP_CS_AUTHEN_PEND;
4231 l2cap_state_change(chan, BT_CONNECT2);
4232 result = L2CAP_CR_PEND;
4233 status = L2CAP_CS_NO_INFO;
4237 l2cap_chan_unlock(pchan);
4238 mutex_unlock(&conn->chan_lock);
4239 l2cap_chan_put(pchan);
4242 rsp.scid = cpu_to_le16(scid);
4243 rsp.dcid = cpu_to_le16(dcid);
4244 rsp.result = cpu_to_le16(result);
4245 rsp.status = cpu_to_le16(status);
4246 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
4248 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4249 struct l2cap_info_req info;
4250 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4252 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4253 conn->info_ident = l2cap_get_ident(conn);
4255 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4257 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4258 sizeof(info), &info);
4261 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4262 result == L2CAP_CR_SUCCESS) {
4264 set_bit(CONF_REQ_SENT, &chan->conf_state);
4265 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4266 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4267 chan->num_conf_req++;
4273 static int l2cap_connect_req(struct l2cap_conn *conn,
4274 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4276 struct hci_dev *hdev = conn->hcon->hdev;
4277 struct hci_conn *hcon = conn->hcon;
4279 if (cmd_len < sizeof(struct l2cap_conn_req))
4283 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4284 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4285 mgmt_device_connected(hdev, hcon, NULL, 0);
4286 hci_dev_unlock(hdev);
4288 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4292 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4293 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4296 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4297 u16 scid, dcid, result, status;
4298 struct l2cap_chan *chan;
4302 if (cmd_len < sizeof(*rsp))
4305 scid = __le16_to_cpu(rsp->scid);
4306 dcid = __le16_to_cpu(rsp->dcid);
4307 result = __le16_to_cpu(rsp->result);
4308 status = __le16_to_cpu(rsp->status);
4310 if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
4311 dcid > L2CAP_CID_DYN_END))
4314 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4315 dcid, scid, result, status);
4317 mutex_lock(&conn->chan_lock);
4320 chan = __l2cap_get_chan_by_scid(conn, scid);
4326 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4333 chan = l2cap_chan_hold_unless_zero(chan);
4341 l2cap_chan_lock(chan);
4344 case L2CAP_CR_SUCCESS:
4345 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
4350 l2cap_state_change(chan, BT_CONFIG);
4353 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4355 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4358 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4359 l2cap_build_conf_req(chan, req, sizeof(req)), req);
4360 chan->num_conf_req++;
4364 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4368 l2cap_chan_del(chan, ECONNREFUSED);
4372 l2cap_chan_unlock(chan);
4373 l2cap_chan_put(chan);
4376 mutex_unlock(&conn->chan_lock);
4381 static inline void set_default_fcs(struct l2cap_chan *chan)
4383 /* FCS is enabled only in ERTM or streaming mode, if one or both
4386 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4387 chan->fcs = L2CAP_FCS_NONE;
4388 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4389 chan->fcs = L2CAP_FCS_CRC16;
4392 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4393 u8 ident, u16 flags)
4395 struct l2cap_conn *conn = chan->conn;
4397 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4400 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4401 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4403 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4404 l2cap_build_conf_rsp(chan, data,
4405 L2CAP_CONF_SUCCESS, flags), data);
4408 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4411 struct l2cap_cmd_rej_cid rej;
4413 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4414 rej.scid = __cpu_to_le16(scid);
4415 rej.dcid = __cpu_to_le16(dcid);
4417 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4420 static inline int l2cap_config_req(struct l2cap_conn *conn,
4421 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4424 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4427 struct l2cap_chan *chan;
4430 if (cmd_len < sizeof(*req))
4433 dcid = __le16_to_cpu(req->dcid);
4434 flags = __le16_to_cpu(req->flags);
4436 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4438 chan = l2cap_get_chan_by_scid(conn, dcid);
4440 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4444 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4445 chan->state != BT_CONNECTED) {
4446 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4451 /* Reject if config buffer is too small. */
4452 len = cmd_len - sizeof(*req);
4453 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4454 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4455 l2cap_build_conf_rsp(chan, rsp,
4456 L2CAP_CONF_REJECT, flags), rsp);
4461 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4462 chan->conf_len += len;
4464 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4465 /* Incomplete config. Send empty response. */
4466 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4467 l2cap_build_conf_rsp(chan, rsp,
4468 L2CAP_CONF_SUCCESS, flags), rsp);
4472 /* Complete config. */
4473 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4475 l2cap_send_disconn_req(chan, ECONNRESET);
4479 chan->ident = cmd->ident;
4480 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4481 if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
4482 chan->num_conf_rsp++;
4484 /* Reset config buffer. */
4487 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4490 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4491 set_default_fcs(chan);
4493 if (chan->mode == L2CAP_MODE_ERTM ||
4494 chan->mode == L2CAP_MODE_STREAMING)
4495 err = l2cap_ertm_init(chan);
4498 l2cap_send_disconn_req(chan, -err);
4500 l2cap_chan_ready(chan);
4505 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4507 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4508 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4509 chan->num_conf_req++;
4512 /* Got Conf Rsp PENDING from remote side and assume we sent
4513 Conf Rsp PENDING in the code above */
4514 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4515 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4517 /* check compatibility */
4519 /* Send rsp for BR/EDR channel */
4521 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4523 chan->ident = cmd->ident;
4527 l2cap_chan_unlock(chan);
4528 l2cap_chan_put(chan);
4532 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4533 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4536 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4537 u16 scid, flags, result;
4538 struct l2cap_chan *chan;
4539 int len = cmd_len - sizeof(*rsp);
4542 if (cmd_len < sizeof(*rsp))
4545 scid = __le16_to_cpu(rsp->scid);
4546 flags = __le16_to_cpu(rsp->flags);
4547 result = __le16_to_cpu(rsp->result);
4549 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4552 chan = l2cap_get_chan_by_scid(conn, scid);
4557 case L2CAP_CONF_SUCCESS:
4558 l2cap_conf_rfc_get(chan, rsp->data, len);
4559 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4562 case L2CAP_CONF_PENDING:
4563 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4565 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4568 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4569 buf, sizeof(buf), &result);
4571 l2cap_send_disconn_req(chan, ECONNRESET);
4575 if (!chan->hs_hcon) {
4576 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4579 if (l2cap_check_efs(chan)) {
4580 amp_create_logical_link(chan);
4581 chan->ident = cmd->ident;
4587 case L2CAP_CONF_UNKNOWN:
4588 case L2CAP_CONF_UNACCEPT:
4589 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4592 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4593 l2cap_send_disconn_req(chan, ECONNRESET);
4597 /* throw out any old stored conf requests */
4598 result = L2CAP_CONF_SUCCESS;
4599 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4600 req, sizeof(req), &result);
4602 l2cap_send_disconn_req(chan, ECONNRESET);
4606 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4607 L2CAP_CONF_REQ, len, req);
4608 chan->num_conf_req++;
4609 if (result != L2CAP_CONF_SUCCESS)
4616 l2cap_chan_set_err(chan, ECONNRESET);
4618 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4619 l2cap_send_disconn_req(chan, ECONNRESET);
4623 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4626 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4628 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4629 set_default_fcs(chan);
4631 if (chan->mode == L2CAP_MODE_ERTM ||
4632 chan->mode == L2CAP_MODE_STREAMING)
4633 err = l2cap_ertm_init(chan);
4636 l2cap_send_disconn_req(chan, -err);
4638 l2cap_chan_ready(chan);
4642 l2cap_chan_unlock(chan);
4643 l2cap_chan_put(chan);
4647 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4648 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4651 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4652 struct l2cap_disconn_rsp rsp;
4654 struct l2cap_chan *chan;
4656 if (cmd_len != sizeof(*req))
4659 scid = __le16_to_cpu(req->scid);
4660 dcid = __le16_to_cpu(req->dcid);
4662 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4664 chan = l2cap_get_chan_by_scid(conn, dcid);
4666 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4670 rsp.dcid = cpu_to_le16(chan->scid);
4671 rsp.scid = cpu_to_le16(chan->dcid);
4672 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4674 chan->ops->set_shutdown(chan);
4676 l2cap_chan_unlock(chan);
4677 mutex_lock(&conn->chan_lock);
4678 l2cap_chan_lock(chan);
4679 l2cap_chan_del(chan, ECONNRESET);
4680 mutex_unlock(&conn->chan_lock);
4682 chan->ops->close(chan);
4684 l2cap_chan_unlock(chan);
4685 l2cap_chan_put(chan);
4690 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4691 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4694 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4696 struct l2cap_chan *chan;
4698 if (cmd_len != sizeof(*rsp))
4701 scid = __le16_to_cpu(rsp->scid);
4702 dcid = __le16_to_cpu(rsp->dcid);
4704 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4706 chan = l2cap_get_chan_by_scid(conn, scid);
4711 if (chan->state != BT_DISCONN) {
4712 l2cap_chan_unlock(chan);
4713 l2cap_chan_put(chan);
4717 l2cap_chan_unlock(chan);
4718 mutex_lock(&conn->chan_lock);
4719 l2cap_chan_lock(chan);
4720 l2cap_chan_del(chan, 0);
4721 mutex_unlock(&conn->chan_lock);
4723 chan->ops->close(chan);
4725 l2cap_chan_unlock(chan);
4726 l2cap_chan_put(chan);
4731 static inline int l2cap_information_req(struct l2cap_conn *conn,
4732 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4735 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4738 if (cmd_len != sizeof(*req))
4741 type = __le16_to_cpu(req->type);
4743 BT_DBG("type 0x%4.4x", type);
4745 if (type == L2CAP_IT_FEAT_MASK) {
4747 u32 feat_mask = l2cap_feat_mask;
4748 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4749 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4750 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4752 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4754 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4755 feat_mask |= L2CAP_FEAT_EXT_FLOW
4756 | L2CAP_FEAT_EXT_WINDOW;
4758 put_unaligned_le32(feat_mask, rsp->data);
4759 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4761 } else if (type == L2CAP_IT_FIXED_CHAN) {
4763 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4765 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4766 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4767 rsp->data[0] = conn->local_fixed_chan;
4768 memset(rsp->data + 1, 0, 7);
4769 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4772 struct l2cap_info_rsp rsp;
4773 rsp.type = cpu_to_le16(type);
4774 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4775 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4782 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4783 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4786 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4789 if (cmd_len < sizeof(*rsp))
4792 type = __le16_to_cpu(rsp->type);
4793 result = __le16_to_cpu(rsp->result);
4795 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4797 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4798 if (cmd->ident != conn->info_ident ||
4799 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4802 cancel_delayed_work(&conn->info_timer);
4804 if (result != L2CAP_IR_SUCCESS) {
4805 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4806 conn->info_ident = 0;
4808 l2cap_conn_start(conn);
4814 case L2CAP_IT_FEAT_MASK:
4815 conn->feat_mask = get_unaligned_le32(rsp->data);
4817 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4818 struct l2cap_info_req req;
4819 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4821 conn->info_ident = l2cap_get_ident(conn);
4823 l2cap_send_cmd(conn, conn->info_ident,
4824 L2CAP_INFO_REQ, sizeof(req), &req);
4826 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4827 conn->info_ident = 0;
4829 l2cap_conn_start(conn);
4833 case L2CAP_IT_FIXED_CHAN:
4834 conn->remote_fixed_chan = rsp->data[0];
4835 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4836 conn->info_ident = 0;
4838 l2cap_conn_start(conn);
4845 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4846 struct l2cap_cmd_hdr *cmd,
4847 u16 cmd_len, void *data)
4849 struct l2cap_create_chan_req *req = data;
4850 struct l2cap_create_chan_rsp rsp;
4851 struct l2cap_chan *chan;
4852 struct hci_dev *hdev;
4855 if (cmd_len != sizeof(*req))
4858 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4861 psm = le16_to_cpu(req->psm);
4862 scid = le16_to_cpu(req->scid);
4864 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4866 /* For controller id 0 make BR/EDR connection */
4867 if (req->amp_id == AMP_ID_BREDR) {
4868 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4873 /* Validate AMP controller id */
4874 hdev = hci_dev_get(req->amp_id);
4878 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4883 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4886 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4887 struct hci_conn *hs_hcon;
4889 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4893 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4898 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4900 mgr->bredr_chan = chan;
4901 chan->hs_hcon = hs_hcon;
4902 chan->fcs = L2CAP_FCS_NONE;
4903 conn->mtu = hdev->block_mtu;
4912 rsp.scid = cpu_to_le16(scid);
4913 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4914 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4916 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4922 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4924 struct l2cap_move_chan_req req;
4927 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4929 ident = l2cap_get_ident(chan->conn);
4930 chan->ident = ident;
4932 req.icid = cpu_to_le16(chan->scid);
4933 req.dest_amp_id = dest_amp_id;
4935 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4938 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4941 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4943 struct l2cap_move_chan_rsp rsp;
4945 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4947 rsp.icid = cpu_to_le16(chan->dcid);
4948 rsp.result = cpu_to_le16(result);
4950 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4954 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4956 struct l2cap_move_chan_cfm cfm;
4958 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4960 chan->ident = l2cap_get_ident(chan->conn);
4962 cfm.icid = cpu_to_le16(chan->scid);
4963 cfm.result = cpu_to_le16(result);
4965 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4968 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4971 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4973 struct l2cap_move_chan_cfm cfm;
4975 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4977 cfm.icid = cpu_to_le16(icid);
4978 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4980 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4984 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4987 struct l2cap_move_chan_cfm_rsp rsp;
4989 BT_DBG("icid 0x%4.4x", icid);
4991 rsp.icid = cpu_to_le16(icid);
4992 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4995 static void __release_logical_link(struct l2cap_chan *chan)
4997 chan->hs_hchan = NULL;
4998 chan->hs_hcon = NULL;
5000 /* Placeholder - release the logical link */
5003 static void l2cap_logical_fail(struct l2cap_chan *chan)
5005 /* Logical link setup failed */
5006 if (chan->state != BT_CONNECTED) {
5007 /* Create channel failure, disconnect */
5008 l2cap_send_disconn_req(chan, ECONNRESET);
5012 switch (chan->move_role) {
5013 case L2CAP_MOVE_ROLE_RESPONDER:
5014 l2cap_move_done(chan);
5015 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
5017 case L2CAP_MOVE_ROLE_INITIATOR:
5018 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
5019 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
5020 /* Remote has only sent pending or
5021 * success responses, clean up
5023 l2cap_move_done(chan);
5026 /* Other amp move states imply that the move
5027 * has already aborted
5029 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5034 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
5035 struct hci_chan *hchan)
5037 struct l2cap_conf_rsp rsp;
5039 chan->hs_hchan = hchan;
5040 chan->hs_hcon->l2cap_data = chan->conn;
5042 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
5044 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
5047 set_default_fcs(chan);
5049 err = l2cap_ertm_init(chan);
5051 l2cap_send_disconn_req(chan, -err);
5053 l2cap_chan_ready(chan);
5057 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
5058 struct hci_chan *hchan)
5060 chan->hs_hcon = hchan->conn;
5061 chan->hs_hcon->l2cap_data = chan->conn;
5063 BT_DBG("move_state %d", chan->move_state);
5065 switch (chan->move_state) {
5066 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5067 /* Move confirm will be sent after a success
5068 * response is received
5070 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5072 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
5073 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5074 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5075 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5076 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5077 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5078 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5079 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5080 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5084 /* Move was not in expected state, free the channel */
5085 __release_logical_link(chan);
5087 chan->move_state = L2CAP_MOVE_STABLE;
5091 /* Call with chan locked */
5092 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5095 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
5098 l2cap_logical_fail(chan);
5099 __release_logical_link(chan);
5103 if (chan->state != BT_CONNECTED) {
5104 /* Ignore logical link if channel is on BR/EDR */
5105 if (chan->local_amp_id != AMP_ID_BREDR)
5106 l2cap_logical_finish_create(chan, hchan);
5108 l2cap_logical_finish_move(chan, hchan);
5112 void l2cap_move_start(struct l2cap_chan *chan)
5114 BT_DBG("chan %p", chan);
5116 if (chan->local_amp_id == AMP_ID_BREDR) {
5117 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5119 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5120 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5121 /* Placeholder - start physical link setup */
5123 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5124 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5126 l2cap_move_setup(chan);
5127 l2cap_send_move_chan_req(chan, 0);
5131 static void l2cap_do_create(struct l2cap_chan *chan, int result,
5132 u8 local_amp_id, u8 remote_amp_id)
5134 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
5135 local_amp_id, remote_amp_id);
5137 chan->fcs = L2CAP_FCS_NONE;
5139 /* Outgoing channel on AMP */
5140 if (chan->state == BT_CONNECT) {
5141 if (result == L2CAP_CR_SUCCESS) {
5142 chan->local_amp_id = local_amp_id;
5143 l2cap_send_create_chan_req(chan, remote_amp_id);
5145 /* Revert to BR/EDR connect */
5146 l2cap_send_conn_req(chan);
5152 /* Incoming channel on AMP */
5153 if (__l2cap_no_conn_pending(chan)) {
5154 struct l2cap_conn_rsp rsp;
5156 rsp.scid = cpu_to_le16(chan->dcid);
5157 rsp.dcid = cpu_to_le16(chan->scid);
5159 if (result == L2CAP_CR_SUCCESS) {
5160 /* Send successful response */
5161 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
5162 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5164 /* Send negative response */
5165 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
5166 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5169 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
5172 if (result == L2CAP_CR_SUCCESS) {
5173 l2cap_state_change(chan, BT_CONFIG);
5174 set_bit(CONF_REQ_SENT, &chan->conf_state);
5175 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
5177 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
5178 chan->num_conf_req++;
5183 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5186 l2cap_move_setup(chan);
5187 chan->move_id = local_amp_id;
5188 chan->move_state = L2CAP_MOVE_WAIT_RSP;
5190 l2cap_send_move_chan_req(chan, remote_amp_id);
5193 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
5195 struct hci_chan *hchan = NULL;
5197 /* Placeholder - get hci_chan for logical link */
5200 if (hchan->state == BT_CONNECTED) {
5201 /* Logical link is ready to go */
5202 chan->hs_hcon = hchan->conn;
5203 chan->hs_hcon->l2cap_data = chan->conn;
5204 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5205 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5207 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5209 /* Wait for logical link to be ready */
5210 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5213 /* Logical link not available */
5214 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
5218 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5220 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5222 if (result == -EINVAL)
5223 rsp_result = L2CAP_MR_BAD_ID;
5225 rsp_result = L2CAP_MR_NOT_ALLOWED;
5227 l2cap_send_move_chan_rsp(chan, rsp_result);
5230 chan->move_role = L2CAP_MOVE_ROLE_NONE;
5231 chan->move_state = L2CAP_MOVE_STABLE;
5233 /* Restart data transmission */
5234 l2cap_ertm_send(chan);
5237 /* Invoke with locked chan */
5238 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5240 u8 local_amp_id = chan->local_amp_id;
5241 u8 remote_amp_id = chan->remote_amp_id;
5243 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5244 chan, result, local_amp_id, remote_amp_id);
5246 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
5249 if (chan->state != BT_CONNECTED) {
5250 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5251 } else if (result != L2CAP_MR_SUCCESS) {
5252 l2cap_do_move_cancel(chan, result);
5254 switch (chan->move_role) {
5255 case L2CAP_MOVE_ROLE_INITIATOR:
5256 l2cap_do_move_initiate(chan, local_amp_id,
5259 case L2CAP_MOVE_ROLE_RESPONDER:
5260 l2cap_do_move_respond(chan, result);
5263 l2cap_do_move_cancel(chan, result);
5269 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5270 struct l2cap_cmd_hdr *cmd,
5271 u16 cmd_len, void *data)
5273 struct l2cap_move_chan_req *req = data;
5274 struct l2cap_move_chan_rsp rsp;
5275 struct l2cap_chan *chan;
5277 u16 result = L2CAP_MR_NOT_ALLOWED;
5279 if (cmd_len != sizeof(*req))
5282 icid = le16_to_cpu(req->icid);
5284 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
5286 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
5289 chan = l2cap_get_chan_by_dcid(conn, icid);
5291 rsp.icid = cpu_to_le16(icid);
5292 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5293 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5298 chan->ident = cmd->ident;
5300 if (chan->scid < L2CAP_CID_DYN_START ||
5301 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5302 (chan->mode != L2CAP_MODE_ERTM &&
5303 chan->mode != L2CAP_MODE_STREAMING)) {
5304 result = L2CAP_MR_NOT_ALLOWED;
5305 goto send_move_response;
5308 if (chan->local_amp_id == req->dest_amp_id) {
5309 result = L2CAP_MR_SAME_ID;
5310 goto send_move_response;
5313 if (req->dest_amp_id != AMP_ID_BREDR) {
5314 struct hci_dev *hdev;
5315 hdev = hci_dev_get(req->dest_amp_id);
5316 if (!hdev || hdev->dev_type != HCI_AMP ||
5317 !test_bit(HCI_UP, &hdev->flags)) {
5321 result = L2CAP_MR_BAD_ID;
5322 goto send_move_response;
5327 /* Detect a move collision. Only send a collision response
5328 * if this side has "lost", otherwise proceed with the move.
5329 * The winner has the larger bd_addr.
5331 if ((__chan_is_moving(chan) ||
5332 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5333 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5334 result = L2CAP_MR_COLLISION;
5335 goto send_move_response;
5338 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5339 l2cap_move_setup(chan);
5340 chan->move_id = req->dest_amp_id;
5342 if (req->dest_amp_id == AMP_ID_BREDR) {
5343 /* Moving to BR/EDR */
5344 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5345 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5346 result = L2CAP_MR_PEND;
5348 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5349 result = L2CAP_MR_SUCCESS;
5352 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5353 /* Placeholder - uncomment when amp functions are available */
5354 /*amp_accept_physical(chan, req->dest_amp_id);*/
5355 result = L2CAP_MR_PEND;
5359 l2cap_send_move_chan_rsp(chan, result);
5361 l2cap_chan_unlock(chan);
5362 l2cap_chan_put(chan);
5367 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5369 struct l2cap_chan *chan;
5370 struct hci_chan *hchan = NULL;
5372 chan = l2cap_get_chan_by_scid(conn, icid);
5374 l2cap_send_move_chan_cfm_icid(conn, icid);
5378 __clear_chan_timer(chan);
5379 if (result == L2CAP_MR_PEND)
5380 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5382 switch (chan->move_state) {
5383 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5384 /* Move confirm will be sent when logical link
5387 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5389 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5390 if (result == L2CAP_MR_PEND) {
5392 } else if (test_bit(CONN_LOCAL_BUSY,
5393 &chan->conn_state)) {
5394 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5396 /* Logical link is up or moving to BR/EDR,
5399 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5400 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5403 case L2CAP_MOVE_WAIT_RSP:
5405 if (result == L2CAP_MR_SUCCESS) {
5406 /* Remote is ready, send confirm immediately
5407 * after logical link is ready
5409 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5411 /* Both logical link and move success
5412 * are required to confirm
5414 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5417 /* Placeholder - get hci_chan for logical link */
5419 /* Logical link not available */
5420 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5424 /* If the logical link is not yet connected, do not
5425 * send confirmation.
5427 if (hchan->state != BT_CONNECTED)
5430 /* Logical link is already ready to go */
5432 chan->hs_hcon = hchan->conn;
5433 chan->hs_hcon->l2cap_data = chan->conn;
5435 if (result == L2CAP_MR_SUCCESS) {
5436 /* Can confirm now */
5437 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5439 /* Now only need move success
5442 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5445 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5448 /* Any other amp move state means the move failed. */
5449 chan->move_id = chan->local_amp_id;
5450 l2cap_move_done(chan);
5451 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5454 l2cap_chan_unlock(chan);
5455 l2cap_chan_put(chan);
5458 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5461 struct l2cap_chan *chan;
5463 chan = l2cap_get_chan_by_ident(conn, ident);
5465 /* Could not locate channel, icid is best guess */
5466 l2cap_send_move_chan_cfm_icid(conn, icid);
5470 __clear_chan_timer(chan);
5472 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5473 if (result == L2CAP_MR_COLLISION) {
5474 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5476 /* Cleanup - cancel move */
5477 chan->move_id = chan->local_amp_id;
5478 l2cap_move_done(chan);
5482 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5484 l2cap_chan_unlock(chan);
5485 l2cap_chan_put(chan);
5488 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5489 struct l2cap_cmd_hdr *cmd,
5490 u16 cmd_len, void *data)
5492 struct l2cap_move_chan_rsp *rsp = data;
5495 if (cmd_len != sizeof(*rsp))
5498 icid = le16_to_cpu(rsp->icid);
5499 result = le16_to_cpu(rsp->result);
5501 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5503 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5504 l2cap_move_continue(conn, icid, result);
5506 l2cap_move_fail(conn, cmd->ident, icid, result);
5511 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5512 struct l2cap_cmd_hdr *cmd,
5513 u16 cmd_len, void *data)
5515 struct l2cap_move_chan_cfm *cfm = data;
5516 struct l2cap_chan *chan;
5519 if (cmd_len != sizeof(*cfm))
5522 icid = le16_to_cpu(cfm->icid);
5523 result = le16_to_cpu(cfm->result);
5525 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5527 chan = l2cap_get_chan_by_dcid(conn, icid);
5529 /* Spec requires a response even if the icid was not found */
5530 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5534 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5535 if (result == L2CAP_MC_CONFIRMED) {
5536 chan->local_amp_id = chan->move_id;
5537 if (chan->local_amp_id == AMP_ID_BREDR)
5538 __release_logical_link(chan);
5540 chan->move_id = chan->local_amp_id;
5543 l2cap_move_done(chan);
5546 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5548 l2cap_chan_unlock(chan);
5549 l2cap_chan_put(chan);
5554 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5555 struct l2cap_cmd_hdr *cmd,
5556 u16 cmd_len, void *data)
5558 struct l2cap_move_chan_cfm_rsp *rsp = data;
5559 struct l2cap_chan *chan;
5562 if (cmd_len != sizeof(*rsp))
5565 icid = le16_to_cpu(rsp->icid);
5567 BT_DBG("icid 0x%4.4x", icid);
5569 chan = l2cap_get_chan_by_scid(conn, icid);
5573 __clear_chan_timer(chan);
5575 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5576 chan->local_amp_id = chan->move_id;
5578 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5579 __release_logical_link(chan);
5581 l2cap_move_done(chan);
5584 l2cap_chan_unlock(chan);
5585 l2cap_chan_put(chan);
5590 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5591 struct l2cap_cmd_hdr *cmd,
5592 u16 cmd_len, u8 *data)
5594 struct hci_conn *hcon = conn->hcon;
5595 struct l2cap_conn_param_update_req *req;
5596 struct l2cap_conn_param_update_rsp rsp;
5597 u16 min, max, latency, to_multiplier;
5600 if (hcon->role != HCI_ROLE_MASTER)
5603 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5606 req = (struct l2cap_conn_param_update_req *) data;
5607 min = __le16_to_cpu(req->min);
5608 max = __le16_to_cpu(req->max);
5609 latency = __le16_to_cpu(req->latency);
5610 to_multiplier = __le16_to_cpu(req->to_multiplier);
5612 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5613 min, max, latency, to_multiplier);
5615 memset(&rsp, 0, sizeof(rsp));
5617 err = hci_check_conn_params(min, max, latency, to_multiplier);
5619 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5621 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5623 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5629 store_hint = hci_le_conn_update(hcon, min, max, latency,
5631 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5632 store_hint, min, max, latency,
/* Handle an LE Credit Based Connection Response. Looks up the pending
 * channel by the request's ident, then acts on the result code: success
 * brings the channel up, security failures trigger a security upgrade and
 * a later retry, anything else tears the channel down.
 * NOTE(review): some lines (returns, switch header, dcid-in-use handling)
 * are elided in this excerpt.
 */
5640 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5641 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5644 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5645 struct hci_conn *hcon = conn->hcon;
5646 u16 dcid, mtu, mps, credits, result;
5647 struct l2cap_chan *chan;
5650 if (cmd_len < sizeof(*rsp))
5653 dcid = __le16_to_cpu(rsp->dcid);
5654 mtu = __le16_to_cpu(rsp->mtu);
5655 mps = __le16_to_cpu(rsp->mps);
5656 credits = __le16_to_cpu(rsp->credits);
5657 result = __le16_to_cpu(rsp->result);
/* A "success" with sub-minimum MTU/MPS (spec minimum 23) or a DCID
 * outside the LE dynamic range is malformed — reject it up front.
 */
5659 if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5660 dcid < L2CAP_CID_DYN_START ||
5661 dcid > L2CAP_CID_LE_DYN_END))
5664 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5665 dcid, mtu, mps, credits, result);
5667 mutex_lock(&conn->chan_lock);
/* The response is matched to our request via the command ident. */
5669 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5677 l2cap_chan_lock(chan);
5680 case L2CAP_CR_LE_SUCCESS:
/* Peer handed us a DCID already in use — elided error path. */
5681 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5689 chan->remote_mps = mps;
5690 chan->tx_credits = credits;
5691 l2cap_chan_ready(chan);
5694 case L2CAP_CR_LE_AUTHENTICATION:
5695 case L2CAP_CR_LE_ENCRYPTION:
5696 /* If we already have MITM protection we can't do
5699 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5700 l2cap_chan_del(chan, ECONNREFUSED);
/* Bump the required security one level and re-pair; the connect
 * request will be re-sent once security is established.
 */
5704 sec_level = hcon->sec_level + 1;
5705 if (chan->sec_level < sec_level)
5706 chan->sec_level = sec_level;
5708 /* We'll need to send a new Connect Request */
5709 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5711 smp_conn_security(hcon, chan->sec_level);
/* Any other result code: connection refused by the peer. */
5715 l2cap_chan_del(chan, ECONNREFUSED);
5719 l2cap_chan_unlock(chan);
5722 mutex_unlock(&conn->chan_lock);
/* Dispatch one BR/EDR signaling command to its handler. Handlers that can
 * fail propagate err; response handlers are fire-and-forget. Unknown
 * opcodes are logged (the caller sends a Command Reject — lines elided).
 */
5727 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5728 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5733 switch (cmd->code) {
5734 case L2CAP_COMMAND_REJ:
5735 l2cap_command_rej(conn, cmd, cmd_len, data);
5738 case L2CAP_CONN_REQ:
5739 err = l2cap_connect_req(conn, cmd, cmd_len, data);
/* CONN_RSP and CREATE_CHAN_RSP share the same response layout. */
5742 case L2CAP_CONN_RSP:
5743 case L2CAP_CREATE_CHAN_RSP:
5744 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5747 case L2CAP_CONF_REQ:
5748 err = l2cap_config_req(conn, cmd, cmd_len, data);
5751 case L2CAP_CONF_RSP:
5752 l2cap_config_rsp(conn, cmd, cmd_len, data);
5755 case L2CAP_DISCONN_REQ:
5756 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5759 case L2CAP_DISCONN_RSP:
5760 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
/* Echo requests are answered by reflecting the payload back. */
5763 case L2CAP_ECHO_REQ:
5764 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5767 case L2CAP_ECHO_RSP:
5770 case L2CAP_INFO_REQ:
5771 err = l2cap_information_req(conn, cmd, cmd_len, data);
5774 case L2CAP_INFO_RSP:
5775 l2cap_information_rsp(conn, cmd, cmd_len, data);
/* AMP channel creation / move signaling. */
5778 case L2CAP_CREATE_CHAN_REQ:
5779 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5782 case L2CAP_MOVE_CHAN_REQ:
5783 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5786 case L2CAP_MOVE_CHAN_RSP:
5787 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5790 case L2CAP_MOVE_CHAN_CFM:
5791 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5794 case L2CAP_MOVE_CHAN_CFM_RSP:
5795 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5799 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an incoming LE Credit Based Connection Request: validate PSM,
 * MTU/MPS and SCID, find a listening channel for the PSM, check security,
 * create the new channel and either defer (to the socket owner) or answer
 * immediately with an LE_CONN_RSP.
 * NOTE(review): some lines (returns, goto labels, psm read) are elided in
 * this excerpt; comments describe only visible code.
 */
5807 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5808 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5811 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5812 struct l2cap_le_conn_rsp rsp;
5813 struct l2cap_chan *chan, *pchan;
5814 u16 dcid, scid, credits, mtu, mps;
5818 if (cmd_len != sizeof(*req))
5821 scid = __le16_to_cpu(req->scid);
5822 mtu = __le16_to_cpu(req->mtu);
5823 mps = __le16_to_cpu(req->mps);
/* Spec minimum for LE CoC MTU and MPS is 23 octets. */
5828 if (mtu < 23 || mps < 23)
5831 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5834 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
5837 * Valid range: 0x0001-0x00ff
5839 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
5841 if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
5842 result = L2CAP_CR_LE_BAD_PSM;
5847 /* Check if we have socket listening on psm */
5848 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5849 &conn->hcon->dst, LE_LINK);
5851 result = L2CAP_CR_LE_BAD_PSM;
5856 mutex_lock(&conn->chan_lock);
5857 l2cap_chan_lock(pchan);
/* Reject if the link's security does not meet the listener's policy. */
5859 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5861 result = L2CAP_CR_LE_AUTHENTICATION;
5863 goto response_unlock;
5866 /* Check for valid dynamic CID range */
5867 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5868 result = L2CAP_CR_LE_INVALID_SCID;
5870 goto response_unlock;
5873 /* Check if we already have channel with that dcid */
5874 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5875 result = L2CAP_CR_LE_SCID_IN_USE;
5877 goto response_unlock;
/* Ask the listener (socket layer) to materialize the child channel. */
5880 chan = pchan->ops->new_connection(pchan);
5882 result = L2CAP_CR_LE_NO_MEM;
5883 goto response_unlock;
5886 bacpy(&chan->src, &conn->hcon->src);
5887 bacpy(&chan->dst, &conn->hcon->dst);
5888 chan->src_type = bdaddr_src_type(conn->hcon);
5889 chan->dst_type = bdaddr_dst_type(conn->hcon);
5893 chan->remote_mps = mps;
5895 __l2cap_chan_add(conn, chan);
/* Seed the credit-based flow control with the peer's initial credits. */
5897 l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5900 credits = chan->rx_credits;
5902 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
/* Remember the ident so the deferred response can be matched later. */
5904 chan->ident = cmd->ident;
5906 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5907 l2cap_state_change(chan, BT_CONNECT2);
5908 /* The following result value is actually not defined
5909 * for LE CoC but we use it to let the function know
5910 * that it should bail out after doing its cleanup
5911 * instead of sending a response.
5913 result = L2CAP_CR_PEND;
5914 chan->ops->defer(chan);
5916 l2cap_chan_ready(chan);
5917 result = L2CAP_CR_LE_SUCCESS;
5921 l2cap_chan_unlock(pchan);
5922 mutex_unlock(&conn->chan_lock);
5923 l2cap_chan_put(pchan);
/* Deferred setup: response will be sent later by the socket owner. */
5925 if (result == L2CAP_CR_PEND)
5930 rsp.mtu = cpu_to_le16(chan->imtu);
5931 rsp.mps = cpu_to_le16(chan->mps);
5937 rsp.dcid = cpu_to_le16(dcid);
5938 rsp.credits = cpu_to_le16(credits);
5939 rsp.result = cpu_to_le16(result);
5941 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet: add the peer-granted credits to
 * the channel's tx budget (disconnecting on overflow per spec) and resume
 * any transmission that was blocked waiting for credits.
 */
5946 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5947 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5950 struct l2cap_le_credits *pkt;
5951 struct l2cap_chan *chan;
5952 u16 cid, credits, max_credits;
5954 if (cmd_len != sizeof(*pkt))
5957 pkt = (struct l2cap_le_credits *) data;
5958 cid = __le16_to_cpu(pkt->cid);
5959 credits = __le16_to_cpu(pkt->credits);
5961 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5963 chan = l2cap_get_chan_by_dcid(conn, cid);
/* Total credits must never exceed 65535; exceeding it is a protocol
 * error that mandates disconnecting the channel.
 */
5967 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5968 if (credits > max_credits) {
5969 BT_ERR("LE credits overflow");
5970 l2cap_send_disconn_req(chan, ECONNRESET);
5972 /* Return 0 so that we don't trigger an unnecessary
5973 * command reject packet.
5978 chan->tx_credits += credits;
5980 /* Resume sending */
5981 l2cap_le_flowctl_send(chan);
5983 if (chan->tx_credits)
5984 chan->ops->resume(chan);
5987 l2cap_chan_unlock(chan);
5988 l2cap_chan_put(chan);
/* Handle an Enhanced Credit Based (ECRED) Connection Request, which may
 * open up to L2CAP_ECRED_MAX_CID channels at once. Validates lengths,
 * MTU/MPS, PSM and each SCID, creates one channel per valid SCID, and
 * sends a single aggregate response carrying per-channel DCIDs.
 * NOTE(review): pdu/psm declarations and some returns are elided in this
 * excerpt.
 */
5993 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5994 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5997 struct l2cap_ecred_conn_req *req = (void *) data;
5999 struct l2cap_ecred_conn_rsp rsp;
6000 __le16 dcid[L2CAP_ECRED_MAX_CID];
6002 struct l2cap_chan *chan, *pchan;
/* The variable tail after the fixed header must be a whole number of
 * 16-bit SCIDs.
 */
6012 if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6013 result = L2CAP_CR_LE_INVALID_PARAMS;
6017 cmd_len -= sizeof(*req);
6018 num_scid = cmd_len / sizeof(u16);
6020 if (num_scid > ARRAY_SIZE(pdu.dcid)) {
6021 result = L2CAP_CR_LE_INVALID_PARAMS;
6025 mtu = __le16_to_cpu(req->mtu);
6026 mps = __le16_to_cpu(req->mps);
/* ECRED minimums (64/64) are higher than the plain LE CoC minimum. */
6028 if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
6029 result = L2CAP_CR_LE_UNACCEPT_PARAMS;
6035 /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
6038 * Valid range: 0x0001-0x00ff
6040 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
6042 if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
6043 result = L2CAP_CR_LE_BAD_PSM;
6047 BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
6049 memset(&pdu, 0, sizeof(pdu));
6051 /* Check if we have socket listening on psm */
6052 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
6053 &conn->hcon->dst, LE_LINK);
6055 result = L2CAP_CR_LE_BAD_PSM;
6059 mutex_lock(&conn->chan_lock);
6060 l2cap_chan_lock(pchan);
6062 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
6064 result = L2CAP_CR_LE_AUTHENTICATION;
6068 result = L2CAP_CR_LE_SUCCESS;
/* One pass per requested SCID; a failing SCID leaves its DCID slot
 * at 0x0000 in the response so the peer knows which ones failed.
 */
6070 for (i = 0; i < num_scid; i++) {
6071 u16 scid = __le16_to_cpu(req->scid[i]);
6073 BT_DBG("scid[%d] 0x%4.4x", i, scid);
6075 pdu.dcid[i] = 0x0000;
6076 len += sizeof(*pdu.dcid);
6078 /* Check for valid dynamic CID range */
6079 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
6080 result = L2CAP_CR_LE_INVALID_SCID;
6084 /* Check if we already have channel with that dcid */
6085 if (__l2cap_get_chan_by_dcid(conn, scid)) {
6086 result = L2CAP_CR_LE_SCID_IN_USE;
6090 chan = pchan->ops->new_connection(pchan);
6092 result = L2CAP_CR_LE_NO_MEM;
6096 bacpy(&chan->src, &conn->hcon->src);
6097 bacpy(&chan->dst, &conn->hcon->dst);
6098 chan->src_type = bdaddr_src_type(conn->hcon);
6099 chan->dst_type = bdaddr_dst_type(conn->hcon);
6103 chan->remote_mps = mps;
6105 __l2cap_chan_add(conn, chan);
6107 l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
/* Common response fields are filled once, from the first channel. */
6110 if (!pdu.rsp.credits) {
6111 pdu.rsp.mtu = cpu_to_le16(chan->imtu);
6112 pdu.rsp.mps = cpu_to_le16(chan->mps);
6113 pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
6116 pdu.dcid[i] = cpu_to_le16(chan->scid);
6118 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
6120 chan->ident = cmd->ident;
6121 chan->mode = L2CAP_MODE_EXT_FLOWCTL;
6123 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6124 l2cap_state_change(chan, BT_CONNECT2);
6126 chan->ops->defer(chan);
6128 l2cap_chan_ready(chan);
6133 l2cap_chan_unlock(pchan);
6134 mutex_unlock(&conn->chan_lock);
6135 l2cap_chan_put(pchan);
6138 pdu.rsp.result = cpu_to_le16(result);
6143 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
6144 sizeof(pdu.rsp) + len, &pdu);
/* Handle an ECRED Connection Response. Walks every channel pending on this
 * command ident, consuming one DCID from the response per channel, and
 * brings each up / retries with higher security / tears it down according
 * to the shared result code.
 * NOTE(review): the switch header, dcid declaration and several break/
 * return lines are elided in this excerpt.
 */
6149 static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
6150 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6153 struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6154 struct hci_conn *hcon = conn->hcon;
6155 u16 mtu, mps, credits, result;
6156 struct l2cap_chan *chan, *tmp;
6157 int err = 0, sec_level;
6160 if (cmd_len < sizeof(*rsp))
6163 mtu = __le16_to_cpu(rsp->mtu);
6164 mps = __le16_to_cpu(rsp->mps);
6165 credits = __le16_to_cpu(rsp->credits);
6166 result = __le16_to_cpu(rsp->result);
6168 BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
6171 mutex_lock(&conn->chan_lock);
/* cmd_len now counts only the trailing DCID array. */
6173 cmd_len -= sizeof(*rsp);
6175 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
/* Only ECRED channels still pending on this exact request. */
6178 if (chan->ident != cmd->ident ||
6179 chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
6180 chan->state == BT_CONNECTED)
6183 l2cap_chan_lock(chan);
6185 /* Check that there is a dcid for each pending channel */
6186 if (cmd_len < sizeof(dcid)) {
6187 l2cap_chan_del(chan, ECONNREFUSED);
6188 l2cap_chan_unlock(chan);
6192 dcid = __le16_to_cpu(rsp->dcid[i++]);
6193 cmd_len -= sizeof(u16);
6195 BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
6197 /* Check if dcid is already in use */
6198 if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
6199 /* If a device receives a
6200 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
6201 * already-assigned Destination CID, then both the
6202 * original channel and the new channel shall be
6203 * immediately discarded and not used.
6205 l2cap_chan_del(chan, ECONNREFUSED);
6206 l2cap_chan_unlock(chan);
6207 chan = __l2cap_get_chan_by_dcid(conn, dcid);
6208 l2cap_chan_lock(chan);
6209 l2cap_chan_del(chan, ECONNRESET);
6210 l2cap_chan_unlock(chan);
6215 case L2CAP_CR_LE_AUTHENTICATION:
6216 case L2CAP_CR_LE_ENCRYPTION:
6217 /* If we already have MITM protection we can't do
6220 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
6221 l2cap_chan_del(chan, ECONNREFUSED);
/* Escalate security and retry the whole request later. */
6225 sec_level = hcon->sec_level + 1;
6226 if (chan->sec_level < sec_level)
6227 chan->sec_level = sec_level;
6229 /* We'll need to send a new Connect Request */
6230 clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
6232 smp_conn_security(hcon, chan->sec_level);
6235 case L2CAP_CR_LE_BAD_PSM:
6236 l2cap_chan_del(chan, ECONNREFUSED);
6240 /* If dcid was not set it means channels was refused */
6242 l2cap_chan_del(chan, ECONNREFUSED);
6249 chan->remote_mps = mps;
6250 chan->tx_credits = credits;
6251 l2cap_chan_ready(chan);
6255 l2cap_chan_unlock(chan);
6258 mutex_unlock(&conn->chan_lock);
/* Handle an ECRED Reconfigure Request: validate the new MTU/MPS, apply
 * them to every channel listed in the request, and answer with an
 * L2CAP_ECRED_RECONF_RSP carrying the aggregate result.
 * NOTE(review): some lines (returns, scid declaration, omtu assignment)
 * are elided in this excerpt.
 */
6263 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6264 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6267 struct l2cap_ecred_reconf_req *req = (void *) data;
6268 struct l2cap_ecred_reconf_rsp rsp;
6269 u16 mtu, mps, result;
6270 struct l2cap_chan *chan;
/* FIX: parenthesize (cmd_len - sizeof(*req)) before the modulo. The
 * original "cmd_len - sizeof(*req) % sizeof(u16)" binds the % first;
 * since sizeof(*req) is a multiple of sizeof(u16) that reduces to
 * plain "cmd_len", breaking the check. The intent — the trailing SCID
 * list must be a whole number of u16s — matches the parenthesized
 * form used by l2cap_ecred_conn_req.
 */
6276 if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6277 result = L2CAP_CR_LE_INVALID_PARAMS;
6281 mtu = __le16_to_cpu(req->mtu);
6282 mps = __le16_to_cpu(req->mps);
6284 BT_DBG("mtu %u mps %u", mtu, mps);
/* ECRED floor values: MTU >= 64, MPS >= 64. */
6286 if (mtu < L2CAP_ECRED_MIN_MTU) {
6287 result = L2CAP_RECONF_INVALID_MTU;
6291 if (mps < L2CAP_ECRED_MIN_MPS) {
6292 result = L2CAP_RECONF_INVALID_MPS;
6296 cmd_len -= sizeof(*req);
6297 num_scid = cmd_len / sizeof(u16);
6298 result = L2CAP_RECONF_SUCCESS;
6300 for (i = 0; i < num_scid; i++) {
6303 scid = __le16_to_cpu(req->scid[i]);
/* The peer's SCID is our DCID. */
6307 chan = __l2cap_get_chan_by_dcid(conn, scid);
6311 /* If the MTU value is decreased for any of the included
6312 * channels, then the receiver shall disconnect all
6313 * included channels.
6315 if (chan->omtu > mtu) {
6316 BT_ERR("chan %p decreased MTU %u -> %u", chan,
6318 result = L2CAP_RECONF_INVALID_MTU;
6322 chan->remote_mps = mps;
6326 rsp.result = cpu_to_le16(result);
6328 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
/* Handle an ECRED Reconfigure Response. On a non-success result (the
 * success early-return is elided in this excerpt), every channel still
 * pending on this command ident is torn down with ECONNRESET.
 */
6334 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6335 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6338 struct l2cap_chan *chan, *tmp;
6339 struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6342 if (cmd_len < sizeof(*rsp))
6345 result = __le16_to_cpu(rsp->result);
/* FIX: log the host-order 'result' computed above, not the raw
 * little-endian rsp->result (wrong value on big-endian hosts and a
 * sparse __le16 type violation).
 */
6347 BT_DBG("result 0x%4.4x", result);
/* Match pending channels by the request's command ident. */
6352 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6353 if (chan->ident != cmd->ident)
6356 l2cap_chan_del(chan, ECONNRESET);
/* Handle an LE Command Reject: the peer did not understand one of our
 * requests, so the channel pending on that ident (if any) is torn down.
 */
6362 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6363 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6366 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6367 struct l2cap_chan *chan;
6369 if (cmd_len < sizeof(*rej))
6372 mutex_lock(&conn->chan_lock);
6374 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
/* Take a reference only if the channel is not already being freed. */
6378 chan = l2cap_chan_hold_unless_zero(chan);
6382 l2cap_chan_lock(chan);
6383 l2cap_chan_del(chan, ECONNREFUSED);
6384 l2cap_chan_unlock(chan);
6385 l2cap_chan_put(chan);
6388 mutex_unlock(&conn->chan_lock);
/* Dispatch one LE signaling command to its handler; unknown opcodes are
 * logged (the caller generates a Command Reject — lines elided).
 */
6392 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6393 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6398 switch (cmd->code) {
6399 case L2CAP_COMMAND_REJ:
6400 l2cap_le_command_rej(conn, cmd, cmd_len, data);
6403 case L2CAP_CONN_PARAM_UPDATE_REQ:
6404 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6407 case L2CAP_CONN_PARAM_UPDATE_RSP:
6410 case L2CAP_LE_CONN_RSP:
6411 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6414 case L2CAP_LE_CONN_REQ:
6415 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6418 case L2CAP_LE_CREDITS:
6419 err = l2cap_le_credits(conn, cmd, cmd_len, data);
/* Enhanced credit based flow control mode (multi-channel). */
6422 case L2CAP_ECRED_CONN_REQ:
6423 err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6426 case L2CAP_ECRED_CONN_RSP:
6427 err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6430 case L2CAP_ECRED_RECONF_REQ:
6431 err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6434 case L2CAP_ECRED_RECONF_RSP:
6435 err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6438 case L2CAP_DISCONN_REQ:
6439 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6442 case L2CAP_DISCONN_RSP:
6443 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6447 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process the LE signaling channel payload: exactly one command per PDU
 * (unlike BR/EDR). Validates the header, dispatches, and sends a Command
 * Reject if the handler failed.
 */
6455 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
6456 struct sk_buff *skb)
6458 struct hci_conn *hcon = conn->hcon;
6459 struct l2cap_cmd_hdr *cmd;
/* LE signaling only valid on LE links. */
6463 if (hcon->type != LE_LINK)
6466 if (skb->len < L2CAP_CMD_HDR_SIZE)
6469 cmd = (void *) skb->data;
6470 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6472 len = le16_to_cpu(cmd->len);
6474 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* The declared length must exactly cover the remaining payload, and
 * ident 0 is reserved.
 */
6476 if (len != skb->len || !cmd->ident) {
6477 BT_DBG("corrupted command");
6481 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
6483 struct l2cap_cmd_rej_unk rej;
6485 BT_ERR("Wrong link type (%d)", err);
6487 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6488 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process the BR/EDR signaling channel payload: a PDU may carry several
 * commands back to back, so loop while a full command header remains.
 */
6496 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
6497 struct sk_buff *skb)
6499 struct hci_conn *hcon = conn->hcon;
6500 struct l2cap_cmd_hdr *cmd;
/* Mirror raw signaling traffic to any raw (sniffing) sockets. */
6503 l2cap_raw_recv(conn, skb);
6505 if (hcon->type != ACL_LINK)
6508 while (skb->len >= L2CAP_CMD_HDR_SIZE) {
6511 cmd = (void *) skb->data;
6512 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6514 len = le16_to_cpu(cmd->len);
6516 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
/* '>' (not '!=') because more commands may follow this one. */
6519 if (len > skb->len || !cmd->ident) {
6520 BT_DBG("corrupted command");
6524 err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
6526 struct l2cap_cmd_rej_unk rej;
6528 BT_ERR("Wrong link type (%d)", err);
6530 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6531 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Verify (and strip) the CRC16 FCS trailer of an ERTM/streaming frame.
 * The CRC covers the L2CAP header (which precedes skb->data) plus the
 * payload. No-op when the channel does not use L2CAP_FCS_CRC16.
 */
6542 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
6544 u16 our_fcs, rcv_fcs;
/* Extended control fields imply the larger extended header. */
6547 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6548 hdr_size = L2CAP_EXT_HDR_SIZE;
6550 hdr_size = L2CAP_ENH_HDR_SIZE;
6552 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim first; the received FCS then sits just past skb->len. */
6553 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6554 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6555 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6557 if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) from the peer with whatever frame best carries
 * the F-bit: RNR if we are locally busy, otherwise pending I-frames, or
 * a plain RR if no I-frame/S-frame carried the F-bit.
 */
6563 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
6565 struct l2cap_ctrl control;
6567 BT_DBG("chan %p", chan);
6569 memset(&control, 0, sizeof(control));
6572 control.reqseq = chan->buffer_seq;
/* The next transmitted frame must carry the F-bit. */
6573 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6575 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6576 control.super = L2CAP_SUPER_RNR;
6577 l2cap_send_sframe(chan, &control);
/* Remote just left busy state: restart the retransmission timer if
 * frames are still unacknowledged.
 */
6580 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
6581 chan->unacked_frames > 0)
6582 __set_retrans_timer(chan);
6584 /* Send pending iframes */
6585 l2cap_ertm_send(chan);
6587 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
6588 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
6589 /* F-bit wasn't sent in an s-frame or i-frame yet, so
6592 control.super = L2CAP_SUPER_RR;
6593 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list (SDU reassembly), keeping
 * *last_frag pointing at the tail for O(1) appends, and updating the
 * head skb's aggregate length accounting.
 */
6597 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
6598 struct sk_buff **last_frag)
6600 /* skb->len reflects data in skb as well as all fragments
6601 * skb->data_len reflects only data in fragments
/* First fragment starts the frag_list; later ones chain off the tail. */
6603 if (!skb_has_frag_list(skb))
6604 skb_shinfo(skb)->frag_list = new_frag;
6606 new_frag->next = NULL;
6608 (*last_frag)->next = new_frag;
6609 *last_frag = new_frag;
6611 skb->len += new_frag->len;
6612 skb->data_len += new_frag->len;
6613 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from I-frames according to the SAR bits: deliver
 * unsegmented frames directly; for START read the 16-bit SDU length and
 * start chan->sdu; for CONTINUE/END append and deliver once complete.
 * NOTE(review): several branch bodies and the error-cleanup paths are
 * partially elided in this excerpt.
 */
6616 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
6617 struct l2cap_ctrl *control)
6621 switch (control->sar) {
6622 case L2CAP_SAR_UNSEGMENTED:
/* Whole SDU in one frame — hand it straight to the upper layer. */
6626 err = chan->ops->recv(chan, skb);
6629 case L2CAP_SAR_START:
/* First fragment carries a 2-byte total SDU length prefix. */
6633 if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
6636 chan->sdu_len = get_unaligned_le16(skb->data);
6637 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Announced SDU larger than our MTU is a protocol error. */
6639 if (chan->sdu_len > chan->imtu) {
6644 if (skb->len >= chan->sdu_len)
6648 chan->sdu_last_frag = skb;
6654 case L2CAP_SAR_CONTINUE:
6658 append_skb_frag(chan->sdu, skb,
6659 &chan->sdu_last_frag);
/* A CONTINUE fragment must not complete (or overflow) the SDU. */
6662 if (chan->sdu->len >= chan->sdu_len)
/* END fragment: total length must match exactly, then deliver. */
6672 append_skb_frag(chan->sdu, skb,
6673 &chan->sdu_last_frag);
6676 if (chan->sdu->len != chan->sdu_len)
6679 err = chan->ops->recv(chan, chan->sdu);
6682 /* Reassembly complete */
6684 chan->sdu_last_frag = NULL;
/* Error path: discard the partial SDU. */
6692 kfree_skb(chan->sdu);
6694 chan->sdu_last_frag = NULL;
/* Placeholder for re-segmenting queued frames after an AMP channel move
 * changes the usable MTU (body elided in this excerpt — appears to be a
 * stub; TODO confirm against the full source).
 */
6701 static int l2cap_resegment(struct l2cap_chan *chan)
/* Report a local-busy transition from the upper layer into the ERTM
 * transmit state machine. Only meaningful in ERTM mode.
 */
6707 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6711 if (chan->mode != L2CAP_MODE_ERTM)
6714 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6715 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue in sequence order, reassembling each in-order
 * frame, and fall back to normal RECV state once the queue empties.
 */
6718 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6721 /* Pass sequential frames to l2cap_reassemble_sdu()
6722 * until a gap is encountered.
6725 BT_DBG("chan %p", chan);
6727 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6728 struct sk_buff *skb;
6729 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6730 chan->buffer_seq, skb_queue_len(&chan->srej_q));
/* Stop at the first gap: the needed sequence number isn't queued. */
6732 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6737 skb_unlink(skb, &chan->srej_q);
6738 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6739 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
/* All gaps filled — leave SREJ_SENT state and ack what we have. */
6744 if (skb_queue_empty(&chan->srej_q)) {
6745 chan->rx_state = L2CAP_RX_STATE_RECV;
6746 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: retransmit the single requested frame,
 * respecting the retry limit and the P/F-bit bookkeeping needed to avoid
 * re-retransmitting on the matching F-bit response.
 */
6752 static void l2cap_handle_srej(struct l2cap_chan *chan,
6753 struct l2cap_ctrl *control)
6755 struct sk_buff *skb;
6757 BT_DBG("chan %p, control %p", chan, control);
/* SREJ for the next unsent sequence number is nonsensical. */
6759 if (control->reqseq == chan->next_tx_seq) {
6760 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6761 l2cap_send_disconn_req(chan, ECONNRESET);
6765 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6768 BT_DBG("Seq %d not available for retransmission",
/* max_tx == 0 means unlimited retries. */
6773 if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6774 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6775 l2cap_send_disconn_req(chan, ECONNRESET);
6779 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6781 if (control->poll) {
6782 l2cap_pass_to_tx(chan, control);
/* Poll set: respond with F-bit and remember the reqseq so the
 * duplicate SREJ (if any) isn't retransmitted twice.
 */
6784 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6785 l2cap_retransmit(chan, control);
6786 l2cap_ertm_send(chan);
6788 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6789 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6790 chan->srej_save_reqseq = control->reqseq;
6793 l2cap_pass_to_tx_fbit(chan, control);
6795 if (control->final) {
/* F-bit SREJ: only retransmit if this isn't the answer to the
 * SREJ we already acted on.
 */
6796 if (chan->srej_save_reqseq != control->reqseq ||
6797 !test_and_clear_bit(CONN_SREJ_ACT,
6799 l2cap_retransmit(chan, control);
6801 l2cap_retransmit(chan, control);
6802 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6803 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6804 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: retransmit everything from reqseq
 * onward, with the same retry-limit and REJ_ACT double-retransmit
 * protection as the SREJ path.
 */
6810 static void l2cap_handle_rej(struct l2cap_chan *chan,
6811 struct l2cap_ctrl *control)
6813 struct sk_buff *skb;
6815 BT_DBG("chan %p, control %p", chan, control);
6817 if (control->reqseq == chan->next_tx_seq) {
6818 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6819 l2cap_send_disconn_req(chan, ECONNRESET);
6823 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6825 if (chan->max_tx && skb &&
6826 bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6827 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6828 l2cap_send_disconn_req(chan, ECONNRESET);
6832 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6834 l2cap_pass_to_tx(chan, control);
6836 if (control->final) {
/* F-bit REJ: skip the retransmit if we already did it when the
 * P-bit frame that provoked this F-bit was handled.
 */
6837 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6838 l2cap_retransmit_all(chan, control);
6840 l2cap_retransmit_all(chan, control);
6841 l2cap_ertm_send(chan);
6842 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6843 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to the receive window:
 * expected / expected-under-SREJ / duplicate / unexpected (gap) /
 * invalid. The SREJ_SENT state adds extra cases for frames that were
 * explicitly requested. Returns one of the L2CAP_TXSEQ_* codes.
 */
6847 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6849 BT_DBG("chan %p, txseq %d", chan, txseq);
6851 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6852 chan->expected_tx_seq);
6854 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
/* Outside the tx window while SREJs are outstanding. */
6855 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6857 /* See notes below regarding "double poll" and
6860 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6861 BT_DBG("Invalid/Ignore - after SREJ");
6862 return L2CAP_TXSEQ_INVALID_IGNORE;
6864 BT_DBG("Invalid - in window after SREJ sent");
6865 return L2CAP_TXSEQ_INVALID;
/* Head of the SREJ list: this is the retransmission we asked for. */
6869 if (chan->srej_list.head == txseq) {
6870 BT_DBG("Expected SREJ");
6871 return L2CAP_TXSEQ_EXPECTED_SREJ;
6874 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6875 BT_DBG("Duplicate SREJ - txseq already stored");
6876 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6879 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6880 BT_DBG("Unexpected SREJ - not requested");
6881 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6885 if (chan->expected_tx_seq == txseq) {
/* Even the expected seq is invalid if it falls past the window. */
6886 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6888 BT_DBG("Invalid - txseq outside tx window");
6889 return L2CAP_TXSEQ_INVALID;
6892 return L2CAP_TXSEQ_EXPECTED;
/* Behind expected_tx_seq (mod window) means already received. */
6896 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6897 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6898 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6899 return L2CAP_TXSEQ_DUPLICATE;
6902 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6903 /* A source of invalid packets is a "double poll" condition,
6904 * where delays cause us to send multiple poll packets. If
6905 * the remote stack receives and processes both polls,
6906 * sequence numbers can wrap around in such a way that a
6907 * resent frame has a sequence number that looks like new data
6908 * with a sequence gap. This would trigger an erroneous SREJ
6911 * Fortunately, this is impossible with a tx window that's
6912 * less than half of the maximum sequence number, which allows
6913 * invalid frames to be safely ignored.
6915 * With tx window sizes greater than half of the tx window
6916 * maximum, the frame is invalid and cannot be ignored. This
6917 * causes a disconnect.
6920 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6921 BT_DBG("Invalid/Ignore - txseq outside tx window");
6922 return L2CAP_TXSEQ_INVALID_IGNORE;
6924 BT_DBG("Invalid - txseq outside tx window");
6925 return L2CAP_TXSEQ_INVALID;
6928 BT_DBG("Unexpected - txseq indicates missing frames");
6929 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive state machine, normal RECV state. Processes I-frames
 * (classified by txseq) and RR/RNR/REJ/SREJ S-frames; frames not taken
 * into a queue are freed at the end.
 * NOTE(review): several break/case/brace lines are elided in this
 * excerpt; comments describe only visible code.
 */
6933 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6934 struct l2cap_ctrl *control,
6935 struct sk_buff *skb, u8 event)
6937 struct l2cap_ctrl local_control;
6939 bool skb_in_use = false;
6941 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6945 case L2CAP_EV_RECV_IFRAME:
6946 switch (l2cap_classify_txseq(chan, control->txseq)) {
6947 case L2CAP_TXSEQ_EXPECTED:
6948 l2cap_pass_to_tx(chan, control);
/* Locally busy: expected frame is dropped; it will show up as
 * missing once busy clears.
 */
6950 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6951 BT_DBG("Busy, discarding expected seq %d",
6956 chan->expected_tx_seq = __next_seq(chan,
6959 chan->buffer_seq = chan->expected_tx_seq;
6962 /* l2cap_reassemble_sdu may free skb, hence invalidate
6963 * control, so make a copy in advance to use it after
6964 * l2cap_reassemble_sdu returns and to avoid the race
6965 * condition, for example:
6967 * The current thread calls:
6968 * l2cap_reassemble_sdu
6969 * chan->ops->recv == l2cap_sock_recv_cb
6970 * __sock_queue_rcv_skb
6971 * Another thread calls:
6975 * Then the current thread tries to access control, but
6976 * it was freed by skb_free_datagram.
6978 local_control = *control;
6979 err = l2cap_reassemble_sdu(chan, skb, control);
6983 if (local_control.final) {
6984 if (!test_and_clear_bit(CONN_REJ_ACT,
6985 &chan->conn_state)) {
6986 local_control.final = 0;
6987 l2cap_retransmit_all(chan, &local_control);
6988 l2cap_ertm_send(chan);
6992 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6993 l2cap_send_ack(chan);
6995 case L2CAP_TXSEQ_UNEXPECTED:
6996 l2cap_pass_to_tx(chan, control);
6998 /* Can't issue SREJ frames in the local busy state.
6999 * Drop this frame, it will be seen as missing
7000 * when local busy is exited.
7002 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
7003 BT_DBG("Busy, discarding unexpected seq %d",
7008 /* There was a gap in the sequence, so an SREJ
7009 * must be sent for each missing frame. The
7010 * current frame is stored for later use.
7012 skb_queue_tail(&chan->srej_q, skb);
7014 BT_DBG("Queued %p (queue len %d)", skb,
7015 skb_queue_len(&chan->srej_q));
7017 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
7018 l2cap_seq_list_clear(&chan->srej_list);
7019 l2cap_send_srej(chan, control->txseq);
/* Wait in SREJ_SENT until the gaps are retransmitted. */
7021 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
7023 case L2CAP_TXSEQ_DUPLICATE:
7024 l2cap_pass_to_tx(chan, control);
7026 case L2CAP_TXSEQ_INVALID_IGNORE:
7028 case L2CAP_TXSEQ_INVALID:
7030 l2cap_send_disconn_req(chan, ECONNRESET);
7034 case L2CAP_EV_RECV_RR:
7035 l2cap_pass_to_tx(chan, control);
7036 if (control->final) {
7037 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* F-bit RR: retransmit only once per poll, and never mid-move. */
7039 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
7040 !__chan_is_moving(chan)) {
7042 l2cap_retransmit_all(chan, control);
7045 l2cap_ertm_send(chan);
7046 } else if (control->poll) {
7047 l2cap_send_i_or_rr_or_rnr(chan);
7049 if (test_and_clear_bit(CONN_REMOTE_BUSY,
7050 &chan->conn_state) &&
7051 chan->unacked_frames)
7052 __set_retrans_timer(chan);
7054 l2cap_ertm_send(chan);
7057 case L2CAP_EV_RECV_RNR:
7058 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7059 l2cap_pass_to_tx(chan, control);
7060 if (control && control->poll) {
7061 set_bit(CONN_SEND_FBIT, &chan->conn_state);
7062 l2cap_send_rr_or_rnr(chan, 0);
/* Peer is busy: stop retransmitting until it recovers. */
7064 __clear_retrans_timer(chan);
7065 l2cap_seq_list_clear(&chan->retrans_list);
7067 case L2CAP_EV_RECV_REJ:
7068 l2cap_handle_rej(chan, control);
7070 case L2CAP_EV_RECV_SREJ:
7071 l2cap_handle_srej(chan, control);
/* Free the frame unless it was queued (srej_q) above. */
7077 if (skb && !skb_in_use) {
7078 BT_DBG("Freeing %p", skb);
/* ERTM receive state machine, SREJ_SENT state: SREJs are outstanding for
 * missing frames, so arriving I-frames are queued (not delivered) until
 * l2cap_rx_queued_iframes() can reassemble them in order.
 * NOTE(review): several break/brace lines are elided in this excerpt.
 */
7085 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
7086 struct l2cap_ctrl *control,
7087 struct sk_buff *skb, u8 event)
7090 u16 txseq = control->txseq;
7091 bool skb_in_use = false;
7093 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7097 case L2CAP_EV_RECV_IFRAME:
7098 switch (l2cap_classify_txseq(chan, txseq)) {
7099 case L2CAP_TXSEQ_EXPECTED:
7100 /* Keep frame for reassembly later */
7101 l2cap_pass_to_tx(chan, control);
7102 skb_queue_tail(&chan->srej_q, skb);
7104 BT_DBG("Queued %p (queue len %d)", skb,
7105 skb_queue_len(&chan->srej_q));
7107 chan->expected_tx_seq = __next_seq(chan, txseq);
7109 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* The retransmission we asked for arrived; drop it from the
 * SREJ list, queue it, and try to drain the queue in order.
 */
7110 l2cap_seq_list_pop(&chan->srej_list);
7112 l2cap_pass_to_tx(chan, control);
7113 skb_queue_tail(&chan->srej_q, skb);
7115 BT_DBG("Queued %p (queue len %d)", skb,
7116 skb_queue_len(&chan->srej_q));
7118 err = l2cap_rx_queued_iframes(chan);
7123 case L2CAP_TXSEQ_UNEXPECTED:
7124 /* Got a frame that can't be reassembled yet.
7125 * Save it for later, and send SREJs to cover
7126 * the missing frames.
7128 skb_queue_tail(&chan->srej_q, skb);
7130 BT_DBG("Queued %p (queue len %d)", skb,
7131 skb_queue_len(&chan->srej_q));
7133 l2cap_pass_to_tx(chan, control);
7134 l2cap_send_srej(chan, control->txseq);
7136 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
7137 /* This frame was requested with an SREJ, but
7138 * some expected retransmitted frames are
7139 * missing. Request retransmission of missing
7142 skb_queue_tail(&chan->srej_q, skb);
7144 BT_DBG("Queued %p (queue len %d)", skb,
7145 skb_queue_len(&chan->srej_q));
7147 l2cap_pass_to_tx(chan, control);
7148 l2cap_send_srej_list(chan, control->txseq);
7150 case L2CAP_TXSEQ_DUPLICATE_SREJ:
7151 /* We've already queued this frame. Drop this copy. */
7152 l2cap_pass_to_tx(chan, control);
7154 case L2CAP_TXSEQ_DUPLICATE:
7155 /* Expecting a later sequence number, so this frame
7156 * was already received. Ignore it completely.
7159 case L2CAP_TXSEQ_INVALID_IGNORE:
7161 case L2CAP_TXSEQ_INVALID:
7163 l2cap_send_disconn_req(chan, ECONNRESET);
7167 case L2CAP_EV_RECV_RR:
7168 l2cap_pass_to_tx(chan, control);
7169 if (control->final) {
7170 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7172 if (!test_and_clear_bit(CONN_REJ_ACT,
7173 &chan->conn_state)) {
7175 l2cap_retransmit_all(chan, control);
7178 l2cap_ertm_send(chan);
7179 } else if (control->poll) {
7180 if (test_and_clear_bit(CONN_REMOTE_BUSY,
7181 &chan->conn_state) &&
7182 chan->unacked_frames) {
7183 __set_retrans_timer(chan);
/* Answer the poll by re-sending the tail SREJ with the F-bit. */
7186 set_bit(CONN_SEND_FBIT, &chan->conn_state);
7187 l2cap_send_srej_tail(chan);
7189 if (test_and_clear_bit(CONN_REMOTE_BUSY,
7190 &chan->conn_state) &&
7191 chan->unacked_frames)
7192 __set_retrans_timer(chan);
7194 l2cap_send_ack(chan);
7197 case L2CAP_EV_RECV_RNR:
7198 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7199 l2cap_pass_to_tx(chan, control);
7200 if (control->poll) {
7201 l2cap_send_srej_tail(chan);
/* No poll: just send a plain RR acknowledging buffer_seq. */
7203 struct l2cap_ctrl rr_control;
7204 memset(&rr_control, 0, sizeof(rr_control));
7205 rr_control.sframe = 1;
7206 rr_control.super = L2CAP_SUPER_RR;
7207 rr_control.reqseq = chan->buffer_seq;
7208 l2cap_send_sframe(chan, &rr_control);
7212 case L2CAP_EV_RECV_REJ:
7213 l2cap_handle_rej(chan, control);
7215 case L2CAP_EV_RECV_SREJ:
7216 l2cap_handle_srej(chan, control);
/* Free the frame unless it was queued above. */
7220 if (skb && !skb_in_use) {
7221 BT_DBG("Freeing %p", skb);
/* Finish a channel move: drop back to the normal RECV RX state, refresh
 * the connection MTU for whichever link the channel now runs on, and
 * re-segment any queued outbound data for the (possibly different) MTU.
 * NOTE(review): lines are elided in this view; the conditional selecting
 * between the two MTU assignments below is not visible here.
 */
7228 static int l2cap_finish_move(struct l2cap_chan *chan)
7230 BT_DBG("chan %p", chan);
7232 chan->rx_state = L2CAP_RX_STATE_RECV;
/* hs_hcon path — presumably the AMP/high-speed link's block MTU; verify */
7235 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
/* otherwise the ACL link MTU of the underlying HCI device */
7237 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7239 return l2cap_resegment(chan);
/* ERTM RX state machine handler for the WAIT_P state (waiting for a
 * poll from the remote). Acknowledges frames covered by reqseq, rewinds
 * the TX queue to the remote's expected point, completes the channel
 * move, and answers with an F-bit frame before handing residual
 * processing to the RECV-state handler.
 */
7242 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
7243 struct l2cap_ctrl *control,
7244 struct sk_buff *skb, u8 event)
7248 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
/* Ack everything the remote has confirmed via reqseq */
7254 l2cap_process_reqseq(chan, control->reqseq);
7256 if (!skb_queue_empty(&chan->tx_q))
7257 chan->tx_send_head = skb_peek(&chan->tx_q);
7259 chan->tx_send_head = NULL;
7261 /* Rewind next_tx_seq to the point expected
7264 chan->next_tx_seq = control->reqseq;
7265 chan->unacked_frames = 0;
7267 err = l2cap_finish_move(chan);
/* Respond to the poll with the F-bit set */
7271 set_bit(CONN_SEND_FBIT, &chan->conn_state);
7272 l2cap_send_i_or_rr_or_rnr(chan);
/* I-frames still need normal RECV-state processing (skb passed as NULL
 * here — presumably the payload was consumed earlier; confirm upstream).
 */
7274 if (event == L2CAP_EV_RECV_IFRAME)
7277 return l2cap_rx_state_recv(chan, control, NULL, event);
/* ERTM RX state machine handler for the WAIT_F state (waiting for a
 * frame with the Final bit after we polled). Non-final frames are not
 * acted on here; on the final frame the channel returns to RECV,
 * rewinds its TX bookkeeping to reqseq, refreshes the MTU for the
 * current link, re-segments, and processes the frame as normal RECV.
 */
7280 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
7281 struct l2cap_ctrl *control,
7282 struct sk_buff *skb, u8 event)
/* Ignore anything without the F-bit while in WAIT_F */
7286 if (!control->final)
7289 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7291 chan->rx_state = L2CAP_RX_STATE_RECV;
7292 l2cap_process_reqseq(chan, control->reqseq);
7294 if (!skb_queue_empty(&chan->tx_q))
7295 chan->tx_send_head = skb_peek(&chan->tx_q);
7297 chan->tx_send_head = NULL;
7299 /* Rewind next_tx_seq to the point expected
7302 chan->next_tx_seq = control->reqseq;
7303 chan->unacked_frames = 0;
/* NOTE(review): the branch selecting between these two MTU updates is
 * elided in this view (hs_hcon vs. plain ACL link).
 */
7306 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7308 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7310 err = l2cap_resegment(chan);
7313 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Validate an incoming reqseq: it must fall inside the window of frames
 * we have transmitted but not yet seen acknowledged. Computed as modular
 * offsets from next_tx_seq so sequence-number wraparound is handled.
 */
7318 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7320 /* Make sure reqseq is for a packet that has been sent but not acked */
7323 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7324 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM RX dispatcher: validates the frame's reqseq and routes
 * the event to the handler for the channel's current RX state. An
 * out-of-window reqseq is a protocol violation and tears the channel
 * down with ECONNRESET.
 */
7327 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7328 struct sk_buff *skb, u8 event)
7332 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7333 control, skb, event, chan->rx_state);
7335 if (__valid_reqseq(chan, control->reqseq)) {
7336 switch (chan->rx_state) {
7337 case L2CAP_RX_STATE_RECV:
7338 err = l2cap_rx_state_recv(chan, control, skb, event);
7340 case L2CAP_RX_STATE_SREJ_SENT:
7341 err = l2cap_rx_state_srej_sent(chan, control, skb,
7344 case L2CAP_RX_STATE_WAIT_P:
7345 err = l2cap_rx_state_wait_p(chan, control, skb, event);
7347 case L2CAP_RX_STATE_WAIT_F:
7348 err = l2cap_rx_state_wait_f(chan, control, skb, event);
/* Invalid reqseq: disconnect rather than guess at peer state */
7355 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
7356 control->reqseq, chan->next_tx_seq,
7357 chan->expected_ack_seq);
7358 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: only the exactly-expected txseq is
 * reassembled; anything else causes any partial SDU to be discarded
 * (streaming mode has no retransmission). txseq is copied out of
 * *control up front because reassembly may free the skb that backs it
 * (see the race described below).
 */
7364 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7365 struct sk_buff *skb)
7367 /* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
7368 * the txseq field in advance to use it after l2cap_reassemble_sdu
7369 * returns and to avoid the race condition, for example:
7371 * The current thread calls:
7372 * l2cap_reassemble_sdu
7373 * chan->ops->recv == l2cap_sock_recv_cb
7374 * __sock_queue_rcv_skb
7375 * Another thread calls:
7379 * Then the current thread tries to access control, but it was freed by
7380 * skb_free_datagram.
7382 u16 txseq = control->txseq;
7384 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
7387 if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
7388 l2cap_pass_to_tx(chan, control);
7390 BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
7391 __next_seq(chan, chan->buffer_seq));
7393 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
7395 l2cap_reassemble_sdu(chan, skb, control);
/* Unexpected txseq: drop partial SDU state and the frame itself */
7398 kfree_skb(chan->sdu);
7401 chan->sdu_last_frag = NULL;
7405 BT_DBG("Freeing %p", skb);
/* Track the last seen txseq so classification stays in sync */
7410 chan->last_acked_seq = txseq;
7411 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Parse and validate a single ERTM/streaming PDU: checks the FCS,
 * enforces the MPS limit, applies the optional socket filter, then
 * dispatches I-frames and S-frames into the RX state machine
 * (l2cap_rx) or the streaming receiver. Malformed frames either get
 * silently dropped (bad FCS) or trigger a disconnect.
 */
7416 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7418 struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
7422 __unpack_control(chan, skb);
7427 * We can just drop the corrupted I-frame here.
7428 * Receiver will miss it and start proper recovery
7429 * procedures and ask for retransmission.
7431 if (l2cap_check_fcs(chan, skb))
/* Account for the SDU-length header on the first fragment and for the
 * trailing FCS when computing the payload length checked against MPS.
 */
7434 if (!control->sframe && control->sar == L2CAP_SAR_START)
7435 len -= L2CAP_SDULEN_SIZE;
7437 if (chan->fcs == L2CAP_FCS_CRC16)
7438 len -= L2CAP_FCS_SIZE;
7440 if (len > chan->mps) {
7441 l2cap_send_disconn_req(chan, ECONNRESET);
7445 if (chan->ops->filter) {
7446 if (chan->ops->filter(chan, skb))
7450 if (!control->sframe) {
7453 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
7454 control->sar, control->reqseq, control->final,
7457 /* Validate F-bit - F=0 always valid, F=1 only
7458 * valid in TX WAIT_F
7460 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
7463 if (chan->mode != L2CAP_MODE_STREAMING) {
7464 event = L2CAP_EV_RECV_IFRAME;
7465 err = l2cap_rx(chan, control, skb, event);
7467 err = l2cap_stream_rx(chan, control, skb);
7471 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit S-frame "super" field to RX state machine events */
7473 const u8 rx_func_to_event[4] = {
7474 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
7475 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
7478 /* Only I-frames are expected in streaming mode */
7479 if (chan->mode == L2CAP_MODE_STREAMING)
7482 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
7483 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; leftover bytes are a protocol error */
7487 BT_ERR("Trailing bytes: %d in sframe", len);
7488 l2cap_send_disconn_req(chan, ECONNRESET);
7492 /* Validate F and P bits */
7493 if (control->final && (control->poll ||
7494 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
7497 event = rx_func_to_event[control->super];
7498 if (l2cap_rx(chan, control, skb, event))
7499 l2cap_send_disconn_req(chan, ECONNRESET);
/* Top up the remote's LE credit count: aim for enough credits to cover
 * a full imtu worth of MPS-sized fragments (+1), and send an
 * L2CAP_LE_CREDITS packet for the difference if we are below target.
 */
7509 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7511 struct l2cap_conn *conn = chan->conn;
7512 struct l2cap_le_credits pkt;
/* Target credit level: one credit per MPS fragment of an imtu SDU, +1 */
7515 return_credits = (chan->imtu / chan->mps) + 1;
7517 if (chan->rx_credits >= return_credits)
7520 return_credits -= chan->rx_credits;
7522 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7524 chan->rx_credits += return_credits;
7526 pkt.cid = cpu_to_le16(chan->scid);
7527 pkt.credits = cpu_to_le16(return_credits);
7529 chan->ident = l2cap_get_ident(conn);
7531 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Deliver a fully reassembled SDU to the channel owner and, once the
 * receive has been confirmed, replenish the sender's credits.
 */
7534 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7538 BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7540 /* Wait recv to confirm reception before updating the credits */
7541 err = chan->ops->recv(chan, skb);
7543 /* Update credits whenever an SDU is received */
7544 l2cap_chan_le_send_credits(chan);
/* Credit-based (LE/enhanced) data receive path: consumes one credit per
 * PDU, validates SDU length bounds against imtu, and reassembles
 * multi-PDU SDUs in chan->sdu. Complete SDUs go to l2cap_ecred_recv().
 * On bounds violations the partial SDU is dropped; the skb is always
 * owned (and freed) here, so the function never reports an error for
 * data it already consumed (see the trailing comment).
 */
7549 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
/* Receiving without credits is a peer protocol violation */
7553 if (!chan->rx_credits) {
7554 BT_ERR("No credits to receive LE L2CAP data");
7555 l2cap_send_disconn_req(chan, ECONNRESET);
7559 if (chan->imtu < skb->len) {
7560 BT_ERR("Too big LE L2CAP PDU");
7565 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
7567 /* Update if remote had run out of credits, this should only happens
7568 * if the remote is not using the entire MPS.
7570 if (!chan->rx_credits)
7571 l2cap_chan_le_send_credits(chan);
/* First PDU of an SDU: carries a 16-bit SDU length prefix */
7578 sdu_len = get_unaligned_le16(skb->data);
7579 skb_pull(skb, L2CAP_SDULEN_SIZE);
7581 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
7582 sdu_len, skb->len, chan->imtu);
7584 if (sdu_len > chan->imtu) {
7585 BT_ERR("Too big LE L2CAP SDU length received");
7590 if (skb->len > sdu_len) {
7591 BT_ERR("Too much LE L2CAP data received");
/* Single-PDU SDU: deliver immediately, no reassembly state needed */
7596 if (skb->len == sdu_len)
7597 return l2cap_ecred_recv(chan, skb);
7600 chan->sdu_len = sdu_len;
7601 chan->sdu_last_frag = skb;
7603 /* Detect if remote is not able to use the selected MPS */
7604 if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
7605 u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
7607 /* Adjust the number of credits */
7608 BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
7609 chan->mps = mps_len;
7610 l2cap_chan_le_send_credits(chan);
/* Continuation PDU: append to the SDU under reassembly */
7616 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
7617 chan->sdu->len, skb->len, chan->sdu_len);
7619 if (chan->sdu->len + skb->len > chan->sdu_len) {
7620 BT_ERR("Too much LE L2CAP data received");
7625 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
7628 if (chan->sdu->len == chan->sdu_len) {
7629 err = l2cap_ecred_recv(chan, chan->sdu);
7632 chan->sdu_last_frag = NULL;
/* Error path: discard any partial SDU */
7640 kfree_skb(chan->sdu);
7642 chan->sdu_last_frag = NULL;
7646 /* We can't return an error here since we took care of the skb
7647 * freeing internally. An error return would cause the caller to
7648 * do a double-free of the skb.
/* Route an incoming PDU to the channel identified by its CID, then
 * dispatch by channel mode: credit-based modes, basic mode (with an
 * unavoidable drop-on-overflow policy), or ERTM/streaming. Unknown CIDs
 * are dropped. Holds the channel ref + lock for the duration.
 */
7653 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
7654 struct sk_buff *skb)
7656 struct l2cap_chan *chan;
7658 chan = l2cap_get_chan_by_scid(conn, cid);
/* A2MP has no pre-existing channel; create one on demand */
7660 if (cid == L2CAP_CID_A2MP) {
7661 chan = a2mp_channel_create(conn, skb);
7667 l2cap_chan_hold(chan);
7668 l2cap_chan_lock(chan);
7670 BT_DBG("unknown cid 0x%4.4x", cid);
7671 /* Drop packet and return */
7677 BT_DBG("chan %p, len %d", chan, skb->len);
7679 /* If we receive data on a fixed channel before the info req/rsp
7680 * procedure is done simply assume that the channel is supported
7681 * and mark it as ready.
7683 if (chan->chan_type == L2CAP_CHAN_FIXED)
7684 l2cap_chan_ready(chan);
7686 if (chan->state != BT_CONNECTED)
7689 switch (chan->mode) {
7690 case L2CAP_MODE_LE_FLOWCTL:
7691 case L2CAP_MODE_EXT_FLOWCTL:
7692 if (l2cap_ecred_data_rcv(chan, skb) < 0)
7697 case L2CAP_MODE_BASIC:
7698 /* If socket recv buffers overflows we drop data here
7699 * which is *bad* because L2CAP has to be reliable.
7700 * But we don't have any other choice. L2CAP doesn't
7701 * provide flow control mechanism. */
7703 if (chan->imtu < skb->len) {
7704 BT_ERR("Dropping L2CAP data: receive buffer overflow");
7708 if (!chan->ops->recv(chan, skb))
7712 case L2CAP_MODE_ERTM:
7713 case L2CAP_MODE_STREAMING:
7714 l2cap_data_rcv(chan, skb);
7718 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7726 l2cap_chan_unlock(chan);
7727 l2cap_chan_put(chan);
/* Deliver a connectionless (ACL-only) PSM-addressed datagram to a
 * matching global channel. Stores the sender's address and PSM in the
 * skb control block so recvmsg can report msg_name. Drops the frame if
 * no suitable channel, bad state, or imtu exceeded.
 */
7730 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7731 struct sk_buff *skb)
7733 struct hci_conn *hcon = conn->hcon;
7734 struct l2cap_chan *chan;
/* Connectionless data is only valid on BR/EDR ACL links */
7736 if (hcon->type != ACL_LINK)
7739 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7744 BT_DBG("chan %p, len %d", chan, skb->len);
7746 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7749 if (chan->imtu < skb->len)
7752 /* Store remote BD_ADDR and PSM for msg_name */
7753 bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7754 bt_cb(skb)->l2cap.psm = psm;
7756 if (!chan->ops->recv(chan, skb)) {
7757 l2cap_chan_put(chan);
7762 l2cap_chan_put(chan);
/* Demultiplex one complete L2CAP frame: strip the basic header,
 * sanity-check the announced length, filter rejected LE peers, and
 * route by CID (signaling, connectionless, LE signaling, or a data
 * channel). Frames arriving before the HCI link is fully connected are
 * parked on conn->pending_rx for later processing.
 */
7767 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7769 struct l2cap_hdr *lh = (void *) skb->data;
7770 struct hci_conn *hcon = conn->hcon;
7774 if (hcon->state != BT_CONNECTED) {
7775 BT_DBG("queueing pending rx skb");
7776 skb_queue_tail(&conn->pending_rx, skb);
7780 skb_pull(skb, L2CAP_HDR_SIZE);
7781 cid = __le16_to_cpu(lh->cid);
7782 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length */
7784 if (len != skb->len) {
7789 /* Since we can't actively block incoming LE connections we must
7790 * at least ensure that we ignore incoming data from them.
7792 if (hcon->type == LE_LINK &&
7793 hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
7794 bdaddr_dst_type(hcon))) {
7799 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7802 case L2CAP_CID_SIGNALING:
7803 l2cap_sig_channel(conn, skb);
7806 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a PSM before the payload */
7807 psm = get_unaligned((__le16 *) skb->data);
7808 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7809 l2cap_conless_channel(conn, psm, skb);
7812 case L2CAP_CID_LE_SIGNALING:
7813 l2cap_le_sig_channel(conn, skb);
7817 l2cap_data_channel(conn, cid, skb);
/* Workqueue handler: drain frames that arrived before the HCI link was
 * fully connected and feed them through the normal receive path.
 */
7822 static void process_pending_rx(struct work_struct *work)
7824 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7826 struct sk_buff *skb;
7830 while ((skb = skb_dequeue(&conn->pending_rx)))
7831 l2cap_recv_frame(conn, skb);
/* Allocate and initialize the l2cap_conn object for an HCI connection:
 * creates the HCI channel, picks the MTU from the link type, advertises
 * the local fixed channels (signaling/connectionless, optionally A2MP
 * and BR/EDR SMP), and sets up locks, lists, timers and work items.
 * Returns the existing conn if hcon->l2cap_data is already set (early
 * return elided in this view).
 */
7834 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7836 struct l2cap_conn *conn = hcon->l2cap_data;
7837 struct hci_chan *hchan;
7842 hchan = hci_chan_create(hcon);
7846 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan before bailing out */
7848 hci_chan_del(hchan);
7852 kref_init(&conn->ref);
7853 hcon->l2cap_data = conn;
7854 conn->hcon = hci_conn_get(hcon);
7855 conn->hchan = hchan;
7857 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* MTU selection by link type; LE uses le_mtu when the controller
 * reports one, otherwise the ACL MTU is used.
 */
7859 switch (hcon->type) {
7861 if (hcon->hdev->le_mtu) {
7862 conn->mtu = hcon->hdev->le_mtu;
7867 conn->mtu = hcon->hdev->acl_mtu;
7871 conn->feat_mask = 0;
7873 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7875 if (hcon->type == ACL_LINK &&
7876 hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7877 conn->local_fixed_chan |= L2CAP_FC_A2MP;
7879 if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7880 (bredr_sc_enabled(hcon->hdev) ||
7881 hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7882 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7884 mutex_init(&conn->ident_lock);
7885 mutex_init(&conn->chan_lock);
7887 INIT_LIST_HEAD(&conn->chan_l);
7888 INIT_LIST_HEAD(&conn->users);
7890 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7892 skb_queue_head_init(&conn->pending_rx);
7893 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7894 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7896 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Validate a PSM for the destination address type: LE PSMs are limited
 * to 0x0001-0x00ff, BR/EDR PSMs must have an odd low byte and an even
 * upper-byte LSB per the L2CAP spec. NOTE(review): lines are elided
 * here; the full function likely rejects psm == 0 first — confirm.
 */
7901 static bool is_valid_psm(u16 psm, u8 dst_type)
7906 if (bdaddr_type_is_le(dst_type))
7907 return (psm <= 0x00ff);
7909 /* PSM must be odd and lsb of upper byte must be 0 */
7910 return ((psm & 0x0101) == 0x0001);
/* Iteration context passed to l2cap_chan_list callbacks (see
 * l2cap_chan_by_pid). NOTE(review): further members (pid, count — used
 * by the callback below) are elided from this view.
 */
7913 struct l2cap_chan_data {
7914 struct l2cap_chan *chan;
/* l2cap_chan_list callback: counts channels that belong to the same
 * owner PID and PSM as the reference channel and are still deferred
 * EXT_FLOWCTL connects (BT_CONNECT, no ident assigned). Used to cap the
 * number of simultaneous ECRED channel connects.
 */
7919 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7921 struct l2cap_chan_data *d = data;
/* Skip the reference channel itself */
7924 if (chan == d->chan)
7927 if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7930 pid = chan->ops->get_peer_pid(chan);
7932 /* Only count deferred channels with the same PID/PSM */
7933 if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7934 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
/* Initiate an outgoing L2CAP channel connection.
 *
 * Validates the PSM/CID combination for the channel type and the
 * channel mode against module settings (e.g. enable_ecred), resolves a
 * route to @dst, creates the HCI connection (LE connect / LE scan
 * connect / ACL connect depending on address type and advertising
 * state), attaches the channel to the l2cap_conn, and either starts the
 * L2CAP connect procedure immediately (link already up) or arms the
 * channel timer. Returns 0 on success or a negative errno.
 */
7940 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7941 bdaddr_t *dst, u8 dst_type)
7943 struct l2cap_conn *conn;
7944 struct hci_conn *hcon;
7945 struct hci_dev *hdev;
7948 BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
7949 dst, dst_type, __le16_to_cpu(psm), chan->mode);
7951 hdev = hci_get_route(dst, &chan->src, chan->src_type);
7953 return -EHOSTUNREACH;
/* Parameter validation: PSM format, required PSM/CID per channel type */
7957 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7958 chan->chan_type != L2CAP_CHAN_RAW) {
7963 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7968 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
/* Mode gating: EXT_FLOWCTL requires the enable_ecred module option */
7973 switch (chan->mode) {
7974 case L2CAP_MODE_BASIC:
7976 case L2CAP_MODE_LE_FLOWCTL:
7978 case L2CAP_MODE_EXT_FLOWCTL:
7979 if (!enable_ecred) {
7984 case L2CAP_MODE_ERTM:
7985 case L2CAP_MODE_STREAMING:
7994 switch (chan->state) {
7998 /* Already connecting */
8003 /* Already connected */
8017 /* Set destination address and psm */
8018 bacpy(&chan->dst, dst);
8019 chan->dst_type = dst_type;
8024 if (bdaddr_type_is_le(dst_type)) {
8025 /* Convert from L2CAP channel address type to HCI address type
8027 if (dst_type == BDADDR_LE_PUBLIC)
8028 dst_type = ADDR_LE_DEV_PUBLIC;
8030 dst_type = ADDR_LE_DEV_RANDOM;
/* While advertising we connect directly; otherwise go through the
 * scan-then-connect path.
 */
8032 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8033 hcon = hci_connect_le(hdev, dst, dst_type,
8035 HCI_LE_CONN_TIMEOUT,
8036 HCI_ROLE_SLAVE, NULL);
8038 hcon = hci_connect_le_scan(hdev, dst, dst_type,
8040 HCI_LE_CONN_TIMEOUT,
8041 CONN_REASON_L2CAP_CHAN);
8044 u8 auth_type = l2cap_get_auth_type(chan);
8045 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
8046 CONN_REASON_L2CAP_CHAN);
8050 err = PTR_ERR(hcon);
8054 conn = l2cap_conn_add(hcon);
8056 hci_conn_drop(hcon);
/* Enforce the per-PID cap on concurrent ECRED channel connects */
8061 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
8062 struct l2cap_chan_data data;
8065 data.pid = chan->ops->get_peer_pid(chan);
8068 l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
8070 /* Check if there isn't too many channels being connected */
8071 if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
8072 hci_conn_drop(hcon);
8078 mutex_lock(&conn->chan_lock);
8079 l2cap_chan_lock(chan);
/* Reject a fixed CID that is already in use on this connection */
8081 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
8082 hci_conn_drop(hcon);
8087 /* Update source addr of the socket */
8088 bacpy(&chan->src, &hcon->src);
8089 chan->src_type = bdaddr_src_type(hcon);
8091 __l2cap_chan_add(conn, chan);
8093 /* l2cap_chan_add takes its own ref so we can drop this one */
8094 hci_conn_drop(hcon);
8096 l2cap_state_change(chan, BT_CONNECT);
8097 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
8099 /* Release chan->sport so that it can be reused by other
8100 * sockets (as it's only used for listening sockets).
8102 write_lock(&chan_list_lock);
8104 write_unlock(&chan_list_lock);
8106 if (hcon->state == BT_CONNECTED) {
8107 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
8108 __clear_chan_timer(chan);
8109 if (l2cap_chan_check_security(chan, true))
8110 l2cap_state_change(chan, BT_CONNECTED);
8112 l2cap_do_start(chan);
8118 l2cap_chan_unlock(chan);
8119 mutex_unlock(&conn->chan_lock);
8121 hci_dev_unlock(hdev);
8125 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
/* Send an ECRED reconfigure request advertising the channel's current
 * imtu/mps for its single SCID.
 */
8127 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
8129 struct l2cap_conn *conn = chan->conn;
8131 struct l2cap_ecred_reconf_req req;
8135 pdu.req.mtu = cpu_to_le16(chan->imtu);
8136 pdu.req.mps = cpu_to_le16(chan->mps);
8137 pdu.scid = cpu_to_le16(chan->scid);
8139 chan->ident = l2cap_get_ident(conn);
8141 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
/* Public API: raise the channel MTU to @mtu and trigger an ECRED
 * reconfigure. Shrinking below the current imtu is rejected (error
 * return elided from this view).
 */
8145 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8147 if (chan->imtu > mtu)
8150 BT_DBG("chan %p mtu 0x%4.4x", chan, mtu)
8154 l2cap_ecred_reconfigure(chan);
8159 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback for an incoming BR/EDR connection request: scan listening
 * channels and build the link-mode mask (accept / master) — lm1 from
 * channels bound to this adapter's address, lm2 from wildcard-bound
 * channels; exact matches take precedence.
 */
8161 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8163 int exact = 0, lm1 = 0, lm2 = 0;
8164 struct l2cap_chan *c;
8166 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8168 /* Find listening sockets and check their link_mode */
8169 read_lock(&chan_list_lock);
8170 list_for_each_entry(c, &chan_list, global_l) {
8171 if (c->state != BT_LISTEN)
8174 if (!bacmp(&c->src, &hdev->bdaddr)) {
8175 lm1 |= HCI_LM_ACCEPT;
8176 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8177 lm1 |= HCI_LM_MASTER;
8179 } else if (!bacmp(&c->src, BDADDR_ANY)) {
8180 lm2 |= HCI_LM_ACCEPT;
8181 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8182 lm2 |= HCI_LM_MASTER;
8185 read_unlock(&chan_list_lock);
8187 return exact ? lm1 : lm2;
8190 /* Find the next fixed channel in BT_LISTEN state, continue iteration
8191 * from an existing channel in the list or from the beginning of the
8192 * global list (by passing NULL as first parameter).
8194 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
8195 struct hci_conn *hcon)
8197 u8 src_type = bdaddr_src_type(hcon);
8199 read_lock(&chan_list_lock);
/* Resume after the given channel, or start at the list head */
8202 c = list_next_entry(c, global_l);
8204 c = list_entry(chan_list.next, typeof(*c), global_l);
8206 list_for_each_entry_from(c, &chan_list, global_l) {
8207 if (c->chan_type != L2CAP_CHAN_FIXED)
8209 if (c->state != BT_LISTEN)
/* Must be bound to the connection's source address or wildcard */
8211 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
8213 if (src_type != c->src_type)
/* Take a reference before leaving the locked section */
8216 c = l2cap_chan_hold_unless_zero(c);
8217 read_unlock(&chan_list_lock);
8221 read_unlock(&chan_list_lock);
/* HCI connect-complete callback: on failure tears down the l2cap_conn,
 * on success creates it and notifies every listening fixed channel so
 * it can spawn a per-connection child channel. Lookups are done one at
 * a time because l2cap_chan_lock() may sleep and cannot be held under
 * the global read lock.
 */
8226 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
8228 struct hci_dev *hdev = hcon->hdev;
8229 struct l2cap_conn *conn;
8230 struct l2cap_chan *pchan;
8233 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8236 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
/* Non-zero HCI status: connection failed, clean up */
8239 l2cap_conn_del(hcon, bt_to_errno(status));
8243 conn = l2cap_conn_add(hcon);
8247 dst_type = bdaddr_dst_type(hcon);
8249 /* If device is blocked, do not create channels for it */
8250 if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
8253 /* Find fixed channels and notify them of the new connection. We
8254 * use multiple individual lookups, continuing each time where
8255 * we left off, because the list lock would prevent calling the
8256 * potentially sleeping l2cap_chan_lock() function.
8258 pchan = l2cap_global_fixed_chan(NULL, hcon);
8260 struct l2cap_chan *chan, *next;
8262 /* Client fixed channels should override server ones */
8263 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
8266 l2cap_chan_lock(pchan);
8267 chan = pchan->ops->new_connection(pchan);
8269 bacpy(&chan->src, &hcon->src);
8270 bacpy(&chan->dst, &hcon->dst);
8271 chan->src_type = bdaddr_src_type(hcon);
8272 chan->dst_type = dst_type;
8274 __l2cap_chan_add(conn, chan);
8277 l2cap_chan_unlock(pchan);
8279 next = l2cap_global_fixed_chan(pchan, hcon);
8280 l2cap_chan_put(pchan);
8284 l2cap_conn_ready(conn);
/* HCI disconnect-indication callback: report the disconnect reason
 * recorded on the l2cap_conn, defaulting to "remote user terminated"
 * when no conn exists.
 */
8287 int l2cap_disconn_ind(struct hci_conn *hcon)
8289 struct l2cap_conn *conn = hcon->l2cap_data;
8291 BT_DBG("hcon %p", hcon);
8294 return HCI_ERROR_REMOTE_USER_TERM;
8295 return conn->disc_reason;
/* HCI disconnect-complete callback: tear down the l2cap_conn for
 * ACL/LE links, translating the HCI reason to an errno.
 */
8298 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8300 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8303 BT_DBG("hcon %p reason %d", hcon, reason);
8305 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption arms a grace timer for MEDIUM security and closes
 * HIGH/FIPS channels outright; regaining it clears the MEDIUM timer.
 */
8308 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8310 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8313 if (encrypt == 0x00) {
8314 if (chan->sec_level == BT_SECURITY_MEDIUM) {
8315 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8316 } else if (chan->sec_level == BT_SECURITY_HIGH ||
8317 chan->sec_level == BT_SECURITY_FIPS)
8318 l2cap_chan_close(chan, ECONNREFUSED);
8320 if (chan->sec_level == BT_SECURITY_MEDIUM)
8321 __clear_chan_timer(chan);
/* HCI security (authentication/encryption) change callback. Walks every
 * channel on the connection and advances its state machine: resumes
 * established channels, starts pending connects once the link is secure
 * (and the encryption key is long enough), and answers deferred
 * incoming connects (BT_CONNECT2) with success, pending-authorization,
 * or a security block.
 */
8325 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
8327 struct l2cap_conn *conn = hcon->l2cap_data;
8328 struct l2cap_chan *chan;
8333 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
8335 mutex_lock(&conn->chan_lock);
8337 list_for_each_entry(chan, &conn->chan_l, list) {
8338 l2cap_chan_lock(chan);
8340 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
8341 state_to_string(chan->state));
/* A2MP fixed channel is exempt from link security handling */
8343 if (chan->scid == L2CAP_CID_A2MP) {
8344 l2cap_chan_unlock(chan);
8348 if (!status && encrypt)
8349 chan->sec_level = hcon->sec_level;
8351 if (!__l2cap_no_conn_pending(chan)) {
8352 l2cap_chan_unlock(chan);
8356 if (!status && (chan->state == BT_CONNECTED ||
8357 chan->state == BT_CONFIG)) {
8358 chan->ops->resume(chan);
8359 l2cap_check_encryption(chan, encrypt);
8360 l2cap_chan_unlock(chan);
8364 if (chan->state == BT_CONNECT) {
/* Outgoing connect may proceed only with an adequate key size */
8365 if (!status && l2cap_check_enc_key_size(hcon))
8366 l2cap_start_connection(chan);
8368 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8369 } else if (chan->state == BT_CONNECT2 &&
8370 !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
8371 chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
8372 struct l2cap_conn_rsp rsp;
8375 if (!status && l2cap_check_enc_key_size(hcon)) {
8376 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
8377 res = L2CAP_CR_PEND;
8378 stat = L2CAP_CS_AUTHOR_PEND;
8379 chan->ops->defer(chan);
8381 l2cap_state_change(chan, BT_CONFIG);
8382 res = L2CAP_CR_SUCCESS;
8383 stat = L2CAP_CS_NO_INFO;
8386 l2cap_state_change(chan, BT_DISCONN);
8387 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8388 res = L2CAP_CR_SEC_BLOCK;
8389 stat = L2CAP_CS_NO_INFO;
8392 rsp.scid = cpu_to_le16(chan->dcid);
8393 rsp.dcid = cpu_to_le16(chan->scid);
8394 rsp.result = cpu_to_le16(res);
8395 rsp.status = cpu_to_le16(stat);
8396 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* On success, immediately kick off configuration if not yet sent */
8399 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
8400 res == L2CAP_CR_SUCCESS) {
8402 set_bit(CONF_REQ_SENT, &chan->conf_state);
8403 l2cap_send_cmd(conn, l2cap_get_ident(conn),
8405 l2cap_build_conf_req(chan, buf, sizeof(buf)),
8407 chan->num_conf_req++;
8411 l2cap_chan_unlock(chan);
8414 mutex_unlock(&conn->chan_lock);
8417 /* Append fragment into frame respecting the maximum len of rx_skb */
8418 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
/* Lazily allocate the reassembly skb on the first fragment */
8421 if (!conn->rx_skb) {
8422 /* Allocate skb for the complete frame (with header) */
8423 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
8430 /* Copy as much as the rx_skb can hold */
8431 len = min_t(u16, len, skb->len);
8432 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
/* rx_len counts down the bytes still expected for this frame */
8434 conn->rx_len -= len;
/* Complete the partially received L2CAP basic header: append bytes
 * until the 16-bit length field is readable, then either set rx_len
 * from it (if rx_skb has room) or reallocate rx_skb at the exact
 * expected size.
 */
8439 static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
8441 struct sk_buff *rx_skb;
8444 /* Append just enough to complete the header */
8445 len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);
8447 /* If header could not be read just continue */
8448 if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
8451 rx_skb = conn->rx_skb;
8452 len = get_unaligned_le16(rx_skb->data);
8454 /* Check if rx_skb has enough space to received all fragments */
8455 if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
8456 /* Update expected len */
8457 conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
8458 return L2CAP_LEN_SIZE;
8461 /* Reset conn->rx_skb since it will need to be reallocated in order to
8462 * fit all fragments.
8464 conn->rx_skb = NULL;
8466 /* Reallocates rx_skb using the exact expected length */
8467 len = l2cap_recv_frag(conn, rx_skb,
8468 len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
/* Discard any in-progress frame reassembly state on the connection. */
8474 static void l2cap_recv_reset(struct l2cap_conn *conn)
8476 kfree_skb(conn->rx_skb);
8477 conn->rx_skb = NULL;
/* Entry point for ACL data from HCI: reassembles L2CAP frames from
 * start/continuation fragments. Start fragments establish the expected
 * length (deferring if the length field itself is fragmented);
 * continuations are appended until rx_len reaches zero, at which point
 * the complete frame is handed to l2cap_recv_frame(). Length
 * inconsistencies mark the connection unreliable (ECOMM).
 */
8481 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
8483 struct l2cap_conn *conn = hcon->l2cap_data;
8486 /* For AMP controller do not create l2cap conn */
8487 if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
8491 conn = l2cap_conn_add(hcon);
8496 BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);
8500 case ACL_START_NO_FLUSH:
/* A start while a frame is still pending means we lost fragments */
8503 BT_ERR("Unexpected start frame (len %d)", skb->len);
8504 l2cap_recv_reset(conn);
8505 l2cap_conn_unreliable(conn, ECOMM);
8508 /* Start fragment may not contain the L2CAP length so just
8509 * copy the initial byte when that happens and use conn->mtu as
8512 if (skb->len < L2CAP_LEN_SIZE) {
8513 l2cap_recv_frag(conn, skb, conn->mtu);
8517 len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;
8519 if (len == skb->len) {
8520 /* Complete frame received */
8521 l2cap_recv_frame(conn, skb);
8525 BT_DBG("Start: total len %d, frag len %u", len, skb->len);
8527 if (skb->len > len) {
8528 BT_ERR("Frame is too long (len %u, expected len %d)",
8530 l2cap_conn_unreliable(conn, ECOMM);
8534 /* Append fragment into frame (with header) */
8535 if (l2cap_recv_frag(conn, skb, len) < 0)
8541 BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);
8543 if (!conn->rx_skb) {
8544 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
8545 l2cap_conn_unreliable(conn, ECOMM);
8549 /* Complete the L2CAP length if it has not been read */
8550 if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
8551 if (l2cap_recv_len(conn, skb) < 0) {
8552 l2cap_conn_unreliable(conn, ECOMM);
8556 /* Header still could not be read just continue */
8557 if (conn->rx_skb->len < L2CAP_LEN_SIZE)
8561 if (skb->len > conn->rx_len) {
8562 BT_ERR("Fragment is too long (len %u, expected %u)",
8563 skb->len, conn->rx_len);
8564 l2cap_recv_reset(conn);
8565 l2cap_conn_unreliable(conn, ECOMM);
8569 /* Append fragment into frame (with header) */
8570 l2cap_recv_frag(conn, skb, skb->len);
8572 if (!conn->rx_len) {
8573 /* Complete frame received. l2cap_recv_frame
8574 * takes ownership of the skb so set the global
8575 * rx_skb pointer to NULL first.
8577 struct sk_buff *rx_skb = conn->rx_skb;
8578 conn->rx_skb = NULL;
8579 l2cap_recv_frame(conn, rx_skb);
/* HCI callback registration: L2CAP's hooks into connection lifecycle
 * and security events (registered in l2cap_init()).
 */
8588 static struct hci_cb l2cap_cb = {
8590 .connect_cfm = l2cap_connect_cfm,
8591 .disconn_cfm = l2cap_disconn_cfm,
8592 .security_cfm = l2cap_security_cfm,
/* debugfs dump: one line per global channel with addresses, state,
 * PSM/CIDs, MTUs, security level and mode.
 */
8595 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8597 struct l2cap_chan *c;
8599 read_lock(&chan_list_lock);
8601 list_for_each_entry(c, &chan_list, global_l) {
8602 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8603 &c->src, c->src_type, &c->dst, c->dst_type,
8604 c->state, __le16_to_cpu(c->psm),
8605 c->scid, c->dcid, c->imtu, c->omtu,
8606 c->sec_level, c->mode);
8609 read_unlock(&chan_list_lock);
8614 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
8616 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer, hook into HCI, and
 * expose the channel list via debugfs when available.
 */
8618 int __init l2cap_init(void)
8622 err = l2cap_init_sockets();
8626 hci_register_cb(&l2cap_cb);
8628 if (IS_ERR_OR_NULL(bt_debugfs))
8631 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8632 NULL, &l2cap_debugfs_fops);
/* Module teardown: reverse of l2cap_init() in the opposite order. */
8637 void l2cap_exit(void)
8639 debugfs_remove(l2cap_debugfs);
8640 hci_unregister_cb(&l2cap_cb);
8641 l2cap_cleanup_sockets();
8644 module_param(disable_ertm, bool, 0644);
8645 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
8647 module_param(enable_ecred, bool, 0644);
8648 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");