2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
45 #define LE_FLOWCTL_MAX_CREDITS 65535
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
54 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
55 u8 code, u8 ident, u16 dlen, void *data);
56 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
58 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
59 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
62 struct sk_buff_head *skbs, u8 event);
/* Map an HCI link type + HCI address type to the exposed bdaddr type.
 * For LE links a public device address maps to BDADDR_LE_PUBLIC and
 * anything else to BDADDR_LE_RANDOM.
 * NOTE(review): the BR/EDR return path and braces are missing from this
 * extract (source line numbering shows dropped lines) — presumably it
 * returns the BR/EDR bdaddr type; confirm against the full file.
 */
64 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
66 if (link_type == LE_LINK) {
67 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
68 return BDADDR_LE_PUBLIC;
70 return BDADDR_LE_RANDOM;
/* Convenience wrapper: bdaddr type of the connection's *source* address. */
76 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
78 return bdaddr_type(hcon->type, hcon->src_type);
/* Convenience wrapper: bdaddr type of the connection's *destination* address. */
81 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
83 return bdaddr_type(hcon->type, hcon->dst_type);
86 /* ---- L2CAP channels ---- */
/* Find a channel on @conn by destination CID by walking conn->chan_l.
 * Caller must hold conn->chan_lock. The match condition/return are among
 * the lines dropped from this extract.
 */
88 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
93 list_for_each_entry(c, &conn->chan_l, list) {
/* Find a channel on @conn by source CID. Caller must hold conn->chan_lock. */
100 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
103 struct l2cap_chan *c;
105 list_for_each_entry(c, &conn->chan_l, list) {
112 /* Find channel with given SCID.
113 * Returns locked channel. */
/* Locking wrapper: serializes the SCID lookup with conn->chan_lock. */
114 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
117 struct l2cap_chan *c;
119 mutex_lock(&conn->chan_lock);
120 c = __l2cap_get_chan_by_scid(conn, cid);
123 mutex_unlock(&conn->chan_lock);
128 /* Find channel with given DCID.
129 * Returns locked channel.
/* Locking wrapper: serializes the DCID lookup with conn->chan_lock. */
131 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
134 struct l2cap_chan *c;
136 mutex_lock(&conn->chan_lock);
137 c = __l2cap_get_chan_by_dcid(conn, cid);
140 mutex_unlock(&conn->chan_lock);
/* Find the channel on @conn whose outstanding signalling command
 * identifier equals @ident. Caller must hold conn->chan_lock.
 */
145 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 struct l2cap_chan *c;
150 list_for_each_entry(c, &conn->chan_l, list) {
151 if (c->ident == ident)
/* Locking wrapper for the ident lookup above. */
157 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
160 struct l2cap_chan *c;
162 mutex_lock(&conn->chan_lock);
163 c = __l2cap_get_chan_by_ident(conn, ident);
166 mutex_unlock(&conn->chan_lock);
/* Global lookup across all registered channels: match on bound source
 * PSM (c->sport) and source bdaddr. Caller must hold chan_list_lock.
 */
171 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
173 struct l2cap_chan *c;
175 list_for_each_entry(c, &chan_list, global_l) {
176 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind @chan to a PSM on source address @src.
 * If @psm is non-zero, fail when another channel already owns that
 * PSM/address pair; otherwise auto-allocate one from the dynamic range
 * (BR/EDR vs LE ranges chosen by chan->src_type). The increment used by
 * the allocation loop is set on lines missing from this extract —
 * presumably 2 for BR/EDR (odd PSMs) — TODO confirm in the full file.
 * All of this runs under chan_list_lock.
 */
182 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
186 write_lock(&chan_list_lock);
188 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
198 u16 p, start, end, incr;
200 if (chan->src_type == BDADDR_BREDR) {
201 start = L2CAP_PSM_DYN_START;
202 end = L2CAP_PSM_AUTO_END;
205 start = L2CAP_PSM_LE_DYN_START;
206 end = L2CAP_PSM_LE_DYN_END;
/* First free PSM in the range wins; record it as both psm and sport. */
211 for (p = start; p <= end; p += incr)
212 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
213 chan->psm = cpu_to_le16(p);
214 chan->sport = cpu_to_le16(p);
221 write_unlock(&chan_list_lock);
224 EXPORT_SYMBOL_GPL(l2cap_add_psm);
/* Register a fixed channel with the given source CID, overriding the
 * connection-oriented defaults (output MTU, channel type).
 */
226 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
228 write_lock(&chan_list_lock);
230 /* Override the defaults (which are for conn-oriented) */
231 chan->omtu = L2CAP_DEFAULT_MTU;
232 chan->chan_type = L2CAP_CHAN_FIXED;
236 write_unlock(&chan_list_lock);
/* Allocate the first unused dynamic source CID on @conn. The upper bound
 * of the dynamic range differs between LE and BR/EDR links.
 */
241 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
245 if (conn->hcon->type == LE_LINK)
246 dyn_end = L2CAP_CID_LE_DYN_END;
248 dyn_end = L2CAP_CID_DYN_END;
250 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
251 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Transition @chan to @state via the channel ops callback (err = 0),
 * logging the old -> new state names.
 */
258 static void l2cap_state_change(struct l2cap_chan *chan, int state)
260 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
261 state_to_string(state));
264 chan->ops->state_change(chan, state, 0);
/* State change that also reports an error code to the channel owner.
 * NOTE(review): the visible call passes chan->state — the assignment of
 * the new state appears to be on a line dropped from this extract.
 */
267 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
271 chan->ops->state_change(chan, chan->state, err);
/* Report @err on @chan without changing its state. */
274 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
276 chan->ops->state_change(chan, chan->state, err);
/* Arm the ERTM retransmission timer, but only when a retransmission
 * timeout is configured and the monitor timer is not already pending
 * (the monitor timer supersedes it).
 */
279 static void __set_retrans_timer(struct l2cap_chan *chan)
281 if (!delayed_work_pending(&chan->monitor_timer) &&
282 chan->retrans_timeout) {
283 l2cap_set_timer(chan, &chan->retrans_timer,
284 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the ERTM monitor timer (if configured), cancelling any pending
 * retransmission timer first.
 */
288 static void __set_monitor_timer(struct l2cap_chan *chan)
290 __clear_retrans_timer(chan);
291 if (chan->monitor_timeout) {
292 l2cap_set_timer(chan, &chan->monitor_timer,
293 msecs_to_jiffies(chan->monitor_timeout));
/* Linear search of an skb queue for the frame carrying ERTM tx sequence
 * number @seq (stored in the skb control block).
 */
297 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
302 skb_queue_walk(head, skb) {
303 if (bt_cb(skb)->l2cap.txseq == seq)
310 /* ---- L2CAP sequence number lists ---- */
312 /* For ERTM, ordered lists of sequence numbers must be tracked for
313 * SREJ requests that are received and for frames that are to be
314 * retransmitted. These seq_list functions implement a singly-linked
315 * list in an array, where membership in the list can also be checked
316 * in constant time. Items can also be added to the tail of the list
317 * and removed from the head in constant time, without further memory
/* Allocate and clear a sequence list able to hold @size entries.
 * The backing array is rounded up to a power of two so (seq & mask)
 * indexes it directly; every slot starts as L2CAP_SEQ_LIST_CLEAR.
 * NOTE(review): the allocation-failure check after kmalloc_array is on
 * lines missing from this extract.
 */
321 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
323 size_t alloc_size, i;
325 /* Allocated size is a power of 2 to map sequence numbers
326 * (which may be up to 14 bits) in to a smaller array that is
327 * sized for the negotiated ERTM transmit windows.
329 alloc_size = roundup_pow_of_two(size);
331 seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
335 seq_list->mask = alloc_size - 1;
336 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
337 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
338 for (i = 0; i < alloc_size; i++)
339 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array (kfree(NULL) is a no-op). */
344 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
346 kfree(seq_list->list);
/* O(1) membership test: a slot holding anything other than CLEAR means
 * that sequence number is currently in the list.
 */
349 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
352 /* Constant-time check for list membership */
353 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove and return the head sequence number. Each slot stores the next
 * element in the chain, so following head's slot yields the new head;
 * popping the final element (marked L2CAP_SEQ_LIST_TAIL) empties the
 * list by resetting head and tail to CLEAR.
 */
356 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
358 u16 seq = seq_list->head;
359 u16 mask = seq_list->mask;
361 seq_list->head = seq_list->list[seq & mask];
362 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
364 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
365 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
366 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Reset the list to empty; skips the O(n) wipe when already empty. */
372 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
376 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
379 for (i = 0; i <= seq_list->mask; i++)
380 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
382 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
383 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append @seq at the tail in O(1). Duplicates are ignored (slot already
 * non-CLEAR). An empty list makes @seq the head; otherwise the old
 * tail's slot is chained to @seq. The new tail's slot is marked
 * L2CAP_SEQ_LIST_TAIL as the end-of-chain sentinel.
 */
386 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
388 u16 mask = seq_list->mask;
390 /* All appends happen in constant time */
392 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
395 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
396 seq_list->head = seq;
398 seq_list->list[seq_list->tail & mask] = seq;
400 seq_list->tail = seq;
401 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer (armed via chan_timer).
 * Picks an errno based on the channel state at expiry (refused while
 * connected/configuring or while an authenticated connect is pending),
 * closes the channel under conn->chan_lock + channel lock, notifies the
 * owner via ops->close(), and drops the work's channel reference.
 */
404 static void l2cap_chan_timeout(struct work_struct *work)
406 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
408 struct l2cap_conn *conn = chan->conn;
411 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
413 mutex_lock(&conn->chan_lock);
414 l2cap_chan_lock(chan);
416 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
417 reason = ECONNREFUSED;
418 else if (chan->state == BT_CONNECT &&
419 chan->sec_level != BT_SECURITY_SDP)
420 reason = ECONNREFUSED;
424 l2cap_chan_close(chan, reason);
426 l2cap_chan_unlock(chan);
/* ops->close is called after dropping the channel lock. */
428 chan->ops->close(chan);
429 mutex_unlock(&conn->chan_lock);
431 l2cap_chan_put(chan);
/* Allocate and initialise a new channel object:
 * zeroed (GFP_ATOMIC), lock + nesting level set up, linked onto the
 * global chan_list, timer work initialised, state BT_OPEN, refcount 1,
 * and CONF_NOT_COMPLETE set until l2cap_chan_ready().
 */
434 struct l2cap_chan *l2cap_chan_create(void)
436 struct l2cap_chan *chan;
438 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
442 mutex_init(&chan->lock);
444 /* Set default lock nesting level */
445 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
447 write_lock(&chan_list_lock);
448 list_add(&chan->global_l, &chan_list);
449 write_unlock(&chan_list_lock);
451 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
453 chan->state = BT_OPEN;
455 kref_init(&chan->kref);
457 /* This flag is cleared in l2cap_chan_ready() */
458 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
460 BT_DBG("chan %p", chan);
464 EXPORT_SYMBOL_GPL(l2cap_chan_create);
/* kref release callback: unlink from the global channel list.
 * The final kfree is presumably on lines missing from this extract.
 */
466 static void l2cap_chan_destroy(struct kref *kref)
468 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
470 BT_DBG("chan %p", chan);
472 write_lock(&chan_list_lock);
473 list_del(&chan->global_l);
474 write_unlock(&chan_list_lock);
/* Take a reference on @c (kref_get on the lines dropped here). */
479 void l2cap_chan_hold(struct l2cap_chan *c)
481 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
/* Drop a reference; l2cap_chan_destroy() runs when it hits zero. */
486 void l2cap_chan_put(struct l2cap_chan *c)
488 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
490 kref_put(&c->kref, l2cap_chan_destroy);
492 EXPORT_SYMBOL_GPL(l2cap_chan_put);
/* Reset a channel to its spec-default parameters: CRC16 FCS, default
 * ERTM transmit window / max-tx / timeouts, low security, default flush
 * timeout, and a cleared configuration state. The remote_* fields start
 * mirroring the local values until configuration negotiates otherwise.
 * FLAG_FORCE_ACTIVE makes outgoing data request active mode.
 */
494 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
496 chan->fcs = L2CAP_FCS_CRC16;
497 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
498 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
499 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
500 chan->remote_max_tx = chan->max_tx;
501 chan->remote_tx_win = chan->tx_win;
502 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
503 chan->sec_level = BT_SECURITY_LOW;
504 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
505 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
506 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
507 chan->conf_state = 0;
509 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
511 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
/* Initialise LE credit-based flow control state for @chan.
 * MPS is capped at the HCI connection MTU (minus the L2CAP header) so a
 * PDU never needs HCI-level fragmentation; rx_credits is sized so the
 * peer can always send one full SDU of chan->imtu bytes.
 */
513 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
516 chan->sdu_last_frag = NULL;
518 chan->tx_credits = tx_credits;
519 /* Derive MPS from connection MTU to stop HCI fragmentation */
520 chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
521 /* Give enough credits for a full packet */
522 chan->rx_credits = (chan->imtu / chan->mps) + 1;
524 skb_queue_head_init(&chan->tx_q);
/* Attach @chan to @conn (caller holds conn->chan_lock).
 * Assigns CIDs/MTUs according to the channel type, takes a channel
 * reference, optionally pins the underlying HCI connection, and links
 * the channel into the connection's channel list.
 */
527 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
529 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
530 __le16_to_cpu(chan->psm), chan->dcid);
/* Default disconnect reason until the remote tells us otherwise. */
532 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
536 switch (chan->chan_type) {
537 case L2CAP_CHAN_CONN_ORIENTED:
538 /* Alloc CID for connection-oriented socket */
539 chan->scid = l2cap_alloc_cid(conn);
540 if (conn->hcon->type == ACL_LINK)
541 chan->omtu = L2CAP_DEFAULT_MTU;
544 case L2CAP_CHAN_CONN_LESS:
545 /* Connectionless socket */
546 chan->scid = L2CAP_CID_CONN_LESS;
547 chan->dcid = L2CAP_CID_CONN_LESS;
548 chan->omtu = L2CAP_DEFAULT_MTU;
551 case L2CAP_CHAN_FIXED:
552 /* Caller will set CID and CID specific MTU values */
556 /* Raw socket can send/recv signalling messages only */
557 chan->scid = L2CAP_CID_SIGNALING;
558 chan->dcid = L2CAP_CID_SIGNALING;
559 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default extended flow spec (best-effort service). */
562 chan->local_id = L2CAP_BESTEFFORT_ID;
563 chan->local_stype = L2CAP_SERV_BESTEFFORT;
564 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
565 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
566 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
567 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
569 l2cap_chan_hold(chan);
571 /* Only keep a reference for fixed channels if they requested it */
572 if (chan->chan_type != L2CAP_CHAN_FIXED ||
573 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
574 hci_conn_hold(conn->hcon);
576 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
579 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
581 mutex_lock(&conn->chan_lock);
582 __l2cap_chan_add(conn, chan);
583 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection and tear it down with error @err.
 * Cancels the channel timer, notifies the owner (ops->teardown), unlinks
 * the channel, drops the references taken in __l2cap_chan_add() (the
 * hci_conn reference only when it was actually held — see the
 * FLAG_HOLD_HCI_CONN comment below), clears AMP manager / high-speed
 * channel state, and purges per-mode queues and sequence lists.
 */
586 void l2cap_chan_del(struct l2cap_chan *chan, int err)
588 struct l2cap_conn *conn = chan->conn;
590 __clear_chan_timer(chan);
592 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
593 state_to_string(chan->state));
595 chan->ops->teardown(chan, err);
598 struct amp_mgr *mgr = conn->hcon->amp_mgr;
599 /* Delete from channel list */
600 list_del(&chan->list);
/* Drops the reference taken when the channel was added to the conn. */
602 l2cap_chan_put(chan);
606 /* Reference was only held for non-fixed channels or
607 * fixed channels that explicitly requested it using the
608 * FLAG_HOLD_HCI_CONN flag.
610 if (chan->chan_type != L2CAP_CHAN_FIXED ||
611 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
612 hci_conn_drop(conn->hcon);
614 if (mgr && mgr->bredr_chan == chan)
615 mgr->bredr_chan = NULL;
/* Tear down any logical link on the AMP (high speed) controller. */
618 if (chan->hs_hchan) {
619 struct hci_chan *hs_hchan = chan->hs_hchan;
621 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
622 amp_disconnect_logical_link(hs_hchan);
625 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
/* Mode-specific cleanup (switch labels partially missing from this
 * extract): ERTM also stops its timers and frees SREJ/retransmission
 * state; LE flow control and streaming just purge the tx queue.
 */
629 case L2CAP_MODE_BASIC:
632 case L2CAP_MODE_LE_FLOWCTL:
633 skb_queue_purge(&chan->tx_q);
636 case L2CAP_MODE_ERTM:
637 __clear_retrans_timer(chan);
638 __clear_monitor_timer(chan);
639 __clear_ack_timer(chan);
641 skb_queue_purge(&chan->srej_q);
643 l2cap_seq_list_free(&chan->srej_list);
644 l2cap_seq_list_free(&chan->retrans_list);
648 case L2CAP_MODE_STREAMING:
649 skb_queue_purge(&chan->tx_q);
655 EXPORT_SYMBOL_GPL(l2cap_chan_del);
/* Work handler: the HCI connection's (resolved) destination address
 * changed, so copy the new address/type into every channel on the
 * connection, taking each channel lock in turn.
 */
657 static void l2cap_conn_update_id_addr(struct work_struct *work)
659 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
660 id_addr_update_work);
661 struct hci_conn *hcon = conn->hcon;
662 struct l2cap_chan *chan;
664 mutex_lock(&conn->chan_lock);
666 list_for_each_entry(chan, &conn->chan_l, list) {
667 l2cap_chan_lock(chan);
668 bacpy(&chan->dst, &hcon->dst);
669 chan->dst_type = bdaddr_dst_type(hcon);
670 l2cap_chan_unlock(chan);
673 mutex_unlock(&conn->chan_lock);
/* Reject a pending LE credit-based connection request: authorization
 * pending (defer setup) or unknown PSM. Moves the channel to BT_DISCONN
 * and answers the stored request ident with an LE connect response.
 */
676 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
678 struct l2cap_conn *conn = chan->conn;
679 struct l2cap_le_conn_rsp rsp;
682 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
683 result = L2CAP_CR_LE_AUTHORIZATION;
685 result = L2CAP_CR_LE_BAD_PSM;
687 l2cap_state_change(chan, BT_DISCONN);
689 rsp.dcid = cpu_to_le16(chan->scid);
690 rsp.mtu = cpu_to_le16(chan->imtu);
691 rsp.mps = cpu_to_le16(chan->mps);
692 rsp.credits = cpu_to_le16(chan->rx_credits);
693 rsp.result = cpu_to_le16(result);
695 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* BR/EDR counterpart: reject a pending connection request with either
 * "security block" (defer setup) or "bad PSM", then go to BT_DISCONN.
 */
699 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
701 struct l2cap_conn *conn = chan->conn;
702 struct l2cap_conn_rsp rsp;
705 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
706 result = L2CAP_CR_SEC_BLOCK;
708 result = L2CAP_CR_BAD_PSM;
710 l2cap_state_change(chan, BT_DISCONN);
712 rsp.scid = cpu_to_le16(chan->dcid);
713 rsp.dcid = cpu_to_le16(chan->scid);
714 rsp.result = cpu_to_le16(result);
715 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
717 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close @chan with @reason, behaviour keyed on its current state.
 * NOTE(review): the switch statement's case labels are missing from this
 * extract; the visible branches are: listening -> plain teardown;
 * connected/config -> send a disconnect request (with a timer) for
 * conn-oriented channels, else delete immediately; connect-pending ->
 * send the appropriate (LE or BR/EDR) reject for conn-oriented channels
 * then delete; other transient states -> delete; default -> teardown.
 */
720 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
722 struct l2cap_conn *conn = chan->conn;
724 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
726 switch (chan->state) {
728 chan->ops->teardown(chan, 0);
733 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
734 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
735 l2cap_send_disconn_req(chan, reason);
737 l2cap_chan_del(chan, reason);
741 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
742 if (conn->hcon->type == ACL_LINK)
743 l2cap_chan_connect_reject(chan);
744 else if (conn->hcon->type == LE_LINK)
745 l2cap_chan_le_connect_reject(chan);
748 l2cap_chan_del(chan, reason);
753 l2cap_chan_del(chan, reason);
757 chan->ops->teardown(chan, 0);
761 EXPORT_SYMBOL(l2cap_chan_close);
/* Map the channel type / PSM / security level to an HCI authentication
 * requirement. SDP (and the 3DSP connectionless PSM) are special-cased:
 * BT_SECURITY_LOW is promoted to BT_SECURITY_SDP and no bonding is
 * requested (with MITM only for high/FIPS levels). Raw channels (first
 * visible case — its label is missing from this extract) use dedicated
 * bonding; everything else uses general bonding, scaled by sec_level.
 */
763 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
765 switch (chan->chan_type) {
767 switch (chan->sec_level) {
768 case BT_SECURITY_HIGH:
769 case BT_SECURITY_FIPS:
770 return HCI_AT_DEDICATED_BONDING_MITM;
771 case BT_SECURITY_MEDIUM:
772 return HCI_AT_DEDICATED_BONDING;
774 return HCI_AT_NO_BONDING;
777 case L2CAP_CHAN_CONN_LESS:
778 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
779 if (chan->sec_level == BT_SECURITY_LOW)
780 chan->sec_level = BT_SECURITY_SDP;
782 if (chan->sec_level == BT_SECURITY_HIGH ||
783 chan->sec_level == BT_SECURITY_FIPS)
784 return HCI_AT_NO_BONDING_MITM;
786 return HCI_AT_NO_BONDING;
788 case L2CAP_CHAN_CONN_ORIENTED:
789 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
790 if (chan->sec_level == BT_SECURITY_LOW)
791 chan->sec_level = BT_SECURITY_SDP;
793 if (chan->sec_level == BT_SECURITY_HIGH ||
794 chan->sec_level == BT_SECURITY_FIPS)
795 return HCI_AT_NO_BONDING_MITM;
797 return HCI_AT_NO_BONDING;
801 switch (chan->sec_level) {
802 case BT_SECURITY_HIGH:
803 case BT_SECURITY_FIPS:
804 return HCI_AT_GENERAL_BONDING_MITM;
805 case BT_SECURITY_MEDIUM:
806 return HCI_AT_GENERAL_BONDING;
808 return HCI_AT_NO_BONDING;
814 /* Service level security */
/* Enforce the channel's required security level on the link:
 * SMP for LE links, HCI authentication/encryption for BR/EDR (using the
 * auth type derived above).
 */
815 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
817 struct l2cap_conn *conn = chan->conn;
820 if (conn->hcon->type == LE_LINK)
821 return smp_conn_security(conn->hcon, chan->sec_level);
823 auth_type = l2cap_get_auth_type(chan);
825 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
/* Allocate the next signalling command identifier for @conn, under
 * ident_lock. Kernel-originated idents wrap within 1-128; 129-199 are
 * reserved and 200-254 are left to userspace tools (see comment below).
 * The wrap-around reset after 128 is on lines missing from this extract.
 */
829 static u8 l2cap_get_ident(struct l2cap_conn *conn)
833 /* Get next available identificator.
834 * 1 - 128 are used by kernel.
835 * 135 - 199 are reserved.
836 * 200 - 254 are used by utilities like l2ping, etc.
839 mutex_lock(&conn->ident_lock);
841 if (++conn->tx_ident > 128)
846 mutex_unlock(&conn->ident_lock);
/* Build a signalling command PDU and transmit it on the connection's
 * signalling HCI channel at maximum priority, using NO_FLUSH framing
 * when the controller supports it (always for LE).
 */
851 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
854 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
857 BT_DBG("code 0x%2.2x", code);
862 /* Use NO_FLUSH if supported or we have an LE link (which does
863 * not support auto-flushing packets) */
864 if (lmp_no_flush_capable(conn->hcon->hdev) ||
865 conn->hcon->type == LE_LINK)
866 flags = ACL_START_NO_FLUSH;
870 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
871 skb->priority = HCI_PRIO_MAX;
873 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is in progress (any move state other
 * than stable or waiting-to-prepare).
 */
876 static bool __chan_is_moving(struct l2cap_chan *chan)
878 return chan->move_state != L2CAP_MOVE_STABLE &&
879 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data skb for @chan. If the channel currently lives on a
 * high-speed (AMP) link and is not mid-move, send on the HS HCI channel;
 * otherwise send on the BR/EDR/LE ACL channel, choosing NO_FLUSH framing
 * for LE (mandatory there) or when the controller supports it and the
 * owner has not requested flushable packets (FLAG_FLUSHABLE).
 */
882 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
884 struct hci_conn *hcon = chan->conn->hcon;
887 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
890 if (chan->hs_hcon && !__chan_is_moving(chan)) {
892 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
899 /* Use NO_FLUSH for LE links (where this is the only option) or
900 * if the BR/EDR link supports it and flushing has not been
901 * explicitly requested (through FLAG_FLUSHABLE).
903 if (hcon->type == LE_LINK ||
904 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
905 lmp_no_flush_capable(hcon->hdev)))
906 flags = ACL_START_NO_FLUSH;
910 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
911 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit ERTM "enhanced" control field into struct l2cap_ctrl.
 * The frame-type bit selects S-frame (poll/supervise bits) vs I-frame
 * (SAR/txseq bits); reqseq and final are common to both.
 */
914 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
916 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
917 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
919 if (enh & L2CAP_CTRL_FRAME_TYPE) {
/* S-frame */
922 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
923 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
/* I-frame */
930 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
931 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Same decode for the 32-bit "extended" control field used with
 * extended window sizes (FLAG_EXT_CTRL).
 */
938 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
940 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
941 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
943 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
/* S-frame */
946 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
947 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
/* I-frame */
954 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
955 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull and decode the control field from the front of @skb, picking the
 * 32-bit extended or 16-bit enhanced layout based on FLAG_EXT_CTRL.
 */
962 static inline void __unpack_control(struct l2cap_chan *chan,
965 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
966 __unpack_extended_control(get_unaligned_le32(skb->data),
968 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
970 __unpack_enhanced_control(get_unaligned_le16(skb->data),
972 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode struct l2cap_ctrl into the 32-bit extended control layout
 * (inverse of __unpack_extended_control).
 */
976 static u32 __pack_extended_control(struct l2cap_ctrl *control)
980 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
981 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
983 if (control->sframe) {
984 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
985 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
986 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
988 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
989 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Encode struct l2cap_ctrl into the 16-bit enhanced control layout. */
995 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
999 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1000 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1002 if (control->sframe) {
1003 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1004 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1005 packed |= L2CAP_CTRL_FRAME_TYPE;
1007 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1008 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1035 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1038 struct sk_buff *skb;
1039 struct l2cap_hdr *lh;
1040 int hlen = __ertm_hdr_size(chan);
1042 if (chan->fcs == L2CAP_FCS_CRC16)
1043 hlen += L2CAP_FCS_SIZE;
1045 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1048 return ERR_PTR(-ENOMEM);
1050 lh = skb_put(skb, L2CAP_HDR_SIZE);
1051 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1052 lh->cid = cpu_to_le16(chan->dcid);
1054 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1055 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1057 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1059 if (chan->fcs == L2CAP_FCS_CRC16) {
1060 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1061 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1064 skb->priority = HCI_PRIO_MAX;
1068 static void l2cap_send_sframe(struct l2cap_chan *chan,
1069 struct l2cap_ctrl *control)
1071 struct sk_buff *skb;
1074 BT_DBG("chan %p, control %p", chan, control);
1076 if (!control->sframe)
1079 if (__chan_is_moving(chan))
1082 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1086 if (control->super == L2CAP_SUPER_RR)
1087 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1088 else if (control->super == L2CAP_SUPER_RNR)
1089 set_bit(CONN_RNR_SENT, &chan->conn_state);
1091 if (control->super != L2CAP_SUPER_SREJ) {
1092 chan->last_acked_seq = control->reqseq;
1093 __clear_ack_timer(chan);
1096 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1097 control->final, control->poll, control->super);
1099 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1100 control_field = __pack_extended_control(control);
1102 control_field = __pack_enhanced_control(control);
1104 skb = l2cap_create_sframe_pdu(chan, control_field);
1106 l2cap_do_send(chan, skb);
1109 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1111 struct l2cap_ctrl control;
1113 BT_DBG("chan %p, poll %d", chan, poll);
1115 memset(&control, 0, sizeof(control));
1117 control.poll = poll;
1119 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1120 control.super = L2CAP_SUPER_RNR;
1122 control.super = L2CAP_SUPER_RR;
1124 control.reqseq = chan->buffer_seq;
1125 l2cap_send_sframe(chan, &control);
/* True when no connect request is outstanding for @chan.
 * Non-connection-oriented channels trivially have none pending (the
 * early return's value is on a line missing from this extract).
 */
1128 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1130 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1133 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may be created on an AMP controller:
 * both sides must advertise the A2MP fixed channel, at least one non-
 * BR/EDR AMP controller must be up, and the channel policy must prefer
 * AMP (the non-preferred return path is missing from this extract).
 */
1136 static bool __amp_capable(struct l2cap_chan *chan)
1138 struct l2cap_conn *conn = chan->conn;
1139 struct hci_dev *hdev;
1140 bool amp_available = false;
1142 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1145 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1148 read_lock(&hci_dev_list_lock);
1149 list_for_each_entry(hdev, &hci_dev_list, list) {
1150 if (hdev->amp_type != AMP_TYPE_BREDR &&
1151 test_bit(HCI_UP, &hdev->flags)) {
1152 amp_available = true;
1156 read_unlock(&hci_dev_list_lock);
1158 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1159 return amp_available;
/* Validate the channel's extended flow spec parameters; the checks
 * themselves are on lines missing from this extract.
 */
1164 static bool l2cap_check_efs(struct l2cap_chan *chan)
1166 /* Check EFS parameters */
/* Send an L2CAP Connection Request for @chan: allocates a fresh command
 * ident, marks the connect as pending (CONF_CONNECT_PEND), and transmits
 * scid + psm to the peer.
 */
1170 void l2cap_send_conn_req(struct l2cap_chan *chan)
1172 struct l2cap_conn *conn = chan->conn;
1173 struct l2cap_conn_req req;
1175 req.scid = cpu_to_le16(chan->scid);
1176 req.psm = chan->psm;
1178 chan->ident = l2cap_get_ident(conn);
1180 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1182 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* AMP variant of the connection request: Create Channel Request naming
 * the target controller via @amp_id.
 */
1185 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1187 struct l2cap_create_chan_req req;
1188 req.scid = cpu_to_le16(chan->scid);
1189 req.psm = chan->psm;
1190 req.amp_id = amp_id;
1192 chan->ident = l2cap_get_ident(chan->conn);
1194 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop all ERTM timers, reset
 * retry accounting on queued frames, rewind expected_tx_seq, clear
 * REJ/SREJ state and queues, and park the tx/rx state machines in
 * XMIT/MOVE. CONN_REMOTE_BUSY is set so no new data flows mid-move.
 * Non-ERTM channels need none of this and return early.
 */
1198 static void l2cap_move_setup(struct l2cap_chan *chan)
1200 struct sk_buff *skb;
1202 BT_DBG("chan %p", chan);
1204 if (chan->mode != L2CAP_MODE_ERTM)
1207 __clear_retrans_timer(chan);
1208 __clear_monitor_timer(chan);
1209 __clear_ack_timer(chan);
1211 chan->retry_count = 0;
1212 skb_queue_walk(&chan->tx_q, skb) {
1213 if (bt_cb(skb)->l2cap.retries)
1214 bt_cb(skb)->l2cap.retries = 1;
1219 chan->expected_tx_seq = chan->buffer_seq;
1221 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1222 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1223 l2cap_seq_list_clear(&chan->retrans_list);
1224 l2cap_seq_list_clear(&chan->srej_list);
1225 skb_queue_purge(&chan->srej_q);
1227 chan->tx_state = L2CAP_TX_STATE_XMIT;
1228 chan->rx_state = L2CAP_RX_STATE_MOVE;
1230 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP channel move: return move state/role to stable/none,
 * then (ERTM only) resynchronise with the peer — the move initiator
 * issues an explicit poll and waits for the F-bit, the responder waits
 * for the peer's P-bit.
 */
1233 static void l2cap_move_done(struct l2cap_chan *chan)
1235 u8 move_role = chan->move_role;
1236 BT_DBG("chan %p", chan);
1238 chan->move_state = L2CAP_MOVE_STABLE;
1239 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1241 if (chan->mode != L2CAP_MODE_ERTM)
1244 switch (move_role) {
1245 case L2CAP_MOVE_ROLE_INITIATOR:
1246 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1247 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1249 case L2CAP_MOVE_ROLE_RESPONDER:
1250 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Mark @chan fully connected: clear all configuration flags (including
 * CONF_NOT_COMPLETE) and the channel timer, suspend an LE flow-control
 * channel that has no tx credits yet, then notify the owner via
 * ops->ready(). Idempotent when already BT_CONNECTED.
 */
1255 static void l2cap_chan_ready(struct l2cap_chan *chan)
1257 /* The channel may have already been flagged as connected in
1258 * case of receiving data before the L2CAP info req/rsp
1259 * procedure is complete.
1261 if (chan->state == BT_CONNECTED)
1264 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1265 chan->conf_state = 0;
1266 __clear_chan_timer(chan);
1268 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1269 chan->ops->suspend(chan);
1271 chan->state = BT_CONNECTED;
1273 chan->ops->ready(chan);
/* Send an LE credit-based Connection Request for @chan, guarding
 * against duplicates with FLAG_LE_CONN_REQ_SENT. Flow control starts
 * with zero tx credits (the peer's response grants them).
 */
1276 static void l2cap_le_connect(struct l2cap_chan *chan)
1278 struct l2cap_conn *conn = chan->conn;
1279 struct l2cap_le_conn_req req;
1281 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1284 l2cap_le_flowctl_init(chan, 0);
1286 req.psm = chan->psm;
1287 req.scid = cpu_to_le16(chan->scid);
1288 req.mtu = cpu_to_le16(chan->imtu);
1289 req.mps = cpu_to_le16(chan->mps);
1290 req.credits = cpu_to_le16(chan->rx_credits);
1292 chan->ident = l2cap_get_ident(conn);
1294 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
/* Drive an LE channel forward: bail until SMP satisfies the required
 * security level, then (per the branches visible here) mark the channel
 * ready or, if still in BT_CONNECT, issue the LE connect request.
 */
1298 static void l2cap_le_start(struct l2cap_chan *chan)
1300 struct l2cap_conn *conn = chan->conn;
1302 if (!smp_conn_security(conn->hcon, chan->sec_level))
1306 l2cap_chan_ready(chan);
1310 if (chan->state == BT_CONNECT)
1311 l2cap_le_connect(chan);
/* Kick off channel establishment on the appropriate transport:
 * AMP discovery when AMP is usable, the LE path for LE links, otherwise
 * a plain BR/EDR connection request.
 */
1314 static void l2cap_start_connection(struct l2cap_chan *chan)
1316 if (__amp_capable(chan)) {
1317 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1318 a2mp_discover_amp(chan);
1319 } else if (chan->conn->hcon->type == LE_LINK) {
1320 l2cap_le_start(chan);
1322 l2cap_send_conn_req(chan);
/* Query the peer's feature mask once per connection: sets the
 * REQ_SENT state bit, records the command ident so the response can be
 * matched, and arms the info timer before sending L2CAP_INFO_REQ.
 */
1326 static void l2cap_request_info(struct l2cap_conn *conn)
1328 struct l2cap_info_req req;
1330 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1333 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1335 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1336 conn->info_ident = l2cap_get_ident(conn);
1338 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1340 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Enforce the minimum encryption key size (7, per the comment below)
 * on encrypted links; unencrypted links pass trivially.
 */
1344 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1346 /* The minimum encryption key size needs to be enforced by the
1347 * host stack before establishing any L2CAP connections. The
1348 * specification in theory allows a minimum of 1, but to align
1349 * BR/EDR and LE transports, a minimum of 7 is chosen.
1351 * This check might also be called for unencrypted connections
1352 * that have no key size requirements. Ensure that the link is
1353 * actually encrypted before enforcing a key size.
1355 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1356 hcon->enc_key_size > HCI_MIN_ENC_KEY_SIZE);
/* Attempt to establish @chan: LE links go through l2cap_le_start();
 * BR/EDR first completes the info req/rsp exchange, then requires
 * security with no connect pending, and finally starts the connection
 * only when the link's encryption key size passes the check above —
 * otherwise the disconnect timer is armed.
 */
1359 static void l2cap_do_start(struct l2cap_chan *chan)
1361 struct l2cap_conn *conn = chan->conn;
1363 if (conn->hcon->type == LE_LINK) {
1364 l2cap_le_start(chan);
1368 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1369 l2cap_request_info(conn);
1373 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1376 if (!l2cap_chan_check_security(chan, true) ||
1377 !__l2cap_no_conn_pending(chan))
1380 if (l2cap_check_enc_key_size(conn->hcon))
1381 l2cap_start_connection(chan);
1383 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Report whether @mode is usable given both the remote feature mask and
 * our own (ERTM/streaming support is OR-ed into the local mask on a
 * conditional whose guard is missing from this extract).
 */
1386 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1388 u32 local_feat_mask = l2cap_feat_mask;
1390 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1393 case L2CAP_MODE_ERTM:
1394 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1395 case L2CAP_MODE_STREAMING:
1396 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP Disconnection Request for @chan with local error @err.
 * ERTM timers are stopped first; A2MP channels (scid == L2CAP_CID_A2MP)
 * skip the request and just transition to BT_DISCONN. The channel ends
 * in BT_DISCONN with @err reported to the owner.
 */
1402 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1404 struct l2cap_conn *conn = chan->conn;
1405 struct l2cap_disconn_req req;
1410 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1411 __clear_retrans_timer(chan);
1412 __clear_monitor_timer(chan);
1413 __clear_ack_timer(chan);
1416 if (chan->scid == L2CAP_CID_A2MP) {
1417 l2cap_state_change(chan, BT_DISCONN);
1421 req.dcid = cpu_to_le16(chan->dcid);
1422 req.scid = cpu_to_le16(chan->scid);
1423 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1426 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1429 /* ---- L2CAP connections ---- */
1430 static void l2cap_conn_start(struct l2cap_conn *conn)
1432 struct l2cap_chan *chan, *tmp;
1434 BT_DBG("conn %p", conn);
1436 mutex_lock(&conn->chan_lock);
1438 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1439 l2cap_chan_lock(chan);
1441 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1442 l2cap_chan_ready(chan);
1443 l2cap_chan_unlock(chan);
1447 if (chan->state == BT_CONNECT) {
1448 if (!l2cap_chan_check_security(chan, true) ||
1449 !__l2cap_no_conn_pending(chan)) {
1450 l2cap_chan_unlock(chan);
1454 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1455 && test_bit(CONF_STATE2_DEVICE,
1456 &chan->conf_state)) {
1457 l2cap_chan_close(chan, ECONNRESET);
1458 l2cap_chan_unlock(chan);
1462 if (l2cap_check_enc_key_size(conn->hcon))
1463 l2cap_start_connection(chan);
1465 l2cap_chan_close(chan, ECONNREFUSED);
1467 } else if (chan->state == BT_CONNECT2) {
1468 struct l2cap_conn_rsp rsp;
1470 rsp.scid = cpu_to_le16(chan->dcid);
1471 rsp.dcid = cpu_to_le16(chan->scid);
1473 if (l2cap_chan_check_security(chan, false)) {
1474 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1475 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1476 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1477 chan->ops->defer(chan);
1480 l2cap_state_change(chan, BT_CONFIG);
1481 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1482 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1485 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1486 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1489 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1492 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1493 rsp.result != L2CAP_CR_SUCCESS) {
1494 l2cap_chan_unlock(chan);
1498 set_bit(CONF_REQ_SENT, &chan->conf_state);
1499 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1500 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1501 chan->num_conf_req++;
1504 l2cap_chan_unlock(chan);
1507 mutex_unlock(&conn->chan_lock);
1510 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1512 struct hci_conn *hcon = conn->hcon;
1513 struct hci_dev *hdev = hcon->hdev;
1515 BT_DBG("%s conn %p", hdev->name, conn);
1517 /* For outgoing pairing which doesn't necessarily have an
1518 * associated socket (e.g. mgmt_pair_device).
1521 smp_conn_security(hcon, hcon->pending_sec_level);
1523 /* For LE slave connections, make sure the connection interval
1524 * is in the range of the minium and maximum interval that has
1525 * been configured for this connection. If not, then trigger
1526 * the connection update procedure.
1528 if (hcon->role == HCI_ROLE_SLAVE &&
1529 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1530 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1531 struct l2cap_conn_param_update_req req;
1533 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1534 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1535 req.latency = cpu_to_le16(hcon->le_conn_latency);
1536 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1538 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1539 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1543 static void l2cap_conn_ready(struct l2cap_conn *conn)
1545 struct l2cap_chan *chan;
1546 struct hci_conn *hcon = conn->hcon;
1548 BT_DBG("conn %p", conn);
1550 if (hcon->type == ACL_LINK)
1551 l2cap_request_info(conn);
1553 mutex_lock(&conn->chan_lock);
1555 list_for_each_entry(chan, &conn->chan_l, list) {
1557 l2cap_chan_lock(chan);
1559 if (chan->scid == L2CAP_CID_A2MP) {
1560 l2cap_chan_unlock(chan);
1564 if (hcon->type == LE_LINK) {
1565 l2cap_le_start(chan);
1566 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1567 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1568 l2cap_chan_ready(chan);
1569 } else if (chan->state == BT_CONNECT) {
1570 l2cap_do_start(chan);
1573 l2cap_chan_unlock(chan);
1576 mutex_unlock(&conn->chan_lock);
1578 if (hcon->type == LE_LINK)
1579 l2cap_le_conn_ready(conn);
1581 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1584 /* Notify sockets that we cannot guaranty reliability anymore */
1585 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1587 struct l2cap_chan *chan;
1589 BT_DBG("conn %p", conn);
1591 mutex_lock(&conn->chan_lock);
1593 list_for_each_entry(chan, &conn->chan_l, list) {
1594 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1595 l2cap_chan_set_err(chan, err);
1598 mutex_unlock(&conn->chan_lock);
1601 static void l2cap_info_timeout(struct work_struct *work)
1603 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1606 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1607 conn->info_ident = 0;
1609 l2cap_conn_start(conn);
/*
 * l2cap_user
 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
 * callback is called during registration. The ->remove callback is called
 * during unregistration.
 * An l2cap_user object is unregistered either explicitly, or implicitly when
 * the underlying l2cap_conn object is deleted. This guarantees that
 * l2cap->hcon, l2cap->hchan, .. are valid as long as the remove callback
 * hasn't been called.
 * External modules must own a reference to the l2cap_conn object if they
 * intend to call l2cap_unregister_user(). The l2cap_conn object might get
 * destroyed at any time if they don't.
 */
1625 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1627 struct hci_dev *hdev = conn->hcon->hdev;
1630 /* We need to check whether l2cap_conn is registered. If it is not, we
1631 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1632 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1633 * relies on the parent hci_conn object to be locked. This itself relies
1634 * on the hci_dev object to be locked. So we must lock the hci device
1639 if (!list_empty(&user->list)) {
1644 /* conn->hchan is NULL after l2cap_conn_del() was called */
1650 ret = user->probe(conn, user);
1654 list_add(&user->list, &conn->users);
1658 hci_dev_unlock(hdev);
1661 EXPORT_SYMBOL(l2cap_register_user);
1663 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1665 struct hci_dev *hdev = conn->hcon->hdev;
1669 if (list_empty(&user->list))
1672 list_del_init(&user->list);
1673 user->remove(conn, user);
1676 hci_dev_unlock(hdev);
1678 EXPORT_SYMBOL(l2cap_unregister_user);
1680 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1682 struct l2cap_user *user;
1684 while (!list_empty(&conn->users)) {
1685 user = list_first_entry(&conn->users, struct l2cap_user, list);
1686 list_del_init(&user->list);
1687 user->remove(conn, user);
1691 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1693 struct l2cap_conn *conn = hcon->l2cap_data;
1694 struct l2cap_chan *chan, *l;
1699 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1701 kfree_skb(conn->rx_skb);
1703 skb_queue_purge(&conn->pending_rx);
1705 /* We can not call flush_work(&conn->pending_rx_work) here since we
1706 * might block if we are running on a worker from the same workqueue
1707 * pending_rx_work is waiting on.
1709 if (work_pending(&conn->pending_rx_work))
1710 cancel_work_sync(&conn->pending_rx_work);
1712 if (work_pending(&conn->id_addr_update_work))
1713 cancel_work_sync(&conn->id_addr_update_work);
1715 l2cap_unregister_all_users(conn);
1717 /* Force the connection to be immediately dropped */
1718 hcon->disc_timeout = 0;
1720 mutex_lock(&conn->chan_lock);
1723 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1724 l2cap_chan_hold(chan);
1725 l2cap_chan_lock(chan);
1727 l2cap_chan_del(chan, err);
1729 l2cap_chan_unlock(chan);
1731 chan->ops->close(chan);
1732 l2cap_chan_put(chan);
1735 mutex_unlock(&conn->chan_lock);
1737 hci_chan_del(conn->hchan);
1739 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1740 cancel_delayed_work_sync(&conn->info_timer);
1742 hcon->l2cap_data = NULL;
1744 l2cap_conn_put(conn);
1747 static void l2cap_conn_free(struct kref *ref)
1749 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1751 hci_conn_put(conn->hcon);
1755 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1757 kref_get(&conn->ref);
1760 EXPORT_SYMBOL(l2cap_conn_get);
1762 void l2cap_conn_put(struct l2cap_conn *conn)
1764 kref_put(&conn->ref, l2cap_conn_free);
1766 EXPORT_SYMBOL(l2cap_conn_put);
1768 /* ---- Socket interface ---- */
1770 /* Find socket with psm and source / destination bdaddr.
1771 * Returns closest match.
1773 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1778 struct l2cap_chan *c, *c1 = NULL;
1780 read_lock(&chan_list_lock);
1782 list_for_each_entry(c, &chan_list, global_l) {
1783 if (state && c->state != state)
1786 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1789 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1792 if (c->psm == psm) {
1793 int src_match, dst_match;
1794 int src_any, dst_any;
1797 src_match = !bacmp(&c->src, src);
1798 dst_match = !bacmp(&c->dst, dst);
1799 if (src_match && dst_match) {
1801 read_unlock(&chan_list_lock);
1806 src_any = !bacmp(&c->src, BDADDR_ANY);
1807 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1808 if ((src_match && dst_any) || (src_any && dst_match) ||
1809 (src_any && dst_any))
1815 l2cap_chan_hold(c1);
1817 read_unlock(&chan_list_lock);
1822 static void l2cap_monitor_timeout(struct work_struct *work)
1824 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1825 monitor_timer.work);
1827 BT_DBG("chan %p", chan);
1829 l2cap_chan_lock(chan);
1832 l2cap_chan_unlock(chan);
1833 l2cap_chan_put(chan);
1837 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1839 l2cap_chan_unlock(chan);
1840 l2cap_chan_put(chan);
1843 static void l2cap_retrans_timeout(struct work_struct *work)
1845 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1846 retrans_timer.work);
1848 BT_DBG("chan %p", chan);
1850 l2cap_chan_lock(chan);
1853 l2cap_chan_unlock(chan);
1854 l2cap_chan_put(chan);
1858 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1859 l2cap_chan_unlock(chan);
1860 l2cap_chan_put(chan);
1863 static void l2cap_streaming_send(struct l2cap_chan *chan,
1864 struct sk_buff_head *skbs)
1866 struct sk_buff *skb;
1867 struct l2cap_ctrl *control;
1869 BT_DBG("chan %p, skbs %p", chan, skbs);
1871 if (__chan_is_moving(chan))
1874 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1876 while (!skb_queue_empty(&chan->tx_q)) {
1878 skb = skb_dequeue(&chan->tx_q);
1880 bt_cb(skb)->l2cap.retries = 1;
1881 control = &bt_cb(skb)->l2cap;
1883 control->reqseq = 0;
1884 control->txseq = chan->next_tx_seq;
1886 __pack_control(chan, control, skb);
1888 if (chan->fcs == L2CAP_FCS_CRC16) {
1889 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1890 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1893 l2cap_do_send(chan, skb);
1895 BT_DBG("Sent txseq %u", control->txseq);
1897 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1898 chan->frames_sent++;
1902 static int l2cap_ertm_send(struct l2cap_chan *chan)
1904 struct sk_buff *skb, *tx_skb;
1905 struct l2cap_ctrl *control;
1908 BT_DBG("chan %p", chan);
1910 if (chan->state != BT_CONNECTED)
1913 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1916 if (__chan_is_moving(chan))
1919 while (chan->tx_send_head &&
1920 chan->unacked_frames < chan->remote_tx_win &&
1921 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1923 skb = chan->tx_send_head;
1925 bt_cb(skb)->l2cap.retries = 1;
1926 control = &bt_cb(skb)->l2cap;
1928 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1931 control->reqseq = chan->buffer_seq;
1932 chan->last_acked_seq = chan->buffer_seq;
1933 control->txseq = chan->next_tx_seq;
1935 __pack_control(chan, control, skb);
1937 if (chan->fcs == L2CAP_FCS_CRC16) {
1938 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1939 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1942 /* Clone after data has been modified. Data is assumed to be
1943 read-only (for locking purposes) on cloned sk_buffs.
1945 tx_skb = skb_clone(skb, GFP_KERNEL);
1950 __set_retrans_timer(chan);
1952 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1953 chan->unacked_frames++;
1954 chan->frames_sent++;
1957 if (skb_queue_is_last(&chan->tx_q, skb))
1958 chan->tx_send_head = NULL;
1960 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1962 l2cap_do_send(chan, tx_skb);
1963 BT_DBG("Sent txseq %u", control->txseq);
1966 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1967 chan->unacked_frames, skb_queue_len(&chan->tx_q));
1972 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1974 struct l2cap_ctrl control;
1975 struct sk_buff *skb;
1976 struct sk_buff *tx_skb;
1979 BT_DBG("chan %p", chan);
1981 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1984 if (__chan_is_moving(chan))
1987 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1988 seq = l2cap_seq_list_pop(&chan->retrans_list);
1990 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1992 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1997 bt_cb(skb)->l2cap.retries++;
1998 control = bt_cb(skb)->l2cap;
2000 if (chan->max_tx != 0 &&
2001 bt_cb(skb)->l2cap.retries > chan->max_tx) {
2002 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2003 l2cap_send_disconn_req(chan, ECONNRESET);
2004 l2cap_seq_list_clear(&chan->retrans_list);
2008 control.reqseq = chan->buffer_seq;
2009 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2014 if (skb_cloned(skb)) {
2015 /* Cloned sk_buffs are read-only, so we need a
2018 tx_skb = skb_copy(skb, GFP_KERNEL);
2020 tx_skb = skb_clone(skb, GFP_KERNEL);
2024 l2cap_seq_list_clear(&chan->retrans_list);
2028 /* Update skb contents */
2029 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2030 put_unaligned_le32(__pack_extended_control(&control),
2031 tx_skb->data + L2CAP_HDR_SIZE);
2033 put_unaligned_le16(__pack_enhanced_control(&control),
2034 tx_skb->data + L2CAP_HDR_SIZE);
2038 if (chan->fcs == L2CAP_FCS_CRC16) {
2039 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2040 tx_skb->len - L2CAP_FCS_SIZE);
2041 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2045 l2cap_do_send(chan, tx_skb);
2047 BT_DBG("Resent txseq %d", control.txseq);
2049 chan->last_acked_seq = chan->buffer_seq;
2053 static void l2cap_retransmit(struct l2cap_chan *chan,
2054 struct l2cap_ctrl *control)
2056 BT_DBG("chan %p, control %p", chan, control);
2058 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2059 l2cap_ertm_resend(chan);
2062 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2063 struct l2cap_ctrl *control)
2065 struct sk_buff *skb;
2067 BT_DBG("chan %p, control %p", chan, control);
2070 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2072 l2cap_seq_list_clear(&chan->retrans_list);
2074 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2077 if (chan->unacked_frames) {
2078 skb_queue_walk(&chan->tx_q, skb) {
2079 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2080 skb == chan->tx_send_head)
2084 skb_queue_walk_from(&chan->tx_q, skb) {
2085 if (skb == chan->tx_send_head)
2088 l2cap_seq_list_append(&chan->retrans_list,
2089 bt_cb(skb)->l2cap.txseq);
2092 l2cap_ertm_resend(chan);
2096 static void l2cap_send_ack(struct l2cap_chan *chan)
2098 struct l2cap_ctrl control;
2099 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2100 chan->last_acked_seq);
2103 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2104 chan, chan->last_acked_seq, chan->buffer_seq);
2106 memset(&control, 0, sizeof(control));
2109 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2110 chan->rx_state == L2CAP_RX_STATE_RECV) {
2111 __clear_ack_timer(chan);
2112 control.super = L2CAP_SUPER_RNR;
2113 control.reqseq = chan->buffer_seq;
2114 l2cap_send_sframe(chan, &control);
2116 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2117 l2cap_ertm_send(chan);
2118 /* If any i-frames were sent, they included an ack */
2119 if (chan->buffer_seq == chan->last_acked_seq)
2123 /* Ack now if the window is 3/4ths full.
2124 * Calculate without mul or div
2126 threshold = chan->ack_win;
2127 threshold += threshold << 1;
2130 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2133 if (frames_to_ack >= threshold) {
2134 __clear_ack_timer(chan);
2135 control.super = L2CAP_SUPER_RR;
2136 control.reqseq = chan->buffer_seq;
2137 l2cap_send_sframe(chan, &control);
2142 __set_ack_timer(chan);
2146 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2147 struct msghdr *msg, int len,
2148 int count, struct sk_buff *skb)
2150 struct l2cap_conn *conn = chan->conn;
2151 struct sk_buff **frag;
2154 if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2160 /* Continuation fragments (no L2CAP header) */
2161 frag = &skb_shinfo(skb)->frag_list;
2163 struct sk_buff *tmp;
2165 count = min_t(unsigned int, conn->mtu, len);
2167 tmp = chan->ops->alloc_skb(chan, 0, count,
2168 msg->msg_flags & MSG_DONTWAIT);
2170 return PTR_ERR(tmp);
2174 if (!copy_from_iter_full(skb_put(*frag, count), count,
2181 skb->len += (*frag)->len;
2182 skb->data_len += (*frag)->len;
2184 frag = &(*frag)->next;
2190 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2191 struct msghdr *msg, size_t len)
2193 struct l2cap_conn *conn = chan->conn;
2194 struct sk_buff *skb;
2195 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2196 struct l2cap_hdr *lh;
2198 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2199 __le16_to_cpu(chan->psm), len);
2201 count = min_t(unsigned int, (conn->mtu - hlen), len);
2203 skb = chan->ops->alloc_skb(chan, hlen, count,
2204 msg->msg_flags & MSG_DONTWAIT);
2208 /* Create L2CAP header */
2209 lh = skb_put(skb, L2CAP_HDR_SIZE);
2210 lh->cid = cpu_to_le16(chan->dcid);
2211 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2212 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2214 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2215 if (unlikely(err < 0)) {
2217 return ERR_PTR(err);
2222 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2223 struct msghdr *msg, size_t len)
2225 struct l2cap_conn *conn = chan->conn;
2226 struct sk_buff *skb;
2228 struct l2cap_hdr *lh;
2230 BT_DBG("chan %p len %zu", chan, len);
2232 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2234 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2235 msg->msg_flags & MSG_DONTWAIT);
2239 /* Create L2CAP header */
2240 lh = skb_put(skb, L2CAP_HDR_SIZE);
2241 lh->cid = cpu_to_le16(chan->dcid);
2242 lh->len = cpu_to_le16(len);
2244 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2245 if (unlikely(err < 0)) {
2247 return ERR_PTR(err);
2252 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2253 struct msghdr *msg, size_t len,
2256 struct l2cap_conn *conn = chan->conn;
2257 struct sk_buff *skb;
2258 int err, count, hlen;
2259 struct l2cap_hdr *lh;
2261 BT_DBG("chan %p len %zu", chan, len);
2264 return ERR_PTR(-ENOTCONN);
2266 hlen = __ertm_hdr_size(chan);
2269 hlen += L2CAP_SDULEN_SIZE;
2271 if (chan->fcs == L2CAP_FCS_CRC16)
2272 hlen += L2CAP_FCS_SIZE;
2274 count = min_t(unsigned int, (conn->mtu - hlen), len);
2276 skb = chan->ops->alloc_skb(chan, hlen, count,
2277 msg->msg_flags & MSG_DONTWAIT);
2281 /* Create L2CAP header */
2282 lh = skb_put(skb, L2CAP_HDR_SIZE);
2283 lh->cid = cpu_to_le16(chan->dcid);
2284 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2286 /* Control header is populated later */
2287 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2288 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2290 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2293 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2295 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2296 if (unlikely(err < 0)) {
2298 return ERR_PTR(err);
2301 bt_cb(skb)->l2cap.fcs = chan->fcs;
2302 bt_cb(skb)->l2cap.retries = 0;
2306 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2307 struct sk_buff_head *seg_queue,
2308 struct msghdr *msg, size_t len)
2310 struct sk_buff *skb;
2315 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2317 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2318 * so fragmented skbs are not used. The HCI layer's handling
2319 * of fragmented skbs is not compatible with ERTM's queueing.
2322 /* PDU size is derived from the HCI MTU */
2323 pdu_len = chan->conn->mtu;
2325 /* Constrain PDU size for BR/EDR connections */
2327 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2329 /* Adjust for largest possible L2CAP overhead. */
2331 pdu_len -= L2CAP_FCS_SIZE;
2333 pdu_len -= __ertm_hdr_size(chan);
2335 /* Remote device may have requested smaller PDUs */
2336 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2338 if (len <= pdu_len) {
2339 sar = L2CAP_SAR_UNSEGMENTED;
2343 sar = L2CAP_SAR_START;
2348 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2351 __skb_queue_purge(seg_queue);
2352 return PTR_ERR(skb);
2355 bt_cb(skb)->l2cap.sar = sar;
2356 __skb_queue_tail(seg_queue, skb);
2362 if (len <= pdu_len) {
2363 sar = L2CAP_SAR_END;
2366 sar = L2CAP_SAR_CONTINUE;
2373 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2375 size_t len, u16 sdulen)
2377 struct l2cap_conn *conn = chan->conn;
2378 struct sk_buff *skb;
2379 int err, count, hlen;
2380 struct l2cap_hdr *lh;
2382 BT_DBG("chan %p len %zu", chan, len);
2385 return ERR_PTR(-ENOTCONN);
2387 hlen = L2CAP_HDR_SIZE;
2390 hlen += L2CAP_SDULEN_SIZE;
2392 count = min_t(unsigned int, (conn->mtu - hlen), len);
2394 skb = chan->ops->alloc_skb(chan, hlen, count,
2395 msg->msg_flags & MSG_DONTWAIT);
2399 /* Create L2CAP header */
2400 lh = skb_put(skb, L2CAP_HDR_SIZE);
2401 lh->cid = cpu_to_le16(chan->dcid);
2402 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2405 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2407 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2408 if (unlikely(err < 0)) {
2410 return ERR_PTR(err);
2416 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2417 struct sk_buff_head *seg_queue,
2418 struct msghdr *msg, size_t len)
2420 struct sk_buff *skb;
2424 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2427 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2433 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2435 __skb_queue_purge(seg_queue);
2436 return PTR_ERR(skb);
2439 __skb_queue_tail(seg_queue, skb);
2445 pdu_len += L2CAP_SDULEN_SIZE;
2452 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2456 BT_DBG("chan %p", chan);
2458 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2459 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2464 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2465 skb_queue_len(&chan->tx_q));
2468 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2470 struct sk_buff *skb;
2472 struct sk_buff_head seg_queue;
2477 /* Connectionless channel */
2478 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2479 skb = l2cap_create_connless_pdu(chan, msg, len);
2481 return PTR_ERR(skb);
2483 /* Channel lock is released before requesting new skb and then
2484 * reacquired thus we need to recheck channel state.
2486 if (chan->state != BT_CONNECTED) {
2491 l2cap_do_send(chan, skb);
2495 switch (chan->mode) {
2496 case L2CAP_MODE_LE_FLOWCTL:
2497 /* Check outgoing MTU */
2498 if (len > chan->omtu)
2501 __skb_queue_head_init(&seg_queue);
2503 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2505 if (chan->state != BT_CONNECTED) {
2506 __skb_queue_purge(&seg_queue);
2513 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2515 l2cap_le_flowctl_send(chan);
2517 if (!chan->tx_credits)
2518 chan->ops->suspend(chan);
2524 case L2CAP_MODE_BASIC:
2525 /* Check outgoing MTU */
2526 if (len > chan->omtu)
2529 /* Create a basic PDU */
2530 skb = l2cap_create_basic_pdu(chan, msg, len);
2532 return PTR_ERR(skb);
2534 /* Channel lock is released before requesting new skb and then
2535 * reacquired thus we need to recheck channel state.
2537 if (chan->state != BT_CONNECTED) {
2542 l2cap_do_send(chan, skb);
2546 case L2CAP_MODE_ERTM:
2547 case L2CAP_MODE_STREAMING:
2548 /* Check outgoing MTU */
2549 if (len > chan->omtu) {
2554 __skb_queue_head_init(&seg_queue);
2556 /* Do segmentation before calling in to the state machine,
2557 * since it's possible to block while waiting for memory
2560 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2562 /* The channel could have been closed while segmenting,
2563 * check that it is still connected.
2565 if (chan->state != BT_CONNECTED) {
2566 __skb_queue_purge(&seg_queue);
2573 if (chan->mode == L2CAP_MODE_ERTM)
2574 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2576 l2cap_streaming_send(chan, &seg_queue);
2580 /* If the skbs were not queued for sending, they'll still be in
2581 * seg_queue and need to be purged.
2583 __skb_queue_purge(&seg_queue);
2587 BT_DBG("bad state %1.1x", chan->mode);
2593 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2595 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2597 struct l2cap_ctrl control;
2600 BT_DBG("chan %p, txseq %u", chan, txseq);
2602 memset(&control, 0, sizeof(control));
2604 control.super = L2CAP_SUPER_SREJ;
2606 for (seq = chan->expected_tx_seq; seq != txseq;
2607 seq = __next_seq(chan, seq)) {
2608 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2609 control.reqseq = seq;
2610 l2cap_send_sframe(chan, &control);
2611 l2cap_seq_list_append(&chan->srej_list, seq);
2615 chan->expected_tx_seq = __next_seq(chan, txseq);
2618 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2620 struct l2cap_ctrl control;
2622 BT_DBG("chan %p", chan);
2624 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2627 memset(&control, 0, sizeof(control));
2629 control.super = L2CAP_SUPER_SREJ;
2630 control.reqseq = chan->srej_list.tail;
2631 l2cap_send_sframe(chan, &control);
2634 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2636 struct l2cap_ctrl control;
2640 BT_DBG("chan %p, txseq %u", chan, txseq);
2642 memset(&control, 0, sizeof(control));
2644 control.super = L2CAP_SUPER_SREJ;
2646 /* Capture initial list head to allow only one pass through the list. */
2647 initial_head = chan->srej_list.head;
2650 seq = l2cap_seq_list_pop(&chan->srej_list);
2651 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2654 control.reqseq = seq;
2655 l2cap_send_sframe(chan, &control);
2656 l2cap_seq_list_append(&chan->srej_list, seq);
2657 } while (chan->srej_list.head != initial_head);
2660 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2662 struct sk_buff *acked_skb;
2665 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2667 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2670 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2671 chan->expected_ack_seq, chan->unacked_frames);
2673 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2674 ackseq = __next_seq(chan, ackseq)) {
2676 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2678 skb_unlink(acked_skb, &chan->tx_q);
2679 kfree_skb(acked_skb);
2680 chan->unacked_frames--;
2684 chan->expected_ack_seq = reqseq;
2686 if (chan->unacked_frames == 0)
2687 __clear_retrans_timer(chan);
2689 BT_DBG("unacked_frames %u", chan->unacked_frames);
2692 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2694 BT_DBG("chan %p", chan);
2696 chan->expected_tx_seq = chan->buffer_seq;
2697 l2cap_seq_list_clear(&chan->srej_list);
2698 skb_queue_purge(&chan->srej_q);
2699 chan->rx_state = L2CAP_RX_STATE_RECV;
2702 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2703 struct l2cap_ctrl *control,
2704 struct sk_buff_head *skbs, u8 event)
2706 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2710 case L2CAP_EV_DATA_REQUEST:
2711 if (chan->tx_send_head == NULL)
2712 chan->tx_send_head = skb_peek(skbs);
2714 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2715 l2cap_ertm_send(chan);
2717 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2718 BT_DBG("Enter LOCAL_BUSY");
2719 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2721 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2722 /* The SREJ_SENT state must be aborted if we are to
2723 * enter the LOCAL_BUSY state.
2725 l2cap_abort_rx_srej_sent(chan);
2728 l2cap_send_ack(chan);
2731 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2732 BT_DBG("Exit LOCAL_BUSY");
2733 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2735 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2736 struct l2cap_ctrl local_control;
2738 memset(&local_control, 0, sizeof(local_control));
2739 local_control.sframe = 1;
2740 local_control.super = L2CAP_SUPER_RR;
2741 local_control.poll = 1;
2742 local_control.reqseq = chan->buffer_seq;
2743 l2cap_send_sframe(chan, &local_control);
2745 chan->retry_count = 1;
2746 __set_monitor_timer(chan);
2747 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2750 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2751 l2cap_process_reqseq(chan, control->reqseq);
2753 case L2CAP_EV_EXPLICIT_POLL:
2754 l2cap_send_rr_or_rnr(chan, 1);
2755 chan->retry_count = 1;
2756 __set_monitor_timer(chan);
2757 __clear_ack_timer(chan);
2758 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2760 case L2CAP_EV_RETRANS_TO:
2761 l2cap_send_rr_or_rnr(chan, 1);
2762 chan->retry_count = 1;
2763 __set_monitor_timer(chan);
2764 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2766 case L2CAP_EV_RECV_FBIT:
2767 /* Nothing to process */
/* ERTM transmit-side state machine: handler for the WAIT_F state,
 * entered after sending a poll (P=1) while waiting for a frame that
 * carries the Final (F) bit.  New data requests are queued but not
 * transmitted while in this state.
 * NOTE(review): this listing omits interleaved lines (braces, break
 * statements, blank lines) present in the original file.
 */
2774 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2775 struct l2cap_ctrl *control,
2776 struct sk_buff_head *skbs, u8 event)
2778 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2782 case L2CAP_EV_DATA_REQUEST:
2783 if (chan->tx_send_head == NULL)
2784 chan->tx_send_head = skb_peek(skbs);
2785 /* Queue data, but don't send. */
2786 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2788 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2789 BT_DBG("Enter LOCAL_BUSY");
2790 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2792 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2793 /* The SREJ_SENT state must be aborted if we are to
2794 * enter the LOCAL_BUSY state.
2796 l2cap_abort_rx_srej_sent(chan);
2799 l2cap_send_ack(chan);
2802 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2803 BT_DBG("Exit LOCAL_BUSY");
2804 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2806 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
/* We previously advertised busy via RNR; send an RR poll so the
 * peer learns we can receive again, and restart the monitor timer.
 */
2807 struct l2cap_ctrl local_control;
2808 memset(&local_control, 0, sizeof(local_control));
2809 local_control.sframe = 1;
2810 local_control.super = L2CAP_SUPER_RR;
2811 local_control.poll = 1;
2812 local_control.reqseq = chan->buffer_seq;
2813 l2cap_send_sframe(chan, &local_control);
2815 chan->retry_count = 1;
2816 __set_monitor_timer(chan);
2817 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2820 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2821 l2cap_process_reqseq(chan, control->reqseq);
2825 case L2CAP_EV_RECV_FBIT:
2826 if (control && control->final) {
/* The Final bit answers our poll: stop the monitor timer,
 * re-arm retransmission if frames are still unacked, and
 * fall back to the normal XMIT state.
 */
2827 __clear_monitor_timer(chan);
2828 if (chan->unacked_frames > 0)
2829 __set_retrans_timer(chan);
2830 chan->retry_count = 0;
2831 chan->tx_state = L2CAP_TX_STATE_XMIT;
/* Fixed format string: was "0x2.2%x", which printed a literal
 * "0x2.2" followed by an unpadded hex value instead of the
 * intended zero-padded "0x%2.2x" used everywhere else here.
 */
2832 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2835 case L2CAP_EV_EXPLICIT_POLL:
2838 case L2CAP_EV_MONITOR_TO:
/* Monitor timeout: re-poll until max_tx retries are exhausted
 * (max_tx == 0 means retry forever), then drop the connection.
 */
2839 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2840 l2cap_send_rr_or_rnr(chan, 1);
2841 __set_monitor_timer(chan);
2842 chan->retry_count++;
2844 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Top-level ERTM transmit event dispatcher: routes @event (and any
 * queued skbs) to the handler for the channel's current tx_state,
 * either XMIT or WAIT_F.
 */
2852 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2853 struct sk_buff_head *skbs, u8 event)
2855 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2856 chan, control, skbs, event, chan->tx_state);
2858 switch (chan->tx_state) {
2859 case L2CAP_TX_STATE_XMIT:
2860 l2cap_tx_state_xmit(chan, control, skbs, event);
2862 case L2CAP_TX_STATE_WAIT_F:
2863 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed a received reqseq + F-bit into the transmit state machine.
 * No data frames accompany this event, so skbs is NULL.
 */
2871 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2872 struct l2cap_ctrl *control)
2874 BT_DBG("chan %p, control %p", chan, control);
2875 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed only a received F-bit into the transmit state machine
 * (no reqseq processing).  No data frames, so skbs is NULL.
 */
2878 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2879 struct l2cap_ctrl *control)
2881 BT_DBG("chan %p, control %p", chan, control);
2882 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2885 /* Copy frame to all raw sockets on that connection */
2886 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2888 struct sk_buff *nskb;
2889 struct l2cap_chan *chan;
2891 BT_DBG("conn %p", conn);
2893 mutex_lock(&conn->chan_lock);
2895 list_for_each_entry(chan, &conn->chan_l, list) {
/* Only channels of type RAW receive a copy. */
2896 if (chan->chan_type != L2CAP_CHAN_RAW)
2899 /* Don't send frame to the channel it came from */
2900 if (bt_cb(skb)->l2cap.chan == chan)
/* Clone rather than share: each channel consumes its own skb. */
2903 nskb = skb_clone(skb, GFP_KERNEL)
2906 if (chan->ops->recv(chan, nskb))
2910 mutex_unlock(&conn->chan_lock);
2913 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command PDU: L2CAP header + command header +
 * @dlen bytes of @data.  Payload that does not fit in one buffer of
 * conn->mtu bytes is carried in continuation fragments chained on
 * skb_shinfo()->frag_list (error/loop lines omitted in this listing).
 */
2914 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2915 u8 ident, u16 dlen, void *data)
2917 struct sk_buff *skb, **frag;
2918 struct l2cap_cmd_hdr *cmd;
2919 struct l2cap_hdr *lh;
2922 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2923 conn, code, ident, dlen);
/* MTU must at least hold the two headers. */
2925 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2928 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2929 count = min_t(unsigned int, conn->mtu, len);
2931 skb = bt_skb_alloc(count, GFP_KERNEL);
2935 lh = skb_put(skb, L2CAP_HDR_SIZE);
2936 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* The signalling CID differs between LE and BR/EDR links. */
2938 if (conn->hcon->type == LE_LINK)
2939 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2941 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2943 cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
2946 cmd->len = cpu_to_le16(dlen);
/* First buffer: copy as much payload as fits after the headers. */
2949 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2950 skb_put_data(skb, data, count);
2956 /* Continuation fragments (no L2CAP header) */
2957 frag = &skb_shinfo(skb)->frag_list;
2959 count = min_t(unsigned int, conn->mtu, len);
2961 *frag = bt_skb_alloc(count, GFP_KERNEL);
2965 skb_put_data(*frag, data, count);
2970 frag = &(*frag)->next;
/* Decode one configuration option at *ptr into (*type, *olen, *val)
 * and return the number of bytes it occupies.  1-, 2- and 4-byte
 * values are read inline (unaligned-safe); for any other length *val
 * holds a pointer to the raw option payload instead.
 */
2980 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2983 struct l2cap_conf_opt *opt = *ptr;
2986 len = L2CAP_CONF_OPT_SIZE + opt->len;
2994 *val = *((u8 *) opt->val);
2998 *val = get_unaligned_le16(opt->val);
3002 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer to the raw bytes. */
3006 *val = (unsigned long) opt->val;
3010 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr, bounds-checked against the
 * remaining buffer space @size; the option is silently dropped when it
 * would overflow.  On success *ptr advances past the written option.
 */
3014 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3016 struct l2cap_conf_opt *opt = *ptr;
3018 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3020 if (size < L2CAP_CONF_OPT_SIZE + len)
3028 *((u8 *) opt->val) = val;
3032 put_unaligned_le16(val, opt->val);
3036 put_unaligned_le32(val, opt->val);
/* Lengths other than 1/2/4: val is a pointer to the raw payload. */
3040 memcpy(opt->val, (void *) val, len);
3044 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification option from the channel's local
 * QoS parameters and append it via l2cap_add_conf_opt().  ERTM uses the
 * channel's local service type; streaming mode always advertises
 * best-effort.
 */
3047 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3049 struct l2cap_conf_efs efs;
3051 switch (chan->mode) {
3052 case L2CAP_MODE_ERTM:
3053 efs.id = chan->local_id;
3054 efs.stype = chan->local_stype;
3055 efs.msdu = cpu_to_le16(chan->local_msdu);
3056 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3057 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3058 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3061 case L2CAP_MODE_STREAMING:
3063 efs.stype = L2CAP_SERV_BESTEFFORT;
3064 efs.msdu = cpu_to_le16(chan->local_msdu);
3065 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3074 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3075 (unsigned long) &efs, size);
/* Delayed-work handler for the ERTM ack timer: if received frames are
 * still unacknowledged (buffer_seq has advanced past last_acked_seq),
 * send an RR/RNR so the peer's window does not stall.  Drops the
 * channel reference taken when the timer was armed.
 */
3078 static void l2cap_ack_timeout(struct work_struct *work)
3080 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3084 BT_DBG("chan %p", chan);
3086 l2cap_chan_lock(chan);
3088 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3089 chan->last_acked_seq);
3092 l2cap_send_rr_or_rnr(chan, 0);
3094 l2cap_chan_unlock(chan);
3095 l2cap_chan_put(chan);
/* Reset all ERTM/streaming sequencing state for a freshly configured
 * channel.  For ERTM proper, also initialize the retransmit, monitor
 * and ack delayed-work timers plus the SREJ/retransmit sequence lists.
 * Returns 0 on success or the error from sequence-list allocation.
 */
3098 int l2cap_ertm_init(struct l2cap_chan *chan)
3102 chan->next_tx_seq = 0;
3103 chan->expected_tx_seq = 0;
3104 chan->expected_ack_seq = 0;
3105 chan->unacked_frames = 0;
3106 chan->buffer_seq = 0;
3107 chan->frames_sent = 0;
3108 chan->last_acked_seq = 0;
3110 chan->sdu_last_frag = NULL;
3113 skb_queue_head_init(&chan->tx_q);
/* Channel starts on the BR/EDR controller; AMP moves come later. */
3115 chan->local_amp_id = AMP_ID_BREDR;
3116 chan->move_id = AMP_ID_BREDR;
3117 chan->move_state = L2CAP_MOVE_STABLE;
3118 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the timers or sequence lists below. */
3120 if (chan->mode != L2CAP_MODE_ERTM)
3123 chan->rx_state = L2CAP_RX_STATE_RECV;
3124 chan->tx_state = L2CAP_TX_STATE_XMIT;
3126 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3127 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3128 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3130 skb_queue_head_init(&chan->srej_q);
3132 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3136 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Unwind the first list if the second allocation failed. */
3138 l2cap_seq_list_free(&chan->srej_list);
/* Pick the channel mode to use: keep ERTM/streaming only when the
 * remote's feature mask supports it, otherwise fall back to basic mode.
 */
3143 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3146 case L2CAP_MODE_STREAMING:
3147 case L2CAP_MODE_ERTM:
3148 if (l2cap_mode_supported(mode, remote_feat_mask))
3152 return L2CAP_MODE_BASIC;
/* Extended window size requires a local A2MP fixed channel and the
 * remote's EXT_WINDOW feature bit.
 */
3156 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3158 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3159 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
/* Extended flow specification requires a local A2MP fixed channel and
 * the remote's EXT_FLOW feature bit.
 */
3162 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3164 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3165 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
/* Fill in the RFC retransmission and monitor timeouts: derived from
 * the AMP controller's best-effort flush timeout when the channel is
 * on an AMP link, otherwise the spec default values.
 */
3168 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3169 struct l2cap_conf_rfc *rfc)
3171 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3172 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3174 /* Class 1 devices must have ERTM timeouts
3175 * exceeding the Link Supervision Timeout. The
3176 * default Link Supervision Timeout for AMP
3177 * controllers is 10 seconds.
3179 * Class 1 devices use 0xffffffff for their
3180 * best-effort flush timeout, so the clamping logic
3181 * will result in a timeout that meets the above
3182 * requirement. ERTM timeouts are 16-bit values, so
3183 * the maximum timeout is 65.535 seconds.
3186 /* Convert timeout to milliseconds and round */
3187 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3189 /* This is the recommended formula for class 2 devices
3190 * that start ERTM timers when packets are sent to the
3193 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field (max 65.535 s). */
3195 if (ertm_to > 0xffff)
3198 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3199 rfc->monitor_timeout = rfc->retrans_timeout;
3201 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3202 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Choose the transmit window: enable the extended control field when a
 * window larger than the default is requested and the link supports
 * extended windows; otherwise clamp tx_win to the classic default.
 * ack_win starts out mirroring tx_win and may shrink in negotiation.
 */
3206 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3208 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3209 __l2cap_ews_supported(chan->conn)) {
3210 /* use extended control field */
3211 set_bit(FLAG_EXT_CTRL, &chan->flags);
3212 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3214 chan->tx_win = min_t(u16, chan->tx_win,
3215 L2CAP_DEFAULT_TX_WINDOW);
3216 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3218 chan->ack_win = chan->tx_win;
/* Build an outgoing Configuration Request into @data (at most
 * @data_size bytes): MTU option when non-default, then mode-specific
 * RFC / EFS / EWS / FCS options.  Returns the total request length
 * (return lines omitted in this listing).
 */
3221 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3223 struct l2cap_conf_req *req = data;
3224 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3225 void *ptr = req->data;
3226 void *endptr = data + data_size;
3229 BT_DBG("chan %p", chan);
/* Mode is (re)selected only for the very first request. */
3231 if (chan->num_conf_req || chan->num_conf_rsp)
3234 switch (chan->mode) {
3235 case L2CAP_MODE_STREAMING:
3236 case L2CAP_MODE_ERTM:
3237 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3240 if (__l2cap_efs_supported(chan->conn))
3241 set_bit(FLAG_EFS_ENABLE, &chan->flags);
/* Fall back if the remote cannot do the requested mode. */
3245 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3250 if (chan->imtu != L2CAP_DEFAULT_MTU)
3251 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3253 switch (chan->mode) {
3254 case L2CAP_MODE_BASIC:
/* In basic mode the RFC option is only needed when the remote
 * advertises ERTM or streaming support.
 */
3258 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3259 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3262 rfc.mode = L2CAP_MODE_BASIC;
3264 rfc.max_transmit = 0;
3265 rfc.retrans_timeout = 0;
3266 rfc.monitor_timeout = 0;
3267 rfc.max_pdu_size = 0;
3269 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3270 (unsigned long) &rfc, endptr - ptr);
3273 case L2CAP_MODE_ERTM:
3274 rfc.mode = L2CAP_MODE_ERTM;
3275 rfc.max_transmit = chan->max_tx;
3277 __l2cap_set_ertm_timeouts(chan, &rfc);
/* MPS must leave room for extended header, SDU length and FCS. */
3279 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3280 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3282 rfc.max_pdu_size = cpu_to_le16(size);
3284 l2cap_txwin_setup(chan);
3286 rfc.txwin_size = min_t(u16, chan->tx_win,
3287 L2CAP_DEFAULT_TX_WINDOW);
3289 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3290 (unsigned long) &rfc, endptr - ptr);
3292 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3293 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3295 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3296 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3297 chan->tx_win, endptr - ptr);
/* Offer to disable FCS if the remote supports the option. */
3299 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3300 if (chan->fcs == L2CAP_FCS_NONE ||
3301 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3302 chan->fcs = L2CAP_FCS_NONE;
3303 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3304 chan->fcs, endptr - ptr);
3308 case L2CAP_MODE_STREAMING:
3309 l2cap_txwin_setup(chan);
3310 rfc.mode = L2CAP_MODE_STREAMING;
3312 rfc.max_transmit = 0;
3313 rfc.retrans_timeout = 0;
3314 rfc.monitor_timeout = 0;
3316 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3317 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3319 rfc.max_pdu_size = cpu_to_le16(size);
3321 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3322 (unsigned long) &rfc, endptr - ptr);
3324 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3325 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3327 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3328 if (chan->fcs == L2CAP_FCS_NONE ||
3329 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3330 chan->fcs = L2CAP_FCS_NONE;
3331 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3332 chan->fcs, endptr - ptr);
3337 req->dcid = cpu_to_le16(chan->dcid);
3338 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configuration Request (chan->conf_req /
 * chan->conf_len) and build the Configuration Response into @data.
 * Negotiates MTU, flush timeout, RFC mode, FCS, EFS and EWS options;
 * returns -ECONNREFUSED when no acceptable mode can be agreed on
 * (final length-return lines omitted in this listing).
 */
3343 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3345 struct l2cap_conf_rsp *rsp = data;
3346 void *ptr = rsp->data;
3347 void *endptr = data + data_size;
3348 void *req = chan->conf_req;
3349 int len = chan->conf_len;
3350 int type, hint, olen;
3352 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3353 struct l2cap_conf_efs efs;
3355 u16 mtu = L2CAP_DEFAULT_MTU;
3356 u16 result = L2CAP_CONF_SUCCESS;
3359 BT_DBG("chan %p", chan);
/* First pass: walk every option in the request. */
3361 while (len >= L2CAP_CONF_OPT_SIZE) {
3362 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; mandatory ones may not. */
3366 hint = type & L2CAP_CONF_HINT;
3367 type &= L2CAP_CONF_MASK;
3370 case L2CAP_CONF_MTU:
3376 case L2CAP_CONF_FLUSH_TO:
3379 chan->flush_to = val;
3382 case L2CAP_CONF_QOS:
3385 case L2CAP_CONF_RFC:
3386 if (olen != sizeof(rfc))
3388 memcpy(&rfc, (void *) val, olen);
3391 case L2CAP_CONF_FCS:
3394 if (val == L2CAP_FCS_NONE)
3395 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3398 case L2CAP_CONF_EFS:
3399 if (olen != sizeof(efs))
3402 memcpy(&efs, (void *) val, olen);
3405 case L2CAP_CONF_EWS:
/* Extended window is only valid when A2MP is available. */
3408 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3409 return -ECONNREFUSED;
3410 set_bit(FLAG_EXT_CTRL, &chan->flags);
3411 set_bit(CONF_EWS_RECV, &chan->conf_state);
3412 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3413 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back as UNKNOWN. */
3419 result = L2CAP_CONF_UNKNOWN;
3420 *((u8 *) ptr++) = type;
/* Mode can only change on the first request/response exchange. */
3425 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3428 switch (chan->mode) {
3429 case L2CAP_MODE_STREAMING:
3430 case L2CAP_MODE_ERTM:
3431 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3432 chan->mode = l2cap_select_mode(rfc.mode,
3433 chan->conn->feat_mask);
3438 if (__l2cap_efs_supported(chan->conn))
3439 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3441 return -ECONNREFUSED;
3444 if (chan->mode != rfc.mode)
3445 return -ECONNREFUSED;
3451 if (chan->mode != rfc.mode) {
3452 result = L2CAP_CONF_UNACCEPT;
3453 rfc.mode = chan->mode;
/* After the first response, a mode mismatch is fatal. */
3455 if (chan->num_conf_rsp == 1)
3456 return -ECONNREFUSED;
3458 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3459 (unsigned long) &rfc, endptr - ptr);
3462 if (result == L2CAP_CONF_SUCCESS) {
3463 /* Configure output options and let the other side know
3464 * which ones we don't like. */
3466 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3467 result = L2CAP_CONF_UNACCEPT;
3470 set_bit(CONF_MTU_DONE, &chan->conf_state);
3472 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
/* EFS service-type mismatch (when neither side is NO_TRAFFIC)
 * is unacceptable.
 */
3475 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3476 efs.stype != L2CAP_SERV_NOTRAFIC &&
3477 efs.stype != chan->local_stype) {
3479 result = L2CAP_CONF_UNACCEPT;
3481 if (chan->num_conf_req >= 1)
3482 return -ECONNREFUSED;
3484 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3486 (unsigned long) &efs, endptr - ptr);
3488 /* Send PENDING Conf Rsp */
3489 result = L2CAP_CONF_PENDING;
3490 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3495 case L2CAP_MODE_BASIC:
3496 chan->fcs = L2CAP_FCS_NONE;
3497 set_bit(CONF_MODE_DONE, &chan->conf_state);
3500 case L2CAP_MODE_ERTM:
3501 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3502 chan->remote_tx_win = rfc.txwin_size;
3504 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3506 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the remote's MPS to what our link MTU can carry. */
3508 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3509 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3510 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3511 rfc.max_pdu_size = cpu_to_le16(size);
3512 chan->remote_mps = size;
3514 __l2cap_set_ertm_timeouts(chan, &rfc);
3516 set_bit(CONF_MODE_DONE, &chan->conf_state);
3518 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3519 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3521 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3522 chan->remote_id = efs.id;
3523 chan->remote_stype = efs.stype;
3524 chan->remote_msdu = le16_to_cpu(efs.msdu);
3525 chan->remote_flush_to =
3526 le32_to_cpu(efs.flush_to);
3527 chan->remote_acc_lat =
3528 le32_to_cpu(efs.acc_lat);
3529 chan->remote_sdu_itime =
3530 le32_to_cpu(efs.sdu_itime);
3531 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3533 (unsigned long) &efs, endptr - ptr);
3537 case L2CAP_MODE_STREAMING:
3538 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3539 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3540 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3541 rfc.max_pdu_size = cpu_to_le16(size);
3542 chan->remote_mps = size;
3544 set_bit(CONF_MODE_DONE, &chan->conf_state);
3546 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3547 (unsigned long) &rfc, endptr - ptr);
3552 result = L2CAP_CONF_UNACCEPT;
3554 memset(&rfc, 0, sizeof(rfc));
3555 rfc.mode = chan->mode;
3558 if (result == L2CAP_CONF_SUCCESS)
3559 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3561 rsp->scid = cpu_to_le16(chan->dcid);
3562 rsp->result = cpu_to_le16(result);
3563 rsp->flags = cpu_to_le16(0);
/* Parse the peer's Configuration Response (@rsp, @len bytes) and build
 * a new Configuration Request into @data, adopting the values the peer
 * accepted or proposed.  *result may be downgraded (e.g. to UNACCEPT);
 * returns -ECONNREFUSED on an irreconcilable option (final
 * length-return lines omitted in this listing).
 */
3568 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3569 void *data, size_t size, u16 *result)
3571 struct l2cap_conf_req *req = data;
3572 void *ptr = req->data;
3573 void *endptr = data + size;
3576 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3577 struct l2cap_conf_efs efs;
3579 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3581 while (len >= L2CAP_CONF_OPT_SIZE) {
3582 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3587 case L2CAP_CONF_MTU:
/* Too-small MTU: reject, but still echo our minimum back. */
3590 if (val < L2CAP_DEFAULT_MIN_MTU) {
3591 *result = L2CAP_CONF_UNACCEPT;
3592 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3595 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3599 case L2CAP_CONF_FLUSH_TO:
3602 chan->flush_to = val;
3603 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3604 chan->flush_to, endptr - ptr);
3607 case L2CAP_CONF_RFC:
3608 if (olen != sizeof(rfc))
3610 memcpy(&rfc, (void *)val, olen);
/* STATE2 devices never change their chosen mode. */
3611 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3612 rfc.mode != chan->mode)
3613 return -ECONNREFUSED;
3615 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3616 (unsigned long) &rfc, endptr - ptr);
3619 case L2CAP_CONF_EWS:
3622 chan->ack_win = min_t(u16, val, chan->ack_win);
3623 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3624 chan->tx_win, endptr - ptr);
3627 case L2CAP_CONF_EFS:
3628 if (olen != sizeof(efs))
3630 memcpy(&efs, (void *)val, olen);
3631 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3632 efs.stype != L2CAP_SERV_NOTRAFIC &&
3633 efs.stype != chan->local_stype)
3634 return -ECONNREFUSED;
3635 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3636 (unsigned long) &efs, endptr - ptr);
3639 case L2CAP_CONF_FCS:
3642 if (*result == L2CAP_CONF_PENDING)
3643 if (val == L2CAP_FCS_NONE)
3644 set_bit(CONF_RECV_NO_FCS,
/* Basic mode cannot be renegotiated to anything else. */
3650 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3651 return -ECONNREFUSED;
3653 chan->mode = rfc.mode;
3655 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3657 case L2CAP_MODE_ERTM:
3658 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3659 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3660 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3661 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3662 chan->ack_win = min_t(u16, chan->ack_win,
3665 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3666 chan->local_msdu = le16_to_cpu(efs.msdu);
3667 chan->local_sdu_itime =
3668 le32_to_cpu(efs.sdu_itime);
3669 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3670 chan->local_flush_to =
3671 le32_to_cpu(efs.flush_to);
3675 case L2CAP_MODE_STREAMING:
3676 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3680 req->dcid = cpu_to_le16(chan->dcid);
3681 req->flags = cpu_to_le16(0);
/* Build a minimal Configuration Response (no options) with the given
 * @result and @flags; the response's scid field carries our dcid.
 */
3686 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3687 u16 result, u16 flags)
3689 struct l2cap_conf_rsp *rsp = data;
3690 void *ptr = rsp->data;
3692 BT_DBG("chan %p", chan);
3694 rsp->scid = cpu_to_le16(chan->dcid);
3695 rsp->result = cpu_to_le16(result);
3696 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE Credit Based Connection Response for a channel
 * whose acceptance was postponed (e.g. pending authorization), using
 * the ident saved from the original request.
 */
3701 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3703 struct l2cap_le_conn_rsp rsp;
3704 struct l2cap_conn *conn = chan->conn;
3706 BT_DBG("chan %p", chan);
3708 rsp.dcid = cpu_to_le16(chan->scid);
3709 rsp.mtu = cpu_to_le16(chan->imtu);
3710 rsp.mps = cpu_to_le16(chan->mps);
3711 rsp.credits = cpu_to_le16(chan->rx_credits);
3712 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3714 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred BR/EDR Connection (or Create Channel) Response for
 * a channel whose acceptance was postponed, then kick off configuration
 * by sending our first Configuration Request if not already sent.
 */
3718 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3720 struct l2cap_conn_rsp rsp;
3721 struct l2cap_conn *conn = chan->conn;
3725 rsp.scid = cpu_to_le16(chan->dcid);
3726 rsp.dcid = cpu_to_le16(chan->scid);
3727 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3728 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3731 rsp_code = L2CAP_CREATE_CHAN_RSP;
3733 rsp_code = L2CAP_CONN_RSP;
3735 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3737 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only the first caller to set CONF_REQ_SENT sends the request. */
3739 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3742 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3743 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3744 chan->num_conf_req++;
/* Extract final RFC (and extended window) values from a successful
 * Configuration Response and commit them to the channel's timers,
 * MPS and ack window.  No-op for basic-mode channels.
 */
3747 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3751 /* Use sane default values in case a misbehaving remote device
3752 * did not send an RFC or extended window size option.
3754 u16 txwin_ext = chan->ack_win;
3755 struct l2cap_conf_rfc rfc = {
3757 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3758 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3759 .max_pdu_size = cpu_to_le16(chan->imtu),
3760 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3763 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3765 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3768 while (len >= L2CAP_CONF_OPT_SIZE) {
3769 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3774 case L2CAP_CONF_RFC:
3775 if (olen != sizeof(rfc))
3777 memcpy(&rfc, (void *)val, olen);
3779 case L2CAP_CONF_EWS:
3788 case L2CAP_MODE_ERTM:
3789 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3790 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3791 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Extended control uses the EWS window, otherwise the RFC one. */
3792 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3793 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3795 chan->ack_win = min_t(u16, chan->ack_win,
3798 case L2CAP_MODE_STREAMING:
3799 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request (matching ident), treat feature-mask discovery
 * as complete and proceed with starting queued connections.
 */
3803 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3804 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3807 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3809 if (cmd_len < sizeof(*rej))
3812 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3815 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3816 cmd->ident == conn->info_ident) {
3817 cancel_delayed_work(&conn->info_timer);
3819 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3820 conn->info_ident = 0;
3822 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening channel for
 * the PSM, validate security and the requested source CID, create the
 * new channel, and reply with success/pending/reject.  May also kick
 * off feature-mask discovery and the first Configuration Request.
 * Returns the new channel or NULL (labels/returns omitted in listing).
 */
3828 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3829 struct l2cap_cmd_hdr *cmd,
3830 u8 *data, u8 rsp_code, u8 amp_id)
3832 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3833 struct l2cap_conn_rsp rsp;
3834 struct l2cap_chan *chan = NULL, *pchan;
3835 int result, status = L2CAP_CS_NO_INFO;
3837 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3838 __le16 psm = req->psm;
3840 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3842 /* Check if we have socket listening on psm */
3843 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3844 &conn->hcon->dst, ACL_LINK);
3846 result = L2CAP_CR_BAD_PSM;
3850 mutex_lock(&conn->chan_lock);
3851 l2cap_chan_lock(pchan);
3853 /* Check if the ACL is secure enough (if not SDP) */
3854 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3855 !hci_conn_check_link_mode(conn->hcon)) {
3856 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3857 result = L2CAP_CR_SEC_BLOCK;
3861 result = L2CAP_CR_NO_MEM;
3863 /* Check for valid dynamic CID range (as per Erratum 3253) */
3864 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
3865 result = L2CAP_CR_INVALID_SCID;
3869 /* Check if we already have channel with that dcid */
3870 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3871 result = L2CAP_CR_SCID_IN_USE;
3875 chan = pchan->ops->new_connection(pchan);
3879 /* For certain devices (ex: HID mouse), support for authentication,
3880 * pairing and bonding is optional. For such devices, in order to avoid
3881 * keeping the ACL alive for too long after L2CAP disconnection, reset
3882 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3884 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3886 bacpy(&chan->src, &conn->hcon->src);
3887 bacpy(&chan->dst, &conn->hcon->dst);
3888 chan->src_type = bdaddr_src_type(conn->hcon);
3889 chan->dst_type = bdaddr_dst_type(conn->hcon);
3892 chan->local_amp_id = amp_id;
3894 __l2cap_chan_add(conn, chan);
3898 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
/* Save the request ident so a deferred response can reuse it. */
3900 chan->ident = cmd->ident;
3902 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3903 if (l2cap_chan_check_security(chan, false)) {
3904 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3905 l2cap_state_change(chan, BT_CONNECT2);
3906 result = L2CAP_CR_PEND;
3907 status = L2CAP_CS_AUTHOR_PEND;
3908 chan->ops->defer(chan);
3910 /* Force pending result for AMP controllers.
3911 * The connection will succeed after the
3912 * physical link is up.
3914 if (amp_id == AMP_ID_BREDR) {
3915 l2cap_state_change(chan, BT_CONFIG);
3916 result = L2CAP_CR_SUCCESS;
3918 l2cap_state_change(chan, BT_CONNECT2);
3919 result = L2CAP_CR_PEND;
3921 status = L2CAP_CS_NO_INFO;
3924 l2cap_state_change(chan, BT_CONNECT2);
3925 result = L2CAP_CR_PEND;
3926 status = L2CAP_CS_AUTHEN_PEND;
3929 l2cap_state_change(chan, BT_CONNECT2);
3930 result = L2CAP_CR_PEND;
3931 status = L2CAP_CS_NO_INFO;
3935 l2cap_chan_unlock(pchan);
3936 mutex_unlock(&conn->chan_lock);
3937 l2cap_chan_put(pchan);
3940 rsp.scid = cpu_to_le16(scid);
3941 rsp.dcid = cpu_to_le16(dcid);
3942 rsp.result = cpu_to_le16(result);
3943 rsp.status = cpu_to_le16(status);
3944 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Feature mask still unknown: start an Information Request now. */
3946 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3947 struct l2cap_info_req info;
3948 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3950 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3951 conn->info_ident = l2cap_get_ident(conn);
3953 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3955 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3956 sizeof(info), &info);
3959 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3960 result == L2CAP_CR_SUCCESS) {
3962 set_bit(CONF_REQ_SENT, &chan->conf_state);
3963 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3964 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3965 chan->num_conf_req++;
/* Signalling handler for L2CAP_CONN_REQ: validate the command length,
 * notify the management interface of the (now confirmed) connection,
 * then delegate the real work to l2cap_connect().
 */
3971 static int l2cap_connect_req(struct l2cap_conn *conn,
3972 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3974 struct hci_dev *hdev = conn->hcon->hdev;
3975 struct hci_conn *hcon = conn->hcon;
3977 if (cmd_len < sizeof(struct l2cap_conn_req))
/* Report device_connected to mgmt only once per hcon. */
3981 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3982 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3983 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3984 hci_dev_unlock(hdev);
3986 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response (or Create Channel Response): look up
 * the channel by scid (or by ident while still pending), then either
 * move to BT_CONFIG and send our Configuration Request, mark the
 * connection pending, or tear the channel down on refusal.
 */
3990 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3991 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3994 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3995 u16 scid, dcid, result, status;
3996 struct l2cap_chan *chan;
4000 if (cmd_len < sizeof(*rsp))
4003 scid = __le16_to_cpu(rsp->scid);
4004 dcid = __le16_to_cpu(rsp->dcid);
4005 result = __le16_to_cpu(rsp->result);
4006 status = __le16_to_cpu(rsp->status);
4008 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4009 dcid, scid, result, status);
4011 mutex_lock(&conn->chan_lock);
/* A zero scid means the response is matched by ident instead. */
4014 chan = __l2cap_get_chan_by_scid(conn, scid);
4020 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4029 l2cap_chan_lock(chan);
4032 case L2CAP_CR_SUCCESS:
4033 l2cap_state_change(chan, BT_CONFIG);
4036 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4038 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4041 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4042 l2cap_build_conf_req(chan, req, sizeof(req)), req);
4043 chan->num_conf_req++;
4047 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4051 l2cap_chan_del(chan, ECONNREFUSED);
4055 l2cap_chan_unlock(chan);
4058 mutex_unlock(&conn->chan_lock);
4063 static inline void set_default_fcs(struct l2cap_chan *chan)
4065 /* FCS is enabled only in ERTM or streaming mode, if one or both
/* CRC16 is the default unless the peer asked for no FCS. */
4068 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4069 chan->fcs = L2CAP_FCS_NONE;
4070 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4071 chan->fcs = L2CAP_FCS_CRC16;
/* Clear the local config-pending state and send a SUCCESS Configuration
 * Response (used once EFS negotiation is resolved).
 */
4074 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4075 u8 ident, u16 flags)
4077 struct l2cap_conn *conn = chan->conn;
4079 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4082 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4083 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4085 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4086 l2cap_build_conf_rsp(chan, data,
4087 L2CAP_CONF_SUCCESS, flags), data);
/* Send a Command Reject with reason "invalid CID", echoing the
 * offending source/destination CID pair back to the peer.
 */
4090 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4093 struct l2cap_cmd_rej_cid rej;
4095 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4096 rej.scid = __cpu_to_le16(scid);
4097 rej.dcid = __cpu_to_le16(dcid);
4099 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configuration Request: accumulate (possibly
 * fragmented) option data in chan->conf_req, and once the final
 * fragment arrives, parse it, send our Configuration Response, and —
 * when both directions are done — finish ERTM init and mark the
 * channel ready (return lines omitted in this listing).
 */
4102 static inline int l2cap_config_req(struct l2cap_conn *conn,
4103 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4106 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4109 struct l2cap_chan *chan;
4112 if (cmd_len < sizeof(*req))
4115 dcid = __le16_to_cpu(req->dcid);
4116 flags = __le16_to_cpu(req->flags);
4118 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4120 chan = l2cap_get_chan_by_scid(conn, dcid);
4122 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Config is only legal in BT_CONFIG or BT_CONNECT2. */
4126 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4127 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4132 /* Reject if config buffer is too small. */
4133 len = cmd_len - sizeof(*req);
4134 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4135 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4136 l2cap_build_conf_rsp(chan, rsp,
4137 L2CAP_CONF_REJECT, flags), rsp);
4142 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4143 chan->conf_len += len;
4145 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4146 /* Incomplete config. Send empty response. */
4147 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4148 l2cap_build_conf_rsp(chan, rsp,
4149 L2CAP_CONF_SUCCESS, flags), rsp);
4153 /* Complete config. */
4154 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4156 l2cap_send_disconn_req(chan, ECONNRESET);
4160 chan->ident = cmd->ident;
4161 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4162 chan->num_conf_rsp++;
4164 /* Reset config buffer. */
4167 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both input and output config done: bring the channel up. */
4170 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4171 set_default_fcs(chan);
4173 if (chan->mode == L2CAP_MODE_ERTM ||
4174 chan->mode == L2CAP_MODE_STREAMING)
4175 err = l2cap_ertm_init(chan);
4178 l2cap_send_disconn_req(chan, -err);
4180 l2cap_chan_ready(chan);
4185 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4187 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4188 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4189 chan->num_conf_req++;
4192 /* Got Conf Rsp PENDING from remote side and assume we sent
4193 Conf Rsp PENDING in the code above */
4194 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4195 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4197 /* check compatibility */
4199 /* Send rsp for BR/EDR channel */
4201 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4203 chan->ident = cmd->ident;
4207 l2cap_chan_unlock(chan);
/* Handle an incoming Configuration Response: on SUCCESS commit the
 * negotiated RFC values; on PENDING re-parse and possibly answer via
 * EFS/AMP paths; on UNACCEPT re-negotiate with a new request (bounded
 * by L2CAP_CONF_MAX_CONF_RSP); anything else tears the channel down.
 * When both directions complete, finish ERTM init and mark ready.
 */
4211 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4212 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4215 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4216 u16 scid, flags, result;
4217 struct l2cap_chan *chan;
4218 int len = cmd_len - sizeof(*rsp);
4221 if (cmd_len < sizeof(*rsp))
4224 scid = __le16_to_cpu(rsp->scid);
4225 flags = __le16_to_cpu(rsp->flags);
4226 result = __le16_to_cpu(rsp->result);
4228 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4231 chan = l2cap_get_chan_by_scid(conn, scid);
4236 case L2CAP_CONF_SUCCESS:
4237 l2cap_conf_rfc_get(chan, rsp->data, len);
4238 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4241 case L2CAP_CONF_PENDING:
4242 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4244 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4247 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4248 buf, sizeof(buf), &result);
4250 l2cap_send_disconn_req(chan, ECONNRESET);
/* BR/EDR can answer immediately; AMP waits for the
 * logical link before completing EFS config.
 */
4254 if (!chan->hs_hcon) {
4255 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4258 if (l2cap_check_efs(chan)) {
4259 amp_create_logical_link(chan);
4260 chan->ident = cmd->ident;
4266 case L2CAP_CONF_UNACCEPT:
4267 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4270 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4271 l2cap_send_disconn_req(chan, ECONNRESET);
4275 /* throw out any old stored conf requests */
4276 result = L2CAP_CONF_SUCCESS;
4277 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4278 req, sizeof(req), &result);
4280 l2cap_send_disconn_req(chan, ECONNRESET);
4284 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4285 L2CAP_CONF_REQ, len, req);
4286 chan->num_conf_req++;
4287 if (result != L2CAP_CONF_SUCCESS)
/* Any other result (e.g. REJECT/UNKNOWN): give up on the channel. */
4294 l2cap_chan_set_err(chan, ECONNRESET);
4296 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4297 l2cap_send_disconn_req(chan, ECONNRESET);
4301 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4304 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4306 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4307 set_default_fcs(chan);
4309 if (chan->mode == L2CAP_MODE_ERTM ||
4310 chan->mode == L2CAP_MODE_STREAMING)
4311 err = l2cap_ertm_init(chan);
4314 l2cap_send_disconn_req(chan, -err);
4316 l2cap_chan_ready(chan);
4320 l2cap_chan_unlock(chan);
/* Handle an incoming L2CAP Disconnect Request.  Looks up the channel
 * addressed by the remote's dcid, echoes a Disconnect Response with the
 * cid pair swapped to our perspective, then shuts down and deletes the
 * channel.  An unknown cid is answered with a Command Reject.
 */
4324 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4325 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4328 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4329 struct l2cap_disconn_rsp rsp;
4331 struct l2cap_chan *chan;
4333 if (cmd_len != sizeof(*req))
4336 scid = __le16_to_cpu(req->scid);
4337 dcid = __le16_to_cpu(req->dcid);
4339 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4341 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid, so look up by dcid */
4343 chan = __l2cap_get_chan_by_scid(conn, dcid);
4345 mutex_unlock(&conn->chan_lock);
4346 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4350 l2cap_chan_lock(chan);
4352 rsp.dcid = cpu_to_le16(chan->scid);
4353 rsp.scid = cpu_to_le16(chan->dcid);
4354 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4356 chan->ops->set_shutdown(chan);
/* Hold a reference across chan_del so ops->close sees a live chan */
4358 l2cap_chan_hold(chan);
4359 l2cap_chan_del(chan, ECONNRESET);
4361 l2cap_chan_unlock(chan);
4363 chan->ops->close(chan);
4364 l2cap_chan_put(chan);
4366 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Disconnect Response: the peer has confirmed
 * our earlier Disconnect Request, so tear down the local channel
 * (error 0 — this is a clean, requested shutdown).
 */
4371 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4372 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4375 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4377 struct l2cap_chan *chan;
4379 if (cmd_len != sizeof(*rsp))
4382 scid = __le16_to_cpu(rsp->scid);
4383 dcid = __le16_to_cpu(rsp->dcid);
4385 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4387 mutex_lock(&conn->chan_lock);
4389 chan = __l2cap_get_chan_by_scid(conn, scid);
4391 mutex_unlock(&conn->chan_lock);
4395 l2cap_chan_lock(chan);
/* Hold a reference so ops->close runs on a still-valid channel */
4397 l2cap_chan_hold(chan);
4398 l2cap_chan_del(chan, 0);
4400 l2cap_chan_unlock(chan);
4402 chan->ops->close(chan);
4403 l2cap_chan_put(chan);
4405 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Information Request.  Answers a feature-mask
 * query with our supported features (ERTM/streaming always; extended
 * flow spec and window only when A2MP is a local fixed channel), a
 * fixed-channel query with the local fixed-channel bitmap, and any
 * other type with IR_NOTSUPP.
 */
4410 static inline int l2cap_information_req(struct l2cap_conn *conn,
4411 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4414 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4417 if (cmd_len != sizeof(*req))
4420 type = __le16_to_cpu(req->type);
4422 BT_DBG("type 0x%4.4x", type);
4424 if (type == L2CAP_IT_FEAT_MASK) {
4426 u32 feat_mask = l2cap_feat_mask;
4427 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4428 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4429 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4431 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended flow/window only advertised when A2MP is available */
4433 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4434 feat_mask |= L2CAP_FEAT_EXT_FLOW
4435 | L2CAP_FEAT_EXT_WINDOW;
4437 put_unaligned_le32(feat_mask, rsp->data);
4438 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4440 } else if (type == L2CAP_IT_FIXED_CHAN) {
4442 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4444 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4445 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Fixed channel map: first octet is ours, remaining 7 are reserved */
4446 rsp->data[0] = conn->local_fixed_chan;
4447 memset(rsp->data + 1, 0, 7);
4448 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4451 struct l2cap_info_rsp rsp;
4452 rsp.type = cpu_to_le16(type);
4453 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4454 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an incoming L2CAP Information Response to a query we sent.
 * Validates the ident against conn->info_ident (these exchanges are
 * not bound to a channel), records the remote feature mask or fixed
 * channel map, optionally chains a fixed-channel query, and finally
 * kicks off any connections that were waiting on this information.
 */
4461 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4462 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4465 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4468 if (cmd_len < sizeof(*rsp))
4471 type = __le16_to_cpu(rsp->type);
4472 result = __le16_to_cpu(rsp->result);
4474 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4476 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4477 if (cmd->ident != conn->info_ident ||
4478 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4481 cancel_delayed_work(&conn->info_timer);
/* A failed query still completes the info phase; proceed anyway */
4483 if (result != L2CAP_IR_SUCCESS) {
4484 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4485 conn->info_ident = 0;
4487 l2cap_conn_start(conn);
4493 case L2CAP_IT_FEAT_MASK:
4494 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer supports fixed channels, query its channel map next */
4496 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4497 struct l2cap_info_req req;
4498 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4500 conn->info_ident = l2cap_get_ident(conn);
4502 l2cap_send_cmd(conn, conn->info_ident,
4503 L2CAP_INFO_REQ, sizeof(req), &req);
4505 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4506 conn->info_ident = 0;
4508 l2cap_conn_start(conn);
4512 case L2CAP_IT_FIXED_CHAN:
4513 conn->remote_fixed_chan = rsp->data[0];
4514 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4515 conn->info_ident = 0;
4517 l2cap_conn_start(conn);
/* Handle an incoming L2CAP Create Channel Request (AMP).  Controller
 * id 0 (AMP_ID_BREDR) is treated as an ordinary BR/EDR connect; a
 * non-zero id is validated as an up-and-running AMP controller before
 * the channel is created and bound to the AMP's logical link.  Invalid
 * controller ids are answered with CR_BAD_AMP.
 */
4524 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4525 struct l2cap_cmd_hdr *cmd,
4526 u16 cmd_len, void *data)
4528 struct l2cap_create_chan_req *req = data;
4529 struct l2cap_create_chan_rsp rsp;
4530 struct l2cap_chan *chan;
4531 struct hci_dev *hdev;
4534 if (cmd_len != sizeof(*req))
/* Creating AMP channels requires A2MP to be a local fixed channel */
4537 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4540 psm = le16_to_cpu(req->psm);
4541 scid = le16_to_cpu(req->scid);
4543 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4545 /* For controller id 0 make BR/EDR connection */
4546 if (req->amp_id == AMP_ID_BREDR) {
4547 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4552 /* Validate AMP controller id */
4553 hdev = hci_dev_get(req->amp_id);
4557 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4562 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4565 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4566 struct hci_conn *hs_hcon;
4568 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
/* No high-speed link to the peer: reject with an invalid-cid error */
4572 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4577 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4579 mgr->bredr_chan = chan;
4580 chan->hs_hcon = hs_hcon;
/* AMP links provide their own integrity checking; disable L2CAP FCS */
4581 chan->fcs = L2CAP_FCS_NONE;
4582 conn->mtu = hdev->block_mtu;
/* Error path: report a bad AMP controller id to the requester */
4591 rsp.scid = cpu_to_le16(scid);
4592 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4593 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4595 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Send an L2CAP Move Channel Request asking the peer to move this
 * channel to the given destination AMP controller, remembering the
 * command ident for matching the response and arming the move timer.
 */
4601 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4603 struct l2cap_move_chan_req req;
4606 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4608 ident = l2cap_get_ident(chan->conn);
4609 chan->ident = ident;
4611 req.icid = cpu_to_le16(chan->scid);
4612 req.dest_amp_id = dest_amp_id;
4614 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4617 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an L2CAP Move Channel Response with the given result, reusing
 * the ident saved from the peer's Move Channel Request.
 */
4620 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4622 struct l2cap_move_chan_rsp rsp;
4624 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4626 rsp.icid = cpu_to_le16(chan->dcid);
4627 rsp.result = cpu_to_le16(result);
4629 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
/* Send an L2CAP Move Channel Confirm for this channel with the given
 * result, allocating a fresh ident and arming the move timer while we
 * wait for the confirm response.
 */
4633 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4635 struct l2cap_move_chan_cfm cfm;
4637 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4639 chan->ident = l2cap_get_ident(chan->conn);
4641 cfm.icid = cpu_to_le16(chan->scid);
4642 cfm.result = cpu_to_le16(result);
4644 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4647 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
/* Send an UNCONFIRMED Move Channel Confirm for a bare icid — used when
 * no matching channel could be found but the spec still requires a
 * confirm to be sent.
 */
4650 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4652 struct l2cap_move_chan_cfm cfm;
4654 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4656 cfm.icid = cpu_to_le16(icid);
4657 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4659 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
/* Send an L2CAP Move Channel Confirm Response for the given icid,
 * echoing the ident of the confirm being acknowledged.
 */
4663 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4666 struct l2cap_move_chan_cfm_rsp rsp;
4668 BT_DBG("icid 0x%4.4x", icid);
4670 rsp.icid = cpu_to_le16(icid);
4671 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Detach the channel from its AMP logical link by clearing the
 * high-speed channel/connection pointers.  Actual link release is
 * still a placeholder (see comment below).
 */
4674 static void __release_logical_link(struct l2cap_chan *chan)
4676 chan->hs_hchan = NULL;
4677 chan->hs_hcon = NULL;
4679 /* Placeholder - release the logical link */
/* React to a failed AMP logical link setup.  If the channel never
 * reached BT_CONNECTED this was a create-channel failure, so the
 * channel is disconnected.  Otherwise the failure aborts an in-flight
 * move: a responder answers MR_NOT_SUPP, an initiator cleans up and
 * confirms the move as UNCONFIRMED.
 */
4682 static void l2cap_logical_fail(struct l2cap_chan *chan)
4684 /* Logical link setup failed */
4685 if (chan->state != BT_CONNECTED) {
4686 /* Create channel failure, disconnect */
4687 l2cap_send_disconn_req(chan, ECONNRESET);
4691 switch (chan->move_role) {
4692 case L2CAP_MOVE_ROLE_RESPONDER:
4693 l2cap_move_done(chan);
4694 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4696 case L2CAP_MOVE_ROLE_INITIATOR:
4697 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4698 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4699 /* Remote has only sent pending or
4700 * success responses, clean up
4702 l2cap_move_done(chan);
4705 /* Other amp move states imply that the move
4706 * has already aborted
4708 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Finish creating a channel once its AMP logical link is up: bind the
 * hci_chan, send the deferred EFS Configure Response, and if config
 * input is already done, initialize ERTM and mark the channel ready.
 */
4713 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4714 struct hci_chan *hchan)
4716 struct l2cap_conf_rsp rsp;
4718 chan->hs_hchan = hchan;
4719 chan->hs_hcon->l2cap_data = chan->conn;
4721 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4723 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4726 set_default_fcs(chan);
4728 err = l2cap_ertm_init(chan);
4730 l2cap_send_disconn_req(chan, -err);
4732 l2cap_chan_ready(chan);
/* Advance the channel-move state machine after the AMP logical link
 * came up: depending on the current move_state and role, either wait
 * for the peer's success response, defer due to local busy, send the
 * move confirm (initiator), or send the move response (responder).
 * Any unexpected state releases the logical link and stabilizes.
 */
4736 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4737 struct hci_chan *hchan)
4739 chan->hs_hcon = hchan->conn;
4740 chan->hs_hcon->l2cap_data = chan->conn;
4742 BT_DBG("move_state %d", chan->move_state);
4744 switch (chan->move_state) {
4745 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4746 /* Move confirm will be sent after a success
4747 * response is received
4749 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4751 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4752 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4753 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4754 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4755 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4756 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4757 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4758 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4759 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4763 /* Move was not in expected state, free the channel */
4764 __release_logical_link(chan);
4766 chan->move_state = L2CAP_MOVE_STABLE;
4770 /* Call with chan locked */
/* Completion callback for AMP logical link setup.  On failure, run the
 * failure path and release the link.  On success, dispatch: a channel
 * not yet BT_CONNECTED finishes creation (unless it lives on BR/EDR,
 * in which case the logical link is ignored), otherwise this completes
 * a channel move.
 */
4771 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4774 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4777 l2cap_logical_fail(chan);
4778 __release_logical_link(chan);
4782 if (chan->state != BT_CONNECTED) {
4783 /* Ignore logical link if channel is on BR/EDR */
4784 if (chan->local_amp_id != AMP_ID_BREDR)
4785 l2cap_logical_finish_create(chan, hchan);
4787 l2cap_logical_finish_move(chan, hchan);
/* Initiate moving this channel to the other controller type.  From
 * BR/EDR a move is only started when policy prefers AMP, and physical
 * link setup must happen first (placeholder).  From AMP the move back
 * to BR/EDR starts immediately with a Move Channel Request to
 * controller id 0.
 */
4791 void l2cap_move_start(struct l2cap_chan *chan)
4793 BT_DBG("chan %p", chan);
4795 if (chan->local_amp_id == AMP_ID_BREDR) {
4796 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4798 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4799 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4800 /* Placeholder - start physical link setup */
4802 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4803 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4805 l2cap_move_setup(chan);
/* dest_amp_id 0 == AMP_ID_BREDR: request a move back to BR/EDR */
4806 l2cap_send_move_chan_req(chan, 0);
/* Continue channel creation after physical link setup.  For an
 * outgoing channel (BT_CONNECT) either send the Create Channel Request
 * on the AMP or fall back to a plain BR/EDR Connect Request.  For an
 * incoming channel, send the Create Channel Response (success or
 * CR_NO_MEM) and, on success, move to BT_CONFIG and start
 * configuration.
 */
4810 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4811 u8 local_amp_id, u8 remote_amp_id)
4813 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4814 local_amp_id, remote_amp_id)
4816 chan->fcs = L2CAP_FCS_NONE;
4818 /* Outgoing channel on AMP */
4819 if (chan->state == BT_CONNECT) {
4820 if (result == L2CAP_CR_SUCCESS) {
4821 chan->local_amp_id = local_amp_id;
4822 l2cap_send_create_chan_req(chan, remote_amp_id);
4824 /* Revert to BR/EDR connect */
4825 l2cap_send_conn_req(chan);
4831 /* Incoming channel on AMP */
4832 if (__l2cap_no_conn_pending(chan)) {
4833 struct l2cap_conn_rsp rsp;
4835 rsp.scid = cpu_to_le16(chan->dcid);
4836 rsp.dcid = cpu_to_le16(chan->scid);
4838 if (result == L2CAP_CR_SUCCESS) {
4839 /* Send successful response */
4840 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4841 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4843 /* Send negative response */
4844 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4845 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4848 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4851 if (result == L2CAP_CR_SUCCESS) {
4852 l2cap_state_change(chan, BT_CONFIG);
4853 set_bit(CONF_REQ_SENT, &chan->conf_state);
4854 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4856 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4857 chan->num_conf_req++;
/* Initiator side: prepare the channel for moving, record the local
 * destination controller, and send the Move Channel Request to the
 * remote controller id.
 */
4862 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4865 l2cap_move_setup(chan);
4866 chan->move_id = local_amp_id;
4867 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4869 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: answer a move based on logical-link readiness.  If
 * the link is already BT_CONNECTED, bind it and respond MR_SUCCESS;
 * otherwise wait for logical link confirmation, or respond
 * MR_NOT_ALLOWED when no link is available.  Note the hci_chan lookup
 * is still a placeholder (hchan stays NULL here).
 */
4872 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4874 struct hci_chan *hchan = NULL;
4876 /* Placeholder - get hci_chan for logical link */
4879 if (hchan->state == BT_CONNECTED) {
4880 /* Logical link is ready to go */
4881 chan->hs_hcon = hchan->conn;
4882 chan->hs_hcon->l2cap_data = chan->conn;
4883 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4884 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4886 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4888 /* Wait for logical link to be ready */
4889 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4892 /* Logical link not available */
4893 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort an in-progress move.  A responder still owes the peer a Move
 * Channel Response (BAD_ID for -EINVAL, NOT_ALLOWED otherwise).  The
 * move state is reset to stable and ERTM transmission is resumed.
 */
4897 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4899 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4901 if (result == -EINVAL)
4902 rsp_result = L2CAP_MR_BAD_ID;
4904 rsp_result = L2CAP_MR_NOT_ALLOWED;
4906 l2cap_send_move_chan_rsp(chan, rsp_result);
4909 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4910 chan->move_state = L2CAP_MOVE_STABLE;
4912 /* Restart data transmission */
4913 l2cap_ertm_send(chan);
4916 /* Invoke with locked chan */
/* Completion callback for AMP physical link setup.  Disconnecting or
 * closed channels are ignored.  A channel not yet connected continues
 * creation; a failed result cancels the move; otherwise the move
 * proceeds according to the channel's move role.
 */
4917 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4919 u8 local_amp_id = chan->local_amp_id;
4920 u8 remote_amp_id = chan->remote_amp_id;
4922 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4923 chan, result, local_amp_id, remote_amp_id);
4925 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4926 l2cap_chan_unlock(chan);
4930 if (chan->state != BT_CONNECTED) {
4931 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4932 } else if (result != L2CAP_MR_SUCCESS) {
4933 l2cap_do_move_cancel(chan, result);
4935 switch (chan->move_role) {
4936 case L2CAP_MOVE_ROLE_INITIATOR:
4937 l2cap_do_move_initiate(chan, local_amp_id,
4940 case L2CAP_MOVE_ROLE_RESPONDER:
4941 l2cap_do_move_respond(chan, result);
4944 l2cap_do_move_cancel(chan, result);
/* Handle an incoming L2CAP Move Channel Request.  Validates that the
 * channel is movable (dynamic cid, not BR/EDR-only policy, ERTM or
 * streaming mode), that the destination differs from the current
 * controller and names a usable AMP, and resolves move collisions by
 * bd_addr comparison.  Accepted moves set up responder state and reply
 * with SUCCESS/PEND; all other cases reply with the appropriate error.
 */
4950 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4951 struct l2cap_cmd_hdr *cmd,
4952 u16 cmd_len, void *data)
4954 struct l2cap_move_chan_req *req = data;
4955 struct l2cap_move_chan_rsp rsp;
4956 struct l2cap_chan *chan;
4958 u16 result = L2CAP_MR_NOT_ALLOWED;
4960 if (cmd_len != sizeof(*req))
4963 icid = le16_to_cpu(req->icid);
4965 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
/* Moves are only meaningful when A2MP is available locally */
4967 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4970 chan = l2cap_get_chan_by_dcid(conn, icid);
4972 rsp.icid = cpu_to_le16(icid);
4973 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4974 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4979 chan->ident = cmd->ident;
/* Only dynamic-cid ERTM/streaming channels without a BR/EDR-only
 * policy may be moved
 */
4981 if (chan->scid < L2CAP_CID_DYN_START ||
4982 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4983 (chan->mode != L2CAP_MODE_ERTM &&
4984 chan->mode != L2CAP_MODE_STREAMING)) {
4985 result = L2CAP_MR_NOT_ALLOWED;
4986 goto send_move_response;
4989 if (chan->local_amp_id == req->dest_amp_id) {
4990 result = L2CAP_MR_SAME_ID;
4991 goto send_move_response;
4994 if (req->dest_amp_id != AMP_ID_BREDR) {
4995 struct hci_dev *hdev;
4996 hdev = hci_dev_get(req->dest_amp_id);
4997 if (!hdev || hdev->dev_type != HCI_AMP ||
4998 !test_bit(HCI_UP, &hdev->flags)) {
5002 result = L2CAP_MR_BAD_ID;
5003 goto send_move_response;
5008 /* Detect a move collision. Only send a collision response
5009 * if this side has "lost", otherwise proceed with the move.
5010 * The winner has the larger bd_addr.
5012 if ((__chan_is_moving(chan) ||
5013 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5014 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5015 result = L2CAP_MR_COLLISION;
5016 goto send_move_response;
5019 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5020 l2cap_move_setup(chan);
5021 chan->move_id = req->dest_amp_id;
5024 if (req->dest_amp_id == AMP_ID_BREDR) {
5025 /* Moving to BR/EDR */
5026 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5027 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5028 result = L2CAP_MR_PEND;
5030 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5031 result = L2CAP_MR_SUCCESS;
5034 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5035 /* Placeholder - uncomment when amp functions are available */
5036 /*amp_accept_physical(chan, req->dest_amp_id);*/
5037 result = L2CAP_MR_PEND;
5041 l2cap_send_move_chan_rsp(chan, result);
5043 l2cap_chan_unlock(chan);
/* Continue an initiator-side move after a SUCCESS or PEND Move Channel
 * Response.  Advances the move state machine (waiting on logical link,
 * remote success, or local busy) and sends the Move Channel Confirm
 * when both the logical link and the remote response allow it.  An
 * unknown icid is answered with an UNCONFIRMED confirm; an unexpected
 * state aborts the move.
 */
5048 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5050 struct l2cap_chan *chan;
5051 struct hci_chan *hchan = NULL;
5053 chan = l2cap_get_chan_by_scid(conn, icid);
5055 l2cap_send_move_chan_cfm_icid(conn, icid);
/* PEND restarts the (extended) response timer; SUCCESS clears it */
5059 __clear_chan_timer(chan);
5060 if (result == L2CAP_MR_PEND)
5061 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5063 switch (chan->move_state) {
5064 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5065 /* Move confirm will be sent when logical link
5068 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5070 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5071 if (result == L2CAP_MR_PEND) {
5073 } else if (test_bit(CONN_LOCAL_BUSY,
5074 &chan->conn_state)) {
5075 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5077 /* Logical link is up or moving to BR/EDR,
5080 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5081 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5084 case L2CAP_MOVE_WAIT_RSP:
5086 if (result == L2CAP_MR_SUCCESS) {
5087 /* Remote is ready, send confirm immediately
5088 * after logical link is ready
5090 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5092 /* Both logical link and move success
5093 * are required to confirm
5095 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5098 /* Placeholder - get hci_chan for logical link */
5100 /* Logical link not available */
5101 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5105 /* If the logical link is not yet connected, do not
5106 * send confirmation.
5108 if (hchan->state != BT_CONNECTED)
5111 /* Logical link is already ready to go */
5113 chan->hs_hcon = hchan->conn;
5114 chan->hs_hcon->l2cap_data = chan->conn;
5116 if (result == L2CAP_MR_SUCCESS) {
5117 /* Can confirm now */
5118 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5120 /* Now only need move success
5123 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5126 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5129 /* Any other amp move state means the move failed. */
5130 chan->move_id = chan->local_amp_id;
5131 l2cap_move_done(chan);
5132 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5135 l2cap_chan_unlock(chan);
/* Handle a failed Move Channel Response.  On a COLLISION result the
 * initiator switches to the responder role; any other failure cancels
 * the move.  Either way an UNCONFIRMED Move Channel Confirm is sent
 * (by icid alone if the channel cannot be located).
 */
5138 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5141 struct l2cap_chan *chan;
5143 chan = l2cap_get_chan_by_ident(conn, ident);
5145 /* Could not locate channel, icid is best guess */
5146 l2cap_send_move_chan_cfm_icid(conn, icid);
5150 __clear_chan_timer(chan);
5152 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5153 if (result == L2CAP_MR_COLLISION) {
5154 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5156 /* Cleanup - cancel move */
5157 chan->move_id = chan->local_amp_id;
5158 l2cap_move_done(chan);
5162 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5164 l2cap_chan_unlock(chan);
/* Handle an incoming L2CAP Move Channel Response: SUCCESS/PEND results
 * continue the move, anything else takes the failure path.
 */
5167 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5168 struct l2cap_cmd_hdr *cmd,
5169 u16 cmd_len, void *data)
5171 struct l2cap_move_chan_rsp *rsp = data;
5174 if (cmd_len != sizeof(*rsp))
5177 icid = le16_to_cpu(rsp->icid);
5178 result = le16_to_cpu(rsp->result);
5180 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5182 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5183 l2cap_move_continue(conn, icid, result);
5185 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle an incoming L2CAP Move Channel Confirm.  If this side was
 * waiting for confirmation, commit (or roll back) the controller
 * switch and complete the move.  Per spec a Confirm Response is sent
 * even when the icid does not match any channel.
 */
5190 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5191 struct l2cap_cmd_hdr *cmd,
5192 u16 cmd_len, void *data)
5194 struct l2cap_move_chan_cfm *cfm = data;
5195 struct l2cap_chan *chan;
5198 if (cmd_len != sizeof(*cfm))
5201 icid = le16_to_cpu(cfm->icid);
5202 result = le16_to_cpu(cfm->result);
5204 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5206 chan = l2cap_get_chan_by_dcid(conn, icid);
5208 /* Spec requires a response even if the icid was not found */
5209 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5213 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5214 if (result == L2CAP_MC_CONFIRMED) {
/* Commit the switch; drop the logical link on return to BR/EDR */
5215 chan->local_amp_id = chan->move_id;
5216 if (chan->local_amp_id == AMP_ID_BREDR)
5217 __release_logical_link(chan);
5219 chan->move_id = chan->local_amp_id;
5222 l2cap_move_done(chan);
5225 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5227 l2cap_chan_unlock(chan);
/* Handle an incoming L2CAP Move Channel Confirm Response: the peer has
 * acknowledged our confirm, so commit the controller switch, release
 * the logical link when landing back on BR/EDR, and finish the move.
 */
5232 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5233 struct l2cap_cmd_hdr *cmd,
5234 u16 cmd_len, void *data)
5236 struct l2cap_move_chan_cfm_rsp *rsp = data;
5237 struct l2cap_chan *chan;
5240 if (cmd_len != sizeof(*rsp))
5243 icid = le16_to_cpu(rsp->icid);
5245 BT_DBG("icid 0x%4.4x", icid);
5247 chan = l2cap_get_chan_by_scid(conn, icid);
5251 __clear_chan_timer(chan);
5253 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5254 chan->local_amp_id = chan->move_id;
5256 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5257 __release_logical_link(chan);
5259 l2cap_move_done(chan);
5262 l2cap_chan_unlock(chan);
/* Handle an LE Connection Parameter Update Request.  Only valid when
 * we are the master.  The parameters are range-checked with
 * hci_check_conn_params(); accepted values are pushed to the
 * controller via hci_le_conn_update() and reported to the management
 * interface, rejected ones get CONN_PARAM_REJECTED.
 */
5267 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5268 struct l2cap_cmd_hdr *cmd,
5269 u16 cmd_len, u8 *data)
5271 struct hci_conn *hcon = conn->hcon;
5272 struct l2cap_conn_param_update_req *req;
5273 struct l2cap_conn_param_update_rsp rsp;
5274 u16 min, max, latency, to_multiplier;
/* Only the master may be asked to update connection parameters */
5277 if (hcon->role != HCI_ROLE_MASTER)
5280 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5283 req = (struct l2cap_conn_param_update_req *) data;
5284 min = __le16_to_cpu(req->min);
5285 max = __le16_to_cpu(req->max);
5286 latency = __le16_to_cpu(req->latency);
5287 to_multiplier = __le16_to_cpu(req->to_multiplier);
5289 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5290 min, max, latency, to_multiplier);
5292 memset(&rsp, 0, sizeof(rsp));
5294 err = hci_check_conn_params(min, max, latency, to_multiplier);
5296 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5298 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5300 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: apply to the controller and notify mgmt of the result */
5306 store_hint = hci_le_conn_update(hcon, min, max, latency,
5308 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5309 store_hint, min, max, latency,
/* Handle an LE Credit Based Connection Response.  Validates the
 * returned mtu/mps (>= 23) and dynamic dcid range, then on SUCCESS
 * records the remote parameters and readies the channel.  An
 * authentication/encryption failure retries at a higher security
 * level via SMP (unless MITM protection is already in place); any
 * other result deletes the channel.
 */
5317 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5318 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5321 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5322 struct hci_conn *hcon = conn->hcon;
5323 u16 dcid, mtu, mps, credits, result;
5324 struct l2cap_chan *chan;
5327 if (cmd_len < sizeof(*rsp))
5330 dcid = __le16_to_cpu(rsp->dcid);
5331 mtu = __le16_to_cpu(rsp->mtu);
5332 mps = __le16_to_cpu(rsp->mps);
5333 credits = __le16_to_cpu(rsp->credits);
5334 result = __le16_to_cpu(rsp->result);
/* 23 is the minimum LE data size; dcid must be in the LE dynamic range */
5336 if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5337 dcid < L2CAP_CID_DYN_START ||
5338 dcid > L2CAP_CID_LE_DYN_END))
5341 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5342 dcid, mtu, mps, credits, result);
5344 mutex_lock(&conn->chan_lock);
5346 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5354 l2cap_chan_lock(chan);
5357 case L2CAP_CR_LE_SUCCESS:
/* Reject a dcid the peer has already handed out to another channel */
5358 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5366 chan->remote_mps = mps;
5367 chan->tx_credits = credits;
5368 l2cap_chan_ready(chan);
5371 case L2CAP_CR_LE_AUTHENTICATION:
5372 case L2CAP_CR_LE_ENCRYPTION:
5373 /* If we already have MITM protection we can't do
5376 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5377 l2cap_chan_del(chan, ECONNREFUSED);
5381 sec_level = hcon->sec_level + 1;
5382 if (chan->sec_level < sec_level)
5383 chan->sec_level = sec_level;
5385 /* We'll need to send a new Connect Request */
5386 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5388 smp_conn_security(hcon, chan->sec_level);
5392 l2cap_chan_del(chan, ECONNREFUSED);
5396 l2cap_chan_unlock(chan);
5399 mutex_unlock(&conn->chan_lock);
/* Dispatch one BR/EDR signalling command to its handler based on the
 * command code.  Echo requests are answered inline; unknown codes are
 * logged (the caller sends the Command Reject).
 */
5404 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5405 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5410 switch (cmd->code) {
5411 case L2CAP_COMMAND_REJ:
5412 l2cap_command_rej(conn, cmd, cmd_len, data);
5415 case L2CAP_CONN_REQ:
5416 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5419 case L2CAP_CONN_RSP:
5420 case L2CAP_CREATE_CHAN_RSP:
5421 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5424 case L2CAP_CONF_REQ:
5425 err = l2cap_config_req(conn, cmd, cmd_len, data);
5428 case L2CAP_CONF_RSP:
5429 l2cap_config_rsp(conn, cmd, cmd_len, data);
5432 case L2CAP_DISCONN_REQ:
5433 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5436 case L2CAP_DISCONN_RSP:
5437 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5440 case L2CAP_ECHO_REQ:
/* Echo: reflect the request payload straight back */
5441 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5444 case L2CAP_ECHO_RSP:
5447 case L2CAP_INFO_REQ:
5448 err = l2cap_information_req(conn, cmd, cmd_len, data);
5451 case L2CAP_INFO_RSP:
5452 l2cap_information_rsp(conn, cmd, cmd_len, data);
5455 case L2CAP_CREATE_CHAN_REQ:
5456 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5459 case L2CAP_MOVE_CHAN_REQ:
5460 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5463 case L2CAP_MOVE_CHAN_RSP:
5464 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5467 case L2CAP_MOVE_CHAN_CFM:
5468 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5471 case L2CAP_MOVE_CHAN_CFM_RSP:
5472 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5476 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an LE Credit Based Connection Request.  Finds a listening
 * channel for the psm, checks SMP security, validates the requested
 * scid (LE dynamic range, not already in use), then creates the child
 * channel, initializes LE flow control and either defers (SoF
 * DEFER_SETUP) or readies the channel.  All paths except the deferred
 * one finish by sending an LE Connection Response.
 */
5484 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5485 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5488 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5489 struct l2cap_le_conn_rsp rsp;
5490 struct l2cap_chan *chan, *pchan;
5491 u16 dcid, scid, credits, mtu, mps;
5495 if (cmd_len != sizeof(*req))
5498 scid = __le16_to_cpu(req->scid);
5499 mtu = __le16_to_cpu(req->mtu);
5500 mps = __le16_to_cpu(req->mps);
/* 23 is the minimum permitted LE MTU/MPS */
5505 if (mtu < 23 || mps < 23)
5508 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5511 /* Check if we have socket listening on psm */
5512 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5513 &conn->hcon->dst, LE_LINK);
5515 result = L2CAP_CR_LE_BAD_PSM;
5520 mutex_lock(&conn->chan_lock);
5521 l2cap_chan_lock(pchan);
5523 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5525 result = L2CAP_CR_LE_AUTHENTICATION;
5527 goto response_unlock;
5530 /* Check for valid dynamic CID range */
5531 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5532 result = L2CAP_CR_LE_INVALID_SCID;
5534 goto response_unlock;
5537 /* Check if we already have channel with that dcid */
5538 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5539 result = L2CAP_CR_LE_SCID_IN_USE;
5541 goto response_unlock;
5544 chan = pchan->ops->new_connection(pchan);
5546 result = L2CAP_CR_LE_NO_MEM;
5547 goto response_unlock;
5550 bacpy(&chan->src, &conn->hcon->src);
5551 bacpy(&chan->dst, &conn->hcon->dst);
5552 chan->src_type = bdaddr_src_type(conn->hcon);
5553 chan->dst_type = bdaddr_dst_type(conn->hcon);
5557 chan->remote_mps = mps;
5559 __l2cap_chan_add(conn, chan);
5561 l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5564 credits = chan->rx_credits;
5566 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5568 chan->ident = cmd->ident;
5570 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5571 l2cap_state_change(chan, BT_CONNECT2);
5572 /* The following result value is actually not defined
5573 * for LE CoC but we use it to let the function know
5574 * that it should bail out after doing its cleanup
5575 * instead of sending a response.
5577 result = L2CAP_CR_PEND;
5578 chan->ops->defer(chan);
5580 l2cap_chan_ready(chan);
5581 result = L2CAP_CR_LE_SUCCESS;
5585 l2cap_chan_unlock(pchan);
5586 mutex_unlock(&conn->chan_lock);
5587 l2cap_chan_put(pchan);
/* Deferred setup: the response is sent later by the defer path */
5589 if (result == L2CAP_CR_PEND)
5594 rsp.mtu = cpu_to_le16(chan->imtu);
5595 rsp.mps = cpu_to_le16(chan->mps);
5601 rsp.dcid = cpu_to_le16(dcid);
5602 rsp.credits = cpu_to_le16(credits);
5603 rsp.result = cpu_to_le16(result);
5605 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet: add the peer's credits to
 * the channel's tx_credits (disconnecting on overflow past
 * LE_FLOWCTL_MAX_CREDITS) and resume any transmission that was
 * blocked waiting for credits.
 */
5610 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5611 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5614 struct l2cap_le_credits *pkt;
5615 struct l2cap_chan *chan;
5616 u16 cid, credits, max_credits;
5618 if (cmd_len != sizeof(*pkt))
5621 pkt = (struct l2cap_le_credits *) data;
5622 cid = __le16_to_cpu(pkt->cid);
5623 credits = __le16_to_cpu(pkt->credits);
5625 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5627 chan = l2cap_get_chan_by_dcid(conn, cid);
/* Credits above this would overflow the u16 total; treat as fatal */
5631 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5632 if (credits > max_credits) {
5633 BT_ERR("LE credits overflow");
5634 l2cap_send_disconn_req(chan, ECONNRESET);
5635 l2cap_chan_unlock(chan);
5637 /* Return 0 so that we don't trigger an unnecessary
5638 * command reject packet.
5643 chan->tx_credits += credits;
5645 /* Resume sending */
5646 l2cap_le_flowctl_send(chan);
5648 if (chan->tx_credits)
5649 chan->ops->resume(chan);
5651 l2cap_chan_unlock(chan);
/* Handle an LE Command Reject: the peer rejected a command we sent,
 * so find the channel waiting on that ident and delete it with
 * ECONNREFUSED.
 */
5656 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5657 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5660 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5661 struct l2cap_chan *chan;
5663 if (cmd_len < sizeof(*rej))
5666 mutex_lock(&conn->chan_lock);
5668 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5672 l2cap_chan_lock(chan);
5673 l2cap_chan_del(chan, ECONNREFUSED);
5674 l2cap_chan_unlock(chan);
5677 mutex_unlock(&conn->chan_lock);
/* Dispatch one LE signalling command to its handler based on the
 * command code.  Unknown codes are logged (the caller sends the
 * Command Reject).
 */
5681 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5682 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5687 switch (cmd->code) {
5688 case L2CAP_COMMAND_REJ:
5689 l2cap_le_command_rej(conn, cmd, cmd_len, data);
5692 case L2CAP_CONN_PARAM_UPDATE_REQ:
5693 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5696 case L2CAP_CONN_PARAM_UPDATE_RSP:
5699 case L2CAP_LE_CONN_RSP:
5700 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5703 case L2CAP_LE_CONN_REQ:
5704 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5707 case L2CAP_LE_CREDITS:
5708 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5711 case L2CAP_DISCONN_REQ:
5712 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5715 case L2CAP_DISCONN_RSP:
5716 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5720 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb received on the LE signaling channel: validate the
 * command header, dispatch via l2cap_le_sig_cmd(), and send a Command
 * Reject (NOT_UNDERSTOOD) if the handler reports an error.
 */
5728 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5729 struct sk_buff *skb)
5731 struct hci_conn *hcon = conn->hcon;
5732 struct l2cap_cmd_hdr *cmd;
/* LE signaling is only valid on an LE link. */
5736 if (hcon->type != LE_LINK)
5739 if (skb->len < L2CAP_CMD_HDR_SIZE)
5742 cmd = (void *) skb->data;
5743 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5745 len = le16_to_cpu(cmd->len);
5747 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* On LE exactly one command per PDU; length must match and ident
 * must be non-zero.
 */
5749 if (len != skb->len || !cmd->ident) {
5750 BT_DBG("corrupted command");
5754 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5756 struct l2cap_cmd_rej_unk rej;
5758 BT_ERR("Wrong link type (%d)", err);
5760 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5761 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process an skb received on the BR/EDR signaling channel. Unlike LE,
 * a single PDU may carry several concatenated commands, hence the loop
 * over the buffer.
 */
5769 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5770 struct sk_buff *skb)
5772 struct hci_conn *hcon = conn->hcon;
5773 u8 *data = skb->data;
5775 struct l2cap_cmd_hdr cmd;
/* Give raw sockets a copy of the signaling traffic first. */
5778 l2cap_raw_recv(conn, skb);
5780 if (hcon->type != ACL_LINK)
5783 while (len >= L2CAP_CMD_HDR_SIZE) {
5785 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5786 data += L2CAP_CMD_HDR_SIZE;
5787 len -= L2CAP_CMD_HDR_SIZE;
5789 cmd_len = le16_to_cpu(cmd.len);
5791 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* Stop parsing if the command claims more bytes than remain. */
5794 if (cmd_len > len || !cmd.ident) {
5795 BT_DBG("corrupted command");
5799 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5801 struct l2cap_cmd_rej_unk rej;
5803 BT_ERR("Wrong link type (%d)", err);
5805 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5806 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify the CRC16 FCS trailer of an ERTM/streaming frame. The FCS is
 * computed over the L2CAP header (which sits just before skb->data
 * after the pull) plus the payload.
 */
5818 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5820 u16 our_fcs, rcv_fcs;
5823 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5824 hdr_size = L2CAP_EXT_HDR_SIZE;
5826 hdr_size = L2CAP_ENH_HDR_SIZE;
5828 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS bytes off, then read them from just past the
 * (now shortened) payload.
 */
5829 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5830 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5831 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5833 if (our_fcs != rcv_fcs)
/* Acknowledge the peer with whatever frame is appropriate for the
 * current state: RNR when locally busy, otherwise pending I-frames,
 * falling back to an RR s-frame if the F-bit still needs to go out.
 */
5839 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5841 struct l2cap_ctrl control;
5843 BT_DBG("chan %p", chan);
5845 memset(&control, 0, sizeof(control));
5848 control.reqseq = chan->buffer_seq;
5849 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5851 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5852 control.super = L2CAP_SUPER_RNR;
5853 l2cap_send_sframe(chan, &control);
5856 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5857 chan->unacked_frames > 0)
5858 __set_retrans_timer(chan);
5860 /* Send pending iframes */
5861 l2cap_ertm_send(chan);
5863 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5864 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5865 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5868 control.super = L2CAP_SUPER_RR;
5869 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list, tracking the tail via *last_frag
 * so appends stay O(1), and keep skb's aggregate length accounting in
 * sync.
 */
5873 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5874 struct sk_buff **last_frag)
5876 /* skb->len reflects data in skb as well as all fragments
5877 * skb->data_len reflects only data in fragments
5879 if (!skb_has_frag_list(skb))
5880 skb_shinfo(skb)->frag_list = new_frag;
5882 new_frag->next = NULL;
5884 (*last_frag)->next = new_frag;
5885 *last_frag = new_frag;
5887 skb->len += new_frag->len;
5888 skb->data_len += new_frag->len;
5889 skb->truesize += new_frag->truesize;
/* Reassemble an ERTM/streaming SDU from segmented I-frames according
 * to the SAR bits, delivering completed SDUs via chan->ops->recv().
 * The error/cleanup path frees a partially built chan->sdu.
 * NOTE(review): break/return/brace lines are elided in this excerpt.
 */
5892 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5893 struct l2cap_ctrl *control)
5897 switch (control->sar) {
5898 case L2CAP_SAR_UNSEGMENTED:
/* Whole SDU in one frame: deliver it directly. */
5902 err = chan->ops->recv(chan, skb);
5905 case L2CAP_SAR_START:
/* First segment carries a 16-bit SDU length prefix. */
5909 if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
5912 chan->sdu_len = get_unaligned_le16(skb->data);
5913 skb_pull(skb, L2CAP_SDULEN_SIZE);
5915 if (chan->sdu_len > chan->imtu) {
5920 if (skb->len >= chan->sdu_len)
5924 chan->sdu_last_frag = skb;
5930 case L2CAP_SAR_CONTINUE:
5934 append_skb_frag(chan->sdu, skb,
5935 &chan->sdu_last_frag);
/* More data than announced is a protocol violation. */
5938 if (chan->sdu->len >= chan->sdu_len)
5948 append_skb_frag(chan->sdu, skb,
5949 &chan->sdu_last_frag);
/* END segment: total length must match the announced SDU length. */
5952 if (chan->sdu->len != chan->sdu_len)
5955 err = chan->ops->recv(chan, chan->sdu);
5958 /* Reassembly complete */
5960 chan->sdu_last_frag = NULL;
5968 kfree_skb(chan->sdu);
5970 chan->sdu_last_frag = NULL;
/* Re-segment queued data after an MTU change (body elided in this
 * excerpt; presumably a stub or placeholder — confirm upstream).
 */
5977 static int l2cap_resegment(struct l2cap_chan *chan)
/* Notify the ERTM TX state machine that the local receiver has become
 * busy (busy != 0) or cleared its busy condition. No-op for non-ERTM
 * channels.
 */
5983 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5987 if (chan->mode != L2CAP_MODE_ERTM)
5990 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5991 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver frames to the reassembler in sequence
 * order starting at buffer_seq until a gap (missing txseq) is hit.
 * When the queue empties, return to the RECV state and ack the peer.
 */
5994 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5997 /* Pass sequential frames to l2cap_reassemble_sdu()
5998 * until a gap is encountered.
6001 BT_DBG("chan %p", chan);
6003 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6004 struct sk_buff *skb;
6005 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6006 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6008 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6013 skb_unlink(skb, &chan->srej_q);
6014 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6015 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6020 if (skb_queue_empty(&chan->srej_q)) {
6021 chan->rx_state = L2CAP_RX_STATE_RECV;
6022 l2cap_send_ack(chan);
/* Handle a received SREJ s-frame: validate reqseq, enforce the retry
 * limit, and retransmit the single requested I-frame, with P/F-bit
 * bookkeeping per the ERTM state tables.
 */
6028 static void l2cap_handle_srej(struct l2cap_chan *chan,
6029 struct l2cap_ctrl *control)
6031 struct sk_buff *skb;
6033 BT_DBG("chan %p, control %p", chan, control);
/* An SREJ for the next (not yet sent) seq is invalid. */
6035 if (control->reqseq == chan->next_tx_seq) {
6036 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6037 l2cap_send_disconn_req(chan, ECONNRESET);
6041 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6044 BT_DBG("Seq %d not available for retransmission",
/* max_tx == 0 means unlimited retries. */
6049 if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6050 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6051 l2cap_send_disconn_req(chan, ECONNRESET);
6055 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6057 if (control->poll) {
6058 l2cap_pass_to_tx(chan, control);
6060 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6061 l2cap_retransmit(chan, control);
6062 l2cap_ertm_send(chan);
6064 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6065 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6066 chan->srej_save_reqseq = control->reqseq;
6069 l2cap_pass_to_tx_fbit(chan, control);
6071 if (control->final) {
/* Retransmit unless this F-bit answers the SREJ we already
 * acted on for the same reqseq.
 */
6072 if (chan->srej_save_reqseq != control->reqseq ||
6073 !test_and_clear_bit(CONN_SREJ_ACT,
6075 l2cap_retransmit(chan, control);
6077 l2cap_retransmit(chan, control);
6078 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6079 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6080 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ s-frame: validate reqseq, enforce the retry
 * limit, then retransmit all unacked I-frames from reqseq onward.
 */
6086 static void l2cap_handle_rej(struct l2cap_chan *chan,
6087 struct l2cap_ctrl *control)
6089 struct sk_buff *skb;
6091 BT_DBG("chan %p, control %p", chan, control);
6093 if (control->reqseq == chan->next_tx_seq) {
6094 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6095 l2cap_send_disconn_req(chan, ECONNRESET);
6099 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6101 if (chan->max_tx && skb &&
6102 bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6103 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6104 l2cap_send_disconn_req(chan, ECONNRESET);
6108 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6110 l2cap_pass_to_tx(chan, control);
6112 if (control->final) {
/* Skip the retransmit if this F-bit answers a REJ we have
 * already acted upon.
 */
6113 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6114 l2cap_retransmit_all(chan, control);
6116 l2cap_retransmit_all(chan, control);
6117 l2cap_ertm_send(chan);
6118 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6119 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to the RX window and
 * (if in SREJ_SENT state) the outstanding SREJ list. Returns one of
 * the L2CAP_TXSEQ_* classifications used by the RX state machines:
 * expected, duplicate, unexpected (gap), or invalid — where "invalid
 * ignore" applies only when the TX window is small enough that the
 * frame can be safely dropped (see the double-poll note below).
 */
6123 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6125 BT_DBG("chan %p, txseq %d", chan, txseq);
6127 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6128 chan->expected_tx_seq);
6130 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6131 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6133 /* See notes below regarding "double poll" and
6136 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6137 BT_DBG("Invalid/Ignore - after SREJ");
6138 return L2CAP_TXSEQ_INVALID_IGNORE;
6140 BT_DBG("Invalid - in window after SREJ sent");
6141 return L2CAP_TXSEQ_INVALID;
6145 if (chan->srej_list.head == txseq) {
6146 BT_DBG("Expected SREJ");
6147 return L2CAP_TXSEQ_EXPECTED_SREJ;
6150 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6151 BT_DBG("Duplicate SREJ - txseq already stored");
6152 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6155 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6156 BT_DBG("Unexpected SREJ - not requested");
6157 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6161 if (chan->expected_tx_seq == txseq) {
6162 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6164 BT_DBG("Invalid - txseq outside tx window");
6165 return L2CAP_TXSEQ_INVALID;
6168 return L2CAP_TXSEQ_EXPECTED;
/* Earlier than expected within the window => already received. */
6172 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6173 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6174 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6175 return L2CAP_TXSEQ_DUPLICATE;
6178 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6179 /* A source of invalid packets is a "double poll" condition,
6180 * where delays cause us to send multiple poll packets. If
6181 * the remote stack receives and processes both polls,
6182 * sequence numbers can wrap around in such a way that a
6183 * resent frame has a sequence number that looks like new data
6184 * with a sequence gap. This would trigger an erroneous SREJ
6187 * Fortunately, this is impossible with a tx window that's
6188 * less than half of the maximum sequence number, which allows
6189 * invalid frames to be safely ignored.
6191 * With tx window sizes greater than half of the tx window
6192 * maximum, the frame is invalid and cannot be ignored. This
6193 * causes a disconnect.
6196 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6197 BT_DBG("Invalid/Ignore - txseq outside tx window");
6198 return L2CAP_TXSEQ_INVALID_IGNORE;
6200 BT_DBG("Invalid - txseq outside tx window");
6201 return L2CAP_TXSEQ_INVALID;
6204 BT_DBG("Unexpected - txseq indicates missing frames");
6205 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM RX state machine handler for the normal RECV state. Dispatches
 * on the RX event (I-frame, RR, RNR, REJ, SREJ) and, for I-frames, on
 * the txseq classification. skb_in_use tracks whether ownership of the
 * skb was transferred (queued/delivered); otherwise it is freed at the
 * end. NOTE(review): brace/assignment lines are elided in this excerpt.
 */
6209 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6210 struct l2cap_ctrl *control,
6211 struct sk_buff *skb, u8 event)
6214 bool skb_in_use = false;
6216 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6220 case L2CAP_EV_RECV_IFRAME:
6221 switch (l2cap_classify_txseq(chan, control->txseq)) {
6222 case L2CAP_TXSEQ_EXPECTED:
6223 l2cap_pass_to_tx(chan, control);
/* While locally busy, expected frames are dropped; the
 * peer will retransmit once busy clears.
 */
6225 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6226 BT_DBG("Busy, discarding expected seq %d",
6231 chan->expected_tx_seq = __next_seq(chan,
6234 chan->buffer_seq = chan->expected_tx_seq;
6237 err = l2cap_reassemble_sdu(chan, skb, control);
6241 if (control->final) {
6242 if (!test_and_clear_bit(CONN_REJ_ACT,
6243 &chan->conn_state)) {
6245 l2cap_retransmit_all(chan, control);
6246 l2cap_ertm_send(chan);
6250 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6251 l2cap_send_ack(chan);
6253 case L2CAP_TXSEQ_UNEXPECTED:
6254 l2cap_pass_to_tx(chan, control);
6256 /* Can't issue SREJ frames in the local busy state.
6257 * Drop this frame, it will be seen as missing
6258 * when local busy is exited.
6260 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6261 BT_DBG("Busy, discarding unexpected seq %d",
6266 /* There was a gap in the sequence, so an SREJ
6267 * must be sent for each missing frame. The
6268 * current frame is stored for later use.
6270 skb_queue_tail(&chan->srej_q, skb);
6272 BT_DBG("Queued %p (queue len %d)", skb,
6273 skb_queue_len(&chan->srej_q));
6275 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6276 l2cap_seq_list_clear(&chan->srej_list);
6277 l2cap_send_srej(chan, control->txseq);
6279 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6281 case L2CAP_TXSEQ_DUPLICATE:
6282 l2cap_pass_to_tx(chan, control);
6284 case L2CAP_TXSEQ_INVALID_IGNORE:
6286 case L2CAP_TXSEQ_INVALID:
6288 l2cap_send_disconn_req(chan, ECONNRESET);
6292 case L2CAP_EV_RECV_RR:
6293 l2cap_pass_to_tx(chan, control);
6294 if (control->final) {
6295 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6297 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6298 !__chan_is_moving(chan)) {
6300 l2cap_retransmit_all(chan, control);
6303 l2cap_ertm_send(chan);
6304 } else if (control->poll) {
6305 l2cap_send_i_or_rr_or_rnr(chan);
6307 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6308 &chan->conn_state) &&
6309 chan->unacked_frames)
6310 __set_retrans_timer(chan);
6312 l2cap_ertm_send(chan);
6315 case L2CAP_EV_RECV_RNR:
6316 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6317 l2cap_pass_to_tx(chan, control);
6318 if (control && control->poll) {
6319 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6320 l2cap_send_rr_or_rnr(chan, 0);
/* Remote is busy: stop retransmitting until it recovers. */
6322 __clear_retrans_timer(chan);
6323 l2cap_seq_list_clear(&chan->retrans_list);
6325 case L2CAP_EV_RECV_REJ:
6326 l2cap_handle_rej(chan, control);
6328 case L2CAP_EV_RECV_SREJ:
6329 l2cap_handle_srej(chan, control);
/* Ownership was not transferred anywhere above: free the skb. */
6335 if (skb && !skb_in_use) {
6336 BT_DBG("Freeing %p", skb);
/* ERTM RX state machine handler for the SREJ_SENT state (we have
 * outstanding selective-reject requests). Frames are queued in srej_q
 * for in-order reassembly once the requested retransmissions arrive.
 * NOTE(review): brace/assignment lines are elided in this excerpt.
 */
6343 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6344 struct l2cap_ctrl *control,
6345 struct sk_buff *skb, u8 event)
6348 u16 txseq = control->txseq;
6349 bool skb_in_use = false;
6351 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6355 case L2CAP_EV_RECV_IFRAME:
6356 switch (l2cap_classify_txseq(chan, txseq)) {
6357 case L2CAP_TXSEQ_EXPECTED:
6358 /* Keep frame for reassembly later */
6359 l2cap_pass_to_tx(chan, control);
6360 skb_queue_tail(&chan->srej_q, skb);
6362 BT_DBG("Queued %p (queue len %d)", skb,
6363 skb_queue_len(&chan->srej_q));
6365 chan->expected_tx_seq = __next_seq(chan, txseq);
6367 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* Head of the SREJ list arrived: pop it and try to drain
 * the queued frames in order.
 */
6368 l2cap_seq_list_pop(&chan->srej_list);
6370 l2cap_pass_to_tx(chan, control);
6371 skb_queue_tail(&chan->srej_q, skb);
6373 BT_DBG("Queued %p (queue len %d)", skb,
6374 skb_queue_len(&chan->srej_q));
6376 err = l2cap_rx_queued_iframes(chan);
6381 case L2CAP_TXSEQ_UNEXPECTED:
6382 /* Got a frame that can't be reassembled yet.
6383 * Save it for later, and send SREJs to cover
6384 * the missing frames.
6386 skb_queue_tail(&chan->srej_q, skb);
6388 BT_DBG("Queued %p (queue len %d)", skb,
6389 skb_queue_len(&chan->srej_q));
6391 l2cap_pass_to_tx(chan, control);
6392 l2cap_send_srej(chan, control->txseq);
6394 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6395 /* This frame was requested with an SREJ, but
6396 * some expected retransmitted frames are
6397 * missing. Request retransmission of missing
6400 skb_queue_tail(&chan->srej_q, skb);
6402 BT_DBG("Queued %p (queue len %d)", skb,
6403 skb_queue_len(&chan->srej_q));
6405 l2cap_pass_to_tx(chan, control);
6406 l2cap_send_srej_list(chan, control->txseq);
6408 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6409 /* We've already queued this frame. Drop this copy. */
6410 l2cap_pass_to_tx(chan, control);
6412 case L2CAP_TXSEQ_DUPLICATE:
6413 /* Expecting a later sequence number, so this frame
6414 * was already received. Ignore it completely.
6417 case L2CAP_TXSEQ_INVALID_IGNORE:
6419 case L2CAP_TXSEQ_INVALID:
6421 l2cap_send_disconn_req(chan, ECONNRESET);
6425 case L2CAP_EV_RECV_RR:
6426 l2cap_pass_to_tx(chan, control);
6427 if (control->final) {
6428 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6430 if (!test_and_clear_bit(CONN_REJ_ACT,
6431 &chan->conn_state)) {
6433 l2cap_retransmit_all(chan, control);
6436 l2cap_ertm_send(chan);
6437 } else if (control->poll) {
6438 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6439 &chan->conn_state) &&
6440 chan->unacked_frames) {
6441 __set_retrans_timer(chan);
/* Answer the poll and re-issue the most recent SREJ. */
6444 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6445 l2cap_send_srej_tail(chan);
6447 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6448 &chan->conn_state) &&
6449 chan->unacked_frames)
6450 __set_retrans_timer(chan);
6452 l2cap_send_ack(chan);
6455 case L2CAP_EV_RECV_RNR:
6456 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6457 l2cap_pass_to_tx(chan, control);
6458 if (control->poll) {
6459 l2cap_send_srej_tail(chan);
6461 struct l2cap_ctrl rr_control;
6462 memset(&rr_control, 0, sizeof(rr_control));
6463 rr_control.sframe = 1;
6464 rr_control.super = L2CAP_SUPER_RR;
6465 rr_control.reqseq = chan->buffer_seq;
6466 l2cap_send_sframe(chan, &rr_control);
6470 case L2CAP_EV_RECV_REJ:
6471 l2cap_handle_rej(chan, control);
6473 case L2CAP_EV_RECV_SREJ:
6474 l2cap_handle_srej(chan, control);
6478 if (skb && !skb_in_use) {
6479 BT_DBG("Freeing %p", skb);
/* Complete an AMP channel move: return to the RECV state, pick the MTU
 * from the new transport (AMP block MTU vs. ACL MTU), and re-segment
 * pending data for it.
 */
6486 static int l2cap_finish_move(struct l2cap_chan *chan)
6488 BT_DBG("chan %p", chan);
6490 chan->rx_state = L2CAP_RX_STATE_RECV;
6493 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6495 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6497 return l2cap_resegment(chan);
/* RX handler for the WAIT_P state (channel move: waiting for a frame
 * with the P-bit). On poll: process the ack, rewind the TX queue to
 * reqseq, finish the move, and answer with the F-bit set. Non-I-frame
 * events are then re-run through the normal RECV handler.
 */
6500 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6501 struct l2cap_ctrl *control,
6502 struct sk_buff *skb, u8 event)
6506 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6512 l2cap_process_reqseq(chan, control->reqseq);
6514 if (!skb_queue_empty(&chan->tx_q))
6515 chan->tx_send_head = skb_peek(&chan->tx_q);
6517 chan->tx_send_head = NULL;
6519 /* Rewind next_tx_seq to the point expected
6522 chan->next_tx_seq = control->reqseq;
6523 chan->unacked_frames = 0;
6525 err = l2cap_finish_move(chan);
6529 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6530 l2cap_send_i_or_rr_or_rnr(chan);
6532 if (event == L2CAP_EV_RECV_IFRAME)
/* S-frames still need normal processing in the RECV handler. */
6535 return l2cap_rx_state_recv(chan, control, NULL, event);
/* RX handler for the WAIT_F state (channel move: waiting for a frame
 * with the F-bit). Mirrors WAIT_P: on final, rewind TX state to
 * reqseq, adopt the new transport MTU, re-segment, then process the
 * frame through the normal RECV handler.
 */
6538 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6539 struct l2cap_ctrl *control,
6540 struct sk_buff *skb, u8 event)
6544 if (!control->final)
6547 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6549 chan->rx_state = L2CAP_RX_STATE_RECV;
6550 l2cap_process_reqseq(chan, control->reqseq);
6552 if (!skb_queue_empty(&chan->tx_q))
6553 chan->tx_send_head = skb_peek(&chan->tx_q);
6555 chan->tx_send_head = NULL;
6557 /* Rewind next_tx_seq to the point expected
6560 chan->next_tx_seq = control->reqseq;
6561 chan->unacked_frames = 0;
6564 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6566 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6568 err = l2cap_resegment(chan);
6571 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Return true if reqseq acknowledges a frame that has been sent but
 * not yet acked, i.e. it lies within [expected_ack_seq, next_tx_seq]
 * under modular sequence arithmetic.
 */
6576 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6578 /* Make sure reqseq is for a packet that has been sent but not acked */
6581 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6582 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM RX dispatch: validate reqseq, then route the event to
 * the handler for the channel's current rx_state. An invalid reqseq is
 * a protocol error and disconnects the channel.
 */
6585 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6586 struct sk_buff *skb, u8 event)
6590 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6591 control, skb, event, chan->rx_state);
6593 if (__valid_reqseq(chan, control->reqseq)) {
6594 switch (chan->rx_state) {
6595 case L2CAP_RX_STATE_RECV:
6596 err = l2cap_rx_state_recv(chan, control, skb, event);
6598 case L2CAP_RX_STATE_SREJ_SENT:
6599 err = l2cap_rx_state_srej_sent(chan, control, skb,
6602 case L2CAP_RX_STATE_WAIT_P:
6603 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6605 case L2CAP_RX_STATE_WAIT_F:
6606 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6613 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6614 control->reqseq, chan->next_tx_seq,
6615 chan->expected_ack_seq);
6616 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode RX: deliver only in-sequence frames; anything else is
 * silently dropped (streaming mode never retransmits). The RX window
 * is resynchronized to the received txseq either way.
 */
6622 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6623 struct sk_buff *skb)
6625 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6628 if (l2cap_classify_txseq(chan, control->txseq) ==
6629 L2CAP_TXSEQ_EXPECTED) {
6630 l2cap_pass_to_tx(chan, control);
6632 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6633 __next_seq(chan, chan->buffer_seq));
6635 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6637 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence frame: drop it and any partial SDU. */
6640 kfree_skb(chan->sdu);
6643 chan->sdu_last_frag = NULL;
6647 BT_DBG("Freeing %p", skb);
/* Resync the window so the next expected txseq follows this one. */
6652 chan->last_acked_seq = control->txseq;
6653 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM/streaming data frames: unpack the control
 * field, verify FCS, validate lengths and F/P bits, then hand I-frames
 * and S-frames to the appropriate RX machinery.
 */
6658 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6660 struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
6664 __unpack_control(chan, skb);
6669 * We can just drop the corrupted I-frame here.
6670 * Receiver will miss it and start proper recovery
6671 * procedures and ask for retransmission.
6673 if (l2cap_check_fcs(chan, skb))
/* Compute the payload length net of SDU-length prefix and FCS. */
6676 if (!control->sframe && control->sar == L2CAP_SAR_START)
6677 len -= L2CAP_SDULEN_SIZE;
6679 if (chan->fcs == L2CAP_FCS_CRC16)
6680 len -= L2CAP_FCS_SIZE;
6682 if (len > chan->mps) {
6683 l2cap_send_disconn_req(chan, ECONNRESET);
/* Apply any attached socket filter to reliable-mode data. */
6687 if ((chan->mode == L2CAP_MODE_ERTM ||
6688 chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
6691 if (!control->sframe) {
6694 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6695 control->sar, control->reqseq, control->final,
6698 /* Validate F-bit - F=0 always valid, F=1 only
6699 * valid in TX WAIT_F
6701 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6704 if (chan->mode != L2CAP_MODE_STREAMING) {
6705 event = L2CAP_EV_RECV_IFRAME;
6706 err = l2cap_rx(chan, control, skb, event);
6708 err = l2cap_stream_rx(chan, control, skb);
6712 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the 2-bit "super" field to the corresponding RX event. */
6714 const u8 rx_func_to_event[4] = {
6715 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6716 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6719 /* Only I-frames are expected in streaming mode */
6720 if (chan->mode == L2CAP_MODE_STREAMING)
6723 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6724 control->reqseq, control->final, control->poll,
6728 BT_ERR("Trailing bytes: %d in sframe", len);
6729 l2cap_send_disconn_req(chan, ECONNRESET);
6733 /* Validate F and P bits */
6734 if (control->final && (control->poll ||
6735 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6738 event = rx_func_to_event[control->super];
6739 if (l2cap_rx(chan, control, skb, event))
6740 l2cap_send_disconn_req(chan, ECONNRESET);
/* Replenish the peer's credits for our LE CoC channel: compute how
 * many credits to return (enough for a full iMTU worth of MPS-sized
 * PDUs, plus one) and send an LE Flow Control Credit packet.
 */
6750 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6752 struct l2cap_conn *conn = chan->conn;
6753 struct l2cap_le_credits pkt;
6756 return_credits = ((chan->imtu / chan->mps) + 1) - chan->rx_credits;
6758 if (!return_credits)
6761 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6763 chan->rx_credits += return_credits;
6765 pkt.cid = cpu_to_le16(chan->scid);
6766 pkt.credits = cpu_to_le16(return_credits);
6768 chan->ident = l2cap_get_ident(conn);
6770 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Deliver a fully reassembled LE CoC SDU upward and, once reception is
 * confirmed, return credits to the sender.
 */
6773 static int l2cap_le_recv(struct l2cap_chan *chan, struct sk_buff *skb)
6777 BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
6779 /* Wait recv to confirm reception before updating the credits */
6780 err = chan->ops->recv(chan, skb);
6782 /* Update credits whenever an SDU is received */
6783 l2cap_chan_le_send_credits(chan);
/* Receive one LE CoC PDU: enforce credit accounting, then either start
 * a new SDU (first PDU carries the SDU length prefix) or append a
 * fragment to the in-progress one, delivering when complete.
 * NOTE(review): some branch/return lines are elided in this excerpt.
 */
6788 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
/* Receiving a PDU without credits is a peer protocol violation. */
6792 if (!chan->rx_credits) {
6793 BT_ERR("No credits to receive LE L2CAP data");
6794 l2cap_send_disconn_req(chan, ECONNRESET);
6798 if (chan->imtu < skb->len) {
6799 BT_ERR("Too big LE L2CAP PDU");
6804 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6806 /* Update if remote had run out of credits, this should only happens
6807 * if the remote is not using the entire MPS.
6809 if (!chan->rx_credits)
6810 l2cap_chan_le_send_credits(chan);
/* First PDU of an SDU: parse the 16-bit SDU length prefix. */
6817 sdu_len = get_unaligned_le16(skb->data);
6818 skb_pull(skb, L2CAP_SDULEN_SIZE);
6820 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6821 sdu_len, skb->len, chan->imtu);
6823 if (sdu_len > chan->imtu) {
6824 BT_ERR("Too big LE L2CAP SDU length received");
6829 if (skb->len > sdu_len) {
6830 BT_ERR("Too much LE L2CAP data received");
/* Unsegmented SDU: deliver immediately. */
6835 if (skb->len == sdu_len)
6836 return l2cap_le_recv(chan, skb);
6839 chan->sdu_len = sdu_len;
6840 chan->sdu_last_frag = skb;
6842 /* Detect if remote is not able to use the selected MPS */
6843 if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
6844 u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
6846 /* Adjust the number of credits */
6847 BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
6848 chan->mps = mps_len;
6849 l2cap_chan_le_send_credits(chan);
6855 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6856 chan->sdu->len, skb->len, chan->sdu_len);
6858 if (chan->sdu->len + skb->len > chan->sdu_len) {
6859 BT_ERR("Too much LE L2CAP data received");
6864 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6867 if (chan->sdu->len == chan->sdu_len) {
6868 err = l2cap_le_recv(chan, chan->sdu);
6871 chan->sdu_last_frag = NULL;
/* Error path: discard the partially assembled SDU. */
6879 kfree_skb(chan->sdu);
6881 chan->sdu_last_frag = NULL;
6885 /* We can't return an error here since we took care of the skb
6886 * freeing internally. An error return would cause the caller to
6887 * do a double-free of the skb.
/* Route an incoming data skb to the channel identified by its source
 * CID, dispatching on the channel mode (LE flow control, basic,
 * ERTM/streaming). Unknown CIDs and unconnected channels drop the skb.
 */
6892 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6893 struct sk_buff *skb)
6895 struct l2cap_chan *chan;
6897 chan = l2cap_get_chan_by_scid(conn, cid);
/* A2MP data may create its channel on first use. */
6899 if (cid == L2CAP_CID_A2MP) {
6900 chan = a2mp_channel_create(conn, skb);
6906 l2cap_chan_lock(chan);
6908 BT_DBG("unknown cid 0x%4.4x", cid);
6909 /* Drop packet and return */
6915 BT_DBG("chan %p, len %d", chan, skb->len);
6917 /* If we receive data on a fixed channel before the info req/rsp
6918 * procdure is done simply assume that the channel is supported
6919 * and mark it as ready.
6921 if (chan->chan_type == L2CAP_CHAN_FIXED)
6922 l2cap_chan_ready(chan);
6924 if (chan->state != BT_CONNECTED)
6927 switch (chan->mode) {
6928 case L2CAP_MODE_LE_FLOWCTL:
6929 if (l2cap_le_data_rcv(chan, skb) < 0)
6934 case L2CAP_MODE_BASIC:
6935 /* If socket recv buffers overflows we drop data here
6936 * which is *bad* because L2CAP has to be reliable.
6937 * But we don't have any other choice. L2CAP doesn't
6938 * provide flow control mechanism. */
6940 if (chan->imtu < skb->len) {
6941 BT_ERR("Dropping L2CAP data: receive buffer overflow");
6945 if (!chan->ops->recv(chan, skb))
6949 case L2CAP_MODE_ERTM:
6950 case L2CAP_MODE_STREAMING:
6951 l2cap_data_rcv(chan, skb);
6955 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6963 l2cap_chan_unlock(chan);
/* Deliver a connectionless (UCD) frame to the global channel listening
 * on the given PSM, stashing the remote address and PSM in the skb's
 * control block for recvmsg's msg_name.
 */
6966 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6967 struct sk_buff *skb)
6969 struct hci_conn *hcon = conn->hcon;
6970 struct l2cap_chan *chan;
6972 if (hcon->type != ACL_LINK)
6975 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6980 BT_DBG("chan %p, len %d", chan, skb->len);
6982 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6985 if (chan->imtu < skb->len)
6988 /* Store remote BD_ADDR and PSM for msg_name */
6989 bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
6990 bt_cb(skb)->l2cap.psm = psm;
/* recv() consumed the skb; drop the lookup reference and return. */
6992 if (!chan->ops->recv(chan, skb)) {
6993 l2cap_chan_put(chan);
6998 l2cap_chan_put(chan);
/* Demultiplex a complete L2CAP frame by CID: signaling channels,
 * connectionless data, or a dynamic/fixed data channel. Frames that
 * arrive before the HCI link is fully connected are queued for later.
 */
7003 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7005 struct l2cap_hdr *lh = (void *) skb->data;
7006 struct hci_conn *hcon = conn->hcon;
7010 if (hcon->state != BT_CONNECTED) {
7011 BT_DBG("queueing pending rx skb");
7012 skb_queue_tail(&conn->pending_rx, skb);
7016 skb_pull(skb, L2CAP_HDR_SIZE);
7017 cid = __le16_to_cpu(lh->cid);
7018 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length. */
7020 if (len != skb->len) {
7025 /* Since we can't actively block incoming LE connections we must
7026 * at least ensure that we ignore incoming data from them.
7028 if (hcon->type == LE_LINK &&
7029 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
7030 bdaddr_dst_type(hcon))) {
7035 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7038 case L2CAP_CID_SIGNALING:
7039 l2cap_sig_channel(conn, skb);
7042 case L2CAP_CID_CONN_LESS:
7043 psm = get_unaligned((__le16 *) skb->data);
7044 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7045 l2cap_conless_channel(conn, psm, skb);
7048 case L2CAP_CID_LE_SIGNALING:
7049 l2cap_le_sig_channel(conn, skb);
7053 l2cap_data_channel(conn, cid, skb);
/* Workqueue handler: replay frames that were queued while the HCI link
 * was still coming up.
 */
7058 static void process_pending_rx(struct work_struct *work)
7060 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7062 struct sk_buff *skb;
7066 while ((skb = skb_dequeue(&conn->pending_rx)))
7067 l2cap_recv_frame(conn, skb);
/* Allocate and initialize the per-HCI-connection L2CAP state
 * (l2cap_conn): HCI channel, MTU from the link type, fixed-channel
 * capabilities, locks, lists, and deferred work items.
 */
7070 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7072 struct l2cap_conn *conn = hcon->l2cap_data;
7073 struct hci_chan *hchan;
7078 hchan = hci_chan_create(hcon);
7082 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan before bailing out. */
7084 hci_chan_del(hchan);
7088 kref_init(&conn->ref);
7089 hcon->l2cap_data = conn;
7090 conn->hcon = hci_conn_get(hcon);
7091 conn->hchan = hchan;
7093 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* Pick the MTU: LE links use le_mtu when set, otherwise ACL MTU. */
7095 switch (hcon->type) {
7097 if (hcon->hdev->le_mtu) {
7098 conn->mtu = hcon->hdev->le_mtu;
7103 conn->mtu = hcon->hdev->acl_mtu;
7107 conn->feat_mask = 0;
7109 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7111 if (hcon->type == ACL_LINK &&
7112 hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7113 conn->local_fixed_chan |= L2CAP_FC_A2MP;
7115 if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7116 (bredr_sc_enabled(hcon->hdev) ||
7117 hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7118 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7120 mutex_init(&conn->ident_lock);
7121 mutex_init(&conn->chan_lock);
7123 INIT_LIST_HEAD(&conn->chan_l);
7124 INIT_LIST_HEAD(&conn->users);
7126 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7128 skb_queue_head_init(&conn->pending_rx);
7129 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7130 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7132 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Validate a PSM for the destination address type: LE PSMs fit in one
 * byte; BR/EDR PSMs must be odd with bit 8 clear (per the L2CAP spec).
 */
7137 static bool is_valid_psm(u16 psm, u8 dst_type) {
7141 if (bdaddr_type_is_le(dst_type))
7142 return (psm <= 0x00ff);
7144 /* PSM must be odd and lsb of upper byte must be 0 */
7145 return ((psm & 0x0101) == 0x0001);
/* Initiate an outgoing L2CAP channel connection to dst: validate
 * psm/cid/mode for the channel type, create or reuse the HCI link
 * (LE connect/scan or ACL), attach the channel to the l2cap_conn, and
 * start the channel timer. Returns 0 on success or a negative errno.
 * NOTE(review): several error-handling/unlock lines are elided in this
 * excerpt; comments describe only the visible flow.
 */
7148 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7149 bdaddr_t *dst, u8 dst_type)
7151 struct l2cap_conn *conn;
7152 struct hci_conn *hcon;
7153 struct hci_dev *hdev;
7156 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7157 dst_type, __le16_to_cpu(psm));
7159 hdev = hci_get_route(dst, &chan->src, chan->src_type);
7161 return -EHOSTUNREACH;
/* Raw channels bypass PSM validation; others need a valid PSM
 * unless they connect by fixed CID.
 */
7165 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7166 chan->chan_type != L2CAP_CHAN_RAW) {
7171 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7176 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7181 switch (chan->mode) {
7182 case L2CAP_MODE_BASIC:
7184 case L2CAP_MODE_LE_FLOWCTL:
7186 case L2CAP_MODE_ERTM:
7187 case L2CAP_MODE_STREAMING:
7196 switch (chan->state) {
7200 /* Already connecting */
7205 /* Already connected */
7219 /* Set destination address and psm */
7220 bacpy(&chan->dst, dst);
7221 chan->dst_type = dst_type;
7226 if (bdaddr_type_is_le(dst_type)) {
7227 /* Convert from L2CAP channel address type to HCI address type
7229 if (dst_type == BDADDR_LE_PUBLIC)
7230 dst_type = ADDR_LE_DEV_PUBLIC;
7232 dst_type = ADDR_LE_DEV_RANDOM;
/* While advertising we connect directly; otherwise go through
 * the connect-by-scan path.
 */
7234 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7235 hcon = hci_connect_le(hdev, dst, dst_type,
7237 HCI_LE_CONN_TIMEOUT,
7238 HCI_ROLE_SLAVE, NULL);
7240 hcon = hci_connect_le_scan(hdev, dst, dst_type,
7242 HCI_LE_CONN_TIMEOUT);
7245 u8 auth_type = l2cap_get_auth_type(chan);
7246 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7250 err = PTR_ERR(hcon);
7254 conn = l2cap_conn_add(hcon);
7256 hci_conn_drop(hcon);
7261 mutex_lock(&conn->chan_lock);
7262 l2cap_chan_lock(chan);
/* Fixed-CID connects must not collide with an existing channel. */
7264 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7265 hci_conn_drop(hcon);
7270 /* Update source addr of the socket */
7271 bacpy(&chan->src, &hcon->src);
7272 chan->src_type = bdaddr_src_type(hcon);
7274 __l2cap_chan_add(conn, chan);
7276 /* l2cap_chan_add takes its own ref so we can drop this one */
7277 hci_conn_drop(hcon);
7279 l2cap_state_change(chan, BT_CONNECT);
7280 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7282 /* Release chan->sport so that it can be reused by other
7283 * sockets (as it's only used for listening sockets).
7285 write_lock(&chan_list_lock);
7287 write_unlock(&chan_list_lock);
/* If the link is already up, start the L2CAP-level connection now. */
7289 if (hcon->state == BT_CONNECTED) {
7290 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7291 __clear_chan_timer(chan);
7292 if (l2cap_chan_check_security(chan, true))
7293 l2cap_state_change(chan, BT_CONNECTED);
7295 l2cap_do_start(chan);
7301 l2cap_chan_unlock(chan);
7302 mutex_unlock(&conn->chan_lock);
7304 hci_dev_unlock(hdev);
7308 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7310 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* l2cap_connect_ind() - incoming BR/EDR connection indication from HCI.
 * Scans the global channel list for listeners and builds a link-mode
 * mask (HCI_LM_ACCEPT / HCI_LM_MASTER) telling the core whether to
 * accept the connection from @bdaddr and with which role.
 * lm1 accumulates modes for channels bound to this adapter's own
 * address, lm2 for wildcard (BDADDR_ANY) listeners.
 * NOTE(review): 'exact' is returned but never modified in the visible
 * lines — presumably incremented on an exact local-address match;
 * confirm against the full source.
 */
7312 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7314 int exact = 0, lm1 = 0, lm2 = 0;
7315 struct l2cap_chan *c;
7317 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7319 /* Find listening sockets and check their link_mode */
7320 read_lock(&chan_list_lock);
7321 list_for_each_entry(c, &chan_list, global_l) {
7322 if (c->state != BT_LISTEN)
7325 if (!bacmp(&c->src, &hdev->bdaddr)) {
7326 lm1 |= HCI_LM_ACCEPT;
7327 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7328 lm1 |= HCI_LM_MASTER;
7330 } else if (!bacmp(&c->src, BDADDR_ANY)) {
7331 lm2 |= HCI_LM_ACCEPT;
7332 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7333 lm2 |= HCI_LM_MASTER;
7336 read_unlock(&chan_list_lock);
/* Prefer the link mode of an exact local-address match over wildcards. */
7338 return exact ? lm1 : lm2;
7341 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7342 * from an existing channel in the list or from the beginning of the
7343 * global list (by passing NULL as first parameter).
7345 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7346 struct hci_conn *hcon)
7348 u8 src_type = bdaddr_src_type(hcon);
7350 read_lock(&chan_list_lock);
/* Resume the scan after the given channel, or start from the head. */
7353 c = list_next_entry(c, global_l);
7355 c = list_entry(chan_list.next, typeof(*c), global_l);
7357 list_for_each_entry_from(c, &chan_list, global_l) {
7358 if (c->chan_type != L2CAP_CHAN_FIXED)
7360 if (c->state != BT_LISTEN)
/* The channel must be bound to this adapter's own address or be a
 * wildcard listener, and its source address type must match the link.
 */
7362 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7364 if (src_type != c->src_type)
/* NOTE(review): a matching channel is presumably reference-counted
 * (l2cap_chan_hold) and returned here; the caller drops the ref with
 * l2cap_chan_put() — confirm against the full source.
 */
7368 read_unlock(&chan_list_lock);
7372 read_unlock(&chan_list_lock);
/* l2cap_connect_cfm() - HCI connect-complete callback.
 * On failure (non-zero @status) tears the L2CAP connection down; on
 * success creates the l2cap_conn and offers the new link to every
 * listening fixed channel.
 */
7377 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7379 struct hci_dev *hdev = hcon->hdev;
7380 struct l2cap_conn *conn;
7381 struct l2cap_chan *pchan;
/* Only ACL and LE links carry L2CAP traffic. */
7384 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7387 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
/* Non-zero HCI status: the link never came up. */
7390 l2cap_conn_del(hcon, bt_to_errno(status));
7394 conn = l2cap_conn_add(hcon);
7398 dst_type = bdaddr_dst_type(hcon);
7400 /* If device is blocked, do not create channels for it */
7401 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7404 /* Find fixed channels and notify them of the new connection. We
7405 * use multiple individual lookups, continuing each time where
7406 * we left off, because the list lock would prevent calling the
7407 * potentially sleeping l2cap_chan_lock() function.
7409 pchan = l2cap_global_fixed_chan(NULL, hcon);
7411 struct l2cap_chan *chan, *next;
7413 /* Client fixed channels should override server ones */
7414 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7417 l2cap_chan_lock(pchan);
7418 chan = pchan->ops->new_connection(pchan);
/* Clone the link's addresses into the newly spawned channel. */
7420 bacpy(&chan->src, &hcon->src);
7421 bacpy(&chan->dst, &hcon->dst);
7422 chan->src_type = bdaddr_src_type(hcon);
7423 chan->dst_type = dst_type;
7425 __l2cap_chan_add(conn, chan);
7428 l2cap_chan_unlock(pchan);
/* Advance to the next listening fixed channel and release the
 * reference held on the previous one.
 */
7430 next = l2cap_global_fixed_chan(pchan, hcon);
7431 l2cap_chan_put(pchan);
7435 l2cap_conn_ready(conn);
/* l2cap_disconn_ind() - supply the HCI reason code to use when the core
 * disconnects this link. Returns the reason recorded on the L2CAP
 * connection, defaulting to "remote user terminated".
 * NOTE(review): the !conn guard before the first return is not visible
 * in this excerpt — confirm against the full source.
 */
7438 int l2cap_disconn_ind(struct hci_conn *hcon)
7440 struct l2cap_conn *conn = hcon->l2cap_data;
7442 BT_DBG("hcon %p", hcon);
7445 return HCI_ERROR_REMOTE_USER_TERM;
7446 return conn->disc_reason;
/* l2cap_disconn_cfm() - HCI disconnect callback: tear down all L2CAP
 * state for ACL/LE links, translating the HCI reason to an errno.
 */
7449 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7451 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7454 BT_DBG("hcon %p reason %d", hcon, reason);
7456 l2cap_conn_del(hcon, bt_to_errno(reason));
/* l2cap_check_encryption() - react to a link-encryption change on a
 * connection-oriented channel. Losing encryption closes HIGH/FIPS
 * channels outright and arms a grace timer on MEDIUM ones; regaining
 * encryption clears that timer.
 */
7459 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7461 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7464 if (encrypt == 0x00) {
7465 if (chan->sec_level == BT_SECURITY_MEDIUM) {
7466 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7467 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7468 chan->sec_level == BT_SECURITY_FIPS)
7469 l2cap_chan_close(chan, ECONNREFUSED);
7471 if (chan->sec_level == BT_SECURITY_MEDIUM)
7472 __clear_chan_timer(chan);
/* l2cap_security_cfm() - HCI security (authentication/encryption)
 * change callback. Walks every channel on the connection and advances
 * or aborts its connect/config state machine according to @status and
 * the new @encrypt state.
 */
7476 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7478 struct l2cap_conn *conn = hcon->l2cap_data;
7479 struct l2cap_chan *chan;
7484 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7486 mutex_lock(&conn->chan_lock);
7488 list_for_each_entry(chan, &conn->chan_l, list) {
7489 l2cap_chan_lock(chan);
7491 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7492 state_to_string(chan->state));
/* The A2MP signalling channel is excluded from this handling. */
7494 if (chan->scid == L2CAP_CID_A2MP) {
7495 l2cap_chan_unlock(chan);
/* Successful encryption change: adopt the link's security level. */
7499 if (!status && encrypt)
7500 chan->sec_level = hcon->sec_level;
7502 if (!__l2cap_no_conn_pending(chan)) {
7503 l2cap_chan_unlock(chan);
/* Channels that are already up only need their data flow resumed and
 * the encryption grace-timer logic applied.
 */
7507 if (!status && (chan->state == BT_CONNECTED ||
7508 chan->state == BT_CONFIG)) {
7509 chan->ops->resume(chan);
7510 l2cap_check_encryption(chan, encrypt);
7511 l2cap_chan_unlock(chan);
/* Outgoing connect waiting on security: proceed only when security
 * succeeded and the encryption key is long enough; otherwise arm the
 * disconnect timer.
 */
7515 if (chan->state == BT_CONNECT) {
7516 if (!status && l2cap_check_enc_key_size(hcon))
7517 l2cap_start_connection(chan);
7519 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Incoming connect that was deferred for security: send the pending
 * connect response now (authorization-pending / success / blocked).
 */
7520 } else if (chan->state == BT_CONNECT2 &&
7521 chan->mode != L2CAP_MODE_LE_FLOWCTL) {
7522 struct l2cap_conn_rsp rsp;
7525 if (!status && l2cap_check_enc_key_size(hcon)) {
7526 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7527 res = L2CAP_CR_PEND;
7528 stat = L2CAP_CS_AUTHOR_PEND;
7529 chan->ops->defer(chan);
7531 l2cap_state_change(chan, BT_CONFIG);
7532 res = L2CAP_CR_SUCCESS;
7533 stat = L2CAP_CS_NO_INFO;
7536 l2cap_state_change(chan, BT_DISCONN);
7537 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7538 res = L2CAP_CR_SEC_BLOCK;
7539 stat = L2CAP_CS_NO_INFO;
7542 rsp.scid = cpu_to_le16(chan->dcid);
7543 rsp.dcid = cpu_to_le16(chan->scid);
7544 rsp.result = cpu_to_le16(res);
7545 rsp.status = cpu_to_le16(stat);
7546 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* After a successful response, immediately send the first
 * configuration request if none has gone out yet.
 */
7549 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7550 res == L2CAP_CR_SUCCESS) {
7552 set_bit(CONF_REQ_SENT, &chan->conf_state);
7553 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7555 l2cap_build_conf_req(chan, buf, sizeof(buf)),
7557 chan->num_conf_req++;
7561 l2cap_chan_unlock(chan);
7564 mutex_unlock(&conn->chan_lock);
/* l2cap_recv_acldata() - entry point for ACL data from the HCI core.
 * Reassembles L2CAP PDUs that arrive fragmented over multiple ACL
 * packets (start vs. continuation flags) into conn->rx_skb and hands
 * each complete frame to l2cap_recv_frame().
 */
7567 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7569 struct l2cap_conn *conn = hcon->l2cap_data;
7570 struct l2cap_hdr *hdr;
7573 /* For AMP controller do not create l2cap conn */
7574 if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
7578 conn = l2cap_conn_add(hcon);
7583 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7587 case ACL_START_NO_FLUSH:
/* A new start frame while a reassembly is in progress means the
 * previous frame was truncated: drop it and flag the link unreliable.
 */
7590 BT_ERR("Unexpected start frame (len %d)", skb->len);
7591 kfree_skb(conn->rx_skb);
7592 conn->rx_skb = NULL;
7594 l2cap_conn_unreliable(conn, ECOMM);
7597 /* Start fragment always begin with Basic L2CAP header */
7598 if (skb->len < L2CAP_HDR_SIZE) {
7599 BT_ERR("Frame is too short (len %d)", skb->len);
7600 l2cap_conn_unreliable(conn, ECOMM);
/* Total frame length comes from the basic header, plus the header. */
7604 hdr = (struct l2cap_hdr *) skb->data;
7605 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7607 if (len == skb->len) {
7608 /* Complete frame received */
7609 l2cap_recv_frame(conn, skb);
7613 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7615 if (skb->len > len) {
7616 BT_ERR("Frame is too long (len %d, expected len %d)",
7618 l2cap_conn_unreliable(conn, ECOMM);
7622 /* Allocate skb for the complete frame (with header) */
7623 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7627 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Remember how many payload bytes are still outstanding. */
7629 conn->rx_len = len - skb->len;
7633 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* A continuation with no reassembly in progress is a protocol error. */
7635 if (!conn->rx_len) {
7636 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7637 l2cap_conn_unreliable(conn, ECOMM);
7641 if (skb->len > conn->rx_len) {
7642 BT_ERR("Fragment is too long (len %d, expected %d)",
7643 skb->len, conn->rx_len);
7644 kfree_skb(conn->rx_skb);
7645 conn->rx_skb = NULL;
7647 l2cap_conn_unreliable(conn, ECOMM);
7651 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7653 conn->rx_len -= skb->len;
7655 if (!conn->rx_len) {
7656 /* Complete frame received. l2cap_recv_frame
7657 * takes ownership of the skb so set the global
7658 * rx_skb pointer to NULL first.
7660 struct sk_buff *rx_skb = conn->rx_skb;
7661 conn->rx_skb = NULL;
7662 l2cap_recv_frame(conn, rx_skb);
/* Callback table registered with the HCI core in l2cap_init();
 * connection, disconnection and security events are routed into
 * L2CAP through these hooks.
 */
7671 static struct hci_cb l2cap_cb = {
7673 .connect_cfm = l2cap_connect_cfm,
7674 .disconn_cfm = l2cap_disconn_cfm,
7675 .security_cfm = l2cap_security_cfm,
/* l2cap_debugfs_show() - seq_file backend for the "l2cap" debugfs
 * entry: one line per global channel with addresses, state, PSM,
 * CIDs, MTUs, security level and mode.
 */
7678 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7680 struct l2cap_chan *c;
7682 read_lock(&chan_list_lock);
7684 list_for_each_entry(c, &chan_list, global_l) {
7685 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7686 &c->src, c->src_type, &c->dst, c->dst_type,
7687 c->state, __le16_to_cpu(c->psm),
7688 c->scid, c->dcid, c->imtu, c->omtu,
7689 c->sec_level, c->mode);
7692 read_unlock(&chan_list_lock);
/* Generates l2cap_debugfs_fops from l2cap_debugfs_show(). */
7697 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
/* debugfs entry created below; removed again in l2cap_exit(). */
7699 static struct dentry *l2cap_debugfs;
/* l2cap_init() - subsystem init: register the L2CAP socket layer, hook
 * into the HCI core and expose the channel list under debugfs.
 */
7701 int __init l2cap_init(void)
7705 err = l2cap_init_sockets();
7709 hci_register_cb(&l2cap_cb);
/* Skip debugfs when the bluetooth root directory was not created. */
7711 if (IS_ERR_OR_NULL(bt_debugfs))
7714 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7715 NULL, &l2cap_debugfs_fops);
/* l2cap_exit() - subsystem teardown, reversing l2cap_init(). */
7720 void l2cap_exit(void)
7722 debugfs_remove(l2cap_debugfs);
7723 hci_unregister_cb(&l2cap_cb);
7724 l2cap_cleanup_sockets();
/* disable_ertm is defined elsewhere in this file; mode 0644 makes it
 * runtime-tunable via /sys/module parameters.
 */
7727 module_param(disable_ertm, bool, 0644);
7728 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");