2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
45 #define LE_FLOWCTL_MAX_CREDITS 65535
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 struct sk_buff_head *skbs, u8 event);
65 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
67 if (link_type == LE_LINK) {
68 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
69 return BDADDR_LE_PUBLIC;
71 return BDADDR_LE_RANDOM;
77 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
79 return bdaddr_type(hcon->type, hcon->src_type);
82 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
84 return bdaddr_type(hcon->type, hcon->dst_type);
87 /* ---- L2CAP channels ---- */
89 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
94 list_for_each_entry(c, &conn->chan_l, list) {
101 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
104 struct l2cap_chan *c;
106 list_for_each_entry(c, &conn->chan_l, list) {
113 /* Find channel with given SCID.
114 * Returns locked channel. */
115 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
118 struct l2cap_chan *c;
120 mutex_lock(&conn->chan_lock);
121 c = __l2cap_get_chan_by_scid(conn, cid);
124 mutex_unlock(&conn->chan_lock);
129 /* Find channel with given DCID.
130 * Returns locked channel.
132 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
135 struct l2cap_chan *c;
137 mutex_lock(&conn->chan_lock);
138 c = __l2cap_get_chan_by_dcid(conn, cid);
141 mutex_unlock(&conn->chan_lock);
146 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
149 struct l2cap_chan *c;
151 list_for_each_entry(c, &conn->chan_l, list) {
152 if (c->ident == ident)
158 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
161 struct l2cap_chan *c;
163 mutex_lock(&conn->chan_lock);
164 c = __l2cap_get_chan_by_ident(conn, ident);
167 mutex_unlock(&conn->chan_lock);
172 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
175 struct l2cap_chan *c;
177 list_for_each_entry(c, &chan_list, global_l) {
178 if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
181 if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
184 if (c->sport == psm && !bacmp(&c->src, src))
190 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
194 write_lock(&chan_list_lock);
196 if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
206 u16 p, start, end, incr;
208 if (chan->src_type == BDADDR_BREDR) {
209 start = L2CAP_PSM_DYN_START;
210 end = L2CAP_PSM_AUTO_END;
213 start = L2CAP_PSM_LE_DYN_START;
214 end = L2CAP_PSM_LE_DYN_END;
219 for (p = start; p <= end; p += incr)
220 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
222 chan->psm = cpu_to_le16(p);
223 chan->sport = cpu_to_le16(p);
230 write_unlock(&chan_list_lock);
233 EXPORT_SYMBOL_GPL(l2cap_add_psm);
235 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
237 write_lock(&chan_list_lock);
239 /* Override the defaults (which are for conn-oriented) */
240 chan->omtu = L2CAP_DEFAULT_MTU;
241 chan->chan_type = L2CAP_CHAN_FIXED;
245 write_unlock(&chan_list_lock);
250 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
254 if (conn->hcon->type == LE_LINK)
255 dyn_end = L2CAP_CID_LE_DYN_END;
257 dyn_end = L2CAP_CID_DYN_END;
259 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
260 if (!__l2cap_get_chan_by_scid(conn, cid))
267 static void l2cap_state_change(struct l2cap_chan *chan, int state)
269 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
270 state_to_string(state));
273 chan->ops->state_change(chan, state, 0);
276 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
280 chan->ops->state_change(chan, chan->state, err);
283 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
285 chan->ops->state_change(chan, chan->state, err);
288 static void __set_retrans_timer(struct l2cap_chan *chan)
290 if (!delayed_work_pending(&chan->monitor_timer) &&
291 chan->retrans_timeout) {
292 l2cap_set_timer(chan, &chan->retrans_timer,
293 msecs_to_jiffies(chan->retrans_timeout));
297 static void __set_monitor_timer(struct l2cap_chan *chan)
299 __clear_retrans_timer(chan);
300 if (chan->monitor_timeout) {
301 l2cap_set_timer(chan, &chan->monitor_timer,
302 msecs_to_jiffies(chan->monitor_timeout));
306 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
311 skb_queue_walk(head, skb) {
312 if (bt_cb(skb)->l2cap.txseq == seq)
319 /* ---- L2CAP sequence number lists ---- */
321 /* For ERTM, ordered lists of sequence numbers must be tracked for
322 * SREJ requests that are received and for frames that are to be
323 * retransmitted. These seq_list functions implement a singly-linked
324 * list in an array, where membership in the list can also be checked
325 * in constant time. Items can also be added to the tail of the list
326 * and removed from the head in constant time, without further memory
330 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
332 size_t alloc_size, i;
334 /* Allocated size is a power of 2 to map sequence numbers
335 * (which may be up to 14 bits) in to a smaller array that is
336 * sized for the negotiated ERTM transmit windows.
338 alloc_size = roundup_pow_of_two(size);
340 seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
344 seq_list->mask = alloc_size - 1;
345 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
346 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
347 for (i = 0; i < alloc_size; i++)
348 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
353 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
355 kfree(seq_list->list);
358 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
361 /* Constant-time check for list membership */
362 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
365 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
367 u16 seq = seq_list->head;
368 u16 mask = seq_list->mask;
370 seq_list->head = seq_list->list[seq & mask];
371 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
373 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
374 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
375 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
381 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
385 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
388 for (i = 0; i <= seq_list->mask; i++)
389 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
391 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
392 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
395 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
397 u16 mask = seq_list->mask;
399 /* All appends happen in constant time */
401 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
404 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
405 seq_list->head = seq;
407 seq_list->list[seq_list->tail & mask] = seq;
409 seq_list->tail = seq;
410 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: close the channel with an
 * error code depending on the state it timed out in.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
446 struct l2cap_chan *l2cap_chan_create(void)
448 struct l2cap_chan *chan;
450 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
454 skb_queue_head_init(&chan->tx_q);
455 skb_queue_head_init(&chan->srej_q);
456 mutex_init(&chan->lock);
458 /* Set default lock nesting level */
459 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
461 write_lock(&chan_list_lock);
462 list_add(&chan->global_l, &chan_list);
463 write_unlock(&chan_list_lock);
465 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
467 chan->state = BT_OPEN;
469 kref_init(&chan->kref);
471 /* This flag is cleared in l2cap_chan_ready() */
472 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
474 BT_DBG("chan %p", chan);
478 EXPORT_SYMBOL_GPL(l2cap_chan_create);
480 static void l2cap_chan_destroy(struct kref *kref)
482 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
484 BT_DBG("chan %p", chan);
486 write_lock(&chan_list_lock);
487 list_del(&chan->global_l);
488 write_unlock(&chan_list_lock);
493 void l2cap_chan_hold(struct l2cap_chan *c)
495 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
500 void l2cap_chan_put(struct l2cap_chan *c)
502 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
504 kref_put(&c->kref, l2cap_chan_destroy);
506 EXPORT_SYMBOL_GPL(l2cap_chan_put);
508 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
510 chan->fcs = L2CAP_FCS_CRC16;
511 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
512 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
513 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
514 chan->remote_max_tx = chan->max_tx;
515 chan->remote_tx_win = chan->tx_win;
516 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
517 chan->sec_level = BT_SECURITY_LOW;
518 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
519 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
520 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
522 chan->conf_state = 0;
523 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
525 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
527 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
529 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
532 chan->sdu_last_frag = NULL;
534 chan->tx_credits = tx_credits;
535 /* Derive MPS from connection MTU to stop HCI fragmentation */
536 chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
537 /* Give enough credits for a full packet */
538 chan->rx_credits = (chan->imtu / chan->mps) + 1;
540 skb_queue_head_init(&chan->tx_q);
543 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
545 l2cap_le_flowctl_init(chan, tx_credits);
547 /* L2CAP implementations shall support a minimum MPS of 64 octets */
548 if (chan->mps < L2CAP_ECRED_MIN_MPS) {
549 chan->mps = L2CAP_ECRED_MIN_MPS;
550 chan->rx_credits = (chan->imtu / chan->mps) + 1;
554 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
556 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
557 __le16_to_cpu(chan->psm), chan->dcid);
559 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
563 switch (chan->chan_type) {
564 case L2CAP_CHAN_CONN_ORIENTED:
565 /* Alloc CID for connection-oriented socket */
566 chan->scid = l2cap_alloc_cid(conn);
567 if (conn->hcon->type == ACL_LINK)
568 chan->omtu = L2CAP_DEFAULT_MTU;
571 case L2CAP_CHAN_CONN_LESS:
572 /* Connectionless socket */
573 chan->scid = L2CAP_CID_CONN_LESS;
574 chan->dcid = L2CAP_CID_CONN_LESS;
575 chan->omtu = L2CAP_DEFAULT_MTU;
578 case L2CAP_CHAN_FIXED:
579 /* Caller will set CID and CID specific MTU values */
583 /* Raw socket can send/recv signalling messages only */
584 chan->scid = L2CAP_CID_SIGNALING;
585 chan->dcid = L2CAP_CID_SIGNALING;
586 chan->omtu = L2CAP_DEFAULT_MTU;
589 chan->local_id = L2CAP_BESTEFFORT_ID;
590 chan->local_stype = L2CAP_SERV_BESTEFFORT;
591 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
592 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
593 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
594 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
596 l2cap_chan_hold(chan);
598 /* Only keep a reference for fixed channels if they requested it */
599 if (chan->chan_type != L2CAP_CHAN_FIXED ||
600 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
601 hci_conn_hold(conn->hcon);
603 list_add(&chan->list, &conn->chan_l);
606 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
608 mutex_lock(&conn->chan_lock);
609 __l2cap_chan_add(conn, chan);
610 mutex_unlock(&conn->chan_lock);
613 void l2cap_chan_del(struct l2cap_chan *chan, int err)
615 struct l2cap_conn *conn = chan->conn;
617 __clear_chan_timer(chan);
619 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
620 state_to_string(chan->state));
622 chan->ops->teardown(chan, err);
625 struct amp_mgr *mgr = conn->hcon->amp_mgr;
626 /* Delete from channel list */
627 list_del(&chan->list);
629 l2cap_chan_put(chan);
633 /* Reference was only held for non-fixed channels or
634 * fixed channels that explicitly requested it using the
635 * FLAG_HOLD_HCI_CONN flag.
637 if (chan->chan_type != L2CAP_CHAN_FIXED ||
638 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
639 hci_conn_drop(conn->hcon);
641 if (mgr && mgr->bredr_chan == chan)
642 mgr->bredr_chan = NULL;
645 if (chan->hs_hchan) {
646 struct hci_chan *hs_hchan = chan->hs_hchan;
648 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
649 amp_disconnect_logical_link(hs_hchan);
652 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
656 case L2CAP_MODE_BASIC:
659 case L2CAP_MODE_LE_FLOWCTL:
660 case L2CAP_MODE_EXT_FLOWCTL:
661 skb_queue_purge(&chan->tx_q);
664 case L2CAP_MODE_ERTM:
665 __clear_retrans_timer(chan);
666 __clear_monitor_timer(chan);
667 __clear_ack_timer(chan);
669 skb_queue_purge(&chan->srej_q);
671 l2cap_seq_list_free(&chan->srej_list);
672 l2cap_seq_list_free(&chan->retrans_list);
675 case L2CAP_MODE_STREAMING:
676 skb_queue_purge(&chan->tx_q);
682 EXPORT_SYMBOL_GPL(l2cap_chan_del);
684 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
687 struct l2cap_chan *chan;
689 list_for_each_entry(chan, &conn->chan_l, list) {
694 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
700 mutex_lock(&conn->chan_lock);
701 __l2cap_chan_list(conn, func, data);
702 mutex_unlock(&conn->chan_lock);
705 EXPORT_SYMBOL_GPL(l2cap_chan_list);
707 static void l2cap_conn_update_id_addr(struct work_struct *work)
709 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
710 id_addr_update_work);
711 struct hci_conn *hcon = conn->hcon;
712 struct l2cap_chan *chan;
714 mutex_lock(&conn->chan_lock);
716 list_for_each_entry(chan, &conn->chan_l, list) {
717 l2cap_chan_lock(chan);
718 bacpy(&chan->dst, &hcon->dst);
719 chan->dst_type = bdaddr_dst_type(hcon);
720 l2cap_chan_unlock(chan);
723 mutex_unlock(&conn->chan_lock);
726 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
728 struct l2cap_conn *conn = chan->conn;
729 struct l2cap_le_conn_rsp rsp;
732 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
733 result = L2CAP_CR_LE_AUTHORIZATION;
735 result = L2CAP_CR_LE_BAD_PSM;
737 l2cap_state_change(chan, BT_DISCONN);
739 rsp.dcid = cpu_to_le16(chan->scid);
740 rsp.mtu = cpu_to_le16(chan->imtu);
741 rsp.mps = cpu_to_le16(chan->mps);
742 rsp.credits = cpu_to_le16(chan->rx_credits);
743 rsp.result = cpu_to_le16(result);
745 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
749 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
751 struct l2cap_conn *conn = chan->conn;
752 struct l2cap_ecred_conn_rsp rsp;
755 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
756 result = L2CAP_CR_LE_AUTHORIZATION;
758 result = L2CAP_CR_LE_BAD_PSM;
760 l2cap_state_change(chan, BT_DISCONN);
762 memset(&rsp, 0, sizeof(rsp));
764 rsp.result = cpu_to_le16(result);
766 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
770 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
772 struct l2cap_conn *conn = chan->conn;
773 struct l2cap_conn_rsp rsp;
776 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
777 result = L2CAP_CR_SEC_BLOCK;
779 result = L2CAP_CR_BAD_PSM;
781 l2cap_state_change(chan, BT_DISCONN);
783 rsp.scid = cpu_to_le16(chan->dcid);
784 rsp.dcid = cpu_to_le16(chan->scid);
785 rsp.result = cpu_to_le16(result);
786 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
788 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
791 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
793 struct l2cap_conn *conn = chan->conn;
795 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
797 switch (chan->state) {
799 chan->ops->teardown(chan, 0);
804 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
805 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
806 l2cap_send_disconn_req(chan, reason);
808 l2cap_chan_del(chan, reason);
812 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
813 if (conn->hcon->type == ACL_LINK)
814 l2cap_chan_connect_reject(chan);
815 else if (conn->hcon->type == LE_LINK) {
816 switch (chan->mode) {
817 case L2CAP_MODE_LE_FLOWCTL:
818 l2cap_chan_le_connect_reject(chan);
820 case L2CAP_MODE_EXT_FLOWCTL:
821 l2cap_chan_ecred_connect_reject(chan);
827 l2cap_chan_del(chan, reason);
832 l2cap_chan_del(chan, reason);
836 chan->ops->teardown(chan, 0);
840 EXPORT_SYMBOL(l2cap_chan_close);
842 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
844 switch (chan->chan_type) {
846 switch (chan->sec_level) {
847 case BT_SECURITY_HIGH:
848 case BT_SECURITY_FIPS:
849 return HCI_AT_DEDICATED_BONDING_MITM;
850 case BT_SECURITY_MEDIUM:
851 return HCI_AT_DEDICATED_BONDING;
853 return HCI_AT_NO_BONDING;
856 case L2CAP_CHAN_CONN_LESS:
857 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
858 if (chan->sec_level == BT_SECURITY_LOW)
859 chan->sec_level = BT_SECURITY_SDP;
861 if (chan->sec_level == BT_SECURITY_HIGH ||
862 chan->sec_level == BT_SECURITY_FIPS)
863 return HCI_AT_NO_BONDING_MITM;
865 return HCI_AT_NO_BONDING;
867 case L2CAP_CHAN_CONN_ORIENTED:
868 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
869 if (chan->sec_level == BT_SECURITY_LOW)
870 chan->sec_level = BT_SECURITY_SDP;
872 if (chan->sec_level == BT_SECURITY_HIGH ||
873 chan->sec_level == BT_SECURITY_FIPS)
874 return HCI_AT_NO_BONDING_MITM;
876 return HCI_AT_NO_BONDING;
881 switch (chan->sec_level) {
882 case BT_SECURITY_HIGH:
883 case BT_SECURITY_FIPS:
884 return HCI_AT_GENERAL_BONDING_MITM;
885 case BT_SECURITY_MEDIUM:
886 return HCI_AT_GENERAL_BONDING;
888 return HCI_AT_NO_BONDING;
894 /* Service level security */
895 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
897 struct l2cap_conn *conn = chan->conn;
900 if (conn->hcon->type == LE_LINK)
901 return smp_conn_security(conn->hcon, chan->sec_level);
903 auth_type = l2cap_get_auth_type(chan);
905 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
909 static u8 l2cap_get_ident(struct l2cap_conn *conn)
913 /* Get next available identificator.
914 * 1 - 128 are used by kernel.
915 * 129 - 199 are reserved.
916 * 200 - 254 are used by utilities like l2ping, etc.
919 mutex_lock(&conn->ident_lock);
921 if (++conn->tx_ident > 128)
926 mutex_unlock(&conn->ident_lock);
931 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
934 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
937 BT_DBG("code 0x%2.2x", code);
942 /* Use NO_FLUSH if supported or we have an LE link (which does
943 * not support auto-flushing packets) */
944 if (lmp_no_flush_capable(conn->hcon->hdev) ||
945 conn->hcon->type == LE_LINK)
946 flags = ACL_START_NO_FLUSH;
950 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
951 skb->priority = HCI_PRIO_MAX;
953 hci_send_acl(conn->hchan, skb, flags);
956 static bool __chan_is_moving(struct l2cap_chan *chan)
958 return chan->move_state != L2CAP_MOVE_STABLE &&
959 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
962 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
964 struct hci_conn *hcon = chan->conn->hcon;
967 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
970 if (chan->hs_hcon && !__chan_is_moving(chan)) {
972 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
979 /* Use NO_FLUSH for LE links (where this is the only option) or
980 * if the BR/EDR link supports it and flushing has not been
981 * explicitly requested (through FLAG_FLUSHABLE).
983 if (hcon->type == LE_LINK ||
984 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
985 lmp_no_flush_capable(hcon->hdev)))
986 flags = ACL_START_NO_FLUSH;
990 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
991 hci_send_acl(chan->conn->hchan, skb, flags);
994 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
996 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
997 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
999 if (enh & L2CAP_CTRL_FRAME_TYPE) {
1001 control->sframe = 1;
1002 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1003 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1009 control->sframe = 0;
1010 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1011 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1018 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1020 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1021 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1023 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1025 control->sframe = 1;
1026 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1027 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1033 control->sframe = 0;
1034 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1035 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1042 static inline void __unpack_control(struct l2cap_chan *chan,
1043 struct sk_buff *skb)
1045 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1046 __unpack_extended_control(get_unaligned_le32(skb->data),
1047 &bt_cb(skb)->l2cap);
1048 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1050 __unpack_enhanced_control(get_unaligned_le16(skb->data),
1051 &bt_cb(skb)->l2cap);
1052 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1056 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1060 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1061 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1063 if (control->sframe) {
1064 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1065 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1066 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1068 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1069 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1075 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1079 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1080 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1082 if (control->sframe) {
1083 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1084 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1085 packed |= L2CAP_CTRL_FRAME_TYPE;
1087 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1088 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1094 static inline void __pack_control(struct l2cap_chan *chan,
1095 struct l2cap_ctrl *control,
1096 struct sk_buff *skb)
1098 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1099 put_unaligned_le32(__pack_extended_control(control),
1100 skb->data + L2CAP_HDR_SIZE);
1102 put_unaligned_le16(__pack_enhanced_control(control),
1103 skb->data + L2CAP_HDR_SIZE);
1107 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1109 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1110 return L2CAP_EXT_HDR_SIZE;
1112 return L2CAP_ENH_HDR_SIZE;
1115 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1118 struct sk_buff *skb;
1119 struct l2cap_hdr *lh;
1120 int hlen = __ertm_hdr_size(chan);
1122 if (chan->fcs == L2CAP_FCS_CRC16)
1123 hlen += L2CAP_FCS_SIZE;
1125 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1128 return ERR_PTR(-ENOMEM);
1130 lh = skb_put(skb, L2CAP_HDR_SIZE);
1131 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1132 lh->cid = cpu_to_le16(chan->dcid);
1134 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1135 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1137 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1139 if (chan->fcs == L2CAP_FCS_CRC16) {
1140 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1141 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1144 skb->priority = HCI_PRIO_MAX;
1148 static void l2cap_send_sframe(struct l2cap_chan *chan,
1149 struct l2cap_ctrl *control)
1151 struct sk_buff *skb;
1154 BT_DBG("chan %p, control %p", chan, control);
1156 if (!control->sframe)
1159 if (__chan_is_moving(chan))
1162 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1166 if (control->super == L2CAP_SUPER_RR)
1167 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1168 else if (control->super == L2CAP_SUPER_RNR)
1169 set_bit(CONN_RNR_SENT, &chan->conn_state);
1171 if (control->super != L2CAP_SUPER_SREJ) {
1172 chan->last_acked_seq = control->reqseq;
1173 __clear_ack_timer(chan);
1176 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1177 control->final, control->poll, control->super);
1179 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1180 control_field = __pack_extended_control(control);
1182 control_field = __pack_enhanced_control(control);
1184 skb = l2cap_create_sframe_pdu(chan, control_field);
1186 l2cap_do_send(chan, skb);
1189 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1191 struct l2cap_ctrl control;
1193 BT_DBG("chan %p, poll %d", chan, poll);
1195 memset(&control, 0, sizeof(control));
1197 control.poll = poll;
1199 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1200 control.super = L2CAP_SUPER_RNR;
1202 control.super = L2CAP_SUPER_RR;
1204 control.reqseq = chan->buffer_seq;
1205 l2cap_send_sframe(chan, &control);
1208 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1210 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1213 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1216 static bool __amp_capable(struct l2cap_chan *chan)
1218 struct l2cap_conn *conn = chan->conn;
1219 struct hci_dev *hdev;
1220 bool amp_available = false;
1222 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1225 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1228 read_lock(&hci_dev_list_lock);
1229 list_for_each_entry(hdev, &hci_dev_list, list) {
1230 if (hdev->amp_type != AMP_TYPE_BREDR &&
1231 test_bit(HCI_UP, &hdev->flags)) {
1232 amp_available = true;
1236 read_unlock(&hci_dev_list_lock);
1238 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1239 return amp_available;
1244 static bool l2cap_check_efs(struct l2cap_chan *chan)
1246 /* Check EFS parameters */
1250 void l2cap_send_conn_req(struct l2cap_chan *chan)
1252 struct l2cap_conn *conn = chan->conn;
1253 struct l2cap_conn_req req;
1255 req.scid = cpu_to_le16(chan->scid);
1256 req.psm = chan->psm;
1258 chan->ident = l2cap_get_ident(conn);
1260 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1262 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1265 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1267 struct l2cap_create_chan_req req;
1268 req.scid = cpu_to_le16(chan->scid);
1269 req.psm = chan->psm;
1270 req.amp_id = amp_id;
1272 chan->ident = l2cap_get_ident(chan->conn);
1274 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1278 static void l2cap_move_setup(struct l2cap_chan *chan)
1280 struct sk_buff *skb;
1282 BT_DBG("chan %p", chan);
1284 if (chan->mode != L2CAP_MODE_ERTM)
1287 __clear_retrans_timer(chan);
1288 __clear_monitor_timer(chan);
1289 __clear_ack_timer(chan);
1291 chan->retry_count = 0;
1292 skb_queue_walk(&chan->tx_q, skb) {
1293 if (bt_cb(skb)->l2cap.retries)
1294 bt_cb(skb)->l2cap.retries = 1;
1299 chan->expected_tx_seq = chan->buffer_seq;
1301 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1302 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1303 l2cap_seq_list_clear(&chan->retrans_list);
1304 l2cap_seq_list_clear(&chan->srej_list);
1305 skb_queue_purge(&chan->srej_q);
1307 chan->tx_state = L2CAP_TX_STATE_XMIT;
1308 chan->rx_state = L2CAP_RX_STATE_MOVE;
1310 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1313 static void l2cap_move_done(struct l2cap_chan *chan)
1315 u8 move_role = chan->move_role;
1316 BT_DBG("chan %p", chan);
1318 chan->move_state = L2CAP_MOVE_STABLE;
1319 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1321 if (chan->mode != L2CAP_MODE_ERTM)
1324 switch (move_role) {
1325 case L2CAP_MOVE_ROLE_INITIATOR:
1326 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1327 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1329 case L2CAP_MOVE_ROLE_RESPONDER:
1330 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1335 static void l2cap_chan_ready(struct l2cap_chan *chan)
1337 /* The channel may have already been flagged as connected in
1338 * case of receiving data before the L2CAP info req/rsp
1339 * procedure is complete.
1341 if (chan->state == BT_CONNECTED)
1344 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1345 chan->conf_state = 0;
1346 __clear_chan_timer(chan);
1348 switch (chan->mode) {
1349 case L2CAP_MODE_LE_FLOWCTL:
1350 case L2CAP_MODE_EXT_FLOWCTL:
1351 if (!chan->tx_credits)
1352 chan->ops->suspend(chan);
1356 chan->state = BT_CONNECTED;
1358 chan->ops->ready(chan);
1361 static void l2cap_le_connect(struct l2cap_chan *chan)
1363 struct l2cap_conn *conn = chan->conn;
1364 struct l2cap_le_conn_req req;
1366 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1370 chan->imtu = chan->conn->mtu;
1372 l2cap_le_flowctl_init(chan, 0);
1374 req.psm = chan->psm;
1375 req.scid = cpu_to_le16(chan->scid);
1376 req.mtu = cpu_to_le16(chan->imtu);
1377 req.mps = cpu_to_le16(chan->mps);
1378 req.credits = cpu_to_le16(chan->rx_credits);
1380 chan->ident = l2cap_get_ident(conn);
1382 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1386 struct l2cap_ecred_conn_data {
1388 struct l2cap_ecred_conn_req req;
1391 struct l2cap_chan *chan;
/* Channel-list iterator callback: fold compatible deferred channels into
 * the ECRED connection request being built in 'data', so one PDU can open
 * several channels at once. Channels must share PID, PSM, mode and state
 * with the originating channel and not already be part of a request.
 */
1396 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1398 struct l2cap_ecred_conn_data *conn = data;
/* Skip the channel that initiated the request */
1401 if (chan == conn->chan)
1404 if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1407 pid = chan->ops->get_peer_pid(chan);
1409 /* Only add deferred channels with the same PID/PSM */
1410 if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1411 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1414 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1417 l2cap_ecred_init(chan, 0);
1419 /* Set the same ident so we can match on the rsp */
1420 chan->ident = conn->chan->ident;
1422 /* Include all channels deferred */
1423 conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
/* Send an enhanced-credit (ECRED) connection request for this channel,
 * sweeping the connection's channel list for other deferred channels
 * that can ride in the same request PDU.
 */
1428 static void l2cap_ecred_connect(struct l2cap_chan *chan)
1430 struct l2cap_conn *conn = chan->conn;
1431 struct l2cap_ecred_conn_data data;
/* Deferred channels are collected later by the initiating request */
1433 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1436 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1439 l2cap_ecred_init(chan, 0);
1441 data.pdu.req.psm = chan->psm;
1442 data.pdu.req.mtu = cpu_to_le16(chan->imtu);
1443 data.pdu.req.mps = cpu_to_le16(chan->mps);
1444 data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1445 data.pdu.scid[0] = cpu_to_le16(chan->scid);
1447 chan->ident = l2cap_get_ident(conn);
1448 data.pid = chan->ops->get_peer_pid(chan);
/* NOTE(review): data.pid appears assigned twice here; the dump seems to
 * be missing intervening lines (e.g. data.count/data.chan setup) --
 * confirm against the full source.
 */
1452 data.pid = chan->ops->get_peer_pid(chan);
/* Pull in every compatible deferred channel before sending */
1454 __l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
1456 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1457 sizeof(data.pdu.req) + data.count * sizeof(__le16),
/* Start an LE channel: enforce the required security level first, then
 * either mark the channel ready or issue the mode-appropriate connection
 * request (ECRED for EXT_FLOWCTL, plain LE otherwise).
 */
1461 static void l2cap_le_start(struct l2cap_chan *chan)
1463 struct l2cap_conn *conn = chan->conn;
/* Security must be satisfied before any channel traffic */
1465 if (!smp_conn_security(conn->hcon, chan->sec_level))
1469 l2cap_chan_ready(chan);
1473 if (chan->state == BT_CONNECT) {
1474 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1475 l2cap_ecred_connect(chan);
1477 l2cap_le_connect(chan);
/* Kick off channel establishment on the appropriate transport:
 * AMP discovery for AMP-capable channels, the LE path for LE links,
 * or a classic BR/EDR connection request otherwise.
 */
1481 static void l2cap_start_connection(struct l2cap_chan *chan)
1483 if (__amp_capable(chan)) {
1484 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1485 a2mp_discover_amp(chan);
1486 } else if (chan->conn->hcon->type == LE_LINK) {
1487 l2cap_le_start(chan);
1489 l2cap_send_conn_req(chan);
/* Send the L2CAP information request (feature mask) for this connection,
 * at most once, and arm the info timer so a silent peer cannot stall
 * channel setup forever.
 */
1493 static void l2cap_request_info(struct l2cap_conn *conn)
1495 struct l2cap_info_req req;
/* Only one feature-mask request per connection */
1497 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1500 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1502 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1503 conn->info_ident = l2cap_get_ident(conn);
1505 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1507 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Return true when the link either is not encrypted (no key size to
 * enforce) or its encryption key meets the minimum size policy.
 * NOTE(review): the FIPS branch body is missing from this dump
 * (presumably it forces min_key_size to 16) -- confirm.
 */
1511 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1513 /* The minimum encryption key size needs to be enforced by the
1514 * host stack before establishing any L2CAP connections. The
1515 * specification in theory allows a minimum of 1, but to align
1516 * BR/EDR and LE transports, a minimum of 7 is chosen.
1518 * This check might also be called for unencrypted connections
1519 * that have no key size requirements. Ensure that the link is
1520 * actually encrypted before enforcing a key size.
1522 int min_key_size = hcon->hdev->min_enc_key_size;
1524 /* On FIPS security level, key size must be 16 bytes */
1525 if (hcon->sec_level == BT_SECURITY_FIPS)
1528 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1529 hcon->enc_key_size >= min_key_size);
/* Drive connection setup for one channel. LE links go straight to the
 * LE path; BR/EDR links first require the feature-mask info exchange,
 * channel security and no pending connect, then an encryption key size
 * check before the actual connection request goes out.
 */
1532 static void l2cap_do_start(struct l2cap_chan *chan)
1534 struct l2cap_conn *conn = chan->conn;
1536 if (conn->hcon->type == LE_LINK) {
1537 l2cap_le_start(chan);
/* Feature-mask exchange must complete before BR/EDR setup */
1541 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1542 l2cap_request_info(conn);
1546 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1549 if (!l2cap_chan_check_security(chan, true) ||
1550 !__l2cap_no_conn_pending(chan))
/* Weak encryption key: arm the disconnect timer instead of connecting */
1553 if (l2cap_check_enc_key_size(conn->hcon))
1554 l2cap_start_connection(chan);
1556 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Check whether 'mode' is supported by both the local feature mask and
 * the remote 'feat_mask'; non-zero means supported.
 * NOTE(review): the dump is missing lines between 1563 and 1566 (the
 * enabling condition for ERTM/streaming and the switch header) and the
 * default return -- confirm against the full source.
 */
1559 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1561 u32 local_feat_mask = l2cap_feat_mask;
1563 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1566 case L2CAP_MODE_ERTM:
1567 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1568 case L2CAP_MODE_STREAMING:
1569 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP disconnect request for the channel and move it to
 * BT_DISCONN with 'err' recorded for the owner. ERTM timers are stopped
 * first; A2MP channels skip the wire request entirely.
 */
1575 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1577 struct l2cap_conn *conn = chan->conn;
1578 struct l2cap_disconn_req req;
/* Stop ERTM retransmit/monitor/ack machinery before tearing down */
1583 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1584 __clear_retrans_timer(chan);
1585 __clear_monitor_timer(chan);
1586 __clear_ack_timer(chan);
/* A2MP has no disconnect PDU; just flip the state */
1589 if (chan->scid == L2CAP_CID_A2MP) {
1590 l2cap_state_change(chan, BT_DISCONN);
1594 req.dcid = cpu_to_le16(chan->dcid);
1595 req.scid = cpu_to_le16(chan->scid);
1596 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1599 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1602 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection (under chan_lock) and advance its
 * setup state machine: connectionless channels become ready immediately,
 * BT_CONNECT channels are (re)started or closed depending on security,
 * mode support and key size, and BT_CONNECT2 channels get their pending
 * connection response sent.
 */
1603 static void l2cap_conn_start(struct l2cap_conn *conn)
1605 struct l2cap_chan *chan, *tmp;
1607 BT_DBG("conn %p", conn);
1609 mutex_lock(&conn->chan_lock);
1611 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1612 l2cap_chan_lock(chan);
/* Non connection-oriented channels need no setup handshake */
1614 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1615 l2cap_chan_ready(chan);
1616 l2cap_chan_unlock(chan);
1620 if (chan->state == BT_CONNECT) {
1621 if (!l2cap_chan_check_security(chan, true) ||
1622 !__l2cap_no_conn_pending(chan)) {
1623 l2cap_chan_unlock(chan);
/* Close channels whose required mode the peer cannot do and
 * for which the device mandated that mode (CONF_STATE2_DEVICE).
 */
1627 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1628 && test_bit(CONF_STATE2_DEVICE,
1629 &chan->conf_state)) {
1630 l2cap_chan_close(chan, ECONNRESET);
1631 l2cap_chan_unlock(chan);
/* Refuse to proceed over a weakly encrypted link */
1635 if (l2cap_check_enc_key_size(conn->hcon))
1636 l2cap_start_connection(chan);
1638 l2cap_chan_close(chan, ECONNREFUSED);
1640 } else if (chan->state == BT_CONNECT2) {
1641 struct l2cap_conn_rsp rsp;
1643 rsp.scid = cpu_to_le16(chan->dcid);
1644 rsp.dcid = cpu_to_le16(chan->scid);
1646 if (l2cap_chan_check_security(chan, false)) {
/* With deferred setup the owner must authorize first */
1647 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1648 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1649 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1650 chan->ops->defer(chan);
1653 l2cap_state_change(chan, BT_CONFIG);
1654 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1655 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1658 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1659 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1662 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only a successful response that has not yet triggered a
 * config request proceeds to configuration.
 */
1665 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1666 rsp.result != L2CAP_CR_SUCCESS) {
1667 l2cap_chan_unlock(chan);
1671 set_bit(CONF_REQ_SENT, &chan->conf_state);
1672 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1673 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1674 chan->num_conf_req++;
1677 l2cap_chan_unlock(chan);
1680 mutex_unlock(&conn->chan_lock);
/* LE-link-specific connection-ready handling: elevate security for
 * pending pairing and, as slave, request a connection parameter update
 * if the current interval is outside the configured range.
 */
1683 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1685 struct hci_conn *hcon = conn->hcon;
1686 struct hci_dev *hdev = hcon->hdev;
1688 BT_DBG("%s conn %p", hdev->name, conn);
1690 /* For outgoing pairing which doesn't necessarily have an
1691 * associated socket (e.g. mgmt_pair_device).
1694 smp_conn_security(hcon, hcon->pending_sec_level);
1696 /* For LE slave connections, make sure the connection interval
1697 * is in the range of the minimum and maximum interval that has
1698 * been configured for this connection. If not, then trigger
1699 * the connection update procedure.
1701 if (hcon->role == HCI_ROLE_SLAVE &&
1702 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1703 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1704 struct l2cap_conn_param_update_req req;
1706 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1707 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1708 req.latency = cpu_to_le16(hcon->le_conn_latency);
1709 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1711 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1712 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
/* Called when the underlying link is up: request the feature mask on
 * ACL links, then walk all channels and advance each one's setup,
 * finishing with LE-specific handling and queuing deferred RX work.
 */
1716 static void l2cap_conn_ready(struct l2cap_conn *conn)
1718 struct l2cap_chan *chan;
1719 struct hci_conn *hcon = conn->hcon;
1721 BT_DBG("conn %p", conn);
1723 if (hcon->type == ACL_LINK)
1724 l2cap_request_info(conn);
1726 mutex_lock(&conn->chan_lock);
1728 list_for_each_entry(chan, &conn->chan_l, list) {
1730 l2cap_chan_lock(chan);
/* A2MP fixed channel needs no per-channel setup here */
1732 if (chan->scid == L2CAP_CID_A2MP) {
1733 l2cap_chan_unlock(chan);
1737 if (hcon->type == LE_LINK) {
1738 l2cap_le_start(chan);
1739 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
/* Connectionless channels only need the info exchange done */
1740 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1741 l2cap_chan_ready(chan);
1742 } else if (chan->state == BT_CONNECT) {
1743 l2cap_do_start(chan);
1746 l2cap_chan_unlock(chan);
1749 mutex_unlock(&conn->chan_lock);
1751 if (hcon->type == LE_LINK)
1752 l2cap_le_conn_ready(conn);
/* Process any frames that arrived before the conn was ready */
1754 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1757 /* Notify sockets that we cannot guarantee reliability anymore */
1758 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1760 struct l2cap_chan *chan;
1762 BT_DBG("conn %p", conn);
1764 mutex_lock(&conn->chan_lock);
/* Only channels that demanded reliable delivery get the error */
1766 list_for_each_entry(chan, &conn->chan_l, list) {
1767 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1768 l2cap_chan_set_err(chan, err);
1771 mutex_unlock(&conn->chan_lock);
/* Info-request timer expiry: give up waiting for the peer's feature
 * mask, mark the exchange done and let channel setup proceed anyway.
 */
1774 static void l2cap_info_timeout(struct work_struct *work)
1776 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1779 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1780 conn->info_ident = 0;
1782 l2cap_conn_start(conn);
1787 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1788 * callback is called during registration. The ->remove callback is called
1789 * during unregistration.
1790 * An l2cap_user object can either be explicitly unregistered or when the
1791 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1792 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1793 * External modules must own a reference to the l2cap_conn object if they intend
1794 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1795 * any time if they don't.
/* Register an external l2cap_user on the connection. Performed under the
 * hci_dev lock so registration cannot race with l2cap_conn_del(), which
 * relies on that lock for its own serialization. Calls user->probe() and,
 * on success, links the user into conn->users. Returns 0 or a negative
 * errno.
 */
1798 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1800 struct hci_dev *hdev = conn->hcon->hdev;
1803 /* We need to check whether l2cap_conn is registered. If it is not, we
1804 * must not register the l2cap_user. l2cap_conn_del() unregisters
1805 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1806 * relies on the parent hci_conn object to be locked. This itself relies
1807 * on the hci_dev object to be locked. So we must lock the hci device
/* Refuse double registration of the same user object */
1812 if (!list_empty(&user->list)) {
1817 /* conn->hchan is NULL after l2cap_conn_del() was called */
1823 ret = user->probe(conn, user);
1827 list_add(&user->list, &conn->users);
1831 hci_dev_unlock(hdev);
1834 EXPORT_SYMBOL(l2cap_register_user);
/* Unregister a previously registered l2cap_user: unlink it and invoke
 * its ->remove() callback, all under the hci_dev lock (same ordering as
 * l2cap_register_user()). A user not on the list is ignored.
 */
1836 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1838 struct hci_dev *hdev = conn->hcon->hdev;
1842 if (list_empty(&user->list))
1845 list_del_init(&user->list);
1846 user->remove(conn, user);
1849 hci_dev_unlock(hdev);
1851 EXPORT_SYMBOL(l2cap_unregister_user);
/* Detach every registered l2cap_user from the connection, calling each
 * user's ->remove() callback. Used during connection teardown.
 */
1853 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1855 struct l2cap_user *user;
1857 while (!list_empty(&conn->users)) {
1858 user = list_first_entry(&conn->users, struct l2cap_user, list);
1859 list_del_init(&user->list);
1860 user->remove(conn, user);
/* Tear down the L2CAP connection attached to 'hcon': flush pending RX,
 * cancel deferred work, detach users, close and release every channel
 * with 'err', drop the HCI channel and finally release the conn ref.
 */
1864 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1866 struct l2cap_conn *conn = hcon->l2cap_data;
1867 struct l2cap_chan *chan, *l;
1872 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1874 kfree_skb(conn->rx_skb);
1876 skb_queue_purge(&conn->pending_rx);
1878 /* We can not call flush_work(&conn->pending_rx_work) here since we
1879 * might block if we are running on a worker from the same workqueue
1880 * pending_rx_work is waiting on.
1882 if (work_pending(&conn->pending_rx_work))
1883 cancel_work_sync(&conn->pending_rx_work);
1885 if (work_pending(&conn->id_addr_update_work))
1886 cancel_work_sync(&conn->id_addr_update_work);
1888 l2cap_unregister_all_users(conn);
1890 /* Force the connection to be immediately dropped */
1891 hcon->disc_timeout = 0;
1893 mutex_lock(&conn->chan_lock);
/* Hold a ref across del/close so the channel cannot vanish mid-loop */
1896 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1897 l2cap_chan_hold(chan);
1898 l2cap_chan_lock(chan);
1900 l2cap_chan_del(chan, err);
1902 chan->ops->close(chan);
1904 l2cap_chan_unlock(chan);
1905 l2cap_chan_put(chan);
1908 mutex_unlock(&conn->chan_lock);
1910 hci_chan_del(conn->hchan);
/* The info timer only exists once a feature request has been sent */
1912 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1913 cancel_delayed_work_sync(&conn->info_timer);
1915 hcon->l2cap_data = NULL;
1917 l2cap_conn_put(conn);
/* kref release callback: drop the hci_conn reference held by the
 * l2cap_conn (the conn itself is freed here in the full source).
 */
1920 static void l2cap_conn_free(struct kref *ref)
1922 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1924 hci_conn_put(conn->hcon);
/* Take a reference on the connection; returns the same pointer for
 * call-chaining convenience.
 */
1928 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1930 kref_get(&conn->ref);
1933 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference; the last put releases via l2cap_conn_free(). */
1935 void l2cap_conn_put(struct l2cap_conn *conn)
1937 kref_put(&conn->ref, l2cap_conn_free);
1939 EXPORT_SYMBOL(l2cap_conn_put);
1941 /* ---- Socket interface ---- */
1943 /* Find socket with psm and source / destination bdaddr.
1944 * Returns closest match.
/* An exact src+dst match wins immediately; otherwise the best wildcard
 * (BDADDR_ANY) match collected in c1 is returned with a reference held.
 * The caller owns the returned reference.
 */
1946 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1951 struct l2cap_chan *c, *c1 = NULL;
1953 read_lock(&chan_list_lock);
1955 list_for_each_entry(c, &chan_list, global_l) {
1956 if (state && c->state != state)
/* Channel address type must suit the link transport */
1959 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1962 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1965 if (c->psm == psm) {
1966 int src_match, dst_match;
1967 int src_any, dst_any;
1970 src_match = !bacmp(&c->src, src);
1971 dst_match = !bacmp(&c->dst, dst);
1972 if (src_match && dst_match) {
1974 read_unlock(&chan_list_lock);
/* Fall back to wildcard matches on either address */
1979 src_any = !bacmp(&c->src, BDADDR_ANY);
1980 dst_any = !bacmp(&c->dst, BDADDR_ANY);
1981 if ((src_match && dst_any) || (src_any && dst_match) ||
1982 (src_any && dst_any))
1988 l2cap_chan_hold(c1);
1990 read_unlock(&chan_list_lock);
/* ERTM monitor timer expiry: feed L2CAP_EV_MONITOR_TO into the TX state
 * machine. The timer's channel reference is dropped on every exit path.
 */
1995 static void l2cap_monitor_timeout(struct work_struct *work)
1997 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1998 monitor_timer.work);
2000 BT_DBG("chan %p", chan);
2002 l2cap_chan_lock(chan);
/* Early-out path (condition line missing from this dump) */
2005 l2cap_chan_unlock(chan);
2006 l2cap_chan_put(chan);
2010 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2012 l2cap_chan_unlock(chan);
2013 l2cap_chan_put(chan);
/* ERTM retransmission timer expiry: feed L2CAP_EV_RETRANS_TO into the
 * TX state machine. The timer's channel reference is dropped on exit.
 */
2016 static void l2cap_retrans_timeout(struct work_struct *work)
2018 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2019 retrans_timer.work);
2021 BT_DBG("chan %p", chan);
2023 l2cap_chan_lock(chan);
/* Early-out path (condition line missing from this dump) */
2026 l2cap_chan_unlock(chan);
2027 l2cap_chan_put(chan);
2031 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2032 l2cap_chan_unlock(chan);
2033 l2cap_chan_put(chan);
/* Transmit all queued frames in streaming mode: stamp each skb with the
 * next TX sequence number, append the FCS when CRC16 is in use, and send.
 * Streaming mode keeps no copies -- frames are never retransmitted.
 */
2036 static void l2cap_streaming_send(struct l2cap_chan *chan,
2037 struct sk_buff_head *skbs)
2039 struct sk_buff *skb;
2040 struct l2cap_ctrl *control;
2042 BT_DBG("chan %p, skbs %p", chan, skbs);
/* No transmission while an AMP channel move is in progress */
2044 if (__chan_is_moving(chan))
2047 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2049 while (!skb_queue_empty(&chan->tx_q)) {
2051 skb = skb_dequeue(&chan->tx_q);
2053 bt_cb(skb)->l2cap.retries = 1;
2054 control = &bt_cb(skb)->l2cap;
2056 control->reqseq = 0;
2057 control->txseq = chan->next_tx_seq;
2059 __pack_control(chan, control, skb);
2061 if (chan->fcs == L2CAP_FCS_CRC16) {
2062 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2063 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2066 l2cap_do_send(chan, skb);
2068 BT_DBG("Sent txseq %u", control->txseq);
2070 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2071 chan->frames_sent++;
/* Transmit pending I-frames in ERTM mode while the remote TX window has
 * room. Each frame is stamped with txseq/reqseq (piggy-backed ack), gets
 * an FCS if configured, and a clone is sent so the original stays queued
 * for possible retransmission. Returns the number of frames sent (via
 * 'sent', whose declaration line is missing from this dump).
 */
2075 static int l2cap_ertm_send(struct l2cap_chan *chan)
2077 struct sk_buff *skb, *tx_skb;
2078 struct l2cap_ctrl *control;
2081 BT_DBG("chan %p", chan);
2083 if (chan->state != BT_CONNECTED)
/* Remote signalled busy (RNR): hold transmission */
2086 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2089 if (__chan_is_moving(chan))
2092 while (chan->tx_send_head &&
2093 chan->unacked_frames < chan->remote_tx_win &&
2094 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2096 skb = chan->tx_send_head;
2098 bt_cb(skb)->l2cap.retries = 1;
2099 control = &bt_cb(skb)->l2cap;
2101 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* reqseq acks everything up to buffer_seq on the peer's side */
2104 control->reqseq = chan->buffer_seq;
2105 chan->last_acked_seq = chan->buffer_seq;
2106 control->txseq = chan->next_tx_seq;
2108 __pack_control(chan, control, skb);
2110 if (chan->fcs == L2CAP_FCS_CRC16) {
2111 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2112 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2115 /* Clone after data has been modified. Data is assumed to be
2116 read-only (for locking purposes) on cloned sk_buffs.
2118 tx_skb = skb_clone(skb, GFP_KERNEL);
2123 __set_retrans_timer(chan);
2125 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2126 chan->unacked_frames++;
2127 chan->frames_sent++;
/* Advance tx_send_head to the next unsent frame, if any */
2130 if (skb_queue_is_last(&chan->tx_q, skb))
2131 chan->tx_send_head = NULL;
2133 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2135 l2cap_do_send(chan, tx_skb);
2136 BT_DBG("Sent txseq %u", control->txseq);
2139 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2140 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list. Frames
 * exceeding max_tx retries trigger a disconnect. Cloned skbs must be
 * copied (clones are treated as read-only) before the control field and
 * FCS are rewritten in place for the resend.
 */
2145 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2147 struct l2cap_ctrl control;
2148 struct sk_buff *skb;
2149 struct sk_buff *tx_skb;
2152 BT_DBG("chan %p", chan);
2154 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2157 if (__chan_is_moving(chan))
2160 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2161 seq = l2cap_seq_list_pop(&chan->retrans_list);
2163 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2165 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2170 bt_cb(skb)->l2cap.retries++;
2171 control = bt_cb(skb)->l2cap;
/* Give up and disconnect once the retry limit is exhausted */
2173 if (chan->max_tx != 0 &&
2174 bt_cb(skb)->l2cap.retries > chan->max_tx) {
2175 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2176 l2cap_send_disconn_req(chan, ECONNRESET);
2177 l2cap_seq_list_clear(&chan->retrans_list);
2181 control.reqseq = chan->buffer_seq;
2182 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2187 if (skb_cloned(skb)) {
2188 /* Cloned sk_buffs are read-only, so we need a
2191 tx_skb = skb_copy(skb, GFP_KERNEL);
2193 tx_skb = skb_clone(skb, GFP_KERNEL);
/* Allocation failure: abandon this retransmission pass */
2197 l2cap_seq_list_clear(&chan->retrans_list);
2201 /* Update skb contents */
2202 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2203 put_unaligned_le32(__pack_extended_control(&control),
2204 tx_skb->data + L2CAP_HDR_SIZE);
2206 put_unaligned_le16(__pack_enhanced_control(&control),
2207 tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute FCS since the control field changed */
2211 if (chan->fcs == L2CAP_FCS_CRC16) {
2212 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2213 tx_skb->len - L2CAP_FCS_SIZE);
2214 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2218 l2cap_do_send(chan, tx_skb);
2220 BT_DBG("Resent txseq %d", control.txseq);
2222 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame identified by control->reqseq. */
2226 static void l2cap_retransmit(struct l2cap_chan *chan,
2227 struct l2cap_ctrl *control)
2229 BT_DBG("chan %p, control %p", chan, control);
2231 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2232 l2cap_ertm_resend(chan);
/* Retransmit every unacked frame starting from control->reqseq: rebuild
 * the retrans_list from the TX queue (stopping at tx_send_head, past
 * which nothing was ever sent) and kick off the resend.
 */
2235 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2236 struct l2cap_ctrl *control)
2238 struct sk_buff *skb;
2240 BT_DBG("chan %p, control %p", chan, control);
2243 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2245 l2cap_seq_list_clear(&chan->retrans_list);
2247 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2250 if (chan->unacked_frames) {
/* Find the first frame at/after reqseq (or the unsent head) */
2251 skb_queue_walk(&chan->tx_q, skb) {
2252 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2253 skb == chan->tx_send_head)
2257 skb_queue_walk_from(&chan->tx_q, skb) {
2258 if (skb == chan->tx_send_head)
2261 l2cap_seq_list_append(&chan->retrans_list,
2262 bt_cb(skb)->l2cap.txseq);
2265 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames: send RNR when locally busy, otherwise
 * try to piggy-back the ack on outgoing I-frames, and fall back to an
 * explicit RR once about 3/4 of the ack window is outstanding; below
 * that threshold just (re)arm the ack timer.
 */
2269 static void l2cap_send_ack(struct l2cap_chan *chan)
2271 struct l2cap_ctrl control;
2272 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2273 chan->last_acked_seq);
2276 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2277 chan, chan->last_acked_seq, chan->buffer_seq);
2279 memset(&control, 0, sizeof(control));
/* Locally busy in plain RECV state: tell the peer with an RNR */
2282 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2283 chan->rx_state == L2CAP_RX_STATE_RECV) {
2284 __clear_ack_timer(chan);
2285 control.super = L2CAP_SUPER_RNR;
2286 control.reqseq = chan->buffer_seq;
2287 l2cap_send_sframe(chan, &control);
2289 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2290 l2cap_ertm_send(chan);
2291 /* If any i-frames were sent, they included an ack */
2292 if (chan->buffer_seq == chan->last_acked_seq)
2296 /* Ack now if the window is 3/4ths full.
2297 * Calculate without mul or div
2299 threshold = chan->ack_win;
/* threshold = ack_win * 3 (halved on a line missing from this dump) */
2300 threshold += threshold << 1;
2303 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2306 if (frames_to_ack >= threshold) {
2307 __clear_ack_timer(chan);
2308 control.super = L2CAP_SUPER_RR;
2309 control.reqseq = chan->buffer_seq;
2310 l2cap_send_sframe(chan, &control);
2315 __set_ack_timer(chan);
/* Copy 'len' bytes of user data from 'msg' into 'skb': the first 'count'
 * bytes go into the skb head, the remainder into a chain of fragment
 * skbs (each at most conn->mtu) hung off frag_list. Returns 0 or a
 * negative errno.
 */
2319 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2320 struct msghdr *msg, int len,
2321 int count, struct sk_buff *skb)
2323 struct l2cap_conn *conn = chan->conn;
2324 struct sk_buff **frag;
2327 if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2333 /* Continuation fragments (no L2CAP header) */
2334 frag = &skb_shinfo(skb)->frag_list;
2336 struct sk_buff *tmp;
2338 count = min_t(unsigned int, conn->mtu, len);
2340 tmp = chan->ops->alloc_skb(chan, 0, count,
2341 msg->msg_flags & MSG_DONTWAIT);
2343 return PTR_ERR(tmp);
2347 if (!copy_from_iter_full(skb_put(*frag, count), count,
/* Account fragment bytes in the parent skb's totals */
2354 skb->len += (*frag)->len;
2355 skb->data_len += (*frag)->len;
2357 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * followed by the user payload pulled from 'msg'. Returns the skb or an
 * ERR_PTR on allocation/copy failure.
 */
2363 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2364 struct msghdr *msg, size_t len)
2366 struct l2cap_conn *conn = chan->conn;
2367 struct sk_buff *skb;
2368 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2369 struct l2cap_hdr *lh;
2371 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2372 __le16_to_cpu(chan->psm), len);
/* Head skb carries at most one link MTU minus the headers */
2374 count = min_t(unsigned int, (conn->mtu - hlen), len);
2376 skb = chan->ops->alloc_skb(chan, hlen, count,
2377 msg->msg_flags & MSG_DONTWAIT);
2381 /* Create L2CAP header */
2382 lh = skb_put(skb, L2CAP_HDR_SIZE);
2383 lh->cid = cpu_to_le16(chan->dcid);
2384 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2385 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2387 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2388 if (unlikely(err < 0)) {
2390 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload from 'msg'. Returns the skb or an ERR_PTR on failure.
 */
2395 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2396 struct msghdr *msg, size_t len)
2398 struct l2cap_conn *conn = chan->conn;
2399 struct sk_buff *skb;
2401 struct l2cap_hdr *lh;
2403 BT_DBG("chan %p len %zu", chan, len);
2405 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2407 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2408 msg->msg_flags & MSG_DONTWAIT);
2412 /* Create L2CAP header */
2413 lh = skb_put(skb, L2CAP_HDR_SIZE);
2414 lh->cid = cpu_to_le16(chan->dcid);
2415 lh->len = cpu_to_le16(len);
2417 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2418 if (unlikely(err < 0)) {
2420 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (filled in at transmit time), an optional SDU-length field for
 * the first segment, the payload, and room reserved for the FCS.
 * Returns the skb or an ERR_PTR on failure.
 */
2425 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2426 struct msghdr *msg, size_t len,
2429 struct l2cap_conn *conn = chan->conn;
2430 struct sk_buff *skb;
2431 int err, count, hlen;
2432 struct l2cap_hdr *lh;
2434 BT_DBG("chan %p len %zu", chan, len);
2437 return ERR_PTR(-ENOTCONN);
2439 hlen = __ertm_hdr_size(chan);
/* First segment of a segmented SDU carries the SDU length */
2442 hlen += L2CAP_SDULEN_SIZE;
2444 if (chan->fcs == L2CAP_FCS_CRC16)
2445 hlen += L2CAP_FCS_SIZE;
2447 count = min_t(unsigned int, (conn->mtu - hlen), len);
2449 skb = chan->ops->alloc_skb(chan, hlen, count,
2450 msg->msg_flags & MSG_DONTWAIT);
2454 /* Create L2CAP header */
2455 lh = skb_put(skb, L2CAP_HDR_SIZE);
2456 lh->cid = cpu_to_le16(chan->dcid);
2457 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2459 /* Control header is populated later */
2460 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2461 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2463 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2466 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2468 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2469 if (unlikely(err < 0)) {
2471 return ERR_PTR(err);
2474 bt_cb(skb)->l2cap.fcs = chan->fcs;
2475 bt_cb(skb)->l2cap.retries = 0;
/* Segment an SDU from 'msg' into I-frame PDUs queued on 'seg_queue'.
 * The PDU size is bounded by the HCI MTU (and L2CAP_BREDR_MAX_PAYLOAD
 * on BR/EDR), minus worst-case ERTM header and FCS overhead, and by the
 * remote's MPS. SAR markers (UNSEGMENTED/START/CONTINUE/END) are set on
 * each segment. Returns 0 or a negative errno (queue purged on error).
 */
2479 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2480 struct sk_buff_head *seg_queue,
2481 struct msghdr *msg, size_t len)
2483 struct sk_buff *skb;
2488 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2490 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2491 * so fragmented skbs are not used. The HCI layer's handling
2492 * of fragmented skbs is not compatible with ERTM's queueing.
2495 /* PDU size is derived from the HCI MTU */
2496 pdu_len = chan->conn->mtu;
2498 /* Constrain PDU size for BR/EDR connections */
2500 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2502 /* Adjust for largest possible L2CAP overhead. */
2504 pdu_len -= L2CAP_FCS_SIZE;
2506 pdu_len -= __ertm_hdr_size(chan);
2508 /* Remote device may have requested smaller PDUs */
2509 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2511 if (len <= pdu_len) {
2512 sar = L2CAP_SAR_UNSEGMENTED;
2516 sar = L2CAP_SAR_START;
2521 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2524 __skb_queue_purge(seg_queue);
2525 return PTR_ERR(skb);
2528 bt_cb(skb)->l2cap.sar = sar;
2529 __skb_queue_tail(seg_queue, skb);
/* Tail segment gets END, intermediate ones CONTINUE */
2535 if (len <= pdu_len) {
2536 sar = L2CAP_SAR_END;
2539 sar = L2CAP_SAR_CONTINUE;
/* Build an LE credit-based flow-control PDU: L2CAP header, an optional
 * SDU-length field on the first segment, then the payload from 'msg'.
 * Returns the skb or an ERR_PTR on failure.
 */
2546 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2548 size_t len, u16 sdulen)
2550 struct l2cap_conn *conn = chan->conn;
2551 struct sk_buff *skb;
2552 int err, count, hlen;
2553 struct l2cap_hdr *lh;
2555 BT_DBG("chan %p len %zu", chan, len);
2558 return ERR_PTR(-ENOTCONN);
2560 hlen = L2CAP_HDR_SIZE;
/* First segment carries the total SDU length */
2563 hlen += L2CAP_SDULEN_SIZE;
2565 count = min_t(unsigned int, (conn->mtu - hlen), len);
2567 skb = chan->ops->alloc_skb(chan, hlen, count,
2568 msg->msg_flags & MSG_DONTWAIT);
2572 /* Create L2CAP header */
2573 lh = skb_put(skb, L2CAP_HDR_SIZE);
2574 lh->cid = cpu_to_le16(chan->dcid);
2575 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2578 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2580 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2581 if (unlikely(err < 0)) {
2583 return ERR_PTR(err);
/* Segment an SDU into LE flow-control PDUs on 'seg_queue'. The first
 * segment reserves room for the SDU-length field (so its payload is
 * remote_mps minus SDULEN), later segments use the full remote MPS.
 * Returns 0 or a negative errno (queue purged on error).
 */
2589 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2590 struct sk_buff_head *seg_queue,
2591 struct msghdr *msg, size_t len)
2593 struct sk_buff *skb;
2597 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2600 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2606 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2608 __skb_queue_purge(seg_queue);
2609 return PTR_ERR(skb);
2612 __skb_queue_tail(seg_queue, skb);
/* Only the first segment carried the SDU length field */
2618 pdu_len += L2CAP_SDULEN_SIZE;
/* Drain the TX queue while credits remain; each sent frame consumes one
 * credit (the decrement line is missing from this dump -- confirm).
 */
2625 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2629 BT_DBG("chan %p", chan);
2631 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2632 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2637 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2638 skb_queue_len(&chan->tx_q));
/* Main data transmit entry point: build and send PDUs for 'msg'
 * according to the channel type and mode (connectionless, LE/extended
 * flow control, basic, ERTM, streaming). Returns bytes sent or a
 * negative errno. Segmentation may drop the channel lock, so the state
 * is rechecked after each PDU-building step.
 */
2641 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2643 struct sk_buff *skb;
2645 struct sk_buff_head seg_queue;
2650 /* Connectionless channel */
2651 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2652 skb = l2cap_create_connless_pdu(chan, msg, len);
2654 return PTR_ERR(skb);
2656 /* Channel lock is released before requesting new skb and then
2657 * reacquired thus we need to recheck channel state.
2659 if (chan->state != BT_CONNECTED) {
2664 l2cap_do_send(chan, skb);
2668 switch (chan->mode) {
2669 case L2CAP_MODE_LE_FLOWCTL:
2670 case L2CAP_MODE_EXT_FLOWCTL:
2671 /* Check outgoing MTU */
2672 if (len > chan->omtu)
2675 __skb_queue_head_init(&seg_queue);
2677 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2679 if (chan->state != BT_CONNECTED) {
2680 __skb_queue_purge(&seg_queue);
2687 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2689 l2cap_le_flowctl_send(chan);
/* Out of credits: stall the owner until the peer grants more */
2691 if (!chan->tx_credits)
2692 chan->ops->suspend(chan);
2698 case L2CAP_MODE_BASIC:
2699 /* Check outgoing MTU */
2700 if (len > chan->omtu)
2703 /* Create a basic PDU */
2704 skb = l2cap_create_basic_pdu(chan, msg, len);
2706 return PTR_ERR(skb);
2708 /* Channel lock is released before requesting new skb and then
2709 * reacquired thus we need to recheck channel state.
2711 if (chan->state != BT_CONNECTED) {
2716 l2cap_do_send(chan, skb);
2720 case L2CAP_MODE_ERTM:
2721 case L2CAP_MODE_STREAMING:
2722 /* Check outgoing MTU */
2723 if (len > chan->omtu) {
2728 __skb_queue_head_init(&seg_queue);
2730 /* Do segmentation before calling in to the state machine,
2731 * since it's possible to block while waiting for memory
2734 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2736 /* The channel could have been closed while segmenting,
2737 * check that it is still connected.
2739 if (chan->state != BT_CONNECTED) {
2740 __skb_queue_purge(&seg_queue);
2747 if (chan->mode == L2CAP_MODE_ERTM)
2748 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2750 l2cap_streaming_send(chan, &seg_queue);
2754 /* If the skbs were not queued for sending, they'll still be in
2755 * seg_queue and need to be purged.
2757 __skb_queue_purge(&seg_queue);
2761 BT_DBG("bad state %1.1x", chan->mode);
/* Send SREJ S-frames for every missing sequence number between the
 * expected txseq and the received 'txseq', recording each on srej_list.
 * Sequences already buffered in srej_q are skipped.
 */
2769 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2771 struct l2cap_ctrl control;
2774 BT_DBG("chan %p, txseq %u", chan, txseq);
2776 memset(&control, 0, sizeof(control));
2778 control.super = L2CAP_SUPER_SREJ;
2780 for (seq = chan->expected_tx_seq; seq != txseq;
2781 seq = __next_seq(chan, seq)) {
2782 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2783 control.reqseq = seq;
2784 l2cap_send_sframe(chan, &control);
2785 l2cap_seq_list_append(&chan->srej_list, seq);
2789 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recent entry on srej_list (no-op when
 * the list is empty).
 */
2792 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2794 struct l2cap_ctrl control;
2796 BT_DBG("chan %p", chan);
2798 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2801 memset(&control, 0, sizeof(control));
2803 control.super = L2CAP_SUPER_SREJ;
2804 control.reqseq = chan->srej_list.tail;
2805 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding entry on srej_list except 'txseq'
 * (which just arrived). Each popped entry is re-appended, so the loop
 * stops after one full pass by comparing against the initial head.
 */
2808 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2810 struct l2cap_ctrl control;
2814 BT_DBG("chan %p, txseq %u", chan, txseq);
2816 memset(&control, 0, sizeof(control));
2818 control.super = L2CAP_SUPER_SREJ;
2820 /* Capture initial list head to allow only one pass through the list. */
2821 initial_head = chan->srej_list.head;
2824 seq = l2cap_seq_list_pop(&chan->srej_list);
2825 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2828 control.reqseq = seq;
2829 l2cap_send_sframe(chan, &control);
2830 l2cap_seq_list_append(&chan->srej_list, seq);
2831 } while (chan->srej_list.head != initial_head);
/* Process an incoming ack (reqseq): free every acked frame from the TX
 * queue between expected_ack_seq and reqseq, update the expected ack
 * sequence, and stop the retransmit timer once nothing is outstanding.
 */
2834 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2836 struct sk_buff *acked_skb;
2839 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing outstanding, or a duplicate ack: nothing to do */
2841 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2844 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2845 chan->expected_ack_seq, chan->unacked_frames);
2847 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2848 ackseq = __next_seq(chan, ackseq)) {
2850 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2852 skb_unlink(acked_skb, &chan->tx_q);
2853 kfree_skb(acked_skb);
2854 chan->unacked_frames--;
2858 chan->expected_ack_seq = reqseq;
2860 if (chan->unacked_frames == 0)
2861 __clear_retrans_timer(chan);
2863 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon SREJ-based recovery on the receive side: rewind the expected
 * TX sequence to buffer_seq, drop the SREJ list and any out-of-order
 * frames buffered in srej_q, and return the receiver to plain RECV.
 */
2866 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2868 BT_DBG("chan %p", chan);
2870 chan->expected_tx_seq = chan->buffer_seq;
2871 l2cap_seq_list_clear(&chan->srej_list);
2872 skb_queue_purge(&chan->srej_q);
2873 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM transmit-side state machine, XMIT state.
 *
 * Events handled: queue-and-send new data; enter/leave local-busy
 * (aborting SREJ_SENT recovery when needed and acking/RR-polling the
 * peer); process acked ReqSeq; issue an explicit poll or a
 * retransmission-timeout poll, both of which move the channel to
 * WAIT_F with the monitor timer armed.  Per-case break statements and
 * some lines are elided in this view.
 */
2876 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2877 struct l2cap_ctrl *control,
2878 struct sk_buff_head *skbs, u8 event)
2880 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2884 case L2CAP_EV_DATA_REQUEST:
2885 if (chan->tx_send_head == NULL)
2886 chan->tx_send_head = skb_peek(skbs)
2888 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2889 l2cap_ertm_send(chan);
2891 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2892 BT_DBG("Enter LOCAL_BUSY");
2893 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2895 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2896 /* The SREJ_SENT state must be aborted if we are to
2897 * enter the LOCAL_BUSY state.
2899 l2cap_abort_rx_srej_sent(chan);
2902 l2cap_send_ack(chan);
2905 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2906 BT_DBG("Exit LOCAL_BUSY");
2907 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we told the peer we were busy (RNR), send an RR poll so the
 * peer resumes and we wait for the final response in WAIT_F.
 */
2909 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2910 struct l2cap_ctrl local_control;
2912 memset(&local_control, 0, sizeof(local_control));
2913 local_control.sframe = 1;
2914 local_control.super = L2CAP_SUPER_RR;
2915 local_control.poll = 1;
2916 local_control.reqseq = chan->buffer_seq;
2917 l2cap_send_sframe(chan, &local_control);
2919 chan->retry_count = 1;
2920 __set_monitor_timer(chan);
2921 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2924 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2925 l2cap_process_reqseq(chan, control->reqseq);
2927 case L2CAP_EV_EXPLICIT_POLL:
2928 l2cap_send_rr_or_rnr(chan, 1);
2929 chan->retry_count = 1;
2930 __set_monitor_timer(chan);
2931 __clear_ack_timer(chan);
2932 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2934 case L2CAP_EV_RETRANS_TO:
2935 l2cap_send_rr_or_rnr(chan, 1);
2936 chan->retry_count = 1;
2937 __set_monitor_timer(chan);
2938 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2940 case L2CAP_EV_RECV_FBIT:
2941 /* Nothing to process */
/* ERTM transmit-side state machine, WAIT_F state (waiting for an
 * S/I-frame with the Final bit after we sent a Poll).
 *
 * New data is queued but not sent; local-busy transitions mirror the
 * XMIT state; a received F-bit clears the monitor timer, rearms the
 * retransmission timer if frames are outstanding, and returns to XMIT;
 * a monitor timeout re-polls up to max_tx times before disconnecting.
 * Per-case break statements and some lines are elided in this view.
 */
2948 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2949 struct l2cap_ctrl *control,
2950 struct sk_buff_head *skbs, u8 event)
2952 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2956 case L2CAP_EV_DATA_REQUEST:
2957 if (chan->tx_send_head == NULL)
2958 chan->tx_send_head = skb_peek(skbs);
2959 /* Queue data, but don't send. */
2960 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2962 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2963 BT_DBG("Enter LOCAL_BUSY");
2964 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2966 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2967 /* The SREJ_SENT state must be aborted if we are to
2968 * enter the LOCAL_BUSY state.
2970 l2cap_abort_rx_srej_sent(chan);
2973 l2cap_send_ack(chan);
2976 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2977 BT_DBG("Exit LOCAL_BUSY");
2978 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2980 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2981 struct l2cap_ctrl local_control;
2982 memset(&local_control, 0, sizeof(local_control));
2983 local_control.sframe = 1;
2984 local_control.super = L2CAP_SUPER_RR;
2985 local_control.poll = 1;
2986 local_control.reqseq = chan->buffer_seq;
2987 l2cap_send_sframe(chan, &local_control);
2989 chan->retry_count = 1;
2990 __set_monitor_timer(chan);
2991 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2994 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2995 l2cap_process_reqseq(chan, control->reqseq);
2998 case L2CAP_EV_RECV_FBIT:
/* control may be NULL for locally generated events; only a real
 * frame carrying the Final bit ends the WAIT_F state.
 */
2999 if (control && control->final) {
3000 __clear_monitor_timer(chan);
3001 if (chan->unacked_frames > 0)
3002 __set_retrans_timer(chan);
3003 chan->retry_count = 0;
3004 chan->tx_state = L2CAP_TX_STATE_XMIT;
3005 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
3008 case L2CAP_EV_EXPLICIT_POLL:
3011 case L2CAP_EV_MONITOR_TO:
/* max_tx == 0 means unlimited retries. */
3012 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3013 l2cap_send_rr_or_rnr(chan, 1);
3014 __set_monitor_timer(chan);
3015 chan->retry_count++;
3017 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch a transmit-side event to the handler for the channel's
 * current TX state (XMIT or WAIT_F).
 */
3025 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3026 struct sk_buff_head *skbs, u8 event)
3028 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3029 chan, control, skbs, event, chan->tx_state);
3031 switch (chan->tx_state) {
3032 case L2CAP_TX_STATE_XMIT:
3033 l2cap_tx_state_xmit(chan, control, skbs, event);
3035 case L2CAP_TX_STATE_WAIT_F:
3036 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed a received frame's ReqSeq (and F-bit) into the TX state machine
 * so outstanding I-frames can be acknowledged.
 */
3044 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3045 struct l2cap_ctrl *control)
3047 BT_DBG("chan %p, control %p", chan, control);
3048 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed only a received frame's Final bit into the TX state machine
 * (no ReqSeq processing).
 */
3051 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3052 struct l2cap_ctrl *control)
3054 BT_DBG("chan %p, control %p", chan, control);
3055 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3058 /* Copy frame to all raw sockets on that connection */
3059 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3061 struct sk_buff *nskb;
3062 struct l2cap_chan *chan;
3064 BT_DBG("conn %p", conn);
/* chan_lock protects the per-connection channel list during the walk. */
3066 mutex_lock(&conn->chan_lock);
3068 list_for_each_entry(chan, &conn->chan_l, list) {
3069 if (chan->chan_type != L2CAP_CHAN_RAW)
3072 /* Don't send frame to the channel it came from */
3073 if (bt_cb(skb)->l2cap.chan == chan)
/* Clone per recipient; recv() takes ownership of nskb on success.
 * NOTE(review): the NULL check on nskb is elided in this view.
 */
3076 nskb = skb_clone(skb, GFP_KERNEL);
3079 if (chan->ops->recv(chan, nskb))
3083 mutex_unlock(&conn->chan_lock);
3086 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command PDU: L2CAP header + command header +
 * dlen bytes of payload, fragmented into an skb frag_list when the
 * total exceeds the connection MTU.  The signalling CID is chosen by
 * link type (LE vs BR/EDR).  Returns the skb, or NULL on allocation
 * failure / undersized MTU (error paths are elided in this view).
 */
3087 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3088 u8 ident, u16 dlen, void *data)
3090 struct sk_buff *skb, **frag;
3091 struct l2cap_cmd_hdr *cmd;
3092 struct l2cap_hdr *lh;
3095 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3096 conn, code, ident, dlen);
3098 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3101 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3102 count = min_t(unsigned int, conn->mtu, len);
3104 skb = bt_skb_alloc(count, GFP_KERNEL);
3108 lh = skb_put(skb, L2CAP_HDR_SIZE);
3109 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3111 if (conn->hcon->type == LE_LINK)
3112 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3114 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3116 cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3119 cmd->len = cpu_to_le16(dlen);
/* First fragment carries as much payload as fits after the headers. */
3122 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3123 skb_put_data(skb, data, count);
3129 /* Continuation fragments (no L2CAP header) */
3130 frag = &skb_shinfo(skb)->frag_list;
3132 count = min_t(unsigned int, conn->mtu, len);
3134 *frag = bt_skb_alloc(count, GFP_KERNEL);
3138 skb_put_data(*frag, data, count);
3143 frag = &(*frag)->next;
/* Parse one configuration option at *ptr: return its type and length
 * through the out-parameters and its value through *val, decoding 1-,
 * 2- and 4-byte values inline and returning a pointer (cast to
 * unsigned long) for variable-size options.  Returns the total number
 * of bytes consumed so the caller can advance through the option list.
 */
3153 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3156 struct l2cap_conf_opt *opt = *ptr;
3159 len = L2CAP_CONF_OPT_SIZE + opt->len;
3167 *val = *((u8 *) opt->val);
3171 *val = get_unaligned_le16(opt->val);
3175 *val = get_unaligned_le32(opt->val);
/* Larger options are returned by reference, not copied. */
3179 *val = (unsigned long) opt->val;
3183 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option to the buffer at *ptr, bounds-checked
 * against 'size'.  Values of 1, 2 and 4 bytes are stored inline
 * (little-endian); larger values are memcpy'd from the pointer passed
 * in 'val'.  *ptr is advanced past the option written.
 */
3187 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3189 struct l2cap_conf_opt *opt = *ptr;
3191 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
/* Refuse to write past the end of the response buffer. */
3193 if (size < L2CAP_CONF_OPT_SIZE + len)
3201 *((u8 *) opt->val) = val;
3205 put_unaligned_le16(val, opt->val);
3209 put_unaligned_le32(val, opt->val);
3213 memcpy(opt->val, (void *) val, len);
3217 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification option from the channel's local
 * parameters (full set for ERTM, best-effort defaults for streaming)
 * and append it via l2cap_add_conf_opt().
 */
3220 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3222 struct l2cap_conf_efs efs;
3224 switch (chan->mode) {
3225 case L2CAP_MODE_ERTM:
3226 efs.id = chan->local_id;
3227 efs.stype = chan->local_stype;
3228 efs.msdu = cpu_to_le16(chan->local_msdu);
3229 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3230 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3231 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3234 case L2CAP_MODE_STREAMING:
/* NOTE(review): efs.id assignment for streaming mode is elided here. */
3236 efs.stype = L2CAP_SERV_BESTEFFORT;
3237 efs.msdu = cpu_to_le16(chan->local_msdu);
3238 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3247 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3248 (unsigned long) &efs, size);
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * are still unacknowledged (buffer_seq ahead of last_acked_seq), send
 * an RR/RNR acknowledgement.  Drops the channel reference taken when
 * the work was scheduled.
 */
3251 static void l2cap_ack_timeout(struct work_struct *work)
3253 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3257 BT_DBG("chan %p", chan);
3259 l2cap_chan_lock(chan);
3261 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3262 chan->last_acked_seq);
/* NOTE(review): the frames_to_ack != 0 guard is elided in this view. */
3265 l2cap_send_rr_or_rnr(chan, 0);
3267 l2cap_chan_unlock(chan);
3268 l2cap_chan_put(chan);
/* Initialise ERTM/streaming state on a channel: zero all sequence
 * counters, reset AMP move state, and -- for ERTM only -- set the
 * initial RX/TX states, set up the retransmission/monitor/ack delayed
 * work items, and allocate the SREJ and retransmission sequence lists.
 * Returns 0 on success or a negative errno from seq-list allocation
 * (the srej_list is freed if the retrans_list allocation fails).
 */
3271 int l2cap_ertm_init(struct l2cap_chan *chan)
3275 chan->next_tx_seq = 0;
3276 chan->expected_tx_seq = 0;
3277 chan->expected_ack_seq = 0;
3278 chan->unacked_frames = 0;
3279 chan->buffer_seq = 0;
3280 chan->frames_sent = 0;
3281 chan->last_acked_seq = 0;
3283 chan->sdu_last_frag = NULL;
3286 skb_queue_head_init(&chan->tx_q);
3288 chan->local_amp_id = AMP_ID_BREDR;
3289 chan->move_id = AMP_ID_BREDR;
3290 chan->move_state = L2CAP_MOVE_STABLE;
3291 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the ERTM timers or SREJ machinery. */
3293 if (chan->mode != L2CAP_MODE_ERTM)
3296 chan->rx_state = L2CAP_RX_STATE_RECV;
3297 chan->tx_state = L2CAP_TX_STATE_XMIT;
3299 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3300 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3301 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3303 skb_queue_head_init(&chan->srej_q);
3305 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3309 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3311 l2cap_seq_list_free(&chan->srej_list);
/* Pick the channel mode to use: keep the requested ERTM/streaming mode
 * if the remote's feature mask supports it, otherwise fall back to
 * basic mode.
 */
3316 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3319 case L2CAP_MODE_STREAMING:
3320 case L2CAP_MODE_ERTM:
3321 if (l2cap_mode_supported(mode, remote_feat_mask))
3325 return L2CAP_MODE_BASIC;
/* Extended window size is usable only when both the local A2MP fixed
 * channel and the remote's extended-window feature bit are present.
 */
3329 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3331 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3332 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
/* Extended flow specification is usable only when both the local A2MP
 * fixed channel and the remote's extended-flow feature bit are present.
 */
3335 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3337 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3338 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
/* Fill in the RFC option's retransmission and monitor timeouts.  On an
 * AMP link they are derived from the controller's best-effort flush
 * timeout (converted to ms, scaled, and clamped to 16 bits); on BR/EDR
 * the spec default values are used.
 */
3341 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3342 struct l2cap_conf_rfc *rfc)
3344 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3345 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3347 /* Class 1 devices have must have ERTM timeouts
3348 * exceeding the Link Supervision Timeout. The
3349 * default Link Supervision Timeout for AMP
3350 * controllers is 10 seconds.
3352 * Class 1 devices use 0xffffffff for their
3353 * best-effort flush timeout, so the clamping logic
3354 * will result in a timeout that meets the above
3355 * requirement. ERTM timeouts are 16-bit values, so
3356 * the maximum timeout is 65.535 seconds.
3359 /* Convert timeout to milliseconds and round */
3360 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3362 /* This is the recommended formula for class 2 devices
3363 * that start ERTM timers when packets are sent to the
3366 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field; the assignment to 0xffff is elided. */
3368 if (ertm_to > 0xffff)
3371 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3372 rfc->monitor_timeout = rfc->retrans_timeout;
3374 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3375 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Choose the TX window: enable the extended control field (and the
 * extended window maximum) when the requested window exceeds the
 * default and the peer supports EWS; otherwise clamp to the default
 * window.  ack_win starts out mirroring tx_win.
 */
3379 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3381 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3382 __l2cap_ews_supported(chan->conn)) {
3383 /* use extended control field */
3384 set_bit(FLAG_EXT_CTRL, &chan->flags);
3385 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3387 chan->tx_win = min_t(u16, chan->tx_win,
3388 L2CAP_DEFAULT_TX_WINDOW);
3389 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3391 chan->ack_win = chan->tx_win;
/* Derive the incoming MTU from the ACL packet types the link supports,
 * starting at the minimum MTU and stepping up through the 2-DH1/3-DH1/
 * 2-DH3/3-DH3/2-DH5/3-DH5 payload capacities until an unsupported
 * packet type is hit.  (The MTU assignments between the checks are
 * elided in this view; each pkt_type bit set means that packet type is
 * NOT supported, per HCI convention.)
 */
3394 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3396 struct hci_conn *conn = chan->conn->hcon;
3398 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3400 /* The 2-DH1 packet has between 2 and 56 information bytes
3401 * (including the 2-byte payload header)
3403 if (!(conn->pkt_type & HCI_2DH1))
3406 /* The 3-DH1 packet has between 2 and 85 information bytes
3407 * (including the 2-byte payload header)
3409 if (!(conn->pkt_type & HCI_3DH1))
3412 /* The 2-DH3 packet has between 2 and 369 information bytes
3413 * (including the 2-byte payload header)
3415 if (!(conn->pkt_type & HCI_2DH3))
3418 /* The 3-DH3 packet has between 2 and 554 information bytes
3419 * (including the 2-byte payload header)
3421 if (!(conn->pkt_type & HCI_3DH3))
3424 /* The 2-DH5 packet has between 2 and 681 information bytes
3425 * (including the 2-byte payload header)
3427 if (!(conn->pkt_type & HCI_2DH5))
3430 /* The 3-DH5 packet has between 2 and 1023 information bytes
3431 * (including the 2-byte payload header)
3433 if (!(conn->pkt_type & HCI_3DH5))
/* Build an outgoing Configuration Request into 'data' (bounded by
 * data_size).  On the first request the mode may be downgraded via
 * l2cap_select_mode(); then MTU, RFC, EFS, extended-window and FCS
 * options are appended as appropriate for basic / ERTM / streaming
 * mode.  Returns the total request length (return statement elided in
 * this view).
 */
3437 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3439 struct l2cap_conf_req *req = data;
3440 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3441 void *ptr = req->data;
3442 void *endptr = data + data_size;
3445 BT_DBG("chan %p", chan);
/* Mode negotiation only happens on the very first config exchange. */
3447 if (chan->num_conf_req || chan->num_conf_rsp)
3450 switch (chan->mode) {
3451 case L2CAP_MODE_STREAMING:
3452 case L2CAP_MODE_ERTM:
3453 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3456 if (__l2cap_efs_supported(chan->conn))
3457 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3461 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3466 if (chan->imtu != L2CAP_DEFAULT_MTU) {
3468 l2cap_mtu_auto(chan);
3469 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3473 switch (chan->mode) {
3474 case L2CAP_MODE_BASIC:
/* Only send an explicit basic-mode RFC when the peer understands
 * retransmission/streaming options at all.
 */
3478 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3479 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3482 rfc.mode = L2CAP_MODE_BASIC;
3484 rfc.max_transmit = 0;
3485 rfc.retrans_timeout = 0;
3486 rfc.monitor_timeout = 0;
3487 rfc.max_pdu_size = 0;
3489 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3490 (unsigned long) &rfc, endptr - ptr);
3493 case L2CAP_MODE_ERTM:
3494 rfc.mode = L2CAP_MODE_ERTM;
3495 rfc.max_transmit = chan->max_tx;
3497 __l2cap_set_ertm_timeouts(chan, &rfc);
/* PDU size is bounded by the connection MTU minus ERTM overhead. */
3499 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3500 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3502 rfc.max_pdu_size = cpu_to_le16(size);
3504 l2cap_txwin_setup(chan);
3506 rfc.txwin_size = min_t(u16, chan->tx_win,
3507 L2CAP_DEFAULT_TX_WINDOW);
3509 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3510 (unsigned long) &rfc, endptr - ptr);
3512 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3513 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3515 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3516 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3517 chan->tx_win, endptr - ptr);
3519 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3520 if (chan->fcs == L2CAP_FCS_NONE ||
3521 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3522 chan->fcs = L2CAP_FCS_NONE;
3523 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3524 chan->fcs, endptr - ptr);
3528 case L2CAP_MODE_STREAMING:
3529 l2cap_txwin_setup(chan);
3530 rfc.mode = L2CAP_MODE_STREAMING;
3532 rfc.max_transmit = 0;
3533 rfc.retrans_timeout = 0;
3534 rfc.monitor_timeout = 0;
3536 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3537 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3539 rfc.max_pdu_size = cpu_to_le16(size);
3541 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3542 (unsigned long) &rfc, endptr - ptr);
3544 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3545 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3547 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3548 if (chan->fcs == L2CAP_FCS_NONE ||
3549 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3550 chan->fcs = L2CAP_FCS_NONE;
3551 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3552 chan->fcs, endptr - ptr);
3557 req->dcid = cpu_to_le16(chan->dcid);
3558 req->flags = cpu_to_le16(0);
/* Parse the peer's buffered Configuration Request (chan->conf_req /
 * chan->conf_len) and build the Configuration Response into 'data'
 * (bounded by data_size).
 *
 * Phase 1 walks the option list, recording MTU, flush timeout, RFC,
 * FCS, EFS and EWS values and rejecting unknown non-hint options with
 * L2CAP_CONF_UNKNOWN.  Phase 2 validates the requested mode against
 * ours, then -- on success -- applies the negotiated parameters
 * (remote tx window, max_tx, MPS, EFS fields) and echoes the accepted
 * options back.  Returns the response length, or -ECONNREFUSED when
 * negotiation cannot proceed.  Some option-length validation branches
 * are elided in this view.
 */
3563 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3565 struct l2cap_conf_rsp *rsp = data;
3566 void *ptr = rsp->data;
3567 void *endptr = data + data_size;
3568 void *req = chan->conf_req;
3569 int len = chan->conf_len;
3570 int type, hint, olen;
3572 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3573 struct l2cap_conf_efs efs;
3575 u16 mtu = L2CAP_DEFAULT_MTU;
3576 u16 result = L2CAP_CONF_SUCCESS;
3579 BT_DBG("chan %p", chan);
3581 while (len >= L2CAP_CONF_OPT_SIZE) {
3582 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options (high bit set) may be ignored; others must be known. */
3586 hint = type & L2CAP_CONF_HINT;
3587 type &= L2CAP_CONF_MASK;
3590 case L2CAP_CONF_MTU:
3596 case L2CAP_CONF_FLUSH_TO:
3599 chan->flush_to = val;
3602 case L2CAP_CONF_QOS:
3605 case L2CAP_CONF_RFC:
3606 if (olen != sizeof(rfc))
3608 memcpy(&rfc, (void *) val, olen);
3611 case L2CAP_CONF_FCS:
3614 if (val == L2CAP_FCS_NONE)
3615 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3618 case L2CAP_CONF_EFS:
3619 if (olen != sizeof(efs))
3622 memcpy(&efs, (void *) val, olen);
3625 case L2CAP_CONF_EWS:
/* EWS requires A2MP support locally; otherwise refuse outright. */
3628 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3629 return -ECONNREFUSED;
3630 set_bit(FLAG_EXT_CTRL, &chan->flags);
3631 set_bit(CONF_EWS_RECV, &chan->conf_state);
3632 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3633 chan->remote_tx_win = val;
3639 result = L2CAP_CONF_UNKNOWN;
3640 l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
/* Mode may only be renegotiated on the first request/response pair. */
3645 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3648 switch (chan->mode) {
3649 case L2CAP_MODE_STREAMING:
3650 case L2CAP_MODE_ERTM:
3651 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3652 chan->mode = l2cap_select_mode(rfc.mode,
3653 chan->conn->feat_mask);
3658 if (__l2cap_efs_supported(chan->conn))
3659 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3661 return -ECONNREFUSED;
3664 if (chan->mode != rfc.mode)
3665 return -ECONNREFUSED;
3671 if (chan->mode != rfc.mode) {
3672 result = L2CAP_CONF_UNACCEPT;
3673 rfc.mode = chan->mode;
/* After the second response the peer should have converged. */
3675 if (chan->num_conf_rsp == 1)
3676 return -ECONNREFUSED;
3678 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3679 (unsigned long) &rfc, endptr - ptr);
3682 if (result == L2CAP_CONF_SUCCESS) {
3683 /* Configure output options and let the other side know
3684 * which ones we don't like. */
3686 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3687 result = L2CAP_CONF_UNACCEPT;
3690 set_bit(CONF_MTU_DONE, &chan->conf_state);
3692 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
/* EFS service type must be compatible unless one side is NOTRAFIC. */
3695 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3696 efs.stype != L2CAP_SERV_NOTRAFIC &&
3697 efs.stype != chan->local_stype) {
3699 result = L2CAP_CONF_UNACCEPT;
3701 if (chan->num_conf_req >= 1)
3702 return -ECONNREFUSED;
3704 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3706 (unsigned long) &efs, endptr - ptr);
3708 /* Send PENDING Conf Rsp */
3709 result = L2CAP_CONF_PENDING;
3710 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3715 case L2CAP_MODE_BASIC:
3716 chan->fcs = L2CAP_FCS_NONE;
3717 set_bit(CONF_MODE_DONE, &chan->conf_state);
3720 case L2CAP_MODE_ERTM:
3721 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3722 chan->remote_tx_win = rfc.txwin_size;
3724 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3726 chan->remote_max_tx = rfc.max_transmit;
3728 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3729 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3730 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3731 rfc.max_pdu_size = cpu_to_le16(size);
3732 chan->remote_mps = size;
3734 __l2cap_set_ertm_timeouts(chan, &rfc);
3736 set_bit(CONF_MODE_DONE, &chan->conf_state);
3738 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3739 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3741 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3742 chan->remote_id = efs.id;
3743 chan->remote_stype = efs.stype;
3744 chan->remote_msdu = le16_to_cpu(efs.msdu);
3745 chan->remote_flush_to =
3746 le32_to_cpu(efs.flush_to);
3747 chan->remote_acc_lat =
3748 le32_to_cpu(efs.acc_lat);
3749 chan->remote_sdu_itime =
3750 le32_to_cpu(efs.sdu_itime);
3751 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3753 (unsigned long) &efs, endptr - ptr);
3757 case L2CAP_MODE_STREAMING:
3758 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3759 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3760 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3761 rfc.max_pdu_size = cpu_to_le16(size);
3762 chan->remote_mps = size;
3764 set_bit(CONF_MODE_DONE, &chan->conf_state);
3766 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3767 (unsigned long) &rfc, endptr - ptr);
3772 result = L2CAP_CONF_UNACCEPT;
3774 memset(&rfc, 0, sizeof(rfc));
3775 rfc.mode = chan->mode;
3778 if (result == L2CAP_CONF_SUCCESS)
3779 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3781 rsp->scid = cpu_to_le16(chan->dcid);
3782 rsp->result = cpu_to_le16(result);
3783 rsp->flags = cpu_to_le16(0);
/* Parse the peer's Configuration Response ('rsp', 'len' bytes) and
 * build a follow-up Configuration Request into 'data' (bounded by
 * 'size').  Options the peer adjusted (MTU, flush timeout, RFC, EWS,
 * EFS, FCS) are validated and echoed back; on success or pending the
 * negotiated ERTM/streaming parameters are stored on the channel.
 * *result may be downgraded to UNACCEPT (e.g. undersized MTU).
 * Returns the request length, or -ECONNREFUSED on an incompatible
 * mode or EFS service type.
 */
3788 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3789 void *data, size_t size, u16 *result)
3791 struct l2cap_conf_req *req = data;
3792 void *ptr = req->data;
3793 void *endptr = data + size;
3796 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3797 struct l2cap_conf_efs efs;
3799 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3801 while (len >= L2CAP_CONF_OPT_SIZE) {
3802 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3807 case L2CAP_CONF_MTU:
/* Counter-propose the minimum MTU rather than accept less. */
3810 if (val < L2CAP_DEFAULT_MIN_MTU) {
3811 *result = L2CAP_CONF_UNACCEPT;
3812 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3815 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3819 case L2CAP_CONF_FLUSH_TO:
3822 chan->flush_to = val;
3823 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3824 chan->flush_to, endptr - ptr);
3827 case L2CAP_CONF_RFC:
3828 if (olen != sizeof(rfc))
3830 memcpy(&rfc, (void *)val, olen);
/* State-2 devices cannot renegotiate their mode. */
3831 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3832 rfc.mode != chan->mode)
3833 return -ECONNREFUSED;
3835 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3836 (unsigned long) &rfc, endptr - ptr);
3839 case L2CAP_CONF_EWS:
3842 chan->ack_win = min_t(u16, val, chan->ack_win);
3843 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3844 chan->tx_win, endptr - ptr);
3847 case L2CAP_CONF_EFS:
3848 if (olen != sizeof(efs))
3850 memcpy(&efs, (void *)val, olen);
3851 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3852 efs.stype != L2CAP_SERV_NOTRAFIC &&
3853 efs.stype != chan->local_stype)
3854 return -ECONNREFUSED;
3855 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3856 (unsigned long) &efs, endptr - ptr);
3859 case L2CAP_CONF_FCS:
3862 if (*result == L2CAP_CONF_PENDING)
3863 if (val == L2CAP_FCS_NONE)
3864 set_bit(CONF_RECV_NO_FCS,
/* A basic-mode channel cannot be switched to another mode by the
 * peer's response.
 */
3870 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3871 return -ECONNREFUSED;
3873 chan->mode = rfc.mode;
3875 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3877 case L2CAP_MODE_ERTM:
3878 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3879 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3880 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3881 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3882 chan->ack_win = min_t(u16, chan->ack_win,
3885 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3886 chan->local_msdu = le16_to_cpu(efs.msdu);
3887 chan->local_sdu_itime =
3888 le32_to_cpu(efs.sdu_itime);
3889 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3890 chan->local_flush_to =
3891 le32_to_cpu(efs.flush_to);
3895 case L2CAP_MODE_STREAMING:
3896 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3900 req->dcid = cpu_to_le16(chan->dcid);
3901 req->flags = cpu_to_le16(0);
/* Build a minimal Configuration Response (no options) with the given
 * result and flags.  Returns the response length (return statement
 * elided in this view).
 */
3906 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3907 u16 result, u16 flags)
3909 struct l2cap_conf_rsp *rsp = data;
3910 void *ptr = rsp->data;
3912 BT_DBG("chan %p", chan);
3914 rsp->scid = cpu_to_le16(chan->dcid);
3915 rsp->result = cpu_to_le16(result);
3916 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE credit-based connection response for a channel
 * whose acceptance was postponed (e.g. DEFER_SETUP), using the ident
 * saved from the original request.
 */
3921 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3923 struct l2cap_le_conn_rsp rsp;
3924 struct l2cap_conn *conn = chan->conn;
3926 BT_DBG("chan %p", chan);
3928 rsp.dcid = cpu_to_le16(chan->scid);
3929 rsp.mtu = cpu_to_le16(chan->imtu);
3930 rsp.mps = cpu_to_le16(chan->mps);
3931 rsp.credits = cpu_to_le16(chan->rx_credits);
3932 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3934 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred Enhanced Credit Based connection response.  A
 * single ECRED request can open several channels at once, so every
 * channel on the connection still carrying the same ident is gathered
 * into one response's DCID list (and its ident reset so the response
 * is sent only once).
 */
3938 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3941 struct l2cap_ecred_conn_rsp rsp;
3944 struct l2cap_conn *conn = chan->conn;
3945 u16 ident = chan->ident;
3951 BT_DBG("chan %p ident %d", chan, ident);
3953 pdu.rsp.mtu = cpu_to_le16(chan->imtu);
3954 pdu.rsp.mps = cpu_to_le16(chan->mps);
3955 pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3956 pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3958 mutex_lock(&conn->chan_lock);
3960 list_for_each_entry(chan, &conn->chan_l, list) {
3961 if (chan->ident != ident)
3964 /* Reset ident so only one response is sent */
3967 /* Include all channels pending with the same ident */
3968 pdu.dcid[i++] = cpu_to_le16(chan->scid);
3971 mutex_unlock(&conn->chan_lock);
3973 l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
3974 sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
/* Send the deferred BR/EDR connection (or AMP create-channel) success
 * response, then -- if a config request was not already sent -- kick
 * off configuration by sending the first Configuration Request.
 */
3977 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3979 struct l2cap_conn_rsp rsp;
3980 struct l2cap_conn *conn = chan->conn;
3984 rsp.scid = cpu_to_le16(chan->dcid);
3985 rsp.dcid = cpu_to_le16(chan->scid);
3986 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3987 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* The response opcode depends on how the channel was created; the
 * condition selecting CREATE_CHAN vs CONN is elided in this view.
 */
3990 rsp_code = L2CAP_CREATE_CHAN_RSP;
3992 rsp_code = L2CAP_CONN_RSP;
3994 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3996 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3998 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4001 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4002 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4003 chan->num_conf_req++;
/* Extract RFC (and extended-window) parameters from a final
 * Configuration Response and apply them to the channel.  Sane defaults
 * are pre-loaded so a misbehaving remote that omits the options still
 * leaves the channel usable.  Only relevant for ERTM/streaming modes.
 */
4006 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
4010 /* Use sane default values in case a misbehaving remote device
4011 * did not send an RFC or extended window size option.
4013 u16 txwin_ext = chan->ack_win;
4014 struct l2cap_conf_rfc rfc = {
4016 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
4017 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
4018 .max_pdu_size = cpu_to_le16(chan->imtu),
4019 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
4022 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
4024 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
4027 while (len >= L2CAP_CONF_OPT_SIZE) {
4028 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
4033 case L2CAP_CONF_RFC:
4034 if (olen != sizeof(rfc))
4036 memcpy(&rfc, (void *)val, olen);
4038 case L2CAP_CONF_EWS:
/* EWS handling and the mode switch header are elided in this view. */
4047 case L2CAP_MODE_ERTM:
4048 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
4049 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
4050 chan->mps = le16_to_cpu(rfc.max_pdu_size);
4051 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4052 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
4054 chan->ack_win = min_t(u16, chan->ack_win,
4057 case L2CAP_MODE_STREAMING:
4058 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject for a signalling command we sent.  If it
 * rejects our outstanding information request (matching ident), cancel
 * the info timer, mark feature discovery done, and proceed with
 * starting queued connections anyway.
 */
4062 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4063 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4066 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Reject undersized PDUs before touching any field. */
4068 if (cmd_len < sizeof(*rej))
4071 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4074 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4075 cmd->ident == conn->info_ident) {
4076 cancel_delayed_work(&conn->info_timer);
4078 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4079 conn->info_ident = 0;
4081 l2cap_conn_start(conn);
/* Handle an incoming Connection Request (or AMP Create Channel
 * Request).  Looks up a listening channel for the PSM, enforces link
 * security for non-SDP PSMs, validates the source CID (Erratum 3253)
 * and rejects duplicates, then creates the new channel, binds it to
 * the connection, and decides the response: success, pending (defer
 * setup / authentication / AMP physical link), or an error result.
 * Always sends the connection response; may also kick off feature-mask
 * discovery and the first Configuration Request.  Returns the new
 * channel (or NULL).  Several goto labels/jumps are elided in this
 * view.
 */
4087 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
4088 struct l2cap_cmd_hdr *cmd,
4089 u8 *data, u8 rsp_code, u8 amp_id)
4091 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
4092 struct l2cap_conn_rsp rsp;
4093 struct l2cap_chan *chan = NULL, *pchan;
4094 int result, status = L2CAP_CS_NO_INFO;
4096 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
4097 __le16 psm = req->psm;
4099 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
4101 /* Check if we have socket listening on psm */
4102 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
4103 &conn->hcon->dst, ACL_LINK);
4105 result = L2CAP_CR_BAD_PSM;
4109 mutex_lock(&conn->chan_lock);
4110 l2cap_chan_lock(pchan);
4112 /* Check if the ACL is secure enough (if not SDP) */
4113 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
4114 !hci_conn_check_link_mode(conn->hcon)) {
4115 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
4116 result = L2CAP_CR_SEC_BLOCK;
4120 result = L2CAP_CR_NO_MEM;
4122 /* Check for valid dynamic CID range (as per Erratum 3253) */
4123 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
4124 result = L2CAP_CR_INVALID_SCID;
4128 /* Check if we already have channel with that dcid */
4129 if (__l2cap_get_chan_by_dcid(conn, scid)) {
4130 result = L2CAP_CR_SCID_IN_USE;
4134 chan = pchan->ops->new_connection(pchan);
4138 /* For certain devices (ex: HID mouse), support for authentication,
4139 * pairing and bonding is optional. For such devices, inorder to avoid
4140 * the ACL alive for too long after L2CAP disconnection, reset the ACL
4141 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4143 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4145 bacpy(&chan->src, &conn->hcon->src);
4146 bacpy(&chan->dst, &conn->hcon->dst);
4147 chan->src_type = bdaddr_src_type(conn->hcon);
4148 chan->dst_type = bdaddr_dst_type(conn->hcon);
4151 chan->local_amp_id = amp_id;
4153 __l2cap_chan_add(conn, chan);
4157 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
/* Remember the request ident so a deferred response can reuse it. */
4159 chan->ident = cmd->ident;
4161 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4162 if (l2cap_chan_check_security(chan, false)) {
4163 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4164 l2cap_state_change(chan, BT_CONNECT2);
4165 result = L2CAP_CR_PEND;
4166 status = L2CAP_CS_AUTHOR_PEND;
4167 chan->ops->defer(chan);
4169 /* Force pending result for AMP controllers.
4170 * The connection will succeed after the
4171 * physical link is up.
4173 if (amp_id == AMP_ID_BREDR) {
4174 l2cap_state_change(chan, BT_CONFIG);
4175 result = L2CAP_CR_SUCCESS;
4177 l2cap_state_change(chan, BT_CONNECT2);
4178 result = L2CAP_CR_PEND;
4180 status = L2CAP_CS_NO_INFO;
4183 l2cap_state_change(chan, BT_CONNECT2);
4184 result = L2CAP_CR_PEND;
4185 status = L2CAP_CS_AUTHEN_PEND;
4188 l2cap_state_change(chan, BT_CONNECT2);
4189 result = L2CAP_CR_PEND;
4190 status = L2CAP_CS_NO_INFO;
4194 l2cap_chan_unlock(pchan);
4195 mutex_unlock(&conn->chan_lock);
4196 l2cap_chan_put(pchan);
4199 rsp.scid = cpu_to_le16(scid);
4200 rsp.dcid = cpu_to_le16(dcid);
4201 rsp.result = cpu_to_le16(result);
4202 rsp.status = cpu_to_le16(status);
4203 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* If we answered "pending, no info", start feature discovery now. */
4205 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4206 struct l2cap_info_req info;
4207 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4209 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4210 conn->info_ident = l2cap_get_ident(conn);
4212 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4214 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4215 sizeof(info), &info);
4218 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4219 result == L2CAP_CR_SUCCESS) {
4221 set_bit(CONF_REQ_SENT, &chan->conf_state);
4222 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4223 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4224 chan->num_conf_req++;
/* Signalling handler for L2CAP_CONN_REQ: validate the PDU length,
 * notify the management interface of the (first) connection, then
 * delegate to l2cap_connect() with the standard CONN_RSP opcode and
 * the BR/EDR controller id.
 */
4230 static int l2cap_connect_req(struct l2cap_conn *conn,
4231 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4233 struct hci_dev *hdev = conn->hcon->hdev;
4234 struct hci_conn *hcon = conn->hcon;
4236 if (cmd_len < sizeof(struct l2cap_conn_req))
/* hci_dev_lock() call is elided in this view, paired with the
 * hci_dev_unlock() below.
 */
4240 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4241 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4242 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
4243 hci_dev_unlock(hdev);
4245 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle Connect Response / Create Channel Response from the remote.
 * Looks the channel up by scid when the peer supplied one, otherwise by
 * the pending command ident.  Must be called with neither conn->chan_lock
 * nor the channel lock held; both are taken here.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	/* A zero scid is only valid while the response is still pending,
	 * in which case the channel is identified by the request ident.
	 */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Connection accepted: move to configuration phase and
		 * send the first Configure Request if not already sent.
		 */
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result code is a refusal */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4322 static inline void set_default_fcs(struct l2cap_chan *chan)
4324 /* FCS is enabled only in ERTM or streaming mode, if one or both
4327 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4328 chan->fcs = L2CAP_FCS_NONE;
4329 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4330 chan->fcs = L2CAP_FCS_CRC16;
4333 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4334 u8 ident, u16 flags)
4336 struct l2cap_conn *conn = chan->conn;
4338 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4341 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4342 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4344 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4345 l2cap_build_conf_rsp(chan, data,
4346 L2CAP_CONF_SUCCESS, flags), data);
4349 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4352 struct l2cap_cmd_rej_cid rej;
4354 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4355 rej.scid = __cpu_to_le16(scid);
4356 rej.dcid = __cpu_to_le16(dcid);
4358 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle an incoming Configure Request.  Accumulates continuation
 * fragments in chan->conf_req, and once the request is complete parses
 * the options, replies, and advances the channel toward BT_CONNECTED
 * when both directions are configured.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal in these states */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		/* Unworkable options: tear the channel down */
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: finalize FCS and, for
		 * ERTM/streaming, initialise retransmission state.
		 */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own Configure Request has not gone out yet */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			/* AMP channel: response deferred until the
			 * logical link completes.
			 */
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	return err;
}
/* Handle an incoming Configure Response.  Depending on the result code
 * this either completes our side of configuration, renegotiates with a
 * new Configure Request, or disconnects the channel.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* BR/EDR answers immediately; AMP waits for the
			 * logical link before confirming EFS.
			 */
			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			/* Guard the stack buffer before re-parsing */
			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through - too many renegotiation rounds */

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: bring the channel up */
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	return err;
}
/* Handle an incoming Disconnect Request: acknowledge with a Disconnect
 * Response, then tear the channel down.  The extra hold/put pair keeps
 * the channel alive across ops->close(), which may drop references.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's dcid is our scid */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_hold(chan);
	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
/* Handle the peer's Disconnect Response: finish tearing down a channel
 * that we previously asked to disconnect.  Only acts on channels already
 * in BT_DISCONN; stale or unsolicited responses are ignored.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	/* Hold across ops->close() to keep the channel object alive */
	l2cap_chan_hold(chan);
	l2cap_chan_lock(chan);

	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_del(chan, 0);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
/* Handle an Information Request: reply with the feature mask, the fixed
 * channel bitmap, or a "not supported" result for unknown info types.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* buf = l2cap_info_rsp header (4 bytes) + 32-bit mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		/* rsp->data is not 4-byte aligned within buf */
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* buf = header + 8-byte fixed channel bitmap */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		/* Remaining 7 octets of the bitmap are reserved/zero */
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
/* Handle an Information Response for our earlier Information Request.
 * On a feature-mask response that advertises fixed channels, chains a
 * second request for the fixed channel bitmap; once discovery is done,
 * kicks l2cap_conn_start() to proceed with pending channels.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused: mark discovery done and proceed anyway */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
/* Handle an AMP Create Channel Request.  amp_id 0 (AMP_ID_BREDR) falls
 * back to a normal BR/EDR connect; otherwise the AMP controller id is
 * validated and the channel is bound to the AMP's physical link.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			/* No AMP physical link to this peer */
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* FCS is not used on AMP links */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4869 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4871 struct l2cap_move_chan_req req;
4874 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4876 ident = l2cap_get_ident(chan->conn);
4877 chan->ident = ident;
4879 req.icid = cpu_to_le16(chan->scid);
4880 req.dest_amp_id = dest_amp_id;
4882 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4885 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4888 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4890 struct l2cap_move_chan_rsp rsp;
4892 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4894 rsp.icid = cpu_to_le16(chan->dcid);
4895 rsp.result = cpu_to_le16(result);
4897 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4901 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4903 struct l2cap_move_chan_cfm cfm;
4905 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4907 chan->ident = l2cap_get_ident(chan->conn);
4909 cfm.icid = cpu_to_le16(chan->scid);
4910 cfm.result = cpu_to_le16(result);
4912 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4915 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4918 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4920 struct l2cap_move_chan_cfm cfm;
4922 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4924 cfm.icid = cpu_to_le16(icid);
4925 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4927 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4931 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4934 struct l2cap_move_chan_cfm_rsp rsp;
4936 BT_DBG("icid 0x%4.4x", icid);
4938 rsp.icid = cpu_to_le16(icid);
4939 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Detach the channel from its AMP logical link bookkeeping.
 * Only clears the pointers; the actual link teardown is not implemented.
 */
static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}
/* React to a failed AMP logical link setup: disconnect a channel still
 * being created, or unwind an in-progress channel move based on our role.
 */
static void l2cap_logical_fail(struct l2cap_chan *chan)
{
	/* Logical link setup failed */
	if (chan->state != BT_CONNECTED) {
		/* Create channel failure, disconnect */
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	switch (chan->move_role) {
	case L2CAP_MOVE_ROLE_RESPONDER:
		l2cap_move_done(chan);
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
		break;
	case L2CAP_MOVE_ROLE_INITIATOR:
		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
			/* Remote has only sent pending or
			 * success responses, clean up
			 */
			l2cap_move_done(chan);
		}

		/* Other amp move states imply that the move
		 * has already aborted
		 */
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
		break;
	}
}
/* Finish channel creation once the AMP logical link is up: attach the
 * link, send the deferred EFS Configure Response, and if configuration
 * is complete, bring the channel to ready.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* chan->ident was saved when the Configure Request arrived */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
/* Advance an in-progress channel move once the AMP logical link is up.
 * The next step depends on the current move state and our role.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
/* Call with chan locked */
/* AMP logical link confirmation callback.  A non-zero status means the
 * link failed; otherwise dispatch to the create- or move-completion
 * path depending on the channel state.
 */
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
		       u8 status)
{
	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);

	if (status) {
		l2cap_logical_fail(chan);
		__release_logical_link(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		/* Ignore logical link if channel is on BR/EDR */
		if (chan->local_amp_id != AMP_ID_BREDR)
			l2cap_logical_finish_create(chan, hchan);
	} else {
		l2cap_logical_finish_move(chan, hchan);
	}
}
/* Initiate moving this channel to another controller.  From BR/EDR the
 * physical AMP link must be prepared first; from an AMP the channel can
 * be moved back to BR/EDR (dest id 0) immediately.
 */
void l2cap_move_start(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	if (chan->local_amp_id == AMP_ID_BREDR) {
		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
			return;
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - start physical link setup */
	} else {
		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		/* Destination 0 means move back to BR/EDR */
		chan->move_id = 0;
		l2cap_move_setup(chan);
		l2cap_send_move_chan_req(chan, 0);
	}
}
/* Continue channel creation after AMP physical link setup completes.
 * Outgoing channels send a Create Channel Request (or fall back to a
 * plain BR/EDR connect); incoming ones answer the pending request.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is not used on AMP transports */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Accepted: immediately start configuration */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)),
				       buf);
			chan->num_conf_req++;
		}
	}
}
/* As move initiator, pause the channel and send the Move Channel
 * Request toward the remote controller.
 */
static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
				   u8 remote_amp_id)
{
	l2cap_move_setup(chan);
	chan->move_id = local_amp_id;
	chan->move_state = L2CAP_MOVE_WAIT_RSP;

	l2cap_send_move_chan_req(chan, remote_amp_id);
}
/* As move responder, answer based on the state of the AMP logical link.
 * NOTE(review): hchan is never assigned (the lookup is a placeholder),
 * so as written only the "not available" branch can execute — confirm
 * against the full upstream source.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
/* Abort an in-progress move: notify the peer when we are the responder,
 * reset the move state machine, and resume ERTM transmission.
 */
static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
{
	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
		u8 rsp_result;
		if (result == -EINVAL)
			rsp_result = L2CAP_MR_BAD_ID;
		else
			rsp_result = L2CAP_MR_NOT_ALLOWED;

		l2cap_send_move_chan_rsp(chan, rsp_result);
	}

	chan->move_role = L2CAP_MOVE_ROLE_NONE;
	chan->move_state = L2CAP_MOVE_STABLE;

	/* Restart data transmission */
	l2cap_ertm_send(chan);
}
/* Invoke with locked chan */
/* AMP physical link confirmation: dispatch to channel creation, move
 * initiation/response, or cancellation depending on channel state,
 * result, and our move role.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel already going away - nothing to do */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
		return;

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
/* Handle an incoming Move Channel Request: validate the channel and the
 * destination controller, detect move collisions, and answer with the
 * appropriate result.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels may be moved */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
/* Continue the initiator-side move state machine after a SUCCESS or
 * PEND Move Channel Response from the peer.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		/* PEND extends the deadline for the final response */
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
/* Handle a failed Move Channel Response.  A collision demotes us to
 * responder; anything else cancels the move.  An UNCONFIRMED confirm is
 * always sent so the peer's state machine terminates.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
}
5432 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5433 struct l2cap_cmd_hdr *cmd,
5434 u16 cmd_len, void *data)
5436 struct l2cap_move_chan_rsp *rsp = data;
5439 if (cmd_len != sizeof(*rsp))
5442 icid = le16_to_cpu(rsp->icid);
5443 result = le16_to_cpu(rsp->result);
5445 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5447 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5448 l2cap_move_continue(conn, icid, result);
5450 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle a Move Channel Confirmation: commit or roll back the move on
 * the responder side, then always acknowledge with a confirm response.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Move committed: adopt the new controller id */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Move rolled back: keep the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
/* Handle the Move Channel Confirmation Response: the move handshake is
 * complete, so commit the new controller id and resume the channel.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	/* Returns with the channel locked on success */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		/* Back on BR/EDR: the AMP logical link is no longer needed */
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
/* Handle an LE Connection Parameter Update Request from a peripheral.
 * Only valid when we are the central; validates the parameters, always
 * responds, and applies accepted parameters to the link.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the central may be asked to update parameters */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		/* Apply the parameters and let MGMT decide whether to
		 * persist them for future connections.
		 */
		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
/* Handle an LE Credit Based Connection Response.  On success adopt the
 * peer's parameters; on an authentication/encryption failure raise the
 * security level and retry via SMP; anything else refuses the channel.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* Spec minimums: MTU/MPS >= 23; dcid must be a valid LE dynamic CID */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Reject a dcid the peer already uses for another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
/* Dispatch a BR/EDR signaling command to its handler.  Note that only
 * request handlers propagate their return value into err; responses are
 * intentionally fire-and-forget (their errors are ignored here).
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
/* Handle an incoming LE Credit Based Connection Request.
 * Finds a listening channel for the PSM, validates security and the
 * requested SCID, creates a new child channel, initializes LE flow
 * control, and sends (or defers) the Connection Response.
 * NOTE(review): excerpt is missing lines (numbering skips); the error
 * labels and response path are only partially visible.
 */
5749 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5750 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5753 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5754 struct l2cap_le_conn_rsp rsp;
5755 struct l2cap_chan *chan, *pchan;
5756 u16 dcid, scid, credits, mtu, mps;
/* Fixed-size request: anything else is malformed. */
5760 if (cmd_len != sizeof(*req))
5763 scid = __le16_to_cpu(req->scid);
5764 mtu = __le16_to_cpu(req->mtu);
5765 mps = __le16_to_cpu(req->mps);
/* Spec minimum MTU/MPS for LE CoC is 23. */
5770 if (mtu < 23 || mps < 23)
5773 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5776 /* Check if we have socket listening on psm */
5777 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5778 &conn->hcon->dst, LE_LINK);
5780 result = L2CAP_CR_LE_BAD_PSM;
5785 mutex_lock(&conn->chan_lock);
5786 l2cap_chan_lock(pchan);
5788 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5790 result = L2CAP_CR_LE_AUTHENTICATION;
5792 goto response_unlock;
5795 /* Check for valid dynamic CID range */
5796 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5797 result = L2CAP_CR_LE_INVALID_SCID;
5799 goto response_unlock;
5802 /* Check if we already have channel with that dcid */
5803 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5804 result = L2CAP_CR_LE_SCID_IN_USE;
5806 goto response_unlock;
/* Ask the listener to spawn a child channel for this connection. */
5809 chan = pchan->ops->new_connection(pchan);
5811 result = L2CAP_CR_LE_NO_MEM;
5812 goto response_unlock;
5815 bacpy(&chan->src, &conn->hcon->src);
5816 bacpy(&chan->dst, &conn->hcon->dst);
5817 chan->src_type = bdaddr_src_type(conn->hcon);
5818 chan->dst_type = bdaddr_dst_type(conn->hcon);
5822 chan->remote_mps = mps;
5824 __l2cap_chan_add(conn, chan);
5826 l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5829 credits = chan->rx_credits;
5831 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5833 chan->ident = cmd->ident;
5835 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5836 l2cap_state_change(chan, BT_CONNECT2);
5837 /* The following result value is actually not defined
5838 * for LE CoC but we use it to let the function know
5839 * that it should bail out after doing its cleanup
5840 * instead of sending a response.
5842 result = L2CAP_CR_PEND;
5843 chan->ops->defer(chan);
5845 l2cap_chan_ready(chan);
5846 result = L2CAP_CR_LE_SUCCESS;
5850 l2cap_chan_unlock(pchan);
5851 mutex_unlock(&conn->chan_lock);
5852 l2cap_chan_put(pchan);
/* Deferred setup: the response is sent later by the defer callback. */
5854 if (result == L2CAP_CR_PEND)
5859 rsp.mtu = cpu_to_le16(chan->imtu);
5860 rsp.mps = cpu_to_le16(chan->mps);
5866 rsp.dcid = cpu_to_le16(dcid);
5867 rsp.credits = cpu_to_le16(credits);
5868 rsp.result = cpu_to_le16(result);
5870 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet: add the peer-granted credits
 * to the channel's tx budget and resume transmission. A grant that would
 * push tx_credits past LE_FLOWCTL_MAX_CREDITS is a protocol violation
 * and triggers a disconnect.
 */
5875 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5876 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5879 struct l2cap_le_credits *pkt;
5880 struct l2cap_chan *chan;
5881 u16 cid, credits, max_credits;
5883 if (cmd_len != sizeof(*pkt))
5886 pkt = (struct l2cap_le_credits *) data;
5887 cid = __le16_to_cpu(pkt->cid);
5888 credits = __le16_to_cpu(pkt->credits);
5890 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5892 chan = l2cap_get_chan_by_dcid(conn, cid);
/* Cap check: total credits must never exceed the 16-bit maximum. */
5896 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5897 if (credits > max_credits) {
5898 BT_ERR("LE credits overflow");
5899 l2cap_send_disconn_req(chan, ECONNRESET);
5900 l2cap_chan_unlock(chan);
5902 /* Return 0 so that we don't trigger an unnecessary
5903 * command reject packet.
5908 chan->tx_credits += credits;
5910 /* Resume sending */
5911 l2cap_le_flowctl_send(chan);
5913 if (chan->tx_credits)
5914 chan->ops->resume(chan);
5916 l2cap_chan_unlock(chan);
/* Handle an Enhanced Credit Based (ECRED) Connection Request, which may
 * open up to L2CAP_ECRED_MAX_CID channels at once. For each requested
 * SCID a child channel is created (or an error DCID of 0 recorded), then
 * a single aggregate response is sent.
 * NOTE(review): lines are elided in this excerpt; the per-channel error
 * handling and response labels are only partially visible.
 */
5921 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5922 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5925 struct l2cap_ecred_conn_req *req = (void *) data;
5927 struct l2cap_ecred_conn_rsp rsp;
5928 __le16 dcid[L2CAP_ECRED_MAX_CID];
5930 struct l2cap_chan *chan, *pchan;
/* Variable-length request: header plus a whole number of 16-bit SCIDs. */
5940 if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
5941 result = L2CAP_CR_LE_INVALID_PARAMS;
5945 cmd_len -= sizeof(*req);
5946 num_scid = cmd_len / sizeof(u16);
5948 if (num_scid > ARRAY_SIZE(pdu.dcid)) {
5949 result = L2CAP_CR_LE_INVALID_PARAMS;
5953 mtu = __le16_to_cpu(req->mtu);
5954 mps = __le16_to_cpu(req->mps);
5956 if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
5957 result = L2CAP_CR_LE_UNACCEPT_PARAMS;
5963 BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
5965 memset(&pdu, 0, sizeof(pdu));
5967 /* Check if we have socket listening on psm */
5968 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5969 &conn->hcon->dst, LE_LINK);
5971 result = L2CAP_CR_LE_BAD_PSM;
5975 mutex_lock(&conn->chan_lock);
5976 l2cap_chan_lock(pchan);
5978 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5980 result = L2CAP_CR_LE_AUTHENTICATION;
5984 result = L2CAP_CR_LE_SUCCESS;
/* One pass per requested SCID; failures record dcid 0 for that slot. */
5986 for (i = 0; i < num_scid; i++) {
5987 u16 scid = __le16_to_cpu(req->scid[i]);
5989 BT_DBG("scid[%d] 0x%4.4x", i, scid);
5991 pdu.dcid[i] = 0x0000;
5992 len += sizeof(*pdu.dcid);
5994 /* Check for valid dynamic CID range */
5995 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5996 result = L2CAP_CR_LE_INVALID_SCID;
6000 /* Check if we already have channel with that dcid */
6001 if (__l2cap_get_chan_by_dcid(conn, scid)) {
6002 result = L2CAP_CR_LE_SCID_IN_USE;
6006 chan = pchan->ops->new_connection(pchan);
6008 result = L2CAP_CR_LE_NO_MEM;
6012 bacpy(&chan->src, &conn->hcon->src);
6013 bacpy(&chan->dst, &conn->hcon->dst);
6014 chan->src_type = bdaddr_src_type(conn->hcon);
6015 chan->dst_type = bdaddr_dst_type(conn->hcon);
6019 chan->remote_mps = mps;
6021 __l2cap_chan_add(conn, chan);
6023 l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
/* Response MTU/MPS/credits come from the first accepted channel. */
6026 if (!pdu.rsp.credits) {
6027 pdu.rsp.mtu = cpu_to_le16(chan->imtu);
6028 pdu.rsp.mps = cpu_to_le16(chan->mps);
6029 pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
6032 pdu.dcid[i] = cpu_to_le16(chan->scid);
6034 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
6036 chan->ident = cmd->ident;
6038 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6039 l2cap_state_change(chan, BT_CONNECT2);
6041 chan->ops->defer(chan);
6043 l2cap_chan_ready(chan);
6048 l2cap_chan_unlock(pchan);
6049 mutex_unlock(&conn->chan_lock);
6050 l2cap_chan_put(pchan);
6053 pdu.rsp.result = cpu_to_le16(result);
6058 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
6059 sizeof(pdu.rsp) + len, &pdu);
/* Handle an Enhanced Credit Based Connection Response. Walks every
 * pending EXT_FLOWCTL channel matching the command ident, consumes one
 * DCID from the response per channel, and completes, retries (with
 * higher security), or tears down each channel per the result code.
 * NOTE(review): excerpt has elided lines (numbering skips); loop
 * boundaries and some cases are only partially visible.
 */
6064 static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
6065 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6068 struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6069 struct hci_conn *hcon = conn->hcon;
6070 u16 mtu, mps, credits, result;
6071 struct l2cap_chan *chan;
6072 int err = 0, sec_level;
6075 if (cmd_len < sizeof(*rsp))
6078 mtu = __le16_to_cpu(rsp->mtu);
6079 mps = __le16_to_cpu(rsp->mps);
6080 credits = __le16_to_cpu(rsp->credits);
6081 result = __le16_to_cpu(rsp->result);
6083 BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
6086 mutex_lock(&conn->chan_lock);
/* cmd_len now counts only the trailing DCID array. */
6088 cmd_len -= sizeof(*rsp);
6090 list_for_each_entry(chan, &conn->chan_l, list) {
/* Only channels waiting on this exact request are of interest. */
6093 if (chan->ident != cmd->ident ||
6094 chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
6095 chan->state == BT_CONNECTED)
6098 l2cap_chan_lock(chan);
6100 /* Check that there is a dcid for each pending channel */
6101 if (cmd_len < sizeof(dcid)) {
6102 l2cap_chan_del(chan, ECONNREFUSED);
6103 l2cap_chan_unlock(chan);
6107 dcid = __le16_to_cpu(rsp->dcid[i++]);
6108 cmd_len -= sizeof(u16);
6110 BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
6112 /* Check if dcid is already in use */
6113 if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
6114 /* If a device receives a
6115 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
6116 * already-assigned Destination CID, then both the
6117 * original channel and the new channel shall be
6118 * immediately discarded and not used.
6120 l2cap_chan_del(chan, ECONNREFUSED);
6121 l2cap_chan_unlock(chan);
6122 chan = __l2cap_get_chan_by_dcid(conn, dcid);
6123 l2cap_chan_lock(chan);
6124 l2cap_chan_del(chan, ECONNRESET);
6125 l2cap_chan_unlock(chan);
6130 case L2CAP_CR_LE_AUTHENTICATION:
6131 case L2CAP_CR_LE_ENCRYPTION:
6132 /* If we already have MITM protection we can't do
6135 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
6136 l2cap_chan_del(chan, ECONNREFUSED);
/* Escalate security one step and retry via SMP. */
6140 sec_level = hcon->sec_level + 1;
6141 if (chan->sec_level < sec_level)
6142 chan->sec_level = sec_level;
6144 /* We'll need to send a new Connect Request */
6145 clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
6147 smp_conn_security(hcon, chan->sec_level);
6150 case L2CAP_CR_LE_BAD_PSM:
6151 l2cap_chan_del(chan, ECONNREFUSED);
6155 /* If dcid was not set it means channels was refused */
6157 l2cap_chan_del(chan, ECONNREFUSED);
6164 chan->remote_mps = mps;
6165 chan->tx_credits = credits;
6166 l2cap_chan_ready(chan);
6170 l2cap_chan_unlock(chan);
6173 mutex_unlock(&conn->chan_lock);
/* Handle an ECRED Reconfigure Request: the peer proposes new MTU/MPS
 * for a list of its channels. Per spec the MTU may only grow; a
 * decrease is rejected. Ends by sending a Reconfigure Response with
 * the aggregate result.
 * NOTE(review): lines elided in this excerpt; per-channel updates
 * beyond remote_mps are not visible here.
 */
6178 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6179 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6182 struct l2cap_ecred_reconf_req *req = (void *) data;
6183 struct l2cap_ecred_reconf_rsp rsp;
6184 u16 mtu, mps, result;
6185 struct l2cap_chan *chan;
/* NOTE(review): precedence here differs from the conn_req check —
 * `cmd_len - sizeof(*req) % sizeof(u16)` binds % before -; confirm
 * against upstream whether this is intentional. */
6191 if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
6192 result = L2CAP_CR_LE_INVALID_PARAMS;
6196 mtu = __le16_to_cpu(req->mtu);
6197 mps = __le16_to_cpu(req->mps);
6199 BT_DBG("mtu %u mps %u", mtu, mps);
6201 if (mtu < L2CAP_ECRED_MIN_MTU) {
6202 result = L2CAP_RECONF_INVALID_MTU;
6206 if (mps < L2CAP_ECRED_MIN_MPS) {
6207 result = L2CAP_RECONF_INVALID_MPS;
6211 cmd_len -= sizeof(*req);
6212 num_scid = cmd_len / sizeof(u16);
6213 result = L2CAP_RECONF_SUCCESS;
6215 for (i = 0; i < num_scid; i++) {
6218 scid = __le16_to_cpu(req->scid[i]);
/* The peer's SCID is our DCID. */
6222 chan = __l2cap_get_chan_by_dcid(conn, scid);
6226 /* If the MTU value is decreased for any of the included
6227 * channels, then the receiver shall disconnect all
6228 * included channels.
6230 if (chan->omtu > mtu) {
6231 BT_ERR("chan %p decreased MTU %u -> %u", chan,
6233 result = L2CAP_RECONF_INVALID_MTU;
6237 chan->remote_mps = mps;
6241 rsp.result = cpu_to_le16(result);
6243 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
/* Handle an ECRED Reconfigure Response. On a non-success result every
 * channel still tagged with this command's ident is torn down.
 * NOTE(review): lines elided — the success path and the lock around the
 * channel-list walk are not visible in this excerpt.
 */
6249 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6250 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6253 struct l2cap_chan *chan;
6254 struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6257 if (cmd_len < sizeof(*rsp))
6260 result = __le16_to_cpu(rsp->result);
6262 BT_DBG("result 0x%4.4x", rsp->result);
6267 list_for_each_entry(chan, &conn->chan_l, list) {
6268 if (chan->ident != cmd->ident)
6271 l2cap_chan_del(chan, ECONNRESET);
/* Handle a Command Reject on the LE signaling channel: the peer did not
 * understand a request we sent, so drop the channel waiting on that
 * ident with ECONNREFUSED.
 */
6277 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6278 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6281 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6282 struct l2cap_chan *chan;
6284 if (cmd_len < sizeof(*rej))
6287 mutex_lock(&conn->chan_lock);
6289 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6293 l2cap_chan_lock(chan);
6294 l2cap_chan_del(chan, ECONNREFUSED);
6295 l2cap_chan_unlock(chan);
6298 mutex_unlock(&conn->chan_lock);
/* Dispatch a single LE signaling command to its handler by opcode.
 * Unknown opcodes are logged (and presumably rejected by the caller —
 * the tail of the function is elided in this excerpt).
 */
6302 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6303 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6308 switch (cmd->code) {
6309 case L2CAP_COMMAND_REJ:
6310 l2cap_le_command_rej(conn, cmd, cmd_len, data);
6313 case L2CAP_CONN_PARAM_UPDATE_REQ:
6314 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6317 case L2CAP_CONN_PARAM_UPDATE_RSP:
6320 case L2CAP_LE_CONN_RSP:
6321 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6324 case L2CAP_LE_CONN_REQ:
6325 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6328 case L2CAP_LE_CREDITS:
6329 err = l2cap_le_credits(conn, cmd, cmd_len, data);
6332 case L2CAP_ECRED_CONN_REQ:
6333 err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6336 case L2CAP_ECRED_CONN_RSP:
6337 err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6340 case L2CAP_ECRED_RECONF_REQ:
6341 err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6344 case L2CAP_ECRED_RECONF_RSP:
6345 err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6348 case L2CAP_DISCONN_REQ:
6349 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6352 case L2CAP_DISCONN_RSP:
6353 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6357 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process one inbound PDU on the LE signaling channel: validate the
 * link type and command header, dispatch via l2cap_le_sig_cmd(), and
 * answer failures with a Command Reject. Unlike BR/EDR, an LE signaling
 * PDU carries exactly one command.
 */
6365 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
6366 struct sk_buff *skb)
6368 struct hci_conn *hcon = conn->hcon;
6369 struct l2cap_cmd_hdr *cmd;
6373 if (hcon->type != LE_LINK)
6376 if (skb->len < L2CAP_CMD_HDR_SIZE)
6379 cmd = (void *) skb->data;
6380 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6382 len = le16_to_cpu(cmd->len);
6384 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* Declared length must match remaining payload; ident 0 is reserved. */
6386 if (len != skb->len || !cmd->ident) {
6387 BT_DBG("corrupted command");
6391 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
6393 struct l2cap_cmd_rej_unk rej;
6395 BT_ERR("Wrong link type (%d)", err);
6397 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6398 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Process inbound PDUs on the BR/EDR signaling channel. A single skb
 * may carry several concatenated commands, hence the while loop; each
 * is dispatched via l2cap_bredr_sig_cmd() and failures are answered
 * with a Command Reject.
 */
6406 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
6407 struct sk_buff *skb)
6409 struct hci_conn *hcon = conn->hcon;
6410 struct l2cap_cmd_hdr *cmd;
/* Mirror raw signaling traffic to any raw sockets first. */
6413 l2cap_raw_recv(conn, skb);
6415 if (hcon->type != ACL_LINK)
6418 while (skb->len >= L2CAP_CMD_HDR_SIZE) {
6421 cmd = (void *) skb->data;
6422 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6424 len = le16_to_cpu(cmd->len);
6426 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
/* Command must fit in what remains; ident 0 is reserved. */
6429 if (len > skb->len || !cmd->ident) {
6430 BT_DBG("corrupted command");
6434 err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
6436 struct l2cap_cmd_rej_unk rej;
6438 BT_ERR("Wrong link type (%d)", err);
6440 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6441 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Verify (and strip) the CRC16 FCS trailer of an ERTM/streaming frame.
 * The CRC covers the L2CAP header (enhanced or extended, depending on
 * FLAG_EXT_CTRL) plus the payload, so the computation starts hdr_size
 * bytes before skb->data.
 */
6452 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
6454 u16 our_fcs, rcv_fcs;
6457 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6458 hdr_size = L2CAP_EXT_HDR_SIZE;
6460 hdr_size = L2CAP_ENH_HDR_SIZE;
6462 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off, then read it from just past the new tail. */
6463 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6464 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6465 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6467 if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) by sending whatever frame can carry the F-bit:
 * an RNR if locally busy, otherwise pending I-frames, otherwise an RR.
 * Guarantees exactly one F-bit response reaches the peer.
 */
6473 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
6475 struct l2cap_ctrl control;
6477 BT_DBG("chan %p", chan);
6479 memset(&control, 0, sizeof(control));
6482 control.reqseq = chan->buffer_seq;
6483 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6485 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6486 control.super = L2CAP_SUPER_RNR;
6487 l2cap_send_sframe(chan, &control);
6490 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
6491 chan->unacked_frames > 0)
6492 __set_retrans_timer(chan);
6494 /* Send pending iframes */
6495 l2cap_ertm_send(chan);
6497 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
6498 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
6499 /* F-bit wasn't sent in an s-frame or i-frame yet, so
6502 control.super = L2CAP_SUPER_RR;
6503 l2cap_send_sframe(chan, &control);
/* Append new_frag to skb's frag_list, maintaining *last_frag as a tail
 * pointer for O(1) appends, and keep the aggregate len/data_len/truesize
 * accounting consistent.
 */
6507 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
6508 struct sk_buff **last_frag)
6510 /* skb->len reflects data in skb as well as all fragments
6511 * skb->data_len reflects only data in fragments
6513 if (!skb_has_frag_list(skb))
6514 skb_shinfo(skb)->frag_list = new_frag;
6516 new_frag->next = NULL;
6518 (*last_frag)->next = new_frag;
6519 *last_frag = new_frag;
6521 skb->len += new_frag->len;
6522 skb->data_len += new_frag->len;
6523 skb->truesize += new_frag->truesize;
/* Reassemble a (possibly segmented) SDU from ERTM/streaming I-frames,
 * keyed on the SAR field: UNSEGMENTED is delivered directly; START
 * captures the SDU length; CONTINUE/END append fragments and deliver
 * when complete. On error the partial SDU is freed.
 * NOTE(review): lines are elided in this excerpt (e.g. the END case
 * label and several returns); consult the full source for the exact
 * error paths.
 */
6526 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
6527 struct l2cap_ctrl *control)
6531 switch (control->sar) {
6532 case L2CAP_SAR_UNSEGMENTED:
6536 err = chan->ops->recv(chan, skb);
6539 case L2CAP_SAR_START:
/* First two bytes of a START fragment carry the total SDU length. */
6543 if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
6546 chan->sdu_len = get_unaligned_le16(skb->data);
6547 skb_pull(skb, L2CAP_SDULEN_SIZE);
6549 if (chan->sdu_len > chan->imtu) {
6554 if (skb->len >= chan->sdu_len)
6558 chan->sdu_last_frag = skb;
6564 case L2CAP_SAR_CONTINUE:
6568 append_skb_frag(chan->sdu, skb,
6569 &chan->sdu_last_frag);
/* An over-long partial SDU is a protocol error. */
6572 if (chan->sdu->len >= chan->sdu_len)
6582 append_skb_frag(chan->sdu, skb,
6583 &chan->sdu_last_frag);
6586 if (chan->sdu->len != chan->sdu_len)
6589 err = chan->ops->recv(chan, chan->sdu);
6592 /* Reassembly complete */
6594 chan->sdu_last_frag = NULL;
6602 kfree_skb(chan->sdu);
6604 chan->sdu_last_frag = NULL;
/* Re-segment queued outbound data after an AMP channel move changes the
 * link MTU. NOTE(review): the body is elided in this excerpt — in
 * upstream this is a placeholder; confirm against the full source.
 */
6611 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy transition (receiver cannot accept data / can again)
 * into the ERTM tx state machine. No-op for non-ERTM channels.
 */
6617 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6621 if (chan->mode != L2CAP_MODE_ERTM)
6624 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6625 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue of frames that are now in sequence, reassembling
 * each, until a gap is found or local busy is raised. When the queue
 * empties, the channel returns to the normal RECV state and acks.
 */
6628 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6631 /* Pass sequential frames to l2cap_reassemble_sdu()
6632 * until a gap is encountered.
6635 BT_DBG("chan %p", chan);
6637 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6638 struct sk_buff *skb;
6639 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6640 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6642 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6647 skb_unlink(skb, &chan->srej_q);
6648 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6649 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6654 if (skb_queue_empty(&chan->srej_q)) {
6655 chan->rx_state = L2CAP_RX_STATE_RECV;
6656 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: retransmit the single requested frame
 * if valid and within the retry limit; otherwise disconnect. Tracks
 * srej_save_reqseq / CONN_SREJ_ACT so a later F-bit response to the
 * same reqseq is not retransmitted twice.
 * NOTE(review): lines elided in this excerpt — some branch bodies are
 * only partially visible.
 */
6662 static void l2cap_handle_srej(struct l2cap_chan *chan,
6663 struct l2cap_ctrl *control)
6665 struct sk_buff *skb;
6667 BT_DBG("chan %p, control %p", chan, control);
/* An SREJ for the next unsent sequence number is nonsensical. */
6669 if (control->reqseq == chan->next_tx_seq) {
6670 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6671 l2cap_send_disconn_req(chan, ECONNRESET);
6675 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6678 BT_DBG("Seq %d not available for retransmission",
6683 if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6684 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6685 l2cap_send_disconn_req(chan, ECONNRESET);
6689 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6691 if (control->poll) {
6692 l2cap_pass_to_tx(chan, control);
6694 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6695 l2cap_retransmit(chan, control);
6696 l2cap_ertm_send(chan);
6698 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6699 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6700 chan->srej_save_reqseq = control->reqseq;
6703 l2cap_pass_to_tx_fbit(chan, control);
6705 if (control->final) {
/* Skip the retransmit only if this F-bit answers the saved SREJ. */
6706 if (chan->srej_save_reqseq != control->reqseq ||
6707 !test_and_clear_bit(CONN_SREJ_ACT,
6709 l2cap_retransmit(chan, control);
6711 l2cap_retransmit(chan, control);
6712 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6713 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6714 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: retransmit everything from reqseq
 * onward, unless the reqseq is invalid or the retry limit is exceeded
 * (both disconnect). CONN_REJ_ACT suppresses a duplicate retransmit
 * when the matching F-bit arrives later.
 */
6720 static void l2cap_handle_rej(struct l2cap_chan *chan,
6721 struct l2cap_ctrl *control)
6723 struct sk_buff *skb;
6725 BT_DBG("chan %p, control %p", chan, control);
6727 if (control->reqseq == chan->next_tx_seq) {
6728 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6729 l2cap_send_disconn_req(chan, ECONNRESET);
6733 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6735 if (chan->max_tx && skb &&
6736 bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6737 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6738 l2cap_send_disconn_req(chan, ECONNRESET);
6742 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6744 l2cap_pass_to_tx(chan, control);
6746 if (control->final) {
/* Only retransmit if this F-bit wasn't already accounted for. */
6747 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6748 l2cap_retransmit_all(chan, control);
6750 l2cap_retransmit_all(chan, control);
6751 l2cap_ertm_send(chan);
6752 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6753 set_bit(CONN_REJ_ACT, &chan->conn_state);
6757 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6759 BT_DBG("chan %p, txseq %d", chan, txseq);
6761 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6762 chan->expected_tx_seq);
6764 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6765 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6767 /* See notes below regarding "double poll" and
6770 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6771 BT_DBG("Invalid/Ignore - after SREJ");
6772 return L2CAP_TXSEQ_INVALID_IGNORE;
6774 BT_DBG("Invalid - in window after SREJ sent");
6775 return L2CAP_TXSEQ_INVALID;
6779 if (chan->srej_list.head == txseq) {
6780 BT_DBG("Expected SREJ");
6781 return L2CAP_TXSEQ_EXPECTED_SREJ;
6784 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6785 BT_DBG("Duplicate SREJ - txseq already stored");
6786 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6789 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6790 BT_DBG("Unexpected SREJ - not requested");
6791 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6795 if (chan->expected_tx_seq == txseq) {
6796 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6798 BT_DBG("Invalid - txseq outside tx window");
6799 return L2CAP_TXSEQ_INVALID;
6802 return L2CAP_TXSEQ_EXPECTED;
6806 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6807 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6808 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6809 return L2CAP_TXSEQ_DUPLICATE;
6812 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6813 /* A source of invalid packets is a "double poll" condition,
6814 * where delays cause us to send multiple poll packets. If
6815 * the remote stack receives and processes both polls,
6816 * sequence numbers can wrap around in such a way that a
6817 * resent frame has a sequence number that looks like new data
6818 * with a sequence gap. This would trigger an erroneous SREJ
6821 * Fortunately, this is impossible with a tx window that's
6822 * less than half of the maximum sequence number, which allows
6823 * invalid frames to be safely ignored.
6825 * With tx window sizes greater than half of the tx window
6826 * maximum, the frame is invalid and cannot be ignored. This
6827 * causes a disconnect.
6830 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6831 BT_DBG("Invalid/Ignore - txseq outside tx window");
6832 return L2CAP_TXSEQ_INVALID_IGNORE;
6834 BT_DBG("Invalid - txseq outside tx window");
6835 return L2CAP_TXSEQ_INVALID;
6838 BT_DBG("Unexpected - txseq indicates missing frames");
6839 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive state machine, normal RECV state. Dispatches on the rx
 * event: in-sequence I-frames are reassembled and acked; a sequence gap
 * triggers an SREJ and a transition to SREJ_SENT; RR/RNR/REJ/SREJ
 * S-frames drive the tx side. Frames not queued for later (skb_in_use
 * unset) are freed at the end.
 * NOTE(review): lines elided in this excerpt — several break/return
 * statements and skb_in_use assignments are not visible.
 */
6843 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6844 struct l2cap_ctrl *control,
6845 struct sk_buff *skb, u8 event)
6848 bool skb_in_use = false;
6850 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6854 case L2CAP_EV_RECV_IFRAME:
6855 switch (l2cap_classify_txseq(chan, control->txseq)) {
6856 case L2CAP_TXSEQ_EXPECTED:
6857 l2cap_pass_to_tx(chan, control);
/* Under local busy, the frame is dropped and re-requested later. */
6859 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6860 BT_DBG("Busy, discarding expected seq %d",
6865 chan->expected_tx_seq = __next_seq(chan,
6868 chan->buffer_seq = chan->expected_tx_seq;
6871 err = l2cap_reassemble_sdu(chan, skb, control);
6875 if (control->final) {
6876 if (!test_and_clear_bit(CONN_REJ_ACT,
6877 &chan->conn_state)) {
6879 l2cap_retransmit_all(chan, control);
6880 l2cap_ertm_send(chan);
6884 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6885 l2cap_send_ack(chan);
6887 case L2CAP_TXSEQ_UNEXPECTED:
6888 l2cap_pass_to_tx(chan, control);
6890 /* Can't issue SREJ frames in the local busy state.
6891 * Drop this frame, it will be seen as missing
6892 * when local busy is exited.
6894 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6895 BT_DBG("Busy, discarding unexpected seq %d",
6900 /* There was a gap in the sequence, so an SREJ
6901 * must be sent for each missing frame. The
6902 * current frame is stored for later use.
6904 skb_queue_tail(&chan->srej_q, skb);
6906 BT_DBG("Queued %p (queue len %d)", skb,
6907 skb_queue_len(&chan->srej_q));
6909 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6910 l2cap_seq_list_clear(&chan->srej_list);
6911 l2cap_send_srej(chan, control->txseq);
6913 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6915 case L2CAP_TXSEQ_DUPLICATE:
6916 l2cap_pass_to_tx(chan, control);
6918 case L2CAP_TXSEQ_INVALID_IGNORE:
6920 case L2CAP_TXSEQ_INVALID:
6922 l2cap_send_disconn_req(chan, ECONNRESET);
6926 case L2CAP_EV_RECV_RR:
6927 l2cap_pass_to_tx(chan, control);
6928 if (control->final) {
6929 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6931 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6932 !__chan_is_moving(chan)) {
6934 l2cap_retransmit_all(chan, control);
6937 l2cap_ertm_send(chan);
6938 } else if (control->poll) {
6939 l2cap_send_i_or_rr_or_rnr(chan);
6941 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6942 &chan->conn_state) &&
6943 chan->unacked_frames)
6944 __set_retrans_timer(chan);
6946 l2cap_ertm_send(chan);
6949 case L2CAP_EV_RECV_RNR:
6950 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6951 l2cap_pass_to_tx(chan, control);
6952 if (control && control->poll) {
6953 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6954 l2cap_send_rr_or_rnr(chan, 0);
6956 __clear_retrans_timer(chan);
6957 l2cap_seq_list_clear(&chan->retrans_list);
6959 case L2CAP_EV_RECV_REJ:
6960 l2cap_handle_rej(chan, control);
6962 case L2CAP_EV_RECV_SREJ:
6963 l2cap_handle_srej(chan, control);
/* Free any frame that was not stashed in a queue above. */
6969 if (skb && !skb_in_use) {
6970 BT_DBG("Freeing %p", skb);
/* ERTM receive state machine, SREJ_SENT state (waiting on selective
 * retransmissions). Incoming I-frames are queued in srej_q; when the
 * frame at the head of the SREJ list arrives, queued in-sequence frames
 * are drained via l2cap_rx_queued_iframes(). S-frames are handled much
 * like in RECV state but acks use l2cap_send_srej_tail().
 * NOTE(review): lines elided in this excerpt — break/return statements
 * and skb_in_use assignments are not all visible.
 */
6977 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6978 struct l2cap_ctrl *control,
6979 struct sk_buff *skb, u8 event)
6982 u16 txseq = control->txseq;
6983 bool skb_in_use = false;
6985 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6989 case L2CAP_EV_RECV_IFRAME:
6990 switch (l2cap_classify_txseq(chan, txseq)) {
6991 case L2CAP_TXSEQ_EXPECTED:
6992 /* Keep frame for reassembly later */
6993 l2cap_pass_to_tx(chan, control);
6994 skb_queue_tail(&chan->srej_q, skb);
6996 BT_DBG("Queued %p (queue len %d)", skb,
6997 skb_queue_len(&chan->srej_q));
6999 chan->expected_tx_seq = __next_seq(chan, txseq);
7001 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* The retransmission we asked for arrived; drop it from the list. */
7002 l2cap_seq_list_pop(&chan->srej_list);
7004 l2cap_pass_to_tx(chan, control);
7005 skb_queue_tail(&chan->srej_q, skb);
7007 BT_DBG("Queued %p (queue len %d)", skb,
7008 skb_queue_len(&chan->srej_q));
7010 err = l2cap_rx_queued_iframes(chan);
7015 case L2CAP_TXSEQ_UNEXPECTED:
7016 /* Got a frame that can't be reassembled yet.
7017 * Save it for later, and send SREJs to cover
7018 * the missing frames.
7020 skb_queue_tail(&chan->srej_q, skb);
7022 BT_DBG("Queued %p (queue len %d)", skb,
7023 skb_queue_len(&chan->srej_q));
7025 l2cap_pass_to_tx(chan, control);
7026 l2cap_send_srej(chan, control->txseq);
7028 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
7029 /* This frame was requested with an SREJ, but
7030 * some expected retransmitted frames are
7031 * missing. Request retransmission of missing
7034 skb_queue_tail(&chan->srej_q, skb);
7036 BT_DBG("Queued %p (queue len %d)", skb,
7037 skb_queue_len(&chan->srej_q));
7039 l2cap_pass_to_tx(chan, control);
7040 l2cap_send_srej_list(chan, control->txseq);
7042 case L2CAP_TXSEQ_DUPLICATE_SREJ:
7043 /* We've already queued this frame. Drop this copy. */
7044 l2cap_pass_to_tx(chan, control);
7046 case L2CAP_TXSEQ_DUPLICATE:
7047 /* Expecting a later sequence number, so this frame
7048 * was already received. Ignore it completely.
7051 case L2CAP_TXSEQ_INVALID_IGNORE:
7053 case L2CAP_TXSEQ_INVALID:
7055 l2cap_send_disconn_req(chan, ECONNRESET);
7059 case L2CAP_EV_RECV_RR:
7060 l2cap_pass_to_tx(chan, control);
7061 if (control->final) {
7062 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7064 if (!test_and_clear_bit(CONN_REJ_ACT,
7065 &chan->conn_state)) {
7067 l2cap_retransmit_all(chan, control);
7070 l2cap_ertm_send(chan);
7071 } else if (control->poll) {
7072 if (test_and_clear_bit(CONN_REMOTE_BUSY,
7073 &chan->conn_state) &&
7074 chan->unacked_frames) {
7075 __set_retrans_timer(chan);
7078 set_bit(CONN_SEND_FBIT, &chan->conn_state);
7079 l2cap_send_srej_tail(chan);
7081 if (test_and_clear_bit(CONN_REMOTE_BUSY,
7082 &chan->conn_state) &&
7083 chan->unacked_frames)
7084 __set_retrans_timer(chan);
7086 l2cap_send_ack(chan);
7089 case L2CAP_EV_RECV_RNR:
7090 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7091 l2cap_pass_to_tx(chan, control);
7092 if (control->poll) {
7093 l2cap_send_srej_tail(chan);
7095 struct l2cap_ctrl rr_control;
7096 memset(&rr_control, 0, sizeof(rr_control));
7097 rr_control.sframe = 1;
7098 rr_control.super = L2CAP_SUPER_RR;
7099 rr_control.reqseq = chan->buffer_seq;
7100 l2cap_send_sframe(chan, &rr_control);
7104 case L2CAP_EV_RECV_REJ:
7105 l2cap_handle_rej(chan, control);
7107 case L2CAP_EV_RECV_SREJ:
7108 l2cap_handle_srej(chan, control);
7112 if (skb && !skb_in_use) {
7113 BT_DBG("Freeing %p", skb);
/* Complete an AMP channel move: return to the RECV state, adopt the MTU
 * of the new link (high-speed block MTU vs ACL MTU), and re-segment any
 * queued outbound data for the new MTU.
 */
7120 static int l2cap_finish_move(struct l2cap_chan *chan)
7122 BT_DBG("chan %p", chan);
7124 chan->rx_state = L2CAP_RX_STATE_RECV;
7127 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7129 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7131 return l2cap_resegment(chan);
/* ERTM receive state machine, WAIT_P state (channel move: waiting for a
 * poll from the peer). On the poll, rewind the tx queue to the peer's
 * reqseq, finish the move, answer with an F-bit frame, and then process
 * the event through the normal RECV handler.
 * NOTE(review): the surrounding poll check is elided in this excerpt.
 */
7134 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
7135 struct l2cap_ctrl *control,
7136 struct sk_buff *skb, u8 event)
7140 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7146 l2cap_process_reqseq(chan, control->reqseq);
7148 if (!skb_queue_empty(&chan->tx_q))
7149 chan->tx_send_head = skb_peek(&chan->tx_q);
7151 chan->tx_send_head = NULL;
7153 /* Rewind next_tx_seq to the point expected
7156 chan->next_tx_seq = control->reqseq;
7157 chan->unacked_frames = 0;
7159 err = l2cap_finish_move(chan);
7163 set_bit(CONN_SEND_FBIT, &chan->conn_state);
7164 l2cap_send_i_or_rr_or_rnr(chan);
7166 if (event == L2CAP_EV_RECV_IFRAME)
/* S-frame events fall through to the normal RECV handler. */
7169 return l2cap_rx_state_recv(chan, control, NULL, event);
/* ERTM receive state machine, WAIT_F state (channel move: waiting for
 * the peer's F-bit response). On the final bit, rewind the tx queue to
 * reqseq, adopt the new link's MTU, re-segment, and process the frame
 * through the normal RECV handler.
 */
7172 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
7173 struct l2cap_ctrl *control,
7174 struct sk_buff *skb, u8 event)
7178 if (!control->final)
7181 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7183 chan->rx_state = L2CAP_RX_STATE_RECV;
7184 l2cap_process_reqseq(chan, control->reqseq);
7186 if (!skb_queue_empty(&chan->tx_q))
7187 chan->tx_send_head = skb_peek(&chan->tx_q);
7189 chan->tx_send_head = NULL;
7191 /* Rewind next_tx_seq to the point expected
7194 chan->next_tx_seq = control->reqseq;
7195 chan->unacked_frames = 0;
/* Pick the MTU of whichever link the channel now runs on. */
7198 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7200 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7202 err = l2cap_resegment(chan);
7205 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Check that a received reqseq acknowledges only frames that are
 * actually outstanding: its modular offset from next_tx_seq must not
 * exceed the offset of expected_ack_seq (the unacked window).
 */
7210 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7212 /* Make sure reqseq is for a packet that has been sent but not acked */
7215 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7216 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* ERTM receive entry point: validate the frame's reqseq, then dispatch
 * to the handler for the channel's current RX state. An invalid reqseq
 * is a protocol violation and tears the channel down.
 */
7219 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7220 struct sk_buff *skb, u8 event)
7224 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7225 control, skb, event, chan->rx_state);
7227 if (__valid_reqseq(chan, control->reqseq)) {
7228 switch (chan->rx_state) {
7229 case L2CAP_RX_STATE_RECV:
7230 err = l2cap_rx_state_recv(chan, control, skb, event);
7232 case L2CAP_RX_STATE_SREJ_SENT:
7233 err = l2cap_rx_state_srej_sent(chan, control, skb,
7236 case L2CAP_RX_STATE_WAIT_P:
7237 err = l2cap_rx_state_wait_p(chan, control, skb, event);
7239 case L2CAP_RX_STATE_WAIT_F:
7240 err = l2cap_rx_state_wait_f(chan, control, skb, event);
/* reqseq outside the unacked window: unrecoverable, disconnect */
7247 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
7248 control->reqseq, chan->next_tx_seq,
7249 chan->expected_ack_seq);
7250 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode receive: only in-sequence I-frames are reassembled;
 * anything else is dropped (streaming mode has no retransmission).
 * Always advances expected_tx_seq past the received txseq.
 */
7256 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7257 struct sk_buff *skb)
7259 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
7262 if (l2cap_classify_txseq(chan, control->txseq) ==
7263 L2CAP_TXSEQ_EXPECTED) {
7264 l2cap_pass_to_tx(chan, control);
7266 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
7267 __next_seq(chan, chan->buffer_seq));
7269 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
7271 l2cap_reassemble_sdu(chan, skb, control);
/* Reassembly failed mid-SDU: discard the partial SDU */
7274 kfree_skb(chan->sdu);
7277 chan->sdu_last_frag = NULL;
/* Out-of-sequence frame: silently dropped in streaming mode */
7281 BT_DBG("Freeing %p", skb);
7286 chan->last_acked_seq = control->txseq;
7287 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Parse and validate one ERTM/streaming PDU, then hand it to the
 * appropriate receive path: l2cap_rx() for ERTM, l2cap_stream_rx() for
 * streaming-mode I-frames. FCS failures are dropped silently (recovery
 * is driven by sequence numbers); most other violations disconnect.
 */
7292 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7294 struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
7298 __unpack_control(chan, skb);
7303 * We can just drop the corrupted I-frame here.
7304 * Receiver will miss it and start proper recovery
7305 * procedures and ask for retransmission.
7307 if (l2cap_check_fcs(chan, skb))
/* SDU-start I-frames carry an extra 2-byte SDU length field */
7310 if (!control->sframe && control->sar == L2CAP_SAR_START)
7311 len -= L2CAP_SDULEN_SIZE;
7313 if (chan->fcs == L2CAP_FCS_CRC16)
7314 len -= L2CAP_FCS_SIZE;
/* Payload larger than the negotiated MPS is a protocol violation */
7316 if (len > chan->mps) {
7317 l2cap_send_disconn_req(chan, ECONNRESET);
/* Optional per-channel filter hook may consume/reject the skb */
7321 if (chan->ops->filter) {
7322 if (chan->ops->filter(chan, skb))
7326 if (!control->sframe) {
7329 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
7330 control->sar, control->reqseq, control->final,
7333 /* Validate F-bit - F=0 always valid, F=1 only
7334 * valid in TX WAIT_F
7336 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
7339 if (chan->mode != L2CAP_MODE_STREAMING) {
7340 event = L2CAP_EV_RECV_IFRAME;
7341 err = l2cap_rx(chan, control, skb, event);
7343 err = l2cap_stream_rx(chan, control, skb);
7347 l2cap_send_disconn_req(chan, ECONNRESET);
/* Map the S-frame "super" field to the RX state-machine event */
7349 const u8 rx_func_to_event[4] = {
7350 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
7351 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
7354 /* Only I-frames are expected in streaming mode */
7355 if (chan->mode == L2CAP_MODE_STREAMING)
7358 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
7359 control->reqseq, control->final, control->poll,
/* S-frames carry no payload; trailing bytes are an error */
7363 BT_ERR("Trailing bytes: %d in sframe", len);
7364 l2cap_send_disconn_req(chan, ECONNRESET);
7368 /* Validate F and P bits */
7369 if (control->final && (control->poll ||
7370 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
7373 event = rx_func_to_event[control->super];
7374 if (l2cap_rx(chan, control, skb, event))
7375 l2cap_send_disconn_req(chan, ECONNRESET);
/* Return LE flow-control credits to the remote sender. The target
 * credit level is enough for one full IMTU worth of MPS-sized PDUs
 * (+1); if the channel already holds at least that many, nothing is
 * sent. Otherwise top rx_credits up and send an LE Credits packet.
 */
7385 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7387 struct l2cap_conn *conn = chan->conn;
7388 struct l2cap_le_credits pkt;
/* Credits needed to receive a maximum-size SDU, rounded up */
7391 return_credits = (chan->imtu / chan->mps) + 1;
7393 if (chan->rx_credits >= return_credits)
7396 return_credits -= chan->rx_credits;
7398 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7400 chan->rx_credits += return_credits;
7402 pkt.cid = cpu_to_le16(chan->scid);
7403 pkt.credits = cpu_to_le16(return_credits);
7405 chan->ident = l2cap_get_ident(conn);
7407 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Deliver a fully reassembled LE/ECRED SDU to the channel owner, then
 * replenish the sender's credits. Returns the ops->recv() result
 * (NOTE(review): error-return path lines are elided in this excerpt).
 */
7410 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7414 BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7416 /* Wait recv to confirm reception before updating the credits */
7417 err = chan->ops->recv(chan, skb);
7419 /* Update credits whenever an SDU is received */
7420 l2cap_chan_le_send_credits(chan);
/* Receive one LE/ECRED credit-based PDU: enforce credits and size
 * limits, consume one rx credit, and either start a new SDU (first PDU
 * carries a 2-byte SDU length) or append a continuation fragment to
 * chan->sdu until the full SDU length has arrived, then deliver it via
 * l2cap_ecred_recv(). Errors free the partial SDU; the function never
 * reports an error upward for internally freed skbs (see tail comment).
 */
7425 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
/* Peer sent data without credit: protocol violation, disconnect */
7429 if (!chan->rx_credits) {
7430 BT_ERR("No credits to receive LE L2CAP data");
7431 l2cap_send_disconn_req(chan, ECONNRESET);
7435 if (chan->imtu < skb->len) {
7436 BT_ERR("Too big LE L2CAP PDU");
/* NOTE(review): the rx_credits decrement is elided in this excerpt;
 * the debug line below prints old -> new around it.
 */
7441 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
7443 /* Update if remote had run out of credits, this should only happen
7444 * if the remote is not using the entire MPS.
7446 if (!chan->rx_credits)
7447 l2cap_chan_le_send_credits(chan);
/* First PDU of an SDU: starts with the total SDU length */
7454 sdu_len = get_unaligned_le16(skb->data);
7455 skb_pull(skb, L2CAP_SDULEN_SIZE);
7457 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
7458 sdu_len, skb->len, chan->imtu);
7460 if (sdu_len > chan->imtu) {
7461 BT_ERR("Too big LE L2CAP SDU length received");
7466 if (skb->len > sdu_len) {
7467 BT_ERR("Too much LE L2CAP data received");
/* Whole SDU fit in a single PDU: deliver immediately */
7472 if (skb->len == sdu_len)
7473 return l2cap_ecred_recv(chan, skb);
7476 chan->sdu_len = sdu_len;
7477 chan->sdu_last_frag = skb;
7479 /* Detect if remote is not able to use the selected MPS */
7480 if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
7481 u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
7483 /* Adjust the number of credits */
7484 BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
7485 chan->mps = mps_len;
7486 l2cap_chan_le_send_credits(chan);
/* Continuation PDU: append to the SDU under reassembly */
7492 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
7493 chan->sdu->len, skb->len, chan->sdu_len);
7495 if (chan->sdu->len + skb->len > chan->sdu_len) {
7496 BT_ERR("Too much LE L2CAP data received");
7501 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
/* SDU complete: deliver and reset reassembly state */
7504 if (chan->sdu->len == chan->sdu_len) {
7505 err = l2cap_ecred_recv(chan, chan->sdu);
7508 chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU */
7516 kfree_skb(chan->sdu);
7518 chan->sdu_last_frag = NULL;
7522 /* We can't return an error here since we took care of the skb
7523 * freeing internally. An error return would cause the caller to
7524 * do a double-free of the skb.
/* Route an incoming data PDU to the channel identified by its CID and
 * invoke the mode-specific receive path (LE/ECRED flow control, basic
 * mode, or ERTM/streaming). Unknown CIDs are dropped; the A2MP CID may
 * create a channel on the fly.
 */
7529 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
7530 struct sk_buff *skb)
7532 struct l2cap_chan *chan;
7534 chan = l2cap_get_chan_by_scid(conn, cid);
7536 if (cid == L2CAP_CID_A2MP) {
7537 chan = a2mp_channel_create(conn, skb);
7543 l2cap_chan_lock(chan);
7545 BT_DBG("unknown cid 0x%4.4x", cid);
7546 /* Drop packet and return */
7552 BT_DBG("chan %p, len %d", chan, skb->len);
7554 /* If we receive data on a fixed channel before the info req/rsp
7555 * procedure is done simply assume that the channel is supported
7556 * and mark it as ready.
7558 if (chan->chan_type == L2CAP_CHAN_FIXED)
7559 l2cap_chan_ready(chan);
7561 if (chan->state != BT_CONNECTED)
7564 switch (chan->mode) {
7565 case L2CAP_MODE_LE_FLOWCTL:
7566 case L2CAP_MODE_EXT_FLOWCTL:
7567 if (l2cap_ecred_data_rcv(chan, skb) < 0)
7572 case L2CAP_MODE_BASIC:
7573 /* If socket recv buffers overflows we drop data here
7574 * which is *bad* because L2CAP has to be reliable.
7575 * But we don't have any other choice. L2CAP doesn't
7576 * provide flow control mechanism. */
7578 if (chan->imtu < skb->len) {
7579 BT_ERR("Dropping L2CAP data: receive buffer overflow");
/* recv() returning 0 means the skb was consumed by the owner */
7583 if (!chan->ops->recv(chan, skb))
7587 case L2CAP_MODE_ERTM:
7588 case L2CAP_MODE_STREAMING:
7589 l2cap_data_rcv(chan, skb);
7593 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7601 l2cap_chan_unlock(chan);
/* Deliver a connectionless (unicast connectionless data, CID 0x0002)
 * PDU to a global channel listening on the given PSM. ACL links only;
 * the remote address and PSM are stashed in the skb control block so
 * recvmsg can report them as msg_name.
 */
7604 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7605 struct sk_buff *skb)
7607 struct hci_conn *hcon = conn->hcon;
7608 struct l2cap_chan *chan;
7610 if (hcon->type != ACL_LINK)
7613 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7618 BT_DBG("chan %p, len %d", chan, skb->len);
7620 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7623 if (chan->imtu < skb->len)
7626 /* Store remote BD_ADDR and PSM for msg_name */
7627 bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7628 bt_cb(skb)->l2cap.psm = psm;
/* recv() == 0 means ownership of the skb passed to the channel */
7630 if (!chan->ops->recv(chan, skb)) {
7631 l2cap_chan_put(chan);
7636 l2cap_chan_put(chan);
/* Demultiplex one complete L2CAP frame: parse the basic header and
 * dispatch by CID to the signaling, connectionless, LE signaling, or
 * data-channel handlers. Frames arriving before the link is fully
 * connected are queued on conn->pending_rx for later processing.
 */
7641 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7643 struct l2cap_hdr *lh = (void *) skb->data;
7644 struct hci_conn *hcon = conn->hcon;
7648 if (hcon->state != BT_CONNECTED) {
7649 BT_DBG("queueing pending rx skb");
7650 skb_queue_tail(&conn->pending_rx, skb);
7654 skb_pull(skb, L2CAP_HDR_SIZE);
7655 cid = __le16_to_cpu(lh->cid);
7656 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length */
7658 if (len != skb->len) {
7663 /* Since we can't actively block incoming LE connections we must
7664 * at least ensure that we ignore incoming data from them.
7666 if (hcon->type == LE_LINK &&
7667 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
7668 bdaddr_dst_type(hcon))) {
7673 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7676 case L2CAP_CID_SIGNALING:
7677 l2cap_sig_channel(conn, skb);
7680 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM right after the header */
7681 psm = get_unaligned((__le16 *) skb->data);
7682 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7683 l2cap_conless_channel(conn, psm, skb);
7686 case L2CAP_CID_LE_SIGNALING:
7687 l2cap_le_sig_channel(conn, skb);
7691 l2cap_data_channel(conn, cid, skb);
/* Work item: drain frames queued on conn->pending_rx (received before
 * the HCI link reached BT_CONNECTED) through l2cap_recv_frame().
 */
7696 static void process_pending_rx(struct work_struct *work)
7698 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7700 struct sk_buff *skb;
7704 while ((skb = skb_dequeue(&conn->pending_rx)))
7705 l2cap_recv_frame(conn, skb);
/* Create (or return the existing) l2cap_conn for an HCI connection:
 * allocate the conn, attach an hci_chan, pick the MTU from the link
 * type, advertise local fixed channels, and initialize locks, lists,
 * timers and work items.
 */
7708 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7710 struct l2cap_conn *conn = hcon->l2cap_data;
7711 struct hci_chan *hchan;
7716 hchan = hci_chan_create(hcon);
7720 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan before bailing */
7722 hci_chan_del(hchan);
7726 kref_init(&conn->ref);
7727 hcon->l2cap_data = conn;
7728 conn->hcon = hci_conn_get(hcon);
7729 conn->hchan = hchan;
7731 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* MTU selection by link type: LE MTU when set, else ACL MTU */
7733 switch (hcon->type) {
7735 if (hcon->hdev->le_mtu) {
7736 conn->mtu = hcon->hdev->le_mtu;
7741 conn->mtu = hcon->hdev->acl_mtu;
7745 conn->feat_mask = 0;
7747 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7749 if (hcon->type == ACL_LINK &&
7750 hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7751 conn->local_fixed_chan |= L2CAP_FC_A2MP;
/* BR/EDR SMP fixed channel only with LE + secure connections (or
 * the force-BR/EDR-SMP debug flag)
 */
7753 if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7754 (bredr_sc_enabled(hcon->hdev) ||
7755 hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7756 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7758 mutex_init(&conn->ident_lock);
7759 mutex_init(&conn->chan_lock);
7761 INIT_LIST_HEAD(&conn->chan_l);
7762 INIT_LIST_HEAD(&conn->users);
7764 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7766 skb_queue_head_init(&conn->pending_rx);
7767 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7768 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7770 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Validate a PSM for the destination address type: LE PSMs are a
 * single byte (<= 0x00ff); BR/EDR PSMs must have an odd low byte and a
 * clear LSB in the upper byte, per the L2CAP spec.
 */
7775 static bool is_valid_psm(u16 psm, u8 dst_type) {
7779 if (bdaddr_type_is_le(dst_type))
7780 return (psm <= 0x00ff);
7782 /* PSM must be odd and lsb of upper byte must be 0 */
7783 return ((psm & 0x0101) == 0x0001);
/* Iterator context for l2cap_chan_list() callbacks: the reference
 * channel plus accumulated match data (remaining fields elided in this
 * excerpt — presumably pid and count; see l2cap_chan_by_pid).
 */
7786 struct l2cap_chan_data {
7787 struct l2cap_chan *chan;
/* l2cap_chan_list() callback: count deferred EXT_FLOWCTL channels in
 * BT_CONNECT that share the reference channel's PID and PSM (used to
 * cap the number of simultaneous ECRED connection attempts).
 */
7792 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7794 struct l2cap_chan_data *d = data;
/* Skip the reference channel itself */
7797 if (chan == d->chan)
7800 if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7803 pid = chan->ops->get_peer_pid(chan);
7805 /* Only count deferred channels with the same PID/PSM */
7806 if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7807 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
/* Initiate an outgoing L2CAP channel to @dst: validate PSM/CID and
 * channel mode, create (or reuse) the HCI connection for the address
 * type, attach the channel to the l2cap_conn, and either start channel
 * setup immediately (link already up) or arm the channel timer to wait
 * for the link. Returns 0 on success or a negative errno.
 */
7813 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7814 bdaddr_t *dst, u8 dst_type)
7816 struct l2cap_conn *conn;
7817 struct hci_conn *hcon;
7818 struct hci_dev *hdev;
7821 BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
7822 dst, dst_type, __le16_to_cpu(psm), chan->mode);
7824 hdev = hci_get_route(dst, &chan->src, chan->src_type);
7826 return -EHOSTUNREACH;
/* Raw channels may use any PSM/CID; others must be valid */
7830 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7831 chan->chan_type != L2CAP_CHAN_RAW) {
7836 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7841 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
/* Mode checks: ECRED requires the enable_ecred module knob */
7846 switch (chan->mode) {
7847 case L2CAP_MODE_BASIC:
7849 case L2CAP_MODE_LE_FLOWCTL:
7851 case L2CAP_MODE_EXT_FLOWCTL:
7852 if (!enable_ecred) {
7857 case L2CAP_MODE_ERTM:
7858 case L2CAP_MODE_STREAMING:
7867 switch (chan->state) {
7871 /* Already connecting */
7876 /* Already connected */
7890 /* Set destination address and psm */
7891 bacpy(&chan->dst, dst);
7892 chan->dst_type = dst_type;
7897 if (bdaddr_type_is_le(dst_type)) {
7898 /* Convert from L2CAP channel address type to HCI address type
7900 if (dst_type == BDADDR_LE_PUBLIC)
7901 dst_type = ADDR_LE_DEV_PUBLIC;
7903 dst_type = ADDR_LE_DEV_RANDOM;
/* While advertising we connect directly (slave role); otherwise go
 * through the passive-scan connect path.
 */
7905 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7906 hcon = hci_connect_le(hdev, dst, dst_type,
7908 HCI_LE_CONN_TIMEOUT,
7909 HCI_ROLE_SLAVE, NULL);
7911 hcon = hci_connect_le_scan(hdev, dst, dst_type,
7913 HCI_LE_CONN_TIMEOUT,
7914 CONN_REASON_L2CAP_CHAN);
7917 u8 auth_type = l2cap_get_auth_type(chan);
7918 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
7919 CONN_REASON_L2CAP_CHAN);
7923 err = PTR_ERR(hcon);
7927 conn = l2cap_conn_add(hcon);
7929 hci_conn_drop(hcon);
/* Enforce the ECRED limit on concurrent SCIDs per PID/PSM */
7934 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
7935 struct l2cap_chan_data data;
7938 data.pid = chan->ops->get_peer_pid(chan);
7941 l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
7943 /* Check if there isn't too many channels being connected */
7944 if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
7945 hci_conn_drop(hcon);
7951 mutex_lock(&conn->chan_lock);
7952 l2cap_chan_lock(chan);
/* Fixed-channel DCID collision: refuse duplicate channel */
7954 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7955 hci_conn_drop(hcon);
7960 /* Update source addr of the socket */
7961 bacpy(&chan->src, &hcon->src);
7962 chan->src_type = bdaddr_src_type(hcon);
7964 __l2cap_chan_add(conn, chan);
7966 /* l2cap_chan_add takes its own ref so we can drop this one */
7967 hci_conn_drop(hcon);
7969 l2cap_state_change(chan, BT_CONNECT);
7970 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7972 /* Release chan->sport so that it can be reused by other
7973 * sockets (as it's only used for listening sockets).
7975 write_lock(&chan_list_lock);
7977 write_unlock(&chan_list_lock);
/* Link already up: start channel setup right away */
7979 if (hcon->state == BT_CONNECTED) {
7980 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7981 __clear_chan_timer(chan);
7982 if (l2cap_chan_check_security(chan, true))
7983 l2cap_state_change(chan, BT_CONNECTED);
7985 l2cap_do_start(chan);
7991 l2cap_chan_unlock(chan);
7992 mutex_unlock(&conn->chan_lock);
7994 hci_dev_unlock(hdev);
7998 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
/* Send an ECRED reconfigure request advertising the channel's current
 * IMTU/MPS for its SCID; the allocated ident is stored on the channel
 * so the response can be matched.
 */
8000 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
8002 struct l2cap_conn *conn = chan->conn;
8004 struct l2cap_ecred_reconf_req req;
8008 pdu.req.mtu = cpu_to_le16(chan->imtu);
8009 pdu.req.mps = cpu_to_le16(chan->mps);
8010 pdu.scid = cpu_to_le16(chan->scid);
8012 chan->ident = l2cap_get_ident(conn);
8014 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
/* Public entry to raise a channel's MTU via ECRED reconfiguration.
 * Shrinking is rejected (the spec only allows increasing the MTU).
 */
8018 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8020 if (chan->imtu > mtu)
8023 BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
8027 l2cap_ecred_reconfigure(chan);
8032 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback for an incoming ACL connection request: scan listening
 * channels and build the accept/role-switch link-mode mask. A channel
 * bound to this adapter's own address (exact match) takes precedence
 * over wildcard (BDADDR_ANY) listeners.
 */
8034 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8036 int exact = 0, lm1 = 0, lm2 = 0;
8037 struct l2cap_chan *c;
8039 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8041 /* Find listening sockets and check their link_mode */
8042 read_lock(&chan_list_lock);
8043 list_for_each_entry(c, &chan_list, global_l) {
8044 if (c->state != BT_LISTEN)
/* Exact binding to this adapter's address */
8047 if (!bacmp(&c->src, &hdev->bdaddr)) {
8048 lm1 |= HCI_LM_ACCEPT;
8049 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8050 lm1 |= HCI_LM_MASTER;
/* Wildcard listener */
8052 } else if (!bacmp(&c->src, BDADDR_ANY)) {
8053 lm2 |= HCI_LM_ACCEPT;
8054 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8055 lm2 |= HCI_LM_MASTER;
8058 read_unlock(&chan_list_lock);
8060 return exact ? lm1 : lm2;
8063 /* Find the next fixed channel in BT_LISTEN state, continue iteration
8064 * from an existing channel in the list or from the beginning of the
8065 * global list (by passing NULL as first parameter).
8067 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
8068 struct hci_conn *hcon)
8070 u8 src_type = bdaddr_src_type(hcon);
8072 read_lock(&chan_list_lock);
/* Resume after the given channel, or start from the list head */
8075 c = list_next_entry(c, global_l);
8077 c = list_entry(chan_list.next, typeof(*c), global_l);
8079 list_for_each_entry_from(c, &chan_list, global_l) {
8080 if (c->chan_type != L2CAP_CHAN_FIXED)
8082 if (c->state != BT_LISTEN)
/* Must be bound to this adapter's address or the wildcard */
8084 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
8086 if (src_type != c->src_type)
/* Match found (ref taken before unlock in the full source) */
8090 read_unlock(&chan_list_lock);
8094 read_unlock(&chan_list_lock);
/* HCI connect-complete callback: on success, create the l2cap_conn and
 * spawn channels for every listening fixed channel (unless the remote
 * is blocked), then mark the connection ready. On failure, tear down
 * the L2CAP connection state.
 */
8099 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
8101 struct hci_dev *hdev = hcon->hdev;
8102 struct l2cap_conn *conn;
8103 struct l2cap_chan *pchan;
8106 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8109 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
/* Non-zero status: link setup failed */
8112 l2cap_conn_del(hcon, bt_to_errno(status));
8116 conn = l2cap_conn_add(hcon);
8120 dst_type = bdaddr_dst_type(hcon);
8122 /* If device is blocked, do not create channels for it */
8123 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
8126 /* Find fixed channels and notify them of the new connection. We
8127 * use multiple individual lookups, continuing each time where
8128 * we left off, because the list lock would prevent calling the
8129 * potentially sleeping l2cap_chan_lock() function.
8131 pchan = l2cap_global_fixed_chan(NULL, hcon);
8133 struct l2cap_chan *chan, *next;
8135 /* Client fixed channels should override server ones */
8136 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
8139 l2cap_chan_lock(pchan);
8140 chan = pchan->ops->new_connection(pchan);
8142 bacpy(&chan->src, &hcon->src);
8143 bacpy(&chan->dst, &hcon->dst);
8144 chan->src_type = bdaddr_src_type(hcon);
8145 chan->dst_type = dst_type;
8147 __l2cap_chan_add(conn, chan);
8150 l2cap_chan_unlock(pchan);
/* Advance to the next listening fixed channel, dropping the ref on
 * the one just handled.
 */
8152 next = l2cap_global_fixed_chan(pchan, hcon);
8153 l2cap_chan_put(pchan);
8157 l2cap_conn_ready(conn);
/* HCI disconnect-indication callback: report the disconnect reason
 * recorded on the l2cap_conn, or REMOTE_USER_TERM when no conn exists.
 */
8160 int l2cap_disconn_ind(struct hci_conn *hcon)
8162 struct l2cap_conn *conn = hcon->l2cap_data;
8164 BT_DBG("hcon %p", hcon);
8167 return HCI_ERROR_REMOTE_USER_TERM;
8168 return conn->disc_reason;
/* HCI disconnect-complete callback: tear down the L2CAP connection for
 * ACL/LE links, translating the HCI reason to an errno.
 */
8171 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8173 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8176 BT_DBG("hcon %p reason %d", hcon, reason);
8178 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption arms a grace timer for MEDIUM security and closes
 * HIGH/FIPS channels outright; regaining it clears the MEDIUM timer.
 */
8181 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8183 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8186 if (encrypt == 0x00) {
8187 if (chan->sec_level == BT_SECURITY_MEDIUM) {
8188 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8189 } else if (chan->sec_level == BT_SECURITY_HIGH ||
8190 chan->sec_level == BT_SECURITY_FIPS)
8191 l2cap_chan_close(chan, ECONNREFUSED);
8193 if (chan->sec_level == BT_SECURITY_MEDIUM)
8194 __clear_chan_timer(chan);
/* HCI security-change callback: walk every channel on the connection
 * and advance its state machine — resume established channels, start
 * connection for channels that were waiting on authentication, and
 * answer pending incoming connect requests (BT_CONNECT2) with success,
 * pending-authorization, or security-block.
 */
8198 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
8200 struct l2cap_conn *conn = hcon->l2cap_data;
8201 struct l2cap_chan *chan;
8206 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
8208 mutex_lock(&conn->chan_lock);
8210 list_for_each_entry(chan, &conn->chan_l, list) {
8211 l2cap_chan_lock(chan);
8213 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
8214 state_to_string(chan->state));
/* A2MP channel does not participate in security handling */
8216 if (chan->scid == L2CAP_CID_A2MP) {
8217 l2cap_chan_unlock(chan);
8221 if (!status && encrypt)
8222 chan->sec_level = hcon->sec_level;
8224 if (!__l2cap_no_conn_pending(chan)) {
8225 l2cap_chan_unlock(chan);
/* Established channels: resume and re-check encryption policy */
8229 if (!status && (chan->state == BT_CONNECTED ||
8230 chan->state == BT_CONFIG)) {
8231 chan->ops->resume(chan);
8232 l2cap_check_encryption(chan, encrypt);
8233 l2cap_chan_unlock(chan);
/* Outgoing channels waiting on security: start or time out */
8237 if (chan->state == BT_CONNECT) {
8238 if (!status && l2cap_check_enc_key_size(hcon))
8239 l2cap_start_connection(chan);
8241 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8242 } else if (chan->state == BT_CONNECT2 &&
8243 !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
8244 chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
8245 struct l2cap_conn_rsp rsp;
8248 if (!status && l2cap_check_enc_key_size(hcon)) {
8249 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
8250 res = L2CAP_CR_PEND;
8251 stat = L2CAP_CS_AUTHOR_PEND;
8252 chan->ops->defer(chan);
8254 l2cap_state_change(chan, BT_CONFIG);
8255 res = L2CAP_CR_SUCCESS;
8256 stat = L2CAP_CS_NO_INFO;
/* Security failed: reject and schedule disconnect */
8259 l2cap_state_change(chan, BT_DISCONN);
8260 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8261 res = L2CAP_CR_SEC_BLOCK;
8262 stat = L2CAP_CS_NO_INFO;
8265 rsp.scid = cpu_to_le16(chan->dcid);
8266 rsp.dcid = cpu_to_le16(chan->scid);
8267 rsp.result = cpu_to_le16(res);
8268 rsp.status = cpu_to_le16(stat);
8269 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Kick off configuration if we accepted and have not sent one */
8272 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
8273 res == L2CAP_CR_SUCCESS) {
8275 set_bit(CONF_REQ_SENT, &chan->conf_state);
8276 l2cap_send_cmd(conn, l2cap_get_ident(conn),
8278 l2cap_build_conf_req(chan, buf, sizeof(buf)),
8280 chan->num_conf_req++;
8284 l2cap_chan_unlock(chan);
8287 mutex_unlock(&conn->chan_lock);
8290 /* Append fragment into frame respecting the maximum len of rx_skb.
 * Allocates conn->rx_skb on the first fragment; copies at most what
 * the skb holds and decrements conn->rx_len by the amount consumed.
 */
8291 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
8294 if (!conn->rx_skb) {
8295 /* Allocate skb for the complete frame (with header) */
8296 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
8303 /* Copy as much as the rx_skb can hold */
8304 len = min_t(u16, len, skb->len);
8305 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
8307 conn->rx_len -= len;
/* Complete the 2-byte L2CAP length field from continuation data when
 * the start fragment was too short to carry it, then size rx_skb for
 * the full frame — reusing it when it already has enough tailroom,
 * otherwise reallocating to the exact expected length.
 */
8312 static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
8314 struct sk_buff *rx_skb;
8317 /* Append just enough to complete the header */
8318 len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);
8320 /* If header could not be read just continue */
8321 if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
8324 rx_skb = conn->rx_skb;
8325 len = get_unaligned_le16(rx_skb->data);
8327 /* Check if rx_skb has enough space to received all fragments */
8328 if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
8329 /* Update expected len */
8330 conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
8331 return L2CAP_LEN_SIZE;
8334 /* Reset conn->rx_skb since it will need to be reallocated in order to
8335 * fit all fragments.
8337 conn->rx_skb = NULL;
8339 /* Reallocates rx_skb using the exact expected length */
8340 len = l2cap_recv_frag(conn, rx_skb,
8341 len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
/* Discard any partially reassembled frame and reset reassembly state. */
8347 static void l2cap_recv_reset(struct l2cap_conn *conn)
8349 kfree_skb(conn->rx_skb);
8350 conn->rx_skb = NULL;
/* Entry point for ACL data from the HCI layer. Reassembles L2CAP
 * frames from start/continuation fragments (flags select which),
 * guards against over-long or out-of-order fragments by marking the
 * connection unreliable, and passes each completed frame to
 * l2cap_recv_frame().
 */
8354 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
8356 struct l2cap_conn *conn = hcon->l2cap_data;
8359 /* For AMP controller do not create l2cap conn */
8360 if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
8364 conn = l2cap_conn_add(hcon);
8369 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
8373 case ACL_START_NO_FLUSH:
/* A start fragment while reassembly is in progress means the
 * previous frame was truncated — drop it.
 */
8376 BT_ERR("Unexpected start frame (len %d)", skb->len);
8377 l2cap_recv_reset(conn);
8378 l2cap_conn_unreliable(conn, ECOMM);
8381 /* Start fragment may not contain the L2CAP length so just
8382 * copy the initial byte when that happens and use conn->mtu as
8385 if (skb->len < L2CAP_LEN_SIZE) {
8386 if (l2cap_recv_frag(conn, skb, conn->mtu) < 0)
8391 len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;
8393 if (len == skb->len) {
8394 /* Complete frame received */
8395 l2cap_recv_frame(conn, skb);
8399 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
8401 if (skb->len > len) {
8402 BT_ERR("Frame is too long (len %d, expected len %d)",
8404 l2cap_conn_unreliable(conn, ECOMM);
8408 /* Append fragment into frame (with header) */
8409 if (l2cap_recv_frag(conn, skb, len) < 0)
8415 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
8417 if (!conn->rx_skb) {
8418 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
8419 l2cap_conn_unreliable(conn, ECOMM);
8423 /* Complete the L2CAP length if it has not been read */
8424 if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
8425 if (l2cap_recv_len(conn, skb) < 0) {
8426 l2cap_conn_unreliable(conn, ECOMM);
8430 /* Header still could not be read just continue */
8431 if (conn->rx_skb->len < L2CAP_LEN_SIZE)
8435 if (skb->len > conn->rx_len) {
8436 BT_ERR("Fragment is too long (len %d, expected %d)",
8437 skb->len, conn->rx_len);
8438 l2cap_recv_reset(conn);
8439 l2cap_conn_unreliable(conn, ECOMM);
8443 /* Append fragment into frame (with header) */
8444 l2cap_recv_frag(conn, skb, skb->len);
8446 if (!conn->rx_len) {
8447 /* Complete frame received. l2cap_recv_frame
8448 * takes ownership of the skb so set the global
8449 * rx_skb pointer to NULL first.
8451 struct sk_buff *rx_skb = conn->rx_skb;
8452 conn->rx_skb = NULL;
8453 l2cap_recv_frame(conn, rx_skb);
/* HCI callback registration: routes connect/disconnect/security events
 * from the HCI core into the L2CAP handlers above.
 */
8462 static struct hci_cb l2cap_cb = {
8464 .connect_cfm = l2cap_connect_cfm,
8465 .disconn_cfm = l2cap_disconn_cfm,
8466 .security_cfm = l2cap_security_cfm,
/* debugfs seq_file show: dump one line per global channel with
 * addresses, state, PSM, CIDs, MTUs, security level and mode.
 */
8469 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8471 struct l2cap_chan *c;
8473 read_lock(&chan_list_lock);
8475 list_for_each_entry(c, &chan_list, global_l) {
8476 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8477 &c->src, c->src_type, &c->dst, c->dst_type,
8478 c->state, __le16_to_cpu(c->psm),
8479 c->scid, c->dcid, c->imtu, c->omtu,
8480 c->sec_level, c->mode);
8483 read_unlock(&chan_list_lock);
/* Generates l2cap_debugfs_fops from the show function above */
8488 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
8490 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and HCI callbacks, and
 * create the debugfs entry when bt_debugfs is available.
 */
8492 int __init l2cap_init(void)
8496 err = l2cap_init_sockets();
8500 hci_register_cb(&l2cap_cb);
8502 if (IS_ERR_OR_NULL(bt_debugfs))
8505 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8506 NULL, &l2cap_debugfs_fops);
/* Module teardown: mirror of l2cap_init in reverse order. */
8511 void l2cap_exit(void)
8513 debugfs_remove(l2cap_debugfs);
8514 hci_unregister_cb(&l2cap_cb);
8515 l2cap_cleanup_sockets();
/* Runtime-tunable module parameters (0644: root-writable via sysfs) */
8518 module_param(disable_ertm, bool, 0644);
8519 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
8521 module_param(enable_ecred, bool, 0644);
8522 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");