2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
45 #define LE_FLOWCTL_MAX_CREDITS 65535
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 struct sk_buff_head *skbs, u8 event);
/* Map an HCI link type + HCI address type pair onto the BDADDR_* constants
 * exposed to sockets. NOTE(review): this chunk is truncated — the BR/EDR
 * fallback and closing braces of bdaddr_type() are not visible here.
 */
65 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
67 if (link_type == LE_LINK) {
68 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
69 return BDADDR_LE_PUBLIC;
71 return BDADDR_LE_RANDOM;
/* Wrappers resolving the address type of a connection's source/destination. */
77 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
79 return bdaddr_type(hcon->type, hcon->src_type);
82 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
84 return bdaddr_type(hcon->type, hcon->dst_type);
87 /* ---- L2CAP channels ---- */
/* Unlocked lookups over conn->chan_l by DCID/SCID; callers must hold
 * conn->chan_lock. No reference is taken on the returned channel.
 */
89 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
94 list_for_each_entry(c, &conn->chan_l, list) {
101 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
104 struct l2cap_chan *c;
106 list_for_each_entry(c, &conn->chan_l, list) {
113 /* Find channel with given SCID.
114 * Returns a reference locked channel.
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
119 struct l2cap_chan *c;
121 mutex_lock(&conn->chan_lock);
122 c = __l2cap_get_chan_by_scid(conn, cid);
124 /* Only lock if chan reference is not 0 */
/* hold_unless_zero guards against racing with the final l2cap_chan_put() */
125 c = l2cap_chan_hold_unless_zero(c);
129 mutex_unlock(&conn->chan_lock);
134 /* Find channel with given DCID.
135 * Returns a reference locked channel.
137 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
140 struct l2cap_chan *c;
142 mutex_lock(&conn->chan_lock);
143 c = __l2cap_get_chan_by_dcid(conn, cid);
145 /* Only lock if chan reference is not 0 */
/* hold_unless_zero guards against racing with the final l2cap_chan_put() */
146 c = l2cap_chan_hold_unless_zero(c);
150 mutex_unlock(&conn->chan_lock);
/* Find a channel by pending signalling command identifier. The unlocked
 * variant requires conn->chan_lock; the locked variant also takes a
 * reference via l2cap_chan_hold_unless_zero().
 */
155 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
158 struct l2cap_chan *c;
160 list_for_each_entry(c, &conn->chan_l, list) {
161 if (c->ident == ident)
167 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
170 struct l2cap_chan *c;
172 mutex_lock(&conn->chan_lock);
173 c = __l2cap_get_chan_by_ident(conn, ident);
175 /* Only lock if chan reference is not 0 */
176 c = l2cap_chan_hold_unless_zero(c);
180 mutex_unlock(&conn->chan_lock);
/* Search the global channel list for a channel bound to (psm, src address),
 * skipping entries whose transport (BR/EDR vs. LE) does not match src_type.
 * Caller must hold chan_list_lock.
 */
185 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
188 struct l2cap_chan *c;
190 list_for_each_entry(c, &chan_list, global_l) {
191 if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
194 if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
197 if (c->sport == psm && !bacmp(&c->src, src))
/* Bind a channel to a PSM. A non-zero psm is checked for collisions on the
 * same source address; psm == 0 requests automatic allocation from the
 * transport-specific dynamic range (BR/EDR vs. LE).
 */
203 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
207 write_lock(&chan_list_lock);
209 if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
219 u16 p, start, end, incr;
221 if (chan->src_type == BDADDR_BREDR) {
222 start = L2CAP_PSM_DYN_START;
223 end = L2CAP_PSM_AUTO_END;
226 start = L2CAP_PSM_LE_DYN_START;
227 end = L2CAP_PSM_LE_DYN_END;
/* scan the dynamic range for the first free PSM on this address */
232 for (p = start; p <= end; p += incr)
233 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
235 chan->psm = cpu_to_le16(p);
236 chan->sport = cpu_to_le16(p);
243 write_unlock(&chan_list_lock);
246 EXPORT_SYMBOL_GPL(l2cap_add_psm);
/* Bind a channel directly to a fixed CID, overriding the connection-oriented
 * defaults set at creation time.
 */
248 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
250 write_lock(&chan_list_lock);
252 /* Override the defaults (which are for conn-oriented) */
253 chan->omtu = L2CAP_DEFAULT_MTU;
254 chan->chan_type = L2CAP_CHAN_FIXED;
258 write_unlock(&chan_list_lock);
/* Allocate the first unused dynamic source CID on this connection; the
 * dynamic range end differs between LE and BR/EDR links.
 */
263 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
267 if (conn->hcon->type == LE_LINK)
268 dyn_end = L2CAP_CID_LE_DYN_END;
270 dyn_end = L2CAP_CID_DYN_END;
272 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
273 if (!__l2cap_get_chan_by_scid(conn, cid))
/* State transition helpers; all funnel through the channel ops'
 * state_change callback (err == 0 means a pure state change).
 */
280 static void l2cap_state_change(struct l2cap_chan *chan, int state)
282 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
283 state_to_string(state));
286 chan->ops->state_change(chan, state, 0);
289 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
293 chan->ops->state_change(chan, chan->state, err);
/* Report an error without changing state (state argument == current state) */
296 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
298 chan->ops->state_change(chan, chan->state, err);
/* ERTM timer helpers: the retransmission timer is only armed when the
 * monitor timer is idle; arming the monitor timer cancels retransmission.
 */
301 static void __set_retrans_timer(struct l2cap_chan *chan)
303 if (!delayed_work_pending(&chan->monitor_timer) &&
304 chan->retrans_timeout) {
305 l2cap_set_timer(chan, &chan->retrans_timer,
306 msecs_to_jiffies(chan->retrans_timeout));
310 static void __set_monitor_timer(struct l2cap_chan *chan)
312 __clear_retrans_timer(chan);
313 if (chan->monitor_timeout) {
314 l2cap_set_timer(chan, &chan->monitor_timer,
315 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of an skb queue for the frame carrying ERTM txseq == seq. */
319 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
324 skb_queue_walk(head, skb) {
325 if (bt_cb(skb)->l2cap.txseq == seq)
332 /* ---- L2CAP sequence number lists ---- */
334 /* For ERTM, ordered lists of sequence numbers must be tracked for
335 * SREJ requests that are received and for frames that are to be
336 * retransmitted. These seq_list functions implement a singly-linked
337 * list in an array, where membership in the list can also be checked
338 * in constant time. Items can also be added to the tail of the list
339 * and removed from the head in constant time, without further memory
/* Allocate and clear the backing array of a sequence list. The array size
 * is rounded up to a power of two so (seq & mask) indexes it directly.
 */
343 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
345 size_t alloc_size, i;
347 /* Allocated size is a power of 2 to map sequence numbers
348 * (which may be up to 14 bits) in to a smaller array that is
349 * sized for the negotiated ERTM transmit windows.
351 alloc_size = roundup_pow_of_two(size);
353 seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
357 seq_list->mask = alloc_size - 1;
358 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
359 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
360 for (i = 0; i < alloc_size; i++)
361 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array (kfree(NULL) is a no-op). */
366 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
368 kfree(seq_list->list)
371 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
374 /* Constant-time check for list membership */
375 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove and return the sequence number at the head of the list; resets
 * head/tail to CLEAR when the last element (marked L2CAP_SEQ_LIST_TAIL)
 * is consumed.
 */
378 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
380 u16 seq = seq_list->head;
381 u16 mask = seq_list->mask;
383 seq_list->head = seq_list->list[seq & mask];
384 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
386 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
387 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
388 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Empty the list; the early return skips the O(n) wipe when already empty. */
394 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
398 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
401 for (i = 0; i <= seq_list->mask; i++)
402 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
404 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
405 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append seq at the tail. A non-CLEAR slot means the number is already a
 * member, so duplicates are silently ignored.
 */
408 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
410 u16 mask = seq_list->mask;
412 /* All appends happen in constant time */
414 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
417 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
418 seq_list->head = seq;
420 seq_list->list[seq_list->tail & mask] = seq;
422 seq_list->tail = seq;
423 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: closes the channel with a
 * reason derived from its current state, then drops the reference taken
 * when the timer was scheduled.
 */
426 static void l2cap_chan_timeout(struct work_struct *work)
428 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
430 struct l2cap_conn *conn = chan->conn;
433 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
435 mutex_lock(&conn->chan_lock);
436 /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
437 * this work. No need to call l2cap_chan_hold(chan) here again.
439 l2cap_chan_lock(chan);
441 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
442 reason = ECONNREFUSED;
443 else if (chan->state == BT_CONNECT &&
444 chan->sec_level != BT_SECURITY_SDP)
445 reason = ECONNREFUSED;
449 l2cap_chan_close(chan, reason);
451 chan->ops->close(chan);
453 l2cap_chan_unlock(chan);
/* balances the hold taken by __set_chan_timer() */
454 l2cap_chan_put(chan);
456 mutex_unlock(&conn->chan_lock);
/* Allocate and initialize a new channel: queues, lock, refcount, default
 * nesting level; the channel is linked onto the global chan_list and starts
 * in BT_OPEN with CONF_NOT_COMPLETE set.
 */
459 struct l2cap_chan *l2cap_chan_create(void)
461 struct l2cap_chan *chan;
463 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
467 skb_queue_head_init(&chan->tx_q);
468 skb_queue_head_init(&chan->srej_q);
469 mutex_init(&chan->lock);
471 /* Set default lock nesting level */
472 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
474 write_lock(&chan_list_lock);
475 list_add(&chan->global_l, &chan_list);
476 write_unlock(&chan_list_lock);
478 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
480 chan->state = BT_OPEN;
482 kref_init(&chan->kref);
484 /* This flag is cleared in l2cap_chan_ready() */
485 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
487 BT_DBG("chan %p", chan);
491 EXPORT_SYMBOL_GPL(l2cap_chan_create);
/* kref release callback: unlink from the global list and free the channel. */
493 static void l2cap_chan_destroy(struct kref *kref)
495 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
497 BT_DBG("chan %p", chan);
499 write_lock(&chan_list_lock);
500 list_del(&chan->global_l);
501 write_unlock(&chan_list_lock);
/* Channel reference counting. hold_unless_zero() returns NULL when the
 * refcount already dropped to zero (channel is being destroyed); put()
 * releases via l2cap_chan_destroy().
 */
506 void l2cap_chan_hold(struct l2cap_chan *c)
508 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
513 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
515 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
517 if (!kref_get_unless_zero(&c->kref))
523 void l2cap_chan_put(struct l2cap_chan *c)
525 BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
527 kref_put(&c->kref, l2cap_chan_destroy);
529 EXPORT_SYMBOL_GPL(l2cap_chan_put);
/* Reset a channel to spec-default ERTM/security/flush parameters and clear
 * all configuration state except CONF_NOT_COMPLETE.
 */
531 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
533 chan->fcs = L2CAP_FCS_CRC16;
534 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
535 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
536 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
537 chan->remote_max_tx = chan->max_tx;
538 chan->remote_tx_win = chan->tx_win;
539 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
540 chan->sec_level = BT_SECURITY_LOW;
541 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
542 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
543 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
545 chan->conf_state = 0;
546 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
548 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
550 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
/* Initialize LE credit-based flow control: derive MPS from the link MTU and
 * grant enough rx credits for one full SDU.
 */
552 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
555 chan->sdu_last_frag = NULL;
557 chan->tx_credits = tx_credits;
558 /* Derive MPS from connection MTU to stop HCI fragmentation */
559 chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
560 /* Give enough credits for a full packet */
561 chan->rx_credits = (chan->imtu / chan->mps) + 1;
563 skb_queue_head_init(&chan->tx_q);
/* ECRED variant additionally enforces the spec-minimum MPS of 64 octets */
566 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
568 l2cap_le_flowctl_init(chan, tx_credits);
570 /* L2CAP implementations shall support a minimum MPS of 64 octets */
571 if (chan->mps < L2CAP_ECRED_MIN_MPS) {
572 chan->mps = L2CAP_ECRED_MIN_MPS;
573 chan->rx_credits = (chan->imtu / chan->mps) + 1;
/* Attach a channel to a connection: assign SCID/DCID/MTU based on channel
 * type, take a channel reference, optionally pin the hci_conn, and link it
 * onto conn->chan_l. Caller must hold conn->chan_lock.
 */
577 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
579 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
580 __le16_to_cpu(chan->psm), chan->dcid);
582 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
586 switch (chan->chan_type) {
587 case L2CAP_CHAN_CONN_ORIENTED:
588 /* Alloc CID for connection-oriented socket */
589 chan->scid = l2cap_alloc_cid(conn);
590 if (conn->hcon->type == ACL_LINK)
591 chan->omtu = L2CAP_DEFAULT_MTU;
594 case L2CAP_CHAN_CONN_LESS:
595 /* Connectionless socket */
596 chan->scid = L2CAP_CID_CONN_LESS;
597 chan->dcid = L2CAP_CID_CONN_LESS;
598 chan->omtu = L2CAP_DEFAULT_MTU;
601 case L2CAP_CHAN_FIXED:
602 /* Caller will set CID and CID specific MTU values */
606 /* Raw socket can send/recv signalling messages only */
607 chan->scid = L2CAP_CID_SIGNALING;
608 chan->dcid = L2CAP_CID_SIGNALING;
609 chan->omtu = L2CAP_DEFAULT_MTU;
/* default extended-flow-spec (EFS) parameters for the local side */
612 chan->local_id = L2CAP_BESTEFFORT_ID;
613 chan->local_stype = L2CAP_SERV_BESTEFFORT;
614 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
615 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
616 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
617 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
619 l2cap_chan_hold(chan);
621 /* Only keep a reference for fixed channels if they requested it */
622 if (chan->chan_type != L2CAP_CHAN_FIXED ||
623 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
624 hci_conn_hold(conn->hcon);
626 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
629 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
631 mutex_lock(&conn->chan_lock);
632 __l2cap_chan_add(conn, chan);
633 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection: tear down the socket side, unlink
 * it, drop the references taken in __l2cap_chan_add(), disconnect any AMP
 * logical link, and purge mode-specific queues/timers.
 */
636 void l2cap_chan_del(struct l2cap_chan *chan, int err)
638 struct l2cap_conn *conn = chan->conn;
640 __clear_chan_timer(chan);
642 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
643 state_to_string(chan->state));
645 chan->ops->teardown(chan, err);
648 struct amp_mgr *mgr = conn->hcon->amp_mgr;
649 /* Delete from channel list */
650 list_del(&chan->list);
/* balances the l2cap_chan_hold() done in __l2cap_chan_add() */
652 l2cap_chan_put(chan);
656 /* Reference was only held for non-fixed channels or
657 * fixed channels that explicitly requested it using the
658 * FLAG_HOLD_HCI_CONN flag.
660 if (chan->chan_type != L2CAP_CHAN_FIXED ||
661 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
662 hci_conn_drop(conn->hcon);
664 if (mgr && mgr->bredr_chan == chan)
665 mgr->bredr_chan = NULL;
668 if (chan->hs_hchan) {
669 struct hci_chan *hs_hchan = chan->hs_hchan;
671 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
672 amp_disconnect_logical_link(hs_hchan);
675 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
/* mode-specific cleanup of pending transmit/reassembly state */
678 switch (chan->mode) {
679 case L2CAP_MODE_BASIC:
682 case L2CAP_MODE_LE_FLOWCTL:
683 case L2CAP_MODE_EXT_FLOWCTL:
684 skb_queue_purge(&chan->tx_q);
687 case L2CAP_MODE_ERTM:
688 __clear_retrans_timer(chan);
689 __clear_monitor_timer(chan);
690 __clear_ack_timer(chan);
692 skb_queue_purge(&chan->srej_q);
694 l2cap_seq_list_free(&chan->srej_list);
695 l2cap_seq_list_free(&chan->retrans_list);
698 case L2CAP_MODE_STREAMING:
699 skb_queue_purge(&chan->tx_q);
703 EXPORT_SYMBOL_GPL(l2cap_chan_del);
/* Invoke func(chan, data) for every channel on the connection; the public
 * variant takes conn->chan_lock around the iteration.
 */
705 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
708 struct l2cap_chan *chan;
710 list_for_each_entry(chan, &conn->chan_l, list) {
715 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
721 mutex_lock(&conn->chan_lock);
722 __l2cap_chan_list(conn, func, data);
723 mutex_unlock(&conn->chan_lock);
726 EXPORT_SYMBOL_GPL(l2cap_chan_list);
/* Work handler: after an identity-address resolution, copy the hci_conn's
 * (possibly updated) destination address/type into every channel.
 */
728 static void l2cap_conn_update_id_addr(struct work_struct *work)
730 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
731 id_addr_update_work);
732 struct hci_conn *hcon = conn->hcon;
733 struct l2cap_chan *chan;
735 mutex_lock(&conn->chan_lock);
737 list_for_each_entry(chan, &conn->chan_l, list) {
738 l2cap_chan_lock(chan);
739 bacpy(&chan->dst, &hcon->dst);
740 chan->dst_type = bdaddr_dst_type(hcon);
741 l2cap_chan_unlock(chan);
744 mutex_unlock(&conn->chan_lock);
/* Reject a pending LE credit-based connection request: pick a result code
 * based on whether setup was deferred, move to BT_DISCONN, and answer the
 * stored request ident with an LE connection response.
 */
747 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
749 struct l2cap_conn *conn = chan->conn;
750 struct l2cap_le_conn_rsp rsp;
753 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
754 result = L2CAP_CR_LE_AUTHORIZATION;
756 result = L2CAP_CR_LE_BAD_PSM;
758 l2cap_state_change(chan, BT_DISCONN);
760 rsp.dcid = cpu_to_le16(chan->scid);
761 rsp.mtu = cpu_to_le16(chan->imtu);
762 rsp.mps = cpu_to_le16(chan->mps);
763 rsp.credits = cpu_to_le16(chan->rx_credits);
764 rsp.result = cpu_to_le16(result);
766 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
770 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
772 struct l2cap_conn *conn = chan->conn;
773 struct l2cap_ecred_conn_rsp rsp;
776 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
777 result = L2CAP_CR_LE_AUTHORIZATION;
779 result = L2CAP_CR_LE_BAD_PSM;
781 l2cap_state_change(chan, BT_DISCONN);
783 memset(&rsp, 0, sizeof(rsp));
785 rsp.result = cpu_to_le16(result);
787 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Reject a pending BR/EDR connection request with either a security-block
 * or bad-PSM result, then move the channel to BT_DISCONN.
 */
791 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
793 struct l2cap_conn *conn = chan->conn;
794 struct l2cap_conn_rsp rsp;
797 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
798 result = L2CAP_CR_SEC_BLOCK;
800 result = L2CAP_CR_BAD_PSM;
802 l2cap_state_change(chan, BT_DISCONN);
/* rsp.scid/dcid are from the remote's point of view, hence swapped */
804 rsp.scid = cpu_to_le16(chan->dcid);
805 rsp.dcid = cpu_to_le16(chan->scid);
806 rsp.result = cpu_to_le16(result);
807 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
809 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Close a channel according to its current state: connected channels get a
 * disconnect request (with a timer), incoming connect-pending channels get
 * a transport-appropriate reject, everything else is simply deleted.
 */
812 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
814 struct l2cap_conn *conn = chan->conn;
816 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
818 switch (chan->state) {
820 chan->ops->teardown(chan, 0);
825 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
826 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
827 l2cap_send_disconn_req(chan, reason);
829 l2cap_chan_del(chan, reason);
833 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
834 if (conn->hcon->type == ACL_LINK)
835 l2cap_chan_connect_reject(chan);
836 else if (conn->hcon->type == LE_LINK) {
837 switch (chan->mode) {
838 case L2CAP_MODE_LE_FLOWCTL:
839 l2cap_chan_le_connect_reject(chan);
841 case L2CAP_MODE_EXT_FLOWCTL:
842 l2cap_chan_ecred_connect_reject(chan);
848 l2cap_chan_del(chan, reason);
853 l2cap_chan_del(chan, reason);
857 chan->ops->teardown(chan, 0);
861 EXPORT_SYMBOL(l2cap_chan_close);
/* Derive the HCI authentication requirement from the channel type, PSM and
 * security level; SDP/3DSP PSMs are downgraded from LOW to SDP security.
 */
863 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
865 switch (chan->chan_type) {
867 switch (chan->sec_level) {
868 case BT_SECURITY_HIGH:
869 case BT_SECURITY_FIPS:
870 return HCI_AT_DEDICATED_BONDING_MITM;
871 case BT_SECURITY_MEDIUM:
872 return HCI_AT_DEDICATED_BONDING;
874 return HCI_AT_NO_BONDING;
877 case L2CAP_CHAN_CONN_LESS:
878 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
879 if (chan->sec_level == BT_SECURITY_LOW)
880 chan->sec_level = BT_SECURITY_SDP;
882 if (chan->sec_level == BT_SECURITY_HIGH ||
883 chan->sec_level == BT_SECURITY_FIPS)
884 return HCI_AT_NO_BONDING_MITM;
886 return HCI_AT_NO_BONDING;
888 case L2CAP_CHAN_CONN_ORIENTED:
889 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
890 if (chan->sec_level == BT_SECURITY_LOW)
891 chan->sec_level = BT_SECURITY_SDP;
893 if (chan->sec_level == BT_SECURITY_HIGH ||
894 chan->sec_level == BT_SECURITY_FIPS)
895 return HCI_AT_NO_BONDING_MITM;
897 return HCI_AT_NO_BONDING;
902 switch (chan->sec_level) {
903 case BT_SECURITY_HIGH:
904 case BT_SECURITY_FIPS:
905 return HCI_AT_GENERAL_BONDING_MITM;
906 case BT_SECURITY_MEDIUM:
907 return HCI_AT_GENERAL_BONDING;
909 return HCI_AT_NO_BONDING;
915 /* Service level security */
/* LE links delegate to SMP; BR/EDR links go through hci_conn_security()
 * with the auth type derived above.
 */
916 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
918 struct l2cap_conn *conn = chan->conn;
921 if (conn->hcon->type == LE_LINK)
922 return smp_conn_security(conn->hcon, chan->sec_level);
924 auth_type = l2cap_get_auth_type(chan);
926 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
/* Allocate the next signalling command identifier for this connection,
 * cycling within the kernel-reserved 1-128 range under ident_lock.
 */
930 static u8 l2cap_get_ident(struct l2cap_conn *conn)
934 /* Get next available identificator.
935 * 1 - 128 are used by kernel.
936 * 129 - 199 are reserved.
937 * 200 - 254 are used by utilities like l2ping, etc.
940 mutex_lock(&conn->ident_lock);
942 if (++conn->tx_ident > 128)
947 mutex_unlock(&conn->ident_lock);
/* Build a signalling command PDU and transmit it on the connection's HCI
 * channel at maximum priority.
 */
952 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
955 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
958 BT_DBG("code 0x%2.2x", code);
963 /* Use NO_FLUSH if supported or we have an LE link (which does
964 * not support auto-flushing packets) */
965 if (lmp_no_flush_capable(conn->hcon->hdev) ||
966 conn->hcon->type == LE_LINK)
967 flags = ACL_START_NO_FLUSH;
971 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
/* signalling traffic always preempts data traffic */
972 skb->priority = HCI_PRIO_MAX;
974 hci_send_acl(conn->hchan, skb, flags);
/* True while an AMP channel move is actively in progress. */
977 static bool __chan_is_moving(struct l2cap_chan *chan)
979 return chan->move_state != L2CAP_MOVE_STABLE &&
980 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
/* Transmit a data skb for the channel, routing to the high-speed (AMP) HCI
 * channel when one is active and not mid-move; otherwise over the ACL link
 * with flush flags chosen from link type and FLAG_FLUSHABLE.
 */
983 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
985 struct hci_conn *hcon = chan->conn->hcon;
988 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
991 if (chan->hs_hcon && !__chan_is_moving(chan)) {
993 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
1000 /* Use NO_FLUSH for LE links (where this is the only option) or
1001 * if the BR/EDR link supports it and flushing has not been
1002 * explicitly requested (through FLAG_FLUSHABLE).
1004 if (hcon->type == LE_LINK ||
1005 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
1006 lmp_no_flush_capable(hcon->hdev)))
1007 flags = ACL_START_NO_FLUSH;
1011 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1012 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced control field into l2cap_ctrl: common reqseq and
 * final bits, then either S-frame (poll/super) or I-frame (sar/txseq)
 * fields depending on the frame-type bit.
 */
1015 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1017 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1018 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1020 if (enh & L2CAP_CTRL_FRAME_TYPE) {
1022 control->sframe = 1;
1023 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1024 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1030 control->sframe = 0;
1031 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1032 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode a 32-bit extended control field into l2cap_ctrl; mirrors the
 * enhanced variant but with the wider EXT_CTRL bit layout.
 */
1039 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1041 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1042 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1044 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1046 control->sframe = 1;
1047 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1048 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1054 control->sframe = 0;
1055 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1056 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull and decode the control field from the front of an incoming skb,
 * choosing the 32-bit extended or 16-bit enhanced layout per FLAG_EXT_CTRL.
 */
1063 static inline void __unpack_control(struct l2cap_chan *chan,
1064 struct sk_buff *skb)
1066 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1067 __unpack_extended_control(get_unaligned_le32(skb->data),
1068 &bt_cb(skb)->l2cap);
1069 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1071 __unpack_enhanced_control(get_unaligned_le16(skb->data),
1072 &bt_cb(skb)->l2cap);
1073 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Inverse of the unpack helpers: serialize l2cap_ctrl into the 32-bit
 * extended or 16-bit enhanced control-field wire format.
 */
1077 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1081 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1082 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1084 if (control->sframe) {
1085 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1086 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1087 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1089 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1090 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1096 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1100 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1101 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1103 if (control->sframe) {
1104 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1105 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1106 packed |= L2CAP_CTRL_FRAME_TYPE;
1108 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1109 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into the skb just after the basic L2CAP
 * header, in the format selected by FLAG_EXT_CTRL.
 */
1115 static inline void __pack_control(struct l2cap_chan *chan,
1116 struct l2cap_ctrl *control,
1117 struct sk_buff *skb)
1119 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1120 put_unaligned_le32(__pack_extended_control(control),
1121 skb->data + L2CAP_HDR_SIZE);
1123 put_unaligned_le16(__pack_enhanced_control(control),
1124 skb->data + L2CAP_HDR_SIZE);
/* Size of the ERTM header: extended or enhanced, per FLAG_EXT_CTRL. */
1128 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1130 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1131 return L2CAP_EXT_HDR_SIZE;
1133 return L2CAP_ENH_HDR_SIZE;
/* Build an S-frame PDU: basic header, pre-packed control field (16 or 32
 * bit) and, when CRC16 FCS is negotiated, a trailing checksum over the
 * whole frame. Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
1136 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1139 struct sk_buff *skb;
1140 struct l2cap_hdr *lh;
1141 int hlen = __ertm_hdr_size(chan);
1143 if (chan->fcs == L2CAP_FCS_CRC16)
1144 hlen += L2CAP_FCS_SIZE;
1146 skb = bt_skb_alloc(hlen, GFP_KERNEL);
1149 return ERR_PTR(-ENOMEM);
1151 lh = skb_put(skb, L2CAP_HDR_SIZE);
1152 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1153 lh->cid = cpu_to_le16(chan->dcid);
1155 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1156 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1158 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1160 if (chan->fcs == L2CAP_FCS_CRC16) {
1161 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1162 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1165 skb->priority = HCI_PRIO_MAX;
/* Send a supervisory frame: updates RNR-sent bookkeeping, records the
 * acknowledged sequence for non-SREJ frames (cancelling the ack timer),
 * packs the control field and transmits the PDU.
 */
1169 static void l2cap_send_sframe(struct l2cap_chan *chan,
1170 struct l2cap_ctrl *control)
1172 struct sk_buff *skb;
1175 BT_DBG("chan %p, control %p", chan, control);
1177 if (!control->sframe)
1180 if (__chan_is_moving(chan))
1183 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1187 if (control->super == L2CAP_SUPER_RR)
1188 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1189 else if (control->super == L2CAP_SUPER_RNR)
1190 set_bit(CONN_RNR_SENT, &chan->conn_state);
1192 if (control->super != L2CAP_SUPER_SREJ) {
1193 chan->last_acked_seq = control->reqseq;
1194 __clear_ack_timer(chan);
1197 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1198 control->final, control->poll, control->super);
1200 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1201 control_field = __pack_extended_control(control);
1203 control_field = __pack_enhanced_control(control);
1205 skb = l2cap_create_sframe_pdu(chan, control_field);
1207 l2cap_do_send(chan, skb);
/* Send an RR (receiver ready) or, when locally busy, an RNR S-frame
 * acknowledging up to buffer_seq; poll sets the P-bit.
 */
1210 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1212 struct l2cap_ctrl control;
1214 BT_DBG("chan %p, poll %d", chan, poll);
1216 memset(&control, 0, sizeof(control));
1218 control.poll = poll;
1220 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1221 control.super = L2CAP_SUPER_RNR;
1223 control.super = L2CAP_SUPER_RR;
1225 control.reqseq = chan->buffer_seq;
1226 l2cap_send_sframe(chan, &control);
/* Non-conn-oriented channels never have a pending connect; otherwise true
 * when no connect request is outstanding.
 */
1229 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1231 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1234 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Decide whether this channel may use an AMP controller: both sides must
 * advertise the A2MP fixed channel, at least one non-BR/EDR AMP controller
 * must be up, and the channel policy must prefer AMP.
 */
1237 static bool __amp_capable(struct l2cap_chan *chan)
1239 struct l2cap_conn *conn = chan->conn;
1240 struct hci_dev *hdev;
1241 bool amp_available = false;
1243 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1246 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1249 read_lock(&hci_dev_list_lock);
1250 list_for_each_entry(hdev, &hci_dev_list, list) {
1251 if (hdev->amp_type != AMP_TYPE_BREDR &&
1252 test_bit(HCI_UP, &hdev->flags)) {
1253 amp_available = true;
1257 read_unlock(&hci_dev_list_lock);
1259 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1260 return amp_available;
/* Validate the channel's extended flow spec (EFS) parameters.
 * NOTE(review): the body is not visible in this chunk — only the signature
 * and the original placeholder comment survive here.
 */
1265 static bool l2cap_check_efs(struct l2cap_chan *chan)
1267 /* Check EFS parameters */
/* Send an L2CAP connection request for the channel's PSM/SCID, recording
 * the allocated ident and marking the connect as pending.
 */
1271 void l2cap_send_conn_req(struct l2cap_chan *chan)
1273 struct l2cap_conn *conn = chan->conn;
1274 struct l2cap_conn_req req;
1276 req.scid = cpu_to_le16(chan->scid);
1277 req.psm = chan->psm;
1279 chan->ident = l2cap_get_ident(conn);
1281 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1283 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Send an AMP create-channel request targeting the given AMP controller. */
1286 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1288 struct l2cap_create_chan_req req;
1289 req.scid = cpu_to_le16(chan->scid);
1290 req.psm = chan->psm;
1291 req.amp_id = amp_id;
1293 chan->ident = l2cap_get_ident(chan->conn);
1295 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
/* Prepare an ERTM channel for an AMP move: stop all timers, mark queued
 * frames for retransmission, reset SREJ/retransmit tracking, and park the
 * state machines in XMIT/MOVE with the remote treated as busy.
 */
1299 static void l2cap_move_setup(struct l2cap_chan *chan)
1301 struct sk_buff *skb;
1303 BT_DBG("chan %p", chan);
1305 if (chan->mode != L2CAP_MODE_ERTM)
1308 __clear_retrans_timer(chan);
1309 __clear_monitor_timer(chan);
1310 __clear_ack_timer(chan);
1312 chan->retry_count = 0;
1313 skb_queue_walk(&chan->tx_q, skb) {
1314 if (bt_cb(skb)->l2cap.retries)
1315 bt_cb(skb)->l2cap.retries = 1;
1320 chan->expected_tx_seq = chan->buffer_seq;
1322 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1323 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1324 l2cap_seq_list_clear(&chan->retrans_list);
1325 l2cap_seq_list_clear(&chan->srej_list);
1326 skb_queue_purge(&chan->srej_q);
1328 chan->tx_state = L2CAP_TX_STATE_XMIT;
1329 chan->rx_state = L2CAP_RX_STATE_MOVE;
1331 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Finish an AMP move: return to the STABLE/NONE move state and, for ERTM,
 * resynchronize via a poll (initiator waits for F-bit, responder for P-bit).
 */
1334 static void l2cap_move_done(struct l2cap_chan *chan)
1336 u8 move_role = chan->move_role;
1337 BT_DBG("chan %p", chan);
1339 chan->move_state = L2CAP_MOVE_STABLE;
1340 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1342 if (chan->mode != L2CAP_MODE_ERTM)
1345 switch (move_role) {
1346 case L2CAP_MOVE_ROLE_INITIATOR:
1347 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1348 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1350 case L2CAP_MOVE_ROLE_RESPONDER:
1351 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
/* Transition a channel to BT_CONNECTED: clear configuration state and the
 * channel timer, suspend credit-based channels with no tx credits, then
 * notify the owner via ops->ready().
 */
1356 static void l2cap_chan_ready(struct l2cap_chan *chan)
1358 /* The channel may have already been flagged as connected in
1359 * case of receiving data before the L2CAP info req/rsp
1360 * procedure is complete.
1362 if (chan->state == BT_CONNECTED)
1365 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1366 chan->conf_state = 0;
1367 __clear_chan_timer(chan);
1369 switch (chan->mode) {
1370 case L2CAP_MODE_LE_FLOWCTL:
1371 case L2CAP_MODE_EXT_FLOWCTL:
1372 if (!chan->tx_credits)
1373 chan->ops->suspend(chan);
1377 chan->state = BT_CONNECTED;
1379 chan->ops->ready(chan);
/* Send an LE credit-based connection request; FLAG_LE_CONN_REQ_SENT guards
 * against sending it twice. Flow-control parameters are initialized from
 * the link MTU first.
 */
1382 static void l2cap_le_connect(struct l2cap_chan *chan)
1384 struct l2cap_conn *conn = chan->conn;
1385 struct l2cap_le_conn_req req;
1387 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1391 chan->imtu = chan->conn->mtu;
1393 l2cap_le_flowctl_init(chan, 0);
1395 req.psm = chan->psm;
1396 req.scid = cpu_to_le16(chan->scid);
1397 req.mtu = cpu_to_le16(chan->imtu);
1398 req.mps = cpu_to_le16(chan->mps);
1399 req.credits = cpu_to_le16(chan->rx_credits);
1401 chan->ident = l2cap_get_ident(conn);
1403 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
/* Aggregation context for building a single Enhanced Credit Based
 * (ECRED) connection request that covers several deferred channels.
 * NOTE(review): fields are elided in this extract — upstream also carries
 * the pdu wrapper (req + scid[] array), a channel count and the peer pid.
 */
1407 struct l2cap_ecred_conn_data {
1409 struct l2cap_ecred_conn_req req;
1412 struct l2cap_chan *chan;
/* Channel-list iterator callback: fold additional deferred ECRED channels
 * belonging to the same PID/PSM into the connection request being built,
 * so a single request carries multiple SCIDs. (Extract: lines elided.)
 */
1417 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1419 struct l2cap_ecred_conn_data *conn = data;
/* Skip the channel that initiated the request; it is already included */
1422 if (chan == conn->chan)
1425 if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1428 pid = chan->ops->get_peer_pid(chan);
1430 /* Only add deferred channels with the same PID/PSM */
1431 if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1432 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1435 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1438 l2cap_ecred_init(chan, 0);
1440 /* Set the same ident so we can match on the rsp */
1441 chan->ident = conn->chan->ident;
1443 /* Include all channels deferred */
1444 conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
/* Send an Enhanced Credit Based (ECRED) connection request for this
 * channel, sweeping the connection's channel list to batch any other
 * deferred channels with the same PID/PSM into the same PDU.
 * (Extract: lines elided — e.g. early returns after the guards.)
 */
1449 static void l2cap_ecred_connect(struct l2cap_chan *chan)
1451 struct l2cap_conn *conn = chan->conn;
1452 struct l2cap_ecred_conn_data data;
/* Deferred channels are picked up later by l2cap_ecred_defer_connect() */
1454 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1457 if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1460 l2cap_ecred_init(chan, 0);
1462 memset(&data, 0, sizeof(data));
1463 data.pdu.req.psm = chan->psm;
1464 data.pdu.req.mtu = cpu_to_le16(chan->imtu);
1465 data.pdu.req.mps = cpu_to_le16(chan->mps);
1466 data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1467 data.pdu.scid[0] = cpu_to_le16(chan->scid);
1469 chan->ident = l2cap_get_ident(conn);
1473 data.pid = chan->ops->get_peer_pid(chan);
/* Batch other deferred channels of the same PID/PSM into this request */
1475 __l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
/* Length = fixed request header plus one __le16 SCID per channel */
1477 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1478 sizeof(data.pdu.req) + data.count * sizeof(__le16),
/* Drive connection setup for an LE channel: ensure SMP security first,
 * then either mark the channel ready or issue the mode-appropriate
 * connect request (ECRED vs. LE credit based). (Extract: lines elided —
 * upstream distinguishes fixed-channel/no-PSM cases here.)
 */
1482 static void l2cap_le_start(struct l2cap_chan *chan)
1484 struct l2cap_conn *conn = chan->conn;
/* Security must be satisfied before any channel traffic */
1486 if (!smp_conn_security(conn->hcon, chan->sec_level))
1490 l2cap_chan_ready(chan);
1494 if (chan->state == BT_CONNECT) {
1495 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1496 l2cap_ecred_connect(chan);
1498 l2cap_le_connect(chan);
/* Kick off channel establishment on the appropriate transport:
 * AMP discovery for AMP-capable channels, the LE path for LE links,
 * or a classic L2CAP Connection Request otherwise.
 */
1502 static void l2cap_start_connection(struct l2cap_chan *chan)
1504 if (__amp_capable(chan)) {
1505 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1506 a2mp_discover_amp(chan);
1507 } else if (chan->conn->hcon->type == LE_LINK) {
1508 l2cap_le_start(chan);
1510 l2cap_send_conn_req(chan);
/* Send an L2CAP Information Request (feature mask) for this connection,
 * at most once (guarded by L2CAP_INFO_FEAT_MASK_REQ_SENT), and arm the
 * info timer so setup proceeds even if the peer never answers.
 */
1514 static void l2cap_request_info(struct l2cap_conn *conn)
1516 struct l2cap_info_req req;
1518 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1521 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1523 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1524 conn->info_ident = l2cap_get_ident(conn);
/* Fallback timer: l2cap_info_timeout() completes setup on no response */
1526 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1528 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Return true when the link's encryption key size is acceptable for
 * L2CAP traffic: unencrypted links pass (nothing to enforce), FIPS
 * requires a 16-byte key, otherwise the hdev-configured minimum applies.
 * (Extract: the FIPS branch's min_key_size assignment line is elided.)
 */
1532 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1534 /* The minimum encryption key size needs to be enforced by the
1535 * host stack before establishing any L2CAP connections. The
1536 * specification in theory allows a minimum of 1, but to align
1537 * BR/EDR and LE transports, a minimum of 7 is chosen.
1539 * This check might also be called for unencrypted connections
1540 * that have no key size requirements. Ensure that the link is
1541 * actually encrypted before enforcing a key size.
1543 int min_key_size = hcon->hdev->min_enc_key_size;
1545 /* On FIPS security level, key size must be 16 bytes */
1546 if (hcon->sec_level == BT_SECURITY_FIPS)
1549 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1550 hcon->enc_key_size >= min_key_size);
/* Start establishing a channel once prerequisites are met: LE links go
 * through l2cap_le_start(); BR/EDR waits for the feature-mask exchange,
 * then requires security and an acceptable encryption key size before
 * actually connecting. On key-size failure the disconnect timer is armed.
 * (Extract: return statements between the guards are elided.)
 */
1553 static void l2cap_do_start(struct l2cap_chan *chan)
1555 struct l2cap_conn *conn = chan->conn;
1557 if (conn->hcon->type == LE_LINK) {
1558 l2cap_le_start(chan);
/* Feature exchange not yet started: trigger it and wait */
1562 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1563 l2cap_request_info(conn);
1567 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1570 if (!l2cap_chan_check_security(chan, true) ||
1571 !__l2cap_no_conn_pending(chan))
1574 if (l2cap_check_enc_key_size(conn->hcon))
1575 l2cap_start_connection(chan);
/* Weak key: do not connect; let the disconnect timer tear things down */
1577 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Check whether the given channel mode is supported by both the local
 * feature mask and the remote's advertised feat_mask. Returns non-zero
 * when supported. (Extract: lines elided — upstream enables the ERTM and
 * streaming bits in local_feat_mask only when conntrack/hs is enabled,
 * and basic mode falls through to a default return.)
 */
1580 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1582 u32 local_feat_mask = l2cap_feat_mask;
1584 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1587 case L2CAP_MODE_ERTM:
1588 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1589 case L2CAP_MODE_STREAMING:
1590 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP Disconnection Request for the channel and move it to
 * BT_DISCONN with the given error. ERTM timers are stopped first; the
 * A2MP fixed channel has no disconnect PDU, only a state change.
 * (Extract: a NULL-conn guard and return after the A2MP branch are elided.)
 */
1596 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1598 struct l2cap_conn *conn = chan->conn;
1599 struct l2cap_disconn_req req;
1604 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1605 __clear_retrans_timer(chan);
1606 __clear_monitor_timer(chan);
1607 __clear_ack_timer(chan);
1610 if (chan->scid == L2CAP_CID_A2MP) {
1611 l2cap_state_change(chan, BT_DISCONN);
1615 req.dcid = cpu_to_le16(chan->dcid);
1616 req.scid = cpu_to_le16(chan->scid);
1617 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1620 l2cap_state_change_and_error(chan, BT_DISCONN, err);
1623 /* ---- L2CAP connections ---- */
/* Walk all channels on the connection (under chan_lock) and advance each
 * one's setup state machine: non-connection-oriented channels become
 * ready; BT_CONNECT channels are (re)started or closed depending on
 * security/mode/key-size checks; BT_CONNECT2 channels get a Connection
 * Response and, on success, the first Configuration Request.
 * NOTE(review): lines are elided in this extract — `continue`s inside the
 * loop, the `buf` declaration used at 1694, and several else/closing
 * braces are missing; compare with the complete source.
 */
1624 static void l2cap_conn_start(struct l2cap_conn *conn)
1626 struct l2cap_chan *chan, *tmp;
1628 BT_DBG("conn %p", conn);
1630 mutex_lock(&conn->chan_lock);
1632 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1633 l2cap_chan_lock(chan);
1635 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1636 l2cap_chan_ready(chan);
1637 l2cap_chan_unlock(chan);
1641 if (chan->state == BT_CONNECT) {
1642 if (!l2cap_chan_check_security(chan, true) ||
1643 !__l2cap_no_conn_pending(chan)) {
1644 l2cap_chan_unlock(chan);
/* Remote lacks the required mode and we cannot fall back: drop it */
1648 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1649 && test_bit(CONF_STATE2_DEVICE,
1650 &chan->conf_state)) {
1651 l2cap_chan_close(chan, ECONNRESET);
1652 l2cap_chan_unlock(chan);
1656 if (l2cap_check_enc_key_size(conn->hcon))
1657 l2cap_start_connection(chan);
1659 l2cap_chan_close(chan, ECONNREFUSED);
1661 } else if (chan->state == BT_CONNECT2) {
1662 struct l2cap_conn_rsp rsp;
1664 rsp.scid = cpu_to_le16(chan->dcid);
1665 rsp.dcid = cpu_to_le16(chan->scid);
1667 if (l2cap_chan_check_security(chan, false)) {
1668 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
/* Let userspace accept/reject: report authorization pending */
1669 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1670 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1671 chan->ops->defer(chan);
1674 l2cap_state_change(chan, BT_CONFIG);
1675 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1676 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1679 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1680 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1683 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1686 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1687 rsp.result != L2CAP_CR_SUCCESS) {
1688 l2cap_chan_unlock(chan);
1692 set_bit(CONF_REQ_SENT, &chan->conf_state);
1693 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1694 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1695 chan->num_conf_req++;
1698 l2cap_chan_unlock(chan);
1701 mutex_unlock(&conn->chan_lock);
/* LE-link post-connect housekeeping: trigger pending SMP security for
 * outgoing pairing, and, as peripheral, request a connection parameter
 * update when the current interval is outside the configured range.
 */
1704 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1706 struct hci_conn *hcon = conn->hcon;
1707 struct hci_dev *hdev = hcon->hdev;
1709 BT_DBG("%s conn %p", hdev->name, conn);
1711 /* For outgoing pairing which doesn't necessarily have an
1712 * associated socket (e.g. mgmt_pair_device).
1715 smp_conn_security(hcon, hcon->pending_sec_level);
1717 /* For LE peripheral connections, make sure the connection interval
1718 * is in the range of the minimum and maximum interval that has
1719 * been configured for this connection. If not, then trigger
1720 * the connection update procedure.
1722 if (hcon->role == HCI_ROLE_SLAVE &&
1723 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1724 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1725 struct l2cap_conn_param_update_req req;
1727 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1728 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1729 req.latency = cpu_to_le16(hcon->le_conn_latency);
1730 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1732 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1733 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
/* Called when the underlying link comes up: request the feature mask on
 * ACL links, advance every existing channel's setup (A2MP channels are
 * skipped), do LE-specific post-connect work, and release the queued
 * RX work. (Extract: `continue` after the A2MP skip is elided.)
 */
1737 static void l2cap_conn_ready(struct l2cap_conn *conn)
1739 struct l2cap_chan *chan;
1740 struct hci_conn *hcon = conn->hcon;
1742 BT_DBG("conn %p", conn);
1744 if (hcon->type == ACL_LINK)
1745 l2cap_request_info(conn);
1747 mutex_lock(&conn->chan_lock);
1749 list_for_each_entry(chan, &conn->chan_l, list) {
1751 l2cap_chan_lock(chan);
/* A2MP fixed channel has its own setup path */
1753 if (chan->scid == L2CAP_CID_A2MP) {
1754 l2cap_chan_unlock(chan);
1758 if (hcon->type == LE_LINK) {
1759 l2cap_le_start(chan);
1760 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1761 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1762 l2cap_chan_ready(chan);
1763 } else if (chan->state == BT_CONNECT) {
1764 l2cap_do_start(chan);
1767 l2cap_chan_unlock(chan);
1770 mutex_unlock(&conn->chan_lock);
1772 if (hcon->type == LE_LINK)
1773 l2cap_le_conn_ready(conn);
/* Process any PDUs that arrived before the connection was ready */
1775 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1778 /* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate the error to every channel that demanded reliable delivery
 * (FLAG_FORCE_RELIABLE); other channels tolerate loss and are untouched.
 */
1779 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1781 struct l2cap_chan *chan;
1783 BT_DBG("conn %p", conn);
1785 mutex_lock(&conn->chan_lock);
1787 list_for_each_entry(chan, &conn->chan_l, list) {
1788 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1789 l2cap_chan_set_err(chan, err);
1792 mutex_unlock(&conn->chan_lock);
/* Info-request timer expiry: give up waiting for the peer's feature-mask
 * response, mark the exchange as done, and let channel setup proceed.
 */
1795 static void l2cap_info_timeout(struct work_struct *work)
1797 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1800 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1801 conn->info_ident = 0;
1803 l2cap_conn_start(conn);
1808 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1809 * callback is called during registration. The ->remove callback is called
1810 * during unregistration.
1811 An l2cap_user object can be unregistered either explicitly or implicitly
1812 when the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1813 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1814 * External modules must own a reference to the l2cap_conn object if they intend
1815 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1816 * any time if they don't.
/* Register an external l2cap_user on the connection under the hci_dev
 * lock; calls the user's probe() callback and links it into conn->users.
 * Fails if the user is already registered or the conn was already torn
 * down. (Extract: error-path lines — ret assignments, goto/unlock — are
 * elided; see the complete source for the exact error handling.)
 */
1819 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1821 struct hci_dev *hdev = conn->hcon->hdev;
1824 /* We need to check whether l2cap_conn is registered. If it is not, we
1825 * must not register the l2cap_user. l2cap_conn_del() unregisters
1826 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1827 * relies on the parent hci_conn object to be locked. This itself relies
1828 * on the hci_dev object to be locked. So we must lock the hci device
1833 if (!list_empty(&user->list)) {
1838 /* conn->hchan is NULL after l2cap_conn_del() was called */
1844 ret = user->probe(conn, user);
1848 list_add(&user->list, &conn->users);
1852 hci_dev_unlock(hdev);
1855 EXPORT_SYMBOL(l2cap_register_user);
/* Unregister a previously registered l2cap_user under the hci_dev lock:
 * unlink it and invoke its remove() callback. A user that is not on the
 * list (already removed) is ignored. (Extract: goto/lock lines elided.)
 */
1857 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1859 struct hci_dev *hdev = conn->hcon->hdev;
1863 if (list_empty(&user->list))
1866 list_del_init(&user->list);
1867 user->remove(conn, user);
1870 hci_dev_unlock(hdev);
1872 EXPORT_SYMBOL(l2cap_unregister_user);
/* Drain conn->users on connection teardown, invoking each user's
 * remove() callback. list_del_init() keeps each user node self-consistent
 * so a later explicit l2cap_unregister_user() becomes a no-op.
 */
1874 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1876 struct l2cap_user *user;
1878 while (!list_empty(&conn->users)) {
1879 user = list_first_entry(&conn->users, struct l2cap_user, list);
1880 list_del_init(&user->list);
1881 user->remove(conn, user);
/* Tear down an l2cap_conn when its hci_conn goes away: flush pending RX,
 * cancel deferred work, detach users, close and delete every channel
 * (holding a channel ref across the ops->close() call), then drop the
 * HCI channel and the connection's own reference.
 * (Extract: the early "if (!conn) return;" guard is elided.)
 */
1885 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1887 struct l2cap_conn *conn = hcon->l2cap_data;
1888 struct l2cap_chan *chan, *l;
1893 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1895 kfree_skb(conn->rx_skb);
1897 skb_queue_purge(&conn->pending_rx);
1899 /* We can not call flush_work(&conn->pending_rx_work) here since we
1900 * might block if we are running on a worker from the same workqueue
1901 * pending_rx_work is waiting on.
1903 if (work_pending(&conn->pending_rx_work))
1904 cancel_work_sync(&conn->pending_rx_work);
1906 if (work_pending(&conn->id_addr_update_work))
1907 cancel_work_sync(&conn->id_addr_update_work);
1909 l2cap_unregister_all_users(conn);
1911 /* Force the connection to be immediately dropped */
1912 hcon->disc_timeout = 0;
1914 mutex_lock(&conn->chan_lock);
1917 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
/* Hold a ref so the channel survives l2cap_chan_del() until close() */
1918 l2cap_chan_hold(chan);
1919 l2cap_chan_lock(chan);
1921 l2cap_chan_del(chan, err);
1923 chan->ops->close(chan);
1925 l2cap_chan_unlock(chan);
1926 l2cap_chan_put(chan);
1929 mutex_unlock(&conn->chan_lock);
1931 hci_chan_del(conn->hchan);
1933 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1934 cancel_delayed_work_sync(&conn->info_timer);
1936 hcon->l2cap_data = NULL;
1938 l2cap_conn_put(conn);
/* kref release callback: drop the hci_conn reference held by the
 * connection. (Extract: the kfree(conn) line is elided here.)
 */
1941 static void l2cap_conn_free(struct kref *ref)
1943 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1945 hci_conn_put(conn->hcon);
/* Take a reference on the connection. (Extract: the "return conn;" line
 * and braces are elided.)
 */
1949 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1951 kref_get(&conn->ref);
1954 EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference on the connection; l2cap_conn_free() runs when the
 * count reaches zero.
 */
1956 void l2cap_conn_put(struct l2cap_conn *conn)
1958 kref_put(&conn->ref, l2cap_conn_free);
1960 EXPORT_SYMBOL(l2cap_conn_put);
1962 /* ---- Socket interface ---- */
1964 /* Find socket with psm and source / destination bdaddr.
1965 * Returns closest match.
/* Search the global channel list for a channel matching state, PSM,
 * link type and addresses. An exact src+dst match wins immediately;
 * otherwise the best wildcard (BDADDR_ANY) match is returned. The
 * returned channel carries a reference taken via
 * l2cap_chan_hold_unless_zero(); the caller must put it.
 * (Extract: `continue`s, the "c1 = c;" best-match assignment, returns
 * and braces are elided.)
 */
1967 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1972 struct l2cap_chan *c, *c1 = NULL;
1974 read_lock(&chan_list_lock);
1976 list_for_each_entry(c, &chan_list, global_l) {
1977 if (state && c->state != state)
1980 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1983 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1986 if (c->psm == psm) {
1987 int src_match, dst_match;
1988 int src_any, dst_any;
1991 src_match = !bacmp(&c->src, src);
1992 dst_match = !bacmp(&c->dst, dst);
1993 if (src_match && dst_match) {
1994 c = l2cap_chan_hold_unless_zero(c);
1998 read_unlock(&chan_list_lock);
2003 src_any = !bacmp(&c->src, BDADDR_ANY);
2004 dst_any = !bacmp(&c->dst, BDADDR_ANY);
2005 if ((src_match && dst_any) || (src_any && dst_match) ||
2006 (src_any && dst_any))
2012 c1 = l2cap_chan_hold_unless_zero(c1);
2014 read_unlock(&chan_list_lock);
/* ERTM monitor timer expiry: feed L2CAP_EV_MONITOR_TO into the TX state
 * machine under the channel lock, then drop the timer's channel ref.
 * (Extract: the early-exit condition guarding lines 2029/2030 — channel
 * no longer connected — and its return are elided.)
 */
2019 static void l2cap_monitor_timeout(struct work_struct *work)
2021 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2022 monitor_timer.work);
2024 BT_DBG("chan %p", chan);
2026 l2cap_chan_lock(chan);
2029 l2cap_chan_unlock(chan);
2030 l2cap_chan_put(chan);
2034 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2036 l2cap_chan_unlock(chan);
2037 l2cap_chan_put(chan);
/* ERTM retransmission timer expiry: feed L2CAP_EV_RETRANS_TO into the TX
 * state machine under the channel lock, then drop the timer's channel
 * ref. (Extract: the early-exit guard around lines 2050/2051 and its
 * return are elided.)
 */
2040 static void l2cap_retrans_timeout(struct work_struct *work)
2042 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2043 retrans_timer.work);
2045 BT_DBG("chan %p", chan);
2047 l2cap_chan_lock(chan);
2050 l2cap_chan_unlock(chan);
2051 l2cap_chan_put(chan);
2055 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2056 l2cap_chan_unlock(chan);
2057 l2cap_chan_put(chan);
/* Streaming-mode transmit: append the segmented PDUs to tx_q and send
 * them all immediately (no acknowledgements, no retransmission). Each
 * frame gets its control field packed and, when enabled, a CRC16 FCS
 * appended before transmission. (Extract: braces/blank lines elided.)
 */
2060 static void l2cap_streaming_send(struct l2cap_chan *chan,
2061 struct sk_buff_head *skbs)
2063 struct sk_buff *skb;
2064 struct l2cap_ctrl *control;
2066 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Hold off while an AMP channel move is in progress */
2068 if (__chan_is_moving(chan))
2071 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2073 while (!skb_queue_empty(&chan->tx_q)) {
2075 skb = skb_dequeue(&chan->tx_q);
2077 bt_cb(skb)->l2cap.retries = 1;
2078 control = &bt_cb(skb)->l2cap;
2080 control->reqseq = 0;
2081 control->txseq = chan->next_tx_seq;
2083 __pack_control(chan, control, skb);
2085 if (chan->fcs == L2CAP_FCS_CRC16) {
2086 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2087 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2090 l2cap_do_send(chan, skb);
2092 BT_DBG("Sent txseq %u", control->txseq);
2094 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2095 chan->frames_sent++;
/* ERTM transmit pump: send queued I-frames while the remote TX window
 * has room and the TX state machine is in XMIT. Each frame is cloned
 * before sending (the original stays on tx_q for retransmission), the
 * retransmission timer is armed, and window accounting is updated.
 * Returns the number of frames sent. (Extract: `sent` declaration, F-bit
 * set on CONN_SEND_FBIT, error returns and braces are elided.)
 */
2099 static int l2cap_ertm_send(struct l2cap_chan *chan)
2101 struct sk_buff *skb, *tx_skb;
2102 struct l2cap_ctrl *control;
2105 BT_DBG("chan %p", chan);
2107 if (chan->state != BT_CONNECTED)
/* Peer signalled receiver-not-ready: do not transmit */
2110 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2113 if (__chan_is_moving(chan))
2116 while (chan->tx_send_head &&
2117 chan->unacked_frames < chan->remote_tx_win &&
2118 chan->tx_state == L2CAP_TX_STATE_XMIT) {
2120 skb = chan->tx_send_head;
2122 bt_cb(skb)->l2cap.retries = 1;
2123 control = &bt_cb(skb)->l2cap;
2125 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Piggyback an acknowledgement on every outgoing I-frame */
2128 control->reqseq = chan->buffer_seq;
2129 chan->last_acked_seq = chan->buffer_seq;
2130 control->txseq = chan->next_tx_seq;
2132 __pack_control(chan, control, skb);
2134 if (chan->fcs == L2CAP_FCS_CRC16) {
2135 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2136 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2139 /* Clone after data has been modified. Data is assumed to be
2140 read-only (for locking purposes) on cloned sk_buffs.
2142 tx_skb = skb_clone(skb, GFP_KERNEL);
2147 __set_retrans_timer(chan);
2149 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2150 chan->unacked_frames++;
2151 chan->frames_sent++;
2154 if (skb_queue_is_last(&chan->tx_q, skb))
2155 chan->tx_send_head = NULL;
2157 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2159 l2cap_do_send(chan, tx_skb);
2160 BT_DBG("Sent txseq %u", control->txseq);
2163 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2164 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on retrans_list. Each frame's
 * retry counter is bumped and checked against max_tx (disconnect on
 * exhaustion); cloned frames are deep-copied before the control field and
 * FCS are rewritten in place. (Extract: `seq` declaration, F-bit/poll
 * handling, allocation-failure return and braces are elided.)
 */
2169 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2171 struct l2cap_ctrl control;
2172 struct sk_buff *skb;
2173 struct sk_buff *tx_skb;
2176 BT_DBG("chan %p", chan);
2178 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2181 if (__chan_is_moving(chan))
2184 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2185 seq = l2cap_seq_list_pop(&chan->retrans_list);
2187 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2189 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2194 bt_cb(skb)->l2cap.retries++;
2195 control = bt_cb(skb)->l2cap;
2197 if (chan->max_tx != 0 &&
2198 bt_cb(skb)->l2cap.retries > chan->max_tx) {
2199 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2200 l2cap_send_disconn_req(chan, ECONNRESET);
2201 l2cap_seq_list_clear(&chan->retrans_list);
2205 control.reqseq = chan->buffer_seq;
2206 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2211 if (skb_cloned(skb)) {
2212 /* Cloned sk_buffs are read-only, so we need a
2215 tx_skb = skb_copy(skb, GFP_KERNEL);
2217 tx_skb = skb_clone(skb, GFP_KERNEL);
2221 l2cap_seq_list_clear(&chan->retrans_list);
2225 /* Update skb contents */
2226 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2227 put_unaligned_le32(__pack_extended_control(&control),
2228 tx_skb->data + L2CAP_HDR_SIZE);
2230 put_unaligned_le16(__pack_enhanced_control(&control),
2231 tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute FCS over the updated frame, overwriting the old trailer */
2235 if (chan->fcs == L2CAP_FCS_CRC16) {
2236 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2237 tx_skb->len - L2CAP_FCS_SIZE);
2238 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2242 l2cap_do_send(chan, tx_skb);
2244 BT_DBG("Resent txseq %d", control.txseq);
2246 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit a single frame: queue the requested sequence number from
 * the peer's REJ/SREJ and run the resend machinery.
 */
2250 static void l2cap_retransmit(struct l2cap_chan *chan,
2251 struct l2cap_ctrl *control)
2253 BT_DBG("chan %p, control %p", chan, control);
2255 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2256 l2cap_ertm_resend(chan);
/* Retransmit all unacked frames starting at control->reqseq (peer REJ
 * with no specific frame, or poll response): find the first frame to
 * resend in tx_q, append everything up to tx_send_head to retrans_list,
 * then resend. (Extract: the F-bit condition before 2267 and braces are
 * elided.)
 */
2259 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2260 struct l2cap_ctrl *control)
2262 struct sk_buff *skb;
2264 BT_DBG("chan %p, control %p", chan, control);
2267 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* Rebuild the retransmission list from scratch */
2269 l2cap_seq_list_clear(&chan->retrans_list);
2271 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2274 if (chan->unacked_frames) {
2275 skb_queue_walk(&chan->tx_q, skb) {
2276 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2277 skb == chan->tx_send_head)
2281 skb_queue_walk_from(&chan->tx_q, skb) {
2282 if (skb == chan->tx_send_head)
2285 l2cap_seq_list_append(&chan->retrans_list,
2286 bt_cb(skb)->l2cap.txseq);
2289 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames: send RNR when locally busy, otherwise
 * prefer piggybacking the ack on pending I-frames; if none were sent and
 * the unacked window is at least 3/4 full, send an explicit RR, else
 * (re)arm the ack timer to batch future acks.
 * (Extract: `threshold` declaration, the final "threshold >>= 2" shift,
 * returns and braces are elided — verify against the full source.)
 */
2293 static void l2cap_send_ack(struct l2cap_chan *chan)
2295 struct l2cap_ctrl control;
2296 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2297 chan->last_acked_seq);
2300 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2301 chan, chan->last_acked_seq, chan->buffer_seq);
2303 memset(&control, 0, sizeof(control));
/* Locally busy: tell the peer to stop sending (RNR) */
2306 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2307 chan->rx_state == L2CAP_RX_STATE_RECV) {
2308 __clear_ack_timer(chan);
2309 control.super = L2CAP_SUPER_RNR;
2310 control.reqseq = chan->buffer_seq;
2311 l2cap_send_sframe(chan, &control);
2313 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2314 l2cap_ertm_send(chan);
2315 /* If any i-frames were sent, they included an ack */
2316 if (chan->buffer_seq == chan->last_acked_seq)
2320 /* Ack now if the window is 3/4ths full.
2321 * Calculate without mul or div
2323 threshold = chan->ack_win;
2324 threshold += threshold << 1;
2327 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2330 if (frames_to_ack >= threshold) {
2331 __clear_ack_timer(chan);
2332 control.super = L2CAP_SUPER_RR;
2333 control.reqseq = chan->buffer_seq;
2334 l2cap_send_sframe(chan, &control);
2339 __set_ack_timer(chan);
/* Copy `len` bytes of user data from the msghdr iterator into `skb`:
 * `count` bytes go into the head skb, the remainder into a chain of
 * fragment skbs sized by the connection MTU, hung off frag_list.
 * Returns 0 on success or a negative errno. (Extract: -EFAULT returns,
 * the "len -= count" bookkeeping, loop header and braces are elided.)
 */
2343 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2344 struct msghdr *msg, int len,
2345 int count, struct sk_buff *skb)
2347 struct l2cap_conn *conn = chan->conn;
2348 struct sk_buff **frag;
2351 if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2357 /* Continuation fragments (no L2CAP header) */
2358 frag = &skb_shinfo(skb)->frag_list;
2360 struct sk_buff *tmp;
2362 count = min_t(unsigned int, conn->mtu, len);
2364 tmp = chan->ops->alloc_skb(chan, 0, count,
2365 msg->msg_flags & MSG_DONTWAIT);
2367 return PTR_ERR(tmp);
2371 if (!copy_from_iter_full(skb_put(*frag, count), count,
/* Parent skb accounts for fragment bytes in len/data_len */
2378 skb->len += (*frag)->len;
2379 skb->data_len += (*frag)->len;
2381 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + PSM + user data
 * copied from msg. Returns the skb or an ERR_PTR. (Extract: the ERR_PTR
 * check after alloc_skb, kfree_skb on copy error and the final return
 * are elided.)
 */
2387 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2388 struct msghdr *msg, size_t len)
2390 struct l2cap_conn *conn = chan->conn;
2391 struct sk_buff *skb;
2392 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2393 struct l2cap_hdr *lh;
2395 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2396 __le16_to_cpu(chan->psm), len);
2398 count = min_t(unsigned int, (conn->mtu - hlen), len);
2400 skb = chan->ops->alloc_skb(chan, hlen, count,
2401 msg->msg_flags & MSG_DONTWAIT);
2405 /* Create L2CAP header */
2406 lh = skb_put(skb, L2CAP_HDR_SIZE);
2407 lh->cid = cpu_to_le16(chan->dcid);
2408 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2409 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2411 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2412 if (unlikely(err < 0)) {
2414 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header + user data.
 * Returns the skb or an ERR_PTR. (Extract: `err, count` declaration,
 * alloc-failure check, kfree_skb on error and final return are elided.)
 */
2419 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2420 struct msghdr *msg, size_t len)
2422 struct l2cap_conn *conn = chan->conn;
2423 struct sk_buff *skb;
2425 struct l2cap_hdr *lh;
2427 BT_DBG("chan %p len %zu", chan, len);
2429 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2431 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2432 msg->msg_flags & MSG_DONTWAIT);
2436 /* Create L2CAP header */
2437 lh = skb_put(skb, L2CAP_HDR_SIZE);
2438 lh->cid = cpu_to_le16(chan->dcid);
2439 lh->len = cpu_to_le16(len);
2441 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2442 if (unlikely(err < 0)) {
2444 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, placeholder control
 * field (filled in at send time), optional SDU-length field for the first
 * segment, user data, and room reserved for the FCS. Returns the skb or
 * an ERR_PTR. (Extract: the not-connected check before 2461, the sdulen
 * condition before 2466/2490, alloc/copy error branches and the final
 * return are elided.)
 */
2449 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2450 struct msghdr *msg, size_t len,
2453 struct l2cap_conn *conn = chan->conn;
2454 struct sk_buff *skb;
2455 int err, count, hlen;
2456 struct l2cap_hdr *lh;
2458 BT_DBG("chan %p len %zu", chan, len);
2461 return ERR_PTR(-ENOTCONN);
2463 hlen = __ertm_hdr_size(chan);
/* First segment of a segmented SDU carries the total SDU length */
2466 hlen += L2CAP_SDULEN_SIZE;
2468 if (chan->fcs == L2CAP_FCS_CRC16)
2469 hlen += L2CAP_FCS_SIZE;
2471 count = min_t(unsigned int, (conn->mtu - hlen), len);
2473 skb = chan->ops->alloc_skb(chan, hlen, count,
2474 msg->msg_flags & MSG_DONTWAIT);
2478 /* Create L2CAP header */
2479 lh = skb_put(skb, L2CAP_HDR_SIZE);
2480 lh->cid = cpu_to_le16(chan->dcid);
2481 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2483 /* Control header is populated later */
2484 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2485 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2487 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2490 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2492 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2493 if (unlikely(err < 0)) {
2495 return ERR_PTR(err);
2498 bt_cb(skb)->l2cap.fcs = chan->fcs;
2499 bt_cb(skb)->l2cap.retries = 0;
/* Segment an outgoing SDU into ERTM/streaming I-frame PDUs and queue
 * them on seg_queue. The PDU size is bounded by the HCI MTU (so each PDU
 * fits one HCI fragment), BR/EDR limits, header/FCS overhead and the
 * remote MPS. SAR markers run START -> CONTINUE... -> END (or
 * UNSEGMENTED for a single-PDU SDU). Returns 0 or a negative errno.
 * (Extract: `sar/sdu_len/pdu_len/err` declarations, the sdu_len
 * assignments, the segmentation loop header, "len -= pdu_len" and braces
 * are elided.)
 */
2503 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2504 struct sk_buff_head *seg_queue,
2505 struct msghdr *msg, size_t len)
2507 struct sk_buff *skb;
2512 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2514 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2515 * so fragmented skbs are not used. The HCI layer's handling
2516 * of fragmented skbs is not compatible with ERTM's queueing.
2519 /* PDU size is derived from the HCI MTU */
2520 pdu_len = chan->conn->mtu;
2522 /* Constrain PDU size for BR/EDR connections */
2524 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2526 /* Adjust for largest possible L2CAP overhead. */
2528 pdu_len -= L2CAP_FCS_SIZE;
2530 pdu_len -= __ertm_hdr_size(chan);
2532 /* Remote device may have requested smaller PDUs */
2533 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2535 if (len <= pdu_len) {
2536 sar = L2CAP_SAR_UNSEGMENTED;
2540 sar = L2CAP_SAR_START;
2545 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2548 __skb_queue_purge(seg_queue);
2549 return PTR_ERR(skb);
2552 bt_cb(skb)->l2cap.sar = sar;
2553 __skb_queue_tail(seg_queue, skb);
2559 if (len <= pdu_len) {
2560 sar = L2CAP_SAR_END;
2563 sar = L2CAP_SAR_CONTINUE;
/* Build an LE credit-based flow-control PDU: L2CAP header, optional
 * SDU-length field for the first segment, then user data. Returns the
 * skb or an ERR_PTR. (Extract: the not-connected check before 2582, the
 * sdulen conditions, alloc/copy error branches and the final return are
 * elided.)
 */
2570 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2572 size_t len, u16 sdulen)
2574 struct l2cap_conn *conn = chan->conn;
2575 struct sk_buff *skb;
2576 int err, count, hlen;
2577 struct l2cap_hdr *lh;
2579 BT_DBG("chan %p len %zu", chan, len);
2582 return ERR_PTR(-ENOTCONN);
2584 hlen = L2CAP_HDR_SIZE;
2587 hlen += L2CAP_SDULEN_SIZE;
2589 count = min_t(unsigned int, (conn->mtu - hlen), len);
2591 skb = chan->ops->alloc_skb(chan, hlen, count,
2592 msg->msg_flags & MSG_DONTWAIT);
2596 /* Create L2CAP header */
2597 lh = skb_put(skb, L2CAP_HDR_SIZE);
2598 lh->cid = cpu_to_le16(chan->dcid);
2599 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2602 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2604 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2605 if (unlikely(err < 0)) {
2607 return ERR_PTR(err);
/* Segment an SDU for LE credit-based flow control: the first PDU carries
 * the 2-byte SDU length (so its payload budget is remote MPS minus
 * SDULEN), subsequent PDUs use the full MPS. Queues PDUs on seg_queue;
 * returns 0 or a negative errno. (Extract: declarations, sdu_len setup,
 * the loop header, length bookkeeping and the final return are elided.)
 */
2613 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2614 struct sk_buff_head *seg_queue,
2615 struct msghdr *msg, size_t len)
2617 struct sk_buff *skb;
2621 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2624 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2630 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2632 __skb_queue_purge(seg_queue);
2633 return PTR_ERR(skb);
2636 __skb_queue_tail(seg_queue, skb);
/* After the first PDU there is no SDULEN field: reclaim those bytes */
2642 pdu_len += L2CAP_SDULEN_SIZE;
/* Credit-based send pump: transmit queued PDUs while TX credits remain.
 * (Extract: `sent` declaration and per-iteration credit decrement /
 * sent++ lines are elided.)
 */
2649 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2653 BT_DBG("chan %p", chan);
2655 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2656 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2661 BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2662 skb_queue_len(&chan->tx_q));
/* Top-level transmit entry point: dispatch an outgoing message according
 * to the channel type/mode — connectionless G-frame, credit-based
 * (LE/ECRED) segmentation + credit pump, basic B-frame, or ERTM/streaming
 * segmentation feeding the TX state machine. Enforces the outgoing MTU
 * per mode and re-checks the channel state after any allocation that may
 * have dropped the channel lock. Returns `len` on success or a negative
 * errno. (Extract: `err` declaration, -EMSGSIZE returns, "err = len"
 * assignments, break statements and the function's closing brace are
 * elided.)
 */
2665 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2667 struct sk_buff *skb;
2669 struct sk_buff_head seg_queue;
2674 /* Connectionless channel */
2675 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2676 skb = l2cap_create_connless_pdu(chan, msg, len);
2678 return PTR_ERR(skb);
2680 /* Channel lock is released before requesting new skb and then
2681 * reacquired thus we need to recheck channel state.
2683 if (chan->state != BT_CONNECTED) {
2688 l2cap_do_send(chan, skb);
2692 switch (chan->mode) {
2693 case L2CAP_MODE_LE_FLOWCTL:
2694 case L2CAP_MODE_EXT_FLOWCTL:
2695 /* Check outgoing MTU */
2696 if (len > chan->omtu)
2699 __skb_queue_head_init(&seg_queue);
2701 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2703 if (chan->state != BT_CONNECTED) {
2704 __skb_queue_purge(&seg_queue);
2711 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2713 l2cap_le_flowctl_send(chan);
/* Out of credits: ask the owner to stop feeding us data */
2715 if (!chan->tx_credits)
2716 chan->ops->suspend(chan);
2722 case L2CAP_MODE_BASIC:
2723 /* Check outgoing MTU */
2724 if (len > chan->omtu)
2727 /* Create a basic PDU */
2728 skb = l2cap_create_basic_pdu(chan, msg, len);
2730 return PTR_ERR(skb);
2732 /* Channel lock is released before requesting new skb and then
2733 * reacquired thus we need to recheck channel state.
2735 if (chan->state != BT_CONNECTED) {
2740 l2cap_do_send(chan, skb);
2744 case L2CAP_MODE_ERTM:
2745 case L2CAP_MODE_STREAMING:
2746 /* Check outgoing MTU */
2747 if (len > chan->omtu) {
2752 __skb_queue_head_init(&seg_queue);
2754 /* Do segmentation before calling in to the state machine,
2755 * since it's possible to block while waiting for memory
2758 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2760 /* The channel could have been closed while segmenting,
2761 * check that it is still connected.
2763 if (chan->state != BT_CONNECTED) {
2764 __skb_queue_purge(&seg_queue);
2771 if (chan->mode == L2CAP_MODE_ERTM)
2772 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2774 l2cap_streaming_send(chan, &seg_queue);
2778 /* If the skbs were not queued for sending, they'll still be in
2779 * seg_queue and need to be purged.
2781 __skb_queue_purge(&seg_queue);
2785 BT_DBG("bad state %1.1x", chan->mode);
2791 EXPORT_SYMBOL_GPL(l2cap_chan_send);
/* Send SREJ S-frames for every missing sequence number between
 * expected_tx_seq and the received txseq, recording each on srej_list so
 * the retransmissions can be matched; advance expected_tx_seq past the
 * frame that triggered this. (Extract: `seq` declaration and braces are
 * elided.)
 */
2793 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2795 struct l2cap_ctrl control;
2798 BT_DBG("chan %p, txseq %u", chan, txseq);
2800 memset(&control, 0, sizeof(control));
2802 control.super = L2CAP_SUPER_SREJ;
2804 for (seq = chan->expected_tx_seq; seq != txseq;
2805 seq = __next_seq(chan, seq)) {
/* Only SREJ frames we have not already buffered out of order */
2806 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2807 control.reqseq = seq;
2808 l2cap_send_sframe(chan, &control);
2809 l2cap_seq_list_append(&chan->srej_list, seq);
2813 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the newest outstanding missing frame (the tail of
 * srej_list); no-op when the list is empty.
 */
2816 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2818 struct l2cap_ctrl control;
2820 BT_DBG("chan %p", chan);
2822 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2825 memset(&control, 0, sizeof(control));
2827 control.super = L2CAP_SUPER_SREJ;
2828 control.reqseq = chan->srej_list.tail;
2829 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for all still-missing frames: rotate through srej_list
 * once (pop each seq, resend its SREJ, append it back), stopping early
 * when txseq or an empty list is reached. (Extract: `seq` and
 * `initial_head` declarations and the do-loop opening are elided.)
 */
2832 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2834 struct l2cap_ctrl control;
2838 BT_DBG("chan %p, txseq %u", chan, txseq);
2840 memset(&control, 0, sizeof(control));
2842 control.super = L2CAP_SUPER_SREJ;
2844 /* Capture initial list head to allow only one pass through the list. */
2845 initial_head = chan->srej_list.head;
2848 seq = l2cap_seq_list_pop(&chan->srej_list);
2849 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2852 control.reqseq = seq;
2853 l2cap_send_sframe(chan, &control);
2854 l2cap_seq_list_append(&chan->srej_list, seq);
2855 } while (chan->srej_list.head != initial_head);
/* Process an acknowledgement: free every frame in tx_q from
 * expected_ack_seq up to (but not including) reqseq, update the unacked
 * count, and stop the retransmission timer when everything is acked.
 * (Extract: `ackseq` declaration, the early return, the NULL check on
 * acked_skb and braces are elided.)
 */
2858 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2860 struct sk_buff *acked_skb;
2863 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2865 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2868 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2869 chan->expected_ack_seq, chan->unacked_frames);
2871 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2872 ackseq = __next_seq(chan, ackseq)) {
2874 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2876 skb_unlink(acked_skb, &chan->tx_q);
2877 kfree_skb(acked_skb);
2878 chan->unacked_frames--;
2882 chan->expected_ack_seq = reqseq;
2884 if (chan->unacked_frames == 0)
2885 __clear_retrans_timer(chan);
2887 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abort the SREJ_SENT receive state: discard all out-of-order frames and
 * pending SREJ bookkeeping, rewind expected_tx_seq to buffer_seq, and
 * return the receive state machine to plain RECV.
 */
2890 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2892 BT_DBG("chan %p", chan);
2894 chan->expected_tx_seq = chan->buffer_seq;
2895 l2cap_seq_list_clear(&chan->srej_list);
2896 skb_queue_purge(&chan->srej_q);
2897 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM transmit-side state machine, XMIT state: normal transmission is
 * allowed.  Handles data requests, local-busy transitions, peer acks,
 * explicit polls and retransmission timeouts.
 * NOTE(review): `break;` statements between cases are on elided lines of
 * this extract; visible code kept byte-identical.
 */
2900 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2901 struct l2cap_ctrl *control,
2902 struct sk_buff_head *skbs, u8 event)
2904 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
/* Queue new data and transmit immediately. */
2908 case L2CAP_EV_DATA_REQUEST:
2909 if (chan->tx_send_head == NULL)
2910 chan->tx_send_head = skb_peek(skbs);
2912 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2913 l2cap_ertm_send(chan);
2915 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2916 BT_DBG("Enter LOCAL_BUSY");
2917 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2919 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2920 /* The SREJ_SENT state must be aborted if we are to
2921 * enter the LOCAL_BUSY state.
2923 l2cap_abort_rx_srej_sent(chan);
2926 l2cap_send_ack(chan);
2929 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2930 BT_DBG("Exit LOCAL_BUSY");
2931 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we told the peer we were busy (RNR), send RR with P=1 to
 * resume and wait for the F-bit in WAIT_F. */
2933 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2934 struct l2cap_ctrl local_control;
2936 memset(&local_control, 0, sizeof(local_control));
2937 local_control.sframe = 1;
2938 local_control.super = L2CAP_SUPER_RR;
2939 local_control.poll = 1;
2940 local_control.reqseq = chan->buffer_seq;
2941 l2cap_send_sframe(chan, &local_control);
2943 chan->retry_count = 1;
2944 __set_monitor_timer(chan);
2945 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2948 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2949 l2cap_process_reqseq(chan, control->reqseq);
/* Poll the peer and wait for the F-bit response. */
2951 case L2CAP_EV_EXPLICIT_POLL:
2952 l2cap_send_rr_or_rnr(chan, 1);
2953 chan->retry_count = 1;
2954 __set_monitor_timer(chan);
2955 __clear_ack_timer(chan);
2956 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
/* Retransmission timer fired: poll for the peer's receive state. */
2958 case L2CAP_EV_RETRANS_TO:
2959 l2cap_send_rr_or_rnr(chan, 1);
2960 chan->retry_count = 1;
2961 __set_monitor_timer(chan);
2962 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2964 case L2CAP_EV_RECV_FBIT:
2965 /* Nothing to process */
2972 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2973 struct l2cap_ctrl *control,
2974 struct sk_buff_head *skbs, u8 event)
2976 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2980 case L2CAP_EV_DATA_REQUEST:
2981 if (chan->tx_send_head == NULL)
2982 chan->tx_send_head = skb_peek(skbs);
2983 /* Queue data, but don't send. */
2984 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2986 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2987 BT_DBG("Enter LOCAL_BUSY");
2988 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2990 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2991 /* The SREJ_SENT state must be aborted if we are to
2992 * enter the LOCAL_BUSY state.
2994 l2cap_abort_rx_srej_sent(chan);
2997 l2cap_send_ack(chan);
3000 case L2CAP_EV_LOCAL_BUSY_CLEAR:
3001 BT_DBG("Exit LOCAL_BUSY");
3002 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3004 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
3005 struct l2cap_ctrl local_control;
3006 memset(&local_control, 0, sizeof(local_control));
3007 local_control.sframe = 1;
3008 local_control.super = L2CAP_SUPER_RR;
3009 local_control.poll = 1;
3010 local_control.reqseq = chan->buffer_seq;
3011 l2cap_send_sframe(chan, &local_control);
3013 chan->retry_count = 1;
3014 __set_monitor_timer(chan);
3015 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
3018 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
3019 l2cap_process_reqseq(chan, control->reqseq);
3022 case L2CAP_EV_RECV_FBIT:
3023 if (control && control->final) {
3024 __clear_monitor_timer(chan);
3025 if (chan->unacked_frames > 0)
3026 __set_retrans_timer(chan);
3027 chan->retry_count = 0;
3028 chan->tx_state = L2CAP_TX_STATE_XMIT;
3029 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
3032 case L2CAP_EV_EXPLICIT_POLL:
3035 case L2CAP_EV_MONITOR_TO:
3036 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3037 l2cap_send_rr_or_rnr(chan, 1);
3038 __set_monitor_timer(chan);
3039 chan->retry_count++;
3041 l2cap_send_disconn_req(chan, ECONNABORTED);
/* Dispatch a transmit-side event to the handler for the channel's current
 * ERTM TX state (XMIT or WAIT_F).
 */
3049 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3050 struct sk_buff_head *skbs, u8 event)
3052 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3053 chan, control, skbs, event, chan->tx_state);
3055 switch (chan->tx_state) {
3056 case L2CAP_TX_STATE_XMIT:
3057 l2cap_tx_state_xmit(chan, control, skbs, event);
3059 case L2CAP_TX_STATE_WAIT_F:
3060 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Feed a received frame's ReqSeq + F-bit into the TX state machine
 * (acknowledges outstanding I-frames).
 */
3068 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3069 struct l2cap_ctrl *control)
3071 BT_DBG("chan %p, control %p", chan, control);
3072 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Feed only a received frame's F-bit into the TX state machine (no
 * acknowledgment processing).
 */
3075 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3076 struct l2cap_ctrl *control)
3078 BT_DBG("chan %p, control %p", chan, control);
3079 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3082 /* Copy frame to all raw sockets on that connection */
3083 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3085 struct sk_buff *nskb;
3086 struct l2cap_chan *chan;
3088 BT_DBG("conn %p", conn);
3090 mutex_lock(&conn->chan_lock);
3092 list_for_each_entry(chan, &conn->chan_l, list) {
/* Only raw (e.g. sniffer/HCI raw) channels receive copies. */
3093 if (chan->chan_type != L2CAP_CHAN_RAW)
3096 /* Don't send frame to the channel it came from */
3097 if (bt_cb(skb)->l2cap.chan == chan)
3100 nskb = skb_clone(skb, GFP_KERNEL);
/* NOTE(review): the NULL check on nskb is on an elided line of this
 * extract. */
3103 if (chan->ops->recv(chan, nskb))
3107 mutex_unlock(&conn->chan_lock);
3110 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command PDU: L2CAP header + command header
 * + dlen bytes of payload, fragmenting into frag_list skbs when the
 * total exceeds the connection MTU.  Returns the head skb, or (per the
 * visible error checks) an error on undersized MTU / allocation failure.
 */
3111 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3112 u8 ident, u16 dlen, void *data)
3114 struct sk_buff *skb, **frag;
3115 struct l2cap_cmd_hdr *cmd;
3116 struct l2cap_hdr *lh;
3119 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3120 conn, code, ident, dlen);
/* MTU must at least hold the two fixed headers. */
3122 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3125 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3126 count = min_t(unsigned int, conn->mtu, len);
3128 skb = bt_skb_alloc(count, GFP_KERNEL);
3132 lh = skb_put(skb, L2CAP_HDR_SIZE);
3133 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling CID differs between LE and BR/EDR links. */
3135 if (conn->hcon->type == LE_LINK)
3136 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3138 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3140 cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3143 cmd->len = cpu_to_le16(dlen);
3146 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3147 skb_put_data(skb, data, count);
3153 /* Continuation fragments (no L2CAP header) */
3154 frag = &skb_shinfo(skb)->frag_list;
3156 count = min_t(unsigned int, conn->mtu, len);
3158 *frag = bt_skb_alloc(count, GFP_KERNEL);
3162 skb_put_data(*frag, data, count);
3167 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: return its type, payload
 * length and value (1/2/4-byte values decoded little-endian; larger
 * payloads returned as a pointer cast into *val).  Returns the total
 * encoded size so the caller can advance through the option list.
 */
3177 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3180 struct l2cap_conf_opt *opt = *ptr;
3183 len = L2CAP_CONF_OPT_SIZE + opt->len;
3191 *val = *((u8 *) opt->val);
3195 *val = get_unaligned_le16(opt->val);
3199 *val = get_unaligned_le32(opt->val);
/* Variable-length payload: hand back a pointer, not a copy. */
3203 *val = (unsigned long) opt->val;
3207 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option to the buffer at *ptr, bounds-checked
 * against `size` remaining bytes.  1/2/4-byte values are encoded
 * little-endian; larger values are memcpy'd from the pointer in `val`.
 * Advances *ptr past the encoded option.
 */
3211 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3213 struct l2cap_conf_opt *opt = *ptr;
3215 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
/* Refuse to write past the caller's buffer (CVE-2017-1000251 class). */
3217 if (size < L2CAP_CONF_OPT_SIZE + len)
3225 *((u8 *) opt->val) = val;
3229 put_unaligned_le16(val, opt->val);
3233 put_unaligned_le32(val, opt->val);
3237 memcpy(opt->val, (void *) val, len);
3241 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option describing the local
 * side's service: full parameters for ERTM, best-effort defaults for
 * streaming mode.
 */
3244 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3246 struct l2cap_conf_efs efs;
3248 switch (chan->mode) {
3249 case L2CAP_MODE_ERTM:
3250 efs.id = chan->local_id;
3251 efs.stype = chan->local_stype;
3252 efs.msdu = cpu_to_le16(chan->local_msdu);
3253 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3254 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3255 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3258 case L2CAP_MODE_STREAMING:
/* NOTE(review): efs.id assignment is on an elided line of this extract. */
3260 efs.stype = L2CAP_SERV_BESTEFFORT;
3261 efs.msdu = cpu_to_le16(chan->local_msdu);
3262 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3271 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3272 (unsigned long) &efs, size);
/* Delayed-work handler for the ERTM ack timer: if frames have been
 * received but not yet acknowledged (buffer_seq advanced past
 * last_acked_seq), send an RR/RNR acknowledgment.  Drops the chan
 * reference taken when the work was scheduled.
 */
3275 static void l2cap_ack_timeout(struct work_struct *work)
3277 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3281 BT_DBG("chan %p", chan);
3283 l2cap_chan_lock(chan);
3285 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3286 chan->last_acked_seq);
/* NOTE(review): the frames_to_ack != 0 check is on an elided line. */
3289 l2cap_send_rr_or_rnr(chan, 0);
3291 l2cap_chan_unlock(chan);
3292 l2cap_chan_put(chan);
/* Initialize a channel's ERTM/streaming transmit and receive machinery:
 * zero all sequence counters, reset AMP move state, and (for ERTM only)
 * set initial RX/TX states, arm the three delayed-work timers and
 * allocate the SREJ/retransmission sequence lists.  Returns 0 or a
 * negative errno from sequence-list allocation.
 */
3295 int l2cap_ertm_init(struct l2cap_chan *chan)
3299 chan->next_tx_seq = 0;
3300 chan->expected_tx_seq = 0;
3301 chan->expected_ack_seq = 0;
3302 chan->unacked_frames = 0;
3303 chan->buffer_seq = 0;
3304 chan->frames_sent = 0;
3305 chan->last_acked_seq = 0;
3307 chan->sdu_last_frag = NULL;
3310 skb_queue_head_init(&chan->tx_q);
3312 chan->local_amp_id = AMP_ID_BREDR;
3313 chan->move_id = AMP_ID_BREDR;
3314 chan->move_state = L2CAP_MOVE_STABLE;
3315 chan->move_role = L2CAP_MOVE_ROLE_NONE;
/* Streaming mode needs none of the ERTM timers/lists below. */
3317 if (chan->mode != L2CAP_MODE_ERTM)
3320 chan->rx_state = L2CAP_RX_STATE_RECV;
3321 chan->tx_state = L2CAP_TX_STATE_XMIT;
3323 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3324 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3325 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3327 skb_queue_head_init(&chan->srej_q);
3329 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3333 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Unwind the first list if the second allocation failed. */
3335 l2cap_seq_list_free(&chan->srej_list);
/* Pick the channel mode actually usable with the peer: keep ERTM or
 * streaming only if the remote feature mask supports it, otherwise fall
 * back to basic mode.
 */
3340 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3343 case L2CAP_MODE_STREAMING:
3344 case L2CAP_MODE_ERTM:
3345 if (l2cap_mode_supported(mode, remote_feat_mask))
3349 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only when the local side offers A2MP
 * and the peer advertises the extended-window feature.
 */
3353 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3355 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3356 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
/* Extended Flow Specification is usable only when the local side offers
 * A2MP and the peer advertises the extended-flow feature.
 */
3359 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3361 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3362 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
/* Fill in the RFC option's retransmission and monitor timeouts: derived
 * from the AMP controller's best-effort flush timeout when the channel
 * runs on an AMP link, otherwise the spec defaults.
 */
3365 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3366 struct l2cap_conf_rfc *rfc)
3368 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3369 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3371 /* Class 1 devices have must have ERTM timeouts
3372 * exceeding the Link Supervision Timeout. The
3373 * default Link Supervision Timeout for AMP
3374 * controllers is 10 seconds.
3376 * Class 1 devices use 0xffffffff for their
3377 * best-effort flush timeout, so the clamping logic
3378 * will result in a timeout that meets the above
3379 * requirement. ERTM timeouts are 16-bit values, so
3380 * the maximum timeout is 65.535 seconds.
3383 /* Convert timeout to milliseconds and round */
3384 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3386 /* This is the recommended formula for class 2 devices
3387 * that start ERTM timers when packets are sent to the
3390 ertm_to = 3 * ertm_to + 500;
/* Clamp to the 16-bit field (65.535 s max); the clamp value
 * assignment is on an elided line of this extract. */
3392 if (ertm_to > 0xffff)
3395 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3396 rfc->monitor_timeout = rfc->retrans_timeout;
3398 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3399 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
/* Decide the transmit window: enable the extended control field (and its
 * larger window maximum) when the requested window exceeds the default
 * and the peer supports EWS; otherwise clamp to the default window.
 * ack_win starts equal to the negotiated tx_win.
 */
3403 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3405 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3406 __l2cap_ews_supported(chan->conn)) {
3407 /* use extended control field */
3408 set_bit(FLAG_EXT_CTRL, &chan->flags);
3409 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3411 chan->tx_win = min_t(u16, chan->tx_win,
3412 L2CAP_DEFAULT_TX_WINDOW);
3413 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3415 chan->ack_win = chan->tx_win;
/* Auto-select the incoming MTU from the largest ACL packet type NOT
 * excluded on this connection.  Packet types are checked smallest to
 * largest; each check that passes raises chan->imtu (the assignments are
 * on lines elided from this extract).  Note pkt_type bits for 2-DH*/3-DH*
 * are exclusion bits: set means "do not use".
 */
3418 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3420 struct hci_conn *conn = chan->conn->hcon;
3422 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3424 /* The 2-DH1 packet has between 2 and 56 information bytes
3425 * (including the 2-byte payload header)
3427 if (!(conn->pkt_type & HCI_2DH1))
3430 /* The 3-DH1 packet has between 2 and 85 information bytes
3431 * (including the 2-byte payload header)
3433 if (!(conn->pkt_type & HCI_3DH1))
3436 /* The 2-DH3 packet has between 2 and 369 information bytes
3437 * (including the 2-byte payload header)
3439 if (!(conn->pkt_type & HCI_2DH3))
3442 /* The 3-DH3 packet has between 2 and 554 information bytes
3443 * (including the 2-byte payload header)
3445 if (!(conn->pkt_type & HCI_3DH3))
3448 /* The 2-DH5 packet has between 2 and 681 information bytes
3449 * (including the 2-byte payload header)
3451 if (!(conn->pkt_type & HCI_2DH5))
3454 /* The 3-DH5 packet has between 2 and 1023 information bytes
3455 * (including the 2-byte payload header)
3457 if (!(conn->pkt_type & HCI_3DH5))
/* Build an outgoing Configuration Request into `data` (bounded by
 * data_size): selects the channel mode against the peer's feature mask
 * on the first request, then emits MTU, RFC, and optionally EFS/EWS/FCS
 * options.  Returns the encoded length (ptr - data, on an elided line).
 */
3461 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3463 struct l2cap_conf_req *req = data;
3464 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3465 void *ptr = req->data;
3466 void *endptr = data + data_size;
3469 BT_DBG("chan %p", chan);
/* Mode is only (re)selected on the very first request/response. */
3471 if (chan->num_conf_req || chan->num_conf_rsp)
3474 switch (chan->mode) {
3475 case L2CAP_MODE_STREAMING:
3476 case L2CAP_MODE_ERTM:
3477 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3480 if (__l2cap_efs_supported(chan->conn))
3481 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3485 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3490 if (chan->imtu != L2CAP_DEFAULT_MTU) {
/* NOTE(review): the imtu==0 auto-select condition is on an elided line. */
3492 l2cap_mtu_auto(chan);
3493 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3497 switch (chan->mode) {
3498 case L2CAP_MODE_BASIC:
/* Basic mode needs an explicit RFC option only if the peer
 * knows about ERTM/streaming at all. */
3502 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3503 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3506 rfc.mode = L2CAP_MODE_BASIC;
3508 rfc.max_transmit = 0;
3509 rfc.retrans_timeout = 0;
3510 rfc.monitor_timeout = 0;
3511 rfc.max_pdu_size = 0;
3513 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3514 (unsigned long) &rfc, endptr - ptr);
3517 case L2CAP_MODE_ERTM:
3518 rfc.mode = L2CAP_MODE_ERTM;
3519 rfc.max_transmit = chan->max_tx;
3521 __l2cap_set_ertm_timeouts(chan, &rfc);
/* Cap PDU size so a maximal frame still fits the link MTU. */
3523 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3524 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3526 rfc.max_pdu_size = cpu_to_le16(size);
3528 l2cap_txwin_setup(chan);
3530 rfc.txwin_size = min_t(u16, chan->tx_win,
3531 L2CAP_DEFAULT_TX_WINDOW);
3533 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3534 (unsigned long) &rfc, endptr - ptr);
3536 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3537 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3539 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3540 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3541 chan->tx_win, endptr - ptr);
3543 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3544 if (chan->fcs == L2CAP_FCS_NONE ||
3545 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3546 chan->fcs = L2CAP_FCS_NONE;
3547 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3548 chan->fcs, endptr - ptr);
3552 case L2CAP_MODE_STREAMING:
3553 l2cap_txwin_setup(chan);
3554 rfc.mode = L2CAP_MODE_STREAMING;
3556 rfc.max_transmit = 0;
3557 rfc.retrans_timeout = 0;
3558 rfc.monitor_timeout = 0;
3560 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3561 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3563 rfc.max_pdu_size = cpu_to_le16(size);
3565 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3566 (unsigned long) &rfc, endptr - ptr);
3568 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3569 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3571 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3572 if (chan->fcs == L2CAP_FCS_NONE ||
3573 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3574 chan->fcs = L2CAP_FCS_NONE;
3575 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3576 chan->fcs, endptr - ptr);
3581 req->dcid = cpu_to_le16(chan->dcid);
3582 req->flags = cpu_to_le16(0);
/* Parse the peer's buffered Configuration Request (chan->conf_req /
 * conf_len) and build our Configuration Response into `data` (bounded by
 * data_size).  Walks each option, validates it, negotiates mode/MTU/
 * RFC/EFS/EWS, and emits the response options plus result code.
 * Returns the encoded response length (on an elided line) or
 * -ECONNREFUSED when negotiation must fail.
 */
3587 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3589 struct l2cap_conf_rsp *rsp = data;
3590 void *ptr = rsp->data;
3591 void *endptr = data + data_size;
3592 void *req = chan->conf_req;
3593 int len = chan->conf_len;
3594 int type, hint, olen;
3596 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3597 struct l2cap_conf_efs efs;
3599 u16 mtu = L2CAP_DEFAULT_MTU;
3600 u16 result = L2CAP_CONF_SUCCESS;
3603 BT_DBG("chan %p", chan);
3605 while (len >= L2CAP_CONF_OPT_SIZE) {
3606 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; mandatory ones must be understood. */
3610 hint = type & L2CAP_CONF_HINT;
3611 type &= L2CAP_CONF_MASK;
3614 case L2CAP_CONF_MTU:
3620 case L2CAP_CONF_FLUSH_TO:
3623 chan->flush_to = val;
3626 case L2CAP_CONF_QOS:
3629 case L2CAP_CONF_RFC:
3630 if (olen != sizeof(rfc))
3632 memcpy(&rfc, (void *) val, olen);
3635 case L2CAP_CONF_FCS:
3638 if (val == L2CAP_FCS_NONE)
3639 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3642 case L2CAP_CONF_EFS:
3643 if (olen != sizeof(efs))
3646 memcpy(&efs, (void *) val, olen);
3649 case L2CAP_CONF_EWS:
/* EWS requires local A2MP support; otherwise refuse. */
3652 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3653 return -ECONNREFUSED;
3654 set_bit(FLAG_EXT_CTRL, &chan->flags);
3655 set_bit(CONF_EWS_RECV, &chan->conf_state);
3656 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3657 chan->remote_tx_win = val;
/* Unknown mandatory option: echo its type back with UNKNOWN. */
3663 result = L2CAP_CONF_UNKNOWN;
3664 l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
3669 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3672 switch (chan->mode) {
3673 case L2CAP_MODE_STREAMING:
3674 case L2CAP_MODE_ERTM:
3675 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3676 chan->mode = l2cap_select_mode(rfc.mode,
3677 chan->conn->feat_mask);
3682 if (__l2cap_efs_supported(chan->conn))
3683 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3685 return -ECONNREFUSED;
3688 if (chan->mode != rfc.mode)
3689 return -ECONNREFUSED;
/* Mode mismatch after negotiation window closed: unacceptable. */
3695 if (chan->mode != rfc.mode) {
3696 result = L2CAP_CONF_UNACCEPT;
3697 rfc.mode = chan->mode;
3699 if (chan->num_conf_rsp == 1)
3700 return -ECONNREFUSED;
3702 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3703 (unsigned long) &rfc, endptr - ptr);
3706 if (result == L2CAP_CONF_SUCCESS) {
3707 /* Configure output options and let the other side know
3708 * which ones we don't like. */
3710 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3711 result = L2CAP_CONF_UNACCEPT;
3714 set_bit(CONF_MTU_DONE, &chan->conf_state);
3716 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
/* EFS service type must match ours unless one side is NOTRAFIC. */
3719 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3720 efs.stype != L2CAP_SERV_NOTRAFIC &&
3721 efs.stype != chan->local_stype) {
3723 result = L2CAP_CONF_UNACCEPT;
3725 if (chan->num_conf_req >= 1)
3726 return -ECONNREFUSED;
3728 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3730 (unsigned long) &efs, endptr - ptr);
3732 /* Send PENDING Conf Rsp */
3733 result = L2CAP_CONF_PENDING;
3734 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3739 case L2CAP_MODE_BASIC:
3740 chan->fcs = L2CAP_FCS_NONE;
3741 set_bit(CONF_MODE_DONE, &chan->conf_state);
3744 case L2CAP_MODE_ERTM:
3745 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3746 chan->remote_tx_win = rfc.txwin_size;
3748 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3750 chan->remote_max_tx = rfc.max_transmit;
3752 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3753 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3754 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3755 rfc.max_pdu_size = cpu_to_le16(size);
3756 chan->remote_mps = size;
3758 __l2cap_set_ertm_timeouts(chan, &rfc);
3760 set_bit(CONF_MODE_DONE, &chan->conf_state);
3762 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3763 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3765 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3766 chan->remote_id = efs.id;
3767 chan->remote_stype = efs.stype;
3768 chan->remote_msdu = le16_to_cpu(efs.msdu);
3769 chan->remote_flush_to =
3770 le32_to_cpu(efs.flush_to);
3771 chan->remote_acc_lat =
3772 le32_to_cpu(efs.acc_lat);
3773 chan->remote_sdu_itime =
3774 le32_to_cpu(efs.sdu_itime);
3775 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3777 (unsigned long) &efs, endptr - ptr);
3781 case L2CAP_MODE_STREAMING:
3782 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3783 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3784 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3785 rfc.max_pdu_size = cpu_to_le16(size);
3786 chan->remote_mps = size;
3788 set_bit(CONF_MODE_DONE, &chan->conf_state);
3790 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3791 (unsigned long) &rfc, endptr - ptr);
3796 result = L2CAP_CONF_UNACCEPT;
3798 memset(&rfc, 0, sizeof(rfc));
3799 rfc.mode = chan->mode;
3802 if (result == L2CAP_CONF_SUCCESS)
3803 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3805 rsp->scid = cpu_to_le16(chan->dcid);
3806 rsp->result = cpu_to_le16(result);
3807 rsp->flags = cpu_to_le16(0);
/* Parse the peer's Configuration Response (`rsp`, `len` bytes) and build
 * the follow-up Configuration Request into `data` (bounded by `size`).
 * Adjusts local parameters (imtu, flush_to, ack_win, mode timeouts) from
 * the accepted/counter-proposed options; *result may be downgraded to
 * UNACCEPT.  Returns the encoded request length (on an elided line) or
 * -ECONNREFUSED on an impossible mode/EFS proposal.
 */
3812 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3813 void *data, size_t size, u16 *result)
3815 struct l2cap_conf_req *req = data;
3816 void *ptr = req->data;
3817 void *endptr = data + size;
3820 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3821 struct l2cap_conf_efs efs;
3823 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3825 while (len >= L2CAP_CONF_OPT_SIZE) {
3826 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3831 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: reject but
 * fall back to the minimum ourselves. */
3834 if (val < L2CAP_DEFAULT_MIN_MTU) {
3835 *result = L2CAP_CONF_UNACCEPT;
3836 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3839 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3843 case L2CAP_CONF_FLUSH_TO:
3846 chan->flush_to = val;
3847 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3848 chan->flush_to, endptr - ptr);
3851 case L2CAP_CONF_RFC:
3852 if (olen != sizeof(rfc))
3854 memcpy(&rfc, (void *)val, olen);
/* A state-2 device may not change mode mid-negotiation. */
3855 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3856 rfc.mode != chan->mode)
3857 return -ECONNREFUSED;
3859 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3860 (unsigned long) &rfc, endptr - ptr);
3863 case L2CAP_CONF_EWS:
3866 chan->ack_win = min_t(u16, val, chan->ack_win);
3867 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3868 chan->tx_win, endptr - ptr);
3871 case L2CAP_CONF_EFS:
3872 if (olen != sizeof(efs))
3874 memcpy(&efs, (void *)val, olen);
3875 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3876 efs.stype != L2CAP_SERV_NOTRAFIC &&
3877 efs.stype != chan->local_stype)
3878 return -ECONNREFUSED;
3879 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3880 (unsigned long) &efs, endptr - ptr);
3883 case L2CAP_CONF_FCS:
3886 if (*result == L2CAP_CONF_PENDING)
3887 if (val == L2CAP_FCS_NONE)
3888 set_bit(CONF_RECV_NO_FCS,
/* Basic mode cannot be re-negotiated away by the peer. */
3894 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3895 return -ECONNREFUSED;
3897 chan->mode = rfc.mode;
3899 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3901 case L2CAP_MODE_ERTM:
3902 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3903 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3904 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3905 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3906 chan->ack_win = min_t(u16, chan->ack_win,
3909 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3910 chan->local_msdu = le16_to_cpu(efs.msdu);
3911 chan->local_sdu_itime =
3912 le32_to_cpu(efs.sdu_itime);
3913 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3914 chan->local_flush_to =
3915 le32_to_cpu(efs.flush_to);
3919 case L2CAP_MODE_STREAMING:
3920 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3924 req->dcid = cpu_to_le16(chan->dcid);
3925 req->flags = cpu_to_le16(0);
/* Build a minimal Configuration Response (no options) with the given
 * result and flags.  Returns the encoded length (on an elided line).
 */
3930 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3931 u16 result, u16 flags)
3933 struct l2cap_conf_rsp *rsp = data;
3934 void *ptr = rsp->data;
3936 BT_DBG("chan %p", chan);
3938 rsp->scid = cpu_to_le16(chan->dcid);
3939 rsp->result = cpu_to_le16(result);
3940 rsp->flags = cpu_to_le16(flags);
/* Send the deferred LE credit-based connection response (success) for a
 * channel whose acceptance was postponed (e.g. authorization), using the
 * ident saved from the original request.
 */
3945 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3947 struct l2cap_le_conn_rsp rsp;
3948 struct l2cap_conn *conn = chan->conn;
3950 BT_DBG("chan %p", chan);
3952 rsp.dcid = cpu_to_le16(chan->scid);
3953 rsp.mtu = cpu_to_le16(chan->imtu);
3954 rsp.mps = cpu_to_le16(chan->mps);
3955 rsp.credits = cpu_to_le16(chan->rx_credits);
3956 rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3958 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
/* Send the deferred Enhanced Credit Based connection response: one PDU
 * covering every channel on this connection that shares the saved ident
 * (an ECRED request can open several channels at once).  Each matched
 * channel's ident is reset (on an elided line) so only one response goes
 * out.
 */
3962 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3965 struct l2cap_ecred_conn_rsp rsp;
3968 struct l2cap_conn *conn = chan->conn;
3969 u16 ident = chan->ident;
3975 BT_DBG("chan %p ident %d", chan, ident);
3977 pdu.rsp.mtu = cpu_to_le16(chan->imtu);
3978 pdu.rsp.mps = cpu_to_le16(chan->mps);
3979 pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3980 pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3982 mutex_lock(&conn->chan_lock);
3984 list_for_each_entry(chan, &conn->chan_l, list) {
3985 if (chan->ident != ident)
3988 /* Reset ident so only one response is sent */
3991 /* Include all channels pending with the same ident */
3992 pdu.dcid[i++] = cpu_to_le16(chan->scid);
3995 mutex_unlock(&conn->chan_lock);
3997 l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
3998 sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
/* Send the deferred BR/EDR connection response (success), choosing the
 * CREATE_CHAN vs CONN response opcode (selection condition on an elided
 * line), then kick off configuration with the first Config Request if
 * one has not already been sent.
 */
4001 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
4003 struct l2cap_conn_rsp rsp;
4004 struct l2cap_conn *conn = chan->conn;
4008 rsp.scid = cpu_to_le16(chan->dcid);
4009 rsp.dcid = cpu_to_le16(chan->scid);
4010 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4011 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4014 rsp_code = L2CAP_CREATE_CHAN_RSP;
4016 rsp_code = L2CAP_CONN_RSP;
4018 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
4020 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
/* Only the first caller sends the Config Request. */
4022 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4025 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4026 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4027 chan->num_conf_req++;
/* Extract RFC (and extended window) parameters from a Configuration
 * Response buffer and apply them to the channel, using spec defaults if
 * the (misbehaving) peer omitted the options.  Only meaningful for ERTM
 * and streaming modes.
 */
4030 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
4034 /* Use sane default values in case a misbehaving remote device
4035 * did not send an RFC or extended window size option.
4037 u16 txwin_ext = chan->ack_win;
4038 struct l2cap_conf_rfc rfc = {
4040 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
4041 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
4042 .max_pdu_size = cpu_to_le16(chan->imtu),
4043 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
4046 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
4048 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
4051 while (len >= L2CAP_CONF_OPT_SIZE) {
4052 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
4057 case L2CAP_CONF_RFC:
4058 if (olen != sizeof(rfc))
4060 memcpy(&rfc, (void *)val, olen);
4062 case L2CAP_CONF_EWS:
/* NOTE(review): the txwin_ext assignment for EWS is on an elided line. */
4071 case L2CAP_MODE_ERTM:
4072 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
4073 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
4074 chan->mps = le16_to_cpu(rfc.max_pdu_size);
4075 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4076 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
4078 chan->ack_win = min_t(u16, chan->ack_win,
4081 case L2CAP_MODE_STREAMING:
4082 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject on the signalling channel.  If it
 * rejects our outstanding Information Request (NOT_UNDERSTOOD with a
 * matching ident), treat the feature-mask exchange as done and start any
 * channels that were waiting on it.
 */
4086 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4087 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4090 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Bounds check before touching the reject payload. */
4092 if (cmd_len < sizeof(*rej))
4095 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4098 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4099 cmd->ident == conn->info_ident) {
4100 cancel_delayed_work(&conn->info_timer);
4102 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4103 conn->info_ident = 0;
4105 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: locate a listening channel for
 * the PSM, validate security and the requested source CID, create and
 * register the new channel, then send the Connection Response (success,
 * pending, or an error result) and — when features are not yet known —
 * kick off an Information Request.  Returns the new channel or NULL.
 */
4111 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
4112 struct l2cap_cmd_hdr *cmd,
4113 u8 *data, u8 rsp_code, u8 amp_id)
4115 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
4116 struct l2cap_conn_rsp rsp;
4117 struct l2cap_chan *chan = NULL, *pchan;
4118 int result, status = L2CAP_CS_NO_INFO;
4120 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
4121 __le16 psm = req->psm;
4123 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
4125 /* Check if we have socket listening on psm */
4126 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
4127 &conn->hcon->dst, ACL_LINK);
4129 result = L2CAP_CR_BAD_PSM;
4133 mutex_lock(&conn->chan_lock);
4134 l2cap_chan_lock(pchan);
4136 /* Check if the ACL is secure enough (if not SDP) */
4137 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
4138 !hci_conn_check_link_mode(conn->hcon)) {
4139 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
4140 result = L2CAP_CR_SEC_BLOCK;
4144 result = L2CAP_CR_NO_MEM;
4146 /* Check for valid dynamic CID range (as per Erratum 3253) */
4147 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
4148 result = L2CAP_CR_INVALID_SCID;
4152 /* Check if we already have channel with that dcid */
4153 if (__l2cap_get_chan_by_dcid(conn, scid)) {
4154 result = L2CAP_CR_SCID_IN_USE;
4158 chan = pchan->ops->new_connection(pchan);
/* NOTE(review): the NULL check on chan is on an elided line. */
4162 /* For certain devices (ex: HID mouse), support for authentication,
4163 * pairing and bonding is optional. For such devices, inorder to avoid
4164 * the ACL alive for too long after L2CAP disconnection, reset the ACL
4165 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4167 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4169 bacpy(&chan->src, &conn->hcon->src);
4170 bacpy(&chan->dst, &conn->hcon->dst);
4171 chan->src_type = bdaddr_src_type(conn->hcon);
4172 chan->dst_type = bdaddr_dst_type(conn->hcon);
4175 chan->local_amp_id = amp_id;
4177 __l2cap_chan_add(conn, chan);
4181 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4183 chan->ident = cmd->ident;
4185 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4186 if (l2cap_chan_check_security(chan, false)) {
4187 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4188 l2cap_state_change(chan, BT_CONNECT2);
4189 result = L2CAP_CR_PEND;
4190 status = L2CAP_CS_AUTHOR_PEND;
4191 chan->ops->defer(chan);
4193 /* Force pending result for AMP controllers.
4194 * The connection will succeed after the
4195 * physical link is up.
4197 if (amp_id == AMP_ID_BREDR) {
4198 l2cap_state_change(chan, BT_CONFIG);
4199 result = L2CAP_CR_SUCCESS;
4201 l2cap_state_change(chan, BT_CONNECT2);
4202 result = L2CAP_CR_PEND;
4204 status = L2CAP_CS_NO_INFO;
4207 l2cap_state_change(chan, BT_CONNECT2);
4208 result = L2CAP_CR_PEND;
4209 status = L2CAP_CS_AUTHEN_PEND;
4212 l2cap_state_change(chan, BT_CONNECT2);
4213 result = L2CAP_CR_PEND;
4214 status = L2CAP_CS_NO_INFO;
4218 l2cap_chan_unlock(pchan);
4219 mutex_unlock(&conn->chan_lock);
4220 l2cap_chan_put(pchan);
4223 rsp.scid = cpu_to_le16(scid);
4224 rsp.dcid = cpu_to_le16(dcid);
4225 rsp.result = cpu_to_le16(result);
4226 rsp.status = cpu_to_le16(status);
4227 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
4229 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4230 struct l2cap_info_req info;
4231 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4233 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4234 conn->info_ident = l2cap_get_ident(conn);
4236 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4238 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4239 sizeof(info), &info);
/* Success path: immediately start configuration. */
4242 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4243 result == L2CAP_CR_SUCCESS) {
4245 set_bit(CONF_REQ_SENT, &chan->conf_state);
4246 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4247 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4248 chan->num_conf_req++;
4254 static int l2cap_connect_req(struct l2cap_conn *conn,
4255 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4257 struct hci_dev *hdev = conn->hcon->hdev;
4258 struct hci_conn *hcon = conn->hcon;
4260 if (cmd_len < sizeof(struct l2cap_conn_req))
4264 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4265 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4266 mgmt_device_connected(hdev, hcon, NULL, 0);
4267 hci_dev_unlock(hdev);
4269 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response or Create Channel Response from the peer.
 * Looks up the pending channel (by source CID, falling back to the
 * signalling ident), then on success moves it to BT_CONFIG and sends the
 * first Configure Request; on a pending result it marks the channel as
 * connect-pending; otherwise it deletes the channel with ECONNREFUSED.
 *
 * NOTE(review): this extraction is missing interior lines (returns,
 * braces, the switch header); the stray leading integers on each line
 * are stale line numbers from the original file — do not compile as-is.
 */
4273 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4274 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4277 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4278 u16 scid, dcid, result, status;
4279 struct l2cap_chan *chan;
/* Truncated responses are a protocol error. */
4283 if (cmd_len < sizeof(*rsp))
4286 scid = __le16_to_cpu(rsp->scid);
4287 dcid = __le16_to_cpu(rsp->dcid);
4288 result = __le16_to_cpu(rsp->result);
4289 status = __le16_to_cpu(rsp->status);
4291 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4292 dcid, scid, result, status);
4294 mutex_lock(&conn->chan_lock);
/* Primary lookup is by our source CID; fall back to the command ident
 * used when the request was sent.
 */
4297 chan = __l2cap_get_chan_by_scid(conn, scid);
4303 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4312 l2cap_chan_lock(chan);
4315 case L2CAP_CR_SUCCESS:
4316 l2cap_state_change(chan, BT_CONFIG);
4319 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Send the initial Configure Request only once per channel. */
4321 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4324 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4325 l2cap_build_conf_req(chan, req, sizeof(req)), req);
4326 chan->num_conf_req++;
4330 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Non-success, non-pending result: peer refused the connection. */
4334 l2cap_chan_del(chan, ECONNREFUSED);
4338 l2cap_chan_unlock(chan);
4341 mutex_unlock(&conn->chan_lock);
4346 static inline void set_default_fcs(struct l2cap_chan *chan)
4348 /* FCS is enabled only in ERTM or streaming mode, if one or both
4351 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4352 chan->fcs = L2CAP_FCS_NONE;
4353 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4354 chan->fcs = L2CAP_FCS_CRC16;
4357 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4358 u8 ident, u16 flags)
4360 struct l2cap_conn *conn = chan->conn;
4362 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4365 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4366 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4368 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4369 l2cap_build_conf_rsp(chan, data,
4370 L2CAP_CONF_SUCCESS, flags), data);
4373 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4376 struct l2cap_cmd_rej_cid rej;
4378 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4379 rej.scid = __cpu_to_le16(scid);
4380 rej.dcid = __cpu_to_le16(dcid);
4382 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Handle a Configure Request from the peer.
 * Accumulates (possibly multi-PDU) configuration options into
 * chan->conf_req, and once the final fragment arrives parses them,
 * replies with a Configure Response, and — when both directions of the
 * configuration handshake are done — brings the channel up (initialising
 * ERTM state first for ERTM/streaming modes).
 *
 * NOTE(review): this extraction is missing interior lines (returns,
 * braces, goto labels); the stray leading integers on each line are
 * stale line numbers from the original file — do not compile as-is.
 */
4385 static inline int l2cap_config_req(struct l2cap_conn *conn,
4386 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4389 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4392 struct l2cap_chan *chan;
4395 if (cmd_len < sizeof(*req))
4398 dcid = __le16_to_cpu(req->dcid);
4399 flags = __le16_to_cpu(req->flags);
4401 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
/* Unknown destination CID: reject per spec with an invalid-CID reject. */
4403 chan = l2cap_get_chan_by_scid(conn, dcid);
4405 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
/* Configuration is only legal in these states. */
4409 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4410 chan->state != BT_CONNECTED) {
4411 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4416 /* Reject if config buffer is too small. */
4417 len = cmd_len - sizeof(*req);
4418 if (chan->conf_len + len > sizeof(chan->conf_req)) {
4419 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4420 l2cap_build_conf_rsp(chan, rsp,
4421 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the accumulated request. */
4426 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4427 chan->conf_len += len;
4429 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4430 /* Incomplete config. Send empty response. */
4431 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4432 l2cap_build_conf_rsp(chan, rsp,
4433 L2CAP_CONF_SUCCESS, flags), rsp);
4437 /* Complete config. */
4438 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
/* Negative parse result: unrecoverable options, tear down. */
4440 l2cap_send_disconn_req(chan, ECONNRESET);
4444 chan->ident = cmd->ident;
4445 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4446 chan->num_conf_rsp++;
4448 /* Reset config buffer. */
4451 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finalise FCS and bring the channel up. */
4454 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4455 set_default_fcs(chan);
4457 if (chan->mode == L2CAP_MODE_ERTM ||
4458 chan->mode == L2CAP_MODE_STREAMING)
4459 err = l2cap_ertm_init(chan);
4462 l2cap_send_disconn_req(chan, -err);
4464 l2cap_chan_ready(chan);
4469 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4471 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4472 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4473 chan->num_conf_req++;
4476 /* Got Conf Rsp PENDING from remote side and assume we sent
4477 Conf Rsp PENDING in the code above */
4478 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4479 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4481 /* check compatibility */
4483 /* Send rsp for BR/EDR channel */
4485 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4487 chan->ident = cmd->ident;
4491 l2cap_chan_unlock(chan);
4492 l2cap_chan_put(chan);
/* Handle a Configure Response from the peer.
 * Dispatches on the result code: SUCCESS records the negotiated RFC
 * options; PENDING may trigger an EFS response or AMP logical-link
 * creation; UNKNOWN/UNACCEPT re-negotiates with a fresh Configure
 * Request (bounded by L2CAP_CONF_MAX_CONF_RSP); anything else
 * disconnects the channel.  When both directions are done the channel
 * is brought up.
 *
 * NOTE(review): this extraction is missing interior lines (returns,
 * braces, the switch header, goto labels); the stray leading integers
 * are stale line numbers from the original file — do not compile as-is.
 */
4496 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4497 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4500 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4501 u16 scid, flags, result;
4502 struct l2cap_chan *chan;
/* Remaining bytes after the fixed header are the option payload. */
4503 int len = cmd_len - sizeof(*rsp);
4506 if (cmd_len < sizeof(*rsp))
4509 scid = __le16_to_cpu(rsp->scid);
4510 flags = __le16_to_cpu(rsp->flags);
4511 result = __le16_to_cpu(rsp->result);
4513 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4516 chan = l2cap_get_chan_by_scid(conn, scid);
4521 case L2CAP_CONF_SUCCESS:
4522 l2cap_conf_rfc_get(chan, rsp->data, len);
4523 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4526 case L2CAP_CONF_PENDING:
4527 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4529 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4532 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4533 buf, sizeof(buf), &result);
/* Negative parse result: options incompatible, tear down. */
4535 l2cap_send_disconn_req(chan, ECONNRESET);
/* No high-speed link yet: answer on BR/EDR; otherwise create the
 * AMP logical link first when EFS is in use.
 */
4539 if (!chan->hs_hcon) {
4540 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4543 if (l2cap_check_efs(chan)) {
4544 amp_create_logical_link(chan);
4545 chan->ident = cmd->ident;
4551 case L2CAP_CONF_UNKNOWN:
4552 case L2CAP_CONF_UNACCEPT:
/* Retry negotiation only while under the response cap. */
4553 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4556 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4557 l2cap_send_disconn_req(chan, ECONNRESET);
4561 /* throw out any old stored conf requests */
4562 result = L2CAP_CONF_SUCCESS;
4563 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4564 req, sizeof(req), &result);
4566 l2cap_send_disconn_req(chan, ECONNRESET);
4570 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4571 L2CAP_CONF_REQ, len, req);
4572 chan->num_conf_req++;
4573 if (result != L2CAP_CONF_SUCCESS)
/* Default case: fatal result code — flag the error and disconnect. */
4580 l2cap_chan_set_err(chan, ECONNRESET);
4582 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4583 l2cap_send_disconn_req(chan, ECONNRESET);
/* More fragments to come: wait for the final one. */
4587 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4590 set_bit(CONF_INPUT_DONE, &chan->conf_state);
4592 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4593 set_default_fcs(chan);
4595 if (chan->mode == L2CAP_MODE_ERTM ||
4596 chan->mode == L2CAP_MODE_STREAMING)
4597 err = l2cap_ertm_init(chan);
4600 l2cap_send_disconn_req(chan, -err);
4602 l2cap_chan_ready(chan);
4606 l2cap_chan_unlock(chan);
4607 l2cap_chan_put(chan);
/* Handle a Disconnection Request from the peer.
 * Looks up the channel by our destination CID, acknowledges with a
 * Disconnection Response, then shuts down and deletes the channel.
 * The channel is held (refcount) across the teardown so the final put
 * is safe after ops->close().
 *
 * NOTE(review): this extraction is missing interior lines (returns,
 * braces); the stray leading integers are stale line numbers from the
 * original file — do not compile as-is.
 */
4611 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4612 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4615 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4616 struct l2cap_disconn_rsp rsp;
4618 struct l2cap_chan *chan;
/* Disconnection Request has a fixed size — exact match required. */
4620 if (cmd_len != sizeof(*req))
4623 scid = __le16_to_cpu(req->scid);
4624 dcid = __le16_to_cpu(req->dcid);
4626 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4628 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid; unknown CID gets a command reject. */
4630 chan = __l2cap_get_chan_by_scid(conn, dcid);
4632 mutex_unlock(&conn->chan_lock);
4633 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
/* Hold a reference so the channel survives until the final put. */
4637 l2cap_chan_hold(chan);
4638 l2cap_chan_lock(chan);
4640 rsp.dcid = cpu_to_le16(chan->scid);
4641 rsp.scid = cpu_to_le16(chan->dcid);
4642 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4644 chan->ops->set_shutdown(chan);
4646 l2cap_chan_del(chan, ECONNRESET);
4648 chan->ops->close(chan);
4650 l2cap_chan_unlock(chan);
4651 l2cap_chan_put(chan);
4653 mutex_unlock(&conn->chan_lock);
/* Handle a Disconnection Response from the peer.
 * Completes a disconnect we initiated: finds the channel by our source
 * CID and, if it is still in BT_DISCONN, deletes and closes it.  A
 * response for a channel in any other state is ignored.
 *
 * NOTE(review): this extraction is missing interior lines (returns,
 * braces); the stray leading integers are stale line numbers from the
 * original file — do not compile as-is.
 */
4658 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4659 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4662 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4664 struct l2cap_chan *chan;
4666 if (cmd_len != sizeof(*rsp))
4669 scid = __le16_to_cpu(rsp->scid);
4670 dcid = __le16_to_cpu(rsp->dcid);
4672 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4674 mutex_lock(&conn->chan_lock);
4676 chan = __l2cap_get_chan_by_scid(conn, scid);
4678 mutex_unlock(&conn->chan_lock);
/* Hold a reference across teardown (mirrors l2cap_disconnect_req). */
4682 l2cap_chan_hold(chan);
4683 l2cap_chan_lock(chan);
/* Only act if we are actually waiting for this response. */
4685 if (chan->state != BT_DISCONN) {
4686 l2cap_chan_unlock(chan);
4687 l2cap_chan_put(chan);
4688 mutex_unlock(&conn->chan_lock);
/* err = 0: this is a clean, locally-initiated disconnect. */
4692 l2cap_chan_del(chan, 0);
4694 chan->ops->close(chan);
4696 l2cap_chan_unlock(chan);
4697 l2cap_chan_put(chan);
4699 mutex_unlock(&conn->chan_lock);
/* Handle an Information Request from the peer.
 * Answers L2CAP_IT_FEAT_MASK with our feature mask (ERTM/streaming
 * always; extended flow/window only when A2MP is a local fixed channel)
 * and L2CAP_IT_FIXED_CHAN with our fixed-channel bitmap; any other type
 * gets a NOTSUPP response.
 *
 * NOTE(review): this extraction is missing interior lines (buffer
 * declarations, returns, braces); the stray leading integers are stale
 * line numbers from the original file — do not compile as-is.
 */
4704 static inline int l2cap_information_req(struct l2cap_conn *conn,
4705 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4708 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4711 if (cmd_len != sizeof(*req))
4714 type = __le16_to_cpu(req->type);
4716 BT_DBG("type 0x%4.4x", type);
4718 if (type == L2CAP_IT_FEAT_MASK) {
4720 u32 feat_mask = l2cap_feat_mask;
4721 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4722 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4723 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* ERTM and streaming are always advertised. */
4725 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended flow spec / window only with a local A2MP channel. */
4727 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4728 feat_mask |= L2CAP_FEAT_EXT_FLOW
4729 | L2CAP_FEAT_EXT_WINDOW;
4731 put_unaligned_le32(feat_mask, rsp->data);
4732 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4734 } else if (type == L2CAP_IT_FIXED_CHAN) {
4736 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4738 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4739 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* First octet is our fixed-channel map; remaining 7 are reserved. */
4740 rsp->data[0] = conn->local_fixed_chan;
4741 memset(rsp->data + 1, 0, 7);
4742 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4745 struct l2cap_info_rsp rsp;
4746 rsp.type = cpu_to_le16(type);
4747 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4748 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an Information Response from the peer.
 * Validates it against the outstanding request (ident must match and
 * the feature exchange must not already be done), cancels the info
 * timer, records the peer's feature mask / fixed-channel map, and
 * chains a FIXED_CHAN query after a FEAT_MASK answer when supported.
 * Once the exchange completes, pending channels are started via
 * l2cap_conn_start().
 *
 * NOTE(review): this extraction is missing interior lines (returns,
 * braces, the switch header); the stray leading integers are stale
 * line numbers from the original file — do not compile as-is.
 */
4755 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4756 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4759 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4762 if (cmd_len < sizeof(*rsp))
4765 type = __le16_to_cpu(rsp->type);
4766 result = __le16_to_cpu(rsp->result);
4768 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4770 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4771 if (cmd->ident != conn->info_ident ||
4772 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4775 cancel_delayed_work(&conn->info_timer);
/* Peer refused: treat the exchange as finished and start channels. */
4777 if (result != L2CAP_IR_SUCCESS) {
4778 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4779 conn->info_ident = 0;
4781 l2cap_conn_start(conn);
4787 case L2CAP_IT_FEAT_MASK:
4788 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer supports fixed channels, query its map next. */
4790 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4791 struct l2cap_info_req req;
4792 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4794 conn->info_ident = l2cap_get_ident(conn);
4796 l2cap_send_cmd(conn, conn->info_ident,
4797 L2CAP_INFO_REQ, sizeof(req), &req);
4799 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4800 conn->info_ident = 0;
4802 l2cap_conn_start(conn);
4806 case L2CAP_IT_FIXED_CHAN:
4807 conn->remote_fixed_chan = rsp->data[0];
4808 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4809 conn->info_ident = 0;
4811 l2cap_conn_start(conn);
/* Handle a Create Channel Request (AMP channel creation).
 * Controller id 0 (AMP_ID_BREDR) degrades to a normal BR/EDR connect;
 * otherwise the AMP controller id is validated (must be an HCI_AMP
 * device that is up, with an existing AMP_LINK to the peer) before the
 * channel is created and bound to the high-speed hci_conn.  Invalid
 * controller ids are answered with L2CAP_CR_BAD_AMP.
 *
 * NOTE(review): this extraction is missing interior lines (returns,
 * braces, error labels, psm/scid declarations); the stray leading
 * integers are stale line numbers from the original file — do not
 * compile as-is.
 */
4818 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4819 struct l2cap_cmd_hdr *cmd,
4820 u16 cmd_len, void *data)
4822 struct l2cap_create_chan_req *req = data;
4823 struct l2cap_create_chan_rsp rsp;
4824 struct l2cap_chan *chan;
4825 struct hci_dev *hdev;
4828 if (cmd_len != sizeof(*req))
/* Create Channel is only legal when A2MP is available locally. */
4831 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4834 psm = le16_to_cpu(req->psm);
4835 scid = le16_to_cpu(req->scid);
4837 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4839 /* For controller id 0 make BR/EDR connection */
4840 if (req->amp_id == AMP_ID_BREDR) {
4841 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4846 /* Validate AMP controller id */
4847 hdev = hci_dev_get(req->amp_id);
4851 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4856 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4859 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4860 struct hci_conn *hs_hcon;
/* There must already be an AMP physical link to this peer. */
4862 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4866 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4871 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4873 mgr->bredr_chan = chan;
4874 chan->hs_hcon = hs_hcon;
/* AMP links do their own integrity checking; disable L2CAP FCS. */
4875 chan->fcs = L2CAP_FCS_NONE;
4876 conn->mtu = hdev->block_mtu;
/* Error path: report a bad AMP controller id to the peer. */
4885 rsp.scid = cpu_to_le16(scid);
4886 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4887 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4889 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4895 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4897 struct l2cap_move_chan_req req;
4900 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4902 ident = l2cap_get_ident(chan->conn);
4903 chan->ident = ident;
4905 req.icid = cpu_to_le16(chan->scid);
4906 req.dest_amp_id = dest_amp_id;
4908 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4911 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4914 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4916 struct l2cap_move_chan_rsp rsp;
4918 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4920 rsp.icid = cpu_to_le16(chan->dcid);
4921 rsp.result = cpu_to_le16(result);
4923 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4927 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4929 struct l2cap_move_chan_cfm cfm;
4931 BT_DBG("chan %p, result 0x%4.4x", chan, result);
4933 chan->ident = l2cap_get_ident(chan->conn);
4935 cfm.icid = cpu_to_le16(chan->scid);
4936 cfm.result = cpu_to_le16(result);
4938 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4941 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4944 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4946 struct l2cap_move_chan_cfm cfm;
4948 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4950 cfm.icid = cpu_to_le16(icid);
4951 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4953 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4957 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4960 struct l2cap_move_chan_cfm_rsp rsp;
4962 BT_DBG("icid 0x%4.4x", icid);
4964 rsp.icid = cpu_to_le16(icid);
4965 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4968 static void __release_logical_link(struct l2cap_chan *chan)
4970 chan->hs_hchan = NULL;
4971 chan->hs_hcon = NULL;
4973 /* Placeholder - release the logical link */
/* Handle failure of AMP logical-link setup for a channel.
 * If the channel was never connected the create has failed, so the
 * channel is disconnected; otherwise the outcome depends on our role in
 * the move: a responder reports NOT_SUPP, an initiator cleans up and
 * sends an unconfirmed move confirmation.
 *
 * NOTE(review): this extraction is missing interior lines (returns,
 * braces, the closing of the switch); the stray leading integers are
 * stale line numbers from the original file — do not compile as-is.
 */
4976 static void l2cap_logical_fail(struct l2cap_chan *chan)
4978 /* Logical link setup failed */
4979 if (chan->state != BT_CONNECTED) {
4980 /* Create channel failure, disconnect */
4981 l2cap_send_disconn_req(chan, ECONNRESET);
4985 switch (chan->move_role) {
4986 case L2CAP_MOVE_ROLE_RESPONDER:
4987 l2cap_move_done(chan);
4988 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4990 case L2CAP_MOVE_ROLE_INITIATOR:
4991 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4992 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4993 /* Remote has only sent pending or
4994 * success responses, clean up
4996 l2cap_move_done(chan);
4999 /* Other amp move states imply that the move
5000 * has already aborted
5002 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
/* Complete channel creation once the AMP logical link is up.
 * Binds the hci_chan to the channel, sends the deferred EFS Configure
 * Response, and — if the inbound configuration already finished —
 * initialises ERTM and marks the channel ready (or disconnects on
 * ERTM-init failure).
 *
 * NOTE(review): this extraction is missing interior lines (braces, err
 * declaration, the if/else around the disconnect); the stray leading
 * integers are stale line numbers — do not compile as-is.
 */
5007 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
5008 struct hci_chan *hchan)
5010 struct l2cap_conf_rsp rsp;
5012 chan->hs_hchan = hchan;
5013 chan->hs_hcon->l2cap_data = chan->conn;
/* Answer the Configure Request that was deferred pending this link. */
5015 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
5017 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
5020 set_default_fcs(chan);
5022 err = l2cap_ertm_init(chan);
5024 l2cap_send_disconn_req(chan, -err);
5026 l2cap_chan_ready(chan);
/* Complete a channel move once the AMP logical link is up.
 * Advances the move state machine: either keep waiting for the peer's
 * success response, or send the appropriate confirmation/response for
 * our role.  An unexpected move state releases the just-created link
 * and forces the channel back to a stable state.
 *
 * NOTE(review): this extraction is missing interior lines (breaks,
 * braces, the default label); the stray leading integers are stale
 * line numbers from the original file — do not compile as-is.
 */
5030 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
5031 struct hci_chan *hchan)
5033 chan->hs_hcon = hchan->conn;
5034 chan->hs_hcon->l2cap_data = chan->conn;
5036 BT_DBG("move_state %d", chan->move_state);
5038 switch (chan->move_state) {
5039 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5040 /* Move confirm will be sent after a success
5041 * response is received
5043 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5045 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
/* A locally-busy channel defers the confirmation until unblocked. */
5046 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5047 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5048 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5049 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5050 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5051 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5052 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5053 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5057 /* Move was not in expected state, free the channel */
5058 __release_logical_link(chan);
5060 chan->move_state = L2CAP_MOVE_STABLE;
5064 /* Call with chan locked */
/* Confirmation callback for AMP logical-link setup.
 * On failure, runs the failure path and drops the link references.  On
 * success, a not-yet-connected channel finishes creation (unless it is
 * on BR/EDR, where the logical link is irrelevant), while a connected
 * channel finishes a move.
 *
 * NOTE(review): this extraction is missing interior lines (the status
 * check, returns, braces); the stray leading integers are stale line
 * numbers from the original file — do not compile as-is.
 */
5065 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5068 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
5071 l2cap_logical_fail(chan);
5072 __release_logical_link(chan);
5076 if (chan->state != BT_CONNECTED) {
5077 /* Ignore logical link if channel is on BR/EDR */
5078 if (chan->local_amp_id != AMP_ID_BREDR)
5079 l2cap_logical_finish_create(chan, hchan);
5081 l2cap_logical_finish_move(chan, hchan);
/* Begin moving a channel between BR/EDR and an AMP controller.
 * From BR/EDR, a move is only started when the channel policy prefers
 * AMP, and physical-link setup is still a placeholder; from AMP, move
 * setup runs immediately and a Move Channel Request (dest 0 = BR/EDR)
 * is sent.
 *
 * NOTE(review): this extraction is missing interior lines (returns,
 * braces, the else); the stray leading integers are stale line numbers
 * from the original file — do not compile as-is.
 */
5085 void l2cap_move_start(struct l2cap_chan *chan)
5087 BT_DBG("chan %p", chan);
5089 if (chan->local_amp_id == AMP_ID_BREDR) {
/* Moving away from BR/EDR requires an AMP-preferred policy. */
5090 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5092 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5093 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5094 /* Placeholder - start physical link setup */
5096 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5097 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5099 l2cap_move_setup(chan);
5100 l2cap_send_move_chan_req(chan, 0);
/* Continue AMP channel creation after physical-link setup resolves.
 * For an outgoing channel (BT_CONNECT): on success send a Create
 * Channel Request on the AMP; on failure fall back to a plain BR/EDR
 * Connection Request.  For an incoming channel: answer the pending
 * Create Channel Request with success (then start configuration) or
 * with a no-resources refusal.  FCS is disabled for AMP channels.
 *
 * NOTE(review): this extraction is missing interior lines (returns,
 * braces, buf declaration); the stray leading integers are stale line
 * numbers from the original file — do not compile as-is.
 */
5104 static void l2cap_do_create(struct l2cap_chan *chan, int result,
5105 u8 local_amp_id, u8 remote_amp_id)
5107 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
5108 local_amp_id, remote_amp_id);
5110 chan->fcs = L2CAP_FCS_NONE;
5112 /* Outgoing channel on AMP */
5113 if (chan->state == BT_CONNECT) {
5114 if (result == L2CAP_CR_SUCCESS) {
5115 chan->local_amp_id = local_amp_id;
5116 l2cap_send_create_chan_req(chan, remote_amp_id);
5118 /* Revert to BR/EDR connect */
5119 l2cap_send_conn_req(chan);
5125 /* Incoming channel on AMP */
5126 if (__l2cap_no_conn_pending(chan)) {
5127 struct l2cap_conn_rsp rsp;
5129 rsp.scid = cpu_to_le16(chan->dcid);
5130 rsp.dcid = cpu_to_le16(chan->scid);
5132 if (result == L2CAP_CR_SUCCESS) {
5133 /* Send successful response */
5134 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
5135 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5137 /* Send negative response */
5138 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
5139 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5142 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
/* On success, kick off configuration immediately. */
5145 if (result == L2CAP_CR_SUCCESS) {
5146 l2cap_state_change(chan, BT_CONFIG);
5147 set_bit(CONF_REQ_SENT, &chan->conf_state);
5148 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
5150 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
5151 chan->num_conf_req++;
5156 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5159 l2cap_move_setup(chan);
5160 chan->move_id = local_amp_id;
5161 chan->move_state = L2CAP_MOVE_WAIT_RSP;
5163 l2cap_send_move_chan_req(chan, remote_amp_id);
/* Responder side: react to physical-link setup for an incoming move.
 * If the logical link is already connected, bind it and answer with
 * success (also notifying the ERTM layer via l2cap_logical_cfm);
 * otherwise wait for the link, or refuse the move when no link is
 * available.
 *
 * NOTE(review): the hchan lookup is still a placeholder (hchan stays
 * NULL here) and this extraction is missing interior lines (braces,
 * the result check); the stray leading integers are stale line numbers
 * from the original file — do not compile as-is.
 */
5166 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
5168 struct hci_chan *hchan = NULL;
5170 /* Placeholder - get hci_chan for logical link */
5173 if (hchan->state == BT_CONNECTED) {
5174 /* Logical link is ready to go */
5175 chan->hs_hcon = hchan->conn;
5176 chan->hs_hcon->l2cap_data = chan->conn;
5177 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5178 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5180 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5182 /* Wait for logical link to be ready */
5183 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5186 /* Logical link not available */
5187 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
/* Abort an in-progress channel move.
 * A responder still owes the peer a Move Channel Response (BAD_ID for
 * -EINVAL, NOT_ALLOWED otherwise).  The move state is reset to stable
 * and ERTM transmission is restarted.
 *
 * NOTE(review): this extraction is missing interior lines (braces, the
 * rsp_result declaration); the stray leading integers are stale line
 * numbers from the original file — do not compile as-is.
 */
5191 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5193 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5195 if (result == -EINVAL)
5196 rsp_result = L2CAP_MR_BAD_ID;
5198 rsp_result = L2CAP_MR_NOT_ALLOWED;
5200 l2cap_send_move_chan_rsp(chan, rsp_result);
5203 chan->move_role = L2CAP_MOVE_ROLE_NONE;
5204 chan->move_state = L2CAP_MOVE_STABLE;
5206 /* Restart data transmission */
5207 l2cap_ertm_send(chan);
5210 /* Invoke with locked chan */
/* Confirmation callback for AMP physical-link setup.
 * Ignored for dying channels.  A not-yet-connected channel continues
 * creation; a connected channel either cancels the move (on failure)
 * or continues it according to our move role.
 *
 * NOTE(review): this extraction is missing interior lines (returns,
 * breaks, braces); the stray leading integers are stale line numbers
 * from the original file — do not compile as-is.
 */
5211 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5213 u8 local_amp_id = chan->local_amp_id;
5214 u8 remote_amp_id = chan->remote_amp_id;
5216 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5217 chan, result, local_amp_id, remote_amp_id);
/* Nothing to do for channels already being torn down. */
5219 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
5222 if (chan->state != BT_CONNECTED) {
5223 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5224 } else if (result != L2CAP_MR_SUCCESS) {
5225 l2cap_do_move_cancel(chan, result);
5227 switch (chan->move_role) {
5228 case L2CAP_MOVE_ROLE_INITIATOR:
5229 l2cap_do_move_initiate(chan, local_amp_id,
5232 case L2CAP_MOVE_ROLE_RESPONDER:
5233 l2cap_do_move_respond(chan, result);
5236 l2cap_do_move_cancel(chan, result);
/* Handle a Move Channel Request from the peer.
 * Validates preconditions (A2MP present, dynamic CID, channel policy,
 * ERTM/streaming mode, differing controller id, valid destination AMP),
 * resolves move collisions by bd_addr comparison, then either accepts
 * the move toward BR/EDR (SUCCESS / PEND if locally busy) or starts
 * AMP physical-link acceptance (PEND).  All paths answer with a Move
 * Channel Response.
 *
 * NOTE(review): this extraction is missing interior lines (returns,
 * braces, goto labels, hci_dev_put); the stray leading integers are
 * stale line numbers from the original file — do not compile as-is.
 */
5242 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5243 struct l2cap_cmd_hdr *cmd,
5244 u16 cmd_len, void *data)
5246 struct l2cap_move_chan_req *req = data;
5247 struct l2cap_move_chan_rsp rsp;
5248 struct l2cap_chan *chan;
5250 u16 result = L2CAP_MR_NOT_ALLOWED;
5252 if (cmd_len != sizeof(*req))
5255 icid = le16_to_cpu(req->icid);
5257 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
5259 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
/* Unknown ICID: reply NOT_ALLOWED without a channel. */
5262 chan = l2cap_get_chan_by_dcid(conn, icid);
5264 rsp.icid = cpu_to_le16(icid);
5265 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5266 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5271 chan->ident = cmd->ident;
/* Only dynamic, AMP-movable, ERTM/streaming channels may move. */
5273 if (chan->scid < L2CAP_CID_DYN_START ||
5274 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5275 (chan->mode != L2CAP_MODE_ERTM &&
5276 chan->mode != L2CAP_MODE_STREAMING)) {
5277 result = L2CAP_MR_NOT_ALLOWED;
5278 goto send_move_response;
5281 if (chan->local_amp_id == req->dest_amp_id) {
5282 result = L2CAP_MR_SAME_ID;
5283 goto send_move_response;
5286 if (req->dest_amp_id != AMP_ID_BREDR) {
5287 struct hci_dev *hdev;
5288 hdev = hci_dev_get(req->dest_amp_id);
5289 if (!hdev || hdev->dev_type != HCI_AMP ||
5290 !test_bit(HCI_UP, &hdev->flags)) {
5294 result = L2CAP_MR_BAD_ID;
5295 goto send_move_response;
5300 /* Detect a move collision. Only send a collision response
5301 * if this side has "lost", otherwise proceed with the move.
5302 * The winner has the larger bd_addr.
5304 if ((__chan_is_moving(chan) ||
5305 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5306 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5307 result = L2CAP_MR_COLLISION;
5308 goto send_move_response;
5311 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5312 l2cap_move_setup(chan);
5313 chan->move_id = req->dest_amp_id;
5315 if (req->dest_amp_id == AMP_ID_BREDR) {
5316 /* Moving to BR/EDR */
5317 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5318 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5319 result = L2CAP_MR_PEND;
5321 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5322 result = L2CAP_MR_SUCCESS;
5325 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5326 /* Placeholder - uncomment when amp functions are available */
5327 /*amp_accept_physical(chan, req->dest_amp_id);*/
5328 result = L2CAP_MR_PEND;
5332 l2cap_send_move_chan_rsp(chan, result);
5334 l2cap_chan_unlock(chan);
5335 l2cap_chan_put(chan);
/* Continue an initiator-side move after a SUCCESS or PEND Move Channel
 * Response.  Re-arms the timer for PEND, then advances the move state
 * machine — possibly sending the Move Channel Confirmation immediately
 * if the logical link is already connected, or waiting for it
 * otherwise.  Any unexpected state aborts the move with an unconfirmed
 * confirmation.
 *
 * NOTE(review): the hchan lookup is still a placeholder (hchan stays
 * NULL) and this extraction is missing interior lines (returns,
 * breaks, braces, the default label); the stray leading integers are
 * stale line numbers from the original file — do not compile as-is.
 */
5340 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5342 struct l2cap_chan *chan;
5343 struct hci_chan *hchan = NULL;
/* No matching channel: still confirm (unconfirmed) per spec. */
5345 chan = l2cap_get_chan_by_scid(conn, icid);
5347 l2cap_send_move_chan_cfm_icid(conn, icid);
5351 __clear_chan_timer(chan);
5352 if (result == L2CAP_MR_PEND)
5353 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5355 switch (chan->move_state) {
5356 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5357 /* Move confirm will be sent when logical link
5360 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5362 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5363 if (result == L2CAP_MR_PEND) {
5365 } else if (test_bit(CONN_LOCAL_BUSY,
5366 &chan->conn_state)) {
5367 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5369 /* Logical link is up or moving to BR/EDR,
5372 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5373 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5376 case L2CAP_MOVE_WAIT_RSP:
5378 if (result == L2CAP_MR_SUCCESS) {
5379 /* Remote is ready, send confirm immediately
5380 * after logical link is ready
5382 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5384 /* Both logical link and move success
5385 * are required to confirm
5387 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5390 /* Placeholder - get hci_chan for logical link */
5392 /* Logical link not available */
5393 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5397 /* If the logical link is not yet connected, do not
5398 * send confirmation.
5400 if (hchan->state != BT_CONNECTED)
5403 /* Logical link is already ready to go */
5405 chan->hs_hcon = hchan->conn;
5406 chan->hs_hcon->l2cap_data = chan->conn;
5408 if (result == L2CAP_MR_SUCCESS) {
5409 /* Can confirm now */
5410 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5412 /* Now only need move success
5415 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5418 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5421 /* Any other amp move state means the move failed. */
5422 chan->move_id = chan->local_amp_id;
5423 l2cap_move_done(chan);
5424 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5427 l2cap_chan_unlock(chan);
5428 l2cap_chan_put(chan);
/* Handle a failed Move Channel Response.
 * Looks the channel up by the request's ident (falling back to a bare
 * ICID confirmation if not found).  An initiator that lost a collision
 * switches to the responder role; otherwise the move is cancelled.
 * Either way an unconfirmed Move Channel Confirmation is sent.
 *
 * NOTE(review): this extraction is missing interior lines (returns,
 * braces); the stray leading integers are stale line numbers from the
 * original file — do not compile as-is.
 */
5431 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5434 struct l2cap_chan *chan;
5436 chan = l2cap_get_chan_by_ident(conn, ident);
5438 /* Could not locate channel, icid is best guess */
5439 l2cap_send_move_chan_cfm_icid(conn, icid);
5443 __clear_chan_timer(chan);
5445 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
/* Lost a collision: peer becomes initiator, we respond. */
5446 if (result == L2CAP_MR_COLLISION) {
5447 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5449 /* Cleanup - cancel move */
5450 chan->move_id = chan->local_amp_id;
5451 l2cap_move_done(chan);
5455 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5457 l2cap_chan_unlock(chan);
5458 l2cap_chan_put(chan);
/* Handle a Move Channel Response from the peer.
 * SUCCESS/PEND results continue the move via l2cap_move_continue();
 * every other result is treated as a failure and routed to
 * l2cap_move_fail().
 *
 * NOTE(review): this extraction is missing interior lines (icid/result
 * declarations, returns, braces); the stray leading integers are stale
 * line numbers from the original file — do not compile as-is.
 */
5461 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5462 struct l2cap_cmd_hdr *cmd,
5463 u16 cmd_len, void *data)
5465 struct l2cap_move_chan_rsp *rsp = data;
5468 if (cmd_len != sizeof(*rsp))
5471 icid = le16_to_cpu(rsp->icid);
5472 result = le16_to_cpu(rsp->result);
5474 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5476 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5477 l2cap_move_continue(conn, icid, result);
5479 l2cap_move_fail(conn, cmd->ident, icid, result);
/* Handle a Move Channel Confirmation from the peer (responder side).
 * If we were waiting for it, commit (CONFIRMED: adopt the new
 * controller id, releasing the logical link when landing on BR/EDR) or
 * roll back (UNCONFIRMED), then finish the move.  A confirmation for an
 * unknown ICID still gets a response, as the spec requires.
 *
 * NOTE(review): this extraction is missing interior lines (returns,
 * braces); the stray leading integers are stale line numbers from the
 * original file — do not compile as-is.
 */
5484 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5485 struct l2cap_cmd_hdr *cmd,
5486 u16 cmd_len, void *data)
5488 struct l2cap_move_chan_cfm *cfm = data;
5489 struct l2cap_chan *chan;
5492 if (cmd_len != sizeof(*cfm))
5495 icid = le16_to_cpu(cfm->icid);
5496 result = le16_to_cpu(cfm->result);
5498 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5500 chan = l2cap_get_chan_by_dcid(conn, icid);
5502 /* Spec requires a response even if the icid was not found */
5503 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5507 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5508 if (result == L2CAP_MC_CONFIRMED) {
5509 chan->local_amp_id = chan->move_id;
/* Back on BR/EDR: the AMP logical link is no longer needed. */
5510 if (chan->local_amp_id == AMP_ID_BREDR)
5511 __release_logical_link(chan);
/* Unconfirmed: keep the current controller id. */
5513 chan->move_id = chan->local_amp_id;
5516 l2cap_move_done(chan);
5519 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5521 l2cap_chan_unlock(chan);
5522 l2cap_chan_put(chan);
/* Handle a Move Channel Confirmation Response (initiator side).
 * Clears the move timer; if we were waiting for this acknowledgement,
 * commit the new controller id (releasing the logical link when
 * returning to BR/EDR) and finish the move.
 *
 * NOTE(review): this extraction is missing interior lines (returns,
 * braces); the stray leading integers are stale line numbers from the
 * original file — do not compile as-is.
 */
5527 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5528 struct l2cap_cmd_hdr *cmd,
5529 u16 cmd_len, void *data)
5531 struct l2cap_move_chan_cfm_rsp *rsp = data;
5532 struct l2cap_chan *chan;
5535 if (cmd_len != sizeof(*rsp))
5538 icid = le16_to_cpu(rsp->icid);
5540 BT_DBG("icid 0x%4.4x", icid);
5542 chan = l2cap_get_chan_by_scid(conn, icid);
5546 __clear_chan_timer(chan);
5548 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5549 chan->local_amp_id = chan->move_id;
/* Back on BR/EDR with a live hci_chan: drop the logical link. */
5551 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5552 __release_logical_link(chan);
5554 l2cap_move_done(chan);
5557 l2cap_chan_unlock(chan);
5558 l2cap_chan_put(chan);
/* Handle an LE Connection Parameter Update Request.
 * Only legal when we are the LE master; validates the proposed
 * interval/latency/timeout via hci_check_conn_params(), answers with
 * accepted/rejected, and on acceptance applies the update and notifies
 * the management interface.
 *
 * NOTE(review): this extraction is missing interior lines (returns,
 * braces, the err check and store_hint declaration); the stray leading
 * integers are stale line numbers from the original file — do not
 * compile as-is.
 */
5563 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5564 struct l2cap_cmd_hdr *cmd,
5565 u16 cmd_len, u8 *data)
5567 struct hci_conn *hcon = conn->hcon;
5568 struct l2cap_conn_param_update_req *req;
5569 struct l2cap_conn_param_update_rsp rsp;
5570 u16 min, max, latency, to_multiplier;
/* Only the master may be asked to update connection parameters. */
5573 if (hcon->role != HCI_ROLE_MASTER)
5576 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5579 req = (struct l2cap_conn_param_update_req *) data;
5580 min = __le16_to_cpu(req->min);
5581 max = __le16_to_cpu(req->max);
5582 latency = __le16_to_cpu(req->latency);
5583 to_multiplier = __le16_to_cpu(req->to_multiplier);
5585 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5586 min, max, latency, to_multiplier);
5588 memset(&rsp, 0, sizeof(rsp));
5590 err = hci_check_conn_params(min, max, latency, to_multiplier);
5592 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5594 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5596 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: apply the update and record the new parameters. */
5602 store_hint = hci_le_conn_update(hcon, min, max, latency,
5604 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5605 store_hint, min, max, latency,
/* Handle an LE Credit Based Connection Response.
 * Sanity-checks the returned parameters (MTU/MPS >= 23, dcid within
 * the LE dynamic range and not already in use), then on success stores
 * the peer's MTU/MPS/credits and readies the channel; on an
 * authentication/encryption failure it raises the security level and
 * retries via SMP (unless already at MITM level); any other result
 * deletes the channel.
 *
 * NOTE(review): this extraction is missing interior lines (returns,
 * braces, the switch header, dcid/mtu assignments, error labels); the
 * stray leading integers are stale line numbers from the original file
 * — do not compile as-is.
 */
5613 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5614 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5617 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5618 struct hci_conn *hcon = conn->hcon;
5619 u16 dcid, mtu, mps, credits, result;
5620 struct l2cap_chan *chan;
5623 if (cmd_len < sizeof(*rsp))
5626 dcid = __le16_to_cpu(rsp->dcid);
5627 mtu = __le16_to_cpu(rsp->mtu);
5628 mps = __le16_to_cpu(rsp->mps);
5629 credits = __le16_to_cpu(rsp->credits);
5630 result = __le16_to_cpu(rsp->result);
/* 23 is the minimum legal LE MTU/MPS; dcid must be LE-dynamic. */
5632 if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5633 dcid < L2CAP_CID_DYN_START ||
5634 dcid > L2CAP_CID_LE_DYN_END))
5637 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5638 dcid, mtu, mps, credits, result);
5640 mutex_lock(&conn->chan_lock);
5642 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5650 l2cap_chan_lock(chan);
5653 case L2CAP_CR_LE_SUCCESS:
/* Reject a dcid the peer already assigned to another channel. */
5654 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5662 chan->remote_mps = mps;
5663 chan->tx_credits = credits;
5664 l2cap_chan_ready(chan);
5667 case L2CAP_CR_LE_AUTHENTICATION:
5668 case L2CAP_CR_LE_ENCRYPTION:
5669 /* If we already have MITM protection we can't do
5672 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5673 l2cap_chan_del(chan, ECONNREFUSED);
/* Escalate security one level and retry via SMP. */
5677 sec_level = hcon->sec_level + 1;
5678 if (chan->sec_level < sec_level)
5679 chan->sec_level = sec_level;
5681 /* We'll need to send a new Connect Request */
5682 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5684 smp_conn_security(hcon, chan->sec_level);
5688 l2cap_chan_del(chan, ECONNREFUSED);
5692 l2cap_chan_unlock(chan);
5695 mutex_unlock(&conn->chan_lock);
/* Dispatch a single BR/EDR signaling command to its handler.
 *
 * Handlers that can fail propagate their error through @err; unknown
 * opcodes are logged and (per the elided tail) rejected. Echo requests
 * are answered inline by mirroring the payload back to the sender.
 */
5700 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5701 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5706 switch (cmd->code) {
5707 case L2CAP_COMMAND_REJ:
5708 l2cap_command_rej(conn, cmd, cmd_len, data);
5711 case L2CAP_CONN_REQ:
5712 err = l2cap_connect_req(conn, cmd, cmd_len, data);
5715 case L2CAP_CONN_RSP:
5716 case L2CAP_CREATE_CHAN_RSP:
/* Connection and Create Channel responses share one handler. */
5717 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5720 case L2CAP_CONF_REQ:
5721 err = l2cap_config_req(conn, cmd, cmd_len, data);
5724 case L2CAP_CONF_RSP:
5725 l2cap_config_rsp(conn, cmd, cmd_len, data);
5728 case L2CAP_DISCONN_REQ:
5729 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5732 case L2CAP_DISCONN_RSP:
5733 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5736 case L2CAP_ECHO_REQ:
/* Echo: reply with the request's own payload and ident. */
5737 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5740 case L2CAP_ECHO_RSP:
5743 case L2CAP_INFO_REQ:
5744 err = l2cap_information_req(conn, cmd, cmd_len, data);
5747 case L2CAP_INFO_RSP:
5748 l2cap_information_rsp(conn, cmd, cmd_len, data);
5751 case L2CAP_CREATE_CHAN_REQ:
5752 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5755 case L2CAP_MOVE_CHAN_REQ:
5756 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5759 case L2CAP_MOVE_CHAN_RSP:
5760 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5763 case L2CAP_MOVE_CHAN_CFM:
5764 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5767 case L2CAP_MOVE_CHAN_CFM_RSP:
5768 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5772 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Handle an incoming LE Credit Based Connection Request
 * (L2CAP_LE_CONN_REQ).
 *
 * Validates the request parameters, finds a listening channel for the
 * PSM, performs the security check, allocates and registers the new
 * child channel, and finally sends an LE Connection Response (unless
 * setup is deferred to userspace via FLAG_DEFER_SETUP).
 *
 * NOTE(review): interior lines are elided in this extract.
 */
5780 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5781 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5784 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5785 struct l2cap_le_conn_rsp rsp;
5786 struct l2cap_chan *chan, *pchan;
5787 u16 dcid, scid, credits, mtu, mps;
/* Request has a fixed size; anything else is malformed. */
5791 if (cmd_len != sizeof(*req))
5794 scid = __le16_to_cpu(req->scid);
5795 mtu = __le16_to_cpu(req->mtu);
5796 mps = __le16_to_cpu(req->mps);
/* 23 octets is the minimum permitted MTU/MPS for LE CoC. */
5801 if (mtu < 23 || mps < 23)
5804 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5807 /* Check if we have socket listening on psm */
5808 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5809 &conn->hcon->dst, LE_LINK);
5811 result = L2CAP_CR_LE_BAD_PSM;
5816 mutex_lock(&conn->chan_lock);
5817 l2cap_chan_lock(pchan);
/* Reject if the link's security does not satisfy the listener. */
5819 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5821 result = L2CAP_CR_LE_AUTHENTICATION;
5823 goto response_unlock;
5826 /* Check for valid dynamic CID range */
5827 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5828 result = L2CAP_CR_LE_INVALID_SCID;
5830 goto response_unlock;
5833 /* Check if we already have channel with that dcid */
5834 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5835 result = L2CAP_CR_LE_SCID_IN_USE;
5837 goto response_unlock;
/* Ask the listener to spawn a child channel for this connection. */
5840 chan = pchan->ops->new_connection(pchan);
5842 result = L2CAP_CR_LE_NO_MEM;
5843 goto response_unlock;
5846 bacpy(&chan->src, &conn->hcon->src);
5847 bacpy(&chan->dst, &conn->hcon->dst);
5848 chan->src_type = bdaddr_src_type(conn->hcon);
5849 chan->dst_type = bdaddr_dst_type(conn->hcon);
5853 chan->remote_mps = mps;
5855 __l2cap_chan_add(conn, chan);
/* Initialize LE flow control with the credits granted by the peer. */
5857 l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5860 credits = chan->rx_credits;
5862 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5864 chan->ident = cmd->ident;
5866 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5867 l2cap_state_change(chan, BT_CONNECT2);
5868 /* The following result value is actually not defined
5869 * for LE CoC but we use it to let the function know
5870 * that it should bail out after doing its cleanup
5871 * instead of sending a response.
5873 result = L2CAP_CR_PEND;
5874 chan->ops->defer(chan);
5876 l2cap_chan_ready(chan);
5877 result = L2CAP_CR_LE_SUCCESS;
5881 l2cap_chan_unlock(pchan);
5882 mutex_unlock(&conn->chan_lock);
5883 l2cap_chan_put(pchan);
/* Deferred setup: the response is sent later by the socket layer. */
5885 if (result == L2CAP_CR_PEND)
5890 rsp.mtu = cpu_to_le16(chan->imtu);
5891 rsp.mps = cpu_to_le16(chan->mps);
5897 rsp.dcid = cpu_to_le16(dcid);
5898 rsp.credits = cpu_to_le16(credits);
5899 rsp.result = cpu_to_le16(result);
5901 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
/* Handle an LE Flow Control Credit packet (L2CAP_LE_CREDITS).
 *
 * Adds the peer-granted credits to the channel's TX budget and resumes
 * transmission. A grant that would push tx_credits past
 * LE_FLOWCTL_MAX_CREDITS is a protocol violation and disconnects the
 * channel.
 */
5906 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5907 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5910 struct l2cap_le_credits *pkt;
5911 struct l2cap_chan *chan;
5912 u16 cid, credits, max_credits;
5914 if (cmd_len != sizeof(*pkt))
5917 pkt = (struct l2cap_le_credits *) data;
5918 cid = __le16_to_cpu(pkt->cid);
5919 credits = __le16_to_cpu(pkt->credits);
5921 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5923 chan = l2cap_get_chan_by_dcid(conn, cid);
/* Guard against credit counter overflow: the spec caps the total at
 * 65535 outstanding credits.
 */
5927 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5928 if (credits > max_credits) {
5929 BT_ERR("LE credits overflow");
5930 l2cap_send_disconn_req(chan, ECONNRESET);
5932 /* Return 0 so that we don't trigger an unnecessary
5933 * command reject packet.
5938 chan->tx_credits += credits;
5940 /* Resume sending */
5941 l2cap_le_flowctl_send(chan);
5943 if (chan->tx_credits)
5944 chan->ops->resume(chan);
5947 l2cap_chan_unlock(chan);
5948 l2cap_chan_put(chan);
/* Handle an Enhanced Credit Based Connection Request
 * (L2CAP_ECRED_CONN_REQ), which may establish up to
 * L2CAP_ECRED_MAX_CID channels in one exchange.
 *
 * For each requested source CID a child channel is created from the
 * listening channel; per-channel failures are reported through a zero
 * dcid slot in the response while successful channels get their
 * allocated local CID. One aggregate response is sent at the end.
 *
 * NOTE(review): interior lines are elided in this extract.
 */
5953 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5954 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5957 struct l2cap_ecred_conn_req *req = (void *) data;
5959 struct l2cap_ecred_conn_rsp rsp;
5960 __le16 dcid[L2CAP_ECRED_MAX_CID];
5962 struct l2cap_chan *chan, *pchan;
/* The payload after the fixed header must be a whole number of
 * 16-bit source CIDs.
 */
5972 if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
5973 result = L2CAP_CR_LE_INVALID_PARAMS;
5977 cmd_len -= sizeof(*req);
5978 num_scid = cmd_len / sizeof(u16);
5980 if (num_scid > ARRAY_SIZE(pdu.dcid)) {
5981 result = L2CAP_CR_LE_INVALID_PARAMS;
5985 mtu = __le16_to_cpu(req->mtu);
5986 mps = __le16_to_cpu(req->mps);
5988 if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
5989 result = L2CAP_CR_LE_UNACCEPT_PARAMS;
5995 BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
5997 memset(&pdu, 0, sizeof(pdu));
5999 /* Check if we have socket listening on psm */
6000 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
6001 &conn->hcon->dst, LE_LINK);
6003 result = L2CAP_CR_LE_BAD_PSM;
6007 mutex_lock(&conn->chan_lock);
6008 l2cap_chan_lock(pchan);
6010 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
6012 result = L2CAP_CR_LE_AUTHENTICATION;
6016 result = L2CAP_CR_LE_SUCCESS;
/* Process each requested source CID independently; a failure fills
 * its dcid slot with 0 but does not abort the remaining channels.
 */
6018 for (i = 0; i < num_scid; i++) {
6019 u16 scid = __le16_to_cpu(req->scid[i]);
6021 BT_DBG("scid[%d] 0x%4.4x", i, scid);
6023 pdu.dcid[i] = 0x0000;
6024 len += sizeof(*pdu.dcid);
6026 /* Check for valid dynamic CID range */
6027 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
6028 result = L2CAP_CR_LE_INVALID_SCID;
6032 /* Check if we already have channel with that dcid */
6033 if (__l2cap_get_chan_by_dcid(conn, scid)) {
6034 result = L2CAP_CR_LE_SCID_IN_USE;
6038 chan = pchan->ops->new_connection(pchan);
6040 result = L2CAP_CR_LE_NO_MEM;
6044 bacpy(&chan->src, &conn->hcon->src);
6045 bacpy(&chan->dst, &conn->hcon->dst);
6046 chan->src_type = bdaddr_src_type(conn->hcon);
6047 chan->dst_type = bdaddr_dst_type(conn->hcon);
6051 chan->remote_mps = mps;
6053 __l2cap_chan_add(conn, chan);
6055 l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
/* The response's mtu/mps/credits describe our side once, taken from
 * the first successfully created channel.
 */
6058 if (!pdu.rsp.credits) {
6059 pdu.rsp.mtu = cpu_to_le16(chan->imtu);
6060 pdu.rsp.mps = cpu_to_le16(chan->mps);
6061 pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
6064 pdu.dcid[i] = cpu_to_le16(chan->scid);
6066 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
6068 chan->ident = cmd->ident;
6070 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6071 l2cap_state_change(chan, BT_CONNECT2);
6073 chan->ops->defer(chan);
6075 l2cap_chan_ready(chan);
6080 l2cap_chan_unlock(pchan);
6081 mutex_unlock(&conn->chan_lock);
6082 l2cap_chan_put(pchan);
6085 pdu.rsp.result = cpu_to_le16(result);
6090 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
6091 sizeof(pdu.rsp) + len, &pdu);
/* Handle an Enhanced Credit Based Connection Response
 * (L2CAP_ECRED_CONN_RSP).
 *
 * Walks all channels pending on this command ident (mode
 * L2CAP_MODE_EXT_FLOWCTL) and consumes one dcid from the response per
 * channel. Handles per-result-code outcomes: success, security
 * escalation and retry, refusal, or the "dcid already assigned" case
 * where both the old and the new channel must be discarded.
 *
 * NOTE(review): interior lines are elided in this extract.
 */
6096 static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
6097 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6100 struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6101 struct hci_conn *hcon = conn->hcon;
6102 u16 mtu, mps, credits, result;
6103 struct l2cap_chan *chan, *tmp;
6104 int err = 0, sec_level;
6107 if (cmd_len < sizeof(*rsp))
6110 mtu = __le16_to_cpu(rsp->mtu);
6111 mps = __le16_to_cpu(rsp->mps);
6112 credits = __le16_to_cpu(rsp->credits);
6113 result = __le16_to_cpu(rsp->result);
6115 BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
6118 mutex_lock(&conn->chan_lock);
/* Remaining bytes are the per-channel dcid array. */
6120 cmd_len -= sizeof(*rsp);
/* _safe iteration: channels may be deleted while walking the list. */
6122 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6125 if (chan->ident != cmd->ident ||
6126 chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
6127 chan->state == BT_CONNECTED)
6130 l2cap_chan_lock(chan);
6132 /* Check that there is a dcid for each pending channel */
6133 if (cmd_len < sizeof(dcid)) {
6134 l2cap_chan_del(chan, ECONNREFUSED);
6135 l2cap_chan_unlock(chan);
6139 dcid = __le16_to_cpu(rsp->dcid[i++]);
6140 cmd_len -= sizeof(u16);
6142 BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
6144 /* Check if dcid is already in use */
6145 if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
6146 /* If a device receives a
6147 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
6148 * already-assigned Destination CID, then both the
6149 * original channel and the new channel shall be
6150 * immediately discarded and not used.
6152 l2cap_chan_del(chan, ECONNREFUSED);
6153 l2cap_chan_unlock(chan);
6154 chan = __l2cap_get_chan_by_dcid(conn, dcid);
6155 l2cap_chan_lock(chan);
6156 l2cap_chan_del(chan, ECONNRESET);
6157 l2cap_chan_unlock(chan);
6162 case L2CAP_CR_LE_AUTHENTICATION:
6163 case L2CAP_CR_LE_ENCRYPTION:
6164 /* If we already have MITM protection we can't do
6167 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
6168 l2cap_chan_del(chan, ECONNREFUSED);
/* Raise the security requirement and retry via SMP. */
6172 sec_level = hcon->sec_level + 1;
6173 if (chan->sec_level < sec_level)
6174 chan->sec_level = sec_level;
6176 /* We'll need to send a new Connect Request */
6177 clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
6179 smp_conn_security(hcon, chan->sec_level);
6182 case L2CAP_CR_LE_BAD_PSM:
6183 l2cap_chan_del(chan, ECONNREFUSED);
6187 /* If dcid was not set it means channels was refused */
6189 l2cap_chan_del(chan, ECONNREFUSED);
6196 chan->remote_mps = mps;
6197 chan->tx_credits = credits;
6198 l2cap_chan_ready(chan);
6202 l2cap_chan_unlock(chan);
6205 mutex_unlock(&conn->chan_lock);
/* Handle an Enhanced Credit Based Reconfigure Request
 * (L2CAP_ECRED_RECONF_REQ): the peer proposes new MTU/MPS values for
 * a set of its channels. Validates the values, applies them to each
 * referenced channel, and answers with a single reconfigure response.
 *
 * NOTE(review): interior lines are elided in this extract.
 */
6210 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6211 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6214 struct l2cap_ecred_reconf_req *req = (void *) data;
6215 struct l2cap_ecred_reconf_rsp rsp;
6216 u16 mtu, mps, result;
6217 struct l2cap_chan *chan;
/* NOTE(review): unlike l2cap_ecred_conn_req this check is missing
 * parentheses around (cmd_len - sizeof(*req)), so % binds first —
 * looks like an upstream precedence bug; verify against mainline.
 */
6223 if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
6224 result = L2CAP_CR_LE_INVALID_PARAMS;
6228 mtu = __le16_to_cpu(req->mtu);
6229 mps = __le16_to_cpu(req->mps);
6231 BT_DBG("mtu %u mps %u", mtu, mps);
6233 if (mtu < L2CAP_ECRED_MIN_MTU) {
6234 result = L2CAP_RECONF_INVALID_MTU;
6238 if (mps < L2CAP_ECRED_MIN_MPS) {
6239 result = L2CAP_RECONF_INVALID_MPS;
6243 cmd_len -= sizeof(*req);
6244 num_scid = cmd_len / sizeof(u16);
6245 result = L2CAP_RECONF_SUCCESS;
6247 for (i = 0; i < num_scid; i++) {
6250 scid = __le16_to_cpu(req->scid[i]);
/* The peer's scid is our dcid on this link. */
6254 chan = __l2cap_get_chan_by_dcid(conn, scid);
6258 /* If the MTU value is decreased for any of the included
6259 * channels, then the receiver shall disconnect all
6260 * included channels.
6262 if (chan->omtu > mtu) {
6263 BT_ERR("chan %p decreased MTU %u -> %u", chan,
6265 result = L2CAP_RECONF_INVALID_MTU;
6269 chan->remote_mps = mps;
6273 rsp.result = cpu_to_le16(result);
6275 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
/* Handle an Enhanced Credit Based Reconfigure Response
 * (L2CAP_ECRED_RECONF_RSP). On a failure result, every channel that
 * is pending on this command ident is torn down with ECONNRESET.
 */
6281 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6282 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6285 struct l2cap_chan *chan, *tmp;
6286 struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6289 if (cmd_len < sizeof(*rsp))
6292 result = __le16_to_cpu(rsp->result);
6294 BT_DBG("result 0x%4.4x", rsp->result);
/* _safe iteration because l2cap_chan_del unlinks the channel. */
6299 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6300 if (chan->ident != cmd->ident)
6303 l2cap_chan_del(chan, ECONNRESET);
/* Handle an LE Command Reject (L2CAP_COMMAND_REJ): the peer did not
 * understand one of our requests, so the channel waiting on that
 * request's ident is deleted with ECONNREFUSED.
 */
6309 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6310 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6313 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6314 struct l2cap_chan *chan;
6316 if (cmd_len < sizeof(*rej))
6319 mutex_lock(&conn->chan_lock);
6321 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6325 l2cap_chan_lock(chan);
6326 l2cap_chan_del(chan, ECONNREFUSED);
6327 l2cap_chan_unlock(chan);
6330 mutex_unlock(&conn->chan_lock);
/* Dispatch a single LE signaling command to its handler; the set of
 * opcodes accepted here differs from the BR/EDR dispatcher (no config
 * exchange, credit-based connection commands instead).
 */
6334 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6335 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6340 switch (cmd->code) {
6341 case L2CAP_COMMAND_REJ:
6342 l2cap_le_command_rej(conn, cmd, cmd_len, data);
6345 case L2CAP_CONN_PARAM_UPDATE_REQ:
6346 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6349 case L2CAP_CONN_PARAM_UPDATE_RSP:
6352 case L2CAP_LE_CONN_RSP:
6353 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6356 case L2CAP_LE_CONN_REQ:
6357 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6360 case L2CAP_LE_CREDITS:
6361 err = l2cap_le_credits(conn, cmd, cmd_len, data);
6364 case L2CAP_ECRED_CONN_REQ:
6365 err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6368 case L2CAP_ECRED_CONN_RSP:
6369 err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6372 case L2CAP_ECRED_RECONF_REQ:
6373 err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6376 case L2CAP_ECRED_RECONF_RSP:
6377 err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6380 case L2CAP_DISCONN_REQ:
6381 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6384 case L2CAP_DISCONN_RSP:
6385 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6389 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse an skb received on the LE signaling channel: validate link
 * type and header, extract the single command it carries, dispatch it,
 * and send a Command Reject back on handler failure. Unlike BR/EDR,
 * an LE signaling PDU carries exactly one command.
 */
6397 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
6398 struct sk_buff *skb)
6400 struct hci_conn *hcon = conn->hcon;
6401 struct l2cap_cmd_hdr *cmd;
6405 if (hcon->type != LE_LINK)
6408 if (skb->len < L2CAP_CMD_HDR_SIZE)
6411 cmd = (void *) skb->data;
6412 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6414 len = le16_to_cpu(cmd->len);
6416 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
/* The declared length must match exactly what remains, and ident 0
 * is reserved — otherwise the PDU is corrupted and dropped.
 */
6418 if (len != skb->len || !cmd->ident) {
6419 BT_DBG("corrupted command");
6423 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
6425 struct l2cap_cmd_rej_unk rej;
6427 BT_ERR("Wrong link type (%d)", err);
6429 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6430 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Parse an skb received on the BR/EDR signaling channel. A BR/EDR
 * signaling PDU may contain multiple commands back to back, hence the
 * loop. Raw sockets get a copy first via l2cap_raw_recv(); failed
 * commands are answered with a Command Reject.
 */
6438 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
6439 struct sk_buff *skb)
6441 struct hci_conn *hcon = conn->hcon;
6442 struct l2cap_cmd_hdr *cmd;
6445 l2cap_raw_recv(conn, skb);
6447 if (hcon->type != ACL_LINK)
/* Iterate over every complete command header left in the skb. */
6450 while (skb->len >= L2CAP_CMD_HDR_SIZE) {
6453 cmd = (void *) skb->data;
6454 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6456 len = le16_to_cpu(cmd->len);
6458 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
/* Here len only needs to fit inside the remaining data (more commands
 * may follow), unlike the exact-match check on the LE path.
 */
6461 if (len > skb->len || !cmd->ident) {
6462 BT_DBG("corrupted command");
6466 err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
6468 struct l2cap_cmd_rej_unk rej;
6470 BT_ERR("Wrong link type (%d)", err);
6472 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6473 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Verify the Frame Check Sequence (CRC-16) trailing an ERTM/streaming
 * frame, when FCS is enabled on the channel. The CRC covers the L2CAP
 * header (which sits just before skb->data) plus the payload; the FCS
 * itself is trimmed off the skb before comparison.
 */
6484 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
6486 u16 our_fcs, rcv_fcs;
/* Extended control fields use a larger header, so more bytes precede
 * skb->data and must be included in the CRC.
 */
6489 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6490 hdr_size = L2CAP_EXT_HDR_SIZE;
6492 hdr_size = L2CAP_ENH_HDR_SIZE;
6494 if (chan->fcs == L2CAP_FCS_CRC16) {
6495 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6496 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6497 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6499 if (our_fcs != rcv_fcs)
6505 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
6507 struct l2cap_ctrl control;
6509 BT_DBG("chan %p", chan);
6511 memset(&control, 0, sizeof(control));
6514 control.reqseq = chan->buffer_seq;
6515 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6517 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6518 control.super = L2CAP_SUPER_RNR;
6519 l2cap_send_sframe(chan, &control);
6522 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
6523 chan->unacked_frames > 0)
6524 __set_retrans_timer(chan);
6526 /* Send pending iframes */
6527 l2cap_ertm_send(chan);
6529 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
6530 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
6531 /* F-bit wasn't sent in an s-frame or i-frame yet, so
6534 control.super = L2CAP_SUPER_RR;
6535 l2cap_send_sframe(chan, &control);
/* Append @new_frag to @skb's frag_list, tracking the list tail in
 * *@last_frag to keep appends O(1), and update the parent skb's
 * length/truesize accounting to cover the new fragment.
 */
6539 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
6540 struct sk_buff **last_frag)
6542 /* skb->len reflects data in skb as well as all fragments
6543 * skb->data_len reflects only data in fragments
6545 if (!skb_has_frag_list(skb))
6546 skb_shinfo(skb)->frag_list = new_frag;
6548 new_frag->next = NULL;
6550 (*last_frag)->next = new_frag;
6551 *last_frag = new_frag;
6553 skb->len += new_frag->len;
6554 skb->data_len += new_frag->len;
6555 skb->truesize += new_frag->truesize;
/* Reassemble a segmented SDU from ERTM I-frames according to the
 * frame's SAR (Segmentation And Reassembly) field, and deliver the
 * complete SDU to the channel via chan->ops->recv().
 *
 * SAR_UNSEGMENTED frames are delivered directly; SAR_START opens a
 * partial SDU (first two payload bytes carry the total SDU length);
 * SAR_CONTINUE/END append fragments until the declared length is met.
 *
 * NOTE(review): interior lines (error paths, kfree on failure) are
 * elided in this extract.
 */
6558 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
6559 struct l2cap_ctrl *control)
6563 switch (control->sar) {
6564 case L2CAP_SAR_UNSEGMENTED:
6568 err = chan->ops->recv(chan, skb);
6571 case L2CAP_SAR_START:
6575 if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
/* SDU length prefix precedes the payload in the first segment. */
6578 chan->sdu_len = get_unaligned_le16(skb->data);
6579 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* An SDU larger than our MTU is a protocol violation. */
6581 if (chan->sdu_len > chan->imtu) {
6586 if (skb->len >= chan->sdu_len)
6590 chan->sdu_last_frag = skb;
6596 case L2CAP_SAR_CONTINUE:
6600 append_skb_frag(chan->sdu, skb,
6601 &chan->sdu_last_frag);
/* A continuation must not reach the declared total length. */
6604 if (chan->sdu->len >= chan->sdu_len)
6614 append_skb_frag(chan->sdu, skb,
6615 &chan->sdu_last_frag);
/* The final fragment must complete the SDU exactly. */
6618 if (chan->sdu->len != chan->sdu_len)
6621 err = chan->ops->recv(chan, chan->sdu);
6624 /* Reassembly complete */
6626 chan->sdu_last_frag = NULL;
/* Error path: drop the partially assembled SDU. */
6634 kfree_skb(chan->sdu);
6636 chan->sdu_last_frag = NULL;
/* Re-segment queued outbound data after the channel's MTU changed
 * (e.g. following a channel move). Body not visible in this extract;
 * presumably a stub returning 0 — TODO confirm against mainline.
 */
6643 static int l2cap_resegment(struct l2cap_chan *chan)
/* Feed a local-busy state change into the ERTM transmit state machine.
 * Only meaningful for ERTM channels; other modes ignore it.
 *
 * @busy: non-zero when the local receiver can no longer accept data.
 */
6649 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6653 if (chan->mode != L2CAP_MODE_ERTM)
6656 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6657 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ hold queue: deliver consecutively-sequenced I-frames
 * that were buffered while retransmissions were outstanding, stopping
 * at the first gap or when the channel goes locally busy. When the
 * queue empties, return to the normal RECV state and ack the peer.
 */
6660 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6663 /* Pass sequential frames to l2cap_reassemble_sdu()
6664 * until a gap is encountered.
6667 BT_DBG("chan %p", chan);
6669 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6670 struct sk_buff *skb;
6671 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6672 chan->buffer_seq, skb_queue_len(&chan->srej_q));
6674 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6679 skb_unlink(skb, &chan->srej_q);
6680 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6681 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6686 if (skb_queue_empty(&chan->srej_q)) {
6687 chan->rx_state = L2CAP_RX_STATE_RECV;
6688 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: the peer selectively requests
 * retransmission of the frame at control->reqseq. Validates that the
 * requested sequence is retransmittable and within the retry limit,
 * then retransmits, honouring the P/F bit handshake.
 */
6694 static void l2cap_handle_srej(struct l2cap_chan *chan,
6695 struct l2cap_ctrl *control)
6697 struct sk_buff *skb;
6699 BT_DBG("chan %p, control %p", chan, control);
/* Asking for a frame we have not sent yet is a protocol error. */
6701 if (control->reqseq == chan->next_tx_seq) {
6702 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6703 l2cap_send_disconn_req(chan, ECONNRESET);
6707 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6710 BT_DBG("Seq %d not available for retransmission",
/* max_tx == 0 means unlimited retransmissions. */
6715 if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6716 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6717 l2cap_send_disconn_req(chan, ECONNRESET);
6721 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6723 if (control->poll) {
/* P-bit set: retransmit and answer with the F-bit set. */
6724 l2cap_pass_to_tx(chan, control);
6726 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6727 l2cap_retransmit(chan, control);
6728 l2cap_ertm_send(chan);
6730 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6731 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6732 chan->srej_save_reqseq = control->reqseq;
6735 l2cap_pass_to_tx_fbit(chan, control);
6737 if (control->final) {
/* F-bit: only retransmit if this is not the SREJ we already
 * acted on (avoids duplicate retransmission).
 */
6738 if (chan->srej_save_reqseq != control->reqseq ||
6739 !test_and_clear_bit(CONN_SREJ_ACT,
6741 l2cap_retransmit(chan, control);
6743 l2cap_retransmit(chan, control);
6744 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6745 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6746 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: the peer requests retransmission of
 * all frames from control->reqseq onward. Validates the sequence and
 * retry budget, then retransmits everything outstanding, tracking the
 * REJ via CONN_REJ_ACT so a later F-bit does not trigger a second
 * full retransmission.
 */
6752 static void l2cap_handle_rej(struct l2cap_chan *chan,
6753 struct l2cap_ctrl *control)
6755 struct sk_buff *skb;
6757 BT_DBG("chan %p, control %p", chan, control);
/* Rejecting a frame we have not sent is a protocol error. */
6759 if (control->reqseq == chan->next_tx_seq) {
6760 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6761 l2cap_send_disconn_req(chan, ECONNRESET);
6765 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6767 if (chan->max_tx && skb &&
6768 bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6769 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6770 l2cap_send_disconn_req(chan, ECONNRESET);
6774 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6776 l2cap_pass_to_tx(chan, control);
6778 if (control->final) {
/* F-bit: skip retransmission if a REJ was already acted on. */
6779 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6780 l2cap_retransmit_all(chan, control);
6782 l2cap_retransmit_all(chan, control);
6783 l2cap_ertm_send(chan);
6784 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6785 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to our receive window
 * and SREJ state.
 *
 * Returns one of the L2CAP_TXSEQ_* classifications: EXPECTED (in-order),
 * UNEXPECTED (gap — triggers SREJ), DUPLICATE, EXPECTED_SREJ /
 * DUPLICATE_SREJ / UNEXPECTED_SREJ (while recovering from a gap), or
 * INVALID / INVALID_IGNORE for out-of-window sequence numbers.
 */
6789 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6791 BT_DBG("chan %p, txseq %d", chan, txseq);
6793 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6794 chan->expected_tx_seq);
/* While an SREJ is outstanding, classification is made against the
 * SREJ list and hold queue rather than the plain expected sequence.
 */
6796 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6797 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6799 /* See notes below regarding "double poll" and
6802 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6803 BT_DBG("Invalid/Ignore - after SREJ");
6804 return L2CAP_TXSEQ_INVALID_IGNORE;
6806 BT_DBG("Invalid - in window after SREJ sent");
6807 return L2CAP_TXSEQ_INVALID;
6811 if (chan->srej_list.head == txseq) {
6812 BT_DBG("Expected SREJ");
6813 return L2CAP_TXSEQ_EXPECTED_SREJ;
6816 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6817 BT_DBG("Duplicate SREJ - txseq already stored");
6818 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6821 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6822 BT_DBG("Unexpected SREJ - not requested");
6823 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6827 if (chan->expected_tx_seq == txseq) {
6828 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6830 BT_DBG("Invalid - txseq outside tx window");
6831 return L2CAP_TXSEQ_INVALID;
6834 return L2CAP_TXSEQ_EXPECTED;
/* txseq before expected_tx_seq (modulo wrap) means we already
 * received this frame.
 */
6838 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6839 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6840 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6841 return L2CAP_TXSEQ_DUPLICATE;
6844 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6845 /* A source of invalid packets is a "double poll" condition,
6846 * where delays cause us to send multiple poll packets. If
6847 * the remote stack receives and processes both polls,
6848 * sequence numbers can wrap around in such a way that a
6849 * resent frame has a sequence number that looks like new data
6850 * with a sequence gap. This would trigger an erroneous SREJ
6853 * Fortunately, this is impossible with a tx window that's
6854 * less than half of the maximum sequence number, which allows
6855 * invalid frames to be safely ignored.
6857 * With tx window sizes greater than half of the tx window
6858 * maximum, the frame is invalid and cannot be ignored. This
6859 * causes a disconnect.
6862 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6863 BT_DBG("Invalid/Ignore - txseq outside tx window");
6864 return L2CAP_TXSEQ_INVALID_IGNORE;
6866 BT_DBG("Invalid - txseq outside tx window");
6867 return L2CAP_TXSEQ_INVALID;
6870 BT_DBG("Unexpected - txseq indicates missing frames");
6871 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive state machine: normal RECV state handler.
 *
 * Processes one event (I-frame or RR/RNR/REJ/SREJ S-frame). In-order
 * I-frames are reassembled and delivered; a sequence gap queues the
 * frame and moves the channel to SREJ_SENT; invalid sequences
 * disconnect. Frames not retained (skb_in_use stays false) are freed
 * at the end.
 *
 * NOTE(review): interior lines are elided in this extract.
 */
6875 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6876 struct l2cap_ctrl *control,
6877 struct sk_buff *skb, u8 event)
6880 bool skb_in_use = false;
6882 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6886 case L2CAP_EV_RECV_IFRAME:
6887 switch (l2cap_classify_txseq(chan, control->txseq)) {
6888 case L2CAP_TXSEQ_EXPECTED:
6889 l2cap_pass_to_tx(chan, control);
/* Locally busy: drop the frame; it will be recovered via
 * retransmission once busy clears.
 */
6891 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6892 BT_DBG("Busy, discarding expected seq %d",
6897 chan->expected_tx_seq = __next_seq(chan,
6900 chan->buffer_seq = chan->expected_tx_seq;
6903 err = l2cap_reassemble_sdu(chan, skb, control);
6907 if (control->final) {
6908 if (!test_and_clear_bit(CONN_REJ_ACT,
6909 &chan->conn_state)) {
6911 l2cap_retransmit_all(chan, control);
6912 l2cap_ertm_send(chan);
6916 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6917 l2cap_send_ack(chan);
6919 case L2CAP_TXSEQ_UNEXPECTED:
6920 l2cap_pass_to_tx(chan, control);
6922 /* Can't issue SREJ frames in the local busy state.
6923 * Drop this frame, it will be seen as missing
6924 * when local busy is exited.
6926 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6927 BT_DBG("Busy, discarding unexpected seq %d",
6932 /* There was a gap in the sequence, so an SREJ
6933 * must be sent for each missing frame. The
6934 * current frame is stored for later use.
6936 skb_queue_tail(&chan->srej_q, skb);
6938 BT_DBG("Queued %p (queue len %d)", skb,
6939 skb_queue_len(&chan->srej_q));
6941 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6942 l2cap_seq_list_clear(&chan->srej_list);
6943 l2cap_send_srej(chan, control->txseq);
6945 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6947 case L2CAP_TXSEQ_DUPLICATE:
6948 l2cap_pass_to_tx(chan, control);
6950 case L2CAP_TXSEQ_INVALID_IGNORE:
6952 case L2CAP_TXSEQ_INVALID:
6954 l2cap_send_disconn_req(chan, ECONNRESET);
6958 case L2CAP_EV_RECV_RR:
6959 l2cap_pass_to_tx(chan, control);
6960 if (control->final) {
6961 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6963 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6964 !__chan_is_moving(chan)) {
6966 l2cap_retransmit_all(chan, control);
6969 l2cap_ertm_send(chan);
6970 } else if (control->poll) {
6971 l2cap_send_i_or_rr_or_rnr(chan);
6973 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6974 &chan->conn_state) &&
6975 chan->unacked_frames)
6976 __set_retrans_timer(chan);
6978 l2cap_ertm_send(chan);
6981 case L2CAP_EV_RECV_RNR:
/* Peer is busy: stop retransmitting until it recovers. */
6982 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6983 l2cap_pass_to_tx(chan, control);
6984 if (control && control->poll) {
6985 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6986 l2cap_send_rr_or_rnr(chan, 0);
6988 __clear_retrans_timer(chan);
6989 l2cap_seq_list_clear(&chan->retrans_list);
6991 case L2CAP_EV_RECV_REJ:
6992 l2cap_handle_rej(chan, control);
6994 case L2CAP_EV_RECV_SREJ:
6995 l2cap_handle_srej(chan, control);
/* Any skb not queued or consumed above is dropped here. */
7001 if (skb && !skb_in_use) {
7002 BT_DBG("Freeing %p", skb);
/* ERTM receive state machine: SREJ_SENT state handler, active while
 * we wait for selectively-rejected frames to be retransmitted.
 *
 * Retransmitted frames and further out-of-order frames are parked in
 * srej_q; when a requested frame arrives, as many queued frames as are
 * now in sequence get delivered (l2cap_rx_queued_iframes). S-frames
 * are handled with the same P/F-bit rules as the RECV state.
 *
 * NOTE(review): interior lines are elided in this extract.
 */
7009 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
7010 struct l2cap_ctrl *control,
7011 struct sk_buff *skb, u8 event)
7014 u16 txseq = control->txseq;
7015 bool skb_in_use = false;
7017 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7021 case L2CAP_EV_RECV_IFRAME:
7022 switch (l2cap_classify_txseq(chan, txseq)) {
7023 case L2CAP_TXSEQ_EXPECTED:
7024 /* Keep frame for reassembly later */
7025 l2cap_pass_to_tx(chan, control);
7026 skb_queue_tail(&chan->srej_q, skb);
7028 BT_DBG("Queued %p (queue len %d)", skb,
7029 skb_queue_len(&chan->srej_q));
7031 chan->expected_tx_seq = __next_seq(chan, txseq);
7033 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* The frame we selectively requested has arrived; drop its
 * entry from the SREJ list and try to flush the hold queue.
 */
7034 l2cap_seq_list_pop(&chan->srej_list);
7036 l2cap_pass_to_tx(chan, control);
7037 skb_queue_tail(&chan->srej_q, skb);
7039 BT_DBG("Queued %p (queue len %d)", skb,
7040 skb_queue_len(&chan->srej_q));
7042 err = l2cap_rx_queued_iframes(chan);
7047 case L2CAP_TXSEQ_UNEXPECTED:
7048 /* Got a frame that can't be reassembled yet.
7049 * Save it for later, and send SREJs to cover
7050 * the missing frames.
7052 skb_queue_tail(&chan->srej_q, skb);
7054 BT_DBG("Queued %p (queue len %d)", skb,
7055 skb_queue_len(&chan->srej_q));
7057 l2cap_pass_to_tx(chan, control);
7058 l2cap_send_srej(chan, control->txseq);
7060 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
7061 /* This frame was requested with an SREJ, but
7062 * some expected retransmitted frames are
7063 * missing. Request retransmission of missing
7066 skb_queue_tail(&chan->srej_q, skb);
7068 BT_DBG("Queued %p (queue len %d)", skb,
7069 skb_queue_len(&chan->srej_q));
7071 l2cap_pass_to_tx(chan, control);
7072 l2cap_send_srej_list(chan, control->txseq);
7074 case L2CAP_TXSEQ_DUPLICATE_SREJ:
7075 /* We've already queued this frame. Drop this copy. */
7076 l2cap_pass_to_tx(chan, control);
7078 case L2CAP_TXSEQ_DUPLICATE:
7079 /* Expecting a later sequence number, so this frame
7080 * was already received. Ignore it completely.
7083 case L2CAP_TXSEQ_INVALID_IGNORE:
7085 case L2CAP_TXSEQ_INVALID:
7087 l2cap_send_disconn_req(chan, ECONNRESET);
7091 case L2CAP_EV_RECV_RR:
7092 l2cap_pass_to_tx(chan, control);
7093 if (control->final) {
7094 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7096 if (!test_and_clear_bit(CONN_REJ_ACT,
7097 &chan->conn_state)) {
7099 l2cap_retransmit_all(chan, control);
7102 l2cap_ertm_send(chan);
7103 } else if (control->poll) {
7104 if (test_and_clear_bit(CONN_REMOTE_BUSY,
7105 &chan->conn_state) &&
7106 chan->unacked_frames) {
7107 __set_retrans_timer(chan);
/* Answer the poll by re-sending the SREJ for the tail of the
 * missing-frame list, carrying the F-bit.
 */
7110 set_bit(CONN_SEND_FBIT, &chan->conn_state);
7111 l2cap_send_srej_tail(chan);
7113 if (test_and_clear_bit(CONN_REMOTE_BUSY,
7114 &chan->conn_state) &&
7115 chan->unacked_frames)
7116 __set_retrans_timer(chan);
7118 l2cap_send_ack(chan);
7121 case L2CAP_EV_RECV_RNR:
7122 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7123 l2cap_pass_to_tx(chan, control);
7124 if (control->poll) {
7125 l2cap_send_srej_tail(chan);
7127 struct l2cap_ctrl rr_control;
7128 memset(&rr_control, 0, sizeof(rr_control));
7129 rr_control.sframe = 1;
7130 rr_control.super = L2CAP_SUPER_RR;
7131 rr_control.reqseq = chan->buffer_seq;
7132 l2cap_send_sframe(chan, &rr_control);
7136 case L2CAP_EV_RECV_REJ:
7137 l2cap_handle_rej(chan, control);
7139 case L2CAP_EV_RECV_SREJ:
7140 l2cap_handle_srej(chan, control);
/* Frames not parked in srej_q are released here. */
7144 if (skb && !skb_in_use) {
7145 BT_DBG("Freeing %p", skb);
/* Finish an AMP channel move: return the receiver to the RECV state,
 * adopt the MTU of whichever controller (AMP block vs ACL) now carries
 * the channel, and re-segment any queued outbound data accordingly.
 */
7152 static int l2cap_finish_move(struct l2cap_chan *chan)
7154 BT_DBG("chan %p", chan);
7156 chan->rx_state = L2CAP_RX_STATE_RECV;
7159 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7161 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7163 return l2cap_resegment(chan);
/* ERTM receive state machine: WAIT_P state (channel move in progress,
 * waiting for the peer's poll). On receiving the P-bit, rewind the
 * transmit side to the peer's reqseq, complete the move, answer the
 * poll with the F-bit, and re-run the event through the RECV handler
 * (minus the skb for an I-frame, which only carried the poll here).
 *
 * NOTE(review): interior lines are elided in this extract.
 */
7166 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
7167 struct l2cap_ctrl *control,
7168 struct sk_buff *skb, u8 event)
7172 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7178 l2cap_process_reqseq(chan, control->reqseq);
7180 if (!skb_queue_empty(&chan->tx_q))
7181 chan->tx_send_head = skb_peek(&chan->tx_q);
7183 chan->tx_send_head = NULL;
7185 /* Rewind next_tx_seq to the point expected
7188 chan->next_tx_seq = control->reqseq;
7189 chan->unacked_frames = 0;
7191 err = l2cap_finish_move(chan);
7195 set_bit(CONN_SEND_FBIT, &chan->conn_state);
7196 l2cap_send_i_or_rr_or_rnr(chan);
7198 if (event == L2CAP_EV_RECV_IFRAME)
7201 return l2cap_rx_state_recv(chan, control, NULL, event);
/* ERTM RX handler for the WAIT_F state (waiting for the peer's F-bit in
 * reply to our poll). A frame without the final bit is not what we wait
 * for; once the F-bit arrives: clear REMOTE_BUSY, return to RECV state,
 * ack up to reqseq, reset tx_send_head/next_tx_seq/unacked_frames, pick
 * the MTU for the post-move link, resegment, and then process the frame
 * through the normal RECV handler.
 * NOTE(review): fragmentary listing - early-return and MTU-selection
 * conditions are missing lines.
 */
7204 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
7205 struct l2cap_ctrl *control,
7206 struct sk_buff *skb, u8 event)
7210 if (!control->final)
7213 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7215 chan->rx_state = L2CAP_RX_STATE_RECV;
7216 l2cap_process_reqseq(chan, control->reqseq);
7218 if (!skb_queue_empty(&chan->tx_q))
7219 chan->tx_send_head = skb_peek(&chan->tx_q);
7221 chan->tx_send_head = NULL;
7223 /* Rewind next_tx_seq to the point expected
7226 chan->next_tx_seq = control->reqseq;
7227 chan->unacked_frames = 0;
7230 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7232 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7234 err = l2cap_resegment(chan);
7237 err = l2cap_rx_state_recv(chan, control, skb, event);
/* Window check: a peer-supplied reqseq is only valid if it acknowledges
 * a frame we have sent but not yet seen acked, i.e. its offset from
 * next_tx_seq does not exceed the current unacked span.
 */
7242 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7244 /* Make sure reqseq is for a packet that has been sent but not acked */
7247 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7248 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM RX dispatcher: validate the frame's reqseq against the
 * unacked window, then hand it to the handler for the channel's current
 * rx_state (RECV / SREJ_SENT / WAIT_P / WAIT_F). An out-of-window reqseq
 * is a protocol violation and tears the channel down with ECONNRESET.
 */
7251 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7252 struct sk_buff *skb, u8 event)
7256 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7257 control, skb, event, chan->rx_state);
7259 if (__valid_reqseq(chan, control->reqseq)) {
7260 switch (chan->rx_state) {
7261 case L2CAP_RX_STATE_RECV:
7262 err = l2cap_rx_state_recv(chan, control, skb, event);
7264 case L2CAP_RX_STATE_SREJ_SENT:
7265 err = l2cap_rx_state_srej_sent(chan, control, skb,
7268 case L2CAP_RX_STATE_WAIT_P:
7269 err = l2cap_rx_state_wait_p(chan, control, skb, event);
7271 case L2CAP_RX_STATE_WAIT_F:
7272 err = l2cap_rx_state_wait_f(chan, control, skb, event);
7279 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
7280 control->reqseq, chan->next_tx_seq,
7281 chan->expected_ack_seq);
7282 l2cap_send_disconn_req(chan, ECONNRESET);
/* Streaming-mode RX: only a frame with the expected txseq is reassembled
 * into the current SDU; any partially built SDU is discarded when the
 * sequence is broken (streaming mode has no retransmission). The last
 * acked / expected sequence counters are always resynced to the received
 * txseq. NOTE(review): fragmentary listing - the else branch freeing an
 * unexpected frame is missing its header lines.
 */
7288 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7289 struct sk_buff *skb)
7291 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
7294 if (l2cap_classify_txseq(chan, control->txseq) ==
7295 L2CAP_TXSEQ_EXPECTED) {
7296 l2cap_pass_to_tx(chan, control);
7298 BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
7299 __next_seq(chan, chan->buffer_seq));
7301 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
7303 l2cap_reassemble_sdu(chan, skb, control);
7306 kfree_skb(chan->sdu);
7309 chan->sdu_last_frag = NULL;
7313 BT_DBG("Freeing %p", skb);
7318 chan->last_acked_seq = control->txseq;
7319 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM/streaming data PDUs: unpack the control field,
 * verify the FCS (a corrupt I-frame is silently dropped and recovered by
 * retransmission), strip SDU-length and FCS bytes from the payload length,
 * enforce the negotiated MPS, run the optional socket filter, then route
 * I-frames to l2cap_rx()/l2cap_stream_rx() and S-frames (mapped through
 * rx_func_to_event) to l2cap_rx(). Protocol violations - I-frame F-bit
 * outside TX WAIT_F, S-frames in streaming mode, trailing bytes in an
 * S-frame, bad F/P combination - disconnect with ECONNRESET.
 */
7324 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7326 struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
7330 __unpack_control(chan, skb);
7335 * We can just drop the corrupted I-frame here.
7336 * Receiver will miss it and start proper recovery
7337 * procedures and ask for retransmission.
7339 if (l2cap_check_fcs(chan, skb))
7342 if (!control->sframe && control->sar == L2CAP_SAR_START)
7343 len -= L2CAP_SDULEN_SIZE;
7345 if (chan->fcs == L2CAP_FCS_CRC16)
7346 len -= L2CAP_FCS_SIZE;
7348 if (len > chan->mps) {
7349 l2cap_send_disconn_req(chan, ECONNRESET);
7353 if (chan->ops->filter) {
7354 if (chan->ops->filter(chan, skb))
7358 if (!control->sframe) {
7361 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
7362 control->sar, control->reqseq, control->final,
7365 /* Validate F-bit - F=0 always valid, F=1 only
7366 * valid in TX WAIT_F
7368 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
7371 if (chan->mode != L2CAP_MODE_STREAMING) {
7372 event = L2CAP_EV_RECV_IFRAME;
7373 err = l2cap_rx(chan, control, skb, event);
7375 err = l2cap_stream_rx(chan, control, skb);
7379 l2cap_send_disconn_req(chan, ECONNRESET);
7381 const u8 rx_func_to_event[4] = {
7382 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
7383 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
7386 /* Only I-frames are expected in streaming mode */
7387 if (chan->mode == L2CAP_MODE_STREAMING)
7390 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
7391 control->reqseq, control->final, control->poll,
7395 BT_ERR("Trailing bytes: %d in sframe", len);
7396 l2cap_send_disconn_req(chan, ECONNRESET);
7400 /* Validate F and P bits */
7401 if (control->final && (control->poll ||
7402 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
7405 event = rx_func_to_event[control->super];
7406 if (l2cap_rx(chan, control, skb, event))
7407 l2cap_send_disconn_req(chan, ECONNRESET);
/* Return flow-control credits to the remote sender on an LE/ECRED channel.
 * Target credit level is imtu/mps + 1; if the channel already holds at
 * least that many rx credits nothing is sent, otherwise the difference is
 * granted via an L2CAP_LE_CREDITS signalling packet.
 */
7417 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7419 struct l2cap_conn *conn = chan->conn;
7420 struct l2cap_le_credits pkt;
7423 return_credits = (chan->imtu / chan->mps) + 1;
7425 if (chan->rx_credits >= return_credits)
7428 return_credits -= chan->rx_credits;
7430 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7432 chan->rx_credits += return_credits;
7434 pkt.cid = cpu_to_le16(chan->scid);
7435 pkt.credits = cpu_to_le16(return_credits);
7437 chan->ident = l2cap_get_ident(conn);
7439 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
/* Deliver a fully reassembled SDU to the channel owner via ops->recv and,
 * only after the receiver has confirmed reception, top the sender's
 * credits back up.
 */
7442 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7446 BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7448 /* Wait recv to confirm reception before updating the credits */
7449 err = chan->ops->recv(chan, skb);
7451 /* Update credits whenever an SDU is received */
7452 l2cap_chan_le_send_credits(chan);
/* Receive one LE/enhanced-credit PDU: reject data arriving with no rx
 * credits or larger than the channel imtu, decrement credits (replenish
 * if the remote ran dry), then either start a new SDU (first fragment
 * carries the 16-bit SDU length, validated against imtu and the fragment
 * itself) or append to the in-progress SDU, delivering it through
 * l2cap_ecred_recv() once sdu->len reaches sdu_len. A first fragment
 * shorter than MPS-2 triggers a one-shot MPS shrink plus credit update.
 * Error paths free chan->sdu and reset the reassembly pointers; the
 * function deliberately never reports an error upward because the skb has
 * already been consumed/freed here (a double free would result otherwise).
 * NOTE(review): fragmentary listing - several branch and cleanup lines
 * (e.g. the sar/start-vs-continuation split) are elided.
 */
7457 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7461 if (!chan->rx_credits) {
7462 BT_ERR("No credits to receive LE L2CAP data");
7463 l2cap_send_disconn_req(chan, ECONNRESET);
7467 if (chan->imtu < skb->len) {
7468 BT_ERR("Too big LE L2CAP PDU");
7473 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
7475 /* Update if remote had run out of credits, this should only happens
7476 * if the remote is not using the entire MPS.
7478 if (!chan->rx_credits)
7479 l2cap_chan_le_send_credits(chan);
7486 sdu_len = get_unaligned_le16(skb->data);
7487 skb_pull(skb, L2CAP_SDULEN_SIZE);
7489 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
7490 sdu_len, skb->len, chan->imtu);
7492 if (sdu_len > chan->imtu) {
7493 BT_ERR("Too big LE L2CAP SDU length received");
7498 if (skb->len > sdu_len) {
7499 BT_ERR("Too much LE L2CAP data received");
7504 if (skb->len == sdu_len)
7505 return l2cap_ecred_recv(chan, skb);
7508 chan->sdu_len = sdu_len;
7509 chan->sdu_last_frag = skb;
7511 /* Detect if remote is not able to use the selected MPS */
7512 if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
7513 u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
7515 /* Adjust the number of credits */
7516 BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
7517 chan->mps = mps_len;
7518 l2cap_chan_le_send_credits(chan);
7524 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
7525 chan->sdu->len, skb->len, chan->sdu_len);
7527 if (chan->sdu->len + skb->len > chan->sdu_len) {
7528 BT_ERR("Too much LE L2CAP data received");
7533 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
7536 if (chan->sdu->len == chan->sdu_len) {
7537 err = l2cap_ecred_recv(chan, chan->sdu);
7540 chan->sdu_last_frag = NULL;
7548 kfree_skb(chan->sdu);
7550 chan->sdu_last_frag = NULL;
7554 /* We can't return an error here since we took care of the skb
7555 * freeing internally. An error return would cause the caller to
7556 * do a double-free of the skb.
/* Route a data PDU to the channel identified by scid: special-case A2MP
 * channel creation, drop packets for unknown CIDs, mark fixed channels
 * ready on first data (info req/rsp may not have completed), require the
 * channel to be BT_CONNECTED, then dispatch by channel mode: LE/ECRED
 * flow control, BASIC (drop on imtu overflow - basic mode has no flow
 * control), ERTM/streaming, or log an unexpected mode. The channel lock
 * and reference taken by l2cap_get_chan_by_scid() are released at the
 * end. NOTE(review): fragmentary listing - goto labels and break
 * statements between cases are elided.
 */
7561 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
7562 struct sk_buff *skb)
7564 struct l2cap_chan *chan;
7566 chan = l2cap_get_chan_by_scid(conn, cid);
7568 if (cid == L2CAP_CID_A2MP) {
7569 chan = a2mp_channel_create(conn, skb);
7575 l2cap_chan_lock(chan);
7577 BT_DBG("unknown cid 0x%4.4x", cid);
7578 /* Drop packet and return */
7584 BT_DBG("chan %p, len %d", chan, skb->len);
7586 /* If we receive data on a fixed channel before the info req/rsp
7587 * procedure is done simply assume that the channel is supported
7588 * and mark it as ready.
7590 if (chan->chan_type == L2CAP_CHAN_FIXED)
7591 l2cap_chan_ready(chan);
7593 if (chan->state != BT_CONNECTED)
7596 switch (chan->mode) {
7597 case L2CAP_MODE_LE_FLOWCTL:
7598 case L2CAP_MODE_EXT_FLOWCTL:
7599 if (l2cap_ecred_data_rcv(chan, skb) < 0)
7604 case L2CAP_MODE_BASIC:
7605 /* If socket recv buffers overflows we drop data here
7606 * which is *bad* because L2CAP has to be reliable.
7607 * But we don't have any other choice. L2CAP doesn't
7608 * provide flow control mechanism. */
7610 if (chan->imtu < skb->len) {
7611 BT_ERR("Dropping L2CAP data: receive buffer overflow");
7615 if (!chan->ops->recv(chan, skb))
7619 case L2CAP_MODE_ERTM:
7620 case L2CAP_MODE_STREAMING:
7621 l2cap_data_rcv(chan, skb);
7625 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7633 l2cap_chan_unlock(chan);
7634 l2cap_chan_put(chan);
/* Deliver a connectionless (CID 0x0002) PDU: BR/EDR only, looked up by
 * PSM among global channels. The channel must be BT_BOUND or
 * BT_CONNECTED and the payload must fit imtu. The remote address and PSM
 * are stashed in the skb control block for recvmsg()'s msg_name. The
 * reference from l2cap_global_chan_by_psm() is dropped on every path.
 */
7637 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7638 struct sk_buff *skb)
7640 struct hci_conn *hcon = conn->hcon;
7641 struct l2cap_chan *chan;
7643 if (hcon->type != ACL_LINK)
7646 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7651 BT_DBG("chan %p, len %d", chan, skb->len);
7653 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7656 if (chan->imtu < skb->len)
7659 /* Store remote BD_ADDR and PSM for msg_name */
7660 bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7661 bt_cb(skb)->l2cap.psm = psm;
7663 if (!chan->ops->recv(chan, skb)) {
7664 l2cap_chan_put(chan);
7669 l2cap_chan_put(chan);
/* Process one complete L2CAP frame: queue it if the HCI link is not yet
 * BT_CONNECTED, strip and validate the basic header (length must match
 * the skb), drop data from reject-listed LE peers, then dispatch by CID:
 * BR/EDR signalling, connectionless (pull the 2-byte PSM first), LE
 * signalling, or a data channel.
 */
7674 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7676 struct l2cap_hdr *lh = (void *) skb->data;
7677 struct hci_conn *hcon = conn->hcon;
7681 if (hcon->state != BT_CONNECTED) {
7682 BT_DBG("queueing pending rx skb");
7683 skb_queue_tail(&conn->pending_rx, skb);
7687 skb_pull(skb, L2CAP_HDR_SIZE);
7688 cid = __le16_to_cpu(lh->cid);
7689 len = __le16_to_cpu(lh->len);
7691 if (len != skb->len) {
7696 /* Since we can't actively block incoming LE connections we must
7697 * at least ensure that we ignore incoming data from them.
7699 if (hcon->type == LE_LINK &&
7700 hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
7701 bdaddr_dst_type(hcon))) {
7706 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7709 case L2CAP_CID_SIGNALING:
7710 l2cap_sig_channel(conn, skb);
7713 case L2CAP_CID_CONN_LESS:
7714 psm = get_unaligned((__le16 *) skb->data);
7715 skb_pull(skb, L2CAP_PSMLEN_SIZE);
7716 l2cap_conless_channel(conn, psm, skb);
7719 case L2CAP_CID_LE_SIGNALING:
7720 l2cap_le_sig_channel(conn, skb);
7724 l2cap_data_channel(conn, cid, skb);
/* Workqueue handler: drain frames that were queued while the HCI link
 * was still coming up and feed them through l2cap_recv_frame().
 */
7729 static void process_pending_rx(struct work_struct *work)
7731 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7733 struct sk_buff *skb;
7737 while ((skb = skb_dequeue(&conn->pending_rx)))
7738 l2cap_recv_frame(conn, skb);
/* Create (or return the existing) struct l2cap_conn for an HCI
 * connection: allocate an hci_chan, zero-alloc the conn, wire the
 * refcount and back-pointers, pick the MTU by link type (le_mtu when the
 * controller reports one, else acl_mtu), advertise local fixed channels
 * (A2MP when HS is enabled, BR/EDR SMP when LE/SC permits), and
 * initialize locks, channel list, info timer, pending-rx queue/work and
 * the identity-address update work. disc_reason defaults to remote-user
 * termination. NOTE(review): fragmentary listing - the early "conn
 * already exists" return and error-path lines are elided.
 */
7741 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7743 struct l2cap_conn *conn = hcon->l2cap_data;
7744 struct hci_chan *hchan;
7749 hchan = hci_chan_create(hcon);
7753 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
7755 hci_chan_del(hchan);
7759 kref_init(&conn->ref);
7760 hcon->l2cap_data = conn;
7761 conn->hcon = hci_conn_get(hcon);
7762 conn->hchan = hchan;
7764 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7766 switch (hcon->type) {
7768 if (hcon->hdev->le_mtu) {
7769 conn->mtu = hcon->hdev->le_mtu;
7774 conn->mtu = hcon->hdev->acl_mtu;
7778 conn->feat_mask = 0;
7780 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7782 if (hcon->type == ACL_LINK &&
7783 hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7784 conn->local_fixed_chan |= L2CAP_FC_A2MP;
7786 if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7787 (bredr_sc_enabled(hcon->hdev) ||
7788 hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7789 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7791 mutex_init(&conn->ident_lock);
7792 mutex_init(&conn->chan_lock);
7794 INIT_LIST_HEAD(&conn->chan_l);
7795 INIT_LIST_HEAD(&conn->users);
7797 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7799 skb_queue_head_init(&conn->pending_rx);
7800 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7801 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7803 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Validate a PSM for the destination address type: LE PSMs are a single
 * byte (<= 0x00ff); BR/EDR PSMs must have an odd low byte and bit 8
 * clear, per the L2CAP PSM encoding rules.
 */
7808 static bool is_valid_psm(u16 psm, u8 dst_type)
7813 if (bdaddr_type_is_le(dst_type))
7814 return (psm <= 0x00ff);
7816 /* PSM must be odd and lsb of upper byte must be 0 */
7817 return ((psm & 0x0101) == 0x0001);
/* Iterator context for l2cap_chan_list(): the reference channel plus a
 * counter of matching channels.
 * NOTE(review): fragmentary listing - the pid and count members of the
 * struct are elided here.
 */
7820 struct l2cap_chan_data {
7821 struct l2cap_chan *chan;
/* Per-channel callback: count sibling channels that are deferred ECRED
 * connect attempts with the same owner PID and PSM as the reference
 * channel (the reference channel itself is skipped).
 */
7826 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7828 struct l2cap_chan_data *d = data;
7831 if (chan == d->chan)
7834 if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7837 pid = chan->ops->get_peer_pid(chan);
7839 /* Only count deferred channels with the same PID/PSM */
7840 if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7841 chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
/* Initiate an outgoing L2CAP channel connection.
 *
 * Resolves the route to @dst, validates psm/cid/mode combinations
 * (ECRED mode requires the enable_ecred module parameter), records the
 * destination on the channel, creates the HCI connection (LE direct or
 * scan-based connect depending on advertising state, else ACL with the
 * channel's auth type), attaches/creates the l2cap_conn, rate-limits
 * concurrent ECRED connects per PID (L2CAP_ECRED_CONN_SCID_MAX), rejects
 * duplicate DCIDs, binds the channel to the connection, moves it to
 * BT_CONNECT with the ops-provided timeout, releases chan->sport for
 * reuse, and kicks off the connect/security sequence immediately when
 * the link is already up. Returns 0 or a negative errno.
 * NOTE(review): fragmentary listing - error labels, several `goto done`
 * lines and some switch/case framing are elided.
 */
7847 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7848 bdaddr_t *dst, u8 dst_type)
7850 struct l2cap_conn *conn;
7851 struct hci_conn *hcon;
7852 struct hci_dev *hdev;
7855 BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
7856 dst, dst_type, __le16_to_cpu(psm), chan->mode);
7858 hdev = hci_get_route(dst, &chan->src, chan->src_type);
7860 return -EHOSTUNREACH;
7864 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7865 chan->chan_type != L2CAP_CHAN_RAW) {
7870 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7875 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7880 switch (chan->mode) {
7881 case L2CAP_MODE_BASIC:
7883 case L2CAP_MODE_LE_FLOWCTL:
7885 case L2CAP_MODE_EXT_FLOWCTL:
7886 if (!enable_ecred) {
7891 case L2CAP_MODE_ERTM:
7892 case L2CAP_MODE_STREAMING:
7901 switch (chan->state) {
7905 /* Already connecting */
7910 /* Already connected */
7924 /* Set destination address and psm */
7925 bacpy(&chan->dst, dst);
7926 chan->dst_type = dst_type;
7931 if (bdaddr_type_is_le(dst_type)) {
7932 /* Convert from L2CAP channel address type to HCI address type
7934 if (dst_type == BDADDR_LE_PUBLIC)
7935 dst_type = ADDR_LE_DEV_PUBLIC;
7937 dst_type = ADDR_LE_DEV_RANDOM;
7939 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7940 hcon = hci_connect_le(hdev, dst, dst_type, false,
7942 HCI_LE_CONN_TIMEOUT,
7945 hcon = hci_connect_le_scan(hdev, dst, dst_type,
7947 HCI_LE_CONN_TIMEOUT,
7948 CONN_REASON_L2CAP_CHAN);
7951 u8 auth_type = l2cap_get_auth_type(chan);
7952 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
7953 CONN_REASON_L2CAP_CHAN);
7957 err = PTR_ERR(hcon);
7961 conn = l2cap_conn_add(hcon);
7963 hci_conn_drop(hcon);
7968 if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
7969 struct l2cap_chan_data data;
7972 data.pid = chan->ops->get_peer_pid(chan);
7975 l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
7977 /* Check if there isn't too many channels being connected */
7978 if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
7979 hci_conn_drop(hcon);
7985 mutex_lock(&conn->chan_lock);
7986 l2cap_chan_lock(chan);
7988 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7989 hci_conn_drop(hcon);
7994 /* Update source addr of the socket */
7995 bacpy(&chan->src, &hcon->src);
7996 chan->src_type = bdaddr_src_type(hcon);
7998 __l2cap_chan_add(conn, chan);
8000 /* l2cap_chan_add takes its own ref so we can drop this one */
8001 hci_conn_drop(hcon);
8003 l2cap_state_change(chan, BT_CONNECT);
8004 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
8006 /* Release chan->sport so that it can be reused by other
8007 * sockets (as it's only used for listening sockets).
8009 write_lock(&chan_list_lock);
8011 write_unlock(&chan_list_lock);
8013 if (hcon->state == BT_CONNECTED) {
8014 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
8015 __clear_chan_timer(chan);
8016 if (l2cap_chan_check_security(chan, true))
8017 l2cap_state_change(chan, BT_CONNECTED);
8019 l2cap_do_start(chan);
8025 l2cap_chan_unlock(chan);
8026 mutex_unlock(&conn->chan_lock);
8028 hci_dev_unlock(hdev);
8032 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
/* Send an ECRED reconfigure request advertising the channel's current
 * imtu/mps for its scid, using a fresh signalling identifier.
 * NOTE(review): fragmentary listing - the pdu struct wrapping req+scid is
 * partly elided.
 */
8034 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
8036 struct l2cap_conn *conn = chan->conn;
8038 struct l2cap_ecred_reconf_req req;
8042 pdu.req.mtu = cpu_to_le16(chan->imtu);
8043 pdu.req.mps = cpu_to_le16(chan->mps);
8044 pdu.scid = cpu_to_le16(chan->scid);
8046 chan->ident = l2cap_get_ident(conn);
8048 l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
/* Public API: request a channel MTU change. The visible check rejects
 * shrinking below the current imtu; on success the ECRED reconfigure
 * request is sent. NOTE(review): fragmentary listing - the imtu
 * assignment and return lines are elided.
 */
8052 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8054 if (chan->imtu > mtu)
8057 BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
8061 l2cap_ecred_reconfigure(chan);
8066 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* Incoming BR/EDR connection indication: scan listening channels and
 * build HCI link-mode flags. Channels bound to this adapter's address
 * (lm1, "exact" match) take precedence over wildcard BDADDR_ANY
 * listeners (lm2); FLAG_ROLE_SWITCH adds HCI_LM_MASTER.
 * NOTE(review): fragmentary listing - the `exact++` line for the
 * exact-match branch is elided.
 */
8068 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8070 int exact = 0, lm1 = 0, lm2 = 0;
8071 struct l2cap_chan *c;
8073 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8075 /* Find listening sockets and check their link_mode */
8076 read_lock(&chan_list_lock);
8077 list_for_each_entry(c, &chan_list, global_l) {
8078 if (c->state != BT_LISTEN)
8081 if (!bacmp(&c->src, &hdev->bdaddr)) {
8082 lm1 |= HCI_LM_ACCEPT;
8083 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8084 lm1 |= HCI_LM_MASTER;
8086 } else if (!bacmp(&c->src, BDADDR_ANY)) {
8087 lm2 |= HCI_LM_ACCEPT;
8088 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8089 lm2 |= HCI_LM_MASTER;
8092 read_unlock(&chan_list_lock);
8094 return exact ? lm1 : lm2;
8097 /* Find the next fixed channel in BT_LISTEN state, continue iteration
8098 * from an existing channel in the list or from the beginning of the
8099 * global list (by passing NULL as first parameter).
8101 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
8102 struct hci_conn *hcon)
8104 u8 src_type = bdaddr_src_type(hcon);
8106 read_lock(&chan_list_lock);
/* Resume after the previous hit, or start from the list head. */
8109 c = list_next_entry(c, global_l);
8111 c = list_entry(chan_list.next, typeof(*c), global_l);
8113 list_for_each_entry_from(c, &chan_list, global_l) {
8114 if (c->chan_type != L2CAP_CHAN_FIXED)
8116 if (c->state != BT_LISTEN)
8118 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
8120 if (src_type != c->src_type)
/* Match: return it with a reference, unless it is being freed. */
8123 c = l2cap_chan_hold_unless_zero(c);
8124 read_unlock(&chan_list_lock);
8128 read_unlock(&chan_list_lock);
/* HCI connect-complete callback for ACL and LE links: on failure tear
 * the l2cap_conn down with the translated HCI status; on success create
 * the conn, skip reject-listed peers, then walk global fixed channels
 * one at a time (so the per-channel lock can sleep outside the list
 * lock), spawning a child channel on this connection for each listener
 * whose scid is not already claimed by a client channel. Finally signal
 * conn ready. NOTE(review): fragmentary listing - status check framing
 * and the while-loop header around pchan are elided.
 */
8133 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
8135 struct hci_dev *hdev = hcon->hdev;
8136 struct l2cap_conn *conn;
8137 struct l2cap_chan *pchan;
8140 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8143 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
8146 l2cap_conn_del(hcon, bt_to_errno(status));
8150 conn = l2cap_conn_add(hcon);
8154 dst_type = bdaddr_dst_type(hcon);
8156 /* If device is blocked, do not create channels for it */
8157 if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
8160 /* Find fixed channels and notify them of the new connection. We
8161 * use multiple individual lookups, continuing each time where
8162 * we left off, because the list lock would prevent calling the
8163 * potentially sleeping l2cap_chan_lock() function.
8165 pchan = l2cap_global_fixed_chan(NULL, hcon);
8167 struct l2cap_chan *chan, *next;
8169 /* Client fixed channels should override server ones */
8170 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
8173 l2cap_chan_lock(pchan);
8174 chan = pchan->ops->new_connection(pchan);
8176 bacpy(&chan->src, &hcon->src);
8177 bacpy(&chan->dst, &hcon->dst);
8178 chan->src_type = bdaddr_src_type(hcon);
8179 chan->dst_type = dst_type;
8181 __l2cap_chan_add(conn, chan);
8184 l2cap_chan_unlock(pchan);
8186 next = l2cap_global_fixed_chan(pchan, hcon);
8187 l2cap_chan_put(pchan);
8191 l2cap_conn_ready(conn);
/* Disconnect indication from HCI: report the disconnect reason recorded
 * on the l2cap_conn, defaulting to remote-user termination when no conn
 * exists.
 */
8194 int l2cap_disconn_ind(struct hci_conn *hcon)
8196 struct l2cap_conn *conn = hcon->l2cap_data;
8198 BT_DBG("hcon %p", hcon);
8201 return HCI_ERROR_REMOTE_USER_TERM;
8202 return conn->disc_reason;
/* Disconnect confirmation from HCI: tear down the l2cap_conn for ACL/LE
 * links, translating the HCI reason code to an errno.
 */
8205 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8207 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8210 BT_DBG("hcon %p reason %d", hcon, reason);
8212 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel: when
 * encryption is dropped, MEDIUM-security channels get a grace timer
 * while HIGH/FIPS channels are closed immediately; when encryption comes
 * back, the MEDIUM-security grace timer is cancelled.
 */
8215 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8217 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8220 if (encrypt == 0x00) {
8221 if (chan->sec_level == BT_SECURITY_MEDIUM) {
8222 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8223 } else if (chan->sec_level == BT_SECURITY_HIGH ||
8224 chan->sec_level == BT_SECURITY_FIPS)
8225 l2cap_chan_close(chan, ECONNREFUSED);
8227 if (chan->sec_level == BT_SECURITY_MEDIUM)
8228 __clear_chan_timer(chan);
/* HCI security (authentication/encryption) event: walk every channel on
 * the connection under chan_lock. A2MP channels are skipped; a
 * successful encryption bumps the channel sec_level to the link's.
 * Channels with a pending connect are left alone. Already
 * connected/configuring channels are resumed and re-checked against the
 * new encryption state. BT_CONNECT channels proceed (or get a
 * disconnect timer) depending on status and encryption key size;
 * BT_CONNECT2 channels (classic modes only) answer the stored connect
 * request - pending/authorization-pending when setup is deferred,
 * success, or security block - and kick off configuration on success.
 * NOTE(review): fragmentary listing - `continue` statements and some
 * declarations (res/stat/buf) are elided.
 */
8232 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
8234 struct l2cap_conn *conn = hcon->l2cap_data;
8235 struct l2cap_chan *chan;
8240 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
8242 mutex_lock(&conn->chan_lock);
8244 list_for_each_entry(chan, &conn->chan_l, list) {
8245 l2cap_chan_lock(chan);
8247 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
8248 state_to_string(chan->state));
8250 if (chan->scid == L2CAP_CID_A2MP) {
8251 l2cap_chan_unlock(chan);
8255 if (!status && encrypt)
8256 chan->sec_level = hcon->sec_level;
8258 if (!__l2cap_no_conn_pending(chan)) {
8259 l2cap_chan_unlock(chan);
8263 if (!status && (chan->state == BT_CONNECTED ||
8264 chan->state == BT_CONFIG)) {
8265 chan->ops->resume(chan);
8266 l2cap_check_encryption(chan, encrypt);
8267 l2cap_chan_unlock(chan);
8271 if (chan->state == BT_CONNECT) {
8272 if (!status && l2cap_check_enc_key_size(hcon))
8273 l2cap_start_connection(chan);
8275 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8276 } else if (chan->state == BT_CONNECT2 &&
8277 !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
8278 chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
8279 struct l2cap_conn_rsp rsp;
8282 if (!status && l2cap_check_enc_key_size(hcon)) {
8283 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
8284 res = L2CAP_CR_PEND;
8285 stat = L2CAP_CS_AUTHOR_PEND;
8286 chan->ops->defer(chan);
8288 l2cap_state_change(chan, BT_CONFIG);
8289 res = L2CAP_CR_SUCCESS;
8290 stat = L2CAP_CS_NO_INFO;
8293 l2cap_state_change(chan, BT_DISCONN);
8294 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8295 res = L2CAP_CR_SEC_BLOCK;
8296 stat = L2CAP_CS_NO_INFO;
8299 rsp.scid = cpu_to_le16(chan->dcid);
8300 rsp.dcid = cpu_to_le16(chan->scid);
8301 rsp.result = cpu_to_le16(res);
8302 rsp.status = cpu_to_le16(stat);
8303 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
8306 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
8307 res == L2CAP_CR_SUCCESS) {
8309 set_bit(CONF_REQ_SENT, &chan->conf_state);
8310 l2cap_send_cmd(conn, l2cap_get_ident(conn),
8312 l2cap_build_conf_req(chan, buf, sizeof(buf)),
8314 chan->num_conf_req++;
8318 l2cap_chan_unlock(chan);
8321 mutex_unlock(&conn->chan_lock);
8324 /* Append fragment into frame respecting the maximum len of rx_skb */
/* Lazily allocates conn->rx_skb sized for the whole frame, then copies
 * at most min(len, skb->len) bytes from the fragment's linear data and
 * decrements the expected remaining length conn->rx_len.
 * NOTE(review): fragmentary listing - the third parameter declaration
 * and the allocation-failure/return lines are elided.
 */
8325 static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
8328 if (!conn->rx_skb) {
8329 /* Allocate skb for the complete frame (with header) */
8330 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
8337 /* Copy as much as the rx_skb can hold */
8338 len = min_t(u16, len, skb->len);
8339 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
8341 conn->rx_len -= len;
/* Complete the 2-byte L2CAP length field across fragments: append just
 * enough bytes to finish the header, read the frame length, and either
 * set conn->rx_len when the existing rx_skb has room for the rest, or
 * reset conn->rx_skb and reallocate it at the exact expected size.
 * NOTE(review): fragmentary listing - the free of the old rx_skb before
 * reallocation and the final return are elided.
 */
8346 static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
8348 struct sk_buff *rx_skb;
8351 /* Append just enough to complete the header */
8352 len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);
8354 /* If header could not be read just continue */
8355 if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
8358 rx_skb = conn->rx_skb;
8359 len = get_unaligned_le16(rx_skb->data);
8361 /* Check if rx_skb has enough space to received all fragments */
8362 if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
8363 /* Update expected len */
8364 conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
8365 return L2CAP_LEN_SIZE;
8368 /* Reset conn->rx_skb since it will need to be reallocated in order to
8369 * fit all fragments.
8371 conn->rx_skb = NULL;
8373 /* Reallocates rx_skb using the exact expected length */
8374 len = l2cap_recv_frag(conn, rx_skb,
8375 len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
/* Discard any partially reassembled frame and clear the rx state. */
8381 static void l2cap_recv_reset(struct l2cap_conn *conn)
8383 kfree_skb(conn->rx_skb);
8384 conn->rx_skb = NULL;
/* ACL data entry point from HCI: reassemble L2CAP frames across ACL
 * fragments. START(_NO_FLUSH) fragments reset any stale reassembly,
 * buffer the fragment when even the 2-byte length is incomplete (using
 * conn->mtu as an upper bound), deliver immediately when the whole frame
 * arrived in one fragment, and reject over-long frames. Continuation
 * fragments must follow a start, finish the length field if needed,
 * must not exceed the remaining expected length, and once rx_len hits
 * zero the frame is handed to l2cap_recv_frame() (conn->rx_skb is
 * cleared first because ownership transfers). AMP controllers without a
 * conn are ignored. NOTE(review): fragmentary listing - switch framing,
 * `drop:` label and kfree_skb on exit paths are elided.
 */
8388 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
8390 struct l2cap_conn *conn = hcon->l2cap_data;
8393 /* For AMP controller do not create l2cap conn */
8394 if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
8398 conn = l2cap_conn_add(hcon);
8403 BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);
8407 case ACL_START_NO_FLUSH:
8410 BT_ERR("Unexpected start frame (len %d)", skb->len);
8411 l2cap_recv_reset(conn);
8412 l2cap_conn_unreliable(conn, ECOMM);
8415 /* Start fragment may not contain the L2CAP length so just
8416 * copy the initial byte when that happens and use conn->mtu as
8419 if (skb->len < L2CAP_LEN_SIZE) {
8420 if (l2cap_recv_frag(conn, skb, conn->mtu) < 0)
8425 len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;
8427 if (len == skb->len) {
8428 /* Complete frame received */
8429 l2cap_recv_frame(conn, skb);
8433 BT_DBG("Start: total len %d, frag len %u", len, skb->len);
8435 if (skb->len > len) {
8436 BT_ERR("Frame is too long (len %u, expected len %d)",
8438 l2cap_conn_unreliable(conn, ECOMM);
8442 /* Append fragment into frame (with header) */
8443 if (l2cap_recv_frag(conn, skb, len) < 0)
8449 BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);
8451 if (!conn->rx_skb) {
8452 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
8453 l2cap_conn_unreliable(conn, ECOMM);
8457 /* Complete the L2CAP length if it has not been read */
8458 if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
8459 if (l2cap_recv_len(conn, skb) < 0) {
8460 l2cap_conn_unreliable(conn, ECOMM);
8464 /* Header still could not be read just continue */
8465 if (conn->rx_skb->len < L2CAP_LEN_SIZE)
8469 if (skb->len > conn->rx_len) {
8470 BT_ERR("Fragment is too long (len %u, expected %u)",
8471 skb->len, conn->rx_len);
8472 l2cap_recv_reset(conn);
8473 l2cap_conn_unreliable(conn, ECOMM);
8477 /* Append fragment into frame (with header) */
8478 l2cap_recv_frag(conn, skb, skb->len);
8480 if (!conn->rx_len) {
8481 /* Complete frame received. l2cap_recv_frame
8482 * takes ownership of the skb so set the global
8483 * rx_skb pointer to NULL first.
8485 struct sk_buff *rx_skb = conn->rx_skb;
8486 conn->rx_skb = NULL;
8487 l2cap_recv_frame(conn, rx_skb);
/* HCI callback registration: hooks L2CAP into connect, disconnect and
 * security events from the HCI core.
 */
8496 static struct hci_cb l2cap_cb = {
8498 .connect_cfm = l2cap_connect_cfm,
8499 .disconn_cfm = l2cap_disconn_cfm,
8500 .security_cfm = l2cap_security_cfm,
/* debugfs "l2cap" file: dump every global channel's addresses, state,
 * PSM, CIDs, MTUs, security level and mode, one line per channel, under
 * the chan_list read lock.
 */
8503 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8505 struct l2cap_chan *c;
8507 read_lock(&chan_list_lock);
8509 list_for_each_entry(c, &chan_list, global_l) {
8510 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8511 &c->src, c->src_type, &c->dst, c->dst_type,
8512 c->state, __le16_to_cpu(c->psm),
8513 c->scid, c->dcid, c->imtu, c->omtu,
8514 c->sec_level, c->mode);
8517 read_unlock(&chan_list_lock);
8522 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
/* Dentry for the debugfs file created in l2cap_init(). */
8524 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and HCI callbacks, then
 * create the debugfs file (skipped when bt_debugfs is unavailable).
 */
8526 int __init l2cap_init(void)
8530 err = l2cap_init_sockets();
8534 hci_register_cb(&l2cap_cb);
8536 if (IS_ERR_OR_NULL(bt_debugfs))
8539 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8540 NULL, &l2cap_debugfs_fops);
/* Module exit: undo l2cap_init() in reverse order - debugfs file, HCI
 * callbacks, socket layer.
 */
8545 void l2cap_exit(void)
8547 debugfs_remove(l2cap_debugfs);
8548 hci_unregister_cb(&l2cap_cb);
8549 l2cap_cleanup_sockets();
/* Runtime-tunable module parameters (both declared elsewhere in this
 * file): disable_ertm turns off enhanced retransmission mode,
 * enable_ecred opts in to enhanced credit-based flow control.
 */
8552 module_param(disable_ertm, bool, 0644);
8553 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
8555 module_param(enable_ecred, bool, 0644);
8556 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");