2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
/* Feature mask advertised in L2CAP information responses; only the
 * fixed-channel feature bit is set statically here. */
44 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed channel support bitmap: the L2CAP signalling channel bit only. */
45 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
/* Global list of every l2cap_chan (linked via chan->global_l),
 * protected by chan_list_lock. */
47 static LIST_HEAD(chan_list);
48 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for signalling helpers defined later in the file.
 * NOTE(review): some declaration lines appear elided from this excerpt
 * (e.g. the tail of the l2cap_send_cmd prototype). */
50 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
51 u8 code, u8 ident, u16 dlen, void *data);
52 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
55 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
56 struct l2cap_chan *chan, int err);
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
61 /* ---- L2CAP channels ---- */
/* Walk conn->chan_l looking for the channel whose destination CID matches.
 * Caller is expected to hold conn->chan_lock. Body partially elided in
 * this excerpt (match test and return are not visible). */
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
68 list_for_each_entry(c, &conn->chan_l, list) {
/* Same walk as above but matching on the source CID.
 * Caller is expected to hold conn->chan_lock. */
75 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
80 list_for_each_entry(c, &conn->chan_l, list) {
87 /* Find channel with given SCID.
88 * Returns locked channel. */
89 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
/* Takes conn->chan_lock around the unlocked lookup; the per-channel
 * locking implied by the comment above is in elided lines. */
94 mutex_lock(&conn->chan_lock);
95 c = __l2cap_get_chan_by_scid(conn, cid);
98 mutex_unlock(&conn->chan_lock);
/* Find the channel whose pending signalling ident matches.
 * Caller is expected to hold conn->chan_lock. */
103 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
106 struct l2cap_chan *c;
108 list_for_each_entry(c, &conn->chan_l, list) {
109 if (c->ident == ident)
/* Search the global channel list for a channel bound to the given PSM
 * (sport) and local source address. Caller must hold chan_list_lock. */
115 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
117 struct l2cap_chan *c;
119 list_for_each_entry(c, &chan_list, global_l) {
120 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM for the given source address.
 * If a PSM is supplied, it must not already be bound for this address;
 * otherwise (elided branch) a free dynamic PSM in 0x1001..0x10ff (odd
 * values only, stepping by 2) is auto-allocated.
 * Error/return paths are in lines elided from this excerpt. */
126 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
130 write_lock(&chan_list_lock);
/* Requested PSM already taken for this source address. */
132 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
145 for (p = 0x1001; p < 0x1100; p += 2)
146 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
147 chan->psm = cpu_to_le16(p);
148 chan->sport = cpu_to_le16(p);
155 write_unlock(&chan_list_lock);
/* Record a fixed source CID on the channel under the global list lock.
 * The assignment itself sits in an elided line. */
159 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
161 write_lock(&chan_list_lock);
165 write_unlock(&chan_list_lock);
/* Allocate the first unused dynamic source CID on this connection by
 * scanning [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). The return
 * statements are in elided lines; presumably 0 signals exhaustion —
 * TODO confirm against the full source. */
170 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
172 u16 cid = L2CAP_CID_DYN_START;
174 for (; cid < L2CAP_CID_DYN_END; cid++) {
175 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Set chan->state and notify the channel ops callback; the assignment
 * line itself is elided here. Lock-free variant — callers are expected
 * to hold the needed locks (locked variant below). */
182 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
184 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
185 state_to_string(state));
188 chan->ops->state_change(chan, state);
/* Locked wrapper: the socket locking around the __ variant is in
 * elided lines (presumably lock_sock/release_sock on chan->sk). */
191 static void l2cap_state_change(struct l2cap_chan *chan, int state)
193 struct sock *sk = chan->sk;
196 __l2cap_state_change(chan, state);
/* Record an error on the channel's socket (sk_err assignment elided). */
200 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
202 struct sock *sk = chan->sk;
/* Locked wrapper for __l2cap_chan_set_err (socket lock lines elided). */
207 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
209 struct sock *sk = chan->sk;
212 __l2cap_chan_set_err(chan, err);
/* Arm the ERTM retransmission timer, but only when the monitor timer is
 * not already pending and a retransmission timeout is configured. */
216 static void __set_retrans_timer(struct l2cap_chan *chan)
218 if (!delayed_work_pending(&chan->monitor_timer) &&
219 chan->retrans_timeout) {
220 l2cap_set_timer(chan, &chan->retrans_timer,
221 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the monitor timer; it supersedes any pending retransmission
 * timer, which is cleared first. */
225 static void __set_monitor_timer(struct l2cap_chan *chan)
227 __clear_retrans_timer(chan);
228 if (chan->monitor_timeout) {
229 l2cap_set_timer(chan, &chan->monitor_timer,
230 msecs_to_jiffies(chan->monitor_timeout));
/* Linear scan of an skb queue for the frame carrying txseq == seq;
 * the return statements are in elided lines. */
234 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
239 skb_queue_walk(head, skb) {
240 if (bt_cb(skb)->control.txseq == seq)
247 /* ---- L2CAP sequence number lists ---- */
249 /* For ERTM, ordered lists of sequence numbers must be tracked for
250 * SREJ requests that are received and for frames that are to be
251 * retransmitted. These seq_list functions implement a singly-linked
252 * list in an array, where membership in the list can also be checked
253 * in constant time. Items can also be added to the tail of the list
254 * and removed from the head in constant time, without further memory
258 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
260 size_t alloc_size, i;
262 /* Allocated size is a power of 2 to map sequence numbers
263 * (which may be up to 14 bits) in to a smaller array that is
264 * sized for the negotiated ERTM transmit windows.
266 alloc_size = roundup_pow_of_two(size);
/* NOTE(review): the kmalloc NULL check is in an elided line;
 * initialization below runs only on success in the full source. */
268 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
/* mask is valid because alloc_size is a power of two. */
272 seq_list->mask = alloc_size - 1;
273 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
274 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
275 for (i = 0; i < alloc_size; i++)
276 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the backing array allocated by l2cap_seq_list_init. */
281 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
283 kfree(seq_list->list);
286 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
289 /* Constant-time check for list membership */
290 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove a sequence number from the list. Removing the head is O(1);
 * removing an arbitrary member walks the linked chain. Returns the
 * removed sequence number, or L2CAP_SEQ_LIST_CLEAR when the list is
 * empty or seq is not present. */
293 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
295 u16 mask = seq_list->mask;
297 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
298 /* In case someone tries to pop the head of an empty list */
299 return L2CAP_SEQ_LIST_CLEAR;
300 } else if (seq_list->head == seq) {
301 /* Head can be removed in constant time */
302 seq_list->head = seq_list->list[seq & mask];
303 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* Removed the last element: reset to the empty state. */
305 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
306 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
307 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
310 /* Walk the list to find the sequence number */
311 u16 prev = seq_list->head;
312 while (seq_list->list[prev & mask] != seq) {
313 prev = seq_list->list[prev & mask];
/* Hit the tail sentinel without finding seq: not in the list. */
314 if (prev == L2CAP_SEQ_LIST_TAIL)
315 return L2CAP_SEQ_LIST_CLEAR;
318 /* Unlink the number from the list and clear it */
319 seq_list->list[prev & mask] = seq_list->list[seq & mask];
320 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
321 if (seq_list->tail == seq)
322 seq_list->tail = prev;
/* Pop the head element (delegates to the O(1) head-removal path). */
327 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
329 /* Remove the head in constant time */
330 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Reset the list to empty: clear every slot plus head and tail.
 * Early-outs when already empty (head == CLEAR). */
333 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
337 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
340 for (i = 0; i <= seq_list->mask; i++)
341 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
343 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
344 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append seq at the tail in O(1). Duplicate appends are ignored
 * (slot already non-CLEAR). An empty list also gets its head set. */
347 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
349 u16 mask = seq_list->mask;
351 /* All appends happen in constant time */
/* Already a member — nothing to do (return is in an elided line). */
353 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
356 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
357 seq_list->head = seq;
359 seq_list->list[seq_list->tail & mask] = seq;
361 seq_list->tail = seq;
362 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: picks a close reason from
 * the channel state, closes the channel, then drops the reference taken
 * when the timer was armed. Runs with conn->chan_lock and the channel
 * lock held around the close. */
365 static void l2cap_chan_timeout(struct work_struct *work)
367 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
369 struct l2cap_conn *conn = chan->conn;
372 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
374 mutex_lock(&conn->chan_lock);
375 l2cap_chan_lock(chan);
/* NOTE(review): the default-reason branch (else case) is elided here. */
377 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
378 reason = ECONNREFUSED;
379 else if (chan->state == BT_CONNECT &&
380 chan->sec_level != BT_SECURITY_SDP)
381 reason = ECONNREFUSED;
385 l2cap_chan_close(chan, reason);
387 l2cap_chan_unlock(chan);
/* ops->close is called outside the channel lock but inside chan_lock. */
389 chan->ops->close(chan);
390 mutex_unlock(&conn->chan_lock);
/* Balances the hold taken when the timer was scheduled. */
392 l2cap_chan_put(chan);
/* Allocate and initialize a new channel: zeroed, ref-counted, linked
 * into the global channel list, with its timeout work initialized.
 * Returns the new channel (NULL-check of kzalloc is in an elided line). */
395 struct l2cap_chan *l2cap_chan_create(void)
397 struct l2cap_chan *chan;
399 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
403 mutex_init(&chan->lock);
405 write_lock(&chan_list_lock);
406 list_add(&chan->global_l, &chan_list);
407 write_unlock(&chan_list_lock);
409 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
411 chan->state = BT_OPEN;
413 kref_init(&chan->kref);
415 /* This flag is cleared in l2cap_chan_ready() */
416 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
418 BT_DBG("chan %p", chan);
/* kref release callback: unlink from the global list and free.
 * The kfree itself is in an elided line. */
423 static void l2cap_chan_destroy(struct kref *kref)
425 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
427 BT_DBG("chan %p", chan);
429 write_lock(&chan_list_lock);
430 list_del(&chan->global_l);
431 write_unlock(&chan_list_lock);
/* Take a reference (kref_get is in an elided line). */
436 void l2cap_chan_hold(struct l2cap_chan *c)
438 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
/* Drop a reference; frees via l2cap_chan_destroy on last put. */
443 void l2cap_chan_put(struct l2cap_chan *c)
445 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
447 kref_put(&c->kref, l2cap_chan_destroy);
/* Install protocol defaults on a freshly created channel: CRC16 FCS,
 * default ERTM max transmit / window sizes, lowest security level and
 * force-active radio policy. */
450 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
452 chan->fcs = L2CAP_FCS_CRC16;
453 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
454 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
455 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
456 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
457 chan->sec_level = BT_SECURITY_LOW;
459 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach a channel to a connection: assign CIDs/MTUs according to the
 * channel type, seed default extended-flow-spec values, take a channel
 * reference and link it into conn->chan_l.
 * Caller must hold conn->chan_lock (see l2cap_chan_add below). */
462 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
464 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
465 __le16_to_cpu(chan->psm), chan->dcid);
466 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
471 switch (chan->chan_type) {
472 case L2CAP_CHAN_CONN_ORIENTED:
/* LE links use the fixed LE data channel CID on both ends. */
473 if (conn->hcon->type == LE_LINK) {
475 chan->omtu = L2CAP_DEFAULT_MTU;
476 chan->scid = L2CAP_CID_LE_DATA;
477 chan->dcid = L2CAP_CID_LE_DATA;
479 /* Alloc CID for connection-oriented socket */
480 chan->scid = l2cap_alloc_cid(conn);
481 chan->omtu = L2CAP_DEFAULT_MTU;
485 case L2CAP_CHAN_CONN_LESS:
486 /* Connectionless socket */
487 chan->scid = L2CAP_CID_CONN_LESS;
488 chan->dcid = L2CAP_CID_CONN_LESS;
489 chan->omtu = L2CAP_DEFAULT_MTU;
492 case L2CAP_CHAN_CONN_FIX_A2MP:
493 chan->scid = L2CAP_CID_A2MP;
494 chan->dcid = L2CAP_CID_A2MP;
495 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
496 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
/* Default case (label elided): raw sockets get the signalling CID. */
500 /* Raw socket can send/recv signalling messages only */
501 chan->scid = L2CAP_CID_SIGNALING;
502 chan->dcid = L2CAP_CID_SIGNALING;
503 chan->omtu = L2CAP_DEFAULT_MTU;
/* Best-effort defaults for the extended flow specification. */
506 chan->local_id = L2CAP_BESTEFFORT_ID;
507 chan->local_stype = L2CAP_SERV_BESTEFFORT;
508 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
509 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
510 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
511 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
/* Reference held for the connection's channel list; dropped in
 * l2cap_chan_del(). */
513 l2cap_chan_hold(chan);
515 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add. */
518 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
520 mutex_lock(&conn->chan_lock);
521 __l2cap_chan_add(conn, chan);
522 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection and tear down per-mode state:
 * stop the channel timer, unlink from conn->chan_l, drop the list
 * reference, release the hcon reference (non-A2MP only), notify the
 * owner via ops->teardown, and purge ERTM/streaming queues. */
525 void l2cap_chan_del(struct l2cap_chan *chan, int err)
527 struct l2cap_conn *conn = chan->conn;
529 __clear_chan_timer(chan);
531 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
534 /* Delete from channel list */
535 list_del(&chan->list);
/* Balances the hold taken in __l2cap_chan_add(). */
537 l2cap_chan_put(chan);
/* A2MP channels do not own an hcon reference. */
541 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
542 hci_conn_put(conn->hcon);
545 if (chan->ops->teardown)
546 chan->ops->teardown(chan, err);
/* Configuration never completed: nothing mode-specific to free
 * (the early return is in an elided line). */
548 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
552 case L2CAP_MODE_BASIC:
555 case L2CAP_MODE_ERTM:
556 __clear_retrans_timer(chan);
557 __clear_monitor_timer(chan);
558 __clear_ack_timer(chan);
560 skb_queue_purge(&chan->srej_q);
562 l2cap_seq_list_free(&chan->srej_list);
563 l2cap_seq_list_free(&chan->retrans_list);
/* fallthrough (per elided original) into streaming-mode cleanup. */
567 case L2CAP_MODE_STREAMING:
568 skb_queue_purge(&chan->tx_q);
/* Close a channel according to its current state: send a disconnect
 * request from connected/config states, answer a pending connect
 * request with a rejection from BT_CONNECT2, or simply delete/tear
 * down otherwise. The case labels are in elided lines. */
575 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
577 struct l2cap_conn *conn = chan->conn;
578 struct sock *sk = chan->sk;
580 BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
583 switch (chan->state) {
/* Listening state (label elided): just tear down. */
585 if (chan->ops->teardown)
586 chan->ops->teardown(chan, 0);
/* Connected/config on ACL: request disconnection and re-arm the
 * channel timer to bound the wait for the response. */
591 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
592 conn->hcon->type == ACL_LINK) {
593 __set_chan_timer(chan, sk->sk_sndtimeo);
594 l2cap_send_disconn_req(conn, chan, reason);
596 l2cap_chan_del(chan, reason);
/* BT_CONNECT2 (label elided): reject the pending connect request. */
600 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
601 conn->hcon->type == ACL_LINK) {
602 struct l2cap_conn_rsp rsp;
/* Deferred-setup sockets report a security block, others bad PSM. */
605 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
606 result = L2CAP_CR_SEC_BLOCK;
608 result = L2CAP_CR_BAD_PSM;
609 l2cap_state_change(chan, BT_DISCONN);
/* Note: scid/dcid are swapped in the response, as the spec
 * defines them from the requester's point of view. */
611 rsp.scid = cpu_to_le16(chan->dcid);
612 rsp.dcid = cpu_to_le16(chan->scid);
613 rsp.result = cpu_to_le16(result);
614 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
615 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
619 l2cap_chan_del(chan, reason);
624 l2cap_chan_del(chan, reason);
/* Default case (label elided): not yet attached, only tear down. */
628 if (chan->ops->teardown)
629 chan->ops->teardown(chan, 0);
/* Map the channel type / PSM / security level onto an HCI
 * authentication requirement:
 *  - raw channels ask for dedicated bonding (MITM at high security),
 *  - the SDP PSM never bonds (and low security is downgraded to
 *    BT_SECURITY_SDP as a side effect),
 *  - everything else uses general bonding. */
634 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
636 if (chan->chan_type == L2CAP_CHAN_RAW) {
637 switch (chan->sec_level) {
638 case BT_SECURITY_HIGH:
639 return HCI_AT_DEDICATED_BONDING_MITM;
640 case BT_SECURITY_MEDIUM:
641 return HCI_AT_DEDICATED_BONDING;
643 return HCI_AT_NO_BONDING;
645 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
646 if (chan->sec_level == BT_SECURITY_LOW)
647 chan->sec_level = BT_SECURITY_SDP;
649 if (chan->sec_level == BT_SECURITY_HIGH)
650 return HCI_AT_NO_BONDING_MITM;
652 return HCI_AT_NO_BONDING;
654 switch (chan->sec_level) {
655 case BT_SECURITY_HIGH:
656 return HCI_AT_GENERAL_BONDING_MITM;
657 case BT_SECURITY_MEDIUM:
658 return HCI_AT_GENERAL_BONDING;
660 return HCI_AT_NO_BONDING;
665 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level using the
 * auth type derived above; returns hci_conn_security()'s verdict. */
666 int l2cap_chan_check_security(struct l2cap_chan *chan)
668 struct l2cap_conn *conn = chan->conn;
671 auth_type = l2cap_get_auth_type(chan);
673 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier for this connection,
 * wrapping within the kernel-reserved 1..128 range (wrap assignment is
 * in an elided line); serialized by conn->lock. */
676 static u8 l2cap_get_ident(struct l2cap_conn *conn)
680 /* Get next available identificator.
681 * 1 - 128 are used by kernel.
682 * 682 129 - 199 are reserved.
683 * 200 - 254 are used by utilities like l2ping, etc.
686 spin_lock(&conn->lock);
688 if (++conn->tx_ident > 128)
693 spin_unlock(&conn->lock);
/* Build a signalling command PDU and push it out the HCI channel at
 * maximum priority. Uses ACL_START_NO_FLUSH when the controller
 * supports non-flushable packets. The parameter tail and the
 * build-failure return are in elided lines. */
698 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
701 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
704 BT_DBG("code 0x%2.2x", code);
709 if (lmp_no_flush_capable(conn->hcon->hdev))
710 flags = ACL_START_NO_FLUSH;
714 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
715 skb->priority = HCI_PRIO_MAX;
717 hci_send_acl(conn->hchan, skb, flags);
/* Transmit a data skb on the channel's HCI channel, honouring the
 * channel's flushable and force-active flags. */
720 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
722 struct hci_conn *hcon = chan->conn->hcon;
725 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
/* Non-flushable only when the channel is not marked flushable AND the
 * controller supports it. */
728 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
729 lmp_no_flush_capable(hcon->hdev))
730 flags = ACL_START_NO_FLUSH;
734 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
735 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced control field into l2cap_ctrl. S-frames
 * (FRAME_TYPE bit set) carry poll/supervise; I-frames carry SAR and
 * txseq. The sframe flag assignments are in elided lines. */
738 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
740 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
741 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
743 if (enh & L2CAP_CTRL_FRAME_TYPE) {
746 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
747 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
754 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
755 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Same decoding for the 32-bit extended control field. */
762 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
764 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
765 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
767 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
770 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
771 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
778 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
779 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Pull and decode the control field from the front of an incoming skb,
 * choosing 32-bit vs 16-bit layout from the channel's EXT_CTRL flag. */
786 static inline void __unpack_control(struct l2cap_chan *chan,
789 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
790 __unpack_extended_control(get_unaligned_le32(skb->data),
791 &bt_cb(skb)->control);
792 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
794 __unpack_enhanced_control(get_unaligned_le16(skb->data),
795 &bt_cb(skb)->control);
796 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode an l2cap_ctrl into the 32-bit extended control field; inverse
 * of __unpack_extended_control. The return is in an elided line. */
800 static u32 __pack_extended_control(struct l2cap_ctrl *control)
804 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
805 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
807 if (control->sframe) {
808 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
809 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
810 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
812 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
813 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Same encoding for the 16-bit enhanced control field. */
819 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
823 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
824 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
826 if (control->sframe) {
827 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
828 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
829 packed |= L2CAP_CTRL_FRAME_TYPE;
831 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
832 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into the skb just past the basic
 * L2CAP header, in the width selected by the channel's EXT_CTRL flag. */
838 static inline void __pack_control(struct l2cap_chan *chan,
839 struct l2cap_ctrl *control,
842 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
843 put_unaligned_le32(__pack_extended_control(control),
844 skb->data + L2CAP_HDR_SIZE);
846 put_unaligned_le16(__pack_enhanced_control(control),
847 skb->data + L2CAP_HDR_SIZE);
/* Header size of an ERTM PDU: extended vs enhanced control layout. */
851 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
853 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
854 return L2CAP_EXT_HDR_SIZE;
856 return L2CAP_ENH_HDR_SIZE;
/* Build a supervisory (S-frame) PDU: basic L2CAP header, the already
 * packed control field (16 or 32 bit per EXT_CTRL), and an optional
 * CRC16 FCS over the assembled bytes. Returns the skb at maximum
 * priority or ERR_PTR(-ENOMEM) on allocation failure. */
859 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
863 struct l2cap_hdr *lh;
864 int hlen = __ertm_hdr_size(chan);
866 if (chan->fcs == L2CAP_FCS_CRC16)
867 hlen += L2CAP_FCS_SIZE;
869 skb = bt_skb_alloc(hlen, GFP_KERNEL);
872 return ERR_PTR(-ENOMEM);
/* S-frames carry no payload: len covers only control (+FCS). */
874 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
875 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
876 lh->cid = cpu_to_le16(chan->dcid);
878 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
879 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
881 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
/* FCS is computed over everything already in the skb. */
883 if (chan->fcs == L2CAP_FCS_CRC16) {
884 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
885 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
888 skb->priority = HCI_PRIO_MAX;
/* Send an S-frame described by *control, maintaining ERTM bookkeeping:
 * RR clears / RNR sets the RNR-sent state, and any non-SREJ frame acks
 * up to reqseq and stops the ack timer. Ignores non-S-frame requests
 * (early return in an elided line). */
892 static void l2cap_send_sframe(struct l2cap_chan *chan,
893 struct l2cap_ctrl *control)
898 BT_DBG("chan %p, control %p", chan, control);
900 if (!control->sframe)
/* Pending F-bit is folded into this frame (condition tail elided). */
903 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
907 if (control->super == L2CAP_SUPER_RR)
908 clear_bit(CONN_RNR_SENT, &chan->conn_state);
909 else if (control->super == L2CAP_SUPER_RNR)
910 set_bit(CONN_RNR_SENT, &chan->conn_state);
912 if (control->super != L2CAP_SUPER_SREJ) {
913 chan->last_acked_seq = control->reqseq;
914 __clear_ack_timer(chan);
917 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
918 control->final, control->poll, control->super);
920 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
921 control_field = __pack_extended_control(control);
923 control_field = __pack_enhanced_control(control);
/* NOTE(review): the IS_ERR check on the built skb is elided here. */
925 skb = l2cap_create_sframe_pdu(chan, control_field);
927 l2cap_do_send(chan, skb);
/* Send a receiver-ready or receiver-not-ready S-frame acknowledging
 * buffer_seq; RNR is chosen when the local side is busy. The poll-bit
 * assignment is in an elided line. */
930 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
932 struct l2cap_ctrl control;
934 BT_DBG("chan %p, poll %d", chan, poll);
936 memset(&control, 0, sizeof(control));
940 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
941 control.super = L2CAP_SUPER_RNR;
943 control.super = L2CAP_SUPER_RR;
945 control.reqseq = chan->buffer_seq;
946 l2cap_send_sframe(chan, &control);
/* True while no connect request is outstanding on this channel. */
949 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
951 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Whether this channel may be moved to an AMP controller: requires the
 * AMP-preferred channel policy and the remote advertising the A2MP
 * fixed channel. Return statements are in elided lines. */
954 static bool __amp_capable(struct l2cap_chan *chan)
956 struct l2cap_conn *conn = chan->conn;
959 chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
960 conn->fixed_chan_mask & L2CAP_FC_A2MP)
/* Send an L2CAP connect request for this channel, stamping it with a
 * fresh command ident and marking the connect as pending. The psm
 * field assignment is in an elided line. */
966 void l2cap_send_conn_req(struct l2cap_chan *chan)
968 struct l2cap_conn *conn = chan->conn;
969 struct l2cap_conn_req req;
971 req.scid = cpu_to_le16(chan->scid);
974 chan->ident = l2cap_get_ident(conn);
976 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
978 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Transition a fully configured channel to BT_CONNECTED: wipe all conf
 * flags, stop the channel timer and notify the owner. */
981 static void l2cap_chan_ready(struct l2cap_chan *chan)
983 /* This clears all conf flags, including CONF_NOT_COMPLETE */
984 chan->conf_state = 0;
985 __clear_chan_timer(chan);
987 chan->state = BT_CONNECTED;
989 chan->ops->ready(chan);
/* Begin connection establishment: AMP-capable channels first discover
 * AMP controllers, everything else sends the connect request directly. */
992 static void l2cap_start_connection(struct l2cap_chan *chan)
994 if (__amp_capable(chan)) {
995 BT_DBG("chan %p AMP capable: discover AMPs", chan);
996 a2mp_discover_amp(chan);
998 l2cap_send_conn_req(chan);
/* Drive a channel towards connected: LE links are ready immediately;
 * on BR/EDR, start the connection once the feature-mask exchange is
 * done and security passes, otherwise first issue an information
 * request (guarded by the info timer). */
1002 static void l2cap_do_start(struct l2cap_chan *chan)
1004 struct l2cap_conn *conn = chan->conn;
1006 if (conn->hcon->type == LE_LINK) {
1007 l2cap_chan_ready(chan);
1011 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature request sent but answer not yet in: wait
 * (the return is in an elided line). */
1012 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1015 if (l2cap_chan_check_security(chan) &&
1016 __l2cap_no_conn_pending(chan)) {
1017 l2cap_start_connection(chan);
1020 struct l2cap_info_req req;
1021 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
1023 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1024 conn->info_ident = l2cap_get_ident(conn);
1026 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1028 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
/* Whether a channel mode is supported by both the local feature mask
 * (ERTM/streaming presumably force-enabled here — the guarding
 * condition line is elided) and the remote feature mask. */
1033 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1035 u32 local_feat_mask = l2cap_feat_mask;
1037 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1040 case L2CAP_MODE_ERTM:
1041 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1042 case L2CAP_MODE_STREAMING:
1043 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a disconnect request for the channel and move it to BT_DISCONN
 * with the given error. ERTM timers are stopped first; A2MP channels
 * skip the wire request and only change state. Socket locking around
 * the state/err updates is in elided lines. */
1049 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
1050 struct l2cap_chan *chan, int err)
1052 struct sock *sk = chan->sk;
1053 struct l2cap_disconn_req req;
1058 if (chan->mode == L2CAP_MODE_ERTM) {
1059 __clear_retrans_timer(chan);
1060 __clear_monitor_timer(chan);
1061 __clear_ack_timer(chan);
/* A2MP has no disconnect PDU: just flip the state and return
 * (the return is in an elided line). */
1064 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1065 __l2cap_state_change(chan, BT_DISCONN);
1069 req.dcid = cpu_to_le16(chan->dcid);
1070 req.scid = cpu_to_le16(chan->scid);
1071 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1075 __l2cap_state_change(chan, BT_DISCONN);
1076 __l2cap_chan_set_err(chan, err);
1080 /* ---- L2CAP connections ---- */
/* Kick every connection-oriented channel on this link forward after the
 * feature exchange completes: BT_CONNECT channels (re)start connection
 * establishment (or are closed when their mode is unsupported by a
 * state-2 device); BT_CONNECT2 channels get their pending connect
 * request answered (success, authorization-pending for deferred setup,
 * or authentication-pending when security is not yet satisfied). */
1081 static void l2cap_conn_start(struct l2cap_conn *conn)
1083 struct l2cap_chan *chan, *tmp;
1085 BT_DBG("conn %p", conn);
1087 mutex_lock(&conn->chan_lock);
1089 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1090 struct sock *sk = chan->sk;
1092 l2cap_chan_lock(chan);
1094 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1095 l2cap_chan_unlock(chan);
1099 if (chan->state == BT_CONNECT) {
/* Security not settled or a connect already pending:
 * leave this channel for later. */
1100 if (!l2cap_chan_check_security(chan) ||
1101 !__l2cap_no_conn_pending(chan)) {
1102 l2cap_chan_unlock(chan);
/* State-2 device cannot fall back to another mode: close. */
1106 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1107 && test_bit(CONF_STATE2_DEVICE,
1108 &chan->conf_state)) {
1109 l2cap_chan_close(chan, ECONNRESET);
1110 l2cap_chan_unlock(chan);
1114 l2cap_start_connection(chan);
1116 } else if (chan->state == BT_CONNECT2) {
1117 struct l2cap_conn_rsp rsp;
/* scid/dcid swapped: the response is phrased from the
 * remote requester's point of view. */
1119 rsp.scid = cpu_to_le16(chan->dcid);
1120 rsp.dcid = cpu_to_le16(chan->scid);
1122 if (l2cap_chan_check_security(chan)) {
1124 if (test_bit(BT_SK_DEFER_SETUP,
1125 &bt_sk(sk)->flags)) {
1126 struct sock *parent = bt_sk(sk)->parent;
1127 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1128 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
/* Wake the listening socket so userspace can accept. */
1130 parent->sk_data_ready(parent, 0);
1133 __l2cap_state_change(chan, BT_CONFIG);
1134 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1135 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1139 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1140 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1143 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only start configuration once, and only on success. */
1146 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1147 rsp.result != L2CAP_CR_SUCCESS) {
1148 l2cap_chan_unlock(chan);
1152 set_bit(CONF_REQ_SENT, &chan->conf_state);
1153 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1154 l2cap_build_conf_req(chan, buf), buf);
1155 chan->num_conf_req++;
1158 l2cap_chan_unlock(chan);
1161 mutex_unlock(&conn->chan_lock);
1164 /* Find socket with cid and source/destination bdaddr.
1165 * Returns closest match, locked.
/* Exact src+dst match wins immediately; otherwise the best wildcard
 * (BDADDR_ANY) match is remembered in c1 and returned (the c1
 * assignment and final return are in elided lines). */
1167 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1171 struct l2cap_chan *c, *c1 = NULL;
1173 read_lock(&chan_list_lock);
1175 list_for_each_entry(c, &chan_list, global_l) {
1176 struct sock *sk = c->sk;
/* Skip channels not in the requested state (0 matches any). */
1178 if (state && c->state != state)
1181 if (c->scid == cid) {
1182 int src_match, dst_match;
1183 int src_any, dst_any;
1186 src_match = !bacmp(&bt_sk(sk)->src, src);
1187 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1188 if (src_match && dst_match) {
1189 read_unlock(&chan_list_lock);
1194 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1195 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1196 if ((src_match && dst_any) || (src_any && dst_match) ||
1197 (src_any && dst_any))
1202 read_unlock(&chan_list_lock);
/* Incoming LE link became ready: if a socket is listening on the LE
 * data CID, spawn a child channel for it, bind addresses from the
 * connection, enqueue it on the listener for accept() and mark it
 * ready. NULL checks and the parent socket locking are in elided
 * lines. */
1207 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1209 struct sock *parent, *sk;
1210 struct l2cap_chan *chan, *pchan;
1214 /* Check if we have socket listening on cid */
1215 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1216 conn->src, conn->dst);
1224 chan = pchan->ops->new_connection(pchan);
/* Keep the ACL alive while the LE channel exists. */
1230 hci_conn_hold(conn->hcon);
1231 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
1233 bacpy(&bt_sk(sk)->src, conn->src);
1234 bacpy(&bt_sk(sk)->dst, conn->dst);
1236 bt_accept_enqueue(parent, sk);
1238 l2cap_chan_add(conn, chan);
1240 l2cap_chan_ready(chan);
1243 release_sock(parent);
/* HCI link came up: handle LE accept/security, then walk all channels —
 * LE channels become ready once SMP security passes, non-connection-
 * oriented channels go straight to BT_CONNECTED, and BT_CONNECT
 * channels resume establishment via l2cap_do_start(). */
1246 static void l2cap_conn_ready(struct l2cap_conn *conn)
1248 struct l2cap_chan *chan;
1249 struct hci_conn *hcon = conn->hcon;
1251 BT_DBG("conn %p", conn);
/* Incoming LE link: try to hand it to a listener. */
1253 if (!hcon->out && hcon->type == LE_LINK)
1254 l2cap_le_conn_ready(conn);
/* Outgoing LE link: start SMP at the pending security level. */
1256 if (hcon->out && hcon->type == LE_LINK)
1257 smp_conn_security(hcon, hcon->pending_sec_level);
1259 mutex_lock(&conn->chan_lock);
1261 list_for_each_entry(chan, &conn->chan_l, list) {
1263 l2cap_chan_lock(chan);
/* A2MP fixed channels are managed elsewhere: skip. */
1265 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1266 l2cap_chan_unlock(chan);
1270 if (hcon->type == LE_LINK) {
1271 if (smp_conn_security(hcon, chan->sec_level))
1272 l2cap_chan_ready(chan);
1274 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1275 struct sock *sk = chan->sk;
1276 __clear_chan_timer(chan);
1278 __l2cap_state_change(chan, BT_CONNECTED);
1279 sk->sk_state_change(sk);
1282 } else if (chan->state == BT_CONNECT)
1283 l2cap_do_start(chan);
1285 l2cap_chan_unlock(chan);
1288 mutex_unlock(&conn->chan_lock);
1291 /* Notify sockets that we cannot guaranty reliability anymore */
/* Raise err on every channel that demanded reliable delivery
 * (FLAG_FORCE_RELIABLE). */
1292 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1294 struct l2cap_chan *chan;
1296 BT_DBG("conn %p", conn);
1298 mutex_lock(&conn->chan_lock);
1300 list_for_each_entry(chan, &conn->chan_l, list) {
1301 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1302 l2cap_chan_set_err(chan, err);
1305 mutex_unlock(&conn->chan_lock);
/* Information-request timer expired without a response: mark the
 * feature exchange done anyway and start the pending channels. */
1308 static void l2cap_info_timeout(struct work_struct *work)
1310 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1313 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1314 conn->info_ident = 0;
1316 l2cap_conn_start(conn);
/* Tear down an entire L2CAP connection when the HCI link dies: delete
 * every channel with the given error, release the HCI channel, cancel
 * the info timer, and destroy any pending SMP context. Each channel is
 * held across its deletion so the ops->close callback runs safely
 * after the channel lock is dropped. */
1319 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1321 struct l2cap_conn *conn = hcon->l2cap_data;
1322 struct l2cap_chan *chan, *l;
1327 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled incoming frame. */
1329 kfree_skb(conn->rx_skb);
1331 mutex_lock(&conn->chan_lock);
1334 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1335 l2cap_chan_hold(chan);
1336 l2cap_chan_lock(chan);
1338 l2cap_chan_del(chan, err);
1340 l2cap_chan_unlock(chan);
1342 chan->ops->close(chan);
1343 l2cap_chan_put(chan);
1346 mutex_unlock(&conn->chan_lock);
1348 hci_chan_del(conn->hchan);
1350 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1351 cancel_delayed_work_sync(&conn->info_timer);
1353 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1354 cancel_delayed_work_sync(&conn->security_timer);
1355 smp_chan_destroy(conn);
/* Detach from the hcon; the conn free is in an elided line. */
1358 hcon->l2cap_data = NULL;
/* SMP security negotiation timed out: destroy the SMP context and take
 * the whole connection down with ETIMEDOUT. */
1362 static void security_timeout(struct work_struct *work)
1364 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1365 security_timer.work);
1367 BT_DBG("conn %p", conn);
1369 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1370 smp_chan_destroy(conn);
1371 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for an HCI connection:
 * allocates the conn and an hci_chan, picks the MTU from the link type
 * (AMP block MTU / LE MTU when set / ACL MTU), initializes locks,
 * lists and the per-link-type timer. Early-return for an existing
 * conn and the NULL checks are in elided lines. */
1375 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1377 struct l2cap_conn *conn = hcon->l2cap_data;
1378 struct hci_chan *hchan;
1383 hchan = hci_chan_create(hcon);
1387 conn = kzalloc(sizeof(struct l2cap_conn), GFP_KERNEL);
/* Allocation failed: undo the hci_chan before bailing out. */
1389 hci_chan_del(hchan);
1393 hcon->l2cap_data = conn;
1395 conn->hchan = hchan;
1397 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* Case labels for the link types are in elided lines. */
1399 switch (hcon->type) {
1401 conn->mtu = hcon->hdev->block_mtu;
1405 if (hcon->hdev->le_mtu) {
1406 conn->mtu = hcon->hdev->le_mtu;
1412 conn->mtu = hcon->hdev->acl_mtu;
1416 conn->src = &hcon->hdev->bdaddr;
1417 conn->dst = &hcon->dst;
1419 conn->feat_mask = 0;
1421 spin_lock_init(&conn->lock);
1422 mutex_init(&conn->chan_lock);
1424 INIT_LIST_HEAD(&conn->chan_l);
/* LE links use the SMP security timer; BR/EDR the info timer. */
1426 if (hcon->type == LE_LINK)
1427 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1429 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1431 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1436 /* ---- Socket interface ---- */
1438 /* Find socket with psm and source / destination bdaddr.
1439 * Returns closest match.
/* Same closest-match scheme as l2cap_global_chan_by_scid, keyed on PSM:
 * exact src+dst wins, otherwise the best wildcard match kept in c1 is
 * returned (c1 assignment and final return are in elided lines). */
1441 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1445 struct l2cap_chan *c, *c1 = NULL;
1447 read_lock(&chan_list_lock);
1449 list_for_each_entry(c, &chan_list, global_l) {
1450 struct sock *sk = c->sk;
/* Skip channels not in the requested state (0 matches any). */
1452 if (state && c->state != state)
1455 if (c->psm == psm) {
1456 int src_match, dst_match;
1457 int src_any, dst_any;
1460 src_match = !bacmp(&bt_sk(sk)->src, src);
1461 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1462 if (src_match && dst_match) {
1463 read_unlock(&chan_list_lock);
1468 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1469 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1470 if ((src_match && dst_any) || (src_any && dst_match) ||
1471 (src_any && dst_any))
1476 read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection on @chan to @dst.
 *
 * Validates the PSM/CID combination and channel mode, resolves the
 * route to an hci_dev, creates (or reuses) the HCI link — LE if the
 * destination CID is the LE data channel, ACL otherwise — attaches the
 * channel to the link's l2cap_conn, and either starts the connect
 * procedure or completes it immediately when the link is already up.
 *
 * Returns 0 on success or a negative errno (e.g. -EHOSTUNREACH when
 * no route to @dst exists).
 */
1481 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1482 bdaddr_t *dst, u8 dst_type)
1484 struct sock *sk = chan->sk;
1485 bdaddr_t *src = &bt_sk(sk)->src;
1486 struct l2cap_conn *conn;
1487 struct hci_conn *hcon;
1488 struct hci_dev *hdev;
1492 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
1493 dst_type, __le16_to_cpu(psm));
1495 hdev = hci_get_route(dst, src);
1497 return -EHOSTUNREACH;
1501 l2cap_chan_lock(chan);
1503 /* PSM must be odd and lsb of upper byte must be 0 */
1504 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1505 chan->chan_type != L2CAP_CHAN_RAW) {
	/* Connection-oriented channels need either a PSM or a CID */
1510 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1515 switch (chan->mode) {
1516 case L2CAP_MODE_BASIC:
1518 case L2CAP_MODE_ERTM:
1519 case L2CAP_MODE_STREAMING:
1528 switch (chan->state) {
1532 /* Already connecting */
1537 /* Already connected */
1551 /* Set destination address and psm */
1553 bacpy(&bt_sk(sk)->dst, dst);
1559 auth_type = l2cap_get_auth_type(chan);
	/* LE data channel implies an LE link; everything else uses ACL */
1561 if (chan->dcid == L2CAP_CID_LE_DATA)
1562 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1563 chan->sec_level, auth_type);
1565 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1566 chan->sec_level, auth_type);
1569 err = PTR_ERR(hcon);
1573 conn = l2cap_conn_add(hcon, 0);
1580 if (hcon->type == LE_LINK) {
1583 if (!list_empty(&conn->chan_l)) {
1592 /* Update source addr of the socket */
1593 bacpy(src, conn->src);
	/* l2cap_chan_add takes conn->chan_lock; drop the chan lock to
	 * respect lock ordering, then reacquire it.
	 */
1595 l2cap_chan_unlock(chan);
1596 l2cap_chan_add(conn, chan);
1597 l2cap_chan_lock(chan);
1599 l2cap_state_change(chan, BT_CONNECT);
1600 __set_chan_timer(chan, sk->sk_sndtimeo);
	/* Link already up: finish connect now where possible */
1602 if (hcon->state == BT_CONNECTED) {
1603 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1604 __clear_chan_timer(chan);
1605 if (l2cap_chan_check_security(chan))
1606 l2cap_state_change(chan, BT_CONNECTED);
1608 l2cap_do_start(chan);
1614 l2cap_chan_unlock(chan);
1615 hci_dev_unlock(hdev);
/* Block (interruptibly) until all of the channel's outstanding ERTM
 * I-frames have been acked, the connection goes away, a signal
 * arrives, or a socket error is raised.  Returns 0 or a negative
 * errno (signal/socket error).
 */
1620 int __l2cap_wait_ack(struct sock *sk)
1622 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1623 DECLARE_WAITQUEUE(wait, current);
1627 add_wait_queue(sk_sleep(sk), &wait);
1628 set_current_state(TASK_INTERRUPTIBLE);
1629 while (chan->unacked_frames > 0 && chan->conn) {
1633 if (signal_pending(current)) {
1634 err = sock_intr_errno(timeo);
1639 timeo = schedule_timeout(timeo);
1641 set_current_state(TASK_INTERRUPTIBLE);
1643 err = sock_error(sk);
1647 set_current_state(TASK_RUNNING);
1648 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor-timer expiry: feed L2CAP_EV_MONITOR_TO into the tx
 * state machine.  The work item holds a channel reference, released
 * via l2cap_chan_put() on every exit path.
 */
1652 static void l2cap_monitor_timeout(struct work_struct *work)
1654 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1655 monitor_timer.work);
1657 BT_DBG("chan %p", chan);
1659 l2cap_chan_lock(chan);
	/* Early-exit path: drop lock and the timer's channel ref */
1662 l2cap_chan_unlock(chan);
1663 l2cap_chan_put(chan);
1667 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1669 l2cap_chan_unlock(chan);
1670 l2cap_chan_put(chan);
/* ERTM retransmission-timer expiry: feed L2CAP_EV_RETRANS_TO into the
 * tx state machine.  Mirrors l2cap_monitor_timeout(); the work item's
 * channel reference is dropped on every exit path.
 */
1673 static void l2cap_retrans_timeout(struct work_struct *work)
1675 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1676 retrans_timer.work);
1678 BT_DBG("chan %p", chan);
1680 l2cap_chan_lock(chan);
	/* Early-exit path: drop lock and the timer's channel ref */
1683 l2cap_chan_unlock(chan);
1684 l2cap_chan_put(chan);
1688 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1689 l2cap_chan_unlock(chan);
1690 l2cap_chan_put(chan);
/* Streaming-mode transmit: splice @skbs onto tx_q and send every
 * queued frame immediately.  Each frame is stamped with the next tx
 * sequence number, has its control field packed in, and gets a CRC16
 * FCS appended when the channel uses L2CAP_FCS_CRC16.  Streaming mode
 * has no retransmission, so frames are not kept after sending.
 */
1693 static void l2cap_streaming_send(struct l2cap_chan *chan,
1694 struct sk_buff_head *skbs)
1696 struct sk_buff *skb;
1697 struct l2cap_ctrl *control;
1699 BT_DBG("chan %p, skbs %p", chan, skbs);
1701 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1703 while (!skb_queue_empty(&chan->tx_q)) {
1705 skb = skb_dequeue(&chan->tx_q);
1707 bt_cb(skb)->control.retries = 1;
1708 control = &bt_cb(skb)->control;
	/* Streaming mode never acks, so reqseq is always 0 */
1710 control->reqseq = 0;
1711 control->txseq = chan->next_tx_seq;
1713 __pack_control(chan, control, skb);
1715 if (chan->fcs == L2CAP_FCS_CRC16) {
1716 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1717 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1720 l2cap_do_send(chan, skb);
1722 BT_DBG("Sent txseq %u", control->txseq);
1724 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1725 chan->frames_sent++;
/* ERTM transmit: send queued I-frames starting at tx_send_head while
 * the remote tx window has room and the tx state machine is in XMIT.
 * Each sent frame is stamped with txseq/reqseq, optionally carries the
 * F-bit and a CRC16 FCS, and is cloned before sending so the original
 * stays on tx_q for possible retransmission.  Returns the number of
 * frames sent (or an early exit when not connected / remote busy).
 */
1729 static int l2cap_ertm_send(struct l2cap_chan *chan)
1731 struct sk_buff *skb, *tx_skb;
1732 struct l2cap_ctrl *control;
1735 BT_DBG("chan %p", chan);
1737 if (chan->state != BT_CONNECTED)
	/* Peer signalled receiver-not-ready: hold off transmission */
1740 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1743 while (chan->tx_send_head &&
1744 chan->unacked_frames < chan->remote_tx_win &&
1745 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1747 skb = chan->tx_send_head;
1749 bt_cb(skb)->control.retries = 1;
1750 control = &bt_cb(skb)->control;
	/* Piggyback a pending final bit on this I-frame */
1752 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1755 control->reqseq = chan->buffer_seq;
1756 chan->last_acked_seq = chan->buffer_seq;
1757 control->txseq = chan->next_tx_seq;
1759 __pack_control(chan, control, skb);
1761 if (chan->fcs == L2CAP_FCS_CRC16) {
1762 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1763 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1766 /* Clone after data has been modified. Data is assumed to be
1767 read-only (for locking purposes) on cloned sk_buffs.
1769 tx_skb = skb_clone(skb, GFP_KERNEL);
1774 __set_retrans_timer(chan);
1776 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1777 chan->unacked_frames++;
1778 chan->frames_sent++;
	/* Advance tx_send_head, or clear it at end of queue */
1781 if (skb_queue_is_last(&chan->tx_q, skb))
1782 chan->tx_send_head = NULL;
1784 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1786 l2cap_do_send(chan, tx_skb);
1787 BT_DBG("Sent txseq %u", control->txseq);
1790 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1791 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on retrans_list.  Frames
 * that exceed max_tx retries trigger a disconnect.  Because queued
 * frames may be cloned (and clones are treated as read-only), a full
 * skb_copy is made when needed before updating the control field and
 * FCS in place.  No-op while the remote side reports busy.
 */
1796 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1798 struct l2cap_ctrl control;
1799 struct sk_buff *skb;
1800 struct sk_buff *tx_skb;
1803 BT_DBG("chan %p", chan);
1805 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1808 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1809 seq = l2cap_seq_list_pop(&chan->retrans_list);
1811 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1813 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1818 bt_cb(skb)->control.retries++;
1819 control = bt_cb(skb)->control;
	/* Give up and tear the channel down once max_tx is exceeded */
1821 if (chan->max_tx != 0 &&
1822 bt_cb(skb)->control.retries > chan->max_tx) {
1823 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1824 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1825 l2cap_seq_list_clear(&chan->retrans_list);
1829 control.reqseq = chan->buffer_seq;
1830 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1835 if (skb_cloned(skb)) {
1836 /* Cloned sk_buffs are read-only, so we need a
1839 tx_skb = skb_copy(skb, GFP_KERNEL);
1841 tx_skb = skb_clone(skb, GFP_KERNEL);
	/* Copy/clone failed: abandon this retransmission round */
1845 l2cap_seq_list_clear(&chan->retrans_list);
1849 /* Update skb contents */
1850 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1851 put_unaligned_le32(__pack_extended_control(&control),
1852 tx_skb->data + L2CAP_HDR_SIZE);
1854 put_unaligned_le16(__pack_enhanced_control(&control),
1855 tx_skb->data + L2CAP_HDR_SIZE);
	/* Control field changed, so the FCS must be recomputed */
1858 if (chan->fcs == L2CAP_FCS_CRC16) {
1859 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1860 put_unaligned_le16(fcs, skb_put(tx_skb,
1864 l2cap_do_send(chan, tx_skb);
1866 BT_DBG("Resent txseq %d", control.txseq);
1868 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame identified by control->reqseq: queue
 * the sequence number and run the resend machinery.
 */
1872 static void l2cap_retransmit(struct l2cap_chan *chan,
1873 struct l2cap_ctrl *control)
1875 BT_DBG("chan %p, control %p", chan, control);
1877 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1878 l2cap_ertm_resend(chan);
/* Retransmit all unacked frames starting at control->reqseq.  The
 * retrans_list is rebuilt by walking tx_q from the frame matching
 * reqseq up to (but not including) tx_send_head, then the resend
 * machinery is run.  Skipped while the remote side reports busy.
 */
1881 static void l2cap_retransmit_all(struct l2cap_chan *chan,
1882 struct l2cap_ctrl *control)
1884 struct sk_buff *skb;
1886 BT_DBG("chan %p, control %p", chan, control);
1889 set_bit(CONN_SEND_FBIT, &chan->conn_state);
1891 l2cap_seq_list_clear(&chan->retrans_list);
1893 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1896 if (chan->unacked_frames) {
	/* Find the first frame to resend (reqseq) in tx_q */
1897 skb_queue_walk(&chan->tx_q, skb) {
1898 if (bt_cb(skb)->control.txseq == control->reqseq ||
1899 skb == chan->tx_send_head)
	/* Queue everything from there up to tx_send_head */
1903 skb_queue_walk_from(&chan->tx_q, skb) {
1904 if (skb == chan->tx_send_head)
1907 l2cap_seq_list_append(&chan->retrans_list,
1908 bt_cb(skb)->control.txseq);
1911 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames.  Sends an RNR S-frame when local
 * busy is set; otherwise tries to piggyback the ack on pending
 * I-frames, and falls back to an explicit RR S-frame once the number
 * of unacked received frames reaches 3/4 of the ack window.  If no
 * ack was sent, the ack timer is (re)armed instead.
 */
1915 static void l2cap_send_ack(struct l2cap_chan *chan)
1917 struct l2cap_ctrl control;
1918 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1919 chan->last_acked_seq);
1922 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1923 chan, chan->last_acked_seq, chan->buffer_seq);
1925 memset(&control, 0, sizeof(control));
	/* Local busy: tell the peer we are not ready (RNR) */
1928 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1929 chan->rx_state == L2CAP_RX_STATE_RECV) {
1930 __clear_ack_timer(chan);
1931 control.super = L2CAP_SUPER_RNR;
1932 control.reqseq = chan->buffer_seq;
1933 l2cap_send_sframe(chan, &control);
1935 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1936 l2cap_ertm_send(chan);
1937 /* If any i-frames were sent, they included an ack */
1938 if (chan->buffer_seq == chan->last_acked_seq)
1942 /* Ack now if the window is 3/4ths full.
1943 * Calculate without mul or div
1945 threshold = chan->ack_win;
	/* threshold = ack_win * 3 (then presumably >>= 2 for 3/4) */
1946 threshold += threshold << 1;
1949 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
1952 if (frames_to_ack >= threshold) {
1953 __clear_ack_timer(chan);
1954 control.super = L2CAP_SUPER_RR;
1955 control.reqseq = chan->buffer_seq;
1956 l2cap_send_sframe(chan, &control);
	/* Not enough frames yet: defer the ack via the ack timer */
1961 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb, allocating
 * continuation fragments (chained via frag_list) of at most conn->mtu
 * bytes each for data beyond the first @count bytes.  Returns 0 on
 * success or a negative error (copy fault / allocation failure).
 */
1965 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1966 struct msghdr *msg, int len,
1967 int count, struct sk_buff *skb)
1969 struct l2cap_conn *conn = chan->conn;
1970 struct sk_buff **frag;
	/* First chunk goes directly into the head skb */
1973 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1979 /* Continuation fragments (no L2CAP header) */
1980 frag = &skb_shinfo(skb)->frag_list;
1982 struct sk_buff *tmp;
1984 count = min_t(unsigned int, conn->mtu, len);
1986 tmp = chan->ops->alloc_skb(chan, count,
1987 msg->msg_flags & MSG_DONTWAIT);
1989 return PTR_ERR(tmp);
1993 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1996 (*frag)->priority = skb->priority;
	/* Keep head skb's accounting in sync with the fragment chain */
2001 skb->len += (*frag)->len;
2002 skb->data_len += (*frag)->len;
2004 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM
 * followed by the user data from @msg.  Returns the skb or an
 * ERR_PTR on allocation/copy failure.
 */
2010 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2011 struct msghdr *msg, size_t len,
2014 struct l2cap_conn *conn = chan->conn;
2015 struct sk_buff *skb;
2016 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2017 struct l2cap_hdr *lh;
2019 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
	/* Head skb holds at most one link MTU worth of data */
2021 count = min_t(unsigned int, (conn->mtu - hlen), len);
2023 skb = chan->ops->alloc_skb(chan, count + hlen,
2024 msg->msg_flags & MSG_DONTWAIT);
2028 skb->priority = priority;
2030 /* Create L2CAP header */
2031 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2032 lh->cid = cpu_to_le16(chan->dcid);
2033 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2034 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
2036 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2037 if (unlikely(err < 0)) {
2039 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by
 * the user data from @msg.  Returns the skb or an ERR_PTR on
 * allocation/copy failure.
 */
2044 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2045 struct msghdr *msg, size_t len,
2048 struct l2cap_conn *conn = chan->conn;
2049 struct sk_buff *skb;
2051 struct l2cap_hdr *lh;
2053 BT_DBG("chan %p len %zu", chan, len);
2055 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2057 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2058 msg->msg_flags & MSG_DONTWAIT);
2062 skb->priority = priority;
2064 /* Create L2CAP header */
2065 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2066 lh->cid = cpu_to_le16(chan->dcid);
2067 lh->len = cpu_to_le16(len);
2069 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2070 if (unlikely(err < 0)) {
2072 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (enhanced or extended, filled in at transmit time), an
 * optional SDU-length field for segmented SDUs, the user data, and
 * room for a CRC16 FCS when enabled.  Returns the skb or an ERR_PTR.
 */
2077 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2078 struct msghdr *msg, size_t len,
2081 struct l2cap_conn *conn = chan->conn;
2082 struct sk_buff *skb;
2083 int err, count, hlen;
2084 struct l2cap_hdr *lh;
2086 BT_DBG("chan %p len %zu", chan, len);
2089 return ERR_PTR(-ENOTCONN);
	/* Header size depends on enhanced vs extended control field */
2091 hlen = __ertm_hdr_size(chan);
2094 hlen += L2CAP_SDULEN_SIZE;
2096 if (chan->fcs == L2CAP_FCS_CRC16)
2097 hlen += L2CAP_FCS_SIZE;
2099 count = min_t(unsigned int, (conn->mtu - hlen), len);
2101 skb = chan->ops->alloc_skb(chan, count + hlen,
2102 msg->msg_flags & MSG_DONTWAIT);
2106 /* Create L2CAP header */
2107 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2108 lh->cid = cpu_to_le16(chan->dcid);
2109 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2111 /* Control header is populated later */
2112 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2113 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE))
2115 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2118 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2120 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2121 if (unlikely(err < 0)) {
2123 return ERR_PTR(err);
	/* Stash per-frame tx metadata in the skb control block */
2126 bt_cb(skb)->control.fcs = chan->fcs;
2127 bt_cb(skb)->control.retries = 0;
/* Segment the SDU in @msg into I-frame PDUs appended to @seg_queue.
 * PDU size is bounded by the link MTU (ERTM PDUs must fit one HCI
 * fragment), the maximum L2CAP overhead, and the remote MPS.  Frames
 * are tagged UNSEGMENTED, or START/CONTINUE/END for multi-PDU SDUs
 * (START additionally carries the SDU length field).  On failure the
 * partially built queue is purged and the error returned.
 */
2131 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2132 struct sk_buff_head *seg_queue,
2133 struct msghdr *msg, size_t len)
2135 struct sk_buff *skb;
2140 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2142 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2143 * so fragmented skbs are not used. The HCI layer's handling
2144 * of fragmented skbs is not compatible with ERTM's queueing.
2147 /* PDU size is derived from the HCI MTU */
2148 pdu_len = chan->conn->mtu;
2150 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2152 /* Adjust for largest possible L2CAP overhead. */
2154 pdu_len -= L2CAP_FCS_SIZE;
2156 pdu_len -= __ertm_hdr_size(chan);
2158 /* Remote device may have requested smaller PDUs */
2159 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2161 if (len <= pdu_len) {
2162 sar = L2CAP_SAR_UNSEGMENTED;
	/* Multi-PDU SDU: first frame is START and carries sdu_len */
2166 sar = L2CAP_SAR_START;
2168 pdu_len -= L2CAP_SDULEN_SIZE;
2172 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2175 __skb_queue_purge(seg_queue);
2176 return PTR_ERR(skb);
2179 bt_cb(skb)->control.sar = sar;
2180 __skb_queue_tail(seg_queue, skb);
	/* Only the START frame reserves space for the SDU length */
2185 pdu_len += L2CAP_SDULEN_SIZE;
2188 if (len <= pdu_len) {
2189 sar = L2CAP_SAR_END;
2192 sar = L2CAP_SAR_CONTINUE;
/* Entry point for sending user data on a channel.  Dispatches on
 * channel type/mode: connectionless channels send a single G-frame;
 * basic mode sends one B-frame (after an outgoing-MTU check); ERTM
 * and streaming modes segment the SDU first and then hand the queue
 * to the tx state machine (ERTM) or send it directly (streaming).
 * Returns bytes sent or a negative errno.
 */
2199 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2202 struct sk_buff *skb;
2204 struct sk_buff_head seg_queue;
2206 /* Connectionless channel */
2207 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2208 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2210 return PTR_ERR(skb);
2212 l2cap_do_send(chan, skb);
2216 switch (chan->mode) {
2217 case L2CAP_MODE_BASIC:
2218 /* Check outgoing MTU */
2219 if (len > chan->omtu)
2222 /* Create a basic PDU */
2223 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2225 return PTR_ERR(skb);
2227 l2cap_do_send(chan, skb);
2231 case L2CAP_MODE_ERTM:
2232 case L2CAP_MODE_STREAMING:
2233 /* Check outgoing MTU */
2234 if (len > chan->omtu) {
2239 __skb_queue_head_init(&seg_queue);
2241 /* Do segmentation before calling in to the state machine,
2242 * since it's possible to block while waiting for memory
2245 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2247 /* The channel could have been closed while segmenting,
2248 * check that it is still connected.
2250 if (chan->state != BT_CONNECTED) {
2251 __skb_queue_purge(&seg_queue);
2258 if (chan->mode == L2CAP_MODE_ERTM)
2259 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2261 l2cap_streaming_send(chan, &seg_queue);
2265 /* If the skbs were not queued for sending, they'll still be in
2266 * seg_queue and need to be purged.
2268 __skb_queue_purge(&seg_queue);
2272 BT_DBG("bad state %1.1x", chan->mode);
/* Send SREJ S-frames for every missing sequence number between
 * expected_tx_seq and @txseq that is not already buffered in srej_q,
 * recording each on srej_list, then advance expected_tx_seq past
 * @txseq.
 */
2279 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2281 struct l2cap_ctrl control;
2284 BT_DBG("chan %p, txseq %u", chan, txseq);
2286 memset(&control, 0, sizeof(control));
2288 control.super = L2CAP_SUPER_SREJ;
2290 for (seq = chan->expected_tx_seq; seq != txseq;
2291 seq = __next_seq(chan, seq)) {
	/* Only request frames we have not already received out of order */
2292 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2293 control.reqseq = seq;
2294 l2cap_send_sframe(chan, &control);
2295 l2cap_seq_list_append(&chan->srej_list, seq);
2299 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the last (tail) sequence number on srej_list;
 * no-op when the list is empty.
 */
2302 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2304 struct l2cap_ctrl control;
2306 BT_DBG("chan %p", chan);
2308 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2311 memset(&control, 0, sizeof(control));
2313 control.super = L2CAP_SUPER_SREJ;
2314 control.reqseq = chan->srej_list.tail;
2315 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for all outstanding sequence numbers on srej_list up
 * to (but not including) @txseq.  Entries are popped, re-sent, and
 * re-appended; the saved initial head bounds the loop to one pass.
 */
2318 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2320 struct l2cap_ctrl control;
2324 BT_DBG("chan %p, txseq %u", chan, txseq);
2326 memset(&control, 0, sizeof(control));
2328 control.super = L2CAP_SUPER_SREJ;
2330 /* Capture initial list head to allow only one pass through the list. */
2331 initial_head = chan->srej_list.head;
2334 seq = l2cap_seq_list_pop(&chan->srej_list);
2335 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2338 control.reqseq = seq;
2339 l2cap_send_sframe(chan, &control);
	/* Keep the entry queued — it is still outstanding */
2340 l2cap_seq_list_append(&chan->srej_list, seq);
2341 } while (chan->srej_list.head != initial_head);
/* Process an incoming ack (reqseq): free every tx_q frame whose
 * sequence number the peer has now acknowledged, update
 * expected_ack_seq, and stop the retransmission timer when nothing
 * remains unacked.
 */
2344 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2346 struct sk_buff *acked_skb;
2349 BT_DBG("chan %p, reqseq %u", chan, reqseq);
	/* Nothing outstanding, or this ack adds no new information */
2351 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2354 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2355 chan->expected_ack_seq, chan->unacked_frames);
2357 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2358 ackseq = __next_seq(chan, ackseq)) {
2360 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2362 skb_unlink(acked_skb, &chan->tx_q);
2363 kfree_skb(acked_skb);
2364 chan->unacked_frames--;
2368 chan->expected_ack_seq = reqseq;
2370 if (chan->unacked_frames == 0)
2371 __clear_retrans_timer(chan);
2373 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abort the SREJ_SENT receive state: discard buffered out-of-order
 * frames and pending SREJ bookkeeping, rewind expected_tx_seq to
 * buffer_seq, and return the receiver to the RECV state.
 */
2376 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2378 BT_DBG("chan %p", chan);
2380 chan->expected_tx_seq = chan->buffer_seq;
2381 l2cap_seq_list_clear(&chan->srej_list);
2382 skb_queue_purge(&chan->srej_q);
2383 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM tx state machine, XMIT state: handle data requests (queue and
 * send), local-busy transitions (RNR bookkeeping, possibly moving to
 * WAIT_F after an RR poll), incoming reqseq acks, explicit polls and
 * retransmission timeouts (both poll the peer and enter WAIT_F).
 */
2386 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2387 struct l2cap_ctrl *control,
2388 struct sk_buff_head *skbs, u8 event)
2390 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2394 case L2CAP_EV_DATA_REQUEST:
2395 if (chan->tx_send_head == NULL)
2396 chan->tx_send_head = skb_peek(skbs);
2398 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2399 l2cap_ertm_send(chan);
2401 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2402 BT_DBG("Enter LOCAL_BUSY");
2403 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2405 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2406 /* The SREJ_SENT state must be aborted if we are to
2407 * enter the LOCAL_BUSY state.
2409 l2cap_abort_rx_srej_sent(chan);
2412 l2cap_send_ack(chan);
2415 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2416 BT_DBG("Exit LOCAL_BUSY");
2417 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
	/* We previously sent RNR: poll the peer with RR and await F-bit */
2419 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2420 struct l2cap_ctrl local_control;
2422 memset(&local_control, 0, sizeof(local_control));
2423 local_control.sframe = 1;
2424 local_control.super = L2CAP_SUPER_RR;
2425 local_control.poll = 1;
2426 local_control.reqseq = chan->buffer_seq;
2427 l2cap_send_sframe(chan, &local_control);
2429 chan->retry_count = 1;
2430 __set_monitor_timer(chan);
2431 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2434 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2435 l2cap_process_reqseq(chan, control->reqseq);
2437 case L2CAP_EV_EXPLICIT_POLL:
2438 l2cap_send_rr_or_rnr(chan, 1);
2439 chan->retry_count = 1;
2440 __set_monitor_timer(chan);
2441 __clear_ack_timer(chan);
2442 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2444 case L2CAP_EV_RETRANS_TO:
2445 l2cap_send_rr_or_rnr(chan, 1);
2446 chan->retry_count = 1;
2447 __set_monitor_timer(chan);
2448 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2450 case L2CAP_EV_RECV_FBIT:
2451 /* Nothing to process */
/* ERTM tx state machine, WAIT_F state (a poll is outstanding): data
 * requests are queued but not sent; local-busy transitions mirror the
 * XMIT state; a received F-bit clears the monitor timer and returns
 * to XMIT; monitor timeouts re-poll until max_tx retries are
 * exhausted, then disconnect with ECONNABORTED.
 *
 * Fix: the "recv fbit" debug format string was malformed
 * ("0x2.2%x" — the width/precision must follow '%'); corrected to
 * "0x%2.2x".
 */
2458 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2459 struct l2cap_ctrl *control,
2460 struct sk_buff_head *skbs, u8 event)
2462 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2466 case L2CAP_EV_DATA_REQUEST:
2467 if (chan->tx_send_head == NULL)
2468 chan->tx_send_head = skb_peek(skbs);
2469 /* Queue data, but don't send. */
2470 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2472 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2473 BT_DBG("Enter LOCAL_BUSY");
2474 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2476 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2477 /* The SREJ_SENT state must be aborted if we are to
2478 * enter the LOCAL_BUSY state.
2480 l2cap_abort_rx_srej_sent(chan);
2483 l2cap_send_ack(chan);
2486 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2487 BT_DBG("Exit LOCAL_BUSY");
2488 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
	/* We previously sent RNR: poll the peer again with RR */
2490 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2491 struct l2cap_ctrl local_control;
2492 memset(&local_control, 0, sizeof(local_control));
2493 local_control.sframe = 1;
2494 local_control.super = L2CAP_SUPER_RR;
2495 local_control.poll = 1;
2496 local_control.reqseq = chan->buffer_seq;
2497 l2cap_send_sframe(chan, &local_control);
2499 chan->retry_count = 1;
2500 __set_monitor_timer(chan);
2501 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2504 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2505 l2cap_process_reqseq(chan, control->reqseq);
2509 case L2CAP_EV_RECV_FBIT:
	/* Poll answered: go back to XMIT and resume normal operation */
2510 if (control && control->final) {
2511 __clear_monitor_timer(chan);
2512 if (chan->unacked_frames > 0)
2513 __set_retrans_timer(chan);
2514 chan->retry_count = 0;
2515 chan->tx_state = L2CAP_TX_STATE_XMIT;
2516 BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2519 case L2CAP_EV_EXPLICIT_POLL:
2522 case L2CAP_EV_MONITOR_TO:
	/* Re-poll until the retry budget (max_tx, 0 = unlimited) runs out */
2523 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2524 l2cap_send_rr_or_rnr(chan, 1);
2525 __set_monitor_timer(chan);
2526 chan->retry_count++;
2528 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Dispatch a tx state machine event to the handler for the channel's
 * current tx state (XMIT or WAIT_F).
 */
2536 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2537 struct sk_buff_head *skbs, u8 event)
2539 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2540 chan, control, skbs, event, chan->tx_state);
2542 switch (chan->tx_state) {
2543 case L2CAP_TX_STATE_XMIT:
2544 l2cap_tx_state_xmit(chan, control, skbs, event);
2546 case L2CAP_TX_STATE_WAIT_F:
2547 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Forward a received frame's reqseq (and F-bit) to the tx state
 * machine as a RECV_REQSEQ_AND_FBIT event.
 */
2555 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2556 struct l2cap_ctrl *control)
2558 BT_DBG("chan %p, control %p", chan, control);
2559 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Forward a received frame's F-bit to the tx state machine as a
 * RECV_FBIT event (no reqseq processing).
 */
2562 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2563 struct l2cap_ctrl *control)
2565 BT_DBG("chan %p, control %p", chan, control);
2566 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2569 /* Copy frame to all raw sockets on that connection */
2570 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2572 struct sk_buff *nskb;
2573 struct l2cap_chan *chan;
2575 BT_DBG("conn %p", conn);
2577 mutex_lock(&conn->chan_lock);
2579 list_for_each_entry(chan, &conn->chan_l, list) {
2580 struct sock *sk = chan->sk;
2581 if (chan->chan_type != L2CAP_CHAN_RAW)
2584 /* Don't send frame to the socket it came from */
2587 nskb = skb_clone(skb, GFP_KERNEL);
2591 if (chan->ops->recv(chan, nskb))
2595 mutex_unlock(&conn->chan_lock);
2598 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header on the signalling CID
 * (LE or BR/EDR), the command header (@code, @ident, @dlen), and the
 * payload from @data, fragmented across frag_list skbs of at most the
 * link MTU each.  Returns the skb or NULL on allocation failure.
 */
2599 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2600 u8 ident, u16 dlen, void *data)
2602 struct sk_buff *skb, **frag;
2603 struct l2cap_cmd_hdr *cmd;
2604 struct l2cap_hdr *lh;
2607 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2608 conn, code, ident, dlen);
2610 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2611 count = min_t(unsigned int, conn->mtu, len);
2613 skb = bt_skb_alloc(count, GFP_KERNEL);
2617 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2618 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	/* Signalling CID differs between LE and BR/EDR links */
2620 if (conn->hcon->type == LE_LINK)
2621 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2623 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2625 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2628 cmd->len = cpu_to_le16(dlen);
	/* First chunk of payload goes in the head skb */
2631 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2632 memcpy(skb_put(skb, count), data, count);
2638 /* Continuation fragments (no L2CAP header) */
2639 frag = &skb_shinfo(skb)->frag_list;
2641 count = min_t(unsigned int, conn->mtu, len);
2643 *frag = bt_skb_alloc(count, GFP_KERNEL);
2647 memcpy(skb_put(*frag, count), data, count);
2652 frag = &(*frag)->next;
/* Parse one configuration option at *@ptr, returning its @type,
 * length (@olen) and value (@val; 1/2/4-byte values are read inline,
 * larger values are returned as a pointer).  Returns the total bytes
 * consumed so the caller can advance through the option list.
 */
2662 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2665 struct l2cap_conf_opt *opt = *ptr;
2668 len = L2CAP_CONF_OPT_SIZE + opt->len;
2676 *val = *((u8 *) opt->val);
2680 *val = get_unaligned_le16(opt->val);
2684 *val = get_unaligned_le32(opt->val);
	/* Larger options: hand back a pointer to the raw bytes */
2688 *val = (unsigned long) opt->val;
2692 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/val) at *@ptr and advance
 * the pointer past it.  1/2/4-byte values are stored inline
 * (unaligned little-endian); larger values are memcpy'd from the
 * pointer passed in @val.
 */
2696 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2698 struct l2cap_conf_opt *opt = *ptr;
2700 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2707 *((u8 *) opt->val) = val;
2711 put_unaligned_le16(val, opt->val);
2715 put_unaligned_le32(val, opt->val);
	/* Larger options: @val is actually a pointer to the data */
2719 memcpy(opt->val, (void *) val, len);
2723 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option built from the
 * channel's local QoS parameters; field values depend on whether the
 * channel runs in ERTM or streaming mode.
 */
2726 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2728 struct l2cap_conf_efs efs;
2730 switch (chan->mode) {
2731 case L2CAP_MODE_ERTM:
2732 efs.id = chan->local_id;
2733 efs.stype = chan->local_stype;
2734 efs.msdu = cpu_to_le16(chan->local_msdu);
2735 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2736 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2737 efs.flush_to = __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2740 case L2CAP_MODE_STREAMING:
	/* Streaming mode always advertises best-effort service */
2742 efs.stype = L2CAP_SERV_BESTEFFORT;
2743 efs.msdu = cpu_to_le16(chan->local_msdu);
2744 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2753 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2754 (unsigned long) &efs);
/* Ack-timer expiry: if any received frames are still unacked, send an
 * RR/RNR to acknowledge them.  Drops the timer's channel reference on
 * exit.
 */
2757 static void l2cap_ack_timeout(struct work_struct *work)
2759 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2763 BT_DBG("chan %p", chan);
2765 l2cap_chan_lock(chan);
2767 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2768 chan->last_acked_seq);
2771 l2cap_send_rr_or_rnr(chan, 0);
2773 l2cap_chan_unlock(chan);
2774 l2cap_chan_put(chan);
/* Initialize per-channel ERTM/streaming state: zero all sequence
 * counters, set up tx_q, and — for ERTM only — initialize the rx/tx
 * state machines, the retrans/monitor/ack work items, the srej queue
 * and the srej/retrans sequence lists.  Returns 0 or a negative
 * errno from sequence-list allocation (freeing srej_list if only the
 * second allocation fails).
 */
2777 int l2cap_ertm_init(struct l2cap_chan *chan)
2781 chan->next_tx_seq = 0;
2782 chan->expected_tx_seq = 0;
2783 chan->expected_ack_seq = 0;
2784 chan->unacked_frames = 0;
2785 chan->buffer_seq = 0;
2786 chan->frames_sent = 0;
2787 chan->last_acked_seq = 0;
2789 chan->sdu_last_frag = NULL;
2792 skb_queue_head_init(&chan->tx_q);
	/* Everything below applies only to ERTM, not streaming mode */
2794 if (chan->mode != L2CAP_MODE_ERTM)
2797 chan->rx_state = L2CAP_RX_STATE_RECV;
2798 chan->tx_state = L2CAP_TX_STATE_XMIT;
2800 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2801 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2802 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2804 skb_queue_head_init(&chan->srej_q);
2806 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2810 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	/* Second list failed: undo the first allocation */
2812 l2cap_seq_list_free(&chan->srej_list);
/* Choose the channel mode to use: keep ERTM/streaming when the remote
 * feature mask supports it, otherwise fall back to basic mode.
 */
2817 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2820 case L2CAP_MODE_STREAMING:
2821 case L2CAP_MODE_ERTM:
2822 if (l2cap_mode_supported(mode, remote_feat_mask))
2826 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed support enabled
 * and the remote advertising L2CAP_FEAT_EXT_WINDOW.
 */
2830 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2832 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed support
 * enabled and the remote advertising L2CAP_FEAT_EXT_FLOW.
 */
2835 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2837 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Configure the tx window: switch to the extended control field when
 * the requested window exceeds the default and EWS is supported,
 * otherwise clamp to the default window.  ack_win starts equal to
 * tx_win either way.
 */
2840 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2842 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2843 __l2cap_ews_supported(chan)) {
2844 /* use extended control field */
2845 set_bit(FLAG_EXT_CTRL, &chan->flags);
2846 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2848 chan->tx_win = min_t(u16, chan->tx_win,
2849 L2CAP_DEFAULT_TX_WINDOW);
2850 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2852 chan->ack_win = chan->tx_win;
/* Build an outgoing Configure Request for @chan into @data.
 *
 * On the first request, the channel mode may be downgraded via
 * l2cap_select_mode() based on the remote feature mask, and EFS usage
 * is decided.  Options added depend on the final mode: MTU (when not
 * the default), an RFC option describing the mode parameters, and —
 * for ERTM/streaming — optional EFS, FCS and EWS options.  Returns
 * the length of the request written.
 */
2855 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2857 struct l2cap_conf_req *req = data;
2858 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2859 void *ptr = req->data;
2862 BT_DBG("chan %p", chan);
	/* Mode selection happens only on the very first config exchange */
2864 if (chan->num_conf_req || chan->num_conf_rsp)
2867 switch (chan->mode) {
2868 case L2CAP_MODE_STREAMING:
2869 case L2CAP_MODE_ERTM:
2870 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2873 if (__l2cap_efs_supported(chan))
2874 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2878 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2883 if (chan->imtu != L2CAP_DEFAULT_MTU)
2884 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2886 switch (chan->mode) {
2887 case L2CAP_MODE_BASIC:
	/* No RFC option needed if the remote knows neither ERTM nor
	 * streaming mode.
	 */
2888 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2889 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2892 rfc.mode = L2CAP_MODE_BASIC;
2894 rfc.max_transmit = 0;
2895 rfc.retrans_timeout = 0;
2896 rfc.monitor_timeout = 0;
2897 rfc.max_pdu_size = 0;
2899 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2900 (unsigned long) &rfc);
2903 case L2CAP_MODE_ERTM:
2904 rfc.mode = L2CAP_MODE_ERTM;
2905 rfc.max_transmit = chan->max_tx;
2906 rfc.retrans_timeout = 0;
2907 rfc.monitor_timeout = 0;
	/* PDU size bounded by link MTU minus maximal L2CAP overhead */
2909 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2910 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
2912 rfc.max_pdu_size = cpu_to_le16(size);
2914 l2cap_txwin_setup(chan);
2916 rfc.txwin_size = min_t(u16, chan->tx_win,
2917 L2CAP_DEFAULT_TX_WINDOW);
2919 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2920 (unsigned long) &rfc);
2922 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2923 l2cap_add_opt_efs(&ptr, chan);
2925 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
	/* Request FCS omission when allowed and desired */
2928 if (chan->fcs == L2CAP_FCS_NONE ||
2929 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2930 chan->fcs = L2CAP_FCS_NONE;
2931 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2934 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2935 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2939 case L2CAP_MODE_STREAMING:
2940 l2cap_txwin_setup(chan);
2941 rfc.mode = L2CAP_MODE_STREAMING;
2943 rfc.max_transmit = 0;
2944 rfc.retrans_timeout = 0;
2945 rfc.monitor_timeout = 0;
2947 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2948 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
2950 rfc.max_pdu_size = cpu_to_le16(size);
2952 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2953 (unsigned long) &rfc);
2955 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2956 l2cap_add_opt_efs(&ptr, chan);
2958 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2961 if (chan->fcs == L2CAP_FCS_NONE ||
2962 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2963 chan->fcs = L2CAP_FCS_NONE;
2964 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2969 req->dcid = cpu_to_le16(chan->dcid);
2970 req->flags = __constant_cpu_to_le16(0);
/* Parse the peer's accumulated Configuration Request options (stored in
 * chan->conf_req by l2cap_config_req) and build our Configuration
 * Response into 'data'.  Visible exits return -ECONNREFUSED when the
 * requested options cannot be accepted.
 * NOTE(review): this excerpt elides lines (braces, breaks, defaults);
 * comments describe only what is visible here.
 */
2975 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2977 struct l2cap_conf_rsp *rsp = data;
2978 void *ptr = rsp->data;
2979 void *req = chan->conf_req;
2980 int len = chan->conf_len;
2981 int type, hint, olen;
/* Defaults used when the peer omits an option entirely. */
2983 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2984 struct l2cap_conf_efs efs;
2986 u16 mtu = L2CAP_DEFAULT_MTU;
2987 u16 result = L2CAP_CONF_SUCCESS;
2990 BT_DBG("chan %p", chan);
/* First pass: walk every option present in the request. */
2992 while (len >= L2CAP_CONF_OPT_SIZE) {
2993 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint bit set means "ignore if not understood" rather than reject. */
2995 hint = type & L2CAP_CONF_HINT;
2996 type &= L2CAP_CONF_MASK;
2999 case L2CAP_CONF_MTU:
3003 case L2CAP_CONF_FLUSH_TO:
3004 chan->flush_to = val;
3007 case L2CAP_CONF_QOS:
3010 case L2CAP_CONF_RFC:
3011 if (olen == sizeof(rfc))
3012 memcpy(&rfc, (void *) val, olen);
3015 case L2CAP_CONF_FCS:
/* Peer can do without FCS; remember so set_default_fcs() may skip it. */
3016 if (val == L2CAP_FCS_NONE)
3017 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
3020 case L2CAP_CONF_EFS:
3022 if (olen == sizeof(efs))
3023 memcpy(&efs, (void *) val, olen);
3026 case L2CAP_CONF_EWS:
3028 return -ECONNREFUSED;
/* Peer requested extended window sizes: enable extended control. */
3030 set_bit(FLAG_EXT_CTRL, &chan->flags);
3031 set_bit(CONF_EWS_RECV, &chan->conf_state);
3032 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3033 chan->remote_tx_win = val;
/* Unknown non-hint option: report CONF_UNKNOWN and echo the type back. */
3040 result = L2CAP_CONF_UNKNOWN;
3041 *((u8 *) ptr++) = type;
3046 if (chan->num_conf_rsp || chan->num_conf_req > 1)
/* Mode negotiation: pick/validate our mode against the peer's RFC. */
3049 switch (chan->mode) {
3050 case L2CAP_MODE_STREAMING:
3051 case L2CAP_MODE_ERTM:
3052 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3053 chan->mode = l2cap_select_mode(rfc.mode,
3054 chan->conn->feat_mask)
3059 if (__l2cap_efs_supported(chan))
3060 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3062 return -ECONNREFUSED;
3065 if (chan->mode != rfc.mode)
3066 return -ECONNREFUSED;
/* Modes disagree: mark unacceptable and propose ours instead. */
3072 if (chan->mode != rfc.mode) {
3073 result = L2CAP_CONF_UNACCEPT;
3074 rfc.mode = chan->mode;
/* Second round of negotiation already failed once: give up. */
3076 if (chan->num_conf_rsp == 1)
3077 return -ECONNREFUSED;
3079 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3080 (unsigned long) &rfc);
3083 if (result == L2CAP_CONF_SUCCESS) {
3084 /* Configure output options and let the other side know
3085 * which ones we don't like. */
3087 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3088 result = L2CAP_CONF_UNACCEPT;
3091 set_bit(CONF_MTU_DONE, &chan->conf_state);
3093 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must match ours unless one side is no-traffic. */
3096 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3097 efs.stype != L2CAP_SERV_NOTRAFIC &&
3098 efs.stype != chan->local_stype) {
3100 result = L2CAP_CONF_UNACCEPT;
3102 if (chan->num_conf_req >= 1)
3103 return -ECONNREFUSED;
3105 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3107 (unsigned long) &efs);
3109 /* Send PENDING Conf Rsp */
3110 result = L2CAP_CONF_PENDING;
3111 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3116 case L2CAP_MODE_BASIC:
3117 chan->fcs = L2CAP_FCS_NONE;
3118 set_bit(CONF_MODE_DONE, &chan->conf_state);
3121 case L2CAP_MODE_ERTM:
3122 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3123 chan->remote_tx_win = rfc.txwin_size;
3125 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3127 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's MPS so a full PDU still fits our ACL MTU. */
3129 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3130 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3131 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3132 rfc.max_pdu_size = cpu_to_le16(size);
3133 chan->remote_mps = size;
3135 rfc.retrans_timeout =
3136 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3137 rfc.monitor_timeout =
3138 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3140 set_bit(CONF_MODE_DONE, &chan->conf_state);
3142 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3143 sizeof(rfc), (unsigned long) &rfc);
/* Record the peer's EFS parameters and echo the option back. */
3145 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3146 chan->remote_id = efs.id;
3147 chan->remote_stype = efs.stype;
3148 chan->remote_msdu = le16_to_cpu(efs.msdu);
3149 chan->remote_flush_to =
3150 le32_to_cpu(efs.flush_to);
3151 chan->remote_acc_lat =
3152 le32_to_cpu(efs.acc_lat);
3153 chan->remote_sdu_itime =
3154 le32_to_cpu(efs.sdu_itime);
3155 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3157 (unsigned long) &efs);
3161 case L2CAP_MODE_STREAMING:
3162 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3163 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3164 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3165 rfc.max_pdu_size = cpu_to_le16(size);
3166 chan->remote_mps = size;
3168 set_bit(CONF_MODE_DONE, &chan->conf_state);
3170 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3171 (unsigned long) &rfc);
/* Fallback (presumably the switch default): reject with our mode. */
3176 result = L2CAP_CONF_UNACCEPT;
3178 memset(&rfc, 0, sizeof(rfc));
3179 rfc.mode = chan->mode;
3182 if (result == L2CAP_CONF_SUCCESS)
3183 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
/* Note: rsp->scid carries our dcid (the peer's source CID). */
3185 rsp->scid = cpu_to_le16(chan->dcid);
3186 rsp->result = cpu_to_le16(result);
3187 rsp->flags = __constant_cpu_to_le16(0);
/* Parse the peer's Configuration Response and build a follow-up
 * Configuration Request in 'data', adjusting our local parameters to
 * what the peer indicated.  '*result' carries the response's result
 * code in and may be updated.  Returns -ECONNREFUSED on options we
 * cannot live with.
 * NOTE(review): excerpt elides lines (braces/breaks); comments cover
 * only the visible code.
 */
3192 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3193 void *data, u16 *result)
3195 struct l2cap_conf_req *req = data;
3196 void *ptr = req->data;
3199 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3200 struct l2cap_conf_efs efs;
3202 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3204 while (len >= L2CAP_CONF_OPT_SIZE) {
3205 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3208 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the legal minimum: clamp and refuse. */
3209 if (val < L2CAP_DEFAULT_MIN_MTU) {
3210 *result = L2CAP_CONF_UNACCEPT;
3211 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3214 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3217 case L2CAP_CONF_FLUSH_TO:
3218 chan->flush_to = val;
3219 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3223 case L2CAP_CONF_RFC:
3224 if (olen == sizeof(rfc))
3225 memcpy(&rfc, (void *)val, olen);
/* A state-2 device must not change mode during negotiation. */
3227 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3228 rfc.mode != chan->mode)
3229 return -ECONNREFUSED;
3233 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3234 sizeof(rfc), (unsigned long) &rfc);
3237 case L2CAP_CONF_EWS:
/* Shrink our ack window to what the peer can accept. */
3238 chan->ack_win = min_t(u16, val, chan->ack_win);
3239 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3243 case L2CAP_CONF_EFS:
3244 if (olen == sizeof(efs))
3245 memcpy(&efs, (void *)val, olen);
3247 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3248 efs.stype != L2CAP_SERV_NOTRAFIC &&
3249 efs.stype != chan->local_stype)
3250 return -ECONNREFUSED;
3252 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3253 (unsigned long) &efs);
/* Basic mode cannot be renegotiated into anything else. */
3258 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3259 return -ECONNREFUSED;
3261 chan->mode = rfc.mode;
/* Commit negotiated timers/sizes on SUCCESS or PENDING results. */
3263 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3265 case L2CAP_MODE_ERTM:
3266 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3267 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3268 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3269 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3270 chan->ack_win = min_t(u16, chan->ack_win,
3273 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3274 chan->local_msdu = le16_to_cpu(efs.msdu);
3275 chan->local_sdu_itime =
3276 le32_to_cpu(efs.sdu_itime);
3277 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3278 chan->local_flush_to =
3279 le32_to_cpu(efs.flush_to);
3283 case L2CAP_MODE_STREAMING:
3284 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3288 req->dcid = cpu_to_le16(chan->dcid);
3289 req->flags = __constant_cpu_to_le16(0);
/* Fill in a minimal Configuration Response header (scid/result/flags)
 * for 'chan' into 'data'.  The return statement is elided in this
 * excerpt; presumably it returns the number of bytes written.
 */
3294 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3295 u16 result, u16 flags)
3297 struct l2cap_conf_rsp *rsp = data;
3298 void *ptr = rsp->data;
3300 BT_DBG("chan %p", chan);
/* scid in the response is the peer's source CID, i.e. our dcid. */
3302 rsp->scid = cpu_to_le16(chan->dcid);
3303 rsp->result = cpu_to_le16(result);
3304 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connection Response (success / no further info)
 * for a channel whose acceptance was postponed, then kick off
 * configuration by sending our first Configuration Request unless one
 * was already sent (CONF_REQ_SENT guards against duplicates).
 */
3309 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3311 struct l2cap_conn_rsp rsp;
3312 struct l2cap_conn *conn = chan->conn;
3315 rsp.scid = cpu_to_le16(chan->dcid);
3316 rsp.dcid = cpu_to_le16(chan->scid);
3317 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3318 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3319 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only the first caller past this test sends the config request. */
3321 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3324 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3325 l2cap_build_conf_req(chan, buf), buf);
3326 chan->num_conf_req++;
/* Extract RFC (and extended-window) parameters from a successful
 * Configuration Response and commit them to the channel.  Sane
 * defaults are pre-loaded so a peer that omitted the options still
 * leaves the channel usable.  Only meaningful for ERTM/streaming.
 */
3329 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3333 /* Use sane default values in case a misbehaving remote device
3334 * did not send an RFC or extended window size option.
3336 u16 txwin_ext = chan->ack_win;
3337 struct l2cap_conf_rfc rfc = {
3339 .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3340 .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3341 .max_pdu_size = cpu_to_le16(chan->imtu),
3342 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3345 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Nothing to do for basic mode channels. */
3347 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3350 while (len >= L2CAP_CONF_OPT_SIZE) {
3351 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3354 case L2CAP_CONF_RFC:
3355 if (olen == sizeof(rfc))
3356 memcpy(&rfc, (void *)val, olen);
3358 case L2CAP_CONF_EWS:
3365 case L2CAP_MODE_ERTM:
3366 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3367 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3368 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Extended control uses the EWS value; otherwise clamp to RFC txwin. */
3369 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3370 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3372 chan->ack_win = min_t(u16, chan->ack_win,
3375 case L2CAP_MODE_STREAMING:
3376 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject from the peer.  If it rejects our pending
 * Information Request (matching ident), treat feature discovery as
 * done and proceed with connection startup anyway.
 */
3380 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3381 struct l2cap_cmd_hdr *cmd, u8 *data)
3383 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3385 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3388 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3389 cmd->ident == conn->info_ident) {
3390 cancel_delayed_work(&conn->info_timer);
3392 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3393 conn->info_ident = 0;
3395 l2cap_conn_start(conn);
/* Core handler for an incoming Connection Request (also reused by the
 * Create Channel path via rsp_code).  Looks up a listening channel for
 * the PSM, performs security checks, creates the new child channel,
 * decides the response (success / pending / reject) and sends it.
 * NOTE(review): many lines (error labels, 'response:'/'sendresp:'
 * style jumps, closing braces) are elided in this excerpt.
 */
3401 static void __l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
3402 u8 *data, u8 rsp_code, u8 amp_id)
3404 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3405 struct l2cap_conn_rsp rsp;
3406 struct l2cap_chan *chan = NULL, *pchan;
3407 struct sock *parent, *sk = NULL;
3408 int result, status = L2CAP_CS_NO_INFO;
3410 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3411 __le16 psm = req->psm;
3413 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3415 /* Check if we have socket listening on psm */
3416 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3418 result = L2CAP_CR_BAD_PSM;
3424 mutex_lock(&conn->chan_lock);
3427 /* Check if the ACL is secure enough (if not SDP) */
3428 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3429 !hci_conn_check_link_mode(conn->hcon)) {
3430 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3431 result = L2CAP_CR_SEC_BLOCK;
3435 result = L2CAP_CR_NO_MEM;
3437 /* Check if we already have channel with that dcid */
3438 if (__l2cap_get_chan_by_dcid(conn, scid))
3441 chan = pchan->ops->new_connection(pchan);
/* Hold the ACL while the new channel exists. */
3447 hci_conn_hold(conn->hcon);
3449 bacpy(&bt_sk(sk)->src, conn->src);
3450 bacpy(&bt_sk(sk)->dst, conn->dst);
3454 bt_accept_enqueue(parent, sk);
3456 __l2cap_chan_add(conn, chan);
3460 __set_chan_timer(chan, sk->sk_sndtimeo);
3462 chan->ident = cmd->ident;
/* Decide the response: only answer definitively once feature
 * discovery on this connection has completed.
 */
3464 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3465 if (l2cap_chan_check_security(chan)) {
3466 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3467 __l2cap_state_change(chan, BT_CONNECT2);
3468 result = L2CAP_CR_PEND;
3469 status = L2CAP_CS_AUTHOR_PEND;
/* Wake the listener so userspace can accept/authorize. */
3470 parent->sk_data_ready(parent, 0);
3472 __l2cap_state_change(chan, BT_CONFIG);
3473 result = L2CAP_CR_SUCCESS;
3474 status = L2CAP_CS_NO_INFO;
3477 __l2cap_state_change(chan, BT_CONNECT2);
3478 result = L2CAP_CR_PEND;
3479 status = L2CAP_CS_AUTHEN_PEND;
3482 __l2cap_state_change(chan, BT_CONNECT2);
3483 result = L2CAP_CR_PEND;
3484 status = L2CAP_CS_NO_INFO;
3488 release_sock(parent);
3489 mutex_unlock(&conn->chan_lock);
3492 rsp.scid = cpu_to_le16(scid);
3493 rsp.dcid = cpu_to_le16(dcid);
3494 rsp.result = cpu_to_le16(result);
3495 rsp.status = cpu_to_le16(status);
3496 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
/* Pending with no info: start feature-mask discovery now. */
3498 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3499 struct l2cap_info_req info;
3500 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3502 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3503 conn->info_ident = l2cap_get_ident(conn);
3505 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3507 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3508 sizeof(info), &info);
/* Accepted immediately: begin configuration right away. */
3511 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3512 result == L2CAP_CR_SUCCESS) {
3514 set_bit(CONF_REQ_SENT, &chan->conf_state);
3515 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3516 l2cap_build_conf_req(chan, buf), buf);
3517 chan->num_conf_req++;
/* Thin wrapper: handle a BR/EDR Connection Request by delegating to
 * __l2cap_connect with the standard CONN_RSP code and no AMP id.
 */
3521 static int l2cap_connect_req(struct l2cap_conn *conn,
3522 struct l2cap_cmd_hdr *cmd, u8 *data)
3524 __l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
/* Handle a Connection Response to our earlier request.  Looks the
 * channel up by scid (or by command ident when no dcid was assigned),
 * then on success moves to BT_CONFIG and sends the first Configuration
 * Request; on refusal tears the channel down with ECONNREFUSED.
 */
3528 static inline int l2cap_connect_rsp(struct l2cap_conn *conn,
3529 struct l2cap_cmd_hdr *cmd, u8 *data)
3531 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3532 u16 scid, dcid, result, status;
3533 struct l2cap_chan *chan;
3537 scid = __le16_to_cpu(rsp->scid);
3538 dcid = __le16_to_cpu(rsp->dcid);
3539 result = __le16_to_cpu(rsp->result);
3540 status = __le16_to_cpu(rsp->status);
3542 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3543 dcid, scid, result, status);
3545 mutex_lock(&conn->chan_lock);
3548 chan = __l2cap_get_chan_by_scid(conn, scid);
/* No scid yet (pending response): match by the request's ident. */
3554 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3563 l2cap_chan_lock(chan);
3566 case L2CAP_CR_SUCCESS:
3567 l2cap_state_change(chan, BT_CONFIG);
3570 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Avoid sending a duplicate Configuration Request. */
3572 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3575 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3576 l2cap_build_conf_req(chan, req), req);
3577 chan->num_conf_req++;
3581 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Presumably the default/refused case: drop the channel. */
3585 l2cap_chan_del(chan, ECONNREFUSED);
3589 l2cap_chan_unlock(chan);
3592 mutex_unlock(&conn->chan_lock);
/* Choose the channel's FCS setting after configuration: FCS only
 * applies to ERTM/streaming, and is enabled (CRC16) unless the peer
 * explicitly told us it can do without it (CONF_NO_FCS_RECV).
 */
3597 static inline void set_default_fcs(struct l2cap_chan *chan)
3599 /* FCS is enabled only in ERTM or streaming mode, if one or both
3602 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3603 chan->fcs = L2CAP_FCS_NONE;
3604 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3605 chan->fcs = L2CAP_FCS_CRC16;
/* Send a SUCCESS Configuration Response after an EFS-induced PENDING
 * phase completes, clearing the local-pending flag and marking our
 * output configuration as done.
 */
3608 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3609 u8 ident, u16 flags)
3611 struct l2cap_conn *conn = chan->conn;
3613 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3616 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3617 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3619 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3620 l2cap_build_conf_rsp(chan, data,
3621 L2CAP_CONF_SUCCESS, flags), data);
/* Handle an incoming Configuration Request.  Accumulates (possibly
 * fragmented) option data into chan->conf_req, and once the final
 * fragment arrives parses it, replies, and — when both directions are
 * configured — initializes ERTM state and marks the channel ready.
 * NOTE(review): excerpt elides error paths and some braces.
 */
3624 static inline int l2cap_config_req(struct l2cap_conn *conn,
3625 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3628 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3631 struct l2cap_chan *chan;
3634 dcid = __le16_to_cpu(req->dcid);
3635 flags = __le16_to_cpu(req->flags);
3637 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3639 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config only makes sense while connecting/configuring; otherwise
 * the CID is invalid from our point of view — reject it.
 */
3643 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3644 struct l2cap_cmd_rej_cid rej;
3646 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3647 rej.scid = cpu_to_le16(chan->scid);
3648 rej.dcid = cpu_to_le16(chan->dcid);
3650 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3655 /* Reject if config buffer is too small. */
3656 len = cmd_len - sizeof(*req);
3657 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3658 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3659 l2cap_build_conf_rsp(chan, rsp,
3660 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the accumulated request. */
3665 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3666 chan->conf_len += len;
3668 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3669 /* Incomplete config. Send empty response. */
3670 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3671 l2cap_build_conf_rsp(chan, rsp,
3672 L2CAP_CONF_SUCCESS, flags), rsp);
3676 /* Complete config. */
3677 len = l2cap_parse_conf_req(chan, rsp);
3679 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3683 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3684 chan->num_conf_rsp++;
3686 /* Reset config buffer. */
3689 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finish channel setup. */
3692 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3693 set_default_fcs(chan);
3695 if (chan->mode == L2CAP_MODE_ERTM ||
3696 chan->mode == L2CAP_MODE_STREAMING)
3697 err = l2cap_ertm_init(chan);
3700 l2cap_send_disconn_req(chan->conn, chan, -err);
3702 l2cap_chan_ready(chan);
3707 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3709 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3710 l2cap_build_conf_req(chan, buf), buf);
3711 chan->num_conf_req++;
3714 /* Got Conf Rsp PENDING from remote side and asume we sent
3715 Conf Rsp PENDING in the code above */
3716 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3717 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3719 /* check compatibility */
3721 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
3725 l2cap_chan_unlock(chan);
/* Handle an incoming Configuration Response to our request.  Dispatch
 * on the result: SUCCESS commits the negotiated RFC values, PENDING
 * may trigger an EFS exchange, UNACCEPT re-negotiates with a fresh
 * request (bounded by L2CAP_CONF_MAX_CONF_RSP), anything else tears
 * the channel down.  When both directions are done, finish setup.
 * NOTE(review): excerpt elides braces, 'done:' label and error paths.
 */
3729 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
3730 struct l2cap_cmd_hdr *cmd, u8 *data)
3732 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3733 u16 scid, flags, result;
3734 struct l2cap_chan *chan;
3735 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3738 scid = __le16_to_cpu(rsp->scid);
3739 flags = __le16_to_cpu(rsp->flags);
3740 result = __le16_to_cpu(rsp->result);
3742 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3745 chan = l2cap_get_chan_by_scid(conn, scid);
3750 case L2CAP_CONF_SUCCESS:
3751 l2cap_conf_rfc_get(chan, rsp->data, len);
3752 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3755 case L2CAP_CONF_PENDING:
3756 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3758 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3761 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3764 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3768 /* check compatibility */
3770 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
3774 case L2CAP_CONF_UNACCEPT:
3775 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard against overrunning the local request buffer. */
3778 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3779 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3783 /* throw out any old stored conf requests */
3784 result = L2CAP_CONF_SUCCESS;
3785 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3788 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3792 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3793 L2CAP_CONF_REQ, len, req);
3794 chan->num_conf_req++;
3795 if (result != L2CAP_CONF_SUCCESS)
/* Presumably the default case: unrecoverable result code. */
3801 l2cap_chan_set_err(chan, ECONNRESET);
3803 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3804 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3808 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3811 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3813 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3814 set_default_fcs(chan);
3816 if (chan->mode == L2CAP_MODE_ERTM ||
3817 chan->mode == L2CAP_MODE_STREAMING)
3818 err = l2cap_ertm_init(chan);
3821 l2cap_send_disconn_req(chan->conn, chan, -err);
3823 l2cap_chan_ready(chan);
3827 l2cap_chan_unlock(chan);
/* Handle a Disconnection Request from the peer: acknowledge with a
 * Disconnection Response, shut the socket down, and delete the channel
 * (holding a ref across the unlocked ops->close call).
 */
3831 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
3832 struct l2cap_cmd_hdr *cmd, u8 *data)
3834 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3835 struct l2cap_disconn_rsp rsp;
3837 struct l2cap_chan *chan;
3840 scid = __le16_to_cpu(req->scid);
3841 dcid = __le16_to_cpu(req->dcid);
3843 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3845 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid, hence the lookup by dcid here. */
3847 chan = __l2cap_get_chan_by_scid(conn, dcid);
3849 mutex_unlock(&conn->chan_lock);
3853 l2cap_chan_lock(chan);
3857 rsp.dcid = cpu_to_le16(chan->scid);
3858 rsp.scid = cpu_to_le16(chan->dcid);
3859 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3862 sk->sk_shutdown = SHUTDOWN_MASK;
/* Keep the channel alive until ops->close has run. */
3865 l2cap_chan_hold(chan);
3866 l2cap_chan_del(chan, ECONNRESET);
3868 l2cap_chan_unlock(chan);
3870 chan->ops->close(chan);
3871 l2cap_chan_put(chan);
3873 mutex_unlock(&conn->chan_lock);
/* Handle a Disconnection Response to our own request: the peer has
 * confirmed, so delete the channel cleanly (err 0) and close it,
 * mirroring the locking/refcount dance of l2cap_disconnect_req.
 */
3878 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
3879 struct l2cap_cmd_hdr *cmd, u8 *data)
3881 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3883 struct l2cap_chan *chan;
3885 scid = __le16_to_cpu(rsp->scid);
3886 dcid = __le16_to_cpu(rsp->dcid);
3888 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3890 mutex_lock(&conn->chan_lock);
3892 chan = __l2cap_get_chan_by_scid(conn, scid);
3894 mutex_unlock(&conn->chan_lock);
3898 l2cap_chan_lock(chan);
3900 l2cap_chan_hold(chan);
3901 l2cap_chan_del(chan, 0);
3903 l2cap_chan_unlock(chan);
3905 chan->ops->close(chan);
3906 l2cap_chan_put(chan);
3908 mutex_unlock(&conn->chan_lock);
/* Answer an Information Request: report our feature mask for
 * L2CAP_IT_FEAT_MASK, the fixed-channel map for L2CAP_IT_FIXED_CHAN,
 * and NOTSUPP for anything else.
 */
3913 static inline int l2cap_information_req(struct l2cap_conn *conn,
3914 struct l2cap_cmd_hdr *cmd, u8 *data)
3916 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3919 type = __le16_to_cpu(req->type);
3921 BT_DBG("type 0x%4.4x", type);
3923 if (type == L2CAP_IT_FEAT_MASK) {
3925 u32 feat_mask = l2cap_feat_mask;
3926 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3927 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3928 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming (and, conditionally, extended features). */
3930 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3933 feat_mask |= L2CAP_FEAT_EXT_FLOW
3934 | L2CAP_FEAT_EXT_WINDOW;
3936 put_unaligned_le32(feat_mask, rsp->data);
3937 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
3939 } else if (type == L2CAP_IT_FIXED_CHAN) {
3941 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* A2MP fixed channel bit depends on a condition elided here. */
3944 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3946 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3948 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3949 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3950 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3951 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
3954 struct l2cap_info_rsp rsp;
3955 rsp.type = cpu_to_le16(type);
3956 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3957 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
/* Handle an Information Response.  Records the peer's feature mask
 * (chaining a fixed-channel query if supported) or fixed-channel map,
 * then marks discovery done and starts pending connections.
 */
3964 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
3965 struct l2cap_cmd_hdr *cmd, u8 *data)
3967 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3970 type = __le16_to_cpu(rsp->type);
3971 result = __le16_to_cpu(rsp->result);
3973 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3975 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3976 if (cmd->ident != conn->info_ident ||
3977 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3980 cancel_delayed_work(&conn->info_timer);
/* Peer refused the query: proceed without feature info. */
3982 if (result != L2CAP_IR_SUCCESS) {
3983 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3984 conn->info_ident = 0;
3986 l2cap_conn_start(conn);
3992 case L2CAP_IT_FEAT_MASK:
3993 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: follow up with that query. */
3995 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3996 struct l2cap_info_req req;
3997 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3999 conn->info_ident = l2cap_get_ident(conn);
4001 l2cap_send_cmd(conn, conn->info_ident,
4002 L2CAP_INFO_REQ, sizeof(req), &req);
4004 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4005 conn->info_ident = 0;
4007 l2cap_conn_start(conn);
4011 case L2CAP_IT_FIXED_CHAN:
4012 conn->fixed_chan_mask = rsp->data[0];
4013 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4014 conn->info_ident = 0;
4016 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request.  Currently a placeholder that
 * always replies with "no resources" (L2CAP_CR_NO_MEM), as the
 * comment below states.
 */
4023 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
4024 struct l2cap_cmd_hdr *cmd,
4025 u16 cmd_len, void *data)
4027 struct l2cap_create_chan_req *req = data;
4028 struct l2cap_create_chan_rsp rsp;
/* Malformed length: handled by the elided branch here. */
4031 if (cmd_len != sizeof(*req))
4037 psm = le16_to_cpu(req->psm);
4038 scid = le16_to_cpu(req->scid);
4040 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4042 /* Placeholder: Always reject */
4044 rsp.scid = cpu_to_le16(scid);
4045 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
4046 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
4048 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* An AMP Create Channel Response is handled exactly like a regular
 * Connection Response.
 */
4054 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
4055 struct l2cap_cmd_hdr *cmd,
4058 BT_DBG("conn %p", conn);
4060 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response with the given initiator CID and
 * result, reusing the ident of the request being answered.
 */
4063 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4064 u16 icid, u16 result)
4066 struct l2cap_move_chan_rsp rsp;
4068 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4070 rsp.icid = cpu_to_le16(icid);
4071 rsp.result = cpu_to_le16(result);
4073 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirm with a freshly allocated ident, which is
 * also remembered on the channel (when given) so the matching Confirm
 * Response can be correlated.
 */
4076 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4077 struct l2cap_chan *chan,
4078 u16 icid, u16 result)
4080 struct l2cap_move_chan_cfm cfm;
4083 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4085 ident = l2cap_get_ident(conn);
4087 chan->ident = ident;
4089 cfm.icid = cpu_to_le16(icid);
4090 cfm.result = cpu_to_le16(result);
4092 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Acknowledge a Move Channel Confirm by echoing its icid back in a
 * Confirm Response.
 */
4095 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4098 struct l2cap_move_chan_cfm_rsp rsp;
4100 BT_DBG("icid 0x%4.4x", icid);
4102 rsp.icid = cpu_to_le16(icid);
4103 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request.  Placeholder implementation: always
 * responds with "not allowed", per the comment below.
 */
4106 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4107 struct l2cap_cmd_hdr *cmd,
4108 u16 cmd_len, void *data)
4110 struct l2cap_move_chan_req *req = data;
4112 u16 result = L2CAP_MR_NOT_ALLOWED;
4114 if (cmd_len != sizeof(*req))
4117 icid = le16_to_cpu(req->icid);
4119 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4124 /* Placeholder: Always refuse */
4125 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response.  Placeholder: always confirms as
 * UNCONFIRMED with no channel attached.
 */
4130 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4131 struct l2cap_cmd_hdr *cmd,
4132 u16 cmd_len, void *data)
4134 struct l2cap_move_chan_rsp *rsp = data;
4137 if (cmd_len != sizeof(*rsp))
4140 icid = le16_to_cpu(rsp->icid);
4141 result = le16_to_cpu(rsp->result);
4143 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4145 /* Placeholder: Always unconfirmed */
4146 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirm: validate the length and acknowledge
 * it with a Confirm Response.
 */
4151 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4152 struct l2cap_cmd_hdr *cmd,
4153 u16 cmd_len, void *data)
4155 struct l2cap_move_chan_cfm *cfm = data;
4158 if (cmd_len != sizeof(*cfm))
4161 icid = le16_to_cpu(cfm->icid);
4162 result = le16_to_cpu(cfm->result);
4164 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4166 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirm Response: validate the length and log
 * the icid.  No further action is visible in this excerpt.
 */
4171 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4172 struct l2cap_cmd_hdr *cmd,
4173 u16 cmd_len, void *data)
4175 struct l2cap_move_chan_cfm_rsp *rsp = data;
4178 if (cmd_len != sizeof(*rsp))
4181 icid = le16_to_cpu(rsp->icid);
4183 BT_DBG("icid 0x%4.4x", icid);
/* Validate LE connection parameters (interval min/max in 1.25 ms
 * units, slave latency, supervision timeout multiplier in 10 ms
 * units).  The rejection returns between these checks are elided in
 * this excerpt; presumably each failed check returns an error.
 */
4188 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4193 if (min > max || min < 6 || max > 3200)
4196 if (to_multiplier < 10 || to_multiplier > 3200)
/* Supervision timeout must exceed the maximum connection interval. */
4199 if (max >= to_multiplier * 8)
4202 max_latency = (to_multiplier * 8 / max) - 1;
4203 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (only valid when we
 * are master): validate the proposed parameters, send accept/reject,
 * and on accept ask the controller to apply the new parameters.
 */
4209 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4210 struct l2cap_cmd_hdr *cmd,
4213 struct hci_conn *hcon = conn->hcon;
4214 struct l2cap_conn_param_update_req *req;
4215 struct l2cap_conn_param_update_rsp rsp;
4216 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may be asked to update connection parameters. */
4219 if (!(hcon->link_mode & HCI_LM_MASTER))
4222 cmd_len = __le16_to_cpu(cmd->len);
4223 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4226 req = (struct l2cap_conn_param_update_req *) data;
4227 min = __le16_to_cpu(req->min);
4228 max = __le16_to_cpu(req->max);
4229 latency = __le16_to_cpu(req->latency);
4230 to_multiplier = __le16_to_cpu(req->to_multiplier);
4232 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4233 min, max, latency, to_multiplier);
4235 memset(&rsp, 0, sizeof(rsp));
4237 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4239 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4241 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4243 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Presumably only reached when the parameters were accepted. */
4247 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler based on the
 * command code.  Echo requests are answered inline; unknown codes are
 * logged as errors.
 */
4252 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4253 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4258 switch (cmd->code) {
4259 case L2CAP_COMMAND_REJ:
4260 l2cap_command_rej(conn, cmd, data);
4263 case L2CAP_CONN_REQ:
4264 err = l2cap_connect_req(conn, cmd, data);
4267 case L2CAP_CONN_RSP:
4268 case L2CAP_CREATE_CHAN_RSP:
4269 err = l2cap_connect_rsp(conn, cmd, data);
4272 case L2CAP_CONF_REQ:
4273 err = l2cap_config_req(conn, cmd, cmd_len, data);
4276 case L2CAP_CONF_RSP:
4277 err = l2cap_config_rsp(conn, cmd, data);
4280 case L2CAP_DISCONN_REQ:
4281 err = l2cap_disconnect_req(conn, cmd, data);
4284 case L2CAP_DISCONN_RSP:
4285 err = l2cap_disconnect_rsp(conn, cmd, data);
4288 case L2CAP_ECHO_REQ:
/* Echo the payload straight back to the sender. */
4289 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4292 case L2CAP_ECHO_RSP:
4295 case L2CAP_INFO_REQ:
4296 err = l2cap_information_req(conn, cmd, data);
4299 case L2CAP_INFO_RSP:
4300 err = l2cap_information_rsp(conn, cmd, data);
4303 case L2CAP_CREATE_CHAN_REQ:
4304 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4307 case L2CAP_MOVE_CHAN_REQ:
4308 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4311 case L2CAP_MOVE_CHAN_RSP:
4312 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4315 case L2CAP_MOVE_CHAN_CFM:
4316 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4319 case L2CAP_MOVE_CHAN_CFM_RSP:
4320 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4324 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command.  Only the connection-parameter
 * update request is acted on; rejects and update responses fall
 * through, and unknown codes are logged.
 */
4332 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4333 struct l2cap_cmd_hdr *cmd, u8 *data)
4335 switch (cmd->code) {
4336 case L2CAP_COMMAND_REJ:
4339 case L2CAP_CONN_PARAM_UPDATE_REQ:
4340 return l2cap_conn_param_update_req(conn, cmd, data);
4342 case L2CAP_CONN_PARAM_UPDATE_RSP:
4346 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process the signaling channel payload of an skb: iterate over the
 * packed command headers, dispatch each to the LE or BR/EDR handler,
 * and send a Command Reject when a handler reports an error.
 */
4351 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4352 struct sk_buff *skb)
4354 u8 *data = skb->data;
4356 struct l2cap_cmd_hdr cmd;
/* Give raw sockets a copy of the signaling traffic first. */
4359 l2cap_raw_recv(conn, skb);
4361 while (len >= L2CAP_CMD_HDR_SIZE) {
4363 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4364 data += L2CAP_CMD_HDR_SIZE;
4365 len -= L2CAP_CMD_HDR_SIZE;
4367 cmd_len = le16_to_cpu(cmd.len);
4369 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
/* A command longer than the remaining payload, or with a zero
 * ident, is malformed — stop parsing this skb.
 */
4372 if (cmd_len > len || !cmd.ident) {
4373 BT_DBG("corrupted command");
4377 if (conn->hcon->type == LE_LINK)
4378 err = l2cap_le_sig_cmd(conn, &cmd, data);
4380 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4383 struct l2cap_cmd_rej_unk rej;
4385 BT_ERR("Wrong link type (%d)", err);
4387 /* FIXME: Map err to a valid reason */
4388 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4389 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
/* Verify (and strip) the trailing CRC16 FCS of a received frame when
 * the channel uses L2CAP_FCS_CRC16.  The CRC covers the L2CAP header
 * (extended or enhanced, depending on FLAG_EXT_CTRL) plus the payload.
 * Return statements are elided in this excerpt.
 */
4400 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4402 u16 our_fcs, rcv_fcs;
4405 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4406 hdr_size = L2CAP_EXT_HDR_SIZE;
4408 hdr_size = L2CAP_ENH_HDR_SIZE;
4410 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off, then read it from just past the new tail. */
4411 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4412 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4413 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4415 if (our_fcs != rcv_fcs)
/* After local busy state changes, tell the peer our status: send RNR
 * if still locally busy, otherwise flush pending I-frames (restarting
 * the retransmission timer if needed) and fall back to an RR S-frame
 * when the F-bit could not be carried by any I-frame.
 */
4421 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4423 struct l2cap_ctrl control;
4425 BT_DBG("chan %p", chan);
4427 memset(&control, 0, sizeof(control));
4430 control.reqseq = chan->buffer_seq;
4431 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4433 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4434 control.super = L2CAP_SUPER_RNR;
4435 l2cap_send_sframe(chan, &control);
4438 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4439 chan->unacked_frames > 0)
4440 __set_retrans_timer(chan);
4442 /* Send pending iframes */
4443 l2cap_ertm_send(chan);
4445 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4446 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
4447 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4450 control.super = L2CAP_SUPER_RR;
4451 l2cap_send_sframe(chan, &control);
/* Chain new_frag onto skb's frag_list (starting the list if empty),
 * update *last_frag to point at it, and keep skb's length/truesize
 * accounting consistent with the added fragment.
 */
4455 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
4456 struct sk_buff **last_frag)
4458 /* skb->len reflects data in skb as well as all fragments
4459 * skb->data_len reflects only data in fragments
4461 if (!skb_has_frag_list(skb))
4462 skb_shinfo(skb)->frag_list = new_frag;
4464 new_frag->next = NULL;
4466 (*last_frag)->next = new_frag;
4467 *last_frag = new_frag;
4469 skb->len += new_frag->len;
4470 skb->data_len += new_frag->len;
4471 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from I-frames according to the SAR bits.
 * Unsegmented frames go straight to the channel's recv op; START frames
 * record the announced SDU length (validated against the channel MTU)
 * and seed chan->sdu; CONTINUE/END frames are appended via
 * append_skb_frag() and the completed SDU is delivered when its length
 * matches what the START frame announced. Error paths free the partial
 * SDU and reset the reassembly state.
 */
4474 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4475 struct l2cap_ctrl *control)
4479 switch (control->sar) {
4480 case L2CAP_SAR_UNSEGMENTED:
4484 err = chan->ops->recv(chan, skb);
4487 case L2CAP_SAR_START:
/* The first two payload bytes of a START frame carry the total
 * SDU length; strip them before queuing the data.
 */
4491 chan->sdu_len = get_unaligned_le16(skb->data);
4492 skb_pull(skb, L2CAP_SDULEN_SIZE);
4494 if (chan->sdu_len > chan->imtu) {
4499 if (skb->len >= chan->sdu_len)
4503 chan->sdu_last_frag = skb;
4509 case L2CAP_SAR_CONTINUE:
4513 append_skb_frag(chan->sdu, skb,
4514 &chan->sdu_last_frag);
/* Accumulated length must stay strictly below the announced
 * SDU length until the END frame arrives.
 */
4517 if (chan->sdu->len >= chan->sdu_len)
4527 append_skb_frag(chan->sdu, skb,
4528 &chan->sdu_last_frag);
4531 if (chan->sdu->len != chan->sdu_len)
4534 err = chan->ops->recv(chan, chan->sdu);
4537 /* Reassembly complete */
4539 chan->sdu_last_frag = NULL;
/* Failure: drop the partial SDU and clear reassembly state. */
4547 kfree_skb(chan->sdu);
4549 chan->sdu_last_frag = NULL;
/* Signal local-busy transitions to the ERTM transmit state machine.
 * No-op for channels not in ERTM mode.
 */
4556 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4560 if (chan->mode != L2CAP_MODE_ERTM)
4563 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4564 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver frames to l2cap_reassemble_sdu() in
 * buffer_seq order until a sequence gap (or local busy) stops progress.
 * When the queue empties, return to the RECV state and ack the peer.
 */
4567 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4570 /* Pass sequential frames to l2cap_reassemble_sdu()
4571 * until a gap is encountered.
4574 BT_DBG("chan %p", chan);
4576 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4577 struct sk_buff *skb;
4578 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4579 chan->buffer_seq, skb_queue_len(&chan->srej_q));
4581 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
4586 skb_unlink(skb, &chan->srej_q);
4587 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4588 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
4593 if (skb_queue_empty(&chan->srej_q)) {
4594 chan->rx_state = L2CAP_RX_STATE_RECV;
4595 l2cap_send_ack(chan);
/* Process a received SREJ S-frame: validate reqseq, locate the
 * requested frame in the tx queue, enforce the retry limit, then
 * retransmit — setting the F-bit when the SREJ carried a poll, and
 * tracking SREJ_ACT/srej_save_reqseq so a duplicate SREJ answered
 * with a final bit is not retransmitted twice.
 */
4601 static void l2cap_handle_srej(struct l2cap_chan *chan,
4602 struct l2cap_ctrl *control)
4604 struct sk_buff *skb;
4606 BT_DBG("chan %p, control %p", chan, control);
/* A reqseq equal to next_tx_seq asks for a frame never sent. */
4608 if (control->reqseq == chan->next_tx_seq) {
4609 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4610 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4614 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4617 BT_DBG("Seq %d not available for retransmission",
/* max_tx == 0 means unlimited retransmissions. */
4622 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4623 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4624 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4628 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4630 if (control->poll) {
4631 l2cap_pass_to_tx(chan, control);
4633 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4634 l2cap_retransmit(chan, control);
4635 l2cap_ertm_send(chan);
4637 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4638 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4639 chan->srej_save_reqseq = control->reqseq;
4642 l2cap_pass_to_tx_fbit(chan, control);
4644 if (control->final) {
/* Only skip the retransmit if this final answers the SREJ we
 * already acted on for the same sequence number.
 */
4645 if (chan->srej_save_reqseq != control->reqseq ||
4646 !test_and_clear_bit(CONN_SREJ_ACT,
4648 l2cap_retransmit(chan, control);
4650 l2cap_retransmit(chan, control);
4651 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4652 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4653 chan->srej_save_reqseq = control->reqseq;
/* Process a received REJ S-frame: validate reqseq and the retry limit,
 * then retransmit all unacked frames from reqseq — guarding with
 * REJ_ACT so a final bit that answers an earlier REJ is not
 * retransmitted a second time.
 */
4659 static void l2cap_handle_rej(struct l2cap_chan *chan,
4660 struct l2cap_ctrl *control)
4662 struct sk_buff *skb;
4664 BT_DBG("chan %p, control %p", chan, control);
/* A reqseq equal to next_tx_seq rejects a frame never sent. */
4666 if (control->reqseq == chan->next_tx_seq) {
4667 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4668 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4672 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4674 if (chan->max_tx && skb &&
4675 bt_cb(skb)->control.retries >= chan->max_tx) {
4676 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4677 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4681 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4683 l2cap_pass_to_tx(chan, control);
4685 if (control->final) {
4686 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4687 l2cap_retransmit_all(chan, control);
4689 l2cap_retransmit_all(chan, control);
4690 l2cap_ertm_send(chan);
4691 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
4692 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify a received I-frame sequence number relative to the receive
 * window and any outstanding SREJ state. Returns one of the
 * L2CAP_TXSEQ_* classifications (expected, duplicate, unexpected,
 * invalid, or their SREJ-specific variants) that drive the rx state
 * machines.
 */
4696 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4698 BT_DBG("chan %p, txseq %d", chan, txseq);
4700 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4701 chan->expected_tx_seq);
/* With SREJs outstanding, out-of-window frames are classified
 * first, then exact matches against the SREJ list.
 */
4703 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4704 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4706 /* See notes below regarding "double poll" and
4709 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4710 BT_DBG("Invalid/Ignore - after SREJ");
4711 return L2CAP_TXSEQ_INVALID_IGNORE;
4713 BT_DBG("Invalid - in window after SREJ sent");
4714 return L2CAP_TXSEQ_INVALID;
4718 if (chan->srej_list.head == txseq) {
4719 BT_DBG("Expected SREJ");
4720 return L2CAP_TXSEQ_EXPECTED_SREJ;
4723 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4724 BT_DBG("Duplicate SREJ - txseq already stored");
4725 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4728 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4729 BT_DBG("Unexpected SREJ - not requested");
4730 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4734 if (chan->expected_tx_seq == txseq) {
4735 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4737 BT_DBG("Invalid - txseq outside tx window");
4738 return L2CAP_TXSEQ_INVALID;
4741 return L2CAP_TXSEQ_EXPECTED;
/* txseq before expected_tx_seq (mod window) is a re-delivery. */
4745 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4746 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
4747 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4748 return L2CAP_TXSEQ_DUPLICATE;
4751 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4752 /* A source of invalid packets is a "double poll" condition,
4753 * where delays cause us to send multiple poll packets. If
4754 * the remote stack receives and processes both polls,
4755 * sequence numbers can wrap around in such a way that a
4756 * resent frame has a sequence number that looks like new data
4757 * with a sequence gap. This would trigger an erroneous SREJ
4760 * Fortunately, this is impossible with a tx window that's
4761 * less than half of the maximum sequence number, which allows
4762 * invalid frames to be safely ignored.
4764 * With tx window sizes greater than half of the tx window
4765 * maximum, the frame is invalid and cannot be ignored. This
4766 * causes a disconnect.
4769 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4770 BT_DBG("Invalid/Ignore - txseq outside tx window");
4771 return L2CAP_TXSEQ_INVALID_IGNORE;
4773 BT_DBG("Invalid - txseq outside tx window");
4774 return L2CAP_TXSEQ_INVALID;
4777 BT_DBG("Unexpected - txseq indicates missing frames");
4778 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive state machine, RECV state. Handles incoming I-frames
 * (classified by l2cap_classify_txseq()) and RR/RNR/REJ/SREJ S-frames.
 * Expected I-frames are reassembled and acked; an unexpected sequence
 * number queues the frame, sends an SREJ and moves to SREJ_SENT;
 * invalid frames either get ignored or trigger a disconnect. Frames
 * not stored in a queue (skb_in_use stays false) are freed on exit.
 */
4782 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4783 struct l2cap_ctrl *control,
4784 struct sk_buff *skb, u8 event)
/* Fix: initialize a bool with false, not the integer literal 0. */
4787 bool skb_in_use = false;
4789 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4793 case L2CAP_EV_RECV_IFRAME:
4794 switch (l2cap_classify_txseq(chan, control->txseq)) {
4795 case L2CAP_TXSEQ_EXPECTED:
4796 l2cap_pass_to_tx(chan, control);
/* While locally busy we cannot accept the frame; it will be
 * recovered once local busy clears.
 */
4798 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4799 BT_DBG("Busy, discarding expected seq %d",
4804 chan->expected_tx_seq = __next_seq(chan,
4807 chan->buffer_seq = chan->expected_tx_seq;
4810 err = l2cap_reassemble_sdu(chan, skb, control);
4814 if (control->final) {
4815 if (!test_and_clear_bit(CONN_REJ_ACT,
4816 &chan->conn_state)) {
4818 l2cap_retransmit_all(chan, control);
4819 l2cap_ertm_send(chan);
4823 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4824 l2cap_send_ack(chan);
4826 case L2CAP_TXSEQ_UNEXPECTED:
4827 l2cap_pass_to_tx(chan, control);
4829 /* Can't issue SREJ frames in the local busy state.
4830 * Drop this frame, it will be seen as missing
4831 * when local busy is exited.
4833 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4834 BT_DBG("Busy, discarding unexpected seq %d",
4839 /* There was a gap in the sequence, so an SREJ
4840 * must be sent for each missing frame. The
4841 * current frame is stored for later use.
4843 skb_queue_tail(&chan->srej_q, skb);
4845 BT_DBG("Queued %p (queue len %d)", skb,
4846 skb_queue_len(&chan->srej_q));
4848 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4849 l2cap_seq_list_clear(&chan->srej_list);
4850 l2cap_send_srej(chan, control->txseq);
4852 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4854 case L2CAP_TXSEQ_DUPLICATE:
4855 l2cap_pass_to_tx(chan, control);
4857 case L2CAP_TXSEQ_INVALID_IGNORE:
4859 case L2CAP_TXSEQ_INVALID:
4861 l2cap_send_disconn_req(chan->conn, chan,
4866 case L2CAP_EV_RECV_RR:
4867 l2cap_pass_to_tx(chan, control);
4868 if (control->final) {
4869 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4871 if (!test_and_clear_bit(CONN_REJ_ACT,
4872 &chan->conn_state)) {
4874 l2cap_retransmit_all(chan, control);
4877 l2cap_ertm_send(chan);
4878 } else if (control->poll) {
4879 l2cap_send_i_or_rr_or_rnr(chan);
4881 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4882 &chan->conn_state) &&
4883 chan->unacked_frames)
4884 __set_retrans_timer(chan);
4886 l2cap_ertm_send(chan);
4889 case L2CAP_EV_RECV_RNR:
4890 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4891 l2cap_pass_to_tx(chan, control);
4892 if (control && control->poll) {
4893 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4894 l2cap_send_rr_or_rnr(chan, 0);
4896 __clear_retrans_timer(chan);
4897 l2cap_seq_list_clear(&chan->retrans_list);
4899 case L2CAP_EV_RECV_REJ:
4900 l2cap_handle_rej(chan, control);
4902 case L2CAP_EV_RECV_SREJ:
4903 l2cap_handle_srej(chan, control);
/* Any skb not stored in srej_q or delivered upward is dropped. */
4909 if (skb && !skb_in_use) {
4910 BT_DBG("Freeing %p", skb);
/* ERTM receive state machine, SREJ_SENT state: SREJs are outstanding
 * and out-of-order frames are parked in srej_q until the gaps fill.
 * Expected-SREJ frames pop the SREJ list and attempt to drain the
 * queue via l2cap_rx_queued_iframes(); further gaps generate more
 * SREJs. RR/RNR/REJ/SREJ S-frames are handled analogously to the
 * RECV state but answer polls with l2cap_send_srej_tail(). Frames not
 * queued (skb_in_use stays false) are freed on exit.
 */
4917 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4918 struct l2cap_ctrl *control,
4919 struct sk_buff *skb, u8 event)
4922 u16 txseq = control->txseq;
/* Fix: initialize a bool with false, not the integer literal 0. */
4923 bool skb_in_use = false;
4925 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4929 case L2CAP_EV_RECV_IFRAME:
4930 switch (l2cap_classify_txseq(chan, txseq)) {
4931 case L2CAP_TXSEQ_EXPECTED:
4932 /* Keep frame for reassembly later */
4933 l2cap_pass_to_tx(chan, control);
4934 skb_queue_tail(&chan->srej_q, skb);
4936 BT_DBG("Queued %p (queue len %d)", skb,
4937 skb_queue_len(&chan->srej_q));
4939 chan->expected_tx_seq = __next_seq(chan, txseq);
4941 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* This frame answers the head of the SREJ list. */
4942 l2cap_seq_list_pop(&chan->srej_list);
4944 l2cap_pass_to_tx(chan, control);
4945 skb_queue_tail(&chan->srej_q, skb);
4947 BT_DBG("Queued %p (queue len %d)", skb,
4948 skb_queue_len(&chan->srej_q));
4950 err = l2cap_rx_queued_iframes(chan);
4955 case L2CAP_TXSEQ_UNEXPECTED:
4956 /* Got a frame that can't be reassembled yet.
4957 * Save it for later, and send SREJs to cover
4958 * the missing frames.
4960 skb_queue_tail(&chan->srej_q, skb);
4962 BT_DBG("Queued %p (queue len %d)", skb,
4963 skb_queue_len(&chan->srej_q));
4965 l2cap_pass_to_tx(chan, control);
4966 l2cap_send_srej(chan, control->txseq);
4968 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4969 /* This frame was requested with an SREJ, but
4970 * some expected retransmitted frames are
4971 * missing. Request retransmission of missing
4974 skb_queue_tail(&chan->srej_q, skb);
4976 BT_DBG("Queued %p (queue len %d)", skb,
4977 skb_queue_len(&chan->srej_q));
4979 l2cap_pass_to_tx(chan, control);
4980 l2cap_send_srej_list(chan, control->txseq);
4982 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4983 /* We've already queued this frame. Drop this copy. */
4984 l2cap_pass_to_tx(chan, control);
4986 case L2CAP_TXSEQ_DUPLICATE:
4987 /* Expecting a later sequence number, so this frame
4988 * was already received. Ignore it completely.
4991 case L2CAP_TXSEQ_INVALID_IGNORE:
4993 case L2CAP_TXSEQ_INVALID:
4995 l2cap_send_disconn_req(chan->conn, chan,
5000 case L2CAP_EV_RECV_RR:
5001 l2cap_pass_to_tx(chan, control);
5002 if (control->final) {
5003 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5005 if (!test_and_clear_bit(CONN_REJ_ACT,
5006 &chan->conn_state)) {
5008 l2cap_retransmit_all(chan, control);
5011 l2cap_ertm_send(chan);
5012 } else if (control->poll) {
5013 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5014 &chan->conn_state) &&
5015 chan->unacked_frames) {
5016 __set_retrans_timer(chan);
/* A poll while SREJs are outstanding is answered with the
 * tail of the SREJ list carrying the F-bit.
 */
5019 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5020 l2cap_send_srej_tail(chan);
5022 if (test_and_clear_bit(CONN_REMOTE_BUSY,
5023 &chan->conn_state) &&
5024 chan->unacked_frames)
5025 __set_retrans_timer(chan);
5027 l2cap_send_ack(chan);
5030 case L2CAP_EV_RECV_RNR:
5031 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5032 l2cap_pass_to_tx(chan, control);
5033 if (control->poll) {
5034 l2cap_send_srej_tail(chan);
5036 struct l2cap_ctrl rr_control;
5037 memset(&rr_control, 0, sizeof(rr_control));
5038 rr_control.sframe = 1;
5039 rr_control.super = L2CAP_SUPER_RR;
5040 rr_control.reqseq = chan->buffer_seq;
5041 l2cap_send_sframe(chan, &rr_control);
5045 case L2CAP_EV_RECV_REJ:
5046 l2cap_handle_rej(chan, control);
5048 case L2CAP_EV_RECV_SREJ:
5049 l2cap_handle_srej(chan, control);
/* Any skb not stored in srej_q is dropped here. */
5053 if (skb && !skb_in_use) {
5054 BT_DBG("Freeing %p", skb);
/* Return true when reqseq acknowledges only frames that have been sent
 * but not yet acked, i.e. it lies within [expected_ack_seq,
 * next_tx_seq] in modular sequence space.
 */
5061 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5063 /* Make sure reqseq is for a packet that has been sent but not acked */
5066 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5067 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive entry: validate the frame's reqseq, then
 * dispatch to the handler for the current rx state (RECV or
 * SREJ_SENT). An out-of-range reqseq is a protocol violation and
 * tears the channel down with ECONNRESET.
 */
5070 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5071 struct sk_buff *skb, u8 event)
5075 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5076 control, skb, event, chan->rx_state);
5078 if (__valid_reqseq(chan, control->reqseq)) {
5079 switch (chan->rx_state) {
5080 case L2CAP_RX_STATE_RECV:
5081 err = l2cap_rx_state_recv(chan, control, skb, event);
5083 case L2CAP_RX_STATE_SREJ_SENT:
5084 err = l2cap_rx_state_srej_sent(chan, control, skb,
5092 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5093 control->reqseq, chan->next_tx_seq,
5094 chan->expected_ack_seq);
5095 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Streaming-mode receive: only exactly-expected sequence numbers are
 * reassembled; any partial SDU is discarded on error and, since
 * streaming mode never retransmits, out-of-order frames are simply
 * dropped while the expected/last-acked counters resynchronize to
 * the received txseq.
 */
5101 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5102 struct sk_buff *skb)
5106 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5109 if (l2cap_classify_txseq(chan, control->txseq) ==
5110 L2CAP_TXSEQ_EXPECTED) {
5111 l2cap_pass_to_tx(chan, control);
5113 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5114 __next_seq(chan, chan->buffer_seq));
5116 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5118 l2cap_reassemble_sdu(chan, skb, control);
/* Reassembly failed: drop the partial SDU and reset state. */
5121 kfree_skb(chan->sdu);
5124 chan->sdu_last_frag = NULL;
5128 BT_DBG("Freeing %p", skb);
/* Resync sequence tracking to the frame just seen, expected or not. */
5133 chan->last_acked_seq = control->txseq;
5134 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Common ERTM/streaming data-frame entry point: unpack the control
 * field, verify the FCS, validate payload length against MPS, then
 * route I-frames to l2cap_rx()/l2cap_stream_rx() and S-frames to
 * l2cap_rx() via the super-field-to-event table. Malformed frames
 * either get silently dropped (bad FCS) or cause a disconnect.
 */
5139 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5141 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5145 __unpack_control(chan, skb);
5150 * We can just drop the corrupted I-frame here.
5151 * Receiver will miss it and start proper recovery
5152 * procedures and ask for retransmission.
5154 if (l2cap_check_fcs(chan, skb))
/* Account for the SDU-length prefix and FCS trailer when checking
 * the payload against the negotiated MPS.
 */
5157 if (!control->sframe && control->sar == L2CAP_SAR_START)
5158 len -= L2CAP_SDULEN_SIZE;
5160 if (chan->fcs == L2CAP_FCS_CRC16)
5161 len -= L2CAP_FCS_SIZE;
5163 if (len > chan->mps) {
5164 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5168 if (!control->sframe) {
5171 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5172 control->sar, control->reqseq, control->final,
5175 /* Validate F-bit - F=0 always valid, F=1 only
5176 * valid in TX WAIT_F
5178 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5181 if (chan->mode != L2CAP_MODE_STREAMING) {
5182 event = L2CAP_EV_RECV_IFRAME;
5183 err = l2cap_rx(chan, control, skb, event);
5185 err = l2cap_stream_rx(chan, control, skb);
5189 l2cap_send_disconn_req(chan->conn, chan,
/* Map the 2-bit S-frame super field to an rx event. */
5192 const u8 rx_func_to_event[4] = {
5193 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5194 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5197 /* Only I-frames are expected in streaming mode */
5198 if (chan->mode == L2CAP_MODE_STREAMING)
5201 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5202 control->reqseq, control->final, control->poll,
5207 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5211 /* Validate F and P bits */
5212 if (control->final && (control->poll ||
5213 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5216 event = rx_func_to_event[control->super];
5217 if (l2cap_rx(chan, control, skb, event))
5218 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Deliver a data frame to the channel identified by its source CID.
 * A2MP frames may create their channel on demand; unknown CIDs drop
 * the packet. Basic mode delivers directly (subject to the incoming
 * MTU); ERTM/streaming go through l2cap_data_rcv(). The channel is
 * locked for the duration of delivery.
 */
5228 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
5229 struct sk_buff *skb)
5231 struct l2cap_chan *chan;
5233 chan = l2cap_get_chan_by_scid(conn, cid);
5235 if (cid == L2CAP_CID_A2MP) {
5236 chan = a2mp_channel_create(conn, skb);
5242 l2cap_chan_lock(chan);
5244 BT_DBG("unknown cid 0x%4.4x", cid);
5245 /* Drop packet and return */
5251 BT_DBG("chan %p, len %d", chan, skb->len);
5253 if (chan->state != BT_CONNECTED)
5256 switch (chan->mode) {
5257 case L2CAP_MODE_BASIC:
5258 /* If socket recv buffers overflows we drop data here
5259 * which is *bad* because L2CAP has to be reliable.
5260 * But we don't have any other choice. L2CAP doesn't
5261 * provide flow control mechanism. */
5263 if (chan->imtu < skb->len)
5266 if (!chan->ops->recv(chan, skb))
5270 case L2CAP_MODE_ERTM:
5271 case L2CAP_MODE_STREAMING:
5272 l2cap_data_rcv(chan, skb);
5276 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
5284 l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) frame to the global channel
 * listening on the given PSM for this address pair; drop it if the
 * channel is not bound/connected or the frame exceeds the incoming MTU.
 */
5287 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5288 struct sk_buff *skb)
5290 struct l2cap_chan *chan;
5292 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5296 BT_DBG("chan %p, len %d", chan, skb->len);
5298 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5301 if (chan->imtu < skb->len)
5304 if (!chan->ops->recv(chan, skb))
/* Deliver an ATT (LE fixed-channel) frame to the global channel bound
 * to this CID and address pair, with the same bound/connected-state
 * and MTU checks as the connectionless path.
 */
5311 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5312 struct sk_buff *skb)
5314 struct l2cap_chan *chan;
5316 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5320 BT_DBG("chan %p, len %d", chan, skb->len);
5322 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5325 if (chan->imtu < skb->len)
5328 if (!chan->ops->recv(chan, skb))
/* Demultiplex one complete L2CAP frame: strip the basic header,
 * sanity-check the advertised length, then route by destination CID to
 * the signaling, connectionless, ATT, SMP, or dynamic data channel
 * handlers.
 */
5335 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5337 struct l2cap_hdr *lh = (void *) skb->data;
5341 skb_pull(skb, L2CAP_HDR_SIZE);
5342 cid = __le16_to_cpu(lh->cid);
5343 len = __le16_to_cpu(lh->len);
/* Header length must match exactly what remains in the skb. */
5345 if (len != skb->len) {
5350 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5353 case L2CAP_CID_LE_SIGNALING:
5354 case L2CAP_CID_SIGNALING:
5355 l2cap_sig_channel(conn, skb);
5358 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a 2-byte PSM before the payload. */
5359 psm = get_unaligned((__le16 *) skb->data);
5360 skb_pull(skb, L2CAP_PSMLEN_SIZE);
5361 l2cap_conless_channel(conn, psm, skb);
5364 case L2CAP_CID_LE_DATA:
5365 l2cap_att_channel(conn, cid, skb);
/* SMP failure tears the whole connection down. */
5369 if (smp_sig_channel(conn, skb))
5370 l2cap_conn_del(conn->hcon, EACCES);
5374 l2cap_data_channel(conn, cid, skb);
5379 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI connect indication: scan listening channels and build the
 * accept/role-switch link-mode mask. lm1 accumulates exact local
 * address matches, lm2 wildcard (BDADDR_ANY) matches; exact matches
 * take precedence in the returned mask.
 */
5381 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5383 int exact = 0, lm1 = 0, lm2 = 0;
5384 struct l2cap_chan *c;
5386 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
5388 /* Find listening sockets and check their link_mode */
5389 read_lock(&chan_list_lock);
5390 list_for_each_entry(c, &chan_list, global_l) {
5391 struct sock *sk = c->sk;
5393 if (c->state != BT_LISTEN)
5396 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5397 lm1 |= HCI_LM_ACCEPT;
5398 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5399 lm1 |= HCI_LM_MASTER;
5401 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5402 lm2 |= HCI_LM_ACCEPT;
5403 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5404 lm2 |= HCI_LM_MASTER;
5407 read_unlock(&chan_list_lock);
5409 return exact ? lm1 : lm2;
/* HCI connect confirmation: on success attach an l2cap_conn to the
 * HCI connection and mark it ready; on failure tear the connection
 * down with the HCI status translated to an errno.
 */
5412 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5414 struct l2cap_conn *conn;
5416 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
5419 conn = l2cap_conn_add(hcon, status);
5421 l2cap_conn_ready(conn);
5423 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI disconnect indication: report the reason recorded on the
 * l2cap_conn, defaulting to remote-user-terminated when no L2CAP
 * state exists for this HCI connection.
 */
5427 int l2cap_disconn_ind(struct hci_conn *hcon)
5429 struct l2cap_conn *conn = hcon->l2cap_data;
5431 BT_DBG("hcon %p", hcon);
5434 return HCI_ERROR_REMOTE_USER_TERM;
5435 return conn->disc_reason;
/* HCI disconnect confirmation: tear down the L2CAP connection with
 * the HCI reason translated to an errno.
 */
5438 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5440 BT_DBG("hcon %p reason %d", hcon, reason);
5442 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption arms a grace timer at MEDIUM security and closes
 * the channel outright at HIGH security; regaining it clears the
 * MEDIUM-security timer.
 */
5445 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5447 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5450 if (encrypt == 0x00) {
5451 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5452 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5453 } else if (chan->sec_level == BT_SECURITY_HIGH)
5454 l2cap_chan_close(chan, ECONNREFUSED);
5456 if (chan->sec_level == BT_SECURITY_MEDIUM)
5457 __clear_chan_timer(chan);
/* HCI security/encryption confirmation. On LE links, success triggers
 * SMP key distribution and cancels the security timer. On BR/EDR,
 * walk every channel on the connection and advance its state machine:
 * ready LE-data channels, re-check encryption for established ones,
 * continue connection setup for channels in BT_CONNECT, and answer
 * pending incoming connections (BT_CONNECT2) with success, pending
 * authorization, or a security block.
 */
5461 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5463 struct l2cap_conn *conn = hcon->l2cap_data;
5464 struct l2cap_chan *chan;
5469 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
5471 if (hcon->type == LE_LINK) {
5472 if (!status && encrypt)
5473 smp_distribute_keys(conn, 0);
5474 cancel_delayed_work(&conn->security_timer);
5477 mutex_lock(&conn->chan_lock);
5479 list_for_each_entry(chan, &conn->chan_l, list) {
5480 l2cap_chan_lock(chan);
5482 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
5483 state_to_string(chan->state));
/* A2MP fixed channels are unaffected by link security changes. */
5485 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
5486 l2cap_chan_unlock(chan);
5490 if (chan->scid == L2CAP_CID_LE_DATA) {
5491 if (!status && encrypt) {
5492 chan->sec_level = hcon->sec_level;
5493 l2cap_chan_ready(chan);
5496 l2cap_chan_unlock(chan);
/* Channels still waiting on a pending connect are skipped. */
5500 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5501 l2cap_chan_unlock(chan);
5505 if (!status && (chan->state == BT_CONNECTED ||
5506 chan->state == BT_CONFIG)) {
5507 struct sock *sk = chan->sk;
5509 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5510 sk->sk_state_change(sk);
5512 l2cap_check_encryption(chan, encrypt);
5513 l2cap_chan_unlock(chan);
5517 if (chan->state == BT_CONNECT) {
5519 l2cap_start_connection(chan);
5521 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5523 } else if (chan->state == BT_CONNECT2) {
5524 struct sock *sk = chan->sk;
5525 struct l2cap_conn_rsp rsp;
/* Deferred setup asks the listening parent to authorize
 * before completing the connection.
 */
5531 if (test_bit(BT_SK_DEFER_SETUP,
5532 &bt_sk(sk)->flags)) {
5533 struct sock *parent = bt_sk(sk)->parent;
5534 res = L2CAP_CR_PEND;
5535 stat = L2CAP_CS_AUTHOR_PEND;
5537 parent->sk_data_ready(parent, 0);
5539 __l2cap_state_change(chan, BT_CONFIG);
5540 res = L2CAP_CR_SUCCESS;
5541 stat = L2CAP_CS_NO_INFO;
/* Security failure: reject the incoming connection. */
5544 __l2cap_state_change(chan, BT_DISCONN);
5545 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5546 res = L2CAP_CR_SEC_BLOCK;
5547 stat = L2CAP_CS_NO_INFO;
5552 rsp.scid = cpu_to_le16(chan->dcid);
5553 rsp.dcid = cpu_to_le16(chan->scid);
5554 rsp.result = cpu_to_le16(res);
5555 rsp.status = cpu_to_le16(stat);
5556 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* A successful response is immediately followed by our
 * configuration request, sent at most once.
 */
5559 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
5560 res == L2CAP_CR_SUCCESS) {
5562 set_bit(CONF_REQ_SENT, &chan->conf_state);
5563 l2cap_send_cmd(conn, l2cap_get_ident(conn),
5565 l2cap_build_conf_req(chan, buf),
5567 chan->num_conf_req++;
5571 l2cap_chan_unlock(chan);
5574 mutex_unlock(&conn->chan_lock);
/* HCI ACL data entry point. Reassembles L2CAP frames fragmented across
 * ACL packets: a start fragment (no ACL_CONT flag) either delivers a
 * complete frame immediately or allocates conn->rx_skb sized from the
 * L2CAP header; continuation fragments are appended until rx_len
 * reaches zero, with length mismatches reported via
 * l2cap_conn_unreliable(ECOMM).
 */
5579 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5581 struct l2cap_conn *conn = hcon->l2cap_data;
5584 conn = l2cap_conn_add(hcon, 0);
5589 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5591 if (!(flags & ACL_CONT)) {
5592 struct l2cap_hdr *hdr;
/* A start fragment while reassembly is in progress means the
 * previous frame never completed; drop its partial data.
 */
5596 BT_ERR("Unexpected start frame (len %d)", skb->len);
5597 kfree_skb(conn->rx_skb);
5598 conn->rx_skb = NULL;
5600 l2cap_conn_unreliable(conn, ECOMM);
5603 /* Start fragment always begin with Basic L2CAP header */
5604 if (skb->len < L2CAP_HDR_SIZE) {
5605 BT_ERR("Frame is too short (len %d)", skb->len);
5606 l2cap_conn_unreliable(conn, ECOMM);
5610 hdr = (struct l2cap_hdr *) skb->data;
5611 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5613 if (len == skb->len) {
5614 /* Complete frame received */
5615 l2cap_recv_frame(conn, skb);
5619 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5621 if (skb->len > len) {
5622 BT_ERR("Frame is too long (len %d, expected len %d)",
5624 l2cap_conn_unreliable(conn, ECOMM);
5628 /* Allocate skb for the complete frame (with header) */
5629 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
5633 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many bytes are still outstanding. */
5635 conn->rx_len = len - skb->len;
5637 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5639 if (!conn->rx_len) {
5640 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5641 l2cap_conn_unreliable(conn, ECOMM);
5645 if (skb->len > conn->rx_len) {
5646 BT_ERR("Fragment is too long (len %d, expected %d)",
5647 skb->len, conn->rx_len);
5648 kfree_skb(conn->rx_skb);
5649 conn->rx_skb = NULL;
5651 l2cap_conn_unreliable(conn, ECOMM);
5655 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5657 conn->rx_len -= skb->len;
5659 if (!conn->rx_len) {
5660 /* Complete frame received */
5661 l2cap_recv_frame(conn, conn->rx_skb);
5662 conn->rx_skb = NULL;
/* seq_file show callback for /sys/kernel/debug/bluetooth/l2cap: dump
 * one line per registered channel (addresses, state, PSM, CIDs, MTUs,
 * security level, mode) under the channel-list read lock.
 */
5671 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5673 struct l2cap_chan *c;
5675 read_lock(&chan_list_lock);
5677 list_for_each_entry(c, &chan_list, global_l) {
5678 struct sock *sk = c->sk;
5680 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5681 &bt_sk(sk)->src, &bt_sk(sk)->dst,
5682 c->state, __le16_to_cpu(c->psm),
5683 c->scid, c->dcid, c->imtu, c->omtu,
5684 c->sec_level, c->mode);
5687 read_unlock(&chan_list_lock);
/* debugfs open callback: bind the seq_file single-show handler. */
5692 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5694 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (standard seq_file
 * single-open pattern), and the dentry created by l2cap_init().
 */
5697 static const struct file_operations l2cap_debugfs_fops = {
5698 .open = l2cap_debugfs_open,
5700 .llseek = seq_lseek,
5701 .release = single_release,
5704 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer, then create the
 * read-only debugfs dump file (failure to create it is only logged,
 * not fatal).
 */
5706 int __init l2cap_init(void)
5710 err = l2cap_init_sockets();
5715 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
5716 NULL, &l2cap_debugfs_fops);
5718 BT_ERR("Failed to create L2CAP debug file");
/* Module exit: remove the debugfs entry and unregister sockets. */
5724 void l2cap_exit(void)
5726 debugfs_remove(l2cap_debugfs);
5727 l2cap_cleanup_sockets();
/* Runtime switch (writable via /sys/module) to disable ERTM support. */
5730 module_param(disable_ertm, bool, 0644);
5731 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");