2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
/* NOTE(review): this excerpt lost blank/brace-only lines and some
 * continuation lines during extraction; code below is kept verbatim. */

/* Local L2CAP feature mask advertised in Information responses. */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Supported fixed channels bitmap (signalling channel only by default). */
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };

/* Global list of all L2CAP channels, protected by chan_list_lock. */
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for the signalling helpers used before they
 * are defined later in the file. */
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
/* (the trailing "void *data);" continuation of this prototype was lost
 * in this excerpt) */
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 /* ---- L2CAP channels ---- */

/* Find a channel on @conn by destination CID.
 * Caller must hold conn->chan_lock; returns NULL if not found
 * (loop body elided in this excerpt). */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 list_for_each_entry(c, &conn->chan_l, list) {

/* Find a channel on @conn by source CID.
 * Caller must hold conn->chan_lock. */
89 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
93 list_for_each_entry(c, &conn->chan_l, list) {

100 /* Find channel with given SCID.
101 * Returns locked channel. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 struct l2cap_chan *c;
/* Take the connection's channel-list lock around the raw lookup. */
106 mutex_lock(&conn->chan_lock);
107 c = __l2cap_get_chan_by_scid(conn, cid);
/* presumably the channel itself is locked before chan_lock is
 * dropped (lines elided) — verify against the full source. */
110 mutex_unlock(&conn->chan_lock);

/* Find a channel on @conn by the signalling command identifier that
 * is awaiting a response. Caller must hold conn->chan_lock. */
115 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
117 struct l2cap_chan *c;
119 list_for_each_entry(c, &conn->chan_l, list) {
120 if (c->ident == ident)

/* Search the global channel list for a channel bound to @psm on the
 * local address @src. Caller must hold chan_list_lock. */
126 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
128 struct l2cap_chan *c;
130 list_for_each_entry(c, &chan_list, global_l) {
131 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm on local address @src.
 * If @psm is zero, auto-allocate an odd dynamic PSM in the
 * 0x1001..0x10ff range; fails if the PSM is already taken. */
137 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
141 write_lock(&chan_list_lock);
/* Explicit PSM requested: reject if already bound on this address. */
143 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* No PSM given: scan odd values (valid PSMs have an odd low byte)
 * for a free dynamic PSM. */
156 for (p = 0x1001; p < 0x1100; p += 2)
157 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
158 chan->psm = cpu_to_le16(p);
159 chan->sport = cpu_to_le16(p);
166 write_unlock(&chan_list_lock);

/* Bind @chan to a fixed source CID (assignment line elided here). */
170 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
172 write_lock(&chan_list_lock);
176 write_unlock(&chan_list_lock);

/* Allocate the first free dynamic source CID on @conn.
 * Caller must hold conn->chan_lock. */
181 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
183 u16 cid = L2CAP_CID_DYN_START;
185 for (; cid < L2CAP_CID_DYN_END; cid++) {
186 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Move @chan to @state and notify the owner via the state_change op.
 * Lock-free variant: caller must already hold the socket lock. */
193 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
195 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
196 state_to_string(state));
199 chan->ops->state_change(chan->data, state);

/* Locked wrapper: takes the socket lock around the state change
 * (lock/unlock lines elided in this excerpt). */
202 static void l2cap_state_change(struct l2cap_chan *chan, int state)
204 struct sock *sk = chan->sk;
207 __l2cap_state_change(chan, state);

/* Record error @err on the channel's socket; caller holds sk lock
 * (the sk->sk_err assignment is elided here). */
211 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
213 struct sock *sk = chan->sk;

/* Locked wrapper around __l2cap_chan_set_err(). */
218 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
220 struct sock *sk = chan->sk;
223 __l2cap_chan_set_err(chan, err);
227 /* ---- L2CAP sequence number lists ---- */

229 /* For ERTM, ordered lists of sequence numbers must be tracked for
230 * SREJ requests that are received and for frames that are to be
231 * retransmitted. These seq_list functions implement a singly-linked
232 * list in an array, where membership in the list can also be checked
233 * in constant time. Items can also be added to the tail of the list
234 * and removed from the head in constant time, without further memory
/* Allocate and clear the backing array for a seq_list able to hold
 * @size sequence numbers. Returns 0 on success (error path for a
 * failed kmalloc is elided in this excerpt). */
238 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
240 size_t alloc_size, i;
242 /* Allocated size is a power of 2 to map sequence numbers
243 * (which may be up to 14 bits) in to a smaller array that is
244 * sized for the negotiated ERTM transmit windows.
246 alloc_size = roundup_pow_of_two(size);
248 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
/* Power-of-two size makes (seq & mask) the array index. */
252 seq_list->mask = alloc_size - 1;
253 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
254 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
255 for (i = 0; i < alloc_size; i++)
256 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

/* Release the backing array allocated by l2cap_seq_list_init(). */
261 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
263 kfree(seq_list->list);
266 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
269 /* Constant-time check for list membership */
270 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;

/* Remove @seq from the list. O(1) when @seq is the head, otherwise
 * walks the chain. Returns @seq, or L2CAP_SEQ_LIST_CLEAR when the
 * list is empty or @seq is not present. */
273 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
275 u16 mask = seq_list->mask;
277 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
278 /* In case someone tries to pop the head of an empty list */
279 return L2CAP_SEQ_LIST_CLEAR;
280 } else if (seq_list->head == seq) {
281 /* Head can be removed in constant time */
282 seq_list->head = seq_list->list[seq & mask];
283 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* Removed the last element: reset the list to empty. */
285 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
286 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
287 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
290 /* Walk the list to find the sequence number */
291 u16 prev = seq_list->head;
292 while (seq_list->list[prev & mask] != seq) {
293 prev = seq_list->list[prev & mask];
294 if (prev == L2CAP_SEQ_LIST_TAIL)
295 return L2CAP_SEQ_LIST_CLEAR;
298 /* Unlink the number from the list and clear it */
299 seq_list->list[prev & mask] = seq_list->list[seq & mask];
300 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
301 if (seq_list->tail == seq)
302 seq_list->tail = prev;

307 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
309 /* Remove the head in constant time */
310 return l2cap_seq_list_remove(seq_list, seq_list->head);

/* Empty the list, clearing every slot; no-op when already empty. */
313 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
317 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
320 for (i = 0; i <= seq_list->mask; i++)
321 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
323 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
324 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;

/* Append @seq to the tail. Duplicates are ignored (membership is
 * checked first), keeping appends constant-time. */
327 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
329 u16 mask = seq_list->mask;
331 /* All appends happen in constant time */
/* Already a member — nothing to do. */
333 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
/* Empty list: new element becomes the head too. */
336 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
337 seq_list->head = seq;
339 seq_list->list[seq_list->tail & mask] = seq;
341 seq_list->tail = seq;
342 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: tear the channel down
 * with a reason derived from its current state, then drop the timer's
 * reference on the channel. */
345 static void l2cap_chan_timeout(struct work_struct *work)
347 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
349 struct l2cap_conn *conn = chan->conn;
352 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
/* chan_lock nests inside conn->chan_lock. */
354 mutex_lock(&conn->chan_lock);
355 l2cap_chan_lock(chan);
/* Timed out while established/configuring or while an authenticated
 * connect was pending -> report ECONNREFUSED; other states fall
 * through to a default reason (elided in this excerpt). */
357 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
358 reason = ECONNREFUSED;
359 else if (chan->state == BT_CONNECT &&
360 chan->sec_level != BT_SECURITY_SDP)
361 reason = ECONNREFUSED;
365 l2cap_chan_close(chan, reason);
367 l2cap_chan_unlock(chan);
/* Notify the owner (socket layer) after dropping the chan lock. */
369 chan->ops->close(chan->data);
370 mutex_unlock(&conn->chan_lock);
/* Drop the reference held on behalf of the pending timer. */
372 l2cap_chan_put(chan);
/* Allocate and initialise a new channel, link it on the global
 * channel list and return it with a single reference held.
 * Returns NULL on allocation failure (check elided here). */
375 struct l2cap_chan *l2cap_chan_create(void)
377 struct l2cap_chan *chan;
379 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
383 mutex_init(&chan->lock);
385 write_lock(&chan_list_lock);
386 list_add(&chan->global_l, &chan_list);
387 write_unlock(&chan_list_lock);
389 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
391 chan->state = BT_OPEN;
/* Initial reference owned by the caller. */
393 atomic_set(&chan->refcnt, 1);
395 BT_DBG("chan %p", chan);

/* Unlink @chan from the global list and drop the creator's
 * reference; the channel is freed once the refcount hits zero. */
400 void l2cap_chan_destroy(struct l2cap_chan *chan)
402 write_lock(&chan_list_lock);
403 list_del(&chan->global_l);
404 write_unlock(&chan_list_lock);
406 l2cap_chan_put(chan);
/* Reset the negotiable channel parameters to their spec defaults
 * (FCS, max transmit, tx window, security level) and force the ACL
 * link active for traffic on this channel. */
409 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
411 chan->fcs = L2CAP_FCS_CRC16;
412 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
413 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
414 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
415 chan->sec_level = BT_SECURITY_LOW;
417 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach @chan to @conn: assign CIDs/MTU according to the channel
 * type, seed default QoS parameters, and link it on the connection's
 * channel list holding an extra reference.
 * Caller must hold conn->chan_lock. */
420 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
422 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
423 __le16_to_cpu(chan->psm), chan->dcid);
425 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
429 switch (chan->chan_type) {
430 case L2CAP_CHAN_CONN_ORIENTED:
/* LE data goes over the fixed LE data CID; BR/EDR gets a
 * dynamically allocated source CID. */
431 if (conn->hcon->type == LE_LINK) {
433 chan->omtu = L2CAP_LE_DEFAULT_MTU;
434 chan->scid = L2CAP_CID_LE_DATA;
435 chan->dcid = L2CAP_CID_LE_DATA;
437 /* Alloc CID for connection-oriented socket */
438 chan->scid = l2cap_alloc_cid(conn);
439 chan->omtu = L2CAP_DEFAULT_MTU;
443 case L2CAP_CHAN_CONN_LESS:
444 /* Connectionless socket */
445 chan->scid = L2CAP_CID_CONN_LESS;
446 chan->dcid = L2CAP_CID_CONN_LESS;
447 chan->omtu = L2CAP_DEFAULT_MTU;
451 /* Raw socket can send/recv signalling messages only */
452 chan->scid = L2CAP_CID_SIGNALING;
453 chan->dcid = L2CAP_CID_SIGNALING;
454 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort service parameters for extended flow spec. */
457 chan->local_id = L2CAP_BESTEFFORT_ID;
458 chan->local_stype = L2CAP_SERV_BESTEFFORT;
459 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
460 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
461 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
462 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* The conn's channel list holds its own reference on the channel. */
464 l2cap_chan_hold(chan);
466 list_add(&chan->list, &conn->chan_l);

/* Locked wrapper around __l2cap_chan_add(). */
469 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
471 mutex_lock(&conn->chan_lock);
472 __l2cap_chan_add(conn, chan);
473 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection and tear down its state: unlink
 * it, mark the socket closed/zapped with @err, wake any accepting
 * parent, and flush all ERTM queues and timers.
 * Caller must hold conn->chan_lock and the channel lock. */
476 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
478 struct sock *sk = chan->sk;
479 struct l2cap_conn *conn = chan->conn;
480 struct sock *parent = bt_sk(sk)->parent;
482 __clear_chan_timer(chan);
484 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
487 /* Delete from channel list */
488 list_del(&chan->list);
/* Drop the reference taken by __l2cap_chan_add(). */
490 l2cap_chan_put(chan);
/* Release the HCI connection reference held for this channel. */
493 hci_conn_put(conn->hcon);
498 __l2cap_state_change(chan, BT_CLOSED);
499 sock_set_flag(sk, SOCK_ZAPPED);
502 __l2cap_chan_set_err(chan, err);
/* Pending-accept socket: unlink from the parent's accept queue and
 * wake the listener; otherwise just signal the state change. */
505 bt_accept_unlink(sk);
506 parent->sk_data_ready(parent, 0);
508 sk->sk_state_change(sk);
/* Nothing more to free if configuration never completed both ways. */
512 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
513 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
516 skb_queue_purge(&chan->tx_q);
518 if (chan->mode == L2CAP_MODE_ERTM) {
519 struct srej_list *l, *tmp;
/* Stop all ERTM timers before freeing their queues. */
521 __clear_retrans_timer(chan);
522 __clear_monitor_timer(chan);
523 __clear_ack_timer(chan);
525 skb_queue_purge(&chan->srej_q);
527 l2cap_seq_list_free(&chan->srej_list);
528 l2cap_seq_list_free(&chan->retrans_list);
/* Free any remaining legacy SREJ list entries (body elided). */
529 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every not-yet-accepted child channel queued on a listening
 * socket @parent. */
536 static void l2cap_chan_cleanup_listen(struct sock *parent)
540 BT_DBG("parent %p", parent);
542 /* Close not yet accepted channels */
543 while ((sk = bt_accept_dequeue(parent, NULL))) {
544 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
546 l2cap_chan_lock(chan);
547 __clear_chan_timer(chan);
548 l2cap_chan_close(chan, ECONNRESET);
549 l2cap_chan_unlock(chan);
/* Let the socket layer release its side of the channel. */
551 chan->ops->close(chan->data);
/* State-machine driven close of @chan with error @reason:
 * listening sockets drain their accept queue; connected BR/EDR
 * channels send a Disconnect request; half-open incoming channels
 * reject the pending Connect request first; everything else is
 * deleted directly. Caller holds the channel lock. */
555 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
557 struct l2cap_conn *conn = chan->conn;
558 struct sock *sk = chan->sk;
560 BT_DBG("chan %p state %s sk %p", chan,
561 state_to_string(chan->state), sk);
563 switch (chan->state) {
/* BT_LISTEN (label elided): close pending children, then zap. */
566 l2cap_chan_cleanup_listen(sk);
568 __l2cap_state_change(chan, BT_CLOSED);
569 sock_set_flag(sk, SOCK_ZAPPED);
/* BT_CONNECTED/BT_CONFIG on ACL: initiate a proper L2CAP
 * disconnect and arm the channel timer for the response. */
575 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
576 conn->hcon->type == ACL_LINK) {
577 __set_chan_timer(chan, sk->sk_sndtimeo);
578 l2cap_send_disconn_req(conn, chan, reason);
580 l2cap_chan_del(chan, reason);
/* BT_CONNECT2: an incoming Connect request is still pending —
 * answer it with a rejection before deleting the channel. */
584 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
585 conn->hcon->type == ACL_LINK) {
586 struct l2cap_conn_rsp rsp;
/* Deferred setup means security was the blocker; otherwise
 * report a bad PSM. */
589 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
590 result = L2CAP_CR_SEC_BLOCK;
592 result = L2CAP_CR_BAD_PSM;
593 l2cap_state_change(chan, BT_DISCONN);
/* Note: scid/dcid are swapped in the response, as seen from
 * the remote side. */
595 rsp.scid = cpu_to_le16(chan->dcid);
596 rsp.dcid = cpu_to_le16(chan->scid);
597 rsp.result = cpu_to_le16(result);
598 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
599 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
603 l2cap_chan_del(chan, reason);
/* BT_CONNECT/BT_DISCONN: no signalling needed, just delete. */
608 l2cap_chan_del(chan, reason);
/* default: nothing to tear down, only zap the socket. */
613 sock_set_flag(sk, SOCK_ZAPPED);
/* Map the channel type and security level to the HCI authentication
 * requirement used when establishing the link. */
619 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
621 if (chan->chan_type == L2CAP_CHAN_RAW) {
/* Raw channels are used for dedicated bonding. */
622 switch (chan->sec_level) {
623 case BT_SECURITY_HIGH:
624 return HCI_AT_DEDICATED_BONDING_MITM;
625 case BT_SECURITY_MEDIUM:
626 return HCI_AT_DEDICATED_BONDING;
628 return HCI_AT_NO_BONDING;
/* PSM 0x0001 is SDP: never requires bonding, and LOW security is
 * downgraded to the special SDP level. */
630 } else if (chan->psm == cpu_to_le16(0x0001)) {
631 if (chan->sec_level == BT_SECURITY_LOW)
632 chan->sec_level = BT_SECURITY_SDP;
634 if (chan->sec_level == BT_SECURITY_HIGH)
635 return HCI_AT_NO_BONDING_MITM;
637 return HCI_AT_NO_BONDING;
/* All other PSMs use general bonding. */
639 switch (chan->sec_level) {
640 case BT_SECURITY_HIGH:
641 return HCI_AT_GENERAL_BONDING_MITM;
642 case BT_SECURITY_MEDIUM:
643 return HCI_AT_GENERAL_BONDING;
645 return HCI_AT_NO_BONDING;

650 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level on the
 * underlying link; non-zero means security is already satisfied. */
651 int l2cap_chan_check_security(struct l2cap_chan *chan)
653 struct l2cap_conn *conn = chan->conn;
656 auth_type = l2cap_get_auth_type(chan);
658 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier for @conn,
 * wrapping within the kernel-reserved range. */
661 static u8 l2cap_get_ident(struct l2cap_conn *conn)
665 /* Get next available identificator.
666 * 1 - 128 are used by kernel.
667 * 129 - 199 are reserved.
668 * 200 - 254 are used by utilities like l2ping, etc.
/* tx_ident is shared by all channels on the connection, so protect
 * it with the connection spinlock. */
671 spin_lock(&conn->lock);
/* Wrap back to 1 past the kernel range (reset line elided). */
673 if (++conn->tx_ident > 128)
678 spin_unlock(&conn->lock);
/* Build a signalling command skb and queue it on the HCI channel.
 * Silently returns if the skb could not be built (check elided). */
683 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
685 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
688 BT_DBG("code 0x%2.2x", code);
/* Use the non-flushable ACL start flag when the controller
 * supports it, so signalling is never flushed. */
693 if (lmp_no_flush_capable(conn->hcon->hdev))
694 flags = ACL_START_NO_FLUSH;
/* Signalling always forces the link active and gets top priority. */
698 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
699 skb->priority = HCI_PRIO_MAX;
701 hci_send_acl(conn->hchan, skb, flags);

/* Queue a data skb for @chan on the ACL link, honouring the
 * channel's flushable and force-active flags. */
704 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
706 struct hci_conn *hcon = chan->conn->hcon;
709 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
712 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
713 lmp_no_flush_capable(hcon->hdev))
714 flags = ACL_START_NO_FLUSH;
718 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
719 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced control field into @control.
 * S-frames carry poll/supervise bits; I-frames carry SAR/txseq
 * (the sframe flag assignments are elided in this excerpt). */
722 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
724 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
725 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
727 if (enh & L2CAP_CTRL_FRAME_TYPE) {
730 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
731 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
738 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
739 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

/* Decode a 32-bit extended control field into @control;
 * mirror image of __unpack_enhanced_control() for extended windows. */
746 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
748 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
749 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
751 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
754 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
755 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
762 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
763 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

/* Decode the control field from an incoming skb into its cb area,
 * choosing the extended or enhanced layout per the channel flag. */
770 static inline void __unpack_control(struct l2cap_chan *chan,
773 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
774 __unpack_extended_control(get_unaligned_le32(skb->data),
775 &bt_cb(skb)->control);
777 __unpack_enhanced_control(get_unaligned_le16(skb->data),
778 &bt_cb(skb)->control);

/* Encode @control as a 32-bit extended control field. */
782 static u32 __pack_extended_control(struct l2cap_ctrl *control)
786 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
787 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
789 if (control->sframe) {
790 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
791 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
792 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
794 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
795 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;

/* Encode @control as a 16-bit enhanced control field. */
801 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
805 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
806 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
808 if (control->sframe) {
809 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
810 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
811 packed |= L2CAP_CTRL_FRAME_TYPE;
813 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
814 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;

/* Write the packed control field into an outgoing skb just after
 * the basic L2CAP header. */
820 static inline void __pack_control(struct l2cap_chan *chan,
821 struct l2cap_ctrl *control,
824 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
825 put_unaligned_le32(__pack_extended_control(control),
826 skb->data + L2CAP_HDR_SIZE);
828 put_unaligned_le16(__pack_enhanced_control(control),
829 skb->data + L2CAP_HDR_SIZE);
/* Build and transmit an ERTM S-frame carrying @control on @chan,
 * adding the final/poll bits pending on the channel and an optional
 * CRC16 FCS. No-op unless the channel is connected. */
833 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
836 struct l2cap_hdr *lh;
837 struct l2cap_conn *conn = chan->conn;
840 if (chan->state != BT_CONNECTED)
/* Header size depends on whether extended control is in use. */
843 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
844 hlen = L2CAP_EXT_HDR_SIZE;
846 hlen = L2CAP_ENH_HDR_SIZE;
848 if (chan->fcs == L2CAP_FCS_CRC16)
849 hlen += L2CAP_FCS_SIZE;
851 BT_DBG("chan %p, control 0x%8.8x", chan, control);
/* An S-frame has no payload; clamp to the connection MTU anyway. */
853 count = min_t(unsigned int, conn->mtu, hlen);
855 control |= __set_sframe(chan);
/* Piggy-back any pending final (F) and poll (P) bits. */
857 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
858 control |= __set_ctrl_final(chan);
860 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
861 control |= __set_ctrl_poll(chan);
863 skb = bt_skb_alloc(count, GFP_ATOMIC);
867 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
868 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
869 lh->cid = cpu_to_le16(chan->dcid);
871 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
/* FCS covers the whole PDU up to (but excluding) the FCS itself. */
873 if (chan->fcs == L2CAP_FCS_CRC16) {
874 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
875 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
878 skb->priority = HCI_PRIO_MAX;
879 l2cap_do_send(chan, skb);

/* Send RNR while locally busy (remembering that RNR was sent),
 * otherwise RR, always acknowledging up to buffer_seq. */
882 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
884 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
885 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
886 set_bit(CONN_RNR_SENT, &chan->conn_state);
888 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
890 control |= __set_reqseq(chan, chan->buffer_seq);
892 l2cap_send_sframe(chan, control);
/* True when no Connect request is outstanding for @chan. */
895 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
897 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);

/* Send an L2CAP Connect request for @chan and mark the connect as
 * pending (the req.psm assignment is elided in this excerpt). */
900 static void l2cap_send_conn_req(struct l2cap_chan *chan)
902 struct l2cap_conn *conn = chan->conn;
903 struct l2cap_conn_req req;
905 req.scid = cpu_to_le16(chan->scid);
/* Remember the ident so the response can be matched to us. */
908 chan->ident = l2cap_get_ident(conn);
910 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
912 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Mark @chan fully connected: clear configuration state and the
 * channel timer, move to BT_CONNECTED and wake the socket (and the
 * accepting parent, if any). */
915 static void l2cap_chan_ready(struct l2cap_chan *chan)
917 struct sock *sk = chan->sk;
922 parent = bt_sk(sk)->parent;
924 BT_DBG("sk %p, parent %p", sk, parent);
/* This clears all conf flags, including CONF_NOT_COMPLETE. */
926 chan->conf_state = 0;
927 __clear_chan_timer(chan);
929 __l2cap_state_change(chan, BT_CONNECTED);
930 sk->sk_state_change(sk);
933 parent->sk_data_ready(parent, 0);

/* Kick off connection establishment for @chan: LE links are ready
 * immediately; on BR/EDR, either send the Connect request (once the
 * feature mask is known and security passes) or first issue an
 * Information request for the feature mask. */
938 static void l2cap_do_start(struct l2cap_chan *chan)
940 struct l2cap_conn *conn = chan->conn;
942 if (conn->hcon->type == LE_LINK) {
943 l2cap_chan_ready(chan);
/* Feature mask already requested: only proceed once done. */
947 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
948 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
951 if (l2cap_chan_check_security(chan) &&
952 __l2cap_no_conn_pending(chan))
953 l2cap_send_conn_req(chan);
955 struct l2cap_info_req req;
956 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
958 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
959 conn->info_ident = l2cap_get_ident(conn);
/* Bound the wait for the Information response. */
961 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
963 l2cap_send_cmd(conn, conn->info_ident,
964 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether @mode is supported by both the local feature mask
 * and the remote @feat_mask (ERTM/streaming; other modes handled by
 * the elided default path). */
968 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
970 u32 local_feat_mask = l2cap_feat_mask;
/* We always implement ERTM and streaming locally. */
972 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
975 case L2CAP_MODE_ERTM:
976 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
977 case L2CAP_MODE_STREAMING:
978 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP Disconnect request for @chan, first silencing all
 * ERTM timers, then move the channel to BT_DISCONN with error @err
 * recorded on its socket. */
984 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
986 struct sock *sk = chan->sk;
987 struct l2cap_disconn_req req;
/* Stop retransmission machinery before tearing the channel down. */
992 if (chan->mode == L2CAP_MODE_ERTM) {
993 __clear_retrans_timer(chan);
994 __clear_monitor_timer(chan);
995 __clear_ack_timer(chan);
998 req.dcid = cpu_to_le16(chan->dcid);
999 req.scid = cpu_to_le16(chan->scid);
1000 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1001 L2CAP_DISCONN_REQ, sizeof(req), &req);
1004 __l2cap_state_change(chan, BT_DISCONN);
1005 __l2cap_chan_set_err(chan, err);
1009 /* ---- L2CAP connections ---- */
/* Walk all channels on @conn once the feature mask is known and
 * drive each one forward: outgoing BT_CONNECT channels send their
 * Connect request (or are closed if the mode is unsupported);
 * incoming BT_CONNECT2 channels are answered and, on success, the
 * first Configure request is sent. */
1010 static void l2cap_conn_start(struct l2cap_conn *conn)
1012 struct l2cap_chan *chan, *tmp;
1014 BT_DBG("conn %p", conn);
1016 mutex_lock(&conn->chan_lock);
/* _safe: l2cap_chan_close() below can unlink the current entry. */
1018 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1019 struct sock *sk = chan->sk;
1021 l2cap_chan_lock(chan);
/* Only connection-oriented channels take part in this dance. */
1023 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1024 l2cap_chan_unlock(chan);
1028 if (chan->state == BT_CONNECT) {
/* Wait for security, and don't double-send the request. */
1029 if (!l2cap_chan_check_security(chan) ||
1030 !__l2cap_no_conn_pending(chan)) {
1031 l2cap_chan_unlock(chan);
/* A state-2 device with an unsupported mode cannot fall back:
 * abort the channel instead of connecting. */
1035 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1036 && test_bit(CONF_STATE2_DEVICE,
1037 &chan->conf_state)) {
1038 l2cap_chan_close(chan, ECONNRESET);
1039 l2cap_chan_unlock(chan);
1043 l2cap_send_conn_req(chan);
1045 } else if (chan->state == BT_CONNECT2) {
1046 struct l2cap_conn_rsp rsp;
/* scid/dcid swapped: response is phrased from remote's view. */
1048 rsp.scid = cpu_to_le16(chan->dcid);
1049 rsp.dcid = cpu_to_le16(chan->scid);
1051 if (l2cap_chan_check_security(chan)) {
/* Deferred setup: keep the peer pending and let the
 * listener decide via accept(). */
1053 if (test_bit(BT_SK_DEFER_SETUP,
1054 &bt_sk(sk)->flags)) {
1055 struct sock *parent = bt_sk(sk)->parent;
1056 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1057 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1059 parent->sk_data_ready(parent, 0);
1062 __l2cap_state_change(chan, BT_CONFIG);
1063 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1064 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied: report authentication pending. */
1068 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1069 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1072 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only start configuration after a successful response, and
 * only once. */
1075 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1076 rsp.result != L2CAP_CR_SUCCESS) {
1077 l2cap_chan_unlock(chan);
1081 set_bit(CONF_REQ_SENT, &chan->conf_state);
1082 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1083 l2cap_build_conf_req(chan, buf), buf);
1084 chan->num_conf_req++;
1087 l2cap_chan_unlock(chan);
1090 mutex_unlock(&conn->chan_lock);
1093 /* Find socket with cid and source/destination bdaddr.
1094 * Returns closest match, locked.
/* Exact src+dst match wins immediately; otherwise the best
 * wildcard (BDADDR_ANY) match c1 is remembered and returned
 * (the c1 bookkeeping/return lines are elided in this excerpt). */
1096 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1100 struct l2cap_chan *c, *c1 = NULL;
1102 read_lock(&chan_list_lock);
1104 list_for_each_entry(c, &chan_list, global_l) {
1105 struct sock *sk = c->sk;
/* Optional state filter (0 = any state). */
1107 if (state && c->state != state)
1110 if (c->scid == cid) {
1111 int src_match, dst_match;
1112 int src_any, dst_any;
1115 src_match = !bacmp(&bt_sk(sk)->src, src);
1116 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1117 if (src_match && dst_match) {
1118 read_unlock(&chan_list_lock);
/* Fall back to wildcard matches on either address. */
1123 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1124 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1125 if ((src_match && dst_any) || (src_any && dst_match) ||
1126 (src_any && dst_any))
1131 read_unlock(&chan_list_lock);
/* Handle an incoming LE connection: find a listener on the LE data
 * CID, spawn a child channel, attach it to @conn and signal the
 * accepting parent socket. */
1136 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1138 struct sock *parent, *sk;
1139 struct l2cap_chan *chan, *pchan;
1143 /* Check if we have socket listening on cid */
1144 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1145 conn->src, conn->dst);
1153 /* Check for backlog size */
1154 if (sk_acceptq_is_full(parent)) {
1155 BT_DBG("backlog full %d", parent->sk_ack_backlog);
/* Ask the socket layer to create the child channel. */
1159 chan = pchan->ops->new_connection(pchan->data);
/* The child holds a reference on the HCI connection. */
1165 hci_conn_hold(conn->hcon);
1167 bacpy(&bt_sk(sk)->src, conn->src);
1168 bacpy(&bt_sk(sk)->dst, conn->dst);
1170 bt_accept_enqueue(parent, sk);
1172 l2cap_chan_add(conn, chan);
1174 __set_chan_timer(chan, sk->sk_sndtimeo);
1176 __l2cap_state_change(chan, BT_CONNECTED);
1177 parent->sk_data_ready(parent, 0);
1180 release_sock(parent);
/* Called when the underlying HCI link comes up: accept/secure LE
 * connections, then walk every channel and bring it to the ready
 * state appropriate for its type. */
1183 static void l2cap_conn_ready(struct l2cap_conn *conn)
1185 struct l2cap_chan *chan;
1187 BT_DBG("conn %p", conn);
/* Incoming LE link: hand off to the LE accept path. */
1189 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1190 l2cap_le_conn_ready(conn);
/* Outgoing LE link: start SMP with the pending security level. */
1192 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1193 smp_conn_security(conn, conn->hcon->pending_sec_level);
1195 mutex_lock(&conn->chan_lock);
1197 list_for_each_entry(chan, &conn->chan_l, list) {
1199 l2cap_chan_lock(chan);
1201 if (conn->hcon->type == LE_LINK) {
/* LE channels are ready as soon as security allows. */
1202 if (smp_conn_security(conn, chan->sec_level))
1203 l2cap_chan_ready(chan);
/* Connectionless/raw channels need no signalling handshake. */
1205 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1206 struct sock *sk = chan->sk;
1207 __clear_chan_timer(chan);
1209 __l2cap_state_change(chan, BT_CONNECTED);
1210 sk->sk_state_change(sk);
/* Connection-oriented channels waiting to connect start now. */
1213 } else if (chan->state == BT_CONNECT)
1214 l2cap_do_start(chan);
1216 l2cap_chan_unlock(chan);
1219 mutex_unlock(&conn->chan_lock);
1222 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate @err to every channel that demanded reliable delivery
 * (FLAG_FORCE_RELIABLE). */
1223 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1225 struct l2cap_chan *chan;
1227 BT_DBG("conn %p", conn);
1229 mutex_lock(&conn->chan_lock);
1231 list_for_each_entry(chan, &conn->chan_l, list) {
1232 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1233 __l2cap_chan_set_err(chan, err);
1236 mutex_unlock(&conn->chan_lock);

/* Information-request timer expired: give up on the feature mask,
 * mark it done and start the pending channels anyway. */
1239 static void l2cap_info_timeout(struct work_struct *work)
1241 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1244 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1245 conn->info_ident = 0;
1247 l2cap_conn_start(conn);
/* Destroy the L2CAP connection attached to @hcon: delete every
 * channel with error @err, release the HCI channel, cancel the
 * info/security timers and detach from the hci_conn. */
1250 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1252 struct l2cap_conn *conn = hcon->l2cap_data;
1253 struct l2cap_chan *chan, *l;
1258 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled PDU. */
1260 kfree_skb(conn->rx_skb);
1262 mutex_lock(&conn->chan_lock);
/* Hold each channel across deletion so ops->close can still run
 * after it is unlinked. */
1265 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1266 l2cap_chan_hold(chan);
1267 l2cap_chan_lock(chan);
1269 l2cap_chan_del(chan, err);
1271 l2cap_chan_unlock(chan);
1273 chan->ops->close(chan->data);
1274 l2cap_chan_put(chan);
1277 mutex_unlock(&conn->chan_lock);
1279 hci_chan_del(conn->hchan);
/* info_timer is only armed once the feature-mask request went out. */
1281 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1282 cancel_delayed_work_sync(&conn->info_timer);
/* Pending SMP: stop its timer and free the SMP context. */
1284 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1285 cancel_delayed_work_sync(&conn->security_timer);
1286 smp_chan_destroy(conn);
1289 hcon->l2cap_data = NULL;

/* SMP security timer expired: destroy the SMP context and tear the
 * whole connection down with ETIMEDOUT. */
1293 static void security_timeout(struct work_struct *work)
1295 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1296 security_timer.work);
1298 BT_DBG("conn %p", conn);
1300 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1301 smp_chan_destroy(conn);
1302 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for @hcon, allocating
 * an HCI channel and initialising MTU, addresses, locks and the
 * per-link timer. Returns NULL on allocation failure. */
1306 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1308 struct l2cap_conn *conn = hcon->l2cap_data;
1309 struct hci_chan *hchan;
1314 hchan = hci_chan_create(hcon);
1318 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
/* kzalloc failed: release the HCI channel we just created. */
1320 hci_chan_del(hchan);
1324 hcon->l2cap_data = conn;
1326 conn->hchan = hchan;
1328 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* LE links may use a dedicated (smaller) MTU when advertised. */
1330 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1331 conn->mtu = hcon->hdev->le_mtu;
1333 conn->mtu = hcon->hdev->acl_mtu;
1335 conn->src = &hcon->hdev->bdaddr;
1336 conn->dst = &hcon->dst;
1338 conn->feat_mask = 0;
1340 spin_lock_init(&conn->lock);
1341 mutex_init(&conn->chan_lock);
1343 INIT_LIST_HEAD(&conn->chan_l);
/* LE uses the SMP security timer; BR/EDR the info-request timer. */
1345 if (hcon->type == LE_LINK)
1346 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1348 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1350 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1355 /* ---- Socket interface ---- */
1357 /* Find socket with psm and source / destination bdaddr.
1358 * Returns closest match.
/* Same matching strategy as l2cap_global_chan_by_scid() but keyed
 * on PSM: exact src+dst wins, otherwise the best wildcard match c1
 * is returned (bookkeeping lines elided in this excerpt). */
1360 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1364 struct l2cap_chan *c, *c1 = NULL;
1366 read_lock(&chan_list_lock);
1368 list_for_each_entry(c, &chan_list, global_l) {
1369 struct sock *sk = c->sk;
/* Optional state filter (0 = any state). */
1371 if (state && c->state != state)
1374 if (c->psm == psm) {
1375 int src_match, dst_match;
1376 int src_any, dst_any;
1379 src_match = !bacmp(&bt_sk(sk)->src, src);
1380 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1381 if (src_match && dst_match) {
1382 read_unlock(&chan_list_lock);
/* Fall back to wildcard matches on either address. */
1387 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1388 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1389 if ((src_match && dst_any) || (src_any && dst_match) ||
1390 (src_any && dst_any))
1395 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection for @chan to @dst on @psm/@cid:
 * validate PSM/mode, create or reuse the ACL/LE link with the
 * required security, attach the channel and start the L2CAP
 * connect sequence. Returns 0 or a negative errno. */
1400 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1401 bdaddr_t *dst, u8 dst_type)
1403 struct sock *sk = chan->sk;
1404 bdaddr_t *src = &bt_sk(sk)->src;
1405 struct l2cap_conn *conn;
1406 struct hci_conn *hcon;
1407 struct hci_dev *hdev;
1411 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1412 dst_type, __le16_to_cpu(chan->psm));
/* Pick the local adapter that routes to @dst. */
1414 hdev = hci_get_route(dst, src);
1416 return -EHOSTUNREACH;
1420 l2cap_chan_lock(chan);
1422 /* PSM must be odd and lsb of upper byte must be 0 */
1423 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1424 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a fixed CID. */
1429 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
/* Only basic/ERTM/streaming modes may connect; others rejected
 * by the elided default branch. */
1434 switch (chan->mode) {
1435 case L2CAP_MODE_BASIC:
1437 case L2CAP_MODE_ERTM:
1438 case L2CAP_MODE_STREAMING:
1449 switch (sk->sk_state) {
1453 /* Already connecting */
1459 /* Already connected */
1475 /* Set destination address and psm */
1476 bacpy(&bt_sk(sk)->dst, dst);
1483 auth_type = l2cap_get_auth_type(chan);
/* LE data CID selects an LE link; everything else uses ACL. */
1485 if (chan->dcid == L2CAP_CID_LE_DATA)
1486 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1487 chan->sec_level, auth_type);
1489 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1490 chan->sec_level, auth_type);
1493 err = PTR_ERR(hcon);
1497 conn = l2cap_conn_add(hcon, 0);
/* LE allows only one channel per link; reject if one exists
 * (error assignment elided in this excerpt). */
1504 if (hcon->type == LE_LINK) {
1507 if (!list_empty(&conn->chan_l)) {
1516 /* Update source addr of the socket */
1517 bacpy(src, conn->src);
/* chan_add takes conn->chan_lock, which must be taken before the
 * channel lock — hence the unlock/relock dance. */
1519 l2cap_chan_unlock(chan);
1520 l2cap_chan_add(conn, chan);
1521 l2cap_chan_lock(chan);
1523 l2cap_state_change(chan, BT_CONNECT);
1524 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Link already up: skip straight to the L2CAP-level start. */
1526 if (hcon->state == BT_CONNECTED) {
1527 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1528 __clear_chan_timer(chan);
1529 if (l2cap_chan_check_security(chan))
1530 l2cap_state_change(chan, BT_CONNECTED);
1532 l2cap_do_start(chan);
1538 l2cap_chan_unlock(chan);
1539 hci_dev_unlock(hdev);
/* Block (interruptibly) until every outstanding ERTM I-frame on the
 * socket's channel has been acknowledged, or the connection goes away.
 * Returns 0 on success, -EINTR-style errno on signal, or a pending
 * socket error.  Caller must hold the socket lock per the usual
 * sk_sleep() wait pattern.
 * NOTE(review): excerpt is incomplete — timeout setup and loop exits
 * are not fully visible.
 */
1544 int __l2cap_wait_ack(struct sock *sk)
1546 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1547 DECLARE_WAITQUEUE(wait, current);
1551 add_wait_queue(sk_sleep(sk), &wait);
1552 set_current_state(TASK_INTERRUPTIBLE);
/* Wait while unacked frames remain and the connection is alive */
1553 while (chan->unacked_frames > 0 && chan->conn) {
1557 if (signal_pending(current)) {
1558 err = sock_intr_errno(timeo);
1563 timeo = schedule_timeout(timeo);
1565 set_current_state(TASK_INTERRUPTIBLE);
1567 err = sock_error(sk);
1571 set_current_state(TASK_RUNNING);
1572 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer expiry (delayed work).  If the retry budget is
 * exhausted, tear the channel down; otherwise bump the retry count,
 * re-arm the monitor timer and poll the peer with an RR/RNR S-frame.
 * Drops the channel reference taken when the timer was armed.
 */
1576 static void l2cap_monitor_timeout(struct work_struct *work)
1578 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1579 monitor_timer.work);
1581 BT_DBG("chan %p", chan);
1583 l2cap_chan_lock(chan);
/* Too many unanswered polls: give up and disconnect */
1585 if (chan->retry_count >= chan->remote_max_tx) {
1586 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1587 l2cap_chan_unlock(chan);
1588 l2cap_chan_put(chan);
1592 chan->retry_count++;
1593 __set_monitor_timer(chan);
1595 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1596 l2cap_chan_unlock(chan);
1597 l2cap_chan_put(chan);
/* ERTM retransmission timer expiry (delayed work).  Switches the
 * channel into the poll/monitor phase: start the retry counter, arm
 * the monitor timer, flag that we are waiting for an F-bit response,
 * and poll the peer.  Drops the timer's channel reference.
 */
1600 static void l2cap_retrans_timeout(struct work_struct *work)
1602 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1603 retrans_timer.work);
1605 BT_DBG("chan %p", chan);
1607 l2cap_chan_lock(chan);
1609 chan->retry_count = 1;
1610 __set_monitor_timer(chan);
/* Expect the peer to answer our poll with the Final bit set */
1612 set_bit(CONN_WAIT_F, &chan->conn_state);
1614 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1616 l2cap_chan_unlock(chan);
1617 l2cap_chan_put(chan);
/* Release transmitted I-frames from the head of tx_q that the peer has
 * acknowledged, decrementing unacked_frames for each one.  When nothing
 * remains unacked the retransmission timer is stopped.
 * NOTE(review): excerpt is incomplete — the loop-exit on reaching
 * expected_ack_seq and the kfree of dequeued skbs are not visible.
 */
1620 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1622 struct sk_buff *skb;
1624 while ((skb = skb_peek(&chan->tx_q)) &&
1625 chan->unacked_frames) {
/* Stop once we reach the first frame not yet acknowledged */
1626 if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
1629 skb = skb_dequeue(&chan->tx_q);
1632 chan->unacked_frames--;
/* All frames acked: no retransmission pending */
1635 if (!chan->unacked_frames)
1636 __clear_retrans_timer(chan);
/* Streaming mode transmit: drain tx_q, stamping each PDU's control
 * field with the next TxSeq and its SAR bits, appending the CRC16 FCS
 * when enabled, and handing the frame to the HCI layer.  Streaming
 * mode never retransmits, so frames are dequeued (not cloned).
 */
1639 static void l2cap_streaming_send(struct l2cap_chan *chan)
1641 struct sk_buff *skb;
1645 while ((skb = skb_dequeue(&chan->tx_q))) {
/* Rewrite the control field in place: TxSeq + SAR from the cb */
1646 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1647 control |= __set_txseq(chan, chan->next_tx_seq);
1648 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
1649 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
/* FCS covers everything except the trailing FCS field itself */
1651 if (chan->fcs == L2CAP_FCS_CRC16) {
1652 fcs = crc16(0, (u8 *)skb->data,
1653 skb->len - L2CAP_FCS_SIZE);
1654 put_unaligned_le16(fcs,
1655 skb->data + skb->len - L2CAP_FCS_SIZE);
1658 l2cap_do_send(chan, skb);
1660 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the single I-frame with sequence number 'tx_seq' (SREJ
 * recovery).  Walks tx_q to find the frame, aborts the channel if the
 * frame already hit remote_max_tx retries, then sends a clone with a
 * refreshed control field (ReqSeq, F-bit if pending) and FCS.
 * NOTE(review): excerpt is incomplete — the not-found early return and
 * clone-failure handling are not visible.
 */
1664 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1666 struct sk_buff *skb, *tx_skb;
1670 skb = skb_peek(&chan->tx_q);
/* Linear search for the requested TxSeq in the transmit queue */
1674 while (bt_cb(skb)->control.txseq != tx_seq) {
1675 if (skb_queue_is_last(&chan->tx_q, skb))
1678 skb = skb_queue_next(&chan->tx_q, skb);
/* Retry budget exhausted for this frame: abort the channel */
1681 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1682 chan->remote_max_tx) {
1683 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Clone so the original stays queued for further retransmission */
1687 tx_skb = skb_clone(skb, GFP_ATOMIC);
1688 bt_cb(skb)->control.retries++;
/* Keep only the SAR bits; rebuild the rest of the control field */
1690 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1691 control &= __get_sar_mask(chan);
1693 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1694 control |= __set_ctrl_final(chan);
1696 control |= __set_reqseq(chan, chan->buffer_seq);
1697 control |= __set_txseq(chan, tx_seq);
1699 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute FCS since the control field changed */
1701 if (chan->fcs == L2CAP_FCS_CRC16) {
1702 fcs = crc16(0, (u8 *)tx_skb->data,
1703 tx_skb->len - L2CAP_FCS_SIZE);
1704 put_unaligned_le16(fcs,
1705 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1708 l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send queued I-frames starting at tx_send_head
 * while the transmit window has room and the peer is not busy.  Each
 * frame is cloned (the original remains queued for retransmission),
 * its control field rebuilt, FCS recomputed, and the retransmission
 * timer armed.  Returns the number of frames sent (via frames_sent)
 * per the callers' use of "> 0" checks.
 * NOTE(review): excerpt is incomplete — returns and the clone-failure
 * path are not visible.
 */
1711 static int l2cap_ertm_send(struct l2cap_chan *chan)
1713 struct sk_buff *skb, *tx_skb;
1718 if (chan->state != BT_CONNECTED)
/* Peer signalled RNR: hold off until it is ready again */
1721 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1724 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
/* Frame already retried remote_max_tx times: abort the channel */
1726 if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
1727 chan->remote_max_tx) {
1728 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1732 tx_skb = skb_clone(skb, GFP_ATOMIC);
1734 bt_cb(skb)->control.retries++;
/* Keep SAR bits, rebuild ReqSeq/TxSeq/F-bit for this transmission */
1736 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1737 control &= __get_sar_mask(chan);
1739 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1740 control |= __set_ctrl_final(chan);
1742 control |= __set_reqseq(chan, chan->buffer_seq);
1743 control |= __set_txseq(chan, chan->next_tx_seq);
1744 control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
1746 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS is computed/stored via skb->data but sized by
 * tx_skb->len; skb_clone() shares the data buffer so this touches
 * the same bytes, but using tx_skb->data (as in
 * l2cap_retransmit_one_frame) would be consistent — confirm */
1748 if (chan->fcs == L2CAP_FCS_CRC16) {
1749 fcs = crc16(0, (u8 *)skb->data,
1750 tx_skb->len - L2CAP_FCS_SIZE);
1751 put_unaligned_le16(fcs, skb->data +
1752 tx_skb->len - L2CAP_FCS_SIZE);
1755 l2cap_do_send(chan, tx_skb);
1757 __set_retrans_timer(chan);
1759 bt_cb(skb)->control.txseq = chan->next_tx_seq;
1761 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* First transmission of this frame: it now counts as unacked */
1763 if (bt_cb(skb)->control.retries == 1) {
1764 chan->unacked_frames++;
1767 __clear_ack_timer(chan);
1770 chan->frames_sent++;
/* Advance tx_send_head to the next frame still to be sent */
1772 if (skb_queue_is_last(&chan->tx_q, skb))
1773 chan->tx_send_head = NULL;
1775 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* REJ recovery: rewind the transmit pointer to the head of tx_q and the
 * sequence counter to the last acknowledged sequence number, then
 * resend everything via l2cap_ertm_send().  Returns its result.
 */
1781 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1785 if (!skb_queue_empty(&chan->tx_q))
1786 chan->tx_send_head = chan->tx_q.next;
1788 chan->next_tx_seq = chan->expected_ack_seq;
1789 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames.  If we are locally busy, send RNR and
 * remember that fact; otherwise try to piggy-back the ack on pending
 * I-frames via l2cap_ertm_send(), falling back to an explicit RR
 * S-frame when nothing was sent.
 */
1793 static void __l2cap_send_ack(struct l2cap_chan *chan)
1797 control |= __set_reqseq(chan, chan->buffer_seq);
/* Local receiver busy: tell the peer to stop sending (RNR) */
1799 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1800 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1801 set_bit(CONN_RNR_SENT, &chan->conn_state);
1802 l2cap_send_sframe(chan, control);
/* I-frames carry ReqSeq, so sending data also acks */
1806 if (l2cap_ertm_send(chan) > 0)
1809 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1810 l2cap_send_sframe(chan, control);
/* Cancel the pending delayed ack and acknowledge immediately. */
1813 static void l2cap_send_ack(struct l2cap_chan *chan)
1815 __clear_ack_timer(chan);
1816 __l2cap_send_ack(chan);
/* Send an SREJ S-frame with the Final bit set, requesting the sequence
 * number recorded in the last (tail) entry of the SREJ list.
 */
1819 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1821 struct srej_list *tail;
1824 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1825 control |= __set_ctrl_final(chan);
/* The tail entry holds the most recently missed TxSeq */
1827 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1828 control |= __set_reqseq(chan, tail->tx_seq);
1830 l2cap_send_sframe(chan, control);
/* Copy 'len' bytes of user iovec data into 'skb': the first 'count'
 * bytes into the linear area, the remainder into a chain of fragment
 * skbs (each at most conn->mtu) linked via frag_list.  Returns 0 on
 * success or a negative errno (-EFAULT on copy failure, or the
 * allocator's error).
 * NOTE(review): excerpt is incomplete — the loop header, len/sent
 * bookkeeping and returns are not visible.
 */
1833 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1834 struct msghdr *msg, int len,
1835 int count, struct sk_buff *skb)
1837 struct l2cap_conn *conn = chan->conn;
1838 struct sk_buff **frag;
/* First chunk goes into the linear part of 'skb' */
1841 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1847 /* Continuation fragments (no L2CAP header) */
1848 frag = &skb_shinfo(skb)->frag_list;
1850 struct sk_buff *tmp;
/* Each fragment is capped at the HCI connection MTU */
1852 count = min_t(unsigned int, conn->mtu, len);
1854 tmp = chan->ops->alloc_skb(chan, count,
1855 msg->msg_flags & MSG_DONTWAIT);
1857 return PTR_ERR(tmp);
1861 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1864 (*frag)->priority = skb->priority;
/* Account the fragment's bytes against the parent skb */
1869 skb->len += (*frag)->len;
1870 skb->data_len += (*frag)->len;
1872 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM
 * followed by the user payload copied from 'msg'.  Returns the skb or
 * an ERR_PTR on allocation/copy failure.
 * NOTE(review): excerpt is incomplete — the alloc_skb error check and
 * final return are not visible.
 */
1878 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1879 struct msghdr *msg, size_t len,
1882 struct l2cap_conn *conn = chan->conn;
1883 struct sk_buff *skb;
1884 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1885 struct l2cap_hdr *lh;
1887 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
/* Linear part limited to the HCI MTU; rest goes to frag_list */
1889 count = min_t(unsigned int, (conn->mtu - hlen), len);
1891 skb = chan->ops->alloc_skb(chan, count + hlen,
1892 msg->msg_flags & MSG_DONTWAIT);
1896 skb->priority = priority;
1898 /* Create L2CAP header */
1899 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1900 lh->cid = cpu_to_le16(chan->dcid);
1901 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
/* PSM immediately follows the basic header in a G-frame */
1902 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1904 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1905 if (unlikely(err < 0)) {
1907 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload.  Returns the skb or an ERR_PTR on failure.
 * NOTE(review): excerpt is incomplete — the alloc_skb error check and
 * final return are not visible.
 */
1912 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1913 struct msghdr *msg, size_t len,
1916 struct l2cap_conn *conn = chan->conn;
1917 struct sk_buff *skb;
1919 struct l2cap_hdr *lh;
1921 BT_DBG("chan %p len %d", chan, (int)len);
1923 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1925 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1926 msg->msg_flags & MSG_DONTWAIT);
1930 skb->priority = priority;
1932 /* Create L2CAP header */
1933 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1934 lh->cid = cpu_to_le16(chan->dcid);
1935 lh->len = cpu_to_le16(len);
1937 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1938 if (unlikely(err < 0)) {
1940 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (enhanced or extended, filled in at transmit time), an optional
 * SDU-length field for the first segment, the payload, and a zeroed
 * FCS placeholder when CRC16 is enabled.  Returns the skb or an
 * ERR_PTR.
 * NOTE(review): excerpt is incomplete — the !conn check, the sdulen
 * conditional and the final return are not visible.
 */
1945 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1946 struct msghdr *msg, size_t len,
1949 struct l2cap_conn *conn = chan->conn;
1950 struct sk_buff *skb;
1951 int err, count, hlen;
1952 struct l2cap_hdr *lh;
1954 BT_DBG("chan %p len %d", chan, (int)len);
1957 return ERR_PTR(-ENOTCONN);
/* Header size depends on enhanced vs extended control field */
1959 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1960 hlen = L2CAP_EXT_HDR_SIZE;
1962 hlen = L2CAP_ENH_HDR_SIZE;
/* SDU length field is present only in SAR_START frames */
1965 hlen += L2CAP_SDULEN_SIZE;
1967 if (chan->fcs == L2CAP_FCS_CRC16)
1968 hlen += L2CAP_FCS_SIZE;
1970 count = min_t(unsigned int, (conn->mtu - hlen), len);
1972 skb = chan->ops->alloc_skb(chan, count + hlen,
1973 msg->msg_flags & MSG_DONTWAIT);
1977 /* Create L2CAP header */
1978 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1979 lh->cid = cpu_to_le16(chan->dcid);
1980 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Control field is stamped with real values at send time */
1982 __put_control(chan, 0, skb_put(skb, __ctrl_size(chan)));
1985 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1987 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1988 if (unlikely(err < 0)) {
1990 return ERR_PTR(err);
/* FCS placeholder; computed over the finished frame at send time */
1993 if (chan->fcs == L2CAP_FCS_CRC16)
1994 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1996 bt_cb(skb)->control.retries = 0;
/* Segment an SDU from 'msg' into one or more I-frame PDUs queued on
 * 'seg_queue'.  PDU size is bounded by the HCI MTU, the maximum L2CAP
 * overhead and the remote's MPS.  Each PDU's cb records its SAR role
 * (UNSEGMENTED, START, CONTINUE, END).  Returns 0 or a negative errno;
 * on failure the queue is purged.
 * NOTE(review): excerpt is incomplete — the segmentation loop header,
 * len accounting and final return are not visible.
 */
2000 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2001 struct sk_buff_head *seg_queue,
2002 struct msghdr *msg, size_t len)
2004 struct sk_buff *skb;
2010 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
2012 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2013 * so fragmented skbs are not used. The HCI layer's handling
2014 * of fragmented skbs is not compatible with ERTM's queueing.
2017 /* PDU size is derived from the HCI MTU */
2018 pdu_len = chan->conn->mtu;
2020 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2022 /* Adjust for largest possible L2CAP overhead. */
2023 pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
2025 /* Remote device may have requested smaller PDUs */
2026 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
/* Whole SDU fits in one PDU: no segmentation needed */
2028 if (len <= pdu_len) {
2029 sar = L2CAP_SAR_UNSEGMENTED;
2033 sar = L2CAP_SAR_START;
/* First segment carries the 2-byte SDU length field */
2035 pdu_len -= L2CAP_SDULEN_SIZE;
2039 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2042 __skb_queue_purge(seg_queue);
2043 return PTR_ERR(skb);
2046 bt_cb(skb)->control.sar = sar;
2047 __skb_queue_tail(seg_queue, skb);
/* Later segments have no SDU length field: reclaim its space */
2052 pdu_len += L2CAP_SDULEN_SIZE;
2055 if (len <= pdu_len) {
2056 sar = L2CAP_SAR_END;
2059 sar = L2CAP_SAR_CONTINUE;
/* Top-level transmit entry point.  Dispatches on channel type/mode:
 * connectionless channels send a single G-frame; basic mode sends one
 * B-frame; ERTM/streaming segment the SDU first and then feed the
 * per-mode transmit engine.  Returns bytes sent (via 'len') or a
 * negative errno.
 * NOTE(review): excerpt is incomplete — several returns, the -EMSGSIZE
 * paths and the default case body are not visible.
 */
2066 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2069 struct sk_buff *skb;
2071 struct sk_buff_head seg_queue;
2073 /* Connectionless channel */
2074 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2075 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2077 return PTR_ERR(skb);
2079 l2cap_do_send(chan, skb);
2083 switch (chan->mode) {
2084 case L2CAP_MODE_BASIC:
2085 /* Check outgoing MTU */
2086 if (len > chan->omtu)
2089 /* Create a basic PDU */
2090 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2092 return PTR_ERR(skb);
2094 l2cap_do_send(chan, skb);
2098 case L2CAP_MODE_ERTM:
2099 case L2CAP_MODE_STREAMING:
2100 /* Check outgoing MTU */
2101 if (len > chan->omtu) {
2106 __skb_queue_head_init(&seg_queue);
2108 /* Do segmentation before calling in to the state machine,
2109 * since it's possible to block while waiting for memory
2112 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2114 /* The channel could have been closed while segmenting,
2115 * check that it is still connected.
2117 if (chan->state != BT_CONNECTED) {
2118 __skb_queue_purge(&seg_queue);
/* Point the ERTM engine at the new frames if it is idle */
2125 if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL)
2126 chan->tx_send_head = seg_queue.next;
2127 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2129 if (chan->mode == L2CAP_MODE_ERTM)
2130 err = l2cap_ertm_send(chan);
2132 l2cap_streaming_send(chan);
2137 /* If the skbs were not queued for sending, they'll still be in
2138 * seg_queue and need to be purged.
2140 __skb_queue_purge(&seg_queue);
2144 BT_DBG("bad state %1.1x", chan->mode);
2151 /* Copy frame to all raw sockets on that connection */
2152 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2154 struct sk_buff *nskb;
2155 struct l2cap_chan *chan;
2157 BT_DBG("conn %p", conn);
2159 mutex_lock(&conn->chan_lock);
/* Deliver a clone of the frame to every RAW channel on this conn */
2161 list_for_each_entry(chan, &conn->chan_l, list) {
2162 struct sock *sk = chan->sk;
2163 if (chan->chan_type != L2CAP_CHAN_RAW)
2166 /* Don't send frame to the socket it came from */
2169 nskb = skb_clone(skb, GFP_ATOMIC);
/* recv callback consumes nskb on success; free it otherwise
 * (NOTE(review): the kfree_skb line is not visible in this excerpt) */
2173 if (chan->ops->recv(chan->data, nskb))
2177 mutex_unlock(&conn->chan_lock);
2180 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling-command skb: L2CAP header (CID chosen
 * by link type: LE vs BR/EDR signalling), command header, then 'dlen'
 * bytes of 'data', fragmented across frag_list skbs when larger than
 * the connection MTU.  Returns the skb or NULL on allocation failure.
 * NOTE(review): excerpt is incomplete — NULL checks, the len/data
 * bookkeeping and the failure-path kfree are not visible.
 */
2181 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2182 u8 code, u8 ident, u16 dlen, void *data)
2184 struct sk_buff *skb, **frag;
2185 struct l2cap_cmd_hdr *cmd;
2186 struct l2cap_hdr *lh;
2189 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2190 conn, code, ident, dlen);
2192 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2193 count = min_t(unsigned int, conn->mtu, len);
2195 skb = bt_skb_alloc(count, GFP_ATOMIC);
2199 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2200 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling CID differs between LE and BR/EDR links */
2202 if (conn->hcon->type == LE_LINK)
2203 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2205 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2207 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2210 cmd->len = cpu_to_le16(dlen);
/* First chunk of payload goes in the linear area */
2213 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2214 memcpy(skb_put(skb, count), data, count);
2220 /* Continuation fragments (no L2CAP header) */
2221 frag = &skb_shinfo(skb)->frag_list;
2223 count = min_t(unsigned int, conn->mtu, len);
2225 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2229 memcpy(skb_put(*frag, count), data, count);
2234 frag = &(*frag)->next;
/* Decode one configuration option at *ptr, returning its total encoded
 * length and filling *type, *olen and *val.  1/2/4-byte values are read
 * by width; any other length returns a pointer to the raw bytes in
 * *val.  Advances *ptr past the option (the advance itself is outside
 * this excerpt).
 */
2244 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2246 struct l2cap_conf_opt *opt = *ptr;
2249 len = L2CAP_CONF_OPT_SIZE + opt->len;
2257 *val = *((u8 *) opt->val);
2261 *val = get_unaligned_le16(opt->val);
2265 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer to the raw payload */
2269 *val = (unsigned long) opt->val;
2273 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr, writing
 * 1/2/4-byte values by width and memcpy'ing larger payloads from the
 * pointer packed into 'val'.  Advances *ptr past the new option.
 */
2277 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2279 struct l2cap_conf_opt *opt = *ptr;
2281 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val)
2288 *((u8 *) opt->val) = val;
2292 put_unaligned_le16(val, opt->val);
2296 put_unaligned_le32(val, opt->val);
/* For larger options 'val' carries a pointer to the data */
2300 memcpy(opt->val, (void *) val, len);
2304 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification (EFS) option from the channel's
 * local QoS parameters and append it at *ptr.  ERTM uses the locally
 * negotiated values; streaming uses best-effort defaults.
 * NOTE(review): excerpt is incomplete — the default/return case of the
 * switch is not visible.
 */
2307 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2309 struct l2cap_conf_efs efs;
2311 switch (chan->mode) {
2312 case L2CAP_MODE_ERTM:
2313 efs.id = chan->local_id;
2314 efs.stype = chan->local_stype;
2315 efs.msdu = cpu_to_le16(chan->local_msdu);
2316 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2317 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2318 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2321 case L2CAP_MODE_STREAMING:
/* Streaming mode advertises a best-effort service type */
2323 efs.stype = L2CAP_SERV_BESTEFFORT;
2324 efs.msdu = cpu_to_le16(chan->local_msdu);
2325 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2334 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2335 (unsigned long) &efs);
/* Delayed-ack timer expiry (delayed work): send the pending
 * acknowledgement and drop the reference taken when the timer was
 * armed.
 */
2338 static void l2cap_ack_timeout(struct work_struct *work)
2340 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2343 BT_DBG("chan %p", chan);
2345 l2cap_chan_lock(chan);
2347 __l2cap_send_ack(chan);
2349 l2cap_chan_unlock(chan);
2351 l2cap_chan_put(chan);
/* Reset all ERTM/streaming sequencing state for 'chan' and, for ERTM
 * proper, initialise the state machines, timers, SREJ structures and
 * the two sequence lists.  Returns 0, or the error from sequence-list
 * allocation.
 */
2354 static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2358 chan->next_tx_seq = 0;
2359 chan->expected_tx_seq = 0;
2360 chan->expected_ack_seq = 0;
2361 chan->unacked_frames = 0;
2362 chan->buffer_seq = 0;
2363 chan->num_acked = 0;
2364 chan->frames_sent = 0;
2365 chan->last_acked_seq = 0;
2367 chan->sdu_last_frag = NULL;
2370 skb_queue_head_init(&chan->tx_q);
/* Streaming mode needs only the counters above */
2372 if (chan->mode != L2CAP_MODE_ERTM)
2375 chan->rx_state = L2CAP_RX_STATE_RECV;
2376 chan->tx_state = L2CAP_TX_STATE_XMIT;
2378 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2379 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2380 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2382 skb_queue_head_init(&chan->srej_q);
2384 INIT_LIST_HEAD(&chan->srej_l);
/* srej_list sized by our rx window, retrans_list by the peer's */
2385 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2389 return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* Pick an operating mode: keep ERTM/streaming only if the remote's
 * feature mask supports it, otherwise fall back to basic mode.
 */
2392 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2395 case L2CAP_MODE_STREAMING:
2396 case L2CAP_MODE_ERTM:
2397 if (l2cap_mode_supported(mode, remote_feat_mask))
2401 return L2CAP_MODE_BASIC;
/* Extended Window Size available: high-speed enabled and the remote
 * advertises the extended-window feature bit. */
2405 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2407 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Spec available: high-speed enabled and the remote
 * advertises the extended-flow feature bit. */
2410 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2412 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Choose the transmit-window configuration: switch to the extended
 * control field when the requested window exceeds the default and EWS
 * is supported; otherwise clamp the window to the enhanced-control
 * maximum.
 */
2415 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2417 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2418 __l2cap_ews_supported(chan)) {
2419 /* use extended control field */
2420 set_bit(FLAG_EXT_CTRL, &chan->flags);
2421 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2423 chan->tx_win = min_t(u16, chan->tx_win,
2424 L2CAP_DEFAULT_TX_WINDOW);
2425 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configure Request into 'data' for the channel's
 * current mode: MTU (if non-default), RFC option, optional EFS, FCS
 * and EWS options.  On the first request, the mode may be downgraded
 * based on the remote feature mask.  Returns the request length
 * (ptr - data, outside this excerpt).
 * NOTE(review): excerpt is incomplete — fall-through 'break's, the
 * 'done' label and final return are not visible.
 */
2429 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2431 struct l2cap_conf_req *req = data;
2432 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2433 void *ptr = req->data;
2436 BT_DBG("chan %p", chan);
/* Only negotiate the mode on the very first config exchange */
2438 if (chan->num_conf_req || chan->num_conf_rsp)
2441 switch (chan->mode) {
2442 case L2CAP_MODE_STREAMING:
2443 case L2CAP_MODE_ERTM:
2444 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2447 if (__l2cap_efs_supported(chan))
2448 set_bit(FLAG_EFS_ENABLE, &chan->flags)
2452 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2457 if (chan->imtu != L2CAP_DEFAULT_MTU)
2458 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2460 switch (chan->mode) {
2461 case L2CAP_MODE_BASIC:
/* Peer knows only basic mode: no RFC option needed at all */
2462 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2463 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2466 rfc.mode = L2CAP_MODE_BASIC;
2468 rfc.max_transmit = 0;
2469 rfc.retrans_timeout = 0;
2470 rfc.monitor_timeout = 0;
2471 rfc.max_pdu_size = 0;
2473 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2474 (unsigned long) &rfc);
2477 case L2CAP_MODE_ERTM:
2478 rfc.mode = L2CAP_MODE_ERTM;
2479 rfc.max_transmit = chan->max_tx;
/* Timeouts are dictated by the config-response side */
2480 rfc.retrans_timeout = 0;
2481 rfc.monitor_timeout = 0;
/* Cap MPS so a PDU plus worst-case overhead fits the HCI MTU */
2483 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2484 L2CAP_EXT_HDR_SIZE -
2487 rfc.max_pdu_size = cpu_to_le16(size);
2489 l2cap_txwin_setup(chan);
2491 rfc.txwin_size = min_t(u16, chan->tx_win,
2492 L2CAP_DEFAULT_TX_WINDOW);
2494 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2495 (unsigned long) &rfc);
2497 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2498 l2cap_add_opt_efs(&ptr, chan);
2500 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Offer to disable the FCS when allowed */
2503 if (chan->fcs == L2CAP_FCS_NONE ||
2504 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2505 chan->fcs = L2CAP_FCS_NONE;
2506 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2509 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2510 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2514 case L2CAP_MODE_STREAMING:
2515 rfc.mode = L2CAP_MODE_STREAMING;
2517 rfc.max_transmit = 0;
2518 rfc.retrans_timeout = 0;
2519 rfc.monitor_timeout = 0;
2521 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2522 L2CAP_EXT_HDR_SIZE -
2525 rfc.max_pdu_size = cpu_to_le16(size);
2527 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2528 (unsigned long) &rfc);
2530 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2531 l2cap_add_opt_efs(&ptr, chan);
2533 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2536 if (chan->fcs == L2CAP_FCS_NONE ||
2537 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2538 chan->fcs = L2CAP_FCS_NONE;
2539 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2544 req->dcid = cpu_to_le16(chan->dcid);
2545 req->flags = cpu_to_le16(0);
/* Parse the peer's buffered Configure Request (chan->conf_req /
 * conf_len) and build our Configure Response into 'data'.  Walks the
 * option list (MTU, flush timeout, RFC, FCS, EFS, EWS), negotiates the
 * operating mode, validates EFS, and fills per-mode response options.
 * Returns the response length (ptr - data, outside this excerpt) or
 * -ECONNREFUSED when negotiation cannot proceed.
 * NOTE(review): excerpt is incomplete — several break/continue lines,
 * the 'done' label and some hint handling are not visible.
 */
2550 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2552 struct l2cap_conf_rsp *rsp = data;
2553 void *ptr = rsp->data;
2554 void *req = chan->conf_req;
2555 int len = chan->conf_len;
2556 int type, hint, olen;
2558 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2559 struct l2cap_conf_efs efs;
2561 u16 mtu = L2CAP_DEFAULT_MTU;
2562 u16 result = L2CAP_CONF_SUCCESS;
2565 BT_DBG("chan %p", chan);
/* First pass: decode every option the peer sent */
2567 while (len >= L2CAP_CONF_OPT_SIZE) {
2568 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; mandatory ones may not */
2570 hint = type & L2CAP_CONF_HINT;
2571 type &= L2CAP_CONF_MASK;
2574 case L2CAP_CONF_MTU:
2578 case L2CAP_CONF_FLUSH_TO:
2579 chan->flush_to = val;
2582 case L2CAP_CONF_QOS:
2585 case L2CAP_CONF_RFC:
2586 if (olen == sizeof(rfc))
2587 memcpy(&rfc, (void *) val, olen);
2590 case L2CAP_CONF_FCS:
2591 if (val == L2CAP_FCS_NONE)
2592 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2595 case L2CAP_CONF_EFS:
2597 if (olen == sizeof(efs))
2598 memcpy(&efs, (void *) val, olen);
2601 case L2CAP_CONF_EWS:
/* EWS requires high-speed support on our side */
2603 return -ECONNREFUSED;
2605 set_bit(FLAG_EXT_CTRL, &chan->flags);
2606 set_bit(CONF_EWS_RECV, &chan->conf_state);
2607 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2608 chan->remote_tx_win = val;
/* Unknown non-hint option: echo it back as UNKNOWN */
2615 result = L2CAP_CONF_UNKNOWN;
2616 *((u8 *) ptr++) = type;
2621 if (chan->num_conf_rsp || chan->num_conf_req > 1)
/* Mode negotiation happens only on the first exchange */
2624 switch (chan->mode) {
2625 case L2CAP_MODE_STREAMING:
2626 case L2CAP_MODE_ERTM:
2627 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2628 chan->mode = l2cap_select_mode(rfc.mode,
2629 chan->conn->feat_mask);
2634 if (__l2cap_efs_supported(chan))
2635 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2637 return -ECONNREFUSED;
2640 if (chan->mode != rfc.mode)
2641 return -ECONNREFUSED;
/* Modes disagree: propose ours, refuse after one failed round */
2647 if (chan->mode != rfc.mode) {
2648 result = L2CAP_CONF_UNACCEPT;
2649 rfc.mode = chan->mode;
2651 if (chan->num_conf_rsp == 1)
2652 return -ECONNREFUSED;
2654 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2655 sizeof(rfc), (unsigned long) &rfc);
2658 if (result == L2CAP_CONF_SUCCESS) {
2659 /* Configure output options and let the other side know
2660 * which ones we don't like. */
2662 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2663 result = L2CAP_CONF_UNACCEPT;
2666 set_bit(CONF_MTU_DONE, &chan->conf_state);
2668 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must match ours unless either side is NOTRAFIC */
2671 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2672 efs.stype != L2CAP_SERV_NOTRAFIC &&
2673 efs.stype != chan->local_stype) {
2675 result = L2CAP_CONF_UNACCEPT;
2677 if (chan->num_conf_req >= 1)
2678 return -ECONNREFUSED;
2680 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2682 (unsigned long) &efs);
2684 /* Send PENDING Conf Rsp */
2685 result = L2CAP_CONF_PENDING;
2686 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2691 case L2CAP_MODE_BASIC:
2692 chan->fcs = L2CAP_FCS_NONE;
2693 set_bit(CONF_MODE_DONE, &chan->conf_state);
2696 case L2CAP_MODE_ERTM:
/* Window from EWS option takes precedence over the RFC field */
2697 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2698 chan->remote_tx_win = rfc.txwin_size;
2700 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2702 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's MPS to what fits our HCI MTU */
2704 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2706 L2CAP_EXT_HDR_SIZE -
2709 rfc.max_pdu_size = cpu_to_le16(size);
2710 chan->remote_mps = size;
/* As the responder we dictate the ERTM timeouts */
2712 rfc.retrans_timeout =
2713 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2714 rfc.monitor_timeout =
2715 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2717 set_bit(CONF_MODE_DONE, &chan->conf_state);
2719 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2720 sizeof(rfc), (unsigned long) &rfc);
2722 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2723 chan->remote_id = efs.id;
2724 chan->remote_stype = efs.stype;
2725 chan->remote_msdu = le16_to_cpu(efs.msdu);
2726 chan->remote_flush_to =
2727 le32_to_cpu(efs.flush_to);
2728 chan->remote_acc_lat =
2729 le32_to_cpu(efs.acc_lat);
2730 chan->remote_sdu_itime =
2731 le32_to_cpu(efs.sdu_itime);
2732 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2733 sizeof(efs), (unsigned long) &efs);
2737 case L2CAP_MODE_STREAMING:
2738 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2740 L2CAP_EXT_HDR_SIZE -
2743 rfc.max_pdu_size = cpu_to_le16(size);
2744 chan->remote_mps = size;
2746 set_bit(CONF_MODE_DONE, &chan->conf_state);
2748 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2749 sizeof(rfc), (unsigned long) &rfc);
2754 result = L2CAP_CONF_UNACCEPT;
2756 memset(&rfc, 0, sizeof(rfc));
2757 rfc.mode = chan->mode;
2760 if (result == L2CAP_CONF_SUCCESS)
2761 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2763 rsp->scid = cpu_to_le16(chan->dcid);
2764 rsp->result = cpu_to_le16(result);
2765 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configure Response and build our follow-up
 * Configure Request into 'data'.  Re-proposes adjusted values for
 * options the peer rejected (MTU, flush timeout, RFC, EWS, EFS), and
 * latches the negotiated ERTM/streaming parameters when the result is
 * SUCCESS or PENDING.  May update *result; returns the request length
 * (outside this excerpt) or -ECONNREFUSED on irreconcilable settings.
 * NOTE(review): excerpt is incomplete — break lines and the final
 * return are not visible.
 */
2770 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2772 struct l2cap_conf_req *req = data;
2773 void *ptr = req->data;
2776 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2777 struct l2cap_conf_efs efs;
2779 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2781 while (len >= L2CAP_CONF_OPT_SIZE) {
2782 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2785 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: counter with the floor */
2786 if (val < L2CAP_DEFAULT_MIN_MTU) {
2787 *result = L2CAP_CONF_UNACCEPT;
2788 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2791 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2794 case L2CAP_CONF_FLUSH_TO:
2795 chan->flush_to = val;
2796 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2800 case L2CAP_CONF_RFC:
2801 if (olen == sizeof(rfc))
2802 memcpy(&rfc, (void *)val, olen);
/* STATE2 devices cannot change mode: refuse a mode switch */
2804 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2805 rfc.mode != chan->mode)
2806 return -ECONNREFUSED;
2810 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2811 sizeof(rfc), (unsigned long) &rfc);
2814 case L2CAP_CONF_EWS:
2815 chan->tx_win = min_t(u16, val,
2816 L2CAP_DEFAULT_EXT_WINDOW);
2817 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2821 case L2CAP_CONF_EFS:
2822 if (olen == sizeof(efs))
2823 memcpy(&efs, (void *)val, olen);
/* Service type must be compatible with our local one */
2825 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2826 efs.stype != L2CAP_SERV_NOTRAFIC &&
2827 efs.stype != chan->local_stype)
2828 return -ECONNREFUSED;
2830 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2831 sizeof(efs), (unsigned long) &efs);
/* Basic mode cannot be renegotiated into anything else */
2836 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2837 return -ECONNREFUSED;
2839 chan->mode = rfc.mode;
/* Latch the agreed parameters for the active mode */
2841 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2843 case L2CAP_MODE_ERTM:
2844 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2845 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2846 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2848 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2849 chan->local_msdu = le16_to_cpu(efs.msdu);
2850 chan->local_sdu_itime =
2851 le32_to_cpu(efs.sdu_itime);
2852 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2853 chan->local_flush_to =
2854 le32_to_cpu(efs.flush_to);
2858 case L2CAP_MODE_STREAMING:
2859 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2863 req->dcid = cpu_to_le16(chan->dcid);
2864 req->flags = cpu_to_le16(0x0000);
/* Fill a minimal Configure Response header (source CID, result, flags)
 * into 'data'.  The response length is returned by the code following
 * this excerpt.
 */
2869 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2871 struct l2cap_conf_rsp *rsp = data;
2872 void *ptr = rsp->data;
2874 BT_DBG("chan %p", chan);
2876 rsp->scid = cpu_to_le16(chan->dcid);
2877 rsp->result = cpu_to_le16(result);
2878 rsp->flags = cpu_to_le16(flags);
/* Complete a deferred incoming connection: send the success Connect
 * Response recorded under chan->ident, then kick off configuration by
 * sending our first Configure Request (unless one was already sent).
 */
2883 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2885 struct l2cap_conn_rsp rsp;
2886 struct l2cap_conn *conn = chan->conn;
2889 rsp.scid = cpu_to_le16(chan->dcid);
2890 rsp.dcid = cpu_to_le16(chan->scid);
2891 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2892 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2893 l2cap_send_cmd(conn, chan->ident,
2894 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only the first caller sends the Configure Request */
2896 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2899 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2900 l2cap_build_conf_req(chan, buf), buf);
2901 chan->num_conf_req++;
/* Extract the RFC option from a Configure Response and apply the
 * negotiated ERTM/streaming parameters to the channel.  If the remote
 * omitted the RFC option, substitute sane defaults and log an error.
 * No-op for basic-mode channels.
 */
2904 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2908 struct l2cap_conf_rfc rfc;
2910 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM/streaming carry meaningful RFC parameters */
2912 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2915 while (len >= L2CAP_CONF_OPT_SIZE) {
2916 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2919 case L2CAP_CONF_RFC:
2920 if (olen == sizeof(rfc))
2921 memcpy(&rfc, (void *)val, olen);
2926 /* Use sane default values in case a misbehaving remote device
2927 * did not send an RFC option.
2929 rfc.mode = chan->mode;
2930 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2931 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2932 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2934 BT_ERR("Expected RFC option was not found, using defaults");
2938 case L2CAP_MODE_ERTM:
2939 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2940 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2941 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2943 case L2CAP_MODE_STREAMING:
2944 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request (matching ident), treat the feature-mask exchange
 * as done and resume starting channels on this connection.
 */
2948 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2950 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2952 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
/* Reject matches our pending info request: give up on the query */
2955 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2956 cmd->ident == conn->info_ident) {
2957 cancel_delayed_work(&conn->info_timer);
2959 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2960 conn->info_ident = 0;
2962 l2cap_conn_start(conn);
/* Handle an incoming L2CAP Connection Request: look up a listening
 * channel for the PSM, create a new child channel, and reply with a
 * Connection Response (success, pending, or an error result).
 * NOTE(review): line-sampled listing — error paths/labels between the
 * numbered lines are not visible here.
 */
2968 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2970 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2971 struct l2cap_conn_rsp rsp;
2972 struct l2cap_chan *chan = NULL, *pchan;
2973 struct sock *parent, *sk = NULL;
2974 int result, status = L2CAP_CS_NO_INFO;
2976 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2977 __le16 psm = req->psm;
2979 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
2981 /* Check if we have socket listening on psm */
2982 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
2984 result = L2CAP_CR_BAD_PSM;
2990 mutex_lock(&conn->chan_lock);
2993 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
2994 if (psm != cpu_to_le16(0x0001) &&
2995 !hci_conn_check_link_mode(conn->hcon)) {
2996 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2997 result = L2CAP_CR_SEC_BLOCK;
/* From here on, any allocation/lookup failure answers with "no memory". */
3001 result = L2CAP_CR_NO_MEM;
3003 /* Check for backlog size */
3004 if (sk_acceptq_is_full(parent)) {
3005 BT_DBG("backlog full %d", parent->sk_ack_backlog);
3009 chan = pchan->ops->new_connection(pchan->data);
3015 /* Check if we already have channel with that dcid */
/* The remote's scid becomes our dcid, so a duplicate means a stale
 * channel; zap the new socket and close the freshly created channel. */
3016 if (__l2cap_get_chan_by_dcid(conn, scid)) {
3017 sock_set_flag(sk, SOCK_ZAPPED);
3018 chan->ops->close(chan->data);
3022 hci_conn_hold(conn->hcon);
3024 bacpy(&bt_sk(sk)->src, conn->src);
3025 bacpy(&bt_sk(sk)->dst, conn->dst);
3029 bt_accept_enqueue(parent, sk);
3031 __l2cap_chan_add(conn, chan);
3035 __set_chan_timer(chan, sk->sk_sndtimeo);
3037 chan->ident = cmd->ident;
/* Only proceed past CONNECT2 once the remote's feature mask is known. */
3039 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3040 if (l2cap_chan_check_security(chan)) {
/* Deferred setup: hold in CONNECT2 and wake the listener so
 * userspace can authorize the connection. */
3041 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3042 __l2cap_state_change(chan, BT_CONNECT2);
3043 result = L2CAP_CR_PEND;
3044 status = L2CAP_CS_AUTHOR_PEND;
3045 parent->sk_data_ready(parent, 0);
3047 __l2cap_state_change(chan, BT_CONFIG);
3048 result = L2CAP_CR_SUCCESS;
3049 status = L2CAP_CS_NO_INFO;
/* Security check still pending: answer "pending / authentication". */
3052 __l2cap_state_change(chan, BT_CONNECT2);
3053 result = L2CAP_CR_PEND;
3054 status = L2CAP_CS_AUTHEN_PEND;
/* Feature mask not yet known: answer "pending / no further info". */
3057 __l2cap_state_change(chan, BT_CONNECT2);
3058 result = L2CAP_CR_PEND;
3059 status = L2CAP_CS_NO_INFO;
3063 release_sock(parent);
3064 mutex_unlock(&conn->chan_lock);
/* Send the Connection Response reflecting the chosen result/status. */
3067 rsp.scid = cpu_to_le16(scid);
3068 rsp.dcid = cpu_to_le16(dcid);
3069 rsp.result = cpu_to_le16(result);
3070 rsp.status = cpu_to_le16(status);
3071 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* If we replied "pending / no info", start feature discovery now. */
3073 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3074 struct l2cap_info_req info;
3075 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3077 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3078 conn->info_ident = l2cap_get_ident(conn);
3080 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3082 l2cap_send_cmd(conn, conn->info_ident,
3083 L2CAP_INFO_REQ, sizeof(info), &info);
/* On success, immediately open the configuration phase (once). */
3086 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3087 result == L2CAP_CR_SUCCESS) {
3089 set_bit(CONF_REQ_SENT, &chan->conf_state);
3090 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3091 l2cap_build_conf_req(chan, buf), buf);
3092 chan->num_conf_req++;
/* Handle an incoming L2CAP Connection Response for a connection we
 * initiated. Locates the channel by scid (or by command ident when the
 * scid lookup fails) and advances or tears down its state accordingly.
 * NOTE(review): line-sampled listing — some lines are not visible here.
 */
3098 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3100 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3101 u16 scid, dcid, result, status;
3102 struct l2cap_chan *chan;
3106 scid = __le16_to_cpu(rsp->scid);
3107 dcid = __le16_to_cpu(rsp->dcid);
3108 result = __le16_to_cpu(rsp->result);
3109 status = __le16_to_cpu(rsp->status);
3111 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3112 dcid, scid, result, status);
3114 mutex_lock(&conn->chan_lock);
/* Prefer lookup by our scid; fall back to the pending command ident. */
3117 chan = __l2cap_get_chan_by_scid(conn, scid);
3123 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3132 l2cap_chan_lock(chan);
/* (switch on result:) */
3135 case L2CAP_CR_SUCCESS:
/* Connection accepted: enter configuration and send our Conf Req
 * exactly once (CONF_REQ_SENT guards against duplicates). */
3136 l2cap_state_change(chan, BT_CONFIG);
3139 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3141 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3144 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3145 l2cap_build_conf_req(chan, req), req);
3146 chan->num_conf_req++;
/* Result pending: remember that we are waiting on the remote. */
3150 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result: connection refused — delete the channel. */
3154 l2cap_chan_del(chan, ECONNREFUSED);
3158 l2cap_chan_unlock(chan);
3161 mutex_unlock(&conn->chan_lock);
/* Pick the channel's FCS setting after configuration completes:
 * FCS applies only to ERTM/streaming modes, and defaults to CRC16
 * unless the peer explicitly negotiated "no FCS" (CONF_NO_FCS_RECV).
 */
3166 static inline void set_default_fcs(struct l2cap_chan *chan)
3168 /* FCS is enabled only in ERTM or streaming mode, if one or both
3171 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3172 chan->fcs = L2CAP_FCS_NONE;
3173 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3174 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming L2CAP Configuration Request. Accumulates possibly
 * multi-fragment config data in chan->conf_req, then parses and answers
 * it once the continuation flag is clear; may complete the channel
 * (BT_CONNECTED) when both directions are configured.
 * NOTE(review): line-sampled listing — some return/brace lines are not
 * visible here.
 */
3177 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3179 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3182 struct l2cap_chan *chan;
3185 dcid = __le16_to_cpu(req->dcid);
3186 flags = __le16_to_cpu(req->flags);
3188 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3190 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal in BT_CONFIG/BT_CONNECT2; otherwise reject
 * with "invalid CID". */
3194 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3195 struct l2cap_cmd_rej_cid rej;
3197 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3198 rej.scid = cpu_to_le16(chan->scid);
3199 rej.dcid = cpu_to_le16(chan->dcid);
3201 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3206 /* Reject if config buffer is too small. */
/* len guards against both truncated commands (len < 0) and overflow
 * of the accumulated conf_req buffer. */
3207 len = cmd_len - sizeof(*req);
3208 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3209 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3210 l2cap_build_conf_rsp(chan, rsp,
3211 L2CAP_CONF_REJECT, flags), rsp);
3216 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3217 chan->conf_len += len;
/* Continuation flag (bit 0): more fragments follow — ack and wait. */
3219 if (flags & 0x0001) {
3220 /* Incomplete config. Send empty response. */
3221 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3222 l2cap_build_conf_rsp(chan, rsp,
3223 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3227 /* Complete config. */
3228 len = l2cap_parse_conf_req(chan, rsp);
/* Unparsable request: tear the channel down. */
3230 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3234 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3235 chan->num_conf_rsp++;
3237 /* Reset config buffer. */
3240 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both sides configured: finalize FCS, go connected and (for
 * ERTM/streaming) initialize the retransmission machinery. */
3243 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3244 set_default_fcs(chan);
3246 l2cap_state_change(chan, BT_CONNECTED);
3248 if (chan->mode == L2CAP_MODE_ERTM ||
3249 chan->mode == L2CAP_MODE_STREAMING)
3250 err = l2cap_ertm_init(chan);
3253 l2cap_send_disconn_req(chan->conn, chan, -err);
3255 l2cap_chan_ready(chan);
/* If we never sent our own Conf Req, do so now (guarded once). */
3260 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3262 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3263 l2cap_build_conf_req(chan, buf), buf);
3264 chan->num_conf_req++;
3267 /* Got Conf Rsp PENDING from remote side and asume we sent
3268 Conf Rsp PENDING in the code above */
3269 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3270 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3272 /* check compatibility */
3274 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3275 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3277 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3278 l2cap_build_conf_rsp(chan, rsp,
3279 L2CAP_CONF_SUCCESS, 0x0000), rsp);
3283 l2cap_chan_unlock(chan);
/* Handle an incoming L2CAP Configuration Response to our Conf Req.
 * Dispatches on the result (success / pending / unacceptable / other)
 * and may renegotiate, complete the channel, or disconnect it.
 * NOTE(review): line-sampled listing — break/return lines between the
 * numbered lines are not visible here.
 */
3287 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3289 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3290 u16 scid, flags, result;
3291 struct l2cap_chan *chan;
3292 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3295 scid = __le16_to_cpu(rsp->scid);
3296 flags = __le16_to_cpu(rsp->flags);
3297 result = __le16_to_cpu(rsp->result);
3299 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3302 chan = l2cap_get_chan_by_scid(conn, scid);
/* (switch on result:) */
3307 case L2CAP_CONF_SUCCESS:
3308 l2cap_conf_rfc_get(chan, rsp->data, len);
3309 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3312 case L2CAP_CONF_PENDING:
3313 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
/* If our own response is also pending, re-parse and answer now. */
3315 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3318 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3321 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3325 /* check compatibility */
3327 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3328 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3330 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3331 l2cap_build_conf_rsp(chan, buf,
3332 L2CAP_CONF_SUCCESS, 0x0000), buf);
3336 case L2CAP_CONF_UNACCEPT:
/* Retry with adjusted options, but bound the number of attempts
 * by L2CAP_CONF_MAX_CONF_RSP to avoid an endless negotiation. */
3337 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3340 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3341 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3345 /* throw out any old stored conf requests */
3346 result = L2CAP_CONF_SUCCESS;
3347 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3350 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3354 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3355 L2CAP_CONF_REQ, len, req);
3356 chan->num_conf_req++;
3357 if (result != L2CAP_CONF_SUCCESS)
/* Any other result: give up on the channel. */
3363 l2cap_chan_set_err(chan, ECONNRESET);
3365 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3366 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3373 set_bit(CONF_INPUT_DONE, &chan->conf_state);
/* Both directions configured: finalize and go connected, initializing
 * ERTM/streaming state where needed. */
3375 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3376 set_default_fcs(chan);
3378 l2cap_state_change(chan, BT_CONNECTED);
3379 if (chan->mode == L2CAP_MODE_ERTM ||
3380 chan->mode == L2CAP_MODE_STREAMING)
3381 err = l2cap_ertm_init(chan);
3384 l2cap_send_disconn_req(chan->conn, chan, -err);
3386 l2cap_chan_ready(chan);
3390 l2cap_chan_unlock(chan);
/* Handle an incoming L2CAP Disconnection Request: acknowledge with a
 * Disconnection Response, then tear the channel down.
 * NOTE(review): line-sampled listing — some lines are not visible here.
 */
3394 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3396 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3397 struct l2cap_disconn_rsp rsp;
3399 struct l2cap_chan *chan;
3402 scid = __le16_to_cpu(req->scid);
3403 dcid = __le16_to_cpu(req->dcid);
3405 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3407 mutex_lock(&conn->chan_lock);
/* The request's dcid names *our* scid. */
3409 chan = __l2cap_get_chan_by_scid(conn, dcid);
3411 mutex_unlock(&conn->chan_lock);
3415 l2cap_chan_lock(chan);
/* Echo back the CIDs swapped into the response, as the spec requires. */
3419 rsp.dcid = cpu_to_le16(chan->scid);
3420 rsp.scid = cpu_to_le16(chan->dcid);
3421 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3424 sk->sk_shutdown = SHUTDOWN_MASK;
/* Hold a ref so the channel survives until ops->close/put below,
 * even after l2cap_chan_del drops it from the connection. */
3427 l2cap_chan_hold(chan);
3428 l2cap_chan_del(chan, ECONNRESET);
3430 l2cap_chan_unlock(chan);
3432 chan->ops->close(chan->data);
3433 l2cap_chan_put(chan);
3435 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Disconnection Response to a request we sent:
 * finish tearing down the channel identified by our scid.
 * NOTE(review): line-sampled listing — some lines are not visible here.
 */
3440 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3442 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3444 struct l2cap_chan *chan;
3446 scid = __le16_to_cpu(rsp->scid);
3447 dcid = __le16_to_cpu(rsp->dcid);
3449 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3451 mutex_lock(&conn->chan_lock);
3453 chan = __l2cap_get_chan_by_scid(conn, scid);
3455 mutex_unlock(&conn->chan_lock);
3459 l2cap_chan_lock(chan);
/* Hold a ref across chan_del so close/put below stay valid. */
3461 l2cap_chan_hold(chan);
3462 l2cap_chan_del(chan, 0);
3464 l2cap_chan_unlock(chan);
3466 chan->ops->close(chan->data);
3467 l2cap_chan_put(chan);
3469 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Information Request. Answers the feature
 * mask and fixed-channel queries; anything else gets "not supported".
 * NOTE(review): line-sampled listing — some lines are not visible here.
 */
3474 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3476 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3479 type = __le16_to_cpu(req->type);
3481 BT_DBG("type 0x%4.4x", type);
3483 if (type == L2CAP_IT_FEAT_MASK) {
/* Build the extended feature mask on top of the compile-time base. */
3485 u32 feat_mask = l2cap_feat_mask;
3486 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3487 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3488 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3490 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3493 feat_mask |= L2CAP_FEAT_EXT_FLOW
3494 | L2CAP_FEAT_EXT_WINDOW;
3496 put_unaligned_le32(feat_mask, rsp->data);
3497 l2cap_send_cmd(conn, cmd->ident,
3498 L2CAP_INFO_RSP, sizeof(buf), buf);
3499 } else if (type == L2CAP_IT_FIXED_CHAN) {
3501 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel bit only when available. */
3504 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3506 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3508 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3509 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3510 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3511 l2cap_send_cmd(conn, cmd->ident,
3512 L2CAP_INFO_RSP, sizeof(buf), buf);
/* Unknown info type: echo it back with NOTSUPP. */
3514 struct l2cap_info_rsp rsp;
3515 rsp.type = cpu_to_le16(type);
3516 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3517 l2cap_send_cmd(conn, cmd->ident,
3518 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming L2CAP Information Response to our request.
 * Records the remote feature mask / fixed-channel mask, chains a
 * fixed-channel query after the feature query when supported, and
 * finally kicks pending connections via l2cap_conn_start().
 * NOTE(review): line-sampled listing — some lines are not visible here.
 */
3524 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3526 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3529 type = __le16_to_cpu(rsp->type);
3530 result = __le16_to_cpu(rsp->result);
3532 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3534 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3535 if (cmd->ident != conn->info_ident ||
3536 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3539 cancel_delayed_work(&conn->info_timer);
/* A failed query still concludes discovery; proceed with defaults. */
3541 if (result != L2CAP_IR_SUCCESS) {
3542 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3543 conn->info_ident = 0;
3545 l2cap_conn_start(conn);
/* (switch on type:) */
3551 case L2CAP_IT_FEAT_MASK:
3552 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If fixed channels are supported, ask for their mask next. */
3554 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3555 struct l2cap_info_req req;
3556 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3558 conn->info_ident = l2cap_get_ident(conn);
3560 l2cap_send_cmd(conn, conn->info_ident,
3561 L2CAP_INFO_REQ, sizeof(req), &req);
3563 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3564 conn->info_ident = 0;
3566 l2cap_conn_start(conn);
3570 case L2CAP_IT_FIXED_CHAN:
3571 conn->fixed_chan_mask = rsp->data[0];
3572 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3573 conn->info_ident = 0;
3575 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request. Currently a placeholder that
 * validates the command length and always answers "no resources".
 */
3582 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3583 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3586 struct l2cap_create_chan_req *req = data;
3587 struct l2cap_create_chan_rsp rsp;
/* Malformed command: length must match the request structure. */
3590 if (cmd_len != sizeof(*req))
3596 psm = le16_to_cpu(req->psm);
3597 scid = le16_to_cpu(req->scid);
3599 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3601 /* Placeholder: Always reject */
3603 rsp.scid = cpu_to_le16(scid);
3604 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3605 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3607 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Handle an AMP Create Channel Response — identical wire format to a
 * Connection Response, so delegate to the ordinary handler.
 */
3613 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3614 struct l2cap_cmd_hdr *cmd, void *data)
3616 BT_DBG("conn %p", conn);
3618 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response for @icid with the given @result,
 * reusing the requester's @ident.
 */
3621 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3622 u16 icid, u16 result)
3624 struct l2cap_move_chan_rsp rsp;
3626 BT_DBG("icid %d, result %d", icid, result);
3628 rsp.icid = cpu_to_le16(icid);
3629 rsp.result = cpu_to_le16(result);
3631 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirm with a fresh command ident; the ident is
 * also stored in chan->ident so the matching Confirm Response can be
 * correlated later.
 * NOTE(review): callers may pass chan == NULL (see the placeholder move
 * response handler) — in that case chan->ident would be a NULL deref;
 * confirm against the full file before relying on this path.
 */
3634 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3635 struct l2cap_chan *chan, u16 icid, u16 result)
3637 struct l2cap_move_chan_cfm cfm;
3640 BT_DBG("icid %d, result %d", icid, result);
3642 ident = l2cap_get_ident(conn);
3644 chan->ident = ident;
3646 cfm.icid = cpu_to_le16(icid);
3647 cfm.result = cpu_to_le16(result);
3649 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Send a Move Channel Confirm Response for @icid, echoing @ident. */
3652 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3655 struct l2cap_move_chan_cfm_rsp rsp;
3657 BT_DBG("icid %d", icid);
3659 rsp.icid = cpu_to_le16(icid);
3660 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle an AMP Move Channel Request. Placeholder: validates length and
 * always refuses the move (L2CAP_MR_NOT_ALLOWED).
 */
3663 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3664 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3666 struct l2cap_move_chan_req *req = data;
3668 u16 result = L2CAP_MR_NOT_ALLOWED;
3670 if (cmd_len != sizeof(*req))
3673 icid = le16_to_cpu(req->icid);
3675 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3680 /* Placeholder: Always refuse */
3681 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle an AMP Move Channel Response. Placeholder: always answers with
 * an UNCONFIRMED Move Channel Confirm (chan is passed as NULL here).
 */
3686 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3687 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3689 struct l2cap_move_chan_rsp *rsp = data;
3692 if (cmd_len != sizeof(*rsp))
3695 icid = le16_to_cpu(rsp->icid);
3696 result = le16_to_cpu(rsp->result);
3698 BT_DBG("icid %d, result %d", icid, result);
3700 /* Placeholder: Always unconfirmed */
3701 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle an AMP Move Channel Confirm: validate length and acknowledge
 * with a Confirm Response. No channel state is changed here.
 */
3706 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3707 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3709 struct l2cap_move_chan_cfm *cfm = data;
3712 if (cmd_len != sizeof(*cfm))
3715 icid = le16_to_cpu(cfm->icid);
3716 result = le16_to_cpu(cfm->result);
3718 BT_DBG("icid %d, result %d", icid, result);
3720 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle an AMP Move Channel Confirm Response: validate length and log;
 * nothing further is done in this placeholder implementation.
 */
3725 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3726 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3728 struct l2cap_move_chan_cfm_rsp *rsp = data;
3731 if (cmd_len != sizeof(*rsp))
3734 icid = le16_to_cpu(rsp->icid);
3736 BT_DBG("icid %d", icid);
/* Validate LE connection parameters (interval min/max, slave latency,
 * supervision timeout multiplier) against the ranges visible below, and
 * additionally require latency to fit within the supervision timeout.
 * NOTE(review): the return statements between these sampled lines are
 * not visible here.
 */
3741 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3746 if (min > max || min < 6 || max > 3200)
3749 if (to_multiplier < 10 || to_multiplier > 3200)
/* The supervision timeout must exceed the maximum connection interval. */
3752 if (max >= to_multiplier * 8)
3755 max_latency = (to_multiplier * 8 / max) - 1;
3756 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request: only the master may
 * process it; validate the proposed parameters, answer with accept or
 * reject, and on accept apply them via hci_le_conn_update().
 * NOTE(review): line-sampled listing — some lines are not visible here.
 */
3762 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3763 struct l2cap_cmd_hdr *cmd, u8 *data)
3765 struct hci_conn *hcon = conn->hcon;
3766 struct l2cap_conn_param_update_req *req;
3767 struct l2cap_conn_param_update_rsp rsp;
3768 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the connection master may act on this request. */
3771 if (!(hcon->link_mode & HCI_LM_MASTER))
3774 cmd_len = __le16_to_cpu(cmd->len);
3775 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3778 req = (struct l2cap_conn_param_update_req *) data;
3779 min = __le16_to_cpu(req->min);
3780 max = __le16_to_cpu(req->max);
3781 latency = __le16_to_cpu(req->latency);
3782 to_multiplier = __le16_to_cpu(req->to_multiplier);
3784 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3785 min, max, latency, to_multiplier);
3787 memset(&rsp, 0, sizeof(rsp));
3789 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3791 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3793 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3795 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the new parameters down to the controller. */
3799 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch a single BR/EDR signaling command to its handler based on
 * the command code. Echo Requests are answered inline; unknown codes
 * are logged and (per the callers' convention) rejected upstream.
 * NOTE(review): line-sampled listing — break statements between cases
 * are not visible here.
 */
3804 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3805 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3809 switch (cmd->code) {
3810 case L2CAP_COMMAND_REJ:
3811 l2cap_command_rej(conn, cmd, data);
3814 case L2CAP_CONN_REQ:
3815 err = l2cap_connect_req(conn, cmd, data);
3818 case L2CAP_CONN_RSP:
3819 err = l2cap_connect_rsp(conn, cmd, data);
3822 case L2CAP_CONF_REQ:
3823 err = l2cap_config_req(conn, cmd, cmd_len, data);
3826 case L2CAP_CONF_RSP:
3827 err = l2cap_config_rsp(conn, cmd, data);
3830 case L2CAP_DISCONN_REQ:
3831 err = l2cap_disconnect_req(conn, cmd, data);
3834 case L2CAP_DISCONN_RSP:
3835 err = l2cap_disconnect_rsp(conn, cmd, data);
3838 case L2CAP_ECHO_REQ:
/* Echo back the request payload verbatim. */
3839 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3842 case L2CAP_ECHO_RSP:
3845 case L2CAP_INFO_REQ:
3846 err = l2cap_information_req(conn, cmd, data);
3849 case L2CAP_INFO_RSP:
3850 err = l2cap_information_rsp(conn, cmd, data);
3853 case L2CAP_CREATE_CHAN_REQ:
3854 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3857 case L2CAP_CREATE_CHAN_RSP:
3858 err = l2cap_create_channel_rsp(conn, cmd, data);
3861 case L2CAP_MOVE_CHAN_REQ:
3862 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3865 case L2CAP_MOVE_CHAN_RSP:
3866 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3869 case L2CAP_MOVE_CHAN_CFM:
3870 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3873 case L2CAP_MOVE_CHAN_CFM_RSP:
3874 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3878 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch a single LE signaling command. Only the connection parameter
 * update request has a real handler; rejects and update responses are
 * accepted silently, unknown codes are logged as errors.
 */
3886 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3887 struct l2cap_cmd_hdr *cmd, u8 *data)
3889 switch (cmd->code) {
3890 case L2CAP_COMMAND_REJ:
3893 case L2CAP_CONN_PARAM_UPDATE_REQ:
3894 return l2cap_conn_param_update_req(conn, cmd, data);
3896 case L2CAP_CONN_PARAM_UPDATE_RSP:
3900 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process an skb received on the L2CAP signaling channel: iterate over
 * the contained commands, dispatching each to the LE or BR/EDR handler
 * depending on link type, and send a Command Reject on handler failure.
 * NOTE(review): line-sampled listing — some lines are not visible here.
 */
3905 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3906 struct sk_buff *skb)
3908 u8 *data = skb->data;
3910 struct l2cap_cmd_hdr cmd;
/* Give raw sockets a copy of the signaling traffic first. */
3913 l2cap_raw_recv(conn, skb);
/* A single signaling skb may carry several commands back to back. */
3915 while (len >= L2CAP_CMD_HDR_SIZE) {
3917 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3918 data += L2CAP_CMD_HDR_SIZE;
3919 len -= L2CAP_CMD_HDR_SIZE;
3921 cmd_len = le16_to_cpu(cmd.len);
3923 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Guard against truncated commands and the reserved ident 0. */
3925 if (cmd_len > len || !cmd.ident) {
3926 BT_DBG("corrupted command");
3930 if (conn->hcon->type == LE_LINK)
3931 err = l2cap_le_sig_cmd(conn, &cmd, data);
3933 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3936 struct l2cap_cmd_rej_unk rej;
3938 BT_ERR("Wrong link type (%d)", err);
3940 /* FIXME: Map err to a valid reason */
3941 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3942 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 FCS trailer of a received ERTM/streaming frame.
 * The FCS covers the L2CAP header (which precedes skb->data by
 * hdr_size) plus the payload; the trailer is trimmed off first and then
 * read from just past the shortened length, where its bytes still live.
 */
3952 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3954 u16 our_fcs, rcv_fcs;
/* Extended vs enhanced control field changes the header size. */
3957 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3958 hdr_size = L2CAP_EXT_HDR_SIZE;
3960 hdr_size = L2CAP_ENH_HDR_SIZE;
3962 if (chan->fcs == L2CAP_FCS_CRC16) {
3963 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3964 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3965 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3967 if (our_fcs != rcv_fcs)
/* After a poll, respond with whatever is appropriate: an RNR when we
 * are locally busy, pending I-frames/retransmissions otherwise, and an
 * RR if nothing at all ended up being sent.
 */
3973 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3977 chan->frames_sent = 0;
3979 control |= __set_reqseq(chan, chan->buffer_seq);
3981 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3982 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3983 l2cap_send_sframe(chan, control);
3984 set_bit(CONN_RNR_SENT, &chan->conn_state);
3987 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3988 l2cap_retransmit_frames(chan);
3990 l2cap_ertm_send(chan);
/* Nothing was sent and we are not busy: acknowledge with an RR. */
3992 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3993 chan->frames_sent == 0) {
3994 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3995 l2cap_send_sframe(chan, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue sorted by tx_seq offset relative to buffer_seq. Duplicate
 * tx_seq values are detected (the bail-out line is not visible in this
 * sampled listing).
 */
3999 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
4001 struct sk_buff *next_skb;
4002 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequencing metadata in the skb control block for reassembly. */
4004 bt_cb(skb)->control.txseq = tx_seq;
4005 bt_cb(skb)->control.sar = sar;
4007 next_skb = skb_peek(&chan->srej_q);
4009 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4012 if (bt_cb(next_skb)->control.txseq == tx_seq)
4015 next_tx_seq_offset = __seq_offset(chan,
4016 bt_cb(next_skb)->control.txseq, chan->buffer_seq);
/* Found the first queued frame with a larger offset: insert before it. */
4018 if (next_tx_seq_offset > tx_seq_offset) {
4019 __skb_queue_before(&chan->srej_q, next_skb, skb);
4023 if (skb_queue_is_last(&chan->srej_q, next_skb))
4026 next_skb = skb_queue_next(&chan->srej_q, next_skb);
/* Largest offset so far: append at the tail. */
4029 __skb_queue_tail(&chan->srej_q, skb);
/* Append @new_frag to @skb's frag_list, tracking the current tail in
 * *@last_frag to keep appends O(1), and update the aggregate skb
 * accounting (len, data_len, truesize).
 */
4034 static void append_skb_frag(struct sk_buff *skb,
4035 struct sk_buff *new_frag, struct sk_buff **last_frag)
4037 /* skb->len reflects data in skb as well as all fragments
4038 * skb->data_len reflects only data in fragments
4040 if (!skb_has_frag_list(skb))
4041 skb_shinfo(skb)->frag_list = new_frag;
4043 new_frag->next = NULL;
4045 (*last_frag)->next = new_frag;
4046 *last_frag = new_frag;
4048 skb->len += new_frag->len;
4049 skb->data_len += new_frag->len;
4050 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from SAR-segmented I-frames. Unsegmented frames go
 * straight up via ops->recv; START frames open a new SDU (first two
 * bytes carry the total SDU length), CONTINUE/END frames are chained on
 * as fragments, and the completed SDU is delivered on END.
 * NOTE(review): line-sampled listing — error/cleanup paths between the
 * numbered lines are not visible here.
 */
4053 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
4057 switch (__get_ctrl_sar(chan, control)) {
4058 case L2CAP_SAR_UNSEGMENTED:
4062 err = chan->ops->recv(chan->data, skb);
4065 case L2CAP_SAR_START:
/* The START frame prefixes the payload with the total SDU length. */
4069 chan->sdu_len = get_unaligned_le16(skb->data);
4070 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Reject SDUs larger than our incoming MTU. */
4072 if (chan->sdu_len > chan->imtu) {
4077 if (skb->len >= chan->sdu_len)
4081 chan->sdu_last_frag = skb;
4087 case L2CAP_SAR_CONTINUE:
4091 append_skb_frag(chan->sdu, skb,
4092 &chan->sdu_last_frag);
/* A CONTINUE frame must not already complete or exceed the SDU. */
4095 if (chan->sdu->len >= chan->sdu_len)
/* (L2CAP_SAR_END:) append the final fragment. */
4105 append_skb_frag(chan->sdu, skb,
4106 &chan->sdu_last_frag);
/* On END the accumulated length must match the announced SDU length. */
4109 if (chan->sdu->len != chan->sdu_len)
4112 err = chan->ops->recv(chan->data, chan->sdu);
4115 /* Reassembly complete */
4117 chan->sdu_last_frag = NULL;
/* Error path: drop the partially reassembled SDU. */
4125 kfree_skb(chan->sdu);
4127 chan->sdu_last_frag = NULL;
/* Enter the ERTM local-busy state: mark the flag, drop any pending
 * selective-reject list, and arm the ack timer so an RNR gets sent.
 */
4134 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
4136 BT_DBG("chan %p, Enter local busy", chan);
4138 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4139 l2cap_seq_list_clear(&chan->srej_list);
4141 __set_ack_timer(chan);
/* Leave the ERTM local-busy state. If an RNR was previously sent, poll
 * the remote with an RR (P-bit set) and wait for the F-bit response
 * under the monitor timer; then clear the busy/RNR flags.
 */
4144 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
4148 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4151 control = __set_reqseq(chan, chan->buffer_seq);
4152 control |= __set_ctrl_poll(chan);
4153 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
4154 l2cap_send_sframe(chan, control);
4155 chan->retry_count = 1;
/* Switch from retransmission to monitor timing while polling. */
4157 __clear_retrans_timer(chan);
4158 __set_monitor_timer(chan);
4160 set_bit(CONN_WAIT_F, &chan->conn_state);
4163 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4164 clear_bit(CONN_RNR_SENT, &chan->conn_state);
4166 BT_DBG("chan %p, Exit local busy", chan);
/* Public entry point to toggle local-busy for a channel; only relevant
 * in ERTM mode — other modes ignore it.
 */
4169 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4171 if (chan->mode == L2CAP_MODE_ERTM) {
4173 l2cap_ertm_enter_local_busy(chan);
4175 l2cap_ertm_exit_local_busy(chan);
/* Drain the SREJ queue starting at @tx_seq: deliver each now-in-order
 * frame through SDU reassembly (stopping on local busy or a gap), and
 * advance buffer_seq_srej as frames are consumed. A reassembly failure
 * disconnects the channel.
 */
4179 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
4181 struct sk_buff *skb;
4184 while ((skb = skb_peek(&chan->srej_q)) &&
4185 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Stop at the first hole in the sequence. */
4188 if (bt_cb(skb)->control.txseq != tx_seq)
4191 skb = skb_dequeue(&chan->srej_q);
4192 control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
4193 err = l2cap_reassemble_sdu(chan, skb, control);
4196 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4200 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
4201 tx_seq = __next_seq(chan, tx_seq);
/* Re-send SREJ S-frames for every entry in chan->srej_l up to and
 * including @tx_seq, re-queuing each entry at the tail so the list
 * order reflects the resend order.
 */
4205 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4207 struct srej_list *l, *tmp;
4210 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Reached the requested sequence number: done (after this entry). */
4211 if (l->tx_seq == tx_seq) {
4216 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4217 control |= __set_reqseq(chan, l->tx_seq);
4218 l2cap_send_sframe(chan, control);
4220 list_add_tail(&l->list, &chan->srej_l);
/* Send an SREJ for every missing sequence number between
 * expected_tx_seq and @tx_seq, recording each in both the srej_list
 * (seq list) and the srej_l bookkeeping list; finally skip past
 * @tx_seq itself. Returns an error on allocation failure (the return
 * lines are not visible in this sampled listing).
 */
4224 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4226 struct srej_list *new;
4229 while (tx_seq != chan->expected_tx_seq) {
4230 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4231 control |= __set_reqseq(chan, chan->expected_tx_seq);
4232 l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
4233 l2cap_send_sframe(chan, control);
/* GFP_ATOMIC: this runs in the receive path, no sleeping allowed. */
4235 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
4239 new->tx_seq = chan->expected_tx_seq;
4241 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4243 list_add_tail(&new->list, &chan->srej_l);
/* Account for the frame that actually arrived (tx_seq itself). */
4246 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM I-frame receive handler. Validates tx_seq against the
 * receive window, manages the SREJ (selective reject) machinery for
 * out-of-order frames, feeds in-order frames to SDU reassembly, and
 * schedules acknowledgements.
 * NOTE(review): line-sampled listing — gotos/labels/returns between the
 * numbered lines are not visible here.
 */
4251 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4253 u16 tx_seq = __get_txseq(chan, rx_control);
4254 u16 req_seq = __get_reqseq(chan, rx_control);
4255 u8 sar = __get_ctrl_sar(chan, rx_control);
4256 int tx_seq_offset, expected_tx_seq_offset;
/* Ack roughly every tx_win/6 frames rather than each one. */
4257 int num_to_ack = (chan->tx_win/6) + 1;
4260 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
4261 tx_seq, rx_control);
/* F-bit answers our poll: stop the monitor timer and resume
 * retransmission timing if frames are still unacked. */
4263 if (__is_ctrl_final(chan, rx_control) &&
4264 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4265 __clear_monitor_timer(chan);
4266 if (chan->unacked_frames > 0)
4267 __set_retrans_timer(chan);
4268 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* The piggybacked req_seq acknowledges our outgoing frames. */
4271 chan->expected_ack_seq = req_seq;
4272 l2cap_drop_acked_frames(chan);
4274 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4276 /* invalid tx_seq */
4277 if (tx_seq_offset >= chan->tx_win) {
4278 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* While locally busy, only ack (unless an RNR was already sent). */
4282 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4283 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4284 l2cap_send_ack(chan);
4288 if (tx_seq == chan->expected_tx_seq)
/* SREJ recovery in progress: out-of-order handling. */
4291 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4292 struct srej_list *first;
4294 first = list_first_entry(&chan->srej_l,
4295 struct srej_list, list);
/* The oldest missing frame arrived: queue it and drain the gap. */
4296 if (tx_seq == first->tx_seq) {
4297 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4298 l2cap_check_srej_gap(chan, tx_seq);
4300 list_del(&first->list);
/* All gaps filled: leave SREJ_SENT state and ack. */
4303 if (list_empty(&chan->srej_l)) {
4304 chan->buffer_seq = chan->buffer_seq_srej;
4305 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
4306 l2cap_send_ack(chan);
4307 BT_DBG("chan %p, Exit SREJ_SENT", chan);
4310 struct srej_list *l;
4312 /* duplicated tx_seq */
4313 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
/* If this frame was already SREJ'd, resend the SREJ chain. */
4316 list_for_each_entry(l, &chan->srej_l, list) {
4317 if (l->tx_seq == tx_seq) {
4318 l2cap_resend_srejframe(chan, tx_seq);
4323 err = l2cap_send_srejframe(chan, tx_seq);
4325 l2cap_send_disconn_req(chan->conn, chan, -err);
/* Not yet in SREJ recovery: decide whether to enter it. */
4330 expected_tx_seq_offset = __seq_offset(chan,
4331 chan->expected_tx_seq, chan->buffer_seq);
4333 /* duplicated tx_seq */
4334 if (tx_seq_offset < expected_tx_seq_offset)
4337 set_bit(CONN_SREJ_SENT, &chan->conn_state);
4339 BT_DBG("chan %p, Enter SREJ", chan);
4341 INIT_LIST_HEAD(&chan->srej_l);
4342 chan->buffer_seq_srej = chan->buffer_seq;
4344 __skb_queue_head_init(&chan->srej_q);
4345 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4347 /* Set P-bit only if there are some I-frames to ack. */
4348 if (__clear_ack_timer(chan))
4349 set_bit(CONN_SEND_PBIT, &chan->conn_state);
4351 err = l2cap_send_srejframe(chan, tx_seq);
4353 l2cap_send_disconn_req(chan->conn, chan, -err);
/* (expected path:) frame is exactly the one we were waiting for. */
4360 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* During SREJ recovery even in-order frames park in srej_q. */
4362 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4363 bt_cb(skb)->control.txseq = tx_seq;
4364 bt_cb(skb)->control.sar = sar;
4365 __skb_queue_tail(&chan->srej_q, skb);
4369 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4370 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4373 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4377 if (__is_ctrl_final(chan, rx_control)) {
4378 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4379 l2cap_retransmit_frames(chan);
/* Ack after num_to_ack frames, otherwise defer via the ack timer. */
4383 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
4384 if (chan->num_acked == num_to_ack - 1)
4385 l2cap_send_ack(chan);
4387 __set_ack_timer(chan);
/* Handle a received RR (Receiver Ready) S-frame: process the piggyback
 * acknowledgement, then react to the P/F bits — answer a poll, resume
 * after an F-bit, or simply clear remote-busy and keep sending.
 */
4396 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4398 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4399 __get_reqseq(chan, rx_control), rx_control);
4401 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4402 l2cap_drop_acked_frames(chan);
/* P-bit: remote is polling us; we must answer with the F-bit set. */
4404 if (__is_ctrl_poll(chan, rx_control)) {
4405 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4406 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4407 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4408 (chan->unacked_frames > 0))
4409 __set_retrans_timer(chan);
4411 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4412 l2cap_send_srejtail(chan);
4414 l2cap_send_i_or_rr_or_rnr(chan);
/* F-bit: answer to our own poll; retransmit unless a REJ already did. */
4417 } else if (__is_ctrl_final(chan, rx_control)) {
4418 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4420 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4421 l2cap_retransmit_frames(chan);
/* Plain RR: restart retrans timer if needed and push more data. */
4424 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4425 (chan->unacked_frames > 0))
4426 __set_retrans_timer(chan);
4428 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4429 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4430 l2cap_send_ack(chan);
4432 l2cap_ertm_send(chan);
/* Handle a received REJ S-frame: ack up to req_seq, then retransmit
 * from there. With the F-bit set, skip retransmission if a REJ action
 * is already pending; otherwise retransmit and remember the REJ while
 * a poll answer is outstanding (CONN_WAIT_F).
 */
4436 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4438 u16 tx_seq = __get_reqseq(chan, rx_control);
4440 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4442 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4444 chan->expected_ack_seq = tx_seq;
4445 l2cap_drop_acked_frames(chan);
4447 if (__is_ctrl_final(chan, rx_control)) {
4448 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4449 l2cap_retransmit_frames(chan);
4451 l2cap_retransmit_frames(chan);
4453 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4454 set_bit(CONN_REJ_ACT, &chan->conn_state);
/*
 * Handle a received Selective Reject (SREJ) supervisory frame: the peer
 * requests retransmission of the single I-frame with sequence tx_seq.
 * Branches on the Poll/Final bits; while waiting for a Final (WAIT_F)
 * the requested seq is saved so a later Final can clear the SREJ action.
 * NOTE(review): else/closing-brace lines are elided in this excerpt.
 */
4457 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4459 u16 tx_seq = __get_reqseq(chan, rx_control);
4461 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4463 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* SREJ with Poll: ack up to tx_seq, retransmit it, answer with Final */
4465 if (__is_ctrl_poll(chan, rx_control)) {
4466 chan->expected_ack_seq = tx_seq;
4467 l2cap_drop_acked_frames(chan);
4469 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4470 l2cap_retransmit_one_frame(chan, tx_seq);
4472 l2cap_ertm_send(chan);
4474 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4475 chan->srej_save_reqseq = tx_seq;
4476 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* SREJ with Final: only clear the saved SREJ action if it matches */
4478 } else if (__is_ctrl_final(chan, rx_control)) {
4479 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4480 chan->srej_save_reqseq == tx_seq)
4481 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4483 l2cap_retransmit_one_frame(chan, tx_seq);
/* NOTE(review): second call is the elided else branch (no P/F bit) */
4485 l2cap_retransmit_one_frame(chan, tx_seq);
4486 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4487 chan->srej_save_reqseq = tx_seq;
4488 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/*
 * Handle a received Receiver Not Ready (RNR) supervisory frame: mark the
 * remote as busy (stop sending I-frames), ack up to tx_seq, and answer a
 * poll appropriately depending on whether an SREJ exchange is in flight.
 */
4493 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4495 u16 tx_seq = __get_reqseq(chan, rx_control);
4497 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4499 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4500 chan->expected_ack_seq = tx_seq;
4501 l2cap_drop_acked_frames(chan);
/* Poll requires our next response to carry the Final bit */
4503 if (__is_ctrl_poll(chan, rx_control))
4504 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4506 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
/* No SREJ outstanding: stop retransmitting and answer the poll */
4507 __clear_retrans_timer(chan);
4508 if (__is_ctrl_poll(chan, rx_control))
4509 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
/* NOTE(review): the following lines appear to be the elided else
 * branch (SREJ in progress) */
4513 if (__is_ctrl_poll(chan, rx_control)) {
4514 l2cap_send_srejtail(chan);
4516 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4517 l2cap_send_sframe(chan, rx_control);
/*
 * Dispatch a received ERTM supervisory frame (RR/REJ/SREJ/RNR) to the
 * matching handler. A Final bit while waiting for one (WAIT_F) also
 * stops the monitor timer and re-arms retransmission if frames are
 * still unacked.
 * NOTE(review): the per-case break statements and return are elided
 * in this excerpt.
 */
4521 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4523 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
/* Final bit answers our outstanding poll: leave the WAIT_F state */
4525 if (__is_ctrl_final(chan, rx_control) &&
4526 			test_bit(CONN_WAIT_F, &chan->conn_state)) {
4527 __clear_monitor_timer(chan);
4528 if (chan->unacked_frames > 0)
4529 __set_retrans_timer(chan);
4530 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* Route by supervisory function encoded in the control field */
4533 switch (__get_ctrl_super(chan, rx_control)) {
4534 case L2CAP_SUPER_RR:
4535 l2cap_data_channel_rrframe(chan, rx_control);
4538 case L2CAP_SUPER_REJ:
4539 l2cap_data_channel_rejframe(chan, rx_control);
4542 case L2CAP_SUPER_SREJ:
4543 l2cap_data_channel_srejframe(chan, rx_control);
4546 case L2CAP_SUPER_RNR:
4547 l2cap_data_channel_rnrframe(chan, rx_control);
/*
 * Validate and dispatch one received ERTM PDU: strip the control field,
 * verify FCS and payload length against MPS, sanity-check req_seq, then
 * hand off to the I-frame or S-frame handler. Invalid PDUs trigger a
 * disconnect request (ECONNRESET).
 * NOTE(review): local declarations, the `len` initialization and
 * several drop/return paths are elided in this excerpt — `len` is
 * presumably initialized from skb->len on an elided line; confirm
 * against the full source.
 */
4555 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4559 int len, next_tx_seq_offset, req_seq_offset;
4561 __unpack_control(chan, skb);
4563 control = __get_control(chan, skb->data);
4564 skb_pull(skb, __ctrl_size(chan));
4568 	 * We can just drop the corrupted I-frame here.
4569 	 * Receiver will miss it and start proper recovery
4570 	 * procedures and ask retransmission.
4572 if (l2cap_check_fcs(chan, skb))
/* SDU length header is present only on a SAR start I-frame */
4575 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4576 len -= L2CAP_SDULEN_SIZE;
4578 if (chan->fcs == L2CAP_FCS_CRC16)
4579 len -= L2CAP_FCS_SIZE;
/* Payload exceeding the negotiated MPS is a protocol violation */
4581 if (len > chan->mps) {
4582 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4586 req_seq = __get_reqseq(chan, control);
4588 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4590 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4591 				chan->expected_ack_seq);
4593 	/* check for invalid req-seq */
/* req_seq must not acknowledge frames we never sent */
4594 if (req_seq_offset > next_tx_seq_offset) {
4595 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4599 if (!__is_sframe(chan, control)) {
/* NOTE(review): the length check guarding this disconnect is on an
 * elided line */
4601 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4605 l2cap_data_channel_iframe(chan, control, skb);
4609 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4613 l2cap_data_channel_sframe(chan, control, skb);
/*
 * Deliver a PDU received on a connection-oriented channel (looked up by
 * source CID). Dispatches on the channel mode: Basic (direct delivery),
 * ERTM (full state machine), or Streaming (FCS check, in-sequence
 * reassembly, silent discard on gaps).
 * NOTE(review): drop/goto paths, break statements and the unlock label
 * are partially elided in this excerpt.
 */
4623 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4625 struct l2cap_chan *chan;
4630 chan = l2cap_get_chan_by_scid(conn, cid);
4632 BT_DBG("unknown cid 0x%4.4x", cid);
4633 		/* Drop packet and return */
4638 BT_DBG("chan %p, len %d", chan, skb->len);
/* Only deliver on fully established channels */
4640 if (chan->state != BT_CONNECTED)
4643 switch (chan->mode) {
4644 case L2CAP_MODE_BASIC:
4645 		/* If socket recv buffers overflows we drop data here
4646 		 * which is *bad* because L2CAP has to be reliable.
4647 		 * But we don't have any other choice. L2CAP doesn't
4648 		 * provide flow control mechanism. */
4650 if (chan->imtu < skb->len)
4653 if (!chan->ops->recv(chan->data, skb))
4657 case L2CAP_MODE_ERTM:
4658 l2cap_ertm_data_rcv(chan, skb);
4662 case L2CAP_MODE_STREAMING:
/* Streaming mode: strip control field, verify FCS and length */
4663 control = __get_control(chan, skb->data);
4664 skb_pull(skb, __ctrl_size(chan));
4667 if (l2cap_check_fcs(chan, skb))
4670 if (__is_sar_start(chan, control))
4671 len -= L2CAP_SDULEN_SIZE;
4673 if (chan->fcs == L2CAP_FCS_CRC16)
4674 len -= L2CAP_FCS_SIZE;
/* S-frames are not valid in streaming mode */
4676 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4679 tx_seq = __get_txseq(chan, control);
/* Streaming has no retransmission: a sequence gap discards any
 * partially reassembled SDU */
4681 if (chan->expected_tx_seq != tx_seq) {
4682 			/* Frame(s) missing - must discard partial SDU */
4683 kfree_skb(chan->sdu);
4685 chan->sdu_last_frag = NULL;
4688 			/* TODO: Notify userland of missing data */
4691 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4693 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4694 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4699 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4707 l2cap_chan_unlock(chan);
/*
 * Deliver a connectionless (PSM-addressed) PDU to a matching global
 * channel. The packet is dropped (elided paths) unless the channel is
 * bound or connected and the payload fits its incoming MTU.
 */
4712 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4714 struct l2cap_chan *chan;
4716 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
4720 BT_DBG("chan %p, len %d", chan, skb->len);
4722 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4725 if (chan->imtu < skb->len)
4728 if (!chan->ops->recv(chan->data, skb))
/*
 * Deliver an ATT (LE fixed CID) PDU to a matching global channel.
 * Mirrors l2cap_conless_channel but looks up by source CID instead
 * of PSM; same state and MTU checks apply.
 */
4737 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4738 			      struct sk_buff *skb)
4740 struct l2cap_chan *chan;
4742 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
4746 BT_DBG("chan %p, len %d", chan, skb->len);
4748 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4751 if (chan->imtu < skb->len)
4754 if (!chan->ops->recv(chan->data, skb))
/*
 * Top-level demultiplexer for a complete L2CAP frame: parse the basic
 * header, validate the length, and route by destination CID to the
 * signaling, connectionless, ATT/LE, SMP, or data-channel handlers.
 * NOTE(review): the drop path for a length mismatch and the default
 * switch case label are elided in this excerpt.
 */
4763 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4765 struct l2cap_hdr *lh = (void *) skb->data;
4769 skb_pull(skb, L2CAP_HDR_SIZE);
4770 cid = __le16_to_cpu(lh->cid);
4771 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length */
4773 if (len != skb->len) {
4778 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4781 case L2CAP_CID_LE_SIGNALING:
4782 case L2CAP_CID_SIGNALING:
4783 l2cap_sig_channel(conn, skb);
4786 case L2CAP_CID_CONN_LESS:
/* Connectionless payload starts with the destination PSM */
4787 psm = get_unaligned((__le16 *) skb->data);
4789 l2cap_conless_channel(conn, psm, skb);
4792 case L2CAP_CID_LE_DATA:
4793 l2cap_att_channel(conn, cid, skb);
/* SMP over the security manager CID; a failure tears the link down */
4797 if (smp_sig_channel(conn, skb))
4798 l2cap_conn_del(conn->hcon, EACCES);
/* Everything else is a dynamically allocated data channel */
4802 l2cap_data_channel(conn, cid, skb);
4807 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * HCI callback: an incoming ACL connection request from bdaddr.
 * Scans all listening channels and accumulates the link-mode flags
 * (accept/role-switch) — lm1 for exact local-address matches, lm2 for
 * wildcard (BDADDR_ANY) listeners — returning the exact match set when
 * one exists.
 * NOTE(review): the line setting `exact` is elided in this excerpt;
 * presumably it is incremented on an exact address match — confirm
 * against the full source.
 */
4809 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4811 int exact = 0, lm1 = 0, lm2 = 0;
4812 struct l2cap_chan *c;
4814 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4816 	/* Find listening sockets and check their link_mode */
4817 read_lock(&chan_list_lock);
4818 list_for_each_entry(c, &chan_list, global_l) {
4819 struct sock *sk = c->sk;
4821 if (c->state != BT_LISTEN)
/* Listener bound to this adapter's own address */
4824 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4825 lm1 |= HCI_LM_ACCEPT;
4826 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4827 lm1 |= HCI_LM_MASTER;
/* Wildcard listener accepting on any adapter */
4829 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4830 lm2 |= HCI_LM_ACCEPT;
4831 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4832 lm2 |= HCI_LM_MASTER;
4835 read_unlock(&chan_list_lock);
4837 return exact ? lm1 : lm2;
/*
 * HCI callback: the ACL connection attempt completed. On success create
 * the L2CAP connection and mark it ready; on failure tear it down with
 * the mapped errno.
 */
4840 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4842 struct l2cap_conn *conn;
4844 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4847 conn = l2cap_conn_add(hcon, status);
4849 l2cap_conn_ready(conn);
/* NOTE(review): this is the elided failure branch (non-zero status) */
4851 l2cap_conn_del(hcon, bt_to_errno(status));
/*
 * HCI callback: the controller asks which disconnect reason to report.
 * Returns the reason recorded on the L2CAP connection, or the generic
 * remote-termination code when no L2CAP state exists (guard elided).
 */
4856 int l2cap_disconn_ind(struct hci_conn *hcon)
4858 struct l2cap_conn *conn = hcon->l2cap_data;
4860 BT_DBG("hcon %p", hcon);
4863 return HCI_ERROR_REMOTE_USER_TERM;
4864 return conn->disc_reason;
/*
 * HCI callback: the ACL link was disconnected — tear down the L2CAP
 * connection, translating the HCI reason into an errno.
 */
4867 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4869 BT_DBG("hcon %p reason %d", hcon, reason);
4871 l2cap_conn_del(hcon, bt_to_errno(reason));
/*
 * React to a link encryption change on a connection-oriented channel.
 * Encryption lost: give MEDIUM-security channels a grace timer, close
 * HIGH-security channels immediately. Encryption (re)gained: cancel the
 * grace timer on MEDIUM-security channels.
 */
4875 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4877 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4880 if (encrypt == 0x00) {
4881 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4882 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4883 } else if (chan->sec_level == BT_SECURITY_HIGH)
4884 l2cap_chan_close(chan, ECONNREFUSED);
/* NOTE(review): elided else branch — encryption is now enabled */
4886 if (chan->sec_level == BT_SECURITY_MEDIUM)
4887 __clear_chan_timer(chan);
/*
 * HCI callback: authentication/encryption procedure finished with
 * `status` and resulting `encrypt` state. Walks every channel on the
 * connection and advances its state machine: LE channels become ready,
 * connecting channels send their connect request, and CONNECT2 channels
 * answer the pending connect response (or are torn down on failure).
 * NOTE(review): continue statements, some else branches and local
 * declarations (res/stat) are elided in this excerpt.
 */
4891 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4893 struct l2cap_conn *conn = hcon->l2cap_data;
4894 struct l2cap_chan *chan;
4899 BT_DBG("conn %p", conn);
/* LE links: on success kick off SMP key distribution, then stop the
 * security timer */
4901 if (hcon->type == LE_LINK) {
4902 if (!status && encrypt)
4903 smp_distribute_keys(conn, 0);
4904 cancel_delayed_work(&conn->security_timer);
4907 mutex_lock(&conn->chan_lock);
4909 list_for_each_entry(chan, &conn->chan_l, list) {
4910 l2cap_chan_lock(chan);
4912 BT_DBG("chan->scid %d", chan->scid);
/* LE data channel becomes ready once the link is encrypted */
4914 if (chan->scid == L2CAP_CID_LE_DATA) {
4915 if (!status && encrypt) {
4916 chan->sec_level = hcon->sec_level;
4917 l2cap_chan_ready(chan);
4920 l2cap_chan_unlock(chan);
/* Channels not waiting on this security procedure are skipped */
4924 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4925 l2cap_chan_unlock(chan);
/* Already-established channels just resume the suspended socket */
4929 if (!status && (chan->state == BT_CONNECTED ||
4930 						chan->state == BT_CONFIG)) {
4931 struct sock *sk = chan->sk;
4933 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
4934 sk->sk_state_change(sk);
4936 l2cap_check_encryption(chan, encrypt);
4937 l2cap_chan_unlock(chan);
/* Outgoing connect was gated on security: send the request now,
 * or start the disconnect timer on failure */
4941 if (chan->state == BT_CONNECT) {
4943 l2cap_send_conn_req(chan);
4945 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Incoming connect pending our security decision */
4947 } else if (chan->state == BT_CONNECT2) {
4948 struct sock *sk = chan->sk;
4949 struct l2cap_conn_rsp rsp;
4955 if (test_bit(BT_SK_DEFER_SETUP,
4956 						&bt_sk(sk)->flags)) {
/* Userspace must authorize: report pending and wake the
 * listening parent socket */
4957 struct sock *parent = bt_sk(sk)->parent;
4958 res = L2CAP_CR_PEND;
4959 stat = L2CAP_CS_AUTHOR_PEND;
4961 parent->sk_data_ready(parent, 0);
4963 __l2cap_state_change(chan, BT_CONFIG);
4964 res = L2CAP_CR_SUCCESS;
4965 stat = L2CAP_CS_NO_INFO;
/* NOTE(review): elided else branch — security failed, refuse */
4968 __l2cap_state_change(chan, BT_DISCONN);
4969 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4970 res = L2CAP_CR_SEC_BLOCK;
4971 stat = L2CAP_CS_NO_INFO;
4976 rsp.scid   = cpu_to_le16(chan->dcid);
4977 rsp.dcid   = cpu_to_le16(chan->scid);
4978 rsp.result = cpu_to_le16(res);
4979 rsp.status = cpu_to_le16(stat);
4980 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4984 l2cap_chan_unlock(chan);
4987 mutex_unlock(&conn->chan_lock);
/*
 * HCI callback: one ACL fragment arrived. Reassembles L2CAP frames
 * across ACL fragments: a start fragment (no ACL_CONT) either delivers
 * a complete frame immediately or allocates conn->rx_skb to collect the
 * remaining conn->rx_len bytes; continuation fragments are appended and
 * the frame is delivered once complete. All inconsistencies mark the
 * connection unreliable (ECOMM) and drop the partial frame.
 * NOTE(review): several drop/goto paths, else branches and local
 * declarations are elided in this excerpt.
 */
4992 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4994 struct l2cap_conn *conn = hcon->l2cap_data;
/* No L2CAP state yet: create it on first data */
4997 conn = l2cap_conn_add(hcon, 0);
5002 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5004 if (!(flags & ACL_CONT)) {
5005 struct l2cap_hdr *hdr;
/* A start fragment while reassembly is in progress means the
 * previous frame was truncated: drop it */
5009 BT_ERR("Unexpected start frame (len %d)", skb->len);
5010 kfree_skb(conn->rx_skb);
5011 conn->rx_skb = NULL;
5013 l2cap_conn_unreliable(conn, ECOMM);
5016 		/* Start fragment always begin with Basic L2CAP header */
5017 if (skb->len < L2CAP_HDR_SIZE) {
5018 BT_ERR("Frame is too short (len %d)", skb->len);
5019 l2cap_conn_unreliable(conn, ECOMM);
5023 hdr = (struct l2cap_hdr *) skb->data;
5024 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5026 if (len == skb->len) {
5027 			/* Complete frame received */
5028 l2cap_recv_frame(conn, skb);
5032 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5034 if (skb->len > len) {
5035 BT_ERR("Frame is too long (len %d, expected len %d)",
5037 l2cap_conn_unreliable(conn, ECOMM);
5041 		/* Allocate skb for the complete frame (with header) */
5042 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5046 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Remember how many bytes are still missing */
5048 conn->rx_len = len - skb->len;
/* NOTE(review): continuation-fragment path (elided else branch) */
5050 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5052 if (!conn->rx_len) {
5053 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5054 l2cap_conn_unreliable(conn, ECOMM);
5058 if (skb->len > conn->rx_len) {
5059 BT_ERR("Fragment is too long (len %d, expected %d)",
5060 			       skb->len, conn->rx_len);
5061 kfree_skb(conn->rx_skb);
5062 conn->rx_skb = NULL;
5064 l2cap_conn_unreliable(conn, ECOMM);
5068 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5070 conn->rx_len -= skb->len;
5072 if (!conn->rx_len) {
5073 			/* Complete frame received */
5074 l2cap_recv_frame(conn, conn->rx_skb);
5075 conn->rx_skb = NULL;
/*
 * seq_file show callback for /sys/kernel/debug/bluetooth/l2cap:
 * prints one line per registered channel (addresses, state, PSM,
 * CIDs, MTUs, security level, mode) under the global channel-list
 * read lock.
 */
5084 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5086 struct l2cap_chan *c;
5088 read_lock(&chan_list_lock);
5090 list_for_each_entry(c, &chan_list, global_l) {
5091 struct sock *sk = c->sk;
5093 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5094 					batostr(&bt_sk(sk)->src),
5095 					batostr(&bt_sk(sk)->dst),
5096 					c->state, __le16_to_cpu(c->psm),
5097 					c->scid, c->dcid, c->imtu, c->omtu,
5098 					c->sec_level, c->mode);
5101 read_unlock(&chan_list_lock);
/* debugfs open callback: bind the file to the single-record show fn */
5106 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5108 	return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (standard seq_file glue;
 * the .read line is elided in this excerpt) */
5111 static const struct file_operations l2cap_debugfs_fops = {
5112 	.open		= l2cap_debugfs_open,
5114 	.llseek		= seq_lseek,
5115 	.release	= single_release,
/* dentry of the debugfs file, kept so l2cap_exit() can remove it */
5118 static struct dentry *l2cap_debugfs;
/*
 * Module init: register the L2CAP socket layer, then create the
 * (best-effort) debugfs file — a debugfs failure is only logged,
 * it does not fail initialization.
 */
5120 int __init l2cap_init(void)
5124 err = l2cap_init_sockets();
5129 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5130 				bt_debugfs, NULL, &l2cap_debugfs_fops);
5132 BT_ERR("Failed to create L2CAP debug file");
/* Module teardown: remove the debugfs file and unregister sockets,
 * in reverse order of l2cap_init() */
5138 void l2cap_exit(void)
5140 	debugfs_remove(l2cap_debugfs);
5141 	l2cap_cleanup_sockets();
/* Runtime knob (writable at 0644) to force Basic mode instead of ERTM */
5144 module_param(disable_ertm, bool, 0644);
5145 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");