2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
/* Locally supported L2CAP feature mask; fixed-channel support is set. */
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed channel bitmap; only the L2CAP signalling channel bit is set here. */
62 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
/* Global list of all registered channels, guarded by chan_list_lock. */
64 static LIST_HEAD(chan_list);
65 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for signalling helpers defined later in the file.
 * NOTE(review): the final parameter line of the l2cap_send_cmd() prototype
 * appears to be missing from this dump — verify against the full source. */
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
75 /* ---- L2CAP channels ---- */
/* Look up a channel on @conn by destination CID.
 * Presumably requires conn->chan_lock held (cf. the locked SCID wrapper
 * below) — verify at callers.  Body partially elided in this dump. */
77 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
81 list_for_each_entry(c, &conn->chan_l, list) {
/* Look up a channel on @conn by source CID.  Callers visible below take
 * conn->chan_lock around this; body partially elided in this dump. */
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
92 list_for_each_entry(c, &conn->chan_l, list) {
99 /* Find channel with given SCID.
100 * Returns locked socket */
/* NOTE(review): as visible below, conn->chan_lock is released before
 * returning, so nothing is returned "locked" — the comment above looks
 * stale; confirm against the full source. */
101 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103 struct l2cap_chan *c;
105 mutex_lock(&conn->chan_lock);
106 c = __l2cap_get_chan_by_scid(conn, cid);
107 mutex_unlock(&conn->chan_lock);
/* Look up a channel on @conn by pending signalling-command identifier.
 * The locked wrapper below takes conn->chan_lock around this. */
112 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
114 struct l2cap_chan *c;
116 list_for_each_entry(c, &conn->chan_l, list) {
117 if (c->ident == ident)
/* Locked wrapper: serializes the ident lookup with conn->chan_lock. */
123 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
125 struct l2cap_chan *c;
127 mutex_lock(&conn->chan_lock);
128 c = __l2cap_get_chan_by_ident(conn, ident);
129 mutex_unlock(&conn->chan_lock);
/* Scan the global channel list for a channel bound to @psm on source
 * address @src.  Callers (l2cap_add_psm) hold chan_list_lock. */
134 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
136 struct l2cap_chan *c;
138 list_for_each_entry(c, &chan_list, global_l) {
139 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm on @src, or — when no PSM is given — auto-allocate
 * a dynamic one.  Serialized by chan_list_lock.  Error paths and return
 * statements are elided in this dump. */
145 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
149 write_lock(&chan_list_lock);
/* Reject an explicit PSM that is already bound on this source address. */
151 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Dynamic PSMs are odd values scanned in 0x1001..0x10ff. */
164 for (p = 0x1001; p < 0x1100; p += 2)
165 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
166 chan->psm = cpu_to_le16(p);
167 chan->sport = cpu_to_le16(p);
174 write_unlock(&chan_list_lock);
/* Record a fixed source CID on @chan, under chan_list_lock.
 * Assignment line elided in this dump. */
178 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
180 write_lock(&chan_list_lock);
184 write_unlock(&chan_list_lock);
/* Return the first dynamic source CID in [DYN_START, DYN_END) not yet in
 * use on @conn.  Exhaustion handling elided in this dump. */
189 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
191 u16 cid = L2CAP_CID_DYN_START;
193 for (; cid < L2CAP_CID_DYN_END; cid++) {
194 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Transition @chan to @state and notify via the ops->state_change hook.
 * Lock-free variant — presumably callers hold the socket lock; verify. */
201 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
203 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
204 state_to_string(state));
207 chan->ops->state_change(chan->data, state);
/* Locked wrapper around __l2cap_state_change(); the lock/unlock lines
 * around the call are elided in this dump. */
210 static void l2cap_state_change(struct l2cap_chan *chan, int state)
212 struct sock *sk = chan->sk;
215 __l2cap_state_change(chan, state);
/* Record an error on the channel's socket; lock-free variant.
 * The sk_err assignment itself is elided in this dump. */
219 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
221 struct sock *sk = chan->sk;
/* Locked wrapper around __l2cap_chan_set_err(); lock/unlock lines are
 * elided in this dump. */
226 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
228 struct sock *sk = chan->sk;
231 __l2cap_chan_set_err(chan, err);
235 /* ---- L2CAP sequence number lists ---- */
237 /* For ERTM, ordered lists of sequence numbers must be tracked for
238 * SREJ requests that are received and for frames that are to be
239 * retransmitted. These seq_list functions implement a singly-linked
240 * list in an array, where membership in the list can also be checked
241 * in constant time. Items can also be added to the tail of the list
242 * and removed from the head in constant time, without further memory
/* Allocate and clear the backing array for an ERTM sequence-number list.
 * Allocation-failure return path is elided in this dump. */
246 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
248 size_t alloc_size, i;
250 /* Allocated size is a power of 2 to map sequence numbers
251 * (which may be up to 14 bits) in to a smaller array that is
252 * sized for the negotiated ERTM transmit windows.
254 alloc_size = roundup_pow_of_two(size);
256 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
/* Power-of-two size lets (seq & mask) index the array directly. */
260 seq_list->mask = alloc_size - 1;
261 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
262 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
263 for (i = 0; i < alloc_size; i++)
264 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Release the array allocated by l2cap_seq_list_init(). */
269 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
271 kfree(seq_list->list);
/* True if @seq is currently a member of the list: its slot holds either a
 * successor or the TAIL sentinel, i.e. anything but CLEAR. */
274 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
277 /* Constant-time check for list membership */
278 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove @seq from the list and return it.  Removing the head is O(1);
 * removing an interior element walks the singly linked chain.  Returns
 * L2CAP_SEQ_LIST_CLEAR if the list is empty or @seq is not found. */
281 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
283 u16 mask = seq_list->mask;
285 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
286 /* In case someone tries to pop the head of an empty list */
287 return L2CAP_SEQ_LIST_CLEAR;
288 } else if (seq_list->head == seq) {
289 /* Head can be removed in constant time */
290 seq_list->head = seq_list->list[seq & mask];
291 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* If the removed head was also the tail, the list is now empty. */
293 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
294 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
295 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
298 /* Walk the list to find the sequence number */
299 u16 prev = seq_list->head;
300 while (seq_list->list[prev & mask] != seq) {
301 prev = seq_list->list[prev & mask];
302 if (prev == L2CAP_SEQ_LIST_TAIL)
303 return L2CAP_SEQ_LIST_CLEAR;
306 /* Unlink the number from the list and clear it */
307 seq_list->list[prev & mask] = seq_list->list[seq & mask];
308 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
309 if (seq_list->tail == seq)
310 seq_list->tail = prev;
/* Pop and return the head of the list (CLEAR if the list is empty). */
315 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
317 /* Remove the head in constant time */
318 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Empty the list; slots are only rewritten when the list is non-empty. */
321 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
323 if (seq_list->head != L2CAP_SEQ_LIST_CLEAR) {
325 for (i = 0; i <= seq_list->mask; i++)
326 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
328 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
329 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append @seq at the tail in O(1); duplicates (slot already non-CLEAR)
 * are skipped by the guard below. */
333 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
335 u16 mask = seq_list->mask;
337 /* All appends happen in constant time */
339 if (seq_list->list[seq & mask] == L2CAP_SEQ_LIST_CLEAR) {
/* Empty list: the new element becomes the head as well. */
340 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
341 seq_list->head = seq;
343 seq_list->list[seq_list->tail & mask] = seq;
345 seq_list->tail = seq;
346 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: closes the channel with a
 * reason derived from its state, then drops the timer's reference. */
350 static void l2cap_chan_timeout(struct work_struct *work)
352 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
354 struct l2cap_conn *conn = chan->conn;
357 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
359 mutex_lock(&conn->chan_lock);
360 l2cap_chan_lock(chan);
/* Map the channel state to the errno used as the close reason; the
 * fallback branch for other states is elided in this dump. */
362 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
363 reason = ECONNREFUSED;
364 else if (chan->state == BT_CONNECT &&
365 chan->sec_level != BT_SECURITY_SDP)
366 reason = ECONNREFUSED;
370 l2cap_chan_close(chan, reason);
372 l2cap_chan_unlock(chan);
374 chan->ops->close(chan->data);
375 mutex_unlock(&conn->chan_lock);
/* Balance the reference that kept the channel alive while the timer
 * was pending. */
377 l2cap_chan_put(chan);
/* Allocate and initialize a new channel, link it into the global list and
 * hand back a reference (refcnt starts at 1).  NULL-check of the
 * allocation is elided in this dump — verify against the full source. */
380 struct l2cap_chan *l2cap_chan_create(void)
382 struct l2cap_chan *chan;
384 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
388 mutex_init(&chan->lock);
390 write_lock(&chan_list_lock);
391 list_add(&chan->global_l, &chan_list);
392 write_unlock(&chan_list_lock);
394 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
396 chan->state = BT_OPEN;
398 atomic_set(&chan->refcnt, 1);
400 BT_DBG("chan %p", chan);
/* Unlink @chan from the global list and drop the creation reference. */
405 void l2cap_chan_destroy(struct l2cap_chan *chan)
407 write_lock(&chan_list_lock);
408 list_del(&chan->global_l);
409 write_unlock(&chan_list_lock);
411 l2cap_chan_put(chan);
/* Reset negotiable channel parameters (FCS, ERTM windows, security) to
 * their protocol defaults and force the link active for this channel. */
414 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
416 chan->fcs = L2CAP_FCS_CRC16;
417 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
418 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
419 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
420 chan->sec_level = BT_SECURITY_LOW;
422 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach @chan to @conn: pick CIDs/MTU according to the channel type and
 * link type, initialize best-effort QoS defaults, take a reference and
 * add the channel to the connection's list.  Caller holds
 * conn->chan_lock (see l2cap_chan_add below). */
425 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
427 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
428 __le16_to_cpu(chan->psm), chan->dcid);
430 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
434 switch (chan->chan_type) {
435 case L2CAP_CHAN_CONN_ORIENTED:
/* LE uses the fixed LE data CID; BR/EDR gets a dynamic SCID. */
436 if (conn->hcon->type == LE_LINK) {
438 chan->omtu = L2CAP_LE_DEFAULT_MTU;
439 chan->scid = L2CAP_CID_LE_DATA;
440 chan->dcid = L2CAP_CID_LE_DATA;
442 /* Alloc CID for connection-oriented socket */
443 chan->scid = l2cap_alloc_cid(conn);
444 chan->omtu = L2CAP_DEFAULT_MTU;
448 case L2CAP_CHAN_CONN_LESS:
449 /* Connectionless socket */
450 chan->scid = L2CAP_CID_CONN_LESS;
451 chan->dcid = L2CAP_CID_CONN_LESS;
452 chan->omtu = L2CAP_DEFAULT_MTU;
456 /* Raw socket can send/recv signalling messages only */
457 chan->scid = L2CAP_CID_SIGNALING;
458 chan->dcid = L2CAP_CID_SIGNALING;
459 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default local QoS: best-effort service with protocol defaults. */
462 chan->local_id = L2CAP_BESTEFFORT_ID;
463 chan->local_stype = L2CAP_SERV_BESTEFFORT;
464 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
465 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
466 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
467 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* The connection's channel list holds its own reference. */
469 l2cap_chan_hold(chan);
471 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper: attach @chan to @conn under conn->chan_lock. */
474 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
476 mutex_lock(&conn->chan_lock);
477 __l2cap_chan_add(conn, chan);
478 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection: stop the channel timer, unlink it,
 * drop references, mark the socket closed/zapped, and — unless config
 * finished cleanly — purge ERTM queues, timers and SREJ state.
 * Several lines (locking, conditionals) are elided in this dump. */
481 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
483 struct sock *sk = chan->sk;
484 struct l2cap_conn *conn = chan->conn;
485 struct sock *parent = bt_sk(sk)->parent;
487 __clear_chan_timer(chan);
489 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
492 /* Delete from channel list */
493 list_del(&chan->list);
/* Drop the reference taken by __l2cap_chan_add(). */
495 l2cap_chan_put(chan);
498 hci_conn_put(conn->hcon);
503 __l2cap_state_change(chan, BT_CLOSED);
504 sock_set_flag(sk, SOCK_ZAPPED);
507 __l2cap_chan_set_err(chan, err);
/* Pending-accept child: unlink from the parent's accept queue and wake
 * the listener; otherwise just signal the state change. */
510 bt_accept_unlink(sk);
511 parent->sk_data_ready(parent, 0);
513 sk->sk_state_change(sk);
517 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
518 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
521 skb_queue_purge(&chan->tx_q);
523 if (chan->mode == L2CAP_MODE_ERTM) {
524 struct srej_list *l, *tmp;
526 __clear_retrans_timer(chan);
527 __clear_monitor_timer(chan);
528 __clear_ack_timer(chan);
530 skb_queue_purge(&chan->srej_q);
532 l2cap_seq_list_free(&chan->srej_list);
533 l2cap_seq_list_free(&chan->retrans_list);
534 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every not-yet-accepted child channel of a listening socket. */
541 static void l2cap_chan_cleanup_listen(struct sock *parent)
545 BT_DBG("parent %p", parent);
547 /* Close not yet accepted channels */
548 while ((sk = bt_accept_dequeue(parent, NULL))) {
549 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
551 l2cap_chan_lock(chan);
552 __clear_chan_timer(chan);
553 l2cap_chan_close(chan, ECONNRESET);
554 l2cap_chan_unlock(chan);
/* Notify the owner (socket layer) after the channel lock is dropped. */
556 chan->ops->close(chan->data);
/* Close @chan according to its current state: listening sockets reap
 * their children; connected/config channels send a disconnect request;
 * connect-pending channels may answer the peer's pending connect request
 * before deletion.  Several case labels and lines are elided in this
 * dump.  Caller is expected to hold the channel lock (see callers). */
560 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
562 struct l2cap_conn *conn = chan->conn;
563 struct sock *sk = chan->sk;
565 BT_DBG("chan %p state %s sk %p", chan,
566 state_to_string(chan->state), sk);
568 switch (chan->state) {
571 l2cap_chan_cleanup_listen(sk);
573 __l2cap_state_change(chan, BT_CLOSED);
574 sock_set_flag(sk, SOCK_ZAPPED);
/* Connected BR/EDR CO channel: start the disconnect handshake and arm
 * the channel timer; otherwise delete immediately. */
580 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
581 conn->hcon->type == ACL_LINK) {
582 __set_chan_timer(chan, sk->sk_sndtimeo);
583 l2cap_send_disconn_req(conn, chan, reason);
585 l2cap_chan_del(chan, reason);
/* BT_CONNECT2: a peer connect request is pending — reply before
 * tearing the channel down. */
589 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
590 conn->hcon->type == ACL_LINK) {
591 struct l2cap_conn_rsp rsp;
594 if (bt_sk(sk)->defer_setup)
595 result = L2CAP_CR_SEC_BLOCK;
597 result = L2CAP_CR_BAD_PSM;
598 l2cap_state_change(chan, BT_DISCONN);
600 rsp.scid = cpu_to_le16(chan->dcid);
601 rsp.dcid = cpu_to_le16(chan->scid);
602 rsp.result = cpu_to_le16(result);
603 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
604 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
608 l2cap_chan_del(chan, reason);
613 l2cap_chan_del(chan, reason);
618 sock_set_flag(sk, SOCK_ZAPPED);
/* Map the channel type, PSM and security level to the HCI authentication
 * requirement used when establishing/upgrading the ACL link. */
624 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
626 if (chan->chan_type == L2CAP_CHAN_RAW) {
/* Raw channels (e.g. dedicated bonding) request dedicated bonding. */
627 switch (chan->sec_level) {
628 case BT_SECURITY_HIGH:
629 return HCI_AT_DEDICATED_BONDING_MITM;
630 case BT_SECURITY_MEDIUM:
631 return HCI_AT_DEDICATED_BONDING;
633 return HCI_AT_NO_BONDING;
/* PSM 0x0001 (SDP) never requires bonding; LOW is demoted to the
 * special SDP security level. */
635 } else if (chan->psm == cpu_to_le16(0x0001)) {
636 if (chan->sec_level == BT_SECURITY_LOW)
637 chan->sec_level = BT_SECURITY_SDP;
639 if (chan->sec_level == BT_SECURITY_HIGH)
640 return HCI_AT_NO_BONDING_MITM;
642 return HCI_AT_NO_BONDING;
/* All other channels use general bonding per their security level. */
644 switch (chan->sec_level) {
645 case BT_SECURITY_HIGH:
646 return HCI_AT_GENERAL_BONDING_MITM;
647 case BT_SECURITY_MEDIUM:
648 return HCI_AT_GENERAL_BONDING;
650 return HCI_AT_NO_BONDING;
655 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level with the
 * derived auth requirement on the underlying ACL connection. */
656 int l2cap_chan_check_security(struct l2cap_chan *chan)
658 struct l2cap_conn *conn = chan->conn;
661 auth_type = l2cap_get_auth_type(chan);
663 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier for @conn; wraps within
 * the kernel range (wrap line elided in this dump). */
666 static u8 l2cap_get_ident(struct l2cap_conn *conn)
670 /* Get next available identificator.
671 * 1 - 128 are used by kernel.
672 * 129 - 199 are reserved.
673 * 200 - 254 are used by utilities like l2ping, etc.
676 spin_lock(&conn->lock);
678 if (++conn->tx_ident > 128)
683 spin_unlock(&conn->lock);
/* Build a signalling command skb and push it down the HCI channel at top
 * priority.  NULL-skb early-return is elided in this dump. */
688 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
690 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
693 BT_DBG("code 0x%2.2x", code);
/* Use a non-flushable ACL start if the controller supports it. */
698 if (lmp_no_flush_capable(conn->hcon->hdev))
699 flags = ACL_START_NO_FLUSH;
703 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
704 skb->priority = HCI_PRIO_MAX;
706 hci_send_acl(conn->hchan, skb, flags);
/* Transmit a data skb for @chan over the HCI channel, honouring the
 * channel's flushable and force-active flags. */
709 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
711 struct hci_conn *hcon = chan->conn->hcon;
714 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
/* Non-flushable start unless the channel explicitly allows flushing. */
717 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
718 lmp_no_flush_capable(hcon->hdev))
719 flags = ACL_START_NO_FLUSH;
723 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
724 hci_send_acl(chan->conn->hchan, skb, flags);
/* Build and send an ERTM supervisory (S-) frame carrying @control.
 * Header size depends on the extended-control flag; an FCS is appended
 * when CRC16 is negotiated.  Pending F-bit/P-bit requests are folded in
 * here.  Alloc-failure return is elided in this dump. */
727 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
730 struct l2cap_hdr *lh;
731 struct l2cap_conn *conn = chan->conn;
/* S-frames are only valid on a connected channel. */
734 if (chan->state != BT_CONNECTED)
737 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
738 hlen = L2CAP_EXT_HDR_SIZE;
740 hlen = L2CAP_ENH_HDR_SIZE;
742 if (chan->fcs == L2CAP_FCS_CRC16)
743 hlen += L2CAP_FCS_SIZE;
745 BT_DBG("chan %p, control 0x%8.8x", chan, control);
747 count = min_t(unsigned int, conn->mtu, hlen);
749 control |= __set_sframe(chan);
/* Consume any queued final/poll bit requests into this frame. */
751 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
752 control |= __set_ctrl_final(chan);
754 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
755 control |= __set_ctrl_poll(chan);
757 skb = bt_skb_alloc(count, GFP_ATOMIC);
761 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
762 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
763 lh->cid = cpu_to_le16(chan->dcid);
765 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
/* FCS covers the basic header plus the control field. */
767 if (chan->fcs == L2CAP_FCS_CRC16) {
768 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
769 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
772 skb->priority = HCI_PRIO_MAX;
773 l2cap_do_send(chan, skb);
/* Send an RNR S-frame when the local side is busy (recording that we did
 * so), otherwise an RR, acknowledging up to buffer_seq. */
776 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
778 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
779 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
780 set_bit(CONN_RNR_SENT, &chan->conn_state);
782 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
784 control |= __set_reqseq(chan, chan->buffer_seq);
786 l2cap_send_sframe(chan, control);
/* Pack an l2cap_ctrl into the 16-bit enhanced control field layout:
 * S-frames carry poll/super bits, I-frames carry sar/txseq. */
789 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
793 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
794 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
796 if (control->sframe) {
797 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
798 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
799 packed |= L2CAP_CTRL_FRAME_TYPE;
801 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
802 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode a 16-bit enhanced control field into @control; the frame-type
 * bit selects S-frame (poll/super) vs I-frame (sar/txseq) decoding. */
808 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
810 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
811 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
813 if (enh & L2CAP_CTRL_FRAME_TYPE) {
816 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
817 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
824 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
825 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Pack an l2cap_ctrl into the 32-bit extended control field layout;
 * mirrors __pack_enhanced_control() with the wider shifts/masks. */
832 static u32 __pack_extended_control(struct l2cap_ctrl *control)
836 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
837 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
839 if (control->sframe) {
840 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
841 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
842 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
844 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
845 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Decode a 32-bit extended control field into @control; mirrors
 * __unpack_enhanced_control() with the wider shifts/masks. */
851 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
853 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
854 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
856 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
859 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
860 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
867 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
868 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Decode the control field at the start of @skb->data into the skb's
 * control block, choosing 32- or 16-bit layout per the channel flag. */
875 static inline void __unpack_control(struct l2cap_chan *chan,
878 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
879 __unpack_extended_control(get_unaligned_le32(skb->data),
880 &bt_cb(skb)->control);
882 __unpack_enhanced_control(get_unaligned_le16(skb->data),
883 &bt_cb(skb)->control);
/* Write @control into the skb just after the basic L2CAP header, in the
 * 32- or 16-bit layout selected by the channel's extended-control flag. */
887 static inline void __pack_control(struct l2cap_chan *chan,
888 struct l2cap_ctrl *control,
891 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
892 put_unaligned_le32(__pack_extended_control(control),
893 skb->data + L2CAP_HDR_SIZE);
895 put_unaligned_le16(__pack_enhanced_control(control),
896 skb->data + L2CAP_HDR_SIZE);
/* True when no connect request is outstanding for this channel. */
900 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
902 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Send an L2CAP Connection Request for @chan, allocating a fresh command
 * ident and marking the connect as pending.  The req.psm assignment line
 * is elided in this dump. */
905 static void l2cap_send_conn_req(struct l2cap_chan *chan)
907 struct l2cap_conn *conn = chan->conn;
908 struct l2cap_conn_req req;
910 req.scid = cpu_to_le16(chan->scid);
913 chan->ident = l2cap_get_ident(conn);
915 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
917 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Mark @chan fully connected: clear config state and timer, move to
 * BT_CONNECTED and wake the socket (and the listening parent, if any). */
920 static void l2cap_chan_ready(struct l2cap_chan *chan)
922 struct sock *sk = chan->sk;
927 parent = bt_sk(sk)->parent;
929 BT_DBG("sk %p, parent %p", sk, parent);
/* Config negotiation is complete; reset all CONF_* bits. */
931 chan->conf_state = 0;
932 __clear_chan_timer(chan);
934 __l2cap_state_change(chan, BT_CONNECTED);
935 sk->sk_state_change(sk);
938 parent->sk_data_ready(parent, 0);
/* Kick off channel establishment: LE links are ready immediately; on
 * BR/EDR, either send the connect request (if the feature-mask exchange
 * already completed and security allows) or first issue an information
 * request for the peer's feature mask. */
943 static void l2cap_do_start(struct l2cap_chan *chan)
945 struct l2cap_conn *conn = chan->conn;
947 if (conn->hcon->type == LE_LINK) {
948 l2cap_chan_ready(chan);
952 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature request sent but not answered yet: wait for the reply. */
953 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
956 if (l2cap_chan_check_security(chan) &&
957 __l2cap_no_conn_pending(chan))
958 l2cap_send_conn_req(chan);
960 struct l2cap_info_req req;
961 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
963 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
964 conn->info_ident = l2cap_get_ident(conn);
/* Bound the wait for the information response. */
966 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
968 l2cap_send_cmd(conn, conn->info_ident,
969 L2CAP_INFO_REQ, sizeof(req), &req);
/* Non-zero if @mode (ERTM/streaming) is supported by both the local
 * feature mask and the peer's @feat_mask.  The condition guarding the
 * local ERTM/streaming enable is elided in this dump. */
973 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
975 u32 local_feat_mask = l2cap_feat_mask;
977 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
980 case L2CAP_MODE_ERTM:
981 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
982 case L2CAP_MODE_STREAMING:
983 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnection Request for @chan (stopping ERTM timers first),
 * then move the channel to BT_DISCONN with @err on the socket.
 * The NULL-conn guard is elided in this dump. */
989 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
991 struct sock *sk = chan->sk;
992 struct l2cap_disconn_req req;
997 if (chan->mode == L2CAP_MODE_ERTM) {
998 __clear_retrans_timer(chan);
999 __clear_monitor_timer(chan);
1000 __clear_ack_timer(chan);
1003 req.dcid = cpu_to_le16(chan->dcid);
1004 req.scid = cpu_to_le16(chan->scid);
1005 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1006 L2CAP_DISCONN_REQ, sizeof(req), &req);
1009 __l2cap_state_change(chan, BT_DISCONN);
1010 __l2cap_chan_set_err(chan, err);
1014 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on @conn and advance its state
 * machine: BT_CONNECT channels (re)send connect requests once security
 * allows, BT_CONNECT2 channels answer the peer's pending connect request
 * and, on success, start configuration.  Typically run after the
 * feature-mask exchange or a security event completes. */
1015 static void l2cap_conn_start(struct l2cap_conn *conn)
1017 struct l2cap_chan *chan, *tmp;
1019 BT_DBG("conn %p", conn);
1021 mutex_lock(&conn->chan_lock);
1023 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1024 struct sock *sk = chan->sk;
1026 l2cap_chan_lock(chan);
1028 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1029 l2cap_chan_unlock(chan);
1033 if (chan->state == BT_CONNECT) {
1034 if (!l2cap_chan_check_security(chan) ||
1035 !__l2cap_no_conn_pending(chan)) {
1036 l2cap_chan_unlock(chan);
/* Requested mode unsupported by the peer and the channel insists on
 * it (state-2 device): give up on this channel. */
1040 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1041 && test_bit(CONF_STATE2_DEVICE,
1042 &chan->conf_state)) {
1043 l2cap_chan_close(chan, ECONNRESET);
1044 l2cap_chan_unlock(chan);
1048 l2cap_send_conn_req(chan);
1050 } else if (chan->state == BT_CONNECT2) {
1051 struct l2cap_conn_rsp rsp;
1053 rsp.scid = cpu_to_le16(chan->dcid);
1054 rsp.dcid = cpu_to_le16(chan->scid);
1056 if (l2cap_chan_check_security(chan)) {
/* Deferred setup: report "authorization pending" and wake the
 * listener to decide; otherwise accept and enter BT_CONFIG. */
1058 if (bt_sk(sk)->defer_setup) {
1059 struct sock *parent = bt_sk(sk)->parent;
1060 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1061 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1063 parent->sk_data_ready(parent, 0);
1066 __l2cap_state_change(chan, BT_CONFIG);
1067 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1068 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1072 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1073 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1076 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only send a config request after a successful response, and only
 * once per channel. */
1079 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1080 rsp.result != L2CAP_CR_SUCCESS) {
1081 l2cap_chan_unlock(chan);
1085 set_bit(CONF_REQ_SENT, &chan->conf_state);
1086 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1087 l2cap_build_conf_req(chan, buf), buf);
1088 chan->num_conf_req++;
1091 l2cap_chan_unlock(chan);
1094 mutex_unlock(&conn->chan_lock);
1097 /* Find socket with cid and source/destination bdaddr.
1098 * Returns closest match, locked.
/* NOTE(review): an exact src+dst match returns immediately; otherwise the
 * best wildcard (BDADDR_ANY) match collected in c1 is returned — the
 * final return is elided in this dump. */
1100 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1104 struct l2cap_chan *c, *c1 = NULL;
1106 read_lock(&chan_list_lock);
1108 list_for_each_entry(c, &chan_list, global_l) {
1109 struct sock *sk = c->sk;
1111 if (state && c->state != state)
1114 if (c->scid == cid) {
1115 int src_match, dst_match;
1116 int src_any, dst_any;
1119 src_match = !bacmp(&bt_sk(sk)->src, src);
1120 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1121 if (src_match && dst_match) {
1122 read_unlock(&chan_list_lock);
1127 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1128 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1129 if ((src_match && dst_any) || (src_any && dst_match) ||
1130 (src_any && dst_any))
1135 read_unlock(&chan_list_lock);
/* Incoming LE connection: find a listener on the LE data CID, spawn a
 * child channel, enqueue it on the listener's accept queue and mark it
 * connected.  Early-return guards (no listener, no child) are elided in
 * this dump. */
1140 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1142 struct sock *parent, *sk;
1143 struct l2cap_chan *chan, *pchan;
1147 /* Check if we have socket listening on cid */
1148 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1149 conn->src, conn->dst);
1157 /* Check for backlog size */
1158 if (sk_acceptq_is_full(parent)) {
1159 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1163 chan = pchan->ops->new_connection(pchan->data);
/* Keep the ACL alive for the lifetime of the child channel. */
1169 hci_conn_hold(conn->hcon);
1171 bacpy(&bt_sk(sk)->src, conn->src);
1172 bacpy(&bt_sk(sk)->dst, conn->dst);
1174 bt_accept_enqueue(parent, sk);
1176 l2cap_chan_add(conn, chan);
1178 __set_chan_timer(chan, sk->sk_sndtimeo);
1180 __l2cap_state_change(chan, BT_CONNECTED);
1181 parent->sk_data_ready(parent, 0);
1184 release_sock(parent);
/* The underlying link came up: handle LE incoming/outgoing setup, then
 * advance every channel — LE channels become ready once SMP security is
 * satisfied, non-connection-oriented channels connect immediately, and
 * BT_CONNECT channels proceed through l2cap_do_start(). */
1187 static void l2cap_conn_ready(struct l2cap_conn *conn)
1189 struct l2cap_chan *chan;
1191 BT_DBG("conn %p", conn);
1193 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1194 l2cap_le_conn_ready(conn);
/* Outgoing LE link: elevate security to the pending level via SMP. */
1196 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1197 smp_conn_security(conn, conn->hcon->pending_sec_level)
1199 mutex_lock(&conn->chan_lock);
1201 list_for_each_entry(chan, &conn->chan_l, list) {
1203 l2cap_chan_lock(chan);
1205 if (conn->hcon->type == LE_LINK) {
1206 if (smp_conn_security(conn, chan->sec_level))
1207 l2cap_chan_ready(chan);
1209 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1210 struct sock *sk = chan->sk;
1211 __clear_chan_timer(chan);
1213 __l2cap_state_change(chan, BT_CONNECTED);
1214 sk->sk_state_change(sk);
1217 } else if (chan->state == BT_CONNECT)
1218 l2cap_do_start(chan);
1220 l2cap_chan_unlock(chan);
1223 mutex_unlock(&conn->chan_lock);
1226 /* Notify sockets that we cannot guarantee reliability anymore */
1227 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1229 struct l2cap_chan *chan;
1231 BT_DBG("conn %p", conn);
1233 mutex_lock(&conn->chan_lock);
/* Only channels that demanded reliable delivery get the error. */
1235 list_for_each_entry(chan, &conn->chan_l, list) {
1236 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1237 __l2cap_chan_set_err(chan, err);
1240 mutex_unlock(&conn->chan_lock);
/* Information-request timer expired: treat the feature-mask exchange as
 * done anyway and let pending channels proceed. */
1243 static void l2cap_info_timeout(struct work_struct *work)
1245 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1248 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1249 conn->info_ident = 0;
1251 l2cap_conn_start(conn);
/* Tear down the L2CAP connection attached to @hcon: delete every channel
 * with @err, release the HCI channel, cancel pending timers/SMP state and
 * detach from the hci_conn.  The final kfree of conn is elided in this
 * dump. */
1254 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1256 struct l2cap_conn *conn = hcon->l2cap_data;
1257 struct l2cap_chan *chan, *l;
1262 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Discard any partially reassembled incoming frame. */
1264 kfree_skb(conn->rx_skb);
1266 mutex_lock(&conn->chan_lock);
1269 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1270 l2cap_chan_lock(chan);
1272 l2cap_chan_del(chan, err);
1274 l2cap_chan_unlock(chan);
1276 chan->ops->close(chan->data);
1279 mutex_unlock(&conn->chan_lock);
1281 hci_chan_del(conn->hchan);
1283 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1284 cancel_delayed_work_sync(&conn->info_timer);
1286 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1287 cancel_delayed_work_sync(&conn->security_timer);
1288 smp_chan_destroy(conn);
1291 hcon->l2cap_data = NULL;
/* SMP security procedure timed out: drop the whole connection. */
1295 static void security_timeout(struct work_struct *work)
1297 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1298 security_timer.work);
1300 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for @hcon: allocate the HCI
 * channel and conn struct, pick the MTU per link type, and initialize
 * locks, the channel list and the appropriate timer.  Early-return and
 * final-return lines are elided in this dump. */
1303 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1305 struct l2cap_conn *conn = hcon->l2cap_data;
1306 struct hci_chan *hchan;
1311 hchan = hci_chan_create(hcon);
1315 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
/* Allocation failed: release the just-created HCI channel. */
1317 hci_chan_del(hchan);
1321 hcon->l2cap_data = conn;
1323 conn->hchan = hchan;
1325 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* LE links use the controller's LE MTU when it advertises one. */
1327 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1328 conn->mtu = hcon->hdev->le_mtu;
1330 conn->mtu = hcon->hdev->acl_mtu;
1332 conn->src = &hcon->hdev->bdaddr;
1333 conn->dst = &hcon->dst;
1335 conn->feat_mask = 0;
1337 spin_lock_init(&conn->lock);
1338 mutex_init(&conn->chan_lock);
1340 INIT_LIST_HEAD(&conn->chan_l);
/* LE connections get the SMP security timer, BR/EDR the info timer. */
1342 if (hcon->type == LE_LINK)
1343 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1345 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1347 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1352 /* ---- Socket interface ---- */
1354 /* Find socket with psm and source / destination bdaddr.
1355 * Returns closest match.
/* NOTE(review): exact src+dst match returns immediately; the best
 * wildcard (BDADDR_ANY) match collected in c1 is returned otherwise —
 * the c1 assignment and final return are elided in this dump. */
1357 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1361 struct l2cap_chan *c, *c1 = NULL;
1363 read_lock(&chan_list_lock);
1365 list_for_each_entry(c, &chan_list, global_l) {
1366 struct sock *sk = c->sk;
1368 if (state && c->state != state)
1371 if (c->psm == psm) {
1372 int src_match, dst_match;
1373 int src_any, dst_any;
1376 src_match = !bacmp(&bt_sk(sk)->src, src);
1377 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1378 if (src_match && dst_match) {
1379 read_unlock(&chan_list_lock);
1384 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1385 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1386 if ((src_match && dst_any) || (src_any && dst_match) ||
1387 (src_any && dst_any))
1392 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection for @chan to @dst on @psm/@cid:
 * validate the PSM and mode, create/look up the ACL (or LE) link, attach
 * the channel to the connection and start the L2CAP handshake.  Many
 * error-path and state-validation lines are elided in this dump. */
1397 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1399 struct sock *sk = chan->sk;
1400 bdaddr_t *src = &bt_sk(sk)->src;
1401 struct l2cap_conn *conn;
1402 struct hci_conn *hcon;
1403 struct hci_dev *hdev;
1407 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1408 __le16_to_cpu(chan->psm));
1410 hdev = hci_get_route(dst, src);
1412 return -EHOSTUNREACH;
1416 l2cap_chan_lock(chan);
1418 /* PSM must be odd and lsb of upper byte must be 0 */
1419 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1420 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a fixed CID. */
1425 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1430 switch (chan->mode) {
1431 case L2CAP_MODE_BASIC:
1433 case L2CAP_MODE_ERTM:
1434 case L2CAP_MODE_STREAMING:
1445 switch (sk->sk_state) {
1449 /* Already connecting */
1455 /* Already connected */
1471 /* Set destination address and psm */
1472 bacpy(&bt_sk(sk)->dst, dst);
1479 auth_type = l2cap_get_auth_type(chan);
/* LE data CID selects an LE link; everything else goes over ACL. */
1481 if (chan->dcid == L2CAP_CID_LE_DATA)
1482 hcon = hci_connect(hdev, LE_LINK, dst, BDADDR_LE_RANDOM,
1483 chan->sec_level, auth_type);
1485 hcon = hci_connect(hdev, ACL_LINK, dst, BDADDR_BREDR,
1486 chan->sec_level, auth_type);
1489 err = PTR_ERR(hcon);
1493 conn = l2cap_conn_add(hcon, 0);
/* LE allows only a single data channel per connection. */
1500 if (hcon->type == LE_LINK) {
1503 if (!list_empty(&conn->chan_l)) {
1512 /* Update source addr of the socket */
1513 bacpy(src, conn->src);
/* chan_add takes conn->chan_lock, so drop the channel lock around it
 * to respect the established lock ordering. */
1515 l2cap_chan_unlock(chan);
1516 l2cap_chan_add(conn, chan);
1517 l2cap_chan_lock(chan);
1519 l2cap_state_change(chan, BT_CONNECT);
1520 __set_chan_timer(chan, sk->sk_sndtimeo);
1522 if (hcon->state == BT_CONNECTED) {
1523 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1524 __clear_chan_timer(chan);
1525 if (l2cap_chan_check_security(chan))
1526 l2cap_state_change(chan, BT_CONNECTED);
1528 l2cap_do_start(chan);
1534 l2cap_chan_unlock(chan);
1535 hci_dev_unlock(hdev);
1540 int __l2cap_wait_ack(struct sock *sk)
1542 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1543 DECLARE_WAITQUEUE(wait, current);
1547 add_wait_queue(sk_sleep(sk), &wait);
1548 set_current_state(TASK_INTERRUPTIBLE);
1549 while (chan->unacked_frames > 0 && chan->conn) {
1553 if (signal_pending(current)) {
1554 err = sock_intr_errno(timeo);
1559 timeo = schedule_timeout(timeo);
1561 set_current_state(TASK_INTERRUPTIBLE);
1563 err = sock_error(sk);
1567 set_current_state(TASK_RUNNING);
1568 remove_wait_queue(sk_sleep(sk), &wait);
1572 static void l2cap_monitor_timeout(struct work_struct *work)
1574 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1575 monitor_timer.work);
1577 BT_DBG("chan %p", chan);
1579 l2cap_chan_lock(chan);
1581 if (chan->retry_count >= chan->remote_max_tx) {
1582 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1583 l2cap_chan_unlock(chan);
1584 l2cap_chan_put(chan);
1588 chan->retry_count++;
1589 __set_monitor_timer(chan);
1591 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1592 l2cap_chan_unlock(chan);
1593 l2cap_chan_put(chan);
1596 static void l2cap_retrans_timeout(struct work_struct *work)
1598 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1599 retrans_timer.work);
1601 BT_DBG("chan %p", chan);
1603 l2cap_chan_lock(chan);
1605 chan->retry_count = 1;
1606 __set_monitor_timer(chan);
1608 set_bit(CONN_WAIT_F, &chan->conn_state);
1610 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1612 l2cap_chan_unlock(chan);
1613 l2cap_chan_put(chan);
1616 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1618 struct sk_buff *skb;
1620 while ((skb = skb_peek(&chan->tx_q)) &&
1621 chan->unacked_frames) {
1622 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1625 skb = skb_dequeue(&chan->tx_q);
1628 chan->unacked_frames--;
1631 if (!chan->unacked_frames)
1632 __clear_retrans_timer(chan);
1635 static void l2cap_streaming_send(struct l2cap_chan *chan)
1637 struct sk_buff *skb;
1641 while ((skb = skb_dequeue(&chan->tx_q))) {
1642 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1643 control |= __set_txseq(chan, chan->next_tx_seq);
1644 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1646 if (chan->fcs == L2CAP_FCS_CRC16) {
1647 fcs = crc16(0, (u8 *)skb->data,
1648 skb->len - L2CAP_FCS_SIZE);
1649 put_unaligned_le16(fcs,
1650 skb->data + skb->len - L2CAP_FCS_SIZE);
1653 l2cap_do_send(chan, skb);
1655 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1659 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1661 struct sk_buff *skb, *tx_skb;
1665 skb = skb_peek(&chan->tx_q);
1669 while (bt_cb(skb)->tx_seq != tx_seq) {
1670 if (skb_queue_is_last(&chan->tx_q, skb))
1673 skb = skb_queue_next(&chan->tx_q, skb);
1676 if (chan->remote_max_tx &&
1677 bt_cb(skb)->retries == chan->remote_max_tx) {
1678 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1682 tx_skb = skb_clone(skb, GFP_ATOMIC);
1683 bt_cb(skb)->retries++;
1685 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1686 control &= __get_sar_mask(chan);
1688 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1689 control |= __set_ctrl_final(chan);
1691 control |= __set_reqseq(chan, chan->buffer_seq);
1692 control |= __set_txseq(chan, tx_seq);
1694 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1696 if (chan->fcs == L2CAP_FCS_CRC16) {
1697 fcs = crc16(0, (u8 *)tx_skb->data,
1698 tx_skb->len - L2CAP_FCS_SIZE);
1699 put_unaligned_le16(fcs,
1700 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1703 l2cap_do_send(chan, tx_skb);
1706 static int l2cap_ertm_send(struct l2cap_chan *chan)
1708 struct sk_buff *skb, *tx_skb;
1713 if (chan->state != BT_CONNECTED)
1716 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1718 if (chan->remote_max_tx &&
1719 bt_cb(skb)->retries == chan->remote_max_tx) {
1720 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1724 tx_skb = skb_clone(skb, GFP_ATOMIC);
1726 bt_cb(skb)->retries++;
1728 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1729 control &= __get_sar_mask(chan);
1731 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1732 control |= __set_ctrl_final(chan);
1734 control |= __set_reqseq(chan, chan->buffer_seq);
1735 control |= __set_txseq(chan, chan->next_tx_seq);
1737 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1739 if (chan->fcs == L2CAP_FCS_CRC16) {
1740 fcs = crc16(0, (u8 *)skb->data,
1741 tx_skb->len - L2CAP_FCS_SIZE);
1742 put_unaligned_le16(fcs, skb->data +
1743 tx_skb->len - L2CAP_FCS_SIZE);
1746 l2cap_do_send(chan, tx_skb);
1748 __set_retrans_timer(chan);
1750 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1752 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1754 if (bt_cb(skb)->retries == 1) {
1755 chan->unacked_frames++;
1758 __clear_ack_timer(chan);
1761 chan->frames_sent++;
1763 if (skb_queue_is_last(&chan->tx_q, skb))
1764 chan->tx_send_head = NULL;
1766 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1772 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1776 if (!skb_queue_empty(&chan->tx_q))
1777 chan->tx_send_head = chan->tx_q.next;
1779 chan->next_tx_seq = chan->expected_ack_seq;
1780 ret = l2cap_ertm_send(chan);
1784 static void __l2cap_send_ack(struct l2cap_chan *chan)
1788 control |= __set_reqseq(chan, chan->buffer_seq);
1790 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1791 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1792 set_bit(CONN_RNR_SENT, &chan->conn_state);
1793 l2cap_send_sframe(chan, control);
1797 if (l2cap_ertm_send(chan) > 0)
1800 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1801 l2cap_send_sframe(chan, control);
1804 static void l2cap_send_ack(struct l2cap_chan *chan)
1806 __clear_ack_timer(chan);
1807 __l2cap_send_ack(chan);
1810 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1812 struct srej_list *tail;
1815 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1816 control |= __set_ctrl_final(chan);
1818 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1819 control |= __set_reqseq(chan, tail->tx_seq);
1821 l2cap_send_sframe(chan, control);
1824 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1825 struct msghdr *msg, int len,
1826 int count, struct sk_buff *skb)
1828 struct l2cap_conn *conn = chan->conn;
1829 struct sk_buff **frag;
1832 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1838 /* Continuation fragments (no L2CAP header) */
1839 frag = &skb_shinfo(skb)->frag_list;
1841 count = min_t(unsigned int, conn->mtu, len);
1843 *frag = chan->ops->alloc_skb(chan, count,
1844 msg->msg_flags & MSG_DONTWAIT);
1847 return PTR_ERR(*frag);
1848 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1851 (*frag)->priority = skb->priority;
1856 frag = &(*frag)->next;
1862 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1863 struct msghdr *msg, size_t len,
1866 struct l2cap_conn *conn = chan->conn;
1867 struct sk_buff *skb;
1868 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1869 struct l2cap_hdr *lh;
1871 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1873 count = min_t(unsigned int, (conn->mtu - hlen), len);
1875 skb = chan->ops->alloc_skb(chan, count + hlen,
1876 msg->msg_flags & MSG_DONTWAIT);
1880 skb->priority = priority;
1882 /* Create L2CAP header */
1883 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1884 lh->cid = cpu_to_le16(chan->dcid);
1885 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1886 put_unaligned(chan->psm, skb_put(skb, 2));
1888 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1889 if (unlikely(err < 0)) {
1891 return ERR_PTR(err);
1896 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1897 struct msghdr *msg, size_t len,
1900 struct l2cap_conn *conn = chan->conn;
1901 struct sk_buff *skb;
1902 int err, count, hlen = L2CAP_HDR_SIZE;
1903 struct l2cap_hdr *lh;
1905 BT_DBG("chan %p len %d", chan, (int)len);
1907 count = min_t(unsigned int, (conn->mtu - hlen), len);
1909 skb = chan->ops->alloc_skb(chan, count + hlen,
1910 msg->msg_flags & MSG_DONTWAIT);
1914 skb->priority = priority;
1916 /* Create L2CAP header */
1917 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1918 lh->cid = cpu_to_le16(chan->dcid);
1919 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1921 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1922 if (unlikely(err < 0)) {
1924 return ERR_PTR(err);
1929 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1930 struct msghdr *msg, size_t len,
1931 u32 control, u16 sdulen)
1933 struct l2cap_conn *conn = chan->conn;
1934 struct sk_buff *skb;
1935 int err, count, hlen;
1936 struct l2cap_hdr *lh;
1938 BT_DBG("chan %p len %d", chan, (int)len);
1941 return ERR_PTR(-ENOTCONN);
1943 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1944 hlen = L2CAP_EXT_HDR_SIZE;
1946 hlen = L2CAP_ENH_HDR_SIZE;
1949 hlen += L2CAP_SDULEN_SIZE;
1951 if (chan->fcs == L2CAP_FCS_CRC16)
1952 hlen += L2CAP_FCS_SIZE;
1954 count = min_t(unsigned int, (conn->mtu - hlen), len);
1956 skb = chan->ops->alloc_skb(chan, count + hlen,
1957 msg->msg_flags & MSG_DONTWAIT);
1961 /* Create L2CAP header */
1962 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1963 lh->cid = cpu_to_le16(chan->dcid);
1964 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1966 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1969 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1971 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1972 if (unlikely(err < 0)) {
1974 return ERR_PTR(err);
1977 if (chan->fcs == L2CAP_FCS_CRC16)
1978 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1980 bt_cb(skb)->retries = 0;
1984 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1986 struct sk_buff *skb;
1987 struct sk_buff_head sar_queue;
1991 skb_queue_head_init(&sar_queue);
1992 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1993 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1995 return PTR_ERR(skb);
1997 __skb_queue_tail(&sar_queue, skb);
1998 len -= chan->remote_mps;
1999 size += chan->remote_mps;
2004 if (len > chan->remote_mps) {
2005 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
2006 buflen = chan->remote_mps;
2008 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
2012 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
2014 skb_queue_purge(&sar_queue);
2015 return PTR_ERR(skb);
2018 __skb_queue_tail(&sar_queue, skb);
2022 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
2023 if (chan->tx_send_head == NULL)
2024 chan->tx_send_head = sar_queue.next;
2029 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2032 struct sk_buff *skb;
2036 /* Connectionless channel */
2037 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2038 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2040 return PTR_ERR(skb);
2042 l2cap_do_send(chan, skb);
2046 switch (chan->mode) {
2047 case L2CAP_MODE_BASIC:
2048 /* Check outgoing MTU */
2049 if (len > chan->omtu)
2052 /* Create a basic PDU */
2053 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2055 return PTR_ERR(skb);
2057 l2cap_do_send(chan, skb);
2061 case L2CAP_MODE_ERTM:
2062 case L2CAP_MODE_STREAMING:
2063 /* Entire SDU fits into one PDU */
2064 if (len <= chan->remote_mps) {
2065 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
2066 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
2069 return PTR_ERR(skb);
2071 __skb_queue_tail(&chan->tx_q, skb);
2073 if (chan->tx_send_head == NULL)
2074 chan->tx_send_head = skb;
2077 /* Segment SDU into multiples PDUs */
2078 err = l2cap_sar_segment_sdu(chan, msg, len);
2083 if (chan->mode == L2CAP_MODE_STREAMING) {
2084 l2cap_streaming_send(chan);
2089 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
2090 test_bit(CONN_WAIT_F, &chan->conn_state)) {
2095 err = l2cap_ertm_send(chan);
2102 BT_DBG("bad state %1.1x", chan->mode);
2109 /* Copy frame to all raw sockets on that connection */
2110 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2112 struct sk_buff *nskb;
2113 struct l2cap_chan *chan;
2115 BT_DBG("conn %p", conn);
2117 mutex_lock(&conn->chan_lock);
2119 list_for_each_entry(chan, &conn->chan_l, list) {
2120 struct sock *sk = chan->sk;
2121 if (chan->chan_type != L2CAP_CHAN_RAW)
2124 /* Don't send frame to the socket it came from */
2127 nskb = skb_clone(skb, GFP_ATOMIC);
2131 if (chan->ops->recv(chan->data, nskb))
2135 mutex_unlock(&conn->chan_lock);
2138 /* ---- L2CAP signalling commands ---- */
2139 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2140 u8 code, u8 ident, u16 dlen, void *data)
2142 struct sk_buff *skb, **frag;
2143 struct l2cap_cmd_hdr *cmd;
2144 struct l2cap_hdr *lh;
2147 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2148 conn, code, ident, dlen);
2150 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2151 count = min_t(unsigned int, conn->mtu, len);
2153 skb = bt_skb_alloc(count, GFP_ATOMIC);
2157 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2158 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2160 if (conn->hcon->type == LE_LINK)
2161 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2163 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2165 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2168 cmd->len = cpu_to_le16(dlen);
2171 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2172 memcpy(skb_put(skb, count), data, count);
2178 /* Continuation fragments (no L2CAP header) */
2179 frag = &skb_shinfo(skb)->frag_list;
2181 count = min_t(unsigned int, conn->mtu, len);
2183 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2187 memcpy(skb_put(*frag, count), data, count);
2192 frag = &(*frag)->next;
2202 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2204 struct l2cap_conf_opt *opt = *ptr;
2207 len = L2CAP_CONF_OPT_SIZE + opt->len;
2215 *val = *((u8 *) opt->val);
2219 *val = get_unaligned_le16(opt->val);
2223 *val = get_unaligned_le32(opt->val);
2227 *val = (unsigned long) opt->val;
2231 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2235 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2237 struct l2cap_conf_opt *opt = *ptr;
2239 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2246 *((u8 *) opt->val) = val;
2250 put_unaligned_le16(val, opt->val);
2254 put_unaligned_le32(val, opt->val);
2258 memcpy(opt->val, (void *) val, len);
2262 *ptr += L2CAP_CONF_OPT_SIZE + len;
2265 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2267 struct l2cap_conf_efs efs;
2269 switch (chan->mode) {
2270 case L2CAP_MODE_ERTM:
2271 efs.id = chan->local_id;
2272 efs.stype = chan->local_stype;
2273 efs.msdu = cpu_to_le16(chan->local_msdu);
2274 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2275 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2276 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2279 case L2CAP_MODE_STREAMING:
2281 efs.stype = L2CAP_SERV_BESTEFFORT;
2282 efs.msdu = cpu_to_le16(chan->local_msdu);
2283 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2292 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2293 (unsigned long) &efs);
2296 static void l2cap_ack_timeout(struct work_struct *work)
2298 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2301 BT_DBG("chan %p", chan);
2303 l2cap_chan_lock(chan);
2305 __l2cap_send_ack(chan);
2307 l2cap_chan_unlock(chan);
2309 l2cap_chan_put(chan);
2312 static inline int l2cap_ertm_init(struct l2cap_chan *chan)
2316 chan->expected_ack_seq = 0;
2317 chan->unacked_frames = 0;
2318 chan->buffer_seq = 0;
2319 chan->num_acked = 0;
2320 chan->frames_sent = 0;
2322 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2323 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2324 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2326 skb_queue_head_init(&chan->srej_q);
2328 INIT_LIST_HEAD(&chan->srej_l);
2329 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2333 return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2336 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2339 case L2CAP_MODE_STREAMING:
2340 case L2CAP_MODE_ERTM:
2341 if (l2cap_mode_supported(mode, remote_feat_mask))
2345 return L2CAP_MODE_BASIC;
2349 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2351 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2354 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2356 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2359 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2361 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2362 __l2cap_ews_supported(chan)) {
2363 /* use extended control field */
2364 set_bit(FLAG_EXT_CTRL, &chan->flags);
2365 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2367 chan->tx_win = min_t(u16, chan->tx_win,
2368 L2CAP_DEFAULT_TX_WINDOW);
2369 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2373 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2375 struct l2cap_conf_req *req = data;
2376 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2377 void *ptr = req->data;
2380 BT_DBG("chan %p", chan);
2382 if (chan->num_conf_req || chan->num_conf_rsp)
2385 switch (chan->mode) {
2386 case L2CAP_MODE_STREAMING:
2387 case L2CAP_MODE_ERTM:
2388 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2391 if (__l2cap_efs_supported(chan))
2392 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2396 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2401 if (chan->imtu != L2CAP_DEFAULT_MTU)
2402 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2404 switch (chan->mode) {
2405 case L2CAP_MODE_BASIC:
2406 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2407 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2410 rfc.mode = L2CAP_MODE_BASIC;
2412 rfc.max_transmit = 0;
2413 rfc.retrans_timeout = 0;
2414 rfc.monitor_timeout = 0;
2415 rfc.max_pdu_size = 0;
2417 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2418 (unsigned long) &rfc);
2421 case L2CAP_MODE_ERTM:
2422 rfc.mode = L2CAP_MODE_ERTM;
2423 rfc.max_transmit = chan->max_tx;
2424 rfc.retrans_timeout = 0;
2425 rfc.monitor_timeout = 0;
2427 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2428 L2CAP_EXT_HDR_SIZE -
2431 rfc.max_pdu_size = cpu_to_le16(size);
2433 l2cap_txwin_setup(chan);
2435 rfc.txwin_size = min_t(u16, chan->tx_win,
2436 L2CAP_DEFAULT_TX_WINDOW);
2438 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2439 (unsigned long) &rfc);
2441 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2442 l2cap_add_opt_efs(&ptr, chan);
2444 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2447 if (chan->fcs == L2CAP_FCS_NONE ||
2448 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2449 chan->fcs = L2CAP_FCS_NONE;
2450 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2453 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2454 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2458 case L2CAP_MODE_STREAMING:
2459 rfc.mode = L2CAP_MODE_STREAMING;
2461 rfc.max_transmit = 0;
2462 rfc.retrans_timeout = 0;
2463 rfc.monitor_timeout = 0;
2465 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2466 L2CAP_EXT_HDR_SIZE -
2469 rfc.max_pdu_size = cpu_to_le16(size);
2471 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2472 (unsigned long) &rfc);
2474 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2475 l2cap_add_opt_efs(&ptr, chan);
2477 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2480 if (chan->fcs == L2CAP_FCS_NONE ||
2481 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2482 chan->fcs = L2CAP_FCS_NONE;
2483 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2488 req->dcid = cpu_to_le16(chan->dcid);
2489 req->flags = cpu_to_le16(0);
2494 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2496 struct l2cap_conf_rsp *rsp = data;
2497 void *ptr = rsp->data;
2498 void *req = chan->conf_req;
2499 int len = chan->conf_len;
2500 int type, hint, olen;
2502 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2503 struct l2cap_conf_efs efs;
2505 u16 mtu = L2CAP_DEFAULT_MTU;
2506 u16 result = L2CAP_CONF_SUCCESS;
2509 BT_DBG("chan %p", chan);
2511 while (len >= L2CAP_CONF_OPT_SIZE) {
2512 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2514 hint = type & L2CAP_CONF_HINT;
2515 type &= L2CAP_CONF_MASK;
2518 case L2CAP_CONF_MTU:
2522 case L2CAP_CONF_FLUSH_TO:
2523 chan->flush_to = val;
2526 case L2CAP_CONF_QOS:
2529 case L2CAP_CONF_RFC:
2530 if (olen == sizeof(rfc))
2531 memcpy(&rfc, (void *) val, olen);
2534 case L2CAP_CONF_FCS:
2535 if (val == L2CAP_FCS_NONE)
2536 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2539 case L2CAP_CONF_EFS:
2541 if (olen == sizeof(efs))
2542 memcpy(&efs, (void *) val, olen);
2545 case L2CAP_CONF_EWS:
2547 return -ECONNREFUSED;
2549 set_bit(FLAG_EXT_CTRL, &chan->flags);
2550 set_bit(CONF_EWS_RECV, &chan->conf_state);
2551 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2552 chan->remote_tx_win = val;
2559 result = L2CAP_CONF_UNKNOWN;
2560 *((u8 *) ptr++) = type;
2565 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2568 switch (chan->mode) {
2569 case L2CAP_MODE_STREAMING:
2570 case L2CAP_MODE_ERTM:
2571 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2572 chan->mode = l2cap_select_mode(rfc.mode,
2573 chan->conn->feat_mask);
2578 if (__l2cap_efs_supported(chan))
2579 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2581 return -ECONNREFUSED;
2584 if (chan->mode != rfc.mode)
2585 return -ECONNREFUSED;
2591 if (chan->mode != rfc.mode) {
2592 result = L2CAP_CONF_UNACCEPT;
2593 rfc.mode = chan->mode;
2595 if (chan->num_conf_rsp == 1)
2596 return -ECONNREFUSED;
2598 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2599 sizeof(rfc), (unsigned long) &rfc);
2602 if (result == L2CAP_CONF_SUCCESS) {
2603 /* Configure output options and let the other side know
2604 * which ones we don't like. */
2606 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2607 result = L2CAP_CONF_UNACCEPT;
2610 set_bit(CONF_MTU_DONE, &chan->conf_state);
2612 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2615 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2616 efs.stype != L2CAP_SERV_NOTRAFIC &&
2617 efs.stype != chan->local_stype) {
2619 result = L2CAP_CONF_UNACCEPT;
2621 if (chan->num_conf_req >= 1)
2622 return -ECONNREFUSED;
2624 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2626 (unsigned long) &efs);
2628 /* Send PENDING Conf Rsp */
2629 result = L2CAP_CONF_PENDING;
2630 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2635 case L2CAP_MODE_BASIC:
2636 chan->fcs = L2CAP_FCS_NONE;
2637 set_bit(CONF_MODE_DONE, &chan->conf_state);
2640 case L2CAP_MODE_ERTM:
2641 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2642 chan->remote_tx_win = rfc.txwin_size;
2644 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2646 chan->remote_max_tx = rfc.max_transmit;
2648 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2650 L2CAP_EXT_HDR_SIZE -
2653 rfc.max_pdu_size = cpu_to_le16(size);
2654 chan->remote_mps = size;
2656 rfc.retrans_timeout =
2657 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2658 rfc.monitor_timeout =
2659 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2661 set_bit(CONF_MODE_DONE, &chan->conf_state);
2663 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2664 sizeof(rfc), (unsigned long) &rfc);
2666 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2667 chan->remote_id = efs.id;
2668 chan->remote_stype = efs.stype;
2669 chan->remote_msdu = le16_to_cpu(efs.msdu);
2670 chan->remote_flush_to =
2671 le32_to_cpu(efs.flush_to);
2672 chan->remote_acc_lat =
2673 le32_to_cpu(efs.acc_lat);
2674 chan->remote_sdu_itime =
2675 le32_to_cpu(efs.sdu_itime);
2676 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2677 sizeof(efs), (unsigned long) &efs);
2681 case L2CAP_MODE_STREAMING:
2682 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2684 L2CAP_EXT_HDR_SIZE -
2687 rfc.max_pdu_size = cpu_to_le16(size);
2688 chan->remote_mps = size;
2690 set_bit(CONF_MODE_DONE, &chan->conf_state);
2692 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2693 sizeof(rfc), (unsigned long) &rfc);
2698 result = L2CAP_CONF_UNACCEPT;
2700 memset(&rfc, 0, sizeof(rfc));
2701 rfc.mode = chan->mode;
2704 if (result == L2CAP_CONF_SUCCESS)
2705 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2707 rsp->scid = cpu_to_le16(chan->dcid);
2708 rsp->result = cpu_to_le16(result);
2709 rsp->flags = cpu_to_le16(0x0000);
2714 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2716 struct l2cap_conf_req *req = data;
2717 void *ptr = req->data;
2720 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2721 struct l2cap_conf_efs efs;
2723 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2725 while (len >= L2CAP_CONF_OPT_SIZE) {
2726 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2729 case L2CAP_CONF_MTU:
2730 if (val < L2CAP_DEFAULT_MIN_MTU) {
2731 *result = L2CAP_CONF_UNACCEPT;
2732 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2735 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2738 case L2CAP_CONF_FLUSH_TO:
2739 chan->flush_to = val;
2740 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2744 case L2CAP_CONF_RFC:
2745 if (olen == sizeof(rfc))
2746 memcpy(&rfc, (void *)val, olen);
2748 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2749 rfc.mode != chan->mode)
2750 return -ECONNREFUSED;
2754 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2755 sizeof(rfc), (unsigned long) &rfc);
2758 case L2CAP_CONF_EWS:
2759 chan->tx_win = min_t(u16, val,
2760 L2CAP_DEFAULT_EXT_WINDOW);
2761 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2765 case L2CAP_CONF_EFS:
2766 if (olen == sizeof(efs))
2767 memcpy(&efs, (void *)val, olen);
2769 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2770 efs.stype != L2CAP_SERV_NOTRAFIC &&
2771 efs.stype != chan->local_stype)
2772 return -ECONNREFUSED;
2774 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2775 sizeof(efs), (unsigned long) &efs);
2780 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2781 return -ECONNREFUSED;
2783 chan->mode = rfc.mode;
2785 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2787 case L2CAP_MODE_ERTM:
2788 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2789 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2790 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2792 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2793 chan->local_msdu = le16_to_cpu(efs.msdu);
2794 chan->local_sdu_itime =
2795 le32_to_cpu(efs.sdu_itime);
2796 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2797 chan->local_flush_to =
2798 le32_to_cpu(efs.flush_to);
2802 case L2CAP_MODE_STREAMING:
2803 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2807 req->dcid = cpu_to_le16(chan->dcid);
2808 req->flags = cpu_to_le16(0x0000);
2813 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2815 struct l2cap_conf_rsp *rsp = data;
2816 void *ptr = rsp->data;
2818 BT_DBG("chan %p", chan);
2820 rsp->scid = cpu_to_le16(chan->dcid);
2821 rsp->result = cpu_to_le16(result);
2822 rsp->flags = cpu_to_le16(flags);
2827 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2829 struct l2cap_conn_rsp rsp;
2830 struct l2cap_conn *conn = chan->conn;
2833 rsp.scid = cpu_to_le16(chan->dcid);
2834 rsp.dcid = cpu_to_le16(chan->scid);
2835 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2836 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2837 l2cap_send_cmd(conn, chan->ident,
2838 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2840 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2843 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2844 l2cap_build_conf_req(chan, buf), buf);
2845 chan->num_conf_req++;
2848 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2852 struct l2cap_conf_rfc rfc;
2854 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2856 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2859 while (len >= L2CAP_CONF_OPT_SIZE) {
2860 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2863 case L2CAP_CONF_RFC:
2864 if (olen == sizeof(rfc))
2865 memcpy(&rfc, (void *)val, olen);
2870 /* Use sane default values in case a misbehaving remote device
2871 * did not send an RFC option.
2873 rfc.mode = chan->mode;
2874 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2875 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2876 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2878 BT_ERR("Expected RFC option was not found, using defaults");
2882 case L2CAP_MODE_ERTM:
2883 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2884 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2885 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2887 case L2CAP_MODE_STREAMING:
2888 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2892 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2894 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2896 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2899 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2900 cmd->ident == conn->info_ident) {
2901 cancel_delayed_work(&conn->info_timer);
2903 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2904 conn->info_ident = 0;
2906 l2cap_conn_start(conn);
/* Handle an incoming L2CAP Connect Request (BR/EDR).
 * Looks up a listening channel for the requested PSM, enforces link
 * security (SDP, PSM 0x0001, is exempt), creates the child channel and
 * answers with a Connect Response.  If the remote feature mask has not
 * been exchanged yet, an Information Request is also sent and the final
 * response is deferred (L2CAP_CR_PEND / L2CAP_CS_NO_INFO).
 * NOTE(review): several structural lines (braces/labels) are missing
 * from this extracted view; code kept byte-identical.
 */
2912 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2914 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2915 struct l2cap_conn_rsp rsp;
2916 struct l2cap_chan *chan = NULL, *pchan;
2917 struct sock *parent, *sk = NULL;
2918 int result, status = L2CAP_CS_NO_INFO;
2920 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2921 __le16 psm = req->psm;
2923 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
2925 /* Check if we have socket listening on psm */
2926 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
2928 result = L2CAP_CR_BAD_PSM;
2934 mutex_lock(&conn->chan_lock);
2937 /* Check if the ACL is secure enough (if not SDP) */
2938 if (psm != cpu_to_le16(0x0001) &&
2939 !hci_conn_check_link_mode(conn->hcon)) {
2940 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2941 result = L2CAP_CR_SEC_BLOCK;
2945 result = L2CAP_CR_NO_MEM;
2947 /* Check for backlog size */
2948 if (sk_acceptq_is_full(parent)) {
2949 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2953 chan = pchan->ops->new_connection(pchan->data);
2959 /* Check if we already have channel with that dcid */
2960 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2961 sock_set_flag(sk, SOCK_ZAPPED);
2962 chan->ops->close(chan->data);
2966 hci_conn_hold(conn->hcon);
2968 bacpy(&bt_sk(sk)->src, conn->src);
2969 bacpy(&bt_sk(sk)->dst, conn->dst);
2973 bt_accept_enqueue(parent, sk);
2975 __l2cap_chan_add(conn, chan);
2979 __set_chan_timer(chan, sk->sk_sndtimeo);
2981 chan->ident = cmd->ident;
2983 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2984 if (l2cap_chan_check_security(chan)) {
2985 if (bt_sk(sk)->defer_setup) {
/* Userspace must accept explicitly: stay in BT_CONNECT2 and
 * wake the listening socket.
 */
2986 __l2cap_state_change(chan, BT_CONNECT2);
2987 result = L2CAP_CR_PEND;
2988 status = L2CAP_CS_AUTHOR_PEND;
2989 parent->sk_data_ready(parent, 0);
2991 __l2cap_state_change(chan, BT_CONFIG);
2992 result = L2CAP_CR_SUCCESS;
2993 status = L2CAP_CS_NO_INFO;
2996 __l2cap_state_change(chan, BT_CONNECT2);
2997 result = L2CAP_CR_PEND;
2998 status = L2CAP_CS_AUTHEN_PEND;
3001 __l2cap_state_change(chan, BT_CONNECT2);
3002 result = L2CAP_CR_PEND;
3003 status = L2CAP_CS_NO_INFO;
3007 release_sock(parent);
3008 mutex_unlock(&conn->chan_lock);
3011 rsp.scid = cpu_to_le16(scid);
3012 rsp.dcid = cpu_to_le16(dcid);
3013 rsp.result = cpu_to_le16(result);
3014 rsp.status = cpu_to_le16(status);
3015 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3017 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
/* Feature mask still unknown: kick off the info exchange now. */
3018 struct l2cap_info_req info;
3019 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3021 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3022 conn->info_ident = l2cap_get_ident(conn);
3024 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3026 l2cap_send_cmd(conn, conn->info_ident,
3027 L2CAP_INFO_REQ, sizeof(info), &info);
3030 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3031 result == L2CAP_CR_SUCCESS) {
3033 set_bit(CONF_REQ_SENT, &chan->conf_state);
3034 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3035 l2cap_build_conf_req(chan, buf), buf);
3036 chan->num_conf_req++;
/* Handle an incoming Connect Response for a channel we initiated.
 * SUCCESS moves the channel to BT_CONFIG and sends our first Configure
 * Request (unless one was already sent); a pending result just sets
 * CONF_CONNECT_PEND; anything else deletes the channel with
 * ECONNREFUSED.  The channel is found by its scid, falling back to the
 * command ident when the scid is not known yet.
 */
3042 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3044 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3045 u16 scid, dcid, result, status;
3046 struct l2cap_chan *chan;
3050 scid = __le16_to_cpu(rsp->scid);
3051 dcid = __le16_to_cpu(rsp->dcid);
3052 result = __le16_to_cpu(rsp->result);
3053 status = __le16_to_cpu(rsp->status);
3055 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3056 dcid, scid, result, status);
3058 mutex_lock(&conn->chan_lock);
3061 chan = __l2cap_get_chan_by_scid(conn, scid);
3067 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3076 l2cap_chan_lock(chan);
3079 case L2CAP_CR_SUCCESS:
3080 l2cap_state_change(chan, BT_CONFIG);
3083 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3085 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3088 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3089 l2cap_build_conf_req(chan, req), req);
3090 chan->num_conf_req++;
3094 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3098 l2cap_chan_del(chan, ECONNREFUSED);
3102 l2cap_chan_unlock(chan);
3105 mutex_unlock(&conn->chan_lock);
/* Pick the channel FCS setting once configuration is complete. */
3110 static inline void set_default_fcs(struct l2cap_chan *chan)
3112 /* FCS is enabled only in ERTM or streaming mode, if one or both
 * sides request it.
 */
3115 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3116 chan->fcs = L2CAP_FCS_NONE;
3117 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3118 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request.
 * Accumulates (possibly fragmented, flag bit 0x0001 = continuation)
 * option data into chan->conf_req, and once the request is complete
 * parses it and answers with a Configure Response.  When both input and
 * output configuration are done the channel becomes BT_CONNECTED (ERTM
 * state is initialized first for ERTM mode).
 */
3121 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3123 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3126 struct l2cap_chan *chan;
3129 dcid = __le16_to_cpu(req->dcid);
3130 flags = __le16_to_cpu(req->flags);
3132 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3134 chan = l2cap_get_chan_by_scid(conn, dcid);
3138 l2cap_chan_lock(chan);
3140 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
/* Config is only legal while connecting/configuring: reject with
 * an invalid-CID command reject.
 */
3141 struct l2cap_cmd_rej_cid rej;
3143 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3144 rej.scid = cpu_to_le16(chan->scid);
3145 rej.dcid = cpu_to_le16(chan->dcid);
3147 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3152 /* Reject if config buffer is too small. */
3153 len = cmd_len - sizeof(*req);
3154 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3155 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3156 l2cap_build_conf_rsp(chan, rsp,
3157 L2CAP_CONF_REJECT, flags), rsp);
3162 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3163 chan->conf_len += len;
3165 if (flags & 0x0001) {
3166 /* Incomplete config. Send empty response. */
3167 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3168 l2cap_build_conf_rsp(chan, rsp,
3169 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3173 /* Complete config. */
3174 len = l2cap_parse_conf_req(chan, rsp);
3176 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3180 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3181 chan->num_conf_rsp++;
3183 /* Reset config buffer. */
3186 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3189 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3190 set_default_fcs(chan);
3192 l2cap_state_change(chan, BT_CONNECTED);
3194 chan->next_tx_seq = 0;
3195 chan->expected_tx_seq = 0;
3196 skb_queue_head_init(&chan->tx_q);
3197 if (chan->mode == L2CAP_MODE_ERTM)
3198 err = l2cap_ertm_init(chan);
3201 l2cap_send_disconn_req(chan->conn, chan, -err);
3203 l2cap_chan_ready(chan);
3208 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3210 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3211 l2cap_build_conf_req(chan, buf), buf);
3212 chan->num_conf_req++;
3215 /* Got Conf Rsp PENDING from remote side and asume we sent
3216 Conf Rsp PENDING in the code above */
3217 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3218 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3220 /* check compatibility */
3222 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3223 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3225 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3226 l2cap_build_conf_rsp(chan, rsp,
3227 L2CAP_CONF_SUCCESS, 0x0000), rsp);
3231 l2cap_chan_unlock(chan);
/* Handle an incoming Configure Response.
 * SUCCESS records the peer's RFC option; PENDING may complete our side
 * of a lockstep configuration; UNACCEPT renegotiates (bounded by
 * L2CAP_CONF_MAX_CONF_RSP attempts); any other result disconnects.
 * When output config was already done, the channel is promoted to
 * BT_CONNECTED here.
 */
3235 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3237 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3238 u16 scid, flags, result;
3239 struct l2cap_chan *chan;
3240 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3243 scid = __le16_to_cpu(rsp->scid);
3244 flags = __le16_to_cpu(rsp->flags);
3245 result = __le16_to_cpu(rsp->result);
3247 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3250 chan = l2cap_get_chan_by_scid(conn, scid);
3254 l2cap_chan_lock(chan);
3257 case L2CAP_CONF_SUCCESS:
3258 l2cap_conf_rfc_get(chan, rsp->data, len);
3259 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3262 case L2CAP_CONF_PENDING:
3263 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3265 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3268 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3271 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3275 /* check compatibility */
3277 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3278 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3280 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3281 l2cap_build_conf_rsp(chan, buf,
3282 L2CAP_CONF_SUCCESS, 0x0000), buf);
3286 case L2CAP_CONF_UNACCEPT:
3287 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3290 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3291 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3295 /* throw out any old stored conf requests */
3296 result = L2CAP_CONF_SUCCESS;
3297 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3300 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3304 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3305 L2CAP_CONF_REQ, len, req);
3306 chan->num_conf_req++;
3307 if (result != L2CAP_CONF_SUCCESS)
3313 l2cap_chan_set_err(chan, ECONNRESET);
3315 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3316 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3323 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3325 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3326 set_default_fcs(chan);
3328 l2cap_state_change(chan, BT_CONNECTED);
3329 chan->next_tx_seq = 0;
3330 chan->expected_tx_seq = 0;
3331 skb_queue_head_init(&chan->tx_q);
3332 if (chan->mode == L2CAP_MODE_ERTM)
3333 err = l2cap_ertm_init(chan);
3336 l2cap_send_disconn_req(chan->conn, chan, -err);
3338 l2cap_chan_ready(chan);
3342 l2cap_chan_unlock(chan);
/* Handle an incoming Disconnect Request: echo back a Disconnect
 * Response, shut the socket down, delete the channel (ECONNRESET) and
 * let the channel ops close it.  Note the req's dcid is OUR scid, hence
 * the lookup by dcid and the swapped cids in the response.
 */
3346 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3348 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3349 struct l2cap_disconn_rsp rsp;
3351 struct l2cap_chan *chan;
3354 scid = __le16_to_cpu(req->scid);
3355 dcid = __le16_to_cpu(req->dcid);
3357 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3359 mutex_lock(&conn->chan_lock);
3361 chan = __l2cap_get_chan_by_scid(conn, dcid);
3363 mutex_unlock(&conn->chan_lock);
3367 l2cap_chan_lock(chan);
3371 rsp.dcid = cpu_to_le16(chan->scid);
3372 rsp.scid = cpu_to_le16(chan->dcid);
3373 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3376 sk->sk_shutdown = SHUTDOWN_MASK;
3379 l2cap_chan_del(chan, ECONNRESET);
3381 l2cap_chan_unlock(chan);
3383 chan->ops->close(chan->data);
3385 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Disconnect Response: the peer confirmed our
 * disconnect, so delete the channel with no error and close it.
 */
3390 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3392 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3394 struct l2cap_chan *chan;
3396 scid = __le16_to_cpu(rsp->scid);
3397 dcid = __le16_to_cpu(rsp->dcid);
3399 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3401 mutex_lock(&conn->chan_lock);
3403 chan = __l2cap_get_chan_by_scid(conn, scid);
3405 mutex_unlock(&conn->chan_lock);
3409 l2cap_chan_lock(chan);
3411 l2cap_chan_del(chan, 0);
3413 l2cap_chan_unlock(chan);
3415 chan->ops->close(chan->data);
3417 mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request.
 * Answers FEAT_MASK with our feature mask (ERTM/streaming and, when
 * extended features are enabled, flow/window bits) and FIXED_CHAN with
 * the fixed-channel map; any other type gets L2CAP_IR_NOTSUPP.
 */
3422 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3424 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3427 type = __le16_to_cpu(req->type);
3429 BT_DBG("type 0x%4.4x", type);
3431 if (type == L2CAP_IT_FEAT_MASK) {
3433 u32 feat_mask = l2cap_feat_mask;
3434 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3435 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3436 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3438 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3441 feat_mask |= L2CAP_FEAT_EXT_FLOW
3442 | L2CAP_FEAT_EXT_WINDOW;
3444 put_unaligned_le32(feat_mask, rsp->data);
3445 l2cap_send_cmd(conn, cmd->ident,
3446 L2CAP_INFO_RSP, sizeof(buf), buf);
3447 } else if (type == L2CAP_IT_FIXED_CHAN) {
3449 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only when high speed is on. */
3452 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3454 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3456 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3457 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3458 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3459 l2cap_send_cmd(conn, cmd->ident,
3460 L2CAP_INFO_RSP, sizeof(buf), buf);
3462 struct l2cap_info_rsp rsp;
3463 rsp.type = cpu_to_le16(type);
3464 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3465 l2cap_send_cmd(conn, cmd->ident,
3466 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response.
 * Stale/duplicate responses (wrong ident or exchange already done) are
 * ignored.  On a FEAT_MASK reply we may chain a FIXED_CHAN request;
 * once the exchange is finished the feature-mask state is marked done
 * and pending channels are started via l2cap_conn_start().
 */
3472 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3474 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3477 type = __le16_to_cpu(rsp->type);
3478 result = __le16_to_cpu(rsp->result);
3480 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3482 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3483 if (cmd->ident != conn->info_ident ||
3484 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3487 cancel_delayed_work(&conn->info_timer);
3489 if (result != L2CAP_IR_SUCCESS) {
3490 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3491 conn->info_ident = 0;
3493 l2cap_conn_start(conn);
3499 case L2CAP_IT_FEAT_MASK:
3500 conn->feat_mask = get_unaligned_le32(rsp->data);
3502 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
/* Peer supports fixed channels: ask for the map next. */
3503 struct l2cap_info_req req;
3504 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3506 conn->info_ident = l2cap_get_ident(conn);
3508 l2cap_send_cmd(conn, conn->info_ident,
3509 L2CAP_INFO_REQ, sizeof(req), &req);
3511 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3512 conn->info_ident = 0;
3514 l2cap_conn_start(conn);
3518 case L2CAP_IT_FIXED_CHAN:
3519 conn->fixed_chan_mask = rsp->data[0];
3520 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3521 conn->info_ident = 0;
3523 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request.  Currently a placeholder:
 * validates the length and always answers L2CAP_CR_NO_MEM.
 */
3530 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3531 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3534 struct l2cap_create_chan_req *req = data;
3535 struct l2cap_create_chan_rsp rsp;
3538 if (cmd_len != sizeof(*req))
3544 psm = le16_to_cpu(req->psm);
3545 scid = le16_to_cpu(req->scid);
3547 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3549 /* Placeholder: Always reject */
3551 rsp.scid = cpu_to_le16(scid);
3552 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3553 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3555 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* AMP Create Channel Response shares the Connect Response layout, so
 * delegate to the regular handler.
 */
3561 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3562 struct l2cap_cmd_hdr *cmd, void *data)
3564 BT_DBG("conn %p", conn);
3566 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response for @icid with the given result code. */
3569 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3570 u16 icid, u16 result)
3572 struct l2cap_move_chan_rsp rsp;
3574 BT_DBG("icid %d, result %d", icid, result);
3576 rsp.icid = cpu_to_le16(icid);
3577 rsp.result = cpu_to_le16(result);
3579 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirm with a freshly allocated ident; the ident
 * is remembered on the channel so the confirm-response can be matched.
 */
3582 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3583 struct l2cap_chan *chan, u16 icid, u16 result)
3585 struct l2cap_move_chan_cfm cfm;
3588 BT_DBG("icid %d, result %d", icid, result);
3590 ident = l2cap_get_ident(conn);
3592 chan->ident = ident;
3594 cfm.icid = cpu_to_le16(icid);
3595 cfm.result = cpu_to_le16(result);
3597 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Acknowledge a Move Channel Confirm for @icid. */
3600 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3603 struct l2cap_move_chan_cfm_rsp rsp;
3605 BT_DBG("icid %d", icid);
3607 rsp.icid = cpu_to_le16(icid);
3608 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request.  Placeholder implementation: validate
 * the length and always refuse with L2CAP_MR_NOT_ALLOWED.
 */
3611 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3612 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3614 struct l2cap_move_chan_req *req = data;
3616 u16 result = L2CAP_MR_NOT_ALLOWED;
3618 if (cmd_len != sizeof(*req))
3621 icid = le16_to_cpu(req->icid);
3623 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3628 /* Placeholder: Always refuse */
3629 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response.  Placeholder: always reply with a
 * Move Channel Confirm carrying L2CAP_MC_UNCONFIRMED.
 */
3634 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3635 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3637 struct l2cap_move_chan_rsp *rsp = data;
3640 if (cmd_len != sizeof(*rsp))
3643 icid = le16_to_cpu(rsp->icid);
3644 result = le16_to_cpu(rsp->result);
3646 BT_DBG("icid %d, result %d", icid, result);
3648 /* Placeholder: Always unconfirmed */
3649 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirm: validate length and acknowledge it. */
3654 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3655 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3657 struct l2cap_move_chan_cfm *cfm = data;
3660 if (cmd_len != sizeof(*cfm))
3663 icid = le16_to_cpu(cfm->icid);
3664 result = le16_to_cpu(cfm->result);
3666 BT_DBG("icid %d, result %d", icid, result);
3668 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirm Response: length check and debug log
 * only — no further action is taken yet.
 */
3673 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3674 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3676 struct l2cap_move_chan_cfm_rsp *rsp = data;
3679 if (cmd_len != sizeof(*rsp))
3682 icid = le16_to_cpu(rsp->icid);
3684 BT_DBG("icid %d", icid);
/* Validate LE connection parameter update values.
 *
 * @min, @max:       connection interval bounds, 1.25 ms units
 *                   (valid range 6..3200 per the visible checks)
 * @latency:         slave latency in connection events (max 499)
 * @to_multiplier:   supervision timeout, 10 ms units (10..3200)
 *
 * The supervision timeout must exceed the effective connection
 * interval: max < to_multiplier * 8 (i.e. max * 1.25 ms * (latency+1)
 * bounded by the timeout), and latency must not exceed the derived
 * max_latency.  Returns 0 if the parameters are acceptable, -EINVAL
 * otherwise.
 *
 * NOTE(review): the extracted original was missing its return
 * statements; this restores the canonical validation logic.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
							u16 to_multiplier)
{
	u16 max_latency;

	if (min > max || min < 6 || max > 3200)
		return -EINVAL;

	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	if (max >= to_multiplier * 8)
		return -EINVAL;

	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
/* Handle an LE Connection Parameter Update Request (master side only).
 * Validates the length and the requested parameters, replies with
 * accepted/rejected, and on acceptance asks the controller to update
 * the connection parameters.
 */
3710 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3711 struct l2cap_cmd_hdr *cmd, u8 *data)
3713 struct hci_conn *hcon = conn->hcon;
3714 struct l2cap_conn_param_update_req *req;
3715 struct l2cap_conn_param_update_rsp rsp;
3716 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may apply a parameter update. */
3719 if (!(hcon->link_mode & HCI_LM_MASTER))
3722 cmd_len = __le16_to_cpu(cmd->len);
3723 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3726 req = (struct l2cap_conn_param_update_req *) data;
3727 min = __le16_to_cpu(req->min);
3728 max = __le16_to_cpu(req->max);
3729 latency = __le16_to_cpu(req->latency);
3730 to_multiplier = __le16_to_cpu(req->to_multiplier);
3732 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3733 min, max, latency, to_multiplier);
3735 memset(&rsp, 0, sizeof(rsp));
3737 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3739 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3741 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3743 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3747 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler.  Echo requests
 * are answered inline; unknown opcodes are logged as errors.
 */
3752 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3753 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3757 switch (cmd->code) {
3758 case L2CAP_COMMAND_REJ:
3759 l2cap_command_rej(conn, cmd, data);
3762 case L2CAP_CONN_REQ:
3763 err = l2cap_connect_req(conn, cmd, data);
3766 case L2CAP_CONN_RSP:
3767 err = l2cap_connect_rsp(conn, cmd, data);
3770 case L2CAP_CONF_REQ:
3771 err = l2cap_config_req(conn, cmd, cmd_len, data);
3774 case L2CAP_CONF_RSP:
3775 err = l2cap_config_rsp(conn, cmd, data);
3778 case L2CAP_DISCONN_REQ:
3779 err = l2cap_disconnect_req(conn, cmd, data);
3782 case L2CAP_DISCONN_RSP:
3783 err = l2cap_disconnect_rsp(conn, cmd, data);
3786 case L2CAP_ECHO_REQ:
/* Echo: bounce the payload straight back. */
3787 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3790 case L2CAP_ECHO_RSP:
3793 case L2CAP_INFO_REQ:
3794 err = l2cap_information_req(conn, cmd, data);
3797 case L2CAP_INFO_RSP:
3798 err = l2cap_information_rsp(conn, cmd, data);
3801 case L2CAP_CREATE_CHAN_REQ:
3802 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3805 case L2CAP_CREATE_CHAN_RSP:
3806 err = l2cap_create_channel_rsp(conn, cmd, data);
3809 case L2CAP_MOVE_CHAN_REQ:
3810 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3813 case L2CAP_MOVE_CHAN_RSP:
3814 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3817 case L2CAP_MOVE_CHAN_CFM:
3818 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3821 case L2CAP_MOVE_CHAN_CFM_RSP:
3822 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3826 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command.  Only the connection parameter
 * update request is handled; rejects and update responses are ignored,
 * unknown opcodes are logged.
 */
3834 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3835 struct l2cap_cmd_hdr *cmd, u8 *data)
3837 switch (cmd->code) {
3838 case L2CAP_COMMAND_REJ:
3841 case L2CAP_CONN_PARAM_UPDATE_REQ:
3842 return l2cap_conn_param_update_req(conn, cmd, data);
3844 case L2CAP_CONN_PARAM_UPDATE_RSP:
3848 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process a signaling-channel PDU: iterate over the contained commands,
 * dispatching each to the LE or BR/EDR handler depending on link type.
 * Malformed commands (length overruns or ident 0) stop processing; a
 * handler error triggers a Command Reject back to the peer.
 */
3853 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3854 struct sk_buff *skb)
3856 u8 *data = skb->data;
3858 struct l2cap_cmd_hdr cmd;
3861 l2cap_raw_recv(conn, skb);
3863 while (len >= L2CAP_CMD_HDR_SIZE) {
3865 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3866 data += L2CAP_CMD_HDR_SIZE;
3867 len -= L2CAP_CMD_HDR_SIZE;
3869 cmd_len = le16_to_cpu(cmd.len);
3871 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3873 if (cmd_len > len || !cmd.ident) {
3874 BT_DBG("corrupted command");
3878 if (conn->hcon->type == LE_LINK)
3879 err = l2cap_le_sig_cmd(conn, &cmd, data);
3881 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3884 struct l2cap_cmd_rej_unk rej;
3886 BT_ERR("Wrong link type (%d)", err);
3888 /* FIXME: Map err to a valid reason */
3889 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3890 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify (and strip) the trailing CRC16 FCS of a received frame when
 * the channel uses L2CAP_FCS_CRC16.  The CRC covers the L2CAP header
 * (enhanced or extended size) plus the payload.  Returns non-zero on
 * mismatch (the return statements fall outside this extracted view).
 */
3900 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3902 u16 our_fcs, rcv_fcs;
3905 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3906 hdr_size = L2CAP_EXT_HDR_SIZE;
3908 hdr_size = L2CAP_ENH_HDR_SIZE;
3910 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS first; it then sits just past skb->len. */
3911 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3912 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3913 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3915 if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) by sending whichever of RNR, pending I-frames
 * or RR is appropriate: RNR while locally busy, otherwise transmit
 * queued I-frames and fall back to an RR if none were sent.
 */
3921 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3925 chan->frames_sent = 0;
3927 control |= __set_reqseq(chan, chan->buffer_seq);
3929 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3930 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3931 l2cap_send_sframe(chan, control);
3932 set_bit(CONN_RNR_SENT, &chan->conn_state);
3935 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3936 l2cap_retransmit_frames(chan);
3938 l2cap_ertm_send(chan);
3940 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3941 chan->frames_sent == 0) {
3942 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3943 l2cap_send_sframe(chan, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq offset from buffer_seq.  Duplicates (same
 * tx_seq already queued) are detected via the equality check below.
 */
3947 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3949 struct sk_buff *next_skb;
3950 int tx_seq_offset, next_tx_seq_offset;
3952 bt_cb(skb)->tx_seq = tx_seq;
3953 bt_cb(skb)->sar = sar;
3955 next_skb = skb_peek(&chan->srej_q);
3957 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3960 if (bt_cb(next_skb)->tx_seq == tx_seq)
3963 next_tx_seq_offset = __seq_offset(chan,
3964 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3966 if (next_tx_seq_offset > tx_seq_offset) {
3967 __skb_queue_before(&chan->srej_q, next_skb, skb);
3971 if (skb_queue_is_last(&chan->srej_q, next_skb))
3974 next_skb = skb_queue_next(&chan->srej_q, next_skb);
/* Reached the end: new frame has the highest offset so far. */
3977 __skb_queue_tail(&chan->srej_q, skb);
/* Append @new_frag to @skb's frag_list (tracking the tail through
 * *last_frag to avoid walking the list) and account its length in the
 * head skb's len/data_len/truesize.
 */
3982 static void append_skb_frag(struct sk_buff *skb,
3983 struct sk_buff *new_frag, struct sk_buff **last_frag)
3985 /* skb->len reflects data in skb as well as all fragments
3986 * skb->data_len reflects only data in fragments
3988 if (!skb_has_frag_list(skb))
3989 skb_shinfo(skb)->frag_list = new_frag;
3991 new_frag->next = NULL;
3993 (*last_frag)->next = new_frag;
3994 *last_frag = new_frag;
3996 skb->len += new_frag->len;
3997 skb->data_len += new_frag->len;
3998 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from (possibly segmented) I-frames according to the
 * SAR bits in @control.  Unsegmented frames are delivered directly;
 * START frames record the announced sdu_len, CONTINUE/END frames are
 * chained onto chan->sdu via append_skb_frag(), and the completed SDU
 * is handed to chan->ops->recv().  Oversized or inconsistent SDUs are
 * dropped (cleanup path at the end frees chan->sdu).
 */
4001 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
4005 switch (__get_ctrl_sar(chan, control)) {
4006 case L2CAP_SAR_UNSEGMENTED:
4010 err = chan->ops->recv(chan->data, skb);
4013 case L2CAP_SAR_START:
/* First segment carries the total SDU length prefix. */
4017 chan->sdu_len = get_unaligned_le16(skb->data);
4018 skb_pull(skb, L2CAP_SDULEN_SIZE);
4020 if (chan->sdu_len > chan->imtu) {
4025 if (skb->len >= chan->sdu_len)
4029 chan->sdu_last_frag = skb;
4035 case L2CAP_SAR_CONTINUE:
4039 append_skb_frag(chan->sdu, skb,
4040 &chan->sdu_last_frag);
4043 if (chan->sdu->len >= chan->sdu_len)
4053 append_skb_frag(chan->sdu, skb,
4054 &chan->sdu_last_frag);
/* END segment: total length must match the announced sdu_len. */
4057 if (chan->sdu->len != chan->sdu_len)
4060 err = chan->ops->recv(chan->data, chan->sdu);
4063 /* Reassembly complete */
4065 chan->sdu_last_frag = NULL;
4073 kfree_skb(chan->sdu);
4075 chan->sdu_last_frag = NULL;
/* Enter ERTM local-busy: mark the state, drop any pending SREJ list and
 * arm the ack timer so an RNR gets sent.
 */
4082 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
4084 BT_DBG("chan %p, Enter local busy", chan);
4086 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4087 l2cap_seq_list_clear(&chan->srej_list);
4089 __set_ack_timer(chan);
/* Leave ERTM local-busy.  If we previously sent an RNR, poll the peer
 * with an RR (P-bit set) and enter the WAIT_F recovery state; then
 * clear the busy/RNR flags.
 */
4092 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
4096 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4099 control = __set_reqseq(chan, chan->buffer_seq);
4100 control |= __set_ctrl_poll(chan);
4101 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
4102 l2cap_send_sframe(chan, control);
4103 chan->retry_count = 1;
4105 __clear_retrans_timer(chan);
4106 __set_monitor_timer(chan);
4108 set_bit(CONN_WAIT_F, &chan->conn_state);
4111 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
4112 clear_bit(CONN_RNR_SENT, &chan->conn_state);
4114 BT_DBG("chan %p, Exit local busy", chan);
/* Public entry point to toggle local-busy; only meaningful in ERTM. */
4117 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4119 if (chan->mode == L2CAP_MODE_ERTM) {
4121 l2cap_ertm_enter_local_busy(chan);
4123 l2cap_ertm_exit_local_busy(chan);
/* Drain consecutive in-order frames from the SREJ queue starting at
 * @tx_seq, reassembling each; stop at the first gap or when local-busy
 * is raised.  A reassembly error disconnects the channel.
 */
4127 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
4129 struct sk_buff *skb;
4132 while ((skb = skb_peek(&chan->srej_q)) &&
4133 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4136 if (bt_cb(skb)->tx_seq != tx_seq)
4139 skb = skb_dequeue(&chan->srej_q);
4140 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
4141 err = l2cap_reassemble_sdu(chan, skb, control);
4144 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4148 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
4149 tx_seq = __next_seq(chan, tx_seq);
/* Walk the pending-SREJ list: entries up to and including @tx_seq are
 * considered answered; for the rest, resend an SREJ S-frame and requeue
 * the entry at the tail.
 */
4153 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4155 struct srej_list *l, *tmp;
4158 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
4159 if (l->tx_seq == tx_seq) {
4164 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4165 control |= __set_reqseq(chan, l->tx_seq);
4166 l2cap_send_sframe(chan, control);
4168 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ per missing sequence number between expected_tx_seq and
 * @tx_seq, recording each in both the srej seq list and the srej_l
 * bookkeeping list, and advance expected_tx_seq past @tx_seq.
 * Allocation is GFP_ATOMIC; failure aborts (error path outside view).
 */
4172 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
4174 struct srej_list *new;
4177 while (tx_seq != chan->expected_tx_seq) {
4178 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
4179 control |= __set_reqseq(chan, chan->expected_tx_seq);
4180 l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
4181 l2cap_send_sframe(chan, control);
4183 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
4187 new->tx_seq = chan->expected_tx_seq;
4189 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4191 list_add_tail(&new->list, &chan->srej_l);
4194 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* ERTM I-frame receive path.
 * Acks outstanding frames from req_seq, validates tx_seq against the
 * transmit window, and then either: delivers an in-sequence frame,
 * queues/orders an out-of-sequence frame while in SREJ recovery, or
 * starts SREJ recovery on a new gap.  Also handles the F-bit (leaving
 * WAIT_F) and periodic acking every (tx_win/6)+1 frames.
 */
4199 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4201 u16 tx_seq = __get_txseq(chan, rx_control);
4202 u16 req_seq = __get_reqseq(chan, rx_control);
4203 u8 sar = __get_ctrl_sar(chan, rx_control);
4204 int tx_seq_offset, expected_tx_seq_offset;
4205 int num_to_ack = (chan->tx_win/6) + 1;
4208 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
4209 tx_seq, rx_control);
4211 if (__is_ctrl_final(chan, rx_control) &&
4212 test_bit(CONN_WAIT_F, &chan->conn_state)) {
/* F-bit answers our poll: stop monitoring, resume retransmit. */
4213 __clear_monitor_timer(chan);
4214 if (chan->unacked_frames > 0)
4215 __set_retrans_timer(chan);
4216 clear_bit(CONN_WAIT_F, &chan->conn_state);
4219 chan->expected_ack_seq = req_seq;
4220 l2cap_drop_acked_frames(chan);
4222 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
4224 /* invalid tx_seq */
4225 if (tx_seq_offset >= chan->tx_win) {
4226 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4230 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4231 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
4232 l2cap_send_ack(chan);
4236 if (tx_seq == chan->expected_tx_seq)
4239 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4240 struct srej_list *first;
4242 first = list_first_entry(&chan->srej_l,
4243 struct srej_list, list);
4244 if (tx_seq == first->tx_seq) {
/* The oldest requested frame arrived: try to close the gap. */
4245 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4246 l2cap_check_srej_gap(chan, tx_seq);
4248 list_del(&first->list);
4251 if (list_empty(&chan->srej_l)) {
4252 chan->buffer_seq = chan->buffer_seq_srej;
4253 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
4254 l2cap_send_ack(chan);
4255 BT_DBG("chan %p, Exit SREJ_SENT", chan);
4258 struct srej_list *l;
4260 /* duplicated tx_seq */
4261 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
4264 list_for_each_entry(l, &chan->srej_l, list) {
4265 if (l->tx_seq == tx_seq) {
4266 l2cap_resend_srejframe(chan, tx_seq);
4271 err = l2cap_send_srejframe(chan, tx_seq);
4273 l2cap_send_disconn_req(chan->conn, chan, -err);
4278 expected_tx_seq_offset = __seq_offset(chan,
4279 chan->expected_tx_seq, chan->buffer_seq);
4281 /* duplicated tx_seq */
4282 if (tx_seq_offset < expected_tx_seq_offset)
4285 set_bit(CONN_SREJ_SENT, &chan->conn_state);
4287 BT_DBG("chan %p, Enter SREJ", chan);
4289 INIT_LIST_HEAD(&chan->srej_l);
4290 chan->buffer_seq_srej = chan->buffer_seq;
4292 __skb_queue_head_init(&chan->srej_q);
4293 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4295 /* Set P-bit only if there are some I-frames to ack. */
4296 if (__clear_ack_timer(chan))
4297 set_bit(CONN_SEND_PBIT, &chan->conn_state);
4299 err = l2cap_send_srejframe(chan, tx_seq);
4301 l2cap_send_disconn_req(chan->conn, chan, -err);
4308 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4310 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4311 bt_cb(skb)->tx_seq = tx_seq;
4312 bt_cb(skb)->sar = sar;
4313 __skb_queue_tail(&chan->srej_q, skb);
4317 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4318 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4321 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4325 if (__is_ctrl_final(chan, rx_control)) {
4326 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4327 l2cap_retransmit_frames(chan);
4331 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
4332 if (chan->num_acked == num_to_ack - 1)
4333 l2cap_send_ack(chan);
4335 __set_ack_timer(chan);
/* Handle a received RR S-frame: ack frames up to req_seq, then react to
 * the P-bit (answer the poll), the F-bit (poll response, possibly
 * triggering retransmission), or a plain RR (clear remote-busy and
 * resume sending).
 */
4344 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4346 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4347 __get_reqseq(chan, rx_control), rx_control);
4349 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4350 l2cap_drop_acked_frames(chan);
4352 if (__is_ctrl_poll(chan, rx_control)) {
4353 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4354 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4355 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4356 (chan->unacked_frames > 0))
4357 __set_retrans_timer(chan);
4359 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4360 l2cap_send_srejtail(chan);
4362 l2cap_send_i_or_rr_or_rnr(chan);
4365 } else if (__is_ctrl_final(chan, rx_control)) {
4366 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4368 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4369 l2cap_retransmit_frames(chan);
4372 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4373 (chan->unacked_frames > 0))
4374 __set_retrans_timer(chan);
4376 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4377 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4378 l2cap_send_ack(chan);
4380 l2cap_ertm_send(chan);
/* Handle a received REJ S-frame: ack up to the rejected sequence and
 * retransmit from there.  With the F-bit set, retransmit only if the
 * REJ was not already acted upon (CONN_REJ_ACT); while waiting for an
 * F-bit (WAIT_F) remember that a REJ was handled.
 */
4384 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4386 u16 tx_seq = __get_reqseq(chan, rx_control);
4388 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4390 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4392 chan->expected_ack_seq = tx_seq;
4393 l2cap_drop_acked_frames(chan);
4395 if (__is_ctrl_final(chan, rx_control)) {
4396 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4397 l2cap_retransmit_frames(chan);
4399 l2cap_retransmit_frames(chan);
4401 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4402 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a received SREJ S-frame: retransmit the single requested
 * frame.  P-bit: also ack and answer with the F-bit; F-bit: skip the
 * retransmit only when it matches an SREJ already acted on
 * (CONN_SREJ_ACT with the saved reqseq); plain SREJ: retransmit and,
 * in WAIT_F, remember the reqseq for the F-bit case.
 */
4405 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4407 u16 tx_seq = __get_reqseq(chan, rx_control);
4409 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4411 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4413 if (__is_ctrl_poll(chan, rx_control)) {
4414 chan->expected_ack_seq = tx_seq;
4415 l2cap_drop_acked_frames(chan);
4417 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4418 l2cap_retransmit_one_frame(chan, tx_seq);
4420 l2cap_ertm_send(chan);
4422 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4423 chan->srej_save_reqseq = tx_seq;
4424 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4426 } else if (__is_ctrl_final(chan, rx_control)) {
4427 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4428 chan->srej_save_reqseq == tx_seq)
4429 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4431 l2cap_retransmit_one_frame(chan, tx_seq);
4433 l2cap_retransmit_one_frame(chan, tx_seq);
4434 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4435 chan->srej_save_reqseq = tx_seq;
4436 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a received RNR S-frame: the peer is busy.  Mark remote-busy,
 * ack up to reqseq, stop retransmitting, and when polled (P-bit) reply
 * with the F-bit — either an RR/RNR, or the SREJ tail / an RR while in
 * SREJ recovery.
 */
4441 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4443 u16 tx_seq = __get_reqseq(chan, rx_control);
4445 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4447 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4448 chan->expected_ack_seq = tx_seq;
4449 l2cap_drop_acked_frames(chan);
4451 if (__is_ctrl_poll(chan, rx_control))
4452 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4454 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4455 __clear_retrans_timer(chan);
4456 if (__is_ctrl_poll(chan, rx_control))
4457 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4461 if (__is_ctrl_poll(chan, rx_control)) {
4462 l2cap_send_srejtail(chan);
4464 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4465 l2cap_send_sframe(chan, rx_control);
4469 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4471 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4473 if (__is_ctrl_final(chan, rx_control) &&
4474 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4475 __clear_monitor_timer(chan);
4476 if (chan->unacked_frames > 0)
4477 __set_retrans_timer(chan);
4478 clear_bit(CONN_WAIT_F, &chan->conn_state);
4481 switch (__get_ctrl_super(chan, rx_control)) {
4482 case L2CAP_SUPER_RR:
4483 l2cap_data_channel_rrframe(chan, rx_control);
4486 case L2CAP_SUPER_REJ:
4487 l2cap_data_channel_rejframe(chan, rx_control);
4490 case L2CAP_SUPER_SREJ:
4491 l2cap_data_channel_srejframe(chan, rx_control);
4494 case L2CAP_SUPER_RNR:
4495 l2cap_data_channel_rnrframe(chan, rx_control);
4503 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4507 int len, next_tx_seq_offset, req_seq_offset;
4509 __unpack_control(chan, skb);
4511 control = __get_control(chan, skb->data);
4512 skb_pull(skb, __ctrl_size(chan));
4516 * We can just drop the corrupted I-frame here.
4517 * Receiver will miss it and start proper recovery
4518 * procedures and ask retransmission.
4520 if (l2cap_check_fcs(chan, skb))
4523 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4524 len -= L2CAP_SDULEN_SIZE;
4526 if (chan->fcs == L2CAP_FCS_CRC16)
4527 len -= L2CAP_FCS_SIZE;
4529 if (len > chan->mps) {
4530 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4534 req_seq = __get_reqseq(chan, control);
4536 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4538 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4539 chan->expected_ack_seq);
4541 /* check for invalid req-seq */
4542 if (req_seq_offset > next_tx_seq_offset) {
4543 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4547 if (!__is_sframe(chan, control)) {
4549 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4553 l2cap_data_channel_iframe(chan, control, skb);
4557 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4561 l2cap_data_channel_sframe(chan, control, skb);
4571 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4573 struct l2cap_chan *chan;
4578 chan = l2cap_get_chan_by_scid(conn, cid);
4580 BT_DBG("unknown cid 0x%4.4x", cid);
4581 /* Drop packet and return */
4586 l2cap_chan_lock(chan);
4588 BT_DBG("chan %p, len %d", chan, skb->len);
4590 if (chan->state != BT_CONNECTED)
4593 switch (chan->mode) {
4594 case L2CAP_MODE_BASIC:
4595 /* If socket recv buffers overflows we drop data here
4596 * which is *bad* because L2CAP has to be reliable.
4597 * But we don't have any other choice. L2CAP doesn't
4598 * provide flow control mechanism. */
4600 if (chan->imtu < skb->len)
4603 if (!chan->ops->recv(chan->data, skb))
4607 case L2CAP_MODE_ERTM:
4608 l2cap_ertm_data_rcv(chan, skb);
4612 case L2CAP_MODE_STREAMING:
4613 control = __get_control(chan, skb->data);
4614 skb_pull(skb, __ctrl_size(chan));
4617 if (l2cap_check_fcs(chan, skb))
4620 if (__is_sar_start(chan, control))
4621 len -= L2CAP_SDULEN_SIZE;
4623 if (chan->fcs == L2CAP_FCS_CRC16)
4624 len -= L2CAP_FCS_SIZE;
4626 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4629 tx_seq = __get_txseq(chan, control);
4631 if (chan->expected_tx_seq != tx_seq) {
4632 /* Frame(s) missing - must discard partial SDU */
4633 kfree_skb(chan->sdu);
4635 chan->sdu_last_frag = NULL;
4638 /* TODO: Notify userland of missing data */
4641 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4643 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4644 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4649 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4657 l2cap_chan_unlock(chan);
4662 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4664 struct l2cap_chan *chan;
4666 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
4670 BT_DBG("chan %p, len %d", chan, skb->len);
4672 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4675 if (chan->imtu < skb->len)
4678 if (!chan->ops->recv(chan->data, skb))
4687 static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
4688 struct sk_buff *skb)
4690 struct l2cap_chan *chan;
4692 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
4696 BT_DBG("chan %p, len %d", chan, skb->len);
4698 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4701 if (chan->imtu < skb->len)
4704 if (!chan->ops->recv(chan->data, skb))
4713 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4715 struct l2cap_hdr *lh = (void *) skb->data;
4719 skb_pull(skb, L2CAP_HDR_SIZE);
4720 cid = __le16_to_cpu(lh->cid);
4721 len = __le16_to_cpu(lh->len);
4723 if (len != skb->len) {
4728 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4731 case L2CAP_CID_LE_SIGNALING:
4732 case L2CAP_CID_SIGNALING:
4733 l2cap_sig_channel(conn, skb);
4736 case L2CAP_CID_CONN_LESS:
4737 psm = get_unaligned((__le16 *) skb->data);
4739 l2cap_conless_channel(conn, psm, skb);
4742 case L2CAP_CID_LE_DATA:
4743 l2cap_att_channel(conn, cid, skb);
4747 if (smp_sig_channel(conn, skb))
4748 l2cap_conn_del(conn->hcon, EACCES);
4752 l2cap_data_channel(conn, cid, skb);
4757 /* ---- L2CAP interface with lower layer (HCI) ---- */
4759 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4761 int exact = 0, lm1 = 0, lm2 = 0;
4762 struct l2cap_chan *c;
4764 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4766 /* Find listening sockets and check their link_mode */
4767 read_lock(&chan_list_lock);
4768 list_for_each_entry(c, &chan_list, global_l) {
4769 struct sock *sk = c->sk;
4771 if (c->state != BT_LISTEN)
4774 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4775 lm1 |= HCI_LM_ACCEPT;
4776 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4777 lm1 |= HCI_LM_MASTER;
4779 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4780 lm2 |= HCI_LM_ACCEPT;
4781 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4782 lm2 |= HCI_LM_MASTER;
4785 read_unlock(&chan_list_lock);
4787 return exact ? lm1 : lm2;
4790 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4792 struct l2cap_conn *conn;
4794 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4797 conn = l2cap_conn_add(hcon, status);
4799 l2cap_conn_ready(conn);
4801 l2cap_conn_del(hcon, bt_to_errno(status));
4806 int l2cap_disconn_ind(struct hci_conn *hcon)
4808 struct l2cap_conn *conn = hcon->l2cap_data;
4810 BT_DBG("hcon %p", hcon);
4813 return HCI_ERROR_REMOTE_USER_TERM;
4814 return conn->disc_reason;
4817 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4819 BT_DBG("hcon %p reason %d", hcon, reason);
4821 l2cap_conn_del(hcon, bt_to_errno(reason));
4825 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4827 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4830 if (encrypt == 0x00) {
4831 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4832 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4833 } else if (chan->sec_level == BT_SECURITY_HIGH)
4834 l2cap_chan_close(chan, ECONNREFUSED);
4836 if (chan->sec_level == BT_SECURITY_MEDIUM)
4837 __clear_chan_timer(chan);
4841 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4843 struct l2cap_conn *conn = hcon->l2cap_data;
4844 struct l2cap_chan *chan;
4849 BT_DBG("conn %p", conn);
4851 if (hcon->type == LE_LINK) {
4852 if (!status && encrypt)
4853 smp_distribute_keys(conn, 0);
4854 cancel_delayed_work(&conn->security_timer);
4857 mutex_lock(&conn->chan_lock);
4859 list_for_each_entry(chan, &conn->chan_l, list) {
4860 l2cap_chan_lock(chan);
4862 BT_DBG("chan->scid %d", chan->scid);
4864 if (chan->scid == L2CAP_CID_LE_DATA) {
4865 if (!status && encrypt) {
4866 chan->sec_level = hcon->sec_level;
4867 l2cap_chan_ready(chan);
4870 l2cap_chan_unlock(chan);
4874 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4875 l2cap_chan_unlock(chan);
4879 if (!status && (chan->state == BT_CONNECTED ||
4880 chan->state == BT_CONFIG)) {
4881 l2cap_check_encryption(chan, encrypt);
4882 l2cap_chan_unlock(chan);
4886 if (chan->state == BT_CONNECT) {
4888 l2cap_send_conn_req(chan);
4890 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4892 } else if (chan->state == BT_CONNECT2) {
4893 struct sock *sk = chan->sk;
4894 struct l2cap_conn_rsp rsp;
4900 if (bt_sk(sk)->defer_setup) {
4901 struct sock *parent = bt_sk(sk)->parent;
4902 res = L2CAP_CR_PEND;
4903 stat = L2CAP_CS_AUTHOR_PEND;
4905 parent->sk_data_ready(parent, 0);
4907 __l2cap_state_change(chan, BT_CONFIG);
4908 res = L2CAP_CR_SUCCESS;
4909 stat = L2CAP_CS_NO_INFO;
4912 __l2cap_state_change(chan, BT_DISCONN);
4913 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4914 res = L2CAP_CR_SEC_BLOCK;
4915 stat = L2CAP_CS_NO_INFO;
4920 rsp.scid = cpu_to_le16(chan->dcid);
4921 rsp.dcid = cpu_to_le16(chan->scid);
4922 rsp.result = cpu_to_le16(res);
4923 rsp.status = cpu_to_le16(stat);
4924 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4928 l2cap_chan_unlock(chan);
4931 mutex_unlock(&conn->chan_lock);
4936 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4938 struct l2cap_conn *conn = hcon->l2cap_data;
4941 conn = l2cap_conn_add(hcon, 0);
4946 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4948 if (!(flags & ACL_CONT)) {
4949 struct l2cap_hdr *hdr;
4950 struct l2cap_chan *chan;
4955 BT_ERR("Unexpected start frame (len %d)", skb->len);
4956 kfree_skb(conn->rx_skb);
4957 conn->rx_skb = NULL;
4959 l2cap_conn_unreliable(conn, ECOMM);
4962 /* Start fragment always begin with Basic L2CAP header */
4963 if (skb->len < L2CAP_HDR_SIZE) {
4964 BT_ERR("Frame is too short (len %d)", skb->len);
4965 l2cap_conn_unreliable(conn, ECOMM);
4969 hdr = (struct l2cap_hdr *) skb->data;
4970 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4971 cid = __le16_to_cpu(hdr->cid);
4973 if (len == skb->len) {
4974 /* Complete frame received */
4975 l2cap_recv_frame(conn, skb);
4979 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4981 if (skb->len > len) {
4982 BT_ERR("Frame is too long (len %d, expected len %d)",
4984 l2cap_conn_unreliable(conn, ECOMM);
4988 chan = l2cap_get_chan_by_scid(conn, cid);
4990 if (chan && chan->sk) {
4991 struct sock *sk = chan->sk;
4994 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4995 BT_ERR("Frame exceeding recv MTU (len %d, "
4999 l2cap_conn_unreliable(conn, ECOMM);
5005 /* Allocate skb for the complete frame (with header) */
5006 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5010 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5012 conn->rx_len = len - skb->len;
5014 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5016 if (!conn->rx_len) {
5017 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5018 l2cap_conn_unreliable(conn, ECOMM);
5022 if (skb->len > conn->rx_len) {
5023 BT_ERR("Fragment is too long (len %d, expected %d)",
5024 skb->len, conn->rx_len);
5025 kfree_skb(conn->rx_skb);
5026 conn->rx_skb = NULL;
5028 l2cap_conn_unreliable(conn, ECOMM);
5032 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5034 conn->rx_len -= skb->len;
5036 if (!conn->rx_len) {
5037 /* Complete frame received */
5038 l2cap_recv_frame(conn, conn->rx_skb);
5039 conn->rx_skb = NULL;
5048 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5050 struct l2cap_chan *c;
5052 read_lock(&chan_list_lock);
5054 list_for_each_entry(c, &chan_list, global_l) {
5055 struct sock *sk = c->sk;
5057 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5058 batostr(&bt_sk(sk)->src),
5059 batostr(&bt_sk(sk)->dst),
5060 c->state, __le16_to_cpu(c->psm),
5061 c->scid, c->dcid, c->imtu, c->omtu,
5062 c->sec_level, c->mode);
5065 read_unlock(&chan_list_lock);
5070 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5072 return single_open(file, l2cap_debugfs_show, inode->i_private);
5075 static const struct file_operations l2cap_debugfs_fops = {
5076 .open = l2cap_debugfs_open,
5078 .llseek = seq_lseek,
5079 .release = single_release,
5082 static struct dentry *l2cap_debugfs;
5084 int __init l2cap_init(void)
5088 err = l2cap_init_sockets();
5093 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5094 bt_debugfs, NULL, &l2cap_debugfs_fops);
5096 BT_ERR("Failed to create L2CAP debug file");
5102 void l2cap_exit(void)
5104 debugfs_remove(l2cap_debugfs);
5105 l2cap_cleanup_sockets();
5108 module_param(disable_ertm, bool, 0644);
5109 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");