2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
44 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
45 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
47 static LIST_HEAD(chan_list);
48 static DEFINE_RWLOCK(chan_list_lock);
50 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
51 u8 code, u8 ident, u16 dlen, void *data);
52 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
55 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
56 struct l2cap_chan *chan, int err);
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
61 /* ---- L2CAP channels ---- */
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
67 list_for_each_entry(c, &conn->chan_l, list) {
74 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
78 list_for_each_entry(c, &conn->chan_l, list) {
85 /* Find channel with given SCID.
86 * Returns locked channel. */
87 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
91 mutex_lock(&conn->chan_lock);
92 c = __l2cap_get_chan_by_scid(conn, cid);
95 mutex_unlock(&conn->chan_lock);
100 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
102 struct l2cap_chan *c;
104 list_for_each_entry(c, &conn->chan_l, list) {
105 if (c->ident == ident)
111 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
113 struct l2cap_chan *c;
115 list_for_each_entry(c, &chan_list, global_l) {
116 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
122 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
126 write_lock(&chan_list_lock);
128 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
141 for (p = 0x1001; p < 0x1100; p += 2)
142 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
143 chan->psm = cpu_to_le16(p);
144 chan->sport = cpu_to_le16(p);
151 write_unlock(&chan_list_lock);
155 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
157 write_lock(&chan_list_lock);
161 write_unlock(&chan_list_lock);
166 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
168 u16 cid = L2CAP_CID_DYN_START;
170 for (; cid < L2CAP_CID_DYN_END; cid++) {
171 if (!__l2cap_get_chan_by_scid(conn, cid))
178 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
180 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
181 state_to_string(state));
184 chan->ops->state_change(chan, state);
187 static void l2cap_state_change(struct l2cap_chan *chan, int state)
189 struct sock *sk = chan->sk;
192 __l2cap_state_change(chan, state);
196 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
198 struct sock *sk = chan->sk;
203 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
205 struct sock *sk = chan->sk;
208 __l2cap_chan_set_err(chan, err);
212 static void __set_retrans_timer(struct l2cap_chan *chan)
214 if (!delayed_work_pending(&chan->monitor_timer) &&
215 chan->retrans_timeout) {
216 l2cap_set_timer(chan, &chan->retrans_timer,
217 msecs_to_jiffies(chan->retrans_timeout));
221 static void __set_monitor_timer(struct l2cap_chan *chan)
223 __clear_retrans_timer(chan);
224 if (chan->monitor_timeout) {
225 l2cap_set_timer(chan, &chan->monitor_timer,
226 msecs_to_jiffies(chan->monitor_timeout));
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
243 /* ---- L2CAP sequence number lists ---- */
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
254 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
256 size_t alloc_size, i;
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
262 alloc_size = roundup_pow_of_two(size);
264 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
268 seq_list->mask = alloc_size - 1;
269 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
270 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
271 for (i = 0; i < alloc_size; i++)
272 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
277 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
279 kfree(seq_list->list);
282 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
285 /* Constant-time check for list membership */
286 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
289 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
291 u16 mask = seq_list->mask;
293 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
294 /* In case someone tries to pop the head of an empty list */
295 return L2CAP_SEQ_LIST_CLEAR;
296 } else if (seq_list->head == seq) {
297 /* Head can be removed in constant time */
298 seq_list->head = seq_list->list[seq & mask];
299 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
301 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
302 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
303 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 /* Walk the list to find the sequence number */
307 u16 prev = seq_list->head;
308 while (seq_list->list[prev & mask] != seq) {
309 prev = seq_list->list[prev & mask];
310 if (prev == L2CAP_SEQ_LIST_TAIL)
311 return L2CAP_SEQ_LIST_CLEAR;
314 /* Unlink the number from the list and clear it */
315 seq_list->list[prev & mask] = seq_list->list[seq & mask];
316 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
317 if (seq_list->tail == seq)
318 seq_list->tail = prev;
323 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
325 /* Remove the head in constant time */
326 return l2cap_seq_list_remove(seq_list, seq_list->head);
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
336 for (i = 0; i <= seq_list->mask; i++)
337 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
343 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
345 u16 mask = seq_list->mask;
347 /* All appends happen in constant time */
349 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
352 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
353 seq_list->head = seq;
355 seq_list->list[seq_list->tail & mask] = seq;
357 seq_list->tail = seq;
358 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
361 static void l2cap_chan_timeout(struct work_struct *work)
363 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
365 struct l2cap_conn *conn = chan->conn;
368 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
370 mutex_lock(&conn->chan_lock);
371 l2cap_chan_lock(chan);
373 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
374 reason = ECONNREFUSED;
375 else if (chan->state == BT_CONNECT &&
376 chan->sec_level != BT_SECURITY_SDP)
377 reason = ECONNREFUSED;
381 l2cap_chan_close(chan, reason);
383 l2cap_chan_unlock(chan);
385 chan->ops->close(chan);
386 mutex_unlock(&conn->chan_lock);
388 l2cap_chan_put(chan);
391 struct l2cap_chan *l2cap_chan_create(void)
393 struct l2cap_chan *chan;
395 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
399 mutex_init(&chan->lock);
401 write_lock(&chan_list_lock);
402 list_add(&chan->global_l, &chan_list);
403 write_unlock(&chan_list_lock);
405 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
407 chan->state = BT_OPEN;
409 atomic_set(&chan->refcnt, 1);
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
414 BT_DBG("chan %p", chan);
419 void l2cap_chan_destroy(struct l2cap_chan *chan)
421 write_lock(&chan_list_lock);
422 list_del(&chan->global_l);
423 write_unlock(&chan_list_lock);
425 l2cap_chan_put(chan);
428 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
430 chan->fcs = L2CAP_FCS_CRC16;
431 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
432 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
433 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
434 chan->sec_level = BT_SECURITY_LOW;
436 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
439 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
441 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
442 __le16_to_cpu(chan->psm), chan->dcid);
444 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
448 switch (chan->chan_type) {
449 case L2CAP_CHAN_CONN_ORIENTED:
450 if (conn->hcon->type == LE_LINK) {
452 chan->omtu = L2CAP_DEFAULT_MTU;
453 chan->scid = L2CAP_CID_LE_DATA;
454 chan->dcid = L2CAP_CID_LE_DATA;
456 /* Alloc CID for connection-oriented socket */
457 chan->scid = l2cap_alloc_cid(conn);
458 chan->omtu = L2CAP_DEFAULT_MTU;
462 case L2CAP_CHAN_CONN_LESS:
463 /* Connectionless socket */
464 chan->scid = L2CAP_CID_CONN_LESS;
465 chan->dcid = L2CAP_CID_CONN_LESS;
466 chan->omtu = L2CAP_DEFAULT_MTU;
469 case L2CAP_CHAN_CONN_FIX_A2MP:
470 chan->scid = L2CAP_CID_A2MP;
471 chan->dcid = L2CAP_CID_A2MP;
472 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
473 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
477 /* Raw socket can send/recv signalling messages only */
478 chan->scid = L2CAP_CID_SIGNALING;
479 chan->dcid = L2CAP_CID_SIGNALING;
480 chan->omtu = L2CAP_DEFAULT_MTU;
483 chan->local_id = L2CAP_BESTEFFORT_ID;
484 chan->local_stype = L2CAP_SERV_BESTEFFORT;
485 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
486 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
487 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
488 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
490 l2cap_chan_hold(chan);
492 list_add(&chan->list, &conn->chan_l);
495 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
497 mutex_lock(&conn->chan_lock);
498 __l2cap_chan_add(conn, chan);
499 mutex_unlock(&conn->chan_lock);
502 void l2cap_chan_del(struct l2cap_chan *chan, int err)
504 struct l2cap_conn *conn = chan->conn;
506 __clear_chan_timer(chan);
508 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
511 /* Delete from channel list */
512 list_del(&chan->list);
514 l2cap_chan_put(chan);
518 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
519 hci_conn_put(conn->hcon);
522 if (chan->ops->teardown)
523 chan->ops->teardown(chan, err);
525 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
529 case L2CAP_MODE_BASIC:
532 case L2CAP_MODE_ERTM:
533 __clear_retrans_timer(chan);
534 __clear_monitor_timer(chan);
535 __clear_ack_timer(chan);
537 skb_queue_purge(&chan->srej_q);
539 l2cap_seq_list_free(&chan->srej_list);
540 l2cap_seq_list_free(&chan->retrans_list);
544 case L2CAP_MODE_STREAMING:
545 skb_queue_purge(&chan->tx_q);
552 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
554 struct l2cap_conn *conn = chan->conn;
555 struct sock *sk = chan->sk;
557 BT_DBG("chan %p state %s sk %p", chan,
558 state_to_string(chan->state), sk);
560 switch (chan->state) {
562 if (chan->ops->teardown)
563 chan->ops->teardown(chan, 0);
568 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
569 conn->hcon->type == ACL_LINK) {
570 __set_chan_timer(chan, sk->sk_sndtimeo);
571 l2cap_send_disconn_req(conn, chan, reason);
573 l2cap_chan_del(chan, reason);
577 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
578 conn->hcon->type == ACL_LINK) {
579 struct l2cap_conn_rsp rsp;
582 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
583 result = L2CAP_CR_SEC_BLOCK;
585 result = L2CAP_CR_BAD_PSM;
586 l2cap_state_change(chan, BT_DISCONN);
588 rsp.scid = cpu_to_le16(chan->dcid);
589 rsp.dcid = cpu_to_le16(chan->scid);
590 rsp.result = cpu_to_le16(result);
591 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
592 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
596 l2cap_chan_del(chan, reason);
601 l2cap_chan_del(chan, reason);
605 if (chan->ops->teardown)
606 chan->ops->teardown(chan, 0);
611 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
613 if (chan->chan_type == L2CAP_CHAN_RAW) {
614 switch (chan->sec_level) {
615 case BT_SECURITY_HIGH:
616 return HCI_AT_DEDICATED_BONDING_MITM;
617 case BT_SECURITY_MEDIUM:
618 return HCI_AT_DEDICATED_BONDING;
620 return HCI_AT_NO_BONDING;
622 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
623 if (chan->sec_level == BT_SECURITY_LOW)
624 chan->sec_level = BT_SECURITY_SDP;
626 if (chan->sec_level == BT_SECURITY_HIGH)
627 return HCI_AT_NO_BONDING_MITM;
629 return HCI_AT_NO_BONDING;
631 switch (chan->sec_level) {
632 case BT_SECURITY_HIGH:
633 return HCI_AT_GENERAL_BONDING_MITM;
634 case BT_SECURITY_MEDIUM:
635 return HCI_AT_GENERAL_BONDING;
637 return HCI_AT_NO_BONDING;
642 /* Service level security */
643 int l2cap_chan_check_security(struct l2cap_chan *chan)
645 struct l2cap_conn *conn = chan->conn;
648 auth_type = l2cap_get_auth_type(chan);
650 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
653 static u8 l2cap_get_ident(struct l2cap_conn *conn)
657 /* Get next available identificator.
658 * 1 - 128 are used by kernel.
659 * 129 - 199 are reserved.
660 * 200 - 254 are used by utilities like l2ping, etc.
663 spin_lock(&conn->lock);
665 if (++conn->tx_ident > 128)
670 spin_unlock(&conn->lock);
675 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
677 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
680 BT_DBG("code 0x%2.2x", code);
685 if (lmp_no_flush_capable(conn->hcon->hdev))
686 flags = ACL_START_NO_FLUSH;
690 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
691 skb->priority = HCI_PRIO_MAX;
693 hci_send_acl(conn->hchan, skb, flags);
696 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
698 struct hci_conn *hcon = chan->conn->hcon;
701 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
704 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
705 lmp_no_flush_capable(hcon->hdev))
706 flags = ACL_START_NO_FLUSH;
710 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
711 hci_send_acl(chan->conn->hchan, skb, flags);
714 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
716 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
717 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
719 if (enh & L2CAP_CTRL_FRAME_TYPE) {
722 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
723 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
730 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
731 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
738 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
740 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
741 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
743 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
746 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
747 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
754 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
755 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
762 static inline void __unpack_control(struct l2cap_chan *chan,
765 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
766 __unpack_extended_control(get_unaligned_le32(skb->data),
767 &bt_cb(skb)->control);
768 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
770 __unpack_enhanced_control(get_unaligned_le16(skb->data),
771 &bt_cb(skb)->control);
772 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
776 static u32 __pack_extended_control(struct l2cap_ctrl *control)
780 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
781 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
783 if (control->sframe) {
784 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
785 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
786 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
788 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
789 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
795 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
799 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
800 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
802 if (control->sframe) {
803 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
804 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
805 packed |= L2CAP_CTRL_FRAME_TYPE;
807 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
808 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
814 static inline void __pack_control(struct l2cap_chan *chan,
815 struct l2cap_ctrl *control,
818 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
819 put_unaligned_le32(__pack_extended_control(control),
820 skb->data + L2CAP_HDR_SIZE);
822 put_unaligned_le16(__pack_enhanced_control(control),
823 skb->data + L2CAP_HDR_SIZE);
827 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
829 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
830 return L2CAP_EXT_HDR_SIZE;
832 return L2CAP_ENH_HDR_SIZE;
835 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
839 struct l2cap_hdr *lh;
840 int hlen = __ertm_hdr_size(chan);
842 if (chan->fcs == L2CAP_FCS_CRC16)
843 hlen += L2CAP_FCS_SIZE;
845 skb = bt_skb_alloc(hlen, GFP_KERNEL);
848 return ERR_PTR(-ENOMEM);
850 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
851 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
852 lh->cid = cpu_to_le16(chan->dcid);
854 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
855 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
857 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
859 if (chan->fcs == L2CAP_FCS_CRC16) {
860 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
861 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
864 skb->priority = HCI_PRIO_MAX;
868 static void l2cap_send_sframe(struct l2cap_chan *chan,
869 struct l2cap_ctrl *control)
874 BT_DBG("chan %p, control %p", chan, control);
876 if (!control->sframe)
879 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
883 if (control->super == L2CAP_SUPER_RR)
884 clear_bit(CONN_RNR_SENT, &chan->conn_state);
885 else if (control->super == L2CAP_SUPER_RNR)
886 set_bit(CONN_RNR_SENT, &chan->conn_state);
888 if (control->super != L2CAP_SUPER_SREJ) {
889 chan->last_acked_seq = control->reqseq;
890 __clear_ack_timer(chan);
893 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
894 control->final, control->poll, control->super);
896 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
897 control_field = __pack_extended_control(control);
899 control_field = __pack_enhanced_control(control);
901 skb = l2cap_create_sframe_pdu(chan, control_field);
903 l2cap_do_send(chan, skb);
906 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
908 struct l2cap_ctrl control;
910 BT_DBG("chan %p, poll %d", chan, poll);
912 memset(&control, 0, sizeof(control));
916 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
917 control.super = L2CAP_SUPER_RNR;
919 control.super = L2CAP_SUPER_RR;
921 control.reqseq = chan->buffer_seq;
922 l2cap_send_sframe(chan, &control);
925 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
927 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
930 static void l2cap_send_conn_req(struct l2cap_chan *chan)
932 struct l2cap_conn *conn = chan->conn;
933 struct l2cap_conn_req req;
935 req.scid = cpu_to_le16(chan->scid);
938 chan->ident = l2cap_get_ident(conn);
940 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
942 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
945 static void l2cap_chan_ready(struct l2cap_chan *chan)
947 /* This clears all conf flags, including CONF_NOT_COMPLETE */
948 chan->conf_state = 0;
949 __clear_chan_timer(chan);
951 chan->state = BT_CONNECTED;
953 chan->ops->ready(chan);
956 static void l2cap_do_start(struct l2cap_chan *chan)
958 struct l2cap_conn *conn = chan->conn;
960 if (conn->hcon->type == LE_LINK) {
961 l2cap_chan_ready(chan);
965 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
966 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
969 if (l2cap_chan_check_security(chan) &&
970 __l2cap_no_conn_pending(chan))
971 l2cap_send_conn_req(chan);
973 struct l2cap_info_req req;
974 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
976 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
977 conn->info_ident = l2cap_get_ident(conn);
979 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
981 l2cap_send_cmd(conn, conn->info_ident,
982 L2CAP_INFO_REQ, sizeof(req), &req);
986 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
988 u32 local_feat_mask = l2cap_feat_mask;
990 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
993 case L2CAP_MODE_ERTM:
994 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
995 case L2CAP_MODE_STREAMING:
996 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1002 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
1004 struct sock *sk = chan->sk;
1005 struct l2cap_disconn_req req;
1010 if (chan->mode == L2CAP_MODE_ERTM) {
1011 __clear_retrans_timer(chan);
1012 __clear_monitor_timer(chan);
1013 __clear_ack_timer(chan);
1016 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1017 __l2cap_state_change(chan, BT_DISCONN);
1021 req.dcid = cpu_to_le16(chan->dcid);
1022 req.scid = cpu_to_le16(chan->scid);
1023 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1024 L2CAP_DISCONN_REQ, sizeof(req), &req);
1027 __l2cap_state_change(chan, BT_DISCONN);
1028 __l2cap_chan_set_err(chan, err);
1032 /* ---- L2CAP connections ---- */
1033 static void l2cap_conn_start(struct l2cap_conn *conn)
1035 struct l2cap_chan *chan, *tmp;
1037 BT_DBG("conn %p", conn);
1039 mutex_lock(&conn->chan_lock);
1041 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1042 struct sock *sk = chan->sk;
1044 l2cap_chan_lock(chan);
1046 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1047 l2cap_chan_unlock(chan);
1051 if (chan->state == BT_CONNECT) {
1052 if (!l2cap_chan_check_security(chan) ||
1053 !__l2cap_no_conn_pending(chan)) {
1054 l2cap_chan_unlock(chan);
1058 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1059 && test_bit(CONF_STATE2_DEVICE,
1060 &chan->conf_state)) {
1061 l2cap_chan_close(chan, ECONNRESET);
1062 l2cap_chan_unlock(chan);
1066 l2cap_send_conn_req(chan);
1068 } else if (chan->state == BT_CONNECT2) {
1069 struct l2cap_conn_rsp rsp;
1071 rsp.scid = cpu_to_le16(chan->dcid);
1072 rsp.dcid = cpu_to_le16(chan->scid);
1074 if (l2cap_chan_check_security(chan)) {
1076 if (test_bit(BT_SK_DEFER_SETUP,
1077 &bt_sk(sk)->flags)) {
1078 struct sock *parent = bt_sk(sk)->parent;
1079 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1080 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1082 parent->sk_data_ready(parent, 0);
1085 __l2cap_state_change(chan, BT_CONFIG);
1086 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1087 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1091 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1092 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1095 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1098 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1099 rsp.result != L2CAP_CR_SUCCESS) {
1100 l2cap_chan_unlock(chan);
1104 set_bit(CONF_REQ_SENT, &chan->conf_state);
1105 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1106 l2cap_build_conf_req(chan, buf), buf);
1107 chan->num_conf_req++;
1110 l2cap_chan_unlock(chan);
1113 mutex_unlock(&conn->chan_lock);
1116 /* Find socket with cid and source/destination bdaddr.
1117 * Returns closest match, locked.
1119 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1123 struct l2cap_chan *c, *c1 = NULL;
1125 read_lock(&chan_list_lock);
1127 list_for_each_entry(c, &chan_list, global_l) {
1128 struct sock *sk = c->sk;
1130 if (state && c->state != state)
1133 if (c->scid == cid) {
1134 int src_match, dst_match;
1135 int src_any, dst_any;
1138 src_match = !bacmp(&bt_sk(sk)->src, src);
1139 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1140 if (src_match && dst_match) {
1141 read_unlock(&chan_list_lock);
1146 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1147 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1148 if ((src_match && dst_any) || (src_any && dst_match) ||
1149 (src_any && dst_any))
1154 read_unlock(&chan_list_lock);
1159 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1161 struct sock *parent, *sk;
1162 struct l2cap_chan *chan, *pchan;
1166 /* Check if we have socket listening on cid */
1167 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1168 conn->src, conn->dst);
1176 chan = pchan->ops->new_connection(pchan);
1182 hci_conn_hold(conn->hcon);
1184 bacpy(&bt_sk(sk)->src, conn->src);
1185 bacpy(&bt_sk(sk)->dst, conn->dst);
1187 bt_accept_enqueue(parent, sk);
1189 l2cap_chan_add(conn, chan);
1191 l2cap_chan_ready(chan);
1194 release_sock(parent);
1197 static void l2cap_conn_ready(struct l2cap_conn *conn)
1199 struct l2cap_chan *chan;
1201 BT_DBG("conn %p", conn);
1203 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1204 l2cap_le_conn_ready(conn);
1206 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1207 smp_conn_security(conn, conn->hcon->pending_sec_level);
1209 mutex_lock(&conn->chan_lock);
1211 list_for_each_entry(chan, &conn->chan_l, list) {
1213 l2cap_chan_lock(chan);
1215 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1216 l2cap_chan_unlock(chan);
1220 if (conn->hcon->type == LE_LINK) {
1221 if (smp_conn_security(conn, chan->sec_level))
1222 l2cap_chan_ready(chan);
1224 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1225 struct sock *sk = chan->sk;
1226 __clear_chan_timer(chan);
1228 __l2cap_state_change(chan, BT_CONNECTED);
1229 sk->sk_state_change(sk);
1232 } else if (chan->state == BT_CONNECT)
1233 l2cap_do_start(chan);
1235 l2cap_chan_unlock(chan);
1238 mutex_unlock(&conn->chan_lock);
1241 /* Notify sockets that we cannot guaranty reliability anymore */
1242 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1244 struct l2cap_chan *chan;
1246 BT_DBG("conn %p", conn);
1248 mutex_lock(&conn->chan_lock);
1250 list_for_each_entry(chan, &conn->chan_l, list) {
1251 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1252 __l2cap_chan_set_err(chan, err);
1255 mutex_unlock(&conn->chan_lock);
1258 static void l2cap_info_timeout(struct work_struct *work)
1260 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1263 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1264 conn->info_ident = 0;
1266 l2cap_conn_start(conn);
1269 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1271 struct l2cap_conn *conn = hcon->l2cap_data;
1272 struct l2cap_chan *chan, *l;
1277 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1279 kfree_skb(conn->rx_skb);
1281 mutex_lock(&conn->chan_lock);
1284 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1285 l2cap_chan_hold(chan);
1286 l2cap_chan_lock(chan);
1288 l2cap_chan_del(chan, err);
1290 l2cap_chan_unlock(chan);
1292 chan->ops->close(chan);
1293 l2cap_chan_put(chan);
1296 mutex_unlock(&conn->chan_lock);
1298 hci_chan_del(conn->hchan);
1300 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1301 cancel_delayed_work_sync(&conn->info_timer);
1303 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1304 cancel_delayed_work_sync(&conn->security_timer);
1305 smp_chan_destroy(conn);
1308 hcon->l2cap_data = NULL;
1312 static void security_timeout(struct work_struct *work)
1314 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1315 security_timer.work);
1317 BT_DBG("conn %p", conn);
1319 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1320 smp_chan_destroy(conn);
1321 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1325 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1327 struct l2cap_conn *conn = hcon->l2cap_data;
1328 struct hci_chan *hchan;
1333 hchan = hci_chan_create(hcon);
1337 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1339 hci_chan_del(hchan);
1343 hcon->l2cap_data = conn;
1345 conn->hchan = hchan;
1347 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1349 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1350 conn->mtu = hcon->hdev->le_mtu;
1352 conn->mtu = hcon->hdev->acl_mtu;
1354 conn->src = &hcon->hdev->bdaddr;
1355 conn->dst = &hcon->dst;
1357 conn->feat_mask = 0;
1359 spin_lock_init(&conn->lock);
1360 mutex_init(&conn->chan_lock);
1362 INIT_LIST_HEAD(&conn->chan_l);
1364 if (hcon->type == LE_LINK)
1365 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1367 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1369 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1374 /* ---- Socket interface ---- */
1376 /* Find socket with psm and source / destination bdaddr.
1377 * Returns closest match.
1379 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1383 struct l2cap_chan *c, *c1 = NULL;
1385 read_lock(&chan_list_lock);
1387 list_for_each_entry(c, &chan_list, global_l) {
1388 struct sock *sk = c->sk;
1390 if (state && c->state != state)
1393 if (c->psm == psm) {
1394 int src_match, dst_match;
1395 int src_any, dst_any;
1398 src_match = !bacmp(&bt_sk(sk)->src, src);
1399 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1400 if (src_match && dst_match) {
1401 read_unlock(&chan_list_lock);
1406 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1407 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1408 if ((src_match && dst_any) || (src_any && dst_match) ||
1409 (src_any && dst_any))
1414 read_unlock(&chan_list_lock);
1419 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1420 bdaddr_t *dst, u8 dst_type)
1422 struct sock *sk = chan->sk;
1423 bdaddr_t *src = &bt_sk(sk)->src;
1424 struct l2cap_conn *conn;
1425 struct hci_conn *hcon;
1426 struct hci_dev *hdev;
1430 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1431 dst_type, __le16_to_cpu(chan->psm));
1433 hdev = hci_get_route(dst, src);
1435 return -EHOSTUNREACH;
1439 l2cap_chan_lock(chan);
1441 /* PSM must be odd and lsb of upper byte must be 0 */
1442 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1443 chan->chan_type != L2CAP_CHAN_RAW) {
1448 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1453 switch (chan->mode) {
1454 case L2CAP_MODE_BASIC:
1456 case L2CAP_MODE_ERTM:
1457 case L2CAP_MODE_STREAMING:
1466 switch (chan->state) {
1470 /* Already connecting */
1475 /* Already connected */
1489 /* Set destination address and psm */
1491 bacpy(&bt_sk(sk)->dst, dst);
1497 auth_type = l2cap_get_auth_type(chan);
1499 if (chan->dcid == L2CAP_CID_LE_DATA)
1500 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1501 chan->sec_level, auth_type);
1503 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1504 chan->sec_level, auth_type);
1507 err = PTR_ERR(hcon);
1511 conn = l2cap_conn_add(hcon, 0);
1518 if (hcon->type == LE_LINK) {
1521 if (!list_empty(&conn->chan_l)) {
1530 /* Update source addr of the socket */
1531 bacpy(src, conn->src);
1533 l2cap_chan_unlock(chan);
1534 l2cap_chan_add(conn, chan);
1535 l2cap_chan_lock(chan);
1537 l2cap_state_change(chan, BT_CONNECT);
1538 __set_chan_timer(chan, sk->sk_sndtimeo);
1540 if (hcon->state == BT_CONNECTED) {
1541 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1542 __clear_chan_timer(chan);
1543 if (l2cap_chan_check_security(chan))
1544 l2cap_state_change(chan, BT_CONNECTED);
1546 l2cap_do_start(chan);
1552 l2cap_chan_unlock(chan);
1553 hci_dev_unlock(hdev);
1558 int __l2cap_wait_ack(struct sock *sk)
1560 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1561 DECLARE_WAITQUEUE(wait, current);
1565 add_wait_queue(sk_sleep(sk), &wait);
1566 set_current_state(TASK_INTERRUPTIBLE);
1567 while (chan->unacked_frames > 0 && chan->conn) {
1571 if (signal_pending(current)) {
1572 err = sock_intr_errno(timeo);
1577 timeo = schedule_timeout(timeo);
1579 set_current_state(TASK_INTERRUPTIBLE);
1581 err = sock_error(sk);
1585 set_current_state(TASK_RUNNING);
1586 remove_wait_queue(sk_sleep(sk), &wait);
1590 static void l2cap_monitor_timeout(struct work_struct *work)
1592 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1593 monitor_timer.work);
1595 BT_DBG("chan %p", chan);
1597 l2cap_chan_lock(chan);
1600 l2cap_chan_unlock(chan);
1601 l2cap_chan_put(chan);
1605 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1607 l2cap_chan_unlock(chan);
1608 l2cap_chan_put(chan);
1611 static void l2cap_retrans_timeout(struct work_struct *work)
1613 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1614 retrans_timer.work);
1616 BT_DBG("chan %p", chan);
1618 l2cap_chan_lock(chan);
1621 l2cap_chan_unlock(chan);
1622 l2cap_chan_put(chan);
1626 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1627 l2cap_chan_unlock(chan);
1628 l2cap_chan_put(chan);
1631 static void l2cap_streaming_send(struct l2cap_chan *chan,
1632 struct sk_buff_head *skbs)
1634 struct sk_buff *skb;
1635 struct l2cap_ctrl *control;
1637 BT_DBG("chan %p, skbs %p", chan, skbs);
1639 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1641 while (!skb_queue_empty(&chan->tx_q)) {
1643 skb = skb_dequeue(&chan->tx_q);
1645 bt_cb(skb)->control.retries = 1;
1646 control = &bt_cb(skb)->control;
1648 control->reqseq = 0;
1649 control->txseq = chan->next_tx_seq;
1651 __pack_control(chan, control, skb);
1653 if (chan->fcs == L2CAP_FCS_CRC16) {
1654 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1655 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1658 l2cap_do_send(chan, skb);
1660 BT_DBG("Sent txseq %d", (int)control->txseq);
1662 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1663 chan->frames_sent++;
1667 static int l2cap_ertm_send(struct l2cap_chan *chan)
1669 struct sk_buff *skb, *tx_skb;
1670 struct l2cap_ctrl *control;
1673 BT_DBG("chan %p", chan);
1675 if (chan->state != BT_CONNECTED)
1678 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1681 while (chan->tx_send_head &&
1682 chan->unacked_frames < chan->remote_tx_win &&
1683 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1685 skb = chan->tx_send_head;
1687 bt_cb(skb)->control.retries = 1;
1688 control = &bt_cb(skb)->control;
1690 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1693 control->reqseq = chan->buffer_seq;
1694 chan->last_acked_seq = chan->buffer_seq;
1695 control->txseq = chan->next_tx_seq;
1697 __pack_control(chan, control, skb);
1699 if (chan->fcs == L2CAP_FCS_CRC16) {
1700 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1701 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1704 /* Clone after data has been modified. Data is assumed to be
1705 read-only (for locking purposes) on cloned sk_buffs.
1707 tx_skb = skb_clone(skb, GFP_KERNEL);
1712 __set_retrans_timer(chan);
1714 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1715 chan->unacked_frames++;
1716 chan->frames_sent++;
1719 if (skb_queue_is_last(&chan->tx_q, skb))
1720 chan->tx_send_head = NULL;
1722 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1724 l2cap_do_send(chan, tx_skb);
1725 BT_DBG("Sent txseq %d", (int)control->txseq);
1728 BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
1729 (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));
1734 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1736 struct l2cap_ctrl control;
1737 struct sk_buff *skb;
1738 struct sk_buff *tx_skb;
1741 BT_DBG("chan %p", chan);
1743 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1746 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1747 seq = l2cap_seq_list_pop(&chan->retrans_list);
1749 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1751 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1756 bt_cb(skb)->control.retries++;
1757 control = bt_cb(skb)->control;
1759 if (chan->max_tx != 0 &&
1760 bt_cb(skb)->control.retries > chan->max_tx) {
1761 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1762 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1763 l2cap_seq_list_clear(&chan->retrans_list);
1767 control.reqseq = chan->buffer_seq;
1768 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1773 if (skb_cloned(skb)) {
1774 /* Cloned sk_buffs are read-only, so we need a
1777 tx_skb = skb_copy(skb, GFP_ATOMIC);
1779 tx_skb = skb_clone(skb, GFP_ATOMIC);
1783 l2cap_seq_list_clear(&chan->retrans_list);
1787 /* Update skb contents */
1788 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1789 put_unaligned_le32(__pack_extended_control(&control),
1790 tx_skb->data + L2CAP_HDR_SIZE);
1792 put_unaligned_le16(__pack_enhanced_control(&control),
1793 tx_skb->data + L2CAP_HDR_SIZE);
1796 if (chan->fcs == L2CAP_FCS_CRC16) {
1797 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1798 put_unaligned_le16(fcs, skb_put(tx_skb,
1802 l2cap_do_send(chan, tx_skb);
1804 BT_DBG("Resent txseq %d", control.txseq);
1806 chan->last_acked_seq = chan->buffer_seq;
1810 static void l2cap_retransmit(struct l2cap_chan *chan,
1811 struct l2cap_ctrl *control)
1813 BT_DBG("chan %p, control %p", chan, control);
1815 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1816 l2cap_ertm_resend(chan);
1819 static void l2cap_retransmit_all(struct l2cap_chan *chan,
1820 struct l2cap_ctrl *control)
1822 struct sk_buff *skb;
1824 BT_DBG("chan %p, control %p", chan, control);
1827 set_bit(CONN_SEND_FBIT, &chan->conn_state);
1829 l2cap_seq_list_clear(&chan->retrans_list);
1831 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1834 if (chan->unacked_frames) {
1835 skb_queue_walk(&chan->tx_q, skb) {
1836 if (bt_cb(skb)->control.txseq == control->reqseq ||
1837 skb == chan->tx_send_head)
1841 skb_queue_walk_from(&chan->tx_q, skb) {
1842 if (skb == chan->tx_send_head)
1845 l2cap_seq_list_append(&chan->retrans_list,
1846 bt_cb(skb)->control.txseq);
1849 l2cap_ertm_resend(chan);
1853 static void l2cap_send_ack(struct l2cap_chan *chan)
1855 struct l2cap_ctrl control;
1856 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1857 chan->last_acked_seq);
1860 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1861 chan, chan->last_acked_seq, chan->buffer_seq);
1863 memset(&control, 0, sizeof(control));
1866 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1867 chan->rx_state == L2CAP_RX_STATE_RECV) {
1868 __clear_ack_timer(chan);
1869 control.super = L2CAP_SUPER_RNR;
1870 control.reqseq = chan->buffer_seq;
1871 l2cap_send_sframe(chan, &control);
1873 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1874 l2cap_ertm_send(chan);
1875 /* If any i-frames were sent, they included an ack */
1876 if (chan->buffer_seq == chan->last_acked_seq)
1880 /* Ack now if the tx window is 3/4ths full.
1881 * Calculate without mul or div
1883 threshold = chan->tx_win;
1884 threshold += threshold << 1;
1887 BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
1890 if (frames_to_ack >= threshold) {
1891 __clear_ack_timer(chan);
1892 control.super = L2CAP_SUPER_RR;
1893 control.reqseq = chan->buffer_seq;
1894 l2cap_send_sframe(chan, &control);
1899 __set_ack_timer(chan);
1903 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1904 struct msghdr *msg, int len,
1905 int count, struct sk_buff *skb)
1907 struct l2cap_conn *conn = chan->conn;
1908 struct sk_buff **frag;
1911 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1917 /* Continuation fragments (no L2CAP header) */
1918 frag = &skb_shinfo(skb)->frag_list;
1920 struct sk_buff *tmp;
1922 count = min_t(unsigned int, conn->mtu, len);
1924 tmp = chan->ops->alloc_skb(chan, count,
1925 msg->msg_flags & MSG_DONTWAIT);
1927 return PTR_ERR(tmp);
1931 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1934 (*frag)->priority = skb->priority;
1939 skb->len += (*frag)->len;
1940 skb->data_len += (*frag)->len;
1942 frag = &(*frag)->next;
1948 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1949 struct msghdr *msg, size_t len,
1952 struct l2cap_conn *conn = chan->conn;
1953 struct sk_buff *skb;
1954 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1955 struct l2cap_hdr *lh;
1957 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1959 count = min_t(unsigned int, (conn->mtu - hlen), len);
1961 skb = chan->ops->alloc_skb(chan, count + hlen,
1962 msg->msg_flags & MSG_DONTWAIT);
1966 skb->priority = priority;
1968 /* Create L2CAP header */
1969 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1970 lh->cid = cpu_to_le16(chan->dcid);
1971 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1972 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1974 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1975 if (unlikely(err < 0)) {
1977 return ERR_PTR(err);
1982 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1983 struct msghdr *msg, size_t len,
1986 struct l2cap_conn *conn = chan->conn;
1987 struct sk_buff *skb;
1989 struct l2cap_hdr *lh;
1991 BT_DBG("chan %p len %d", chan, (int)len);
1993 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1995 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1996 msg->msg_flags & MSG_DONTWAIT);
2000 skb->priority = priority;
2002 /* Create L2CAP header */
2003 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2004 lh->cid = cpu_to_le16(chan->dcid);
2005 lh->len = cpu_to_le16(len);
2007 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2008 if (unlikely(err < 0)) {
2010 return ERR_PTR(err);
2015 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2016 struct msghdr *msg, size_t len,
2019 struct l2cap_conn *conn = chan->conn;
2020 struct sk_buff *skb;
2021 int err, count, hlen;
2022 struct l2cap_hdr *lh;
2024 BT_DBG("chan %p len %d", chan, (int)len);
2027 return ERR_PTR(-ENOTCONN);
2029 hlen = __ertm_hdr_size(chan);
2032 hlen += L2CAP_SDULEN_SIZE;
2034 if (chan->fcs == L2CAP_FCS_CRC16)
2035 hlen += L2CAP_FCS_SIZE;
2037 count = min_t(unsigned int, (conn->mtu - hlen), len);
2039 skb = chan->ops->alloc_skb(chan, count + hlen,
2040 msg->msg_flags & MSG_DONTWAIT);
2044 /* Create L2CAP header */
2045 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2046 lh->cid = cpu_to_le16(chan->dcid);
2047 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2049 /* Control header is populated later */
2050 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2051 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2053 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2056 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2058 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2059 if (unlikely(err < 0)) {
2061 return ERR_PTR(err);
2064 bt_cb(skb)->control.fcs = chan->fcs;
2065 bt_cb(skb)->control.retries = 0;
2069 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2070 struct sk_buff_head *seg_queue,
2071 struct msghdr *msg, size_t len)
2073 struct sk_buff *skb;
2078 BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
2080 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2081 * so fragmented skbs are not used. The HCI layer's handling
2082 * of fragmented skbs is not compatible with ERTM's queueing.
2085 /* PDU size is derived from the HCI MTU */
2086 pdu_len = chan->conn->mtu;
2088 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2090 /* Adjust for largest possible L2CAP overhead. */
2092 pdu_len -= L2CAP_FCS_SIZE;
2094 pdu_len -= __ertm_hdr_size(chan);
2096 /* Remote device may have requested smaller PDUs */
2097 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2099 if (len <= pdu_len) {
2100 sar = L2CAP_SAR_UNSEGMENTED;
2104 sar = L2CAP_SAR_START;
2106 pdu_len -= L2CAP_SDULEN_SIZE;
2110 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2113 __skb_queue_purge(seg_queue);
2114 return PTR_ERR(skb);
2117 bt_cb(skb)->control.sar = sar;
2118 __skb_queue_tail(seg_queue, skb);
2123 pdu_len += L2CAP_SDULEN_SIZE;
2126 if (len <= pdu_len) {
2127 sar = L2CAP_SAR_END;
2130 sar = L2CAP_SAR_CONTINUE;
2137 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2140 struct sk_buff *skb;
2142 struct sk_buff_head seg_queue;
2144 /* Connectionless channel */
2145 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2146 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2148 return PTR_ERR(skb);
2150 l2cap_do_send(chan, skb);
2154 switch (chan->mode) {
2155 case L2CAP_MODE_BASIC:
2156 /* Check outgoing MTU */
2157 if (len > chan->omtu)
2160 /* Create a basic PDU */
2161 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2163 return PTR_ERR(skb);
2165 l2cap_do_send(chan, skb);
2169 case L2CAP_MODE_ERTM:
2170 case L2CAP_MODE_STREAMING:
2171 /* Check outgoing MTU */
2172 if (len > chan->omtu) {
2177 __skb_queue_head_init(&seg_queue);
2179 /* Do segmentation before calling in to the state machine,
2180 * since it's possible to block while waiting for memory
2183 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2185 /* The channel could have been closed while segmenting,
2186 * check that it is still connected.
2188 if (chan->state != BT_CONNECTED) {
2189 __skb_queue_purge(&seg_queue);
2196 if (chan->mode == L2CAP_MODE_ERTM)
2197 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2199 l2cap_streaming_send(chan, &seg_queue);
2203 /* If the skbs were not queued for sending, they'll still be in
2204 * seg_queue and need to be purged.
2206 __skb_queue_purge(&seg_queue);
2210 BT_DBG("bad state %1.1x", chan->mode);
2217 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2219 struct l2cap_ctrl control;
2222 BT_DBG("chan %p, txseq %d", chan, txseq);
2224 memset(&control, 0, sizeof(control));
2226 control.super = L2CAP_SUPER_SREJ;
2228 for (seq = chan->expected_tx_seq; seq != txseq;
2229 seq = __next_seq(chan, seq)) {
2230 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2231 control.reqseq = seq;
2232 l2cap_send_sframe(chan, &control);
2233 l2cap_seq_list_append(&chan->srej_list, seq);
2237 chan->expected_tx_seq = __next_seq(chan, txseq);
2240 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2242 struct l2cap_ctrl control;
2244 BT_DBG("chan %p", chan);
2246 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2249 memset(&control, 0, sizeof(control));
2251 control.super = L2CAP_SUPER_SREJ;
2252 control.reqseq = chan->srej_list.tail;
2253 l2cap_send_sframe(chan, &control);
2256 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2258 struct l2cap_ctrl control;
2262 BT_DBG("chan %p, txseq %d", chan, txseq);
2264 memset(&control, 0, sizeof(control));
2266 control.super = L2CAP_SUPER_SREJ;
2268 /* Capture initial list head to allow only one pass through the list. */
2269 initial_head = chan->srej_list.head;
2272 seq = l2cap_seq_list_pop(&chan->srej_list);
2273 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2276 control.reqseq = seq;
2277 l2cap_send_sframe(chan, &control);
2278 l2cap_seq_list_append(&chan->srej_list, seq);
2279 } while (chan->srej_list.head != initial_head);
2282 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2284 struct sk_buff *acked_skb;
2287 BT_DBG("chan %p, reqseq %d", chan, reqseq);
2289 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2292 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2293 chan->expected_ack_seq, chan->unacked_frames);
2295 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2296 ackseq = __next_seq(chan, ackseq)) {
2298 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2300 skb_unlink(acked_skb, &chan->tx_q);
2301 kfree_skb(acked_skb);
2302 chan->unacked_frames--;
2306 chan->expected_ack_seq = reqseq;
2308 if (chan->unacked_frames == 0)
2309 __clear_retrans_timer(chan);
2311 BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
2314 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2316 BT_DBG("chan %p", chan);
2318 chan->expected_tx_seq = chan->buffer_seq;
2319 l2cap_seq_list_clear(&chan->srej_list);
2320 skb_queue_purge(&chan->srej_q);
2321 chan->rx_state = L2CAP_RX_STATE_RECV;
2324 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2325 struct l2cap_ctrl *control,
2326 struct sk_buff_head *skbs, u8 event)
2328 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2332 case L2CAP_EV_DATA_REQUEST:
2333 if (chan->tx_send_head == NULL)
2334 chan->tx_send_head = skb_peek(skbs);
2336 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2337 l2cap_ertm_send(chan);
2339 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2340 BT_DBG("Enter LOCAL_BUSY");
2341 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2343 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2344 /* The SREJ_SENT state must be aborted if we are to
2345 * enter the LOCAL_BUSY state.
2347 l2cap_abort_rx_srej_sent(chan);
2350 l2cap_send_ack(chan);
2353 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2354 BT_DBG("Exit LOCAL_BUSY");
2355 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2357 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2358 struct l2cap_ctrl local_control;
2360 memset(&local_control, 0, sizeof(local_control));
2361 local_control.sframe = 1;
2362 local_control.super = L2CAP_SUPER_RR;
2363 local_control.poll = 1;
2364 local_control.reqseq = chan->buffer_seq;
2365 l2cap_send_sframe(chan, &local_control);
2367 chan->retry_count = 1;
2368 __set_monitor_timer(chan);
2369 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2372 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2373 l2cap_process_reqseq(chan, control->reqseq);
2375 case L2CAP_EV_EXPLICIT_POLL:
2376 l2cap_send_rr_or_rnr(chan, 1);
2377 chan->retry_count = 1;
2378 __set_monitor_timer(chan);
2379 __clear_ack_timer(chan);
2380 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2382 case L2CAP_EV_RETRANS_TO:
2383 l2cap_send_rr_or_rnr(chan, 1);
2384 chan->retry_count = 1;
2385 __set_monitor_timer(chan);
2386 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2388 case L2CAP_EV_RECV_FBIT:
2389 /* Nothing to process */
2396 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2397 struct l2cap_ctrl *control,
2398 struct sk_buff_head *skbs, u8 event)
2400 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2404 case L2CAP_EV_DATA_REQUEST:
2405 if (chan->tx_send_head == NULL)
2406 chan->tx_send_head = skb_peek(skbs);
2407 /* Queue data, but don't send. */
2408 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2410 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2411 BT_DBG("Enter LOCAL_BUSY");
2412 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2414 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2415 /* The SREJ_SENT state must be aborted if we are to
2416 * enter the LOCAL_BUSY state.
2418 l2cap_abort_rx_srej_sent(chan);
2421 l2cap_send_ack(chan);
2424 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2425 BT_DBG("Exit LOCAL_BUSY");
2426 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2428 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2429 struct l2cap_ctrl local_control;
2430 memset(&local_control, 0, sizeof(local_control));
2431 local_control.sframe = 1;
2432 local_control.super = L2CAP_SUPER_RR;
2433 local_control.poll = 1;
2434 local_control.reqseq = chan->buffer_seq;
2435 l2cap_send_sframe(chan, &local_control);
2437 chan->retry_count = 1;
2438 __set_monitor_timer(chan);
2439 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2442 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2443 l2cap_process_reqseq(chan, control->reqseq);
2447 case L2CAP_EV_RECV_FBIT:
2448 if (control && control->final) {
2449 __clear_monitor_timer(chan);
2450 if (chan->unacked_frames > 0)
2451 __set_retrans_timer(chan);
2452 chan->retry_count = 0;
2453 chan->tx_state = L2CAP_TX_STATE_XMIT;
2454 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2457 case L2CAP_EV_EXPLICIT_POLL:
2460 case L2CAP_EV_MONITOR_TO:
2461 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2462 l2cap_send_rr_or_rnr(chan, 1);
2463 __set_monitor_timer(chan);
2464 chan->retry_count++;
2466 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2474 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2475 struct sk_buff_head *skbs, u8 event)
2477 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2478 chan, control, skbs, event, chan->tx_state);
2480 switch (chan->tx_state) {
2481 case L2CAP_TX_STATE_XMIT:
2482 l2cap_tx_state_xmit(chan, control, skbs, event);
2484 case L2CAP_TX_STATE_WAIT_F:
2485 l2cap_tx_state_wait_f(chan, control, skbs, event);
2493 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2494 struct l2cap_ctrl *control)
2496 BT_DBG("chan %p, control %p", chan, control);
2497 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2500 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2501 struct l2cap_ctrl *control)
2503 BT_DBG("chan %p, control %p", chan, control);
2504 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2507 /* Copy frame to all raw sockets on that connection */
2508 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2510 struct sk_buff *nskb;
2511 struct l2cap_chan *chan;
2513 BT_DBG("conn %p", conn);
2515 mutex_lock(&conn->chan_lock);
2517 list_for_each_entry(chan, &conn->chan_l, list) {
2518 struct sock *sk = chan->sk;
2519 if (chan->chan_type != L2CAP_CHAN_RAW)
2522 /* Don't send frame to the socket it came from */
2525 nskb = skb_clone(skb, GFP_ATOMIC);
2529 if (chan->ops->recv(chan, nskb))
2533 mutex_unlock(&conn->chan_lock);
2536 /* ---- L2CAP signalling commands ---- */
2537 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2538 u8 code, u8 ident, u16 dlen, void *data)
2540 struct sk_buff *skb, **frag;
2541 struct l2cap_cmd_hdr *cmd;
2542 struct l2cap_hdr *lh;
2545 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2546 conn, code, ident, dlen);
2548 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2549 count = min_t(unsigned int, conn->mtu, len);
2551 skb = bt_skb_alloc(count, GFP_ATOMIC);
2555 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2556 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2558 if (conn->hcon->type == LE_LINK)
2559 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2561 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2563 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2566 cmd->len = cpu_to_le16(dlen);
2569 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2570 memcpy(skb_put(skb, count), data, count);
2576 /* Continuation fragments (no L2CAP header) */
2577 frag = &skb_shinfo(skb)->frag_list;
2579 count = min_t(unsigned int, conn->mtu, len);
2581 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2585 memcpy(skb_put(*frag, count), data, count);
2590 frag = &(*frag)->next;
2600 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2602 struct l2cap_conf_opt *opt = *ptr;
2605 len = L2CAP_CONF_OPT_SIZE + opt->len;
2613 *val = *((u8 *) opt->val);
2617 *val = get_unaligned_le16(opt->val);
2621 *val = get_unaligned_le32(opt->val);
2625 *val = (unsigned long) opt->val;
2629 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2633 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2635 struct l2cap_conf_opt *opt = *ptr;
2637 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2644 *((u8 *) opt->val) = val;
2648 put_unaligned_le16(val, opt->val);
2652 put_unaligned_le32(val, opt->val);
2656 memcpy(opt->val, (void *) val, len);
2660 *ptr += L2CAP_CONF_OPT_SIZE + len;
2663 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2665 struct l2cap_conf_efs efs;
2667 switch (chan->mode) {
2668 case L2CAP_MODE_ERTM:
2669 efs.id = chan->local_id;
2670 efs.stype = chan->local_stype;
2671 efs.msdu = cpu_to_le16(chan->local_msdu);
2672 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2673 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2674 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2677 case L2CAP_MODE_STREAMING:
2679 efs.stype = L2CAP_SERV_BESTEFFORT;
2680 efs.msdu = cpu_to_le16(chan->local_msdu);
2681 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2690 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2691 (unsigned long) &efs);
2694 static void l2cap_ack_timeout(struct work_struct *work)
2696 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2700 BT_DBG("chan %p", chan);
2702 l2cap_chan_lock(chan);
2704 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2705 chan->last_acked_seq);
2708 l2cap_send_rr_or_rnr(chan, 0);
2710 l2cap_chan_unlock(chan);
2711 l2cap_chan_put(chan);
2714 int l2cap_ertm_init(struct l2cap_chan *chan)
2718 chan->next_tx_seq = 0;
2719 chan->expected_tx_seq = 0;
2720 chan->expected_ack_seq = 0;
2721 chan->unacked_frames = 0;
2722 chan->buffer_seq = 0;
2723 chan->frames_sent = 0;
2724 chan->last_acked_seq = 0;
2726 chan->sdu_last_frag = NULL;
2729 skb_queue_head_init(&chan->tx_q);
2731 if (chan->mode != L2CAP_MODE_ERTM)
2734 chan->rx_state = L2CAP_RX_STATE_RECV;
2735 chan->tx_state = L2CAP_TX_STATE_XMIT;
2737 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2738 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2739 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2741 skb_queue_head_init(&chan->srej_q);
2743 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2747 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2749 l2cap_seq_list_free(&chan->srej_list);
2754 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2757 case L2CAP_MODE_STREAMING:
2758 case L2CAP_MODE_ERTM:
2759 if (l2cap_mode_supported(mode, remote_feat_mask))
2763 return L2CAP_MODE_BASIC;
2767 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2769 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2772 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2774 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2777 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2779 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2780 __l2cap_ews_supported(chan)) {
2781 /* use extended control field */
2782 set_bit(FLAG_EXT_CTRL, &chan->flags);
2783 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2785 chan->tx_win = min_t(u16, chan->tx_win,
2786 L2CAP_DEFAULT_TX_WINDOW);
2787 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2791 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2793 struct l2cap_conf_req *req = data;
2794 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2795 void *ptr = req->data;
2798 BT_DBG("chan %p", chan);
2800 if (chan->num_conf_req || chan->num_conf_rsp)
2803 switch (chan->mode) {
2804 case L2CAP_MODE_STREAMING:
2805 case L2CAP_MODE_ERTM:
2806 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2809 if (__l2cap_efs_supported(chan))
2810 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2814 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2819 if (chan->imtu != L2CAP_DEFAULT_MTU)
2820 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2822 switch (chan->mode) {
2823 case L2CAP_MODE_BASIC:
2824 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2825 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2828 rfc.mode = L2CAP_MODE_BASIC;
2830 rfc.max_transmit = 0;
2831 rfc.retrans_timeout = 0;
2832 rfc.monitor_timeout = 0;
2833 rfc.max_pdu_size = 0;
2835 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2836 (unsigned long) &rfc);
2839 case L2CAP_MODE_ERTM:
2840 rfc.mode = L2CAP_MODE_ERTM;
2841 rfc.max_transmit = chan->max_tx;
2842 rfc.retrans_timeout = 0;
2843 rfc.monitor_timeout = 0;
2845 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2846 L2CAP_EXT_HDR_SIZE -
2849 rfc.max_pdu_size = cpu_to_le16(size);
2851 l2cap_txwin_setup(chan);
2853 rfc.txwin_size = min_t(u16, chan->tx_win,
2854 L2CAP_DEFAULT_TX_WINDOW);
2856 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2857 (unsigned long) &rfc);
2859 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2860 l2cap_add_opt_efs(&ptr, chan);
2862 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2865 if (chan->fcs == L2CAP_FCS_NONE ||
2866 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2867 chan->fcs = L2CAP_FCS_NONE;
2868 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2871 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2872 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2876 case L2CAP_MODE_STREAMING:
2877 l2cap_txwin_setup(chan);
2878 rfc.mode = L2CAP_MODE_STREAMING;
2880 rfc.max_transmit = 0;
2881 rfc.retrans_timeout = 0;
2882 rfc.monitor_timeout = 0;
2884 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2885 L2CAP_EXT_HDR_SIZE -
2888 rfc.max_pdu_size = cpu_to_le16(size);
2890 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2891 (unsigned long) &rfc);
2893 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2894 l2cap_add_opt_efs(&ptr, chan);
2896 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2899 if (chan->fcs == L2CAP_FCS_NONE ||
2900 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2901 chan->fcs = L2CAP_FCS_NONE;
2902 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2907 req->dcid = cpu_to_le16(chan->dcid);
2908 req->flags = __constant_cpu_to_le16(0);
2913 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2915 struct l2cap_conf_rsp *rsp = data;
2916 void *ptr = rsp->data;
2917 void *req = chan->conf_req;
2918 int len = chan->conf_len;
2919 int type, hint, olen;
2921 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2922 struct l2cap_conf_efs efs;
2924 u16 mtu = L2CAP_DEFAULT_MTU;
2925 u16 result = L2CAP_CONF_SUCCESS;
2928 BT_DBG("chan %p", chan);
2930 while (len >= L2CAP_CONF_OPT_SIZE) {
2931 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2933 hint = type & L2CAP_CONF_HINT;
2934 type &= L2CAP_CONF_MASK;
2937 case L2CAP_CONF_MTU:
2941 case L2CAP_CONF_FLUSH_TO:
2942 chan->flush_to = val;
2945 case L2CAP_CONF_QOS:
2948 case L2CAP_CONF_RFC:
2949 if (olen == sizeof(rfc))
2950 memcpy(&rfc, (void *) val, olen);
2953 case L2CAP_CONF_FCS:
2954 if (val == L2CAP_FCS_NONE)
2955 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2958 case L2CAP_CONF_EFS:
2960 if (olen == sizeof(efs))
2961 memcpy(&efs, (void *) val, olen);
2964 case L2CAP_CONF_EWS:
2966 return -ECONNREFUSED;
2968 set_bit(FLAG_EXT_CTRL, &chan->flags);
2969 set_bit(CONF_EWS_RECV, &chan->conf_state);
2970 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2971 chan->remote_tx_win = val;
2978 result = L2CAP_CONF_UNKNOWN;
2979 *((u8 *) ptr++) = type;
2984 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2987 switch (chan->mode) {
2988 case L2CAP_MODE_STREAMING:
2989 case L2CAP_MODE_ERTM:
2990 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2991 chan->mode = l2cap_select_mode(rfc.mode,
2992 chan->conn->feat_mask);
2997 if (__l2cap_efs_supported(chan))
2998 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3000 return -ECONNREFUSED;
3003 if (chan->mode != rfc.mode)
3004 return -ECONNREFUSED;
3010 if (chan->mode != rfc.mode) {
3011 result = L2CAP_CONF_UNACCEPT;
3012 rfc.mode = chan->mode;
3014 if (chan->num_conf_rsp == 1)
3015 return -ECONNREFUSED;
3017 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3018 sizeof(rfc), (unsigned long) &rfc);
3021 if (result == L2CAP_CONF_SUCCESS) {
3022 /* Configure output options and let the other side know
3023 * which ones we don't like. */
3025 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3026 result = L2CAP_CONF_UNACCEPT;
3029 set_bit(CONF_MTU_DONE, &chan->conf_state);
3031 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3034 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3035 efs.stype != L2CAP_SERV_NOTRAFIC &&
3036 efs.stype != chan->local_stype) {
3038 result = L2CAP_CONF_UNACCEPT;
3040 if (chan->num_conf_req >= 1)
3041 return -ECONNREFUSED;
3043 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3045 (unsigned long) &efs);
3047 /* Send PENDING Conf Rsp */
3048 result = L2CAP_CONF_PENDING;
3049 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3054 case L2CAP_MODE_BASIC:
3055 chan->fcs = L2CAP_FCS_NONE;
3056 set_bit(CONF_MODE_DONE, &chan->conf_state);
3059 case L2CAP_MODE_ERTM:
3060 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3061 chan->remote_tx_win = rfc.txwin_size;
3063 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3065 chan->remote_max_tx = rfc.max_transmit;
3067 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3069 L2CAP_EXT_HDR_SIZE -
3072 rfc.max_pdu_size = cpu_to_le16(size);
3073 chan->remote_mps = size;
3075 rfc.retrans_timeout =
3076 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3077 rfc.monitor_timeout =
3078 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3080 set_bit(CONF_MODE_DONE, &chan->conf_state);
3082 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3083 sizeof(rfc), (unsigned long) &rfc);
3085 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3086 chan->remote_id = efs.id;
3087 chan->remote_stype = efs.stype;
3088 chan->remote_msdu = le16_to_cpu(efs.msdu);
3089 chan->remote_flush_to =
3090 le32_to_cpu(efs.flush_to);
3091 chan->remote_acc_lat =
3092 le32_to_cpu(efs.acc_lat);
3093 chan->remote_sdu_itime =
3094 le32_to_cpu(efs.sdu_itime);
3095 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3096 sizeof(efs), (unsigned long) &efs);
3100 case L2CAP_MODE_STREAMING:
3101 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3103 L2CAP_EXT_HDR_SIZE -
3106 rfc.max_pdu_size = cpu_to_le16(size);
3107 chan->remote_mps = size;
3109 set_bit(CONF_MODE_DONE, &chan->conf_state);
3111 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3112 sizeof(rfc), (unsigned long) &rfc);
3117 result = L2CAP_CONF_UNACCEPT;
3119 memset(&rfc, 0, sizeof(rfc));
3120 rfc.mode = chan->mode;
3123 if (result == L2CAP_CONF_SUCCESS)
3124 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3126 rsp->scid = cpu_to_le16(chan->dcid);
3127 rsp->result = cpu_to_le16(result);
3128 rsp->flags = __constant_cpu_to_le16(0);
3133 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
3135 struct l2cap_conf_req *req = data;
3136 void *ptr = req->data;
3139 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3140 struct l2cap_conf_efs efs;
3142 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3144 while (len >= L2CAP_CONF_OPT_SIZE) {
3145 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3148 case L2CAP_CONF_MTU:
3149 if (val < L2CAP_DEFAULT_MIN_MTU) {
3150 *result = L2CAP_CONF_UNACCEPT;
3151 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3154 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3157 case L2CAP_CONF_FLUSH_TO:
3158 chan->flush_to = val;
3159 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3163 case L2CAP_CONF_RFC:
3164 if (olen == sizeof(rfc))
3165 memcpy(&rfc, (void *)val, olen);
3167 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3168 rfc.mode != chan->mode)
3169 return -ECONNREFUSED;
3173 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3174 sizeof(rfc), (unsigned long) &rfc);
3177 case L2CAP_CONF_EWS:
3178 chan->tx_win = min_t(u16, val,
3179 L2CAP_DEFAULT_EXT_WINDOW);
3180 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3184 case L2CAP_CONF_EFS:
3185 if (olen == sizeof(efs))
3186 memcpy(&efs, (void *)val, olen);
3188 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3189 efs.stype != L2CAP_SERV_NOTRAFIC &&
3190 efs.stype != chan->local_stype)
3191 return -ECONNREFUSED;
3193 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3194 sizeof(efs), (unsigned long) &efs);
3199 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3200 return -ECONNREFUSED;
3202 chan->mode = rfc.mode;
3204 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3206 case L2CAP_MODE_ERTM:
3207 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3208 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3209 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3211 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3212 chan->local_msdu = le16_to_cpu(efs.msdu);
3213 chan->local_sdu_itime =
3214 le32_to_cpu(efs.sdu_itime);
3215 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3216 chan->local_flush_to =
3217 le32_to_cpu(efs.flush_to);
3221 case L2CAP_MODE_STREAMING:
3222 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3226 req->dcid = cpu_to_le16(chan->dcid);
3227 req->flags = __constant_cpu_to_le16(0);
3232 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3234 struct l2cap_conf_rsp *rsp = data;
3235 void *ptr = rsp->data;
3237 BT_DBG("chan %p", chan);
3239 rsp->scid = cpu_to_le16(chan->dcid);
3240 rsp->result = cpu_to_le16(result);
3241 rsp->flags = cpu_to_le16(flags);
3246 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3248 struct l2cap_conn_rsp rsp;
3249 struct l2cap_conn *conn = chan->conn;
3252 rsp.scid = cpu_to_le16(chan->dcid);
3253 rsp.dcid = cpu_to_le16(chan->scid);
3254 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3255 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3256 l2cap_send_cmd(conn, chan->ident,
3257 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3259 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3262 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3263 l2cap_build_conf_req(chan, buf), buf);
3264 chan->num_conf_req++;
3267 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3271 struct l2cap_conf_rfc rfc;
3273 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3275 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3278 while (len >= L2CAP_CONF_OPT_SIZE) {
3279 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3281 if (type != L2CAP_CONF_RFC)
3284 if (olen != sizeof(rfc))
3287 memcpy(&rfc, (void *)val, olen);
3291 /* Use sane default values in case a misbehaving remote device
3292 * did not send an RFC option.
3294 rfc.mode = chan->mode;
3295 rfc.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3296 rfc.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3297 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
3299 BT_ERR("Expected RFC option was not found, using defaults");
3303 case L2CAP_MODE_ERTM:
3304 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3305 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3306 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3308 case L2CAP_MODE_STREAMING:
3309 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3313 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3315 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3317 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3320 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3321 cmd->ident == conn->info_ident) {
3322 cancel_delayed_work(&conn->info_timer);
3324 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3325 conn->info_ident = 0;
3327 l2cap_conn_start(conn);
3333 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3335 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3336 struct l2cap_conn_rsp rsp;
3337 struct l2cap_chan *chan = NULL, *pchan;
3338 struct sock *parent, *sk = NULL;
3339 int result, status = L2CAP_CS_NO_INFO;
3341 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3342 __le16 psm = req->psm;
3344 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3346 /* Check if we have socket listening on psm */
3347 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3349 result = L2CAP_CR_BAD_PSM;
3355 mutex_lock(&conn->chan_lock);
3358 /* Check if the ACL is secure enough (if not SDP) */
3359 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3360 !hci_conn_check_link_mode(conn->hcon)) {
3361 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3362 result = L2CAP_CR_SEC_BLOCK;
3366 result = L2CAP_CR_NO_MEM;
3368 /* Check if we already have channel with that dcid */
3369 if (__l2cap_get_chan_by_dcid(conn, scid))
3372 chan = pchan->ops->new_connection(pchan);
3378 hci_conn_hold(conn->hcon);
3380 bacpy(&bt_sk(sk)->src, conn->src);
3381 bacpy(&bt_sk(sk)->dst, conn->dst);
3385 bt_accept_enqueue(parent, sk);
3387 __l2cap_chan_add(conn, chan);
3391 __set_chan_timer(chan, sk->sk_sndtimeo);
3393 chan->ident = cmd->ident;
3395 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3396 if (l2cap_chan_check_security(chan)) {
3397 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3398 __l2cap_state_change(chan, BT_CONNECT2);
3399 result = L2CAP_CR_PEND;
3400 status = L2CAP_CS_AUTHOR_PEND;
3401 parent->sk_data_ready(parent, 0);
3403 __l2cap_state_change(chan, BT_CONFIG);
3404 result = L2CAP_CR_SUCCESS;
3405 status = L2CAP_CS_NO_INFO;
3408 __l2cap_state_change(chan, BT_CONNECT2);
3409 result = L2CAP_CR_PEND;
3410 status = L2CAP_CS_AUTHEN_PEND;
3413 __l2cap_state_change(chan, BT_CONNECT2);
3414 result = L2CAP_CR_PEND;
3415 status = L2CAP_CS_NO_INFO;
3419 release_sock(parent);
3420 mutex_unlock(&conn->chan_lock);
3423 rsp.scid = cpu_to_le16(scid);
3424 rsp.dcid = cpu_to_le16(dcid);
3425 rsp.result = cpu_to_le16(result);
3426 rsp.status = cpu_to_le16(status);
3427 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3429 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3430 struct l2cap_info_req info;
3431 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3433 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3434 conn->info_ident = l2cap_get_ident(conn);
3436 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3438 l2cap_send_cmd(conn, conn->info_ident,
3439 L2CAP_INFO_REQ, sizeof(info), &info);
3442 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3443 result == L2CAP_CR_SUCCESS) {
3445 set_bit(CONF_REQ_SENT, &chan->conf_state);
3446 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3447 l2cap_build_conf_req(chan, buf), buf);
3448 chan->num_conf_req++;
3454 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3456 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3457 u16 scid, dcid, result, status;
3458 struct l2cap_chan *chan;
3462 scid = __le16_to_cpu(rsp->scid);
3463 dcid = __le16_to_cpu(rsp->dcid);
3464 result = __le16_to_cpu(rsp->result);
3465 status = __le16_to_cpu(rsp->status);
3467 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3468 dcid, scid, result, status);
3470 mutex_lock(&conn->chan_lock);
3473 chan = __l2cap_get_chan_by_scid(conn, scid);
3479 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3488 l2cap_chan_lock(chan);
3491 case L2CAP_CR_SUCCESS:
3492 l2cap_state_change(chan, BT_CONFIG);
3495 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3497 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3500 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3501 l2cap_build_conf_req(chan, req), req);
3502 chan->num_conf_req++;
3506 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3510 l2cap_chan_del(chan, ECONNREFUSED);
3514 l2cap_chan_unlock(chan);
3517 mutex_unlock(&conn->chan_lock);
3522 static inline void set_default_fcs(struct l2cap_chan *chan)
3524 /* FCS is enabled only in ERTM or streaming mode, if one or both
3527 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3528 chan->fcs = L2CAP_FCS_NONE;
3529 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3530 chan->fcs = L2CAP_FCS_CRC16;
3533 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3535 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3538 struct l2cap_chan *chan;
3541 dcid = __le16_to_cpu(req->dcid);
3542 flags = __le16_to_cpu(req->flags);
3544 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3546 chan = l2cap_get_chan_by_scid(conn, dcid);
3550 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3551 struct l2cap_cmd_rej_cid rej;
3553 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3554 rej.scid = cpu_to_le16(chan->scid);
3555 rej.dcid = cpu_to_le16(chan->dcid);
3557 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3562 /* Reject if config buffer is too small. */
3563 len = cmd_len - sizeof(*req);
3564 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3565 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3566 l2cap_build_conf_rsp(chan, rsp,
3567 L2CAP_CONF_REJECT, flags), rsp);
3572 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3573 chan->conf_len += len;
3575 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3576 /* Incomplete config. Send empty response. */
3577 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3578 l2cap_build_conf_rsp(chan, rsp,
3579 L2CAP_CONF_SUCCESS, flags), rsp);
3583 /* Complete config. */
3584 len = l2cap_parse_conf_req(chan, rsp);
3586 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3590 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3591 chan->num_conf_rsp++;
3593 /* Reset config buffer. */
3596 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
3599 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3600 set_default_fcs(chan);
3602 if (chan->mode == L2CAP_MODE_ERTM ||
3603 chan->mode == L2CAP_MODE_STREAMING)
3604 err = l2cap_ertm_init(chan);
3607 l2cap_send_disconn_req(chan->conn, chan, -err);
3609 l2cap_chan_ready(chan);
3614 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3616 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3617 l2cap_build_conf_req(chan, buf), buf);
3618 chan->num_conf_req++;
3621 /* Got Conf Rsp PENDING from remote side and asume we sent
3622 Conf Rsp PENDING in the code above */
3623 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3624 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3626 /* check compatibility */
3628 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3629 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3631 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3632 l2cap_build_conf_rsp(chan, rsp,
3633 L2CAP_CONF_SUCCESS, flags), rsp);
3637 l2cap_chan_unlock(chan);
3641 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3643 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3644 u16 scid, flags, result;
3645 struct l2cap_chan *chan;
3646 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3649 scid = __le16_to_cpu(rsp->scid);
3650 flags = __le16_to_cpu(rsp->flags);
3651 result = __le16_to_cpu(rsp->result);
3653 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3656 chan = l2cap_get_chan_by_scid(conn, scid);
3661 case L2CAP_CONF_SUCCESS:
3662 l2cap_conf_rfc_get(chan, rsp->data, len);
3663 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3666 case L2CAP_CONF_PENDING:
3667 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3669 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3672 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3675 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3679 /* check compatibility */
3681 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3682 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3684 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3685 l2cap_build_conf_rsp(chan, buf,
3686 L2CAP_CONF_SUCCESS, 0x0000), buf);
3690 case L2CAP_CONF_UNACCEPT:
3691 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3694 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3695 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3699 /* throw out any old stored conf requests */
3700 result = L2CAP_CONF_SUCCESS;
3701 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3704 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3708 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3709 L2CAP_CONF_REQ, len, req);
3710 chan->num_conf_req++;
3711 if (result != L2CAP_CONF_SUCCESS)
3717 l2cap_chan_set_err(chan, ECONNRESET);
3719 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3720 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3724 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3727 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3729 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3730 set_default_fcs(chan);
3732 if (chan->mode == L2CAP_MODE_ERTM ||
3733 chan->mode == L2CAP_MODE_STREAMING)
3734 err = l2cap_ertm_init(chan);
3737 l2cap_send_disconn_req(chan->conn, chan, -err);
3739 l2cap_chan_ready(chan);
3743 l2cap_chan_unlock(chan);
3747 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3749 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3750 struct l2cap_disconn_rsp rsp;
3752 struct l2cap_chan *chan;
3755 scid = __le16_to_cpu(req->scid);
3756 dcid = __le16_to_cpu(req->dcid);
3758 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3760 mutex_lock(&conn->chan_lock);
3762 chan = __l2cap_get_chan_by_scid(conn, dcid);
3764 mutex_unlock(&conn->chan_lock);
3768 l2cap_chan_lock(chan);
3772 rsp.dcid = cpu_to_le16(chan->scid);
3773 rsp.scid = cpu_to_le16(chan->dcid);
3774 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3777 sk->sk_shutdown = SHUTDOWN_MASK;
3780 l2cap_chan_hold(chan);
3781 l2cap_chan_del(chan, ECONNRESET);
3783 l2cap_chan_unlock(chan);
3785 chan->ops->close(chan);
3786 l2cap_chan_put(chan);
3788 mutex_unlock(&conn->chan_lock);
3793 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3795 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3797 struct l2cap_chan *chan;
3799 scid = __le16_to_cpu(rsp->scid);
3800 dcid = __le16_to_cpu(rsp->dcid);
3802 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3804 mutex_lock(&conn->chan_lock);
3806 chan = __l2cap_get_chan_by_scid(conn, scid);
3808 mutex_unlock(&conn->chan_lock);
3812 l2cap_chan_lock(chan);
3814 l2cap_chan_hold(chan);
3815 l2cap_chan_del(chan, 0);
3817 l2cap_chan_unlock(chan);
3819 chan->ops->close(chan);
3820 l2cap_chan_put(chan);
3822 mutex_unlock(&conn->chan_lock);
3827 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3829 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3832 type = __le16_to_cpu(req->type);
3834 BT_DBG("type 0x%4.4x", type);
3836 if (type == L2CAP_IT_FEAT_MASK) {
3838 u32 feat_mask = l2cap_feat_mask;
3839 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3840 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3841 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3843 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3846 feat_mask |= L2CAP_FEAT_EXT_FLOW
3847 | L2CAP_FEAT_EXT_WINDOW;
3849 put_unaligned_le32(feat_mask, rsp->data);
3850 l2cap_send_cmd(conn, cmd->ident,
3851 L2CAP_INFO_RSP, sizeof(buf), buf);
3852 } else if (type == L2CAP_IT_FIXED_CHAN) {
3854 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3857 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3859 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3861 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3862 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3863 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3864 l2cap_send_cmd(conn, cmd->ident,
3865 L2CAP_INFO_RSP, sizeof(buf), buf);
3867 struct l2cap_info_rsp rsp;
3868 rsp.type = cpu_to_le16(type);
3869 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3870 l2cap_send_cmd(conn, cmd->ident,
3871 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3877 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3879 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3882 type = __le16_to_cpu(rsp->type);
3883 result = __le16_to_cpu(rsp->result);
3885 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3887 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3888 if (cmd->ident != conn->info_ident ||
3889 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3892 cancel_delayed_work(&conn->info_timer);
3894 if (result != L2CAP_IR_SUCCESS) {
3895 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3896 conn->info_ident = 0;
3898 l2cap_conn_start(conn);
3904 case L2CAP_IT_FEAT_MASK:
3905 conn->feat_mask = get_unaligned_le32(rsp->data);
3907 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3908 struct l2cap_info_req req;
3909 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3911 conn->info_ident = l2cap_get_ident(conn);
3913 l2cap_send_cmd(conn, conn->info_ident,
3914 L2CAP_INFO_REQ, sizeof(req), &req);
3916 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3917 conn->info_ident = 0;
3919 l2cap_conn_start(conn);
3923 case L2CAP_IT_FIXED_CHAN:
3924 conn->fixed_chan_mask = rsp->data[0];
3925 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3926 conn->info_ident = 0;
3928 l2cap_conn_start(conn);
3935 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3936 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3939 struct l2cap_create_chan_req *req = data;
3940 struct l2cap_create_chan_rsp rsp;
3943 if (cmd_len != sizeof(*req))
3949 psm = le16_to_cpu(req->psm);
3950 scid = le16_to_cpu(req->scid);
3952 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3954 /* Placeholder: Always reject */
3956 rsp.scid = cpu_to_le16(scid);
3957 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3958 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3960 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3966 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3967 struct l2cap_cmd_hdr *cmd, void *data)
3969 BT_DBG("conn %p", conn);
3971 return l2cap_connect_rsp(conn, cmd, data);
3974 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3975 u16 icid, u16 result)
3977 struct l2cap_move_chan_rsp rsp;
3979 BT_DBG("icid %d, result %d", icid, result);
3981 rsp.icid = cpu_to_le16(icid);
3982 rsp.result = cpu_to_le16(result);
3984 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3987 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3988 struct l2cap_chan *chan, u16 icid, u16 result)
3990 struct l2cap_move_chan_cfm cfm;
3993 BT_DBG("icid %d, result %d", icid, result);
3995 ident = l2cap_get_ident(conn);
3997 chan->ident = ident;
3999 cfm.icid = cpu_to_le16(icid);
4000 cfm.result = cpu_to_le16(result);
4002 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4005 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4008 struct l2cap_move_chan_cfm_rsp rsp;
4010 BT_DBG("icid %d", icid);
4012 rsp.icid = cpu_to_le16(icid);
4013 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4016 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4017 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4019 struct l2cap_move_chan_req *req = data;
4021 u16 result = L2CAP_MR_NOT_ALLOWED;
4023 if (cmd_len != sizeof(*req))
4026 icid = le16_to_cpu(req->icid);
4028 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
4033 /* Placeholder: Always refuse */
4034 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
4039 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4040 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4042 struct l2cap_move_chan_rsp *rsp = data;
4045 if (cmd_len != sizeof(*rsp))
4048 icid = le16_to_cpu(rsp->icid);
4049 result = le16_to_cpu(rsp->result);
4051 BT_DBG("icid %d, result %d", icid, result);
4053 /* Placeholder: Always unconfirmed */
4054 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
4059 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4060 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4062 struct l2cap_move_chan_cfm *cfm = data;
4065 if (cmd_len != sizeof(*cfm))
4068 icid = le16_to_cpu(cfm->icid);
4069 result = le16_to_cpu(cfm->result);
4071 BT_DBG("icid %d, result %d", icid, result);
4073 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
4078 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4079 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
4081 struct l2cap_move_chan_cfm_rsp *rsp = data;
4084 if (cmd_len != sizeof(*rsp))
4087 icid = le16_to_cpu(rsp->icid);
4089 BT_DBG("icid %d", icid);
4094 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4099 if (min > max || min < 6 || max > 3200)
4102 if (to_multiplier < 10 || to_multiplier > 3200)
4105 if (max >= to_multiplier * 8)
4108 max_latency = (to_multiplier * 8 / max) - 1;
4109 if (latency > 499 || latency > max_latency)
4115 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4116 struct l2cap_cmd_hdr *cmd, u8 *data)
4118 struct hci_conn *hcon = conn->hcon;
4119 struct l2cap_conn_param_update_req *req;
4120 struct l2cap_conn_param_update_rsp rsp;
4121 u16 min, max, latency, to_multiplier, cmd_len;
4124 if (!(hcon->link_mode & HCI_LM_MASTER))
4127 cmd_len = __le16_to_cpu(cmd->len);
4128 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4131 req = (struct l2cap_conn_param_update_req *) data;
4132 min = __le16_to_cpu(req->min);
4133 max = __le16_to_cpu(req->max);
4134 latency = __le16_to_cpu(req->latency);
4135 to_multiplier = __le16_to_cpu(req->to_multiplier);
4137 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4138 min, max, latency, to_multiplier);
4140 memset(&rsp, 0, sizeof(rsp));
4142 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4144 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4146 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4148 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4152 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
4157 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4158 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4162 switch (cmd->code) {
4163 case L2CAP_COMMAND_REJ:
4164 l2cap_command_rej(conn, cmd, data);
4167 case L2CAP_CONN_REQ:
4168 err = l2cap_connect_req(conn, cmd, data);
4171 case L2CAP_CONN_RSP:
4172 err = l2cap_connect_rsp(conn, cmd, data);
4175 case L2CAP_CONF_REQ:
4176 err = l2cap_config_req(conn, cmd, cmd_len, data);
4179 case L2CAP_CONF_RSP:
4180 err = l2cap_config_rsp(conn, cmd, data);
4183 case L2CAP_DISCONN_REQ:
4184 err = l2cap_disconnect_req(conn, cmd, data);
4187 case L2CAP_DISCONN_RSP:
4188 err = l2cap_disconnect_rsp(conn, cmd, data);
4191 case L2CAP_ECHO_REQ:
4192 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4195 case L2CAP_ECHO_RSP:
4198 case L2CAP_INFO_REQ:
4199 err = l2cap_information_req(conn, cmd, data);
4202 case L2CAP_INFO_RSP:
4203 err = l2cap_information_rsp(conn, cmd, data);
4206 case L2CAP_CREATE_CHAN_REQ:
4207 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4210 case L2CAP_CREATE_CHAN_RSP:
4211 err = l2cap_create_channel_rsp(conn, cmd, data);
4214 case L2CAP_MOVE_CHAN_REQ:
4215 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4218 case L2CAP_MOVE_CHAN_RSP:
4219 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4222 case L2CAP_MOVE_CHAN_CFM:
4223 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4226 case L2CAP_MOVE_CHAN_CFM_RSP:
4227 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4231 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
4239 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4240 struct l2cap_cmd_hdr *cmd, u8 *data)
4242 switch (cmd->code) {
4243 case L2CAP_COMMAND_REJ:
4246 case L2CAP_CONN_PARAM_UPDATE_REQ:
4247 return l2cap_conn_param_update_req(conn, cmd, data);
4249 case L2CAP_CONN_PARAM_UPDATE_RSP:
4253 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
4258 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4259 struct sk_buff *skb)
4261 u8 *data = skb->data;
4263 struct l2cap_cmd_hdr cmd;
4266 l2cap_raw_recv(conn, skb);
4268 while (len >= L2CAP_CMD_HDR_SIZE) {
4270 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4271 data += L2CAP_CMD_HDR_SIZE;
4272 len -= L2CAP_CMD_HDR_SIZE;
4274 cmd_len = le16_to_cpu(cmd.len);
4276 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
4278 if (cmd_len > len || !cmd.ident) {
4279 BT_DBG("corrupted command");
4283 if (conn->hcon->type == LE_LINK)
4284 err = l2cap_le_sig_cmd(conn, &cmd, data);
4286 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4289 struct l2cap_cmd_rej_unk rej;
4291 BT_ERR("Wrong link type (%d)", err);
4293 /* FIXME: Map err to a valid reason */
4294 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4295 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4305 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4307 u16 our_fcs, rcv_fcs;
4310 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4311 hdr_size = L2CAP_EXT_HDR_SIZE;
4313 hdr_size = L2CAP_ENH_HDR_SIZE;
4315 if (chan->fcs == L2CAP_FCS_CRC16) {
4316 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4317 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4318 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4320 if (our_fcs != rcv_fcs)
4326 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4328 struct l2cap_ctrl control;
4330 BT_DBG("chan %p", chan);
4332 memset(&control, 0, sizeof(control));
4335 control.reqseq = chan->buffer_seq;
4336 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4338 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4339 control.super = L2CAP_SUPER_RNR;
4340 l2cap_send_sframe(chan, &control);
4343 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4344 chan->unacked_frames > 0)
4345 __set_retrans_timer(chan);
4347 /* Send pending iframes */
4348 l2cap_ertm_send(chan);
4350 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4351 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
4352 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4355 control.super = L2CAP_SUPER_RR;
4356 l2cap_send_sframe(chan, &control);
4360 static void append_skb_frag(struct sk_buff *skb,
4361 struct sk_buff *new_frag, struct sk_buff **last_frag)
4363 /* skb->len reflects data in skb as well as all fragments
4364 * skb->data_len reflects only data in fragments
4366 if (!skb_has_frag_list(skb))
4367 skb_shinfo(skb)->frag_list = new_frag;
4369 new_frag->next = NULL;
4371 (*last_frag)->next = new_frag;
4372 *last_frag = new_frag;
4374 skb->len += new_frag->len;
4375 skb->data_len += new_frag->len;
4376 skb->truesize += new_frag->truesize;
4379 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4380 struct l2cap_ctrl *control)
4384 switch (control->sar) {
4385 case L2CAP_SAR_UNSEGMENTED:
4389 err = chan->ops->recv(chan, skb);
4392 case L2CAP_SAR_START:
4396 chan->sdu_len = get_unaligned_le16(skb->data);
4397 skb_pull(skb, L2CAP_SDULEN_SIZE);
4399 if (chan->sdu_len > chan->imtu) {
4404 if (skb->len >= chan->sdu_len)
4408 chan->sdu_last_frag = skb;
4414 case L2CAP_SAR_CONTINUE:
4418 append_skb_frag(chan->sdu, skb,
4419 &chan->sdu_last_frag);
4422 if (chan->sdu->len >= chan->sdu_len)
4432 append_skb_frag(chan->sdu, skb,
4433 &chan->sdu_last_frag);
4436 if (chan->sdu->len != chan->sdu_len)
4439 err = chan->ops->recv(chan, chan->sdu);
4442 /* Reassembly complete */
4444 chan->sdu_last_frag = NULL;
4452 kfree_skb(chan->sdu);
4454 chan->sdu_last_frag = NULL;
4461 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4465 if (chan->mode != L2CAP_MODE_ERTM)
4468 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4469 l2cap_tx(chan, NULL, NULL, event);
4472 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4475 /* Pass sequential frames to l2cap_reassemble_sdu()
4476 * until a gap is encountered.
4479 BT_DBG("chan %p", chan);
4481 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4482 struct sk_buff *skb;
4483 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4484 chan->buffer_seq, skb_queue_len(&chan->srej_q));
4486 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
4491 skb_unlink(skb, &chan->srej_q);
4492 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4493 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
4498 if (skb_queue_empty(&chan->srej_q)) {
4499 chan->rx_state = L2CAP_RX_STATE_RECV;
4500 l2cap_send_ack(chan);
4506 static void l2cap_handle_srej(struct l2cap_chan *chan,
4507 struct l2cap_ctrl *control)
4509 struct sk_buff *skb;
4511 BT_DBG("chan %p, control %p", chan, control);
4513 if (control->reqseq == chan->next_tx_seq) {
4514 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4515 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4519 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4522 BT_DBG("Seq %d not available for retransmission",
4527 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4528 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4529 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4533 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4535 if (control->poll) {
4536 l2cap_pass_to_tx(chan, control);
4538 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4539 l2cap_retransmit(chan, control);
4540 l2cap_ertm_send(chan);
4542 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4543 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4544 chan->srej_save_reqseq = control->reqseq;
4547 l2cap_pass_to_tx_fbit(chan, control);
4549 if (control->final) {
4550 if (chan->srej_save_reqseq != control->reqseq ||
4551 !test_and_clear_bit(CONN_SREJ_ACT,
4553 l2cap_retransmit(chan, control);
4555 l2cap_retransmit(chan, control);
4556 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4557 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4558 chan->srej_save_reqseq = control->reqseq;
4564 static void l2cap_handle_rej(struct l2cap_chan *chan,
4565 struct l2cap_ctrl *control)
4567 struct sk_buff *skb;
4569 BT_DBG("chan %p, control %p", chan, control);
4571 if (control->reqseq == chan->next_tx_seq) {
4572 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4573 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4577 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4579 if (chan->max_tx && skb &&
4580 bt_cb(skb)->control.retries >= chan->max_tx) {
4581 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4582 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4586 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4588 l2cap_pass_to_tx(chan, control);
4590 if (control->final) {
4591 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4592 l2cap_retransmit_all(chan, control);
4594 l2cap_retransmit_all(chan, control);
4595 l2cap_ertm_send(chan);
4596 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
4597 set_bit(CONN_REJ_ACT, &chan->conn_state);
4601 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4603 BT_DBG("chan %p, txseq %d", chan, txseq);
4605 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4606 chan->expected_tx_seq);
4608 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4609 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4611 /* See notes below regarding "double poll" and
4614 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4615 BT_DBG("Invalid/Ignore - after SREJ");
4616 return L2CAP_TXSEQ_INVALID_IGNORE;
4618 BT_DBG("Invalid - in window after SREJ sent");
4619 return L2CAP_TXSEQ_INVALID;
4623 if (chan->srej_list.head == txseq) {
4624 BT_DBG("Expected SREJ");
4625 return L2CAP_TXSEQ_EXPECTED_SREJ;
4628 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4629 BT_DBG("Duplicate SREJ - txseq already stored");
4630 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4633 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4634 BT_DBG("Unexpected SREJ - not requested");
4635 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4639 if (chan->expected_tx_seq == txseq) {
4640 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4642 BT_DBG("Invalid - txseq outside tx window");
4643 return L2CAP_TXSEQ_INVALID;
4646 return L2CAP_TXSEQ_EXPECTED;
4650 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4651 __seq_offset(chan, chan->expected_tx_seq,
4652 chan->last_acked_seq)){
4653 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4654 return L2CAP_TXSEQ_DUPLICATE;
4657 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4658 /* A source of invalid packets is a "double poll" condition,
4659 * where delays cause us to send multiple poll packets. If
4660 * the remote stack receives and processes both polls,
4661 * sequence numbers can wrap around in such a way that a
4662 * resent frame has a sequence number that looks like new data
4663 * with a sequence gap. This would trigger an erroneous SREJ
4666 * Fortunately, this is impossible with a tx window that's
4667 * less than half of the maximum sequence number, which allows
4668 * invalid frames to be safely ignored.
4670 * With tx window sizes greater than half of the tx window
4671 * maximum, the frame is invalid and cannot be ignored. This
4672 * causes a disconnect.
4675 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4676 BT_DBG("Invalid/Ignore - txseq outside tx window");
4677 return L2CAP_TXSEQ_INVALID_IGNORE;
4679 BT_DBG("Invalid - txseq outside tx window");
4680 return L2CAP_TXSEQ_INVALID;
4683 BT_DBG("Unexpected - txseq indicates missing frames");
4684 return L2CAP_TXSEQ_UNEXPECTED;
4688 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4689 struct l2cap_ctrl *control,
4690 struct sk_buff *skb, u8 event)
4693 bool skb_in_use = 0;
4695 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4699 case L2CAP_EV_RECV_IFRAME:
4700 switch (l2cap_classify_txseq(chan, control->txseq)) {
4701 case L2CAP_TXSEQ_EXPECTED:
4702 l2cap_pass_to_tx(chan, control);
4704 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4705 BT_DBG("Busy, discarding expected seq %d",
4710 chan->expected_tx_seq = __next_seq(chan,
4713 chan->buffer_seq = chan->expected_tx_seq;
4716 err = l2cap_reassemble_sdu(chan, skb, control);
4720 if (control->final) {
4721 if (!test_and_clear_bit(CONN_REJ_ACT,
4722 &chan->conn_state)) {
4724 l2cap_retransmit_all(chan, control);
4725 l2cap_ertm_send(chan);
4729 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4730 l2cap_send_ack(chan);
4732 case L2CAP_TXSEQ_UNEXPECTED:
4733 l2cap_pass_to_tx(chan, control);
4735 /* Can't issue SREJ frames in the local busy state.
4736 * Drop this frame, it will be seen as missing
4737 * when local busy is exited.
4739 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4740 BT_DBG("Busy, discarding unexpected seq %d",
4745 /* There was a gap in the sequence, so an SREJ
4746 * must be sent for each missing frame. The
4747 * current frame is stored for later use.
4749 skb_queue_tail(&chan->srej_q, skb);
4751 BT_DBG("Queued %p (queue len %d)", skb,
4752 skb_queue_len(&chan->srej_q));
4754 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4755 l2cap_seq_list_clear(&chan->srej_list);
4756 l2cap_send_srej(chan, control->txseq);
4758 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4760 case L2CAP_TXSEQ_DUPLICATE:
4761 l2cap_pass_to_tx(chan, control);
4763 case L2CAP_TXSEQ_INVALID_IGNORE:
4765 case L2CAP_TXSEQ_INVALID:
4767 l2cap_send_disconn_req(chan->conn, chan,
4772 case L2CAP_EV_RECV_RR:
4773 l2cap_pass_to_tx(chan, control);
4774 if (control->final) {
4775 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4777 if (!test_and_clear_bit(CONN_REJ_ACT,
4778 &chan->conn_state)) {
4780 l2cap_retransmit_all(chan, control);
4783 l2cap_ertm_send(chan);
4784 } else if (control->poll) {
4785 l2cap_send_i_or_rr_or_rnr(chan);
4787 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4788 &chan->conn_state) &&
4789 chan->unacked_frames)
4790 __set_retrans_timer(chan);
4792 l2cap_ertm_send(chan);
4795 case L2CAP_EV_RECV_RNR:
4796 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4797 l2cap_pass_to_tx(chan, control);
4798 if (control && control->poll) {
4799 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4800 l2cap_send_rr_or_rnr(chan, 0);
4802 __clear_retrans_timer(chan);
4803 l2cap_seq_list_clear(&chan->retrans_list);
4805 case L2CAP_EV_RECV_REJ:
4806 l2cap_handle_rej(chan, control);
4808 case L2CAP_EV_RECV_SREJ:
4809 l2cap_handle_srej(chan, control);
4815 if (skb && !skb_in_use) {
4816 BT_DBG("Freeing %p", skb);
4823 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4824 struct l2cap_ctrl *control,
4825 struct sk_buff *skb, u8 event)
4828 u16 txseq = control->txseq;
4829 bool skb_in_use = 0;
4831 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4835 case L2CAP_EV_RECV_IFRAME:
4836 switch (l2cap_classify_txseq(chan, txseq)) {
4837 case L2CAP_TXSEQ_EXPECTED:
4838 /* Keep frame for reassembly later */
4839 l2cap_pass_to_tx(chan, control);
4840 skb_queue_tail(&chan->srej_q, skb);
4842 BT_DBG("Queued %p (queue len %d)", skb,
4843 skb_queue_len(&chan->srej_q));
4845 chan->expected_tx_seq = __next_seq(chan, txseq);
4847 case L2CAP_TXSEQ_EXPECTED_SREJ:
4848 l2cap_seq_list_pop(&chan->srej_list);
4850 l2cap_pass_to_tx(chan, control);
4851 skb_queue_tail(&chan->srej_q, skb);
4853 BT_DBG("Queued %p (queue len %d)", skb,
4854 skb_queue_len(&chan->srej_q));
4856 err = l2cap_rx_queued_iframes(chan);
4861 case L2CAP_TXSEQ_UNEXPECTED:
4862 /* Got a frame that can't be reassembled yet.
4863 * Save it for later, and send SREJs to cover
4864 * the missing frames.
4866 skb_queue_tail(&chan->srej_q, skb);
4868 BT_DBG("Queued %p (queue len %d)", skb,
4869 skb_queue_len(&chan->srej_q));
4871 l2cap_pass_to_tx(chan, control);
4872 l2cap_send_srej(chan, control->txseq);
4874 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4875 /* This frame was requested with an SREJ, but
4876 * some expected retransmitted frames are
4877 * missing. Request retransmission of missing
4880 skb_queue_tail(&chan->srej_q, skb);
4882 BT_DBG("Queued %p (queue len %d)", skb,
4883 skb_queue_len(&chan->srej_q));
4885 l2cap_pass_to_tx(chan, control);
4886 l2cap_send_srej_list(chan, control->txseq);
4888 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4889 /* We've already queued this frame. Drop this copy. */
4890 l2cap_pass_to_tx(chan, control);
4892 case L2CAP_TXSEQ_DUPLICATE:
4893 /* Expecting a later sequence number, so this frame
4894 * was already received. Ignore it completely.
4897 case L2CAP_TXSEQ_INVALID_IGNORE:
4899 case L2CAP_TXSEQ_INVALID:
4901 l2cap_send_disconn_req(chan->conn, chan,
4906 case L2CAP_EV_RECV_RR:
4907 l2cap_pass_to_tx(chan, control);
4908 if (control->final) {
4909 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4911 if (!test_and_clear_bit(CONN_REJ_ACT,
4912 &chan->conn_state)) {
4914 l2cap_retransmit_all(chan, control);
4917 l2cap_ertm_send(chan);
4918 } else if (control->poll) {
4919 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4920 &chan->conn_state) &&
4921 chan->unacked_frames) {
4922 __set_retrans_timer(chan);
4925 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4926 l2cap_send_srej_tail(chan);
4928 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4929 &chan->conn_state) &&
4930 chan->unacked_frames)
4931 __set_retrans_timer(chan);
4933 l2cap_send_ack(chan);
4936 case L2CAP_EV_RECV_RNR:
4937 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4938 l2cap_pass_to_tx(chan, control);
4939 if (control->poll) {
4940 l2cap_send_srej_tail(chan);
4942 struct l2cap_ctrl rr_control;
4943 memset(&rr_control, 0, sizeof(rr_control));
4944 rr_control.sframe = 1;
4945 rr_control.super = L2CAP_SUPER_RR;
4946 rr_control.reqseq = chan->buffer_seq;
4947 l2cap_send_sframe(chan, &rr_control);
4951 case L2CAP_EV_RECV_REJ:
4952 l2cap_handle_rej(chan, control);
4954 case L2CAP_EV_RECV_SREJ:
4955 l2cap_handle_srej(chan, control);
4959 if (skb && !skb_in_use) {
4960 BT_DBG("Freeing %p", skb);
4967 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4969 /* Make sure reqseq is for a packet that has been sent but not acked */
4972 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
4973 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
4976 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4977 struct sk_buff *skb, u8 event)
4981 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
4982 control, skb, event, chan->rx_state);
4984 if (__valid_reqseq(chan, control->reqseq)) {
4985 switch (chan->rx_state) {
4986 case L2CAP_RX_STATE_RECV:
4987 err = l2cap_rx_state_recv(chan, control, skb, event);
4989 case L2CAP_RX_STATE_SREJ_SENT:
4990 err = l2cap_rx_state_srej_sent(chan, control, skb,
4998 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
4999 control->reqseq, chan->next_tx_seq,
5000 chan->expected_ack_seq);
5001 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5007 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5008 struct sk_buff *skb)
5012 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5015 if (l2cap_classify_txseq(chan, control->txseq) ==
5016 L2CAP_TXSEQ_EXPECTED) {
5017 l2cap_pass_to_tx(chan, control);
5019 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5020 __next_seq(chan, chan->buffer_seq));
5022 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5024 l2cap_reassemble_sdu(chan, skb, control);
5027 kfree_skb(chan->sdu);
5030 chan->sdu_last_frag = NULL;
5034 BT_DBG("Freeing %p", skb);
5039 chan->last_acked_seq = control->txseq;
5040 chan->expected_tx_seq = __next_seq(chan, control->txseq);
5045 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5047 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5051 __unpack_control(chan, skb);
5056 * We can just drop the corrupted I-frame here.
5057 * Receiver will miss it and start proper recovery
5058 * procedures and ask for retransmission.
5060 if (l2cap_check_fcs(chan, skb))
5063 if (!control->sframe && control->sar == L2CAP_SAR_START)
5064 len -= L2CAP_SDULEN_SIZE;
5066 if (chan->fcs == L2CAP_FCS_CRC16)
5067 len -= L2CAP_FCS_SIZE;
5069 if (len > chan->mps) {
5070 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5074 if (!control->sframe) {
5077 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5078 control->sar, control->reqseq, control->final,
5081 /* Validate F-bit - F=0 always valid, F=1 only
5082 * valid in TX WAIT_F
5084 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5087 if (chan->mode != L2CAP_MODE_STREAMING) {
5088 event = L2CAP_EV_RECV_IFRAME;
5089 err = l2cap_rx(chan, control, skb, event);
5091 err = l2cap_stream_rx(chan, control, skb);
5095 l2cap_send_disconn_req(chan->conn, chan,
5098 const u8 rx_func_to_event[4] = {
5099 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5100 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5103 /* Only I-frames are expected in streaming mode */
5104 if (chan->mode == L2CAP_MODE_STREAMING)
5107 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5108 control->reqseq, control->final, control->poll,
5113 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5117 /* Validate F and P bits */
5118 if (control->final && (control->poll ||
5119 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5122 event = rx_func_to_event[control->super];
5123 if (l2cap_rx(chan, control, skb, event))
5124 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5134 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
5135 struct sk_buff *skb)
5137 struct l2cap_chan *chan;
5139 chan = l2cap_get_chan_by_scid(conn, cid);
5141 if (cid == L2CAP_CID_A2MP) {
5142 chan = a2mp_channel_create(conn, skb);
5148 l2cap_chan_lock(chan);
5150 BT_DBG("unknown cid 0x%4.4x", cid);
5151 /* Drop packet and return */
5157 BT_DBG("chan %p, len %d", chan, skb->len);
5159 if (chan->state != BT_CONNECTED)
5162 switch (chan->mode) {
5163 case L2CAP_MODE_BASIC:
5164 /* If socket recv buffers overflows we drop data here
5165 * which is *bad* because L2CAP has to be reliable.
5166 * But we don't have any other choice. L2CAP doesn't
5167 * provide flow control mechanism. */
5169 if (chan->imtu < skb->len)
5172 if (!chan->ops->recv(chan, skb))
5176 case L2CAP_MODE_ERTM:
5177 case L2CAP_MODE_STREAMING:
5178 l2cap_data_rcv(chan, skb);
5182 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
5190 l2cap_chan_unlock(chan);
5193 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5194 struct sk_buff *skb)
5196 struct l2cap_chan *chan;
5198 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5202 BT_DBG("chan %p, len %d", chan, skb->len);
5204 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5207 if (chan->imtu < skb->len)
5210 if (!chan->ops->recv(chan, skb))
5217 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5218 struct sk_buff *skb)
5220 struct l2cap_chan *chan;
5222 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5226 BT_DBG("chan %p, len %d", chan, skb->len);
5228 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5231 if (chan->imtu < skb->len)
5234 if (!chan->ops->recv(chan, skb))
5241 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5243 struct l2cap_hdr *lh = (void *) skb->data;
5247 skb_pull(skb, L2CAP_HDR_SIZE);
5248 cid = __le16_to_cpu(lh->cid);
5249 len = __le16_to_cpu(lh->len);
5251 if (len != skb->len) {
5256 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5259 case L2CAP_CID_LE_SIGNALING:
5260 case L2CAP_CID_SIGNALING:
5261 l2cap_sig_channel(conn, skb);
5264 case L2CAP_CID_CONN_LESS:
5265 psm = get_unaligned((__le16 *) skb->data);
5266 skb_pull(skb, L2CAP_PSMLEN_SIZE);
5267 l2cap_conless_channel(conn, psm, skb);
5270 case L2CAP_CID_LE_DATA:
5271 l2cap_att_channel(conn, cid, skb);
5275 if (smp_sig_channel(conn, skb))
5276 l2cap_conn_del(conn->hcon, EACCES);
5280 l2cap_data_channel(conn, cid, skb);
5285 /* ---- L2CAP interface with lower layer (HCI) ---- */
5287 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5289 int exact = 0, lm1 = 0, lm2 = 0;
5290 struct l2cap_chan *c;
5292 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5294 /* Find listening sockets and check their link_mode */
5295 read_lock(&chan_list_lock);
5296 list_for_each_entry(c, &chan_list, global_l) {
5297 struct sock *sk = c->sk;
5299 if (c->state != BT_LISTEN)
5302 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5303 lm1 |= HCI_LM_ACCEPT;
5304 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5305 lm1 |= HCI_LM_MASTER;
5307 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5308 lm2 |= HCI_LM_ACCEPT;
5309 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5310 lm2 |= HCI_LM_MASTER;
5313 read_unlock(&chan_list_lock);
5315 return exact ? lm1 : lm2;
5318 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5320 struct l2cap_conn *conn;
5322 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5325 conn = l2cap_conn_add(hcon, status);
5327 l2cap_conn_ready(conn);
5329 l2cap_conn_del(hcon, bt_to_errno(status));
5334 int l2cap_disconn_ind(struct hci_conn *hcon)
5336 struct l2cap_conn *conn = hcon->l2cap_data;
5338 BT_DBG("hcon %p", hcon);
5341 return HCI_ERROR_REMOTE_USER_TERM;
5342 return conn->disc_reason;
5345 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5347 BT_DBG("hcon %p reason %d", hcon, reason);
5349 l2cap_conn_del(hcon, bt_to_errno(reason));
5353 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5355 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5358 if (encrypt == 0x00) {
5359 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5360 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5361 } else if (chan->sec_level == BT_SECURITY_HIGH)
5362 l2cap_chan_close(chan, ECONNREFUSED);
5364 if (chan->sec_level == BT_SECURITY_MEDIUM)
5365 __clear_chan_timer(chan);
5369 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5371 struct l2cap_conn *conn = hcon->l2cap_data;
5372 struct l2cap_chan *chan;
5377 BT_DBG("conn %p", conn);
5379 if (hcon->type == LE_LINK) {
5380 if (!status && encrypt)
5381 smp_distribute_keys(conn, 0);
5382 cancel_delayed_work(&conn->security_timer);
5385 mutex_lock(&conn->chan_lock);
5387 list_for_each_entry(chan, &conn->chan_l, list) {
5388 l2cap_chan_lock(chan);
5390 BT_DBG("chan->scid %d", chan->scid);
5392 if (chan->scid == L2CAP_CID_LE_DATA) {
5393 if (!status && encrypt) {
5394 chan->sec_level = hcon->sec_level;
5395 l2cap_chan_ready(chan);
5398 l2cap_chan_unlock(chan);
5402 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5403 l2cap_chan_unlock(chan);
5407 if (!status && (chan->state == BT_CONNECTED ||
5408 chan->state == BT_CONFIG)) {
5409 struct sock *sk = chan->sk;
5411 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5412 sk->sk_state_change(sk);
5414 l2cap_check_encryption(chan, encrypt);
5415 l2cap_chan_unlock(chan);
5419 if (chan->state == BT_CONNECT) {
5421 l2cap_send_conn_req(chan);
5423 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5425 } else if (chan->state == BT_CONNECT2) {
5426 struct sock *sk = chan->sk;
5427 struct l2cap_conn_rsp rsp;
5433 if (test_bit(BT_SK_DEFER_SETUP,
5434 &bt_sk(sk)->flags)) {
5435 struct sock *parent = bt_sk(sk)->parent;
5436 res = L2CAP_CR_PEND;
5437 stat = L2CAP_CS_AUTHOR_PEND;
5439 parent->sk_data_ready(parent, 0);
5441 __l2cap_state_change(chan, BT_CONFIG);
5442 res = L2CAP_CR_SUCCESS;
5443 stat = L2CAP_CS_NO_INFO;
5446 __l2cap_state_change(chan, BT_DISCONN);
5447 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5448 res = L2CAP_CR_SEC_BLOCK;
5449 stat = L2CAP_CS_NO_INFO;
5454 rsp.scid = cpu_to_le16(chan->dcid);
5455 rsp.dcid = cpu_to_le16(chan->scid);
5456 rsp.result = cpu_to_le16(res);
5457 rsp.status = cpu_to_le16(stat);
5458 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
5461 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
5462 res == L2CAP_CR_SUCCESS) {
5464 set_bit(CONF_REQ_SENT, &chan->conf_state);
5465 l2cap_send_cmd(conn, l2cap_get_ident(conn),
5467 l2cap_build_conf_req(chan, buf),
5469 chan->num_conf_req++;
5473 l2cap_chan_unlock(chan);
5476 mutex_unlock(&conn->chan_lock);
5481 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5483 struct l2cap_conn *conn = hcon->l2cap_data;
5486 conn = l2cap_conn_add(hcon, 0);
5491 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5493 if (!(flags & ACL_CONT)) {
5494 struct l2cap_hdr *hdr;
5498 BT_ERR("Unexpected start frame (len %d)", skb->len);
5499 kfree_skb(conn->rx_skb);
5500 conn->rx_skb = NULL;
5502 l2cap_conn_unreliable(conn, ECOMM);
5505 /* Start fragment always begin with Basic L2CAP header */
5506 if (skb->len < L2CAP_HDR_SIZE) {
5507 BT_ERR("Frame is too short (len %d)", skb->len);
5508 l2cap_conn_unreliable(conn, ECOMM);
5512 hdr = (struct l2cap_hdr *) skb->data;
5513 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5515 if (len == skb->len) {
5516 /* Complete frame received */
5517 l2cap_recv_frame(conn, skb);
5521 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5523 if (skb->len > len) {
5524 BT_ERR("Frame is too long (len %d, expected len %d)",
5526 l2cap_conn_unreliable(conn, ECOMM);
5530 /* Allocate skb for the complete frame (with header) */
5531 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5535 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5537 conn->rx_len = len - skb->len;
5539 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5541 if (!conn->rx_len) {
5542 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5543 l2cap_conn_unreliable(conn, ECOMM);
5547 if (skb->len > conn->rx_len) {
5548 BT_ERR("Fragment is too long (len %d, expected %d)",
5549 skb->len, conn->rx_len);
5550 kfree_skb(conn->rx_skb);
5551 conn->rx_skb = NULL;
5553 l2cap_conn_unreliable(conn, ECOMM);
5557 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5559 conn->rx_len -= skb->len;
5561 if (!conn->rx_len) {
5562 /* Complete frame received */
5563 l2cap_recv_frame(conn, conn->rx_skb);
5564 conn->rx_skb = NULL;
5573 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5575 struct l2cap_chan *c;
5577 read_lock(&chan_list_lock);
5579 list_for_each_entry(c, &chan_list, global_l) {
5580 struct sock *sk = c->sk;
5582 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5583 batostr(&bt_sk(sk)->src),
5584 batostr(&bt_sk(sk)->dst),
5585 c->state, __le16_to_cpu(c->psm),
5586 c->scid, c->dcid, c->imtu, c->omtu,
5587 c->sec_level, c->mode);
5590 read_unlock(&chan_list_lock);
5595 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5597 return single_open(file, l2cap_debugfs_show, inode->i_private);
5600 static const struct file_operations l2cap_debugfs_fops = {
5601 .open = l2cap_debugfs_open,
5603 .llseek = seq_lseek,
5604 .release = single_release,
5607 static struct dentry *l2cap_debugfs;
5609 int __init l2cap_init(void)
5613 err = l2cap_init_sockets();
5618 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5619 bt_debugfs, NULL, &l2cap_debugfs_fops);
5621 BT_ERR("Failed to create L2CAP debug file");
5627 void l2cap_exit(void)
5629 debugfs_remove(l2cap_debugfs);
5630 l2cap_cleanup_sockets();
5633 module_param(disable_ertm, bool, 0644);
5634 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");