2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
/* Locally supported L2CAP feature mask and fixed-channel bitmap
 * advertised in information responses. */
44 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
45 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
/* Global list of all L2CAP channels in the system, protected by
 * chan_list_lock (readers/writer lock). */
47 static LIST_HEAD(chan_list);
48 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for signalling/ERTM helpers defined later in
 * the file. */
50 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
51 u8 code, u8 ident, u16 dlen, void *data);
52 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
55 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
56 struct l2cap_chan *chan, int err);
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
61 /* ---- L2CAP channels ---- */
/* Look up a channel on this connection by destination CID (dcid).
 * Walks conn->chan_l, so the caller must hold conn->chan_lock.
 * NOTE(review): body partially elided in this view. */
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
67 list_for_each_entry(c, &conn->chan_l, list) {
/* Look up a channel on this connection by source CID (scid).
 * Same locking requirement as the dcid variant. */
74 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
78 list_for_each_entry(c, &conn->chan_l, list) {
85 /* Find channel with given SCID.
86 * Returns locked channel. */
87 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
/* Takes conn->chan_lock around the unlocked lookup; the elided lines
 * presumably lock the found channel before returning it — confirm
 * against the full source. */
91 mutex_lock(&conn->chan_lock);
92 c = __l2cap_get_chan_by_scid(conn, cid);
95 mutex_unlock(&conn->chan_lock);
/* Look up a channel on this connection by the signalling command
 * identifier (chan->ident). Caller must hold conn->chan_lock. */
100 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
102 struct l2cap_chan *c;
104 list_for_each_entry(c, &conn->chan_l, list) {
105 if (c->ident == ident)
/* Search the global channel list for a channel bound to this source
 * PSM (sport) and source address. Caller must hold chan_list_lock. */
111 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
113 struct l2cap_chan *c;
115 list_for_each_entry(c, &chan_list, global_l) {
116 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a PSM to the channel. If a specific PSM was requested, fail
 * when it is already taken for this source address; otherwise scan the
 * dynamic range 0x1001..0x10ff (odd values only, per the L2CAP PSM
 * encoding rules) for a free one. */
122 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
126 write_lock(&chan_list_lock);
128 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
141 for (p = 0x1001; p < 0x1100; p += 2)
142 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
143 chan->psm = cpu_to_le16(p);
144 chan->sport = cpu_to_le16(p);
151 write_unlock(&chan_list_lock);
/* Record a fixed SCID on the channel under the global list lock.
 * (Assignment lines elided in this view.) */
155 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
157 write_lock(&chan_list_lock);
161 write_unlock(&chan_list_lock);
/* Allocate a free dynamic channel id on this connection by linear
 * scan of [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). Caller must hold
 * conn->chan_lock since this walks conn->chan_l. */
166 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
168 u16 cid = L2CAP_CID_DYN_START;
170 for (; cid < L2CAP_CID_DYN_END; cid++) {
171 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Set the channel state and notify the owner through the
 * ops->state_change callback. The __ prefix follows the file's
 * convention: caller already holds the needed lock(s). */
178 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
180 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
181 state_to_string(state));
184 chan->ops->state_change(chan, state);
/* Locked wrapper around __l2cap_state_change(); the elided lines
 * presumably take/release the socket lock — confirm against the full
 * source. */
187 static void l2cap_state_change(struct l2cap_chan *chan, int state)
189 struct sock *sk = chan->sk;
192 __l2cap_state_change(chan, state);
/* Record an error on the channel's socket (sk_err assignment elided
 * in this view). Caller holds the required lock. */
196 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
198 struct sock *sk = chan->sk;
/* Locked wrapper around __l2cap_chan_set_err(). */
203 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
205 struct sock *sk = chan->sk;
208 __l2cap_chan_set_err(chan, err);
/* Arm the ERTM retransmission timer, but only when the monitor timer
 * is not already pending and a retransmission timeout is configured. */
212 static void __set_retrans_timer(struct l2cap_chan *chan)
214 if (!delayed_work_pending(&chan->monitor_timer) &&
215 chan->retrans_timeout) {
216 l2cap_set_timer(chan, &chan->retrans_timer,
217 msecs_to_jiffies(chan->retrans_timeout));
/* Arm the ERTM monitor timer; the retransmission timer is cancelled
 * first since the two are mutually exclusive. */
221 static void __set_monitor_timer(struct l2cap_chan *chan)
223 __clear_retrans_timer(chan);
224 if (chan->monitor_timeout) {
225 l2cap_set_timer(chan, &chan->monitor_timer,
226 msecs_to_jiffies(chan->monitor_timeout));
/* Find the skb carrying the given ERTM tx sequence number in a
 * queue, by walking it and comparing bt_cb(skb)->control.txseq. */
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
243 /* ---- L2CAP sequence number lists ---- */
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
/* Allocate the backing array. Size is rounded up to a power of two so
 * that "seq & mask" maps a (up to 14-bit) sequence number into the
 * array; all entries start cleared. Returns an error on allocation
 * failure (return statements elided in this view). */
254 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
256 size_t alloc_size, i;
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
262 alloc_size = roundup_pow_of_two(size);
264 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
268 seq_list->mask = alloc_size - 1;
269 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
270 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
271 for (i = 0; i < alloc_size; i++)
272 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
/* Free the backing array; kfree(NULL) is a no-op so no guard needed. */
277 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
279 kfree(seq_list->list);
282 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
285 /* Constant-time check for list membership */
286 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
/* Remove a specific sequence number from the list. Removing the head
 * is O(1); removing an interior element walks the links. Returns the
 * removed seq, or L2CAP_SEQ_LIST_CLEAR if the list is empty or the
 * seq is not present. */
289 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
291 u16 mask = seq_list->mask;
293 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
294 /* In case someone tries to pop the head of an empty list */
295 return L2CAP_SEQ_LIST_CLEAR;
296 } else if (seq_list->head == seq) {
297 /* Head can be removed in constant time */
298 seq_list->head = seq_list->list[seq & mask];
299 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
/* If the head was also the tail, the list is now empty. */
301 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
302 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
303 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 /* Walk the list to find the sequence number */
307 u16 prev = seq_list->head;
308 while (seq_list->list[prev & mask] != seq) {
309 prev = seq_list->list[prev & mask];
310 if (prev == L2CAP_SEQ_LIST_TAIL)
311 return L2CAP_SEQ_LIST_CLEAR;
314 /* Unlink the number from the list and clear it */
315 seq_list->list[prev & mask] = seq_list->list[seq & mask];
316 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
317 if (seq_list->tail == seq)
318 seq_list->tail = prev;
/* Pop the head of the list in constant time. */
323 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
325 /* Remove the head in constant time */
326 return l2cap_seq_list_remove(seq_list, seq_list->head);
/* Empty the list: clear every array slot and reset head/tail.
 * No-op when the list is already empty. */
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
336 for (i = 0; i <= seq_list->mask; i++)
337 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
/* Append a sequence number at the tail in constant time. Duplicate
 * appends are ignored (slot already non-clear). An empty list makes
 * the new seq the head too; the new tail slot is marked with the
 * L2CAP_SEQ_LIST_TAIL sentinel. */
343 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
345 u16 mask = seq_list->mask;
347 /* All appends happen in constant time */
349 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
352 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
353 seq_list->head = seq;
355 seq_list->list[seq_list->tail & mask] = seq;
357 seq_list->tail = seq;
358 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Delayed-work handler for the channel timer: close the channel with
 * a reason derived from its state (ECONNREFUSED while connecting or
 * configured; other cases elided in this view), then release the
 * reference taken when the timer was armed. */
361 static void l2cap_chan_timeout(struct work_struct *work)
363 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
365 struct l2cap_conn *conn = chan->conn;
368 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
370 mutex_lock(&conn->chan_lock);
371 l2cap_chan_lock(chan);
373 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
374 reason = ECONNREFUSED;
375 else if (chan->state == BT_CONNECT &&
376 chan->sec_level != BT_SECURITY_SDP)
377 reason = ECONNREFUSED;
381 l2cap_chan_close(chan, reason);
383 l2cap_chan_unlock(chan);
385 chan->ops->close(chan);
386 mutex_unlock(&conn->chan_lock);
388 l2cap_chan_put(chan);
/* Allocate and initialise a channel: zeroed, refcount 1, state
 * BT_OPEN, linked into the global chan_list, with the channel timer
 * work initialised. Returns NULL on allocation failure (elided). */
391 struct l2cap_chan *l2cap_chan_create(void)
393 struct l2cap_chan *chan;
395 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
399 mutex_init(&chan->lock);
401 write_lock(&chan_list_lock);
402 list_add(&chan->global_l, &chan_list);
403 write_unlock(&chan_list_lock);
405 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
407 chan->state = BT_OPEN;
409 atomic_set(&chan->refcnt, 1);
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
414 BT_DBG("chan %p", chan);
/* Unlink the channel from the global list and drop the creation
 * reference; the channel is freed when the refcount reaches zero. */
419 void l2cap_chan_destroy(struct l2cap_chan *chan)
421 write_lock(&chan_list_lock);
422 list_del(&chan->global_l);
423 write_unlock(&chan_list_lock);
425 l2cap_chan_put(chan);
/* Reset negotiable parameters to their protocol defaults before a new
 * connection attempt. */
428 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
430 chan->fcs = L2CAP_FCS_CRC16;
431 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
432 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
433 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
434 chan->sec_level = BT_SECURITY_LOW;
436 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
/* Attach a channel to a connection: pick scid/dcid and MTUs according
 * to the channel type (LE data, dynamically allocated CID, connection-
 * less, A2MP fixed channel, or raw/signalling), initialise the default
 * flow spec, take a channel reference and link it into conn->chan_l.
 * Caller must hold conn->chan_lock. */
439 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
441 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
442 __le16_to_cpu(chan->psm), chan->dcid);
444 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
448 switch (chan->chan_type) {
449 case L2CAP_CHAN_CONN_ORIENTED:
450 if (conn->hcon->type == LE_LINK) {
/* LE connection-oriented data uses the fixed LE data CID. */
452 chan->omtu = L2CAP_DEFAULT_MTU;
453 chan->scid = L2CAP_CID_LE_DATA;
454 chan->dcid = L2CAP_CID_LE_DATA;
456 /* Alloc CID for connection-oriented socket */
457 chan->scid = l2cap_alloc_cid(conn);
458 chan->omtu = L2CAP_DEFAULT_MTU;
462 case L2CAP_CHAN_CONN_LESS:
463 /* Connectionless socket */
464 chan->scid = L2CAP_CID_CONN_LESS;
465 chan->dcid = L2CAP_CID_CONN_LESS;
466 chan->omtu = L2CAP_DEFAULT_MTU;
469 case L2CAP_CHAN_CONN_FIX_A2MP:
470 chan->scid = L2CAP_CID_A2MP;
471 chan->dcid = L2CAP_CID_A2MP;
472 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
473 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
477 /* Raw socket can send/recv signalling messages only */
478 chan->scid = L2CAP_CID_SIGNALING;
479 chan->dcid = L2CAP_CID_SIGNALING;
480 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort flow specification. */
483 chan->local_id = L2CAP_BESTEFFORT_ID;
484 chan->local_stype = L2CAP_SERV_BESTEFFORT;
485 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
486 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
487 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
488 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
490 l2cap_chan_hold(chan);
492 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
495 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
497 mutex_lock(&conn->chan_lock);
498 __l2cap_chan_add(conn, chan);
499 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection: stop the channel timer,
 * unlink it, drop the references taken in __l2cap_chan_add (channel
 * ref and, for non-A2MP channels, the hci_conn ref), notify the owner
 * via ops->teardown, and for ERTM/streaming modes release all pending
 * timers, queues and sequence lists. */
502 void l2cap_chan_del(struct l2cap_chan *chan, int err)
504 struct l2cap_conn *conn = chan->conn;
506 __clear_chan_timer(chan);
508 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
511 /* Delete from channel list */
512 list_del(&chan->list);
514 l2cap_chan_put(chan);
518 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
519 hci_conn_put(conn->hcon);
522 if (chan->ops->teardown)
523 chan->ops->teardown(chan, err);
525 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
529 case L2CAP_MODE_BASIC:
532 case L2CAP_MODE_ERTM:
533 __clear_retrans_timer(chan);
534 __clear_monitor_timer(chan);
535 __clear_ack_timer(chan);
537 skb_queue_purge(&chan->srej_q);
539 l2cap_seq_list_free(&chan->srej_list);
540 l2cap_seq_list_free(&chan->retrans_list);
/* fallthrough into streaming cleanup (tx queue shared by both
 * modes) — confirm against the full source. */
544 case L2CAP_MODE_STREAMING:
545 skb_queue_purge(&chan->tx_q);
/* Close a channel according to its current state: a connected/config
 * ACL channel sends a disconnect request and waits under the channel
 * timer; a BT_CONNECT2 ACL channel answers the pending connect
 * request with a reject (security block or bad PSM) before being
 * deleted; other states simply tear down or delete the channel.
 * Caller holds the channel lock (state switch labels elided). */
552 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
554 struct l2cap_conn *conn = chan->conn;
555 struct sock *sk = chan->sk;
557 BT_DBG("chan %p state %s sk %p", chan,
558 state_to_string(chan->state), sk);
560 switch (chan->state) {
562 if (chan->ops->teardown)
563 chan->ops->teardown(chan, 0);
568 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
569 conn->hcon->type == ACL_LINK) {
570 __set_chan_timer(chan, sk->sk_sndtimeo);
571 l2cap_send_disconn_req(conn, chan, reason);
573 l2cap_chan_del(chan, reason);
577 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
578 conn->hcon->type == ACL_LINK) {
579 struct l2cap_conn_rsp rsp;
/* Deferred-setup sockets reject with "security block",
 * everything else with "bad PSM". */
582 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
583 result = L2CAP_CR_SEC_BLOCK;
585 result = L2CAP_CR_BAD_PSM;
586 l2cap_state_change(chan, BT_DISCONN);
/* Note: scid/dcid are swapped in the response — the peer's
 * view of source/destination is the mirror of ours. */
588 rsp.scid = cpu_to_le16(chan->dcid);
589 rsp.dcid = cpu_to_le16(chan->scid);
590 rsp.result = cpu_to_le16(result);
591 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
592 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
596 l2cap_chan_del(chan, reason);
601 l2cap_chan_del(chan, reason);
605 if (chan->ops->teardown)
606 chan->ops->teardown(chan, 0);
/* Map the channel type and security level to an HCI authentication
 * requirement: raw channels use dedicated bonding, SDP channels never
 * bond (and a LOW level is upgraded to the SDP pseudo-level), and
 * everything else uses general bonding, with MITM protection at
 * BT_SECURITY_HIGH. */
611 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
613 if (chan->chan_type == L2CAP_CHAN_RAW) {
614 switch (chan->sec_level) {
615 case BT_SECURITY_HIGH:
616 return HCI_AT_DEDICATED_BONDING_MITM;
617 case BT_SECURITY_MEDIUM:
618 return HCI_AT_DEDICATED_BONDING;
620 return HCI_AT_NO_BONDING;
622 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
623 if (chan->sec_level == BT_SECURITY_LOW)
624 chan->sec_level = BT_SECURITY_SDP;
626 if (chan->sec_level == BT_SECURITY_HIGH)
627 return HCI_AT_NO_BONDING_MITM;
629 return HCI_AT_NO_BONDING;
631 switch (chan->sec_level) {
632 case BT_SECURITY_HIGH:
633 return HCI_AT_GENERAL_BONDING_MITM;
634 case BT_SECURITY_MEDIUM:
635 return HCI_AT_GENERAL_BONDING;
637 return HCI_AT_NO_BONDING;
642 /* Service level security */
643 int l2cap_chan_check_security(struct l2cap_chan *chan)
645 struct l2cap_conn *conn = chan->conn;
648 auth_type = l2cap_get_auth_type(chan);
650 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier for this
 * connection, wrapping within the kernel-reserved range 1..128. */
653 static u8 l2cap_get_ident(struct l2cap_conn *conn)
657 /* Get next available identificator.
658 * 1 - 128 are used by kernel.
659 * 129 - 199 are reserved.
660 * 200 - 254 are used by utilities like l2ping, etc.
663 spin_lock(&conn->lock);
665 if (++conn->tx_ident > 128)
670 spin_unlock(&conn->lock);
/* Build a signalling command PDU and push it to the HCI channel at
 * maximum priority. Non-flushable ACL start is requested when the
 * controller supports it. */
675 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
677 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
680 BT_DBG("code 0x%2.2x", code);
685 if (lmp_no_flush_capable(conn->hcon->hdev))
686 flags = ACL_START_NO_FLUSH;
690 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
691 skb->priority = HCI_PRIO_MAX;
693 hci_send_acl(conn->hchan, skb, flags);
/* Transmit a data skb on the channel's HCI channel, honouring the
 * per-channel flushable and force-active flags. */
696 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
698 struct hci_conn *hcon = chan->conn->hcon;
701 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
704 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
705 lmp_no_flush_capable(hcon->hdev))
706 flags = ACL_START_NO_FLUSH;
710 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
711 hci_send_acl(chan->conn->hchan, skb, flags);
/* Decode a 16-bit enhanced (ERTM) control field into the generic
 * l2cap_ctrl representation; S-frames carry poll/supervise bits,
 * I-frames carry sar/txseq. */
714 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
716 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
717 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
719 if (enh & L2CAP_CTRL_FRAME_TYPE) {
/* S-frame */
722 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
723 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
/* I-frame */
730 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
731 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
/* Decode a 32-bit extended control field (used with the extended
 * window size option); same structure as the enhanced variant. */
738 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
740 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
741 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
743 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
/* S-frame */
746 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
747 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
/* I-frame */
754 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
755 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Strip and decode the control field at the front of an incoming skb,
 * choosing the 32-bit extended or 16-bit enhanced layout from the
 * channel's FLAG_EXT_CTRL flag. */
762 static inline void __unpack_control(struct l2cap_chan *chan,
765 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
766 __unpack_extended_control(get_unaligned_le32(skb->data),
767 &bt_cb(skb)->control);
768 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
770 __unpack_enhanced_control(get_unaligned_le16(skb->data),
771 &bt_cb(skb)->control);
772 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
/* Encode an l2cap_ctrl into the 32-bit extended control layout
 * (inverse of __unpack_extended_control). */
776 static u32 __pack_extended_control(struct l2cap_ctrl *control)
780 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
781 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
783 if (control->sframe) {
784 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
785 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
786 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
788 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
789 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
/* Encode an l2cap_ctrl into the 16-bit enhanced control layout
 * (inverse of __unpack_enhanced_control). */
795 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
799 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
800 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
802 if (control->sframe) {
803 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
804 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
805 packed |= L2CAP_CTRL_FRAME_TYPE;
807 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
808 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
/* Write the packed control field into an outgoing skb just after the
 * basic L2CAP header, using the layout selected by FLAG_EXT_CTRL. */
814 static inline void __pack_control(struct l2cap_chan *chan,
815 struct l2cap_ctrl *control,
818 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
819 put_unaligned_le32(__pack_extended_control(control),
820 skb->data + L2CAP_HDR_SIZE);
822 put_unaligned_le16(__pack_enhanced_control(control),
823 skb->data + L2CAP_HDR_SIZE);
/* Size of the ERTM header: basic header plus extended or enhanced
 * control field. */
827 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
829 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
830 return L2CAP_EXT_HDR_SIZE;
832 return L2CAP_ENH_HDR_SIZE;
/* Build a supervisory (S-) frame PDU: basic header, packed control
 * field (16- or 32-bit per FLAG_EXT_CTRL) and, when CRC16 FCS is
 * negotiated, a trailing checksum over the whole frame. Returns
 * ERR_PTR(-ENOMEM) on allocation failure. */
835 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
839 struct l2cap_hdr *lh;
840 int hlen = __ertm_hdr_size(chan);
842 if (chan->fcs == L2CAP_FCS_CRC16)
843 hlen += L2CAP_FCS_SIZE;
845 skb = bt_skb_alloc(hlen, GFP_KERNEL);
848 return ERR_PTR(-ENOMEM);
850 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
851 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
852 lh->cid = cpu_to_le16(chan->dcid);
854 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
855 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
857 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
859 if (chan->fcs == L2CAP_FCS_CRC16) {
860 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
861 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
864 skb->priority = HCI_PRIO_MAX;
/* Fill in the bookkeeping bits of an S-frame and send it: sets the
 * F-bit when CONN_SEND_FBIT is pending, tracks RNR-sent state, and
 * for anything but SREJ records the acked reqseq and stops the ack
 * timer before building and transmitting the PDU. */
868 static void l2cap_send_sframe(struct l2cap_chan *chan,
869 struct l2cap_ctrl *control)
874 BT_DBG("chan %p, control %p", chan, control);
876 if (!control->sframe)
879 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
883 if (control->super == L2CAP_SUPER_RR)
884 clear_bit(CONN_RNR_SENT, &chan->conn_state);
885 else if (control->super == L2CAP_SUPER_RNR)
886 set_bit(CONN_RNR_SENT, &chan->conn_state);
888 if (control->super != L2CAP_SUPER_SREJ) {
889 chan->last_acked_seq = control->reqseq;
890 __clear_ack_timer(chan);
893 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
894 control->final, control->poll, control->super);
896 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
897 control_field = __pack_extended_control(control);
899 control_field = __pack_enhanced_control(control);
901 skb = l2cap_create_sframe_pdu(chan, control_field);
903 l2cap_do_send(chan, skb);
/* Send an RR (ready) or RNR (not ready) S-frame acknowledging
 * buffer_seq, choosing RNR when the local side is busy. */
906 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
908 struct l2cap_ctrl control;
910 BT_DBG("chan %p, poll %d", chan, poll);
912 memset(&control, 0, sizeof(control));
916 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
917 control.super = L2CAP_SUPER_RNR;
919 control.super = L2CAP_SUPER_RR;
921 control.reqseq = chan->buffer_seq;
922 l2cap_send_sframe(chan, &control);
/* True when no connect request is currently outstanding for this
 * channel. */
925 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
927 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Send an L2CAP connect request for this channel, allocating a fresh
 * signalling ident and marking the connect as pending. */
930 static void l2cap_send_conn_req(struct l2cap_chan *chan)
932 struct l2cap_conn *conn = chan->conn;
933 struct l2cap_conn_req req;
935 req.scid = cpu_to_le16(chan->scid);
938 chan->ident = l2cap_get_ident(conn);
940 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
942 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Mark the channel fully connected: clear all configuration state,
 * stop the channel timer and notify the owner. */
945 static void l2cap_chan_ready(struct l2cap_chan *chan)
947 /* This clears all conf flags, including CONF_NOT_COMPLETE */
948 chan->conf_state = 0;
949 __clear_chan_timer(chan);
951 chan->state = BT_CONNECTED;
953 chan->ops->ready(chan);
/* Kick off channel establishment. LE links are ready immediately.
 * On BR/EDR, once the feature-mask exchange is done and security is
 * satisfied a connect request is sent; otherwise an information
 * request is issued first (under the info timer). */
956 static void l2cap_do_start(struct l2cap_chan *chan)
958 struct l2cap_conn *conn = chan->conn;
960 if (conn->hcon->type == LE_LINK) {
961 l2cap_chan_ready(chan);
965 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
966 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
969 if (l2cap_chan_check_security(chan) &&
970 __l2cap_no_conn_pending(chan))
971 l2cap_send_conn_req(chan);
973 struct l2cap_info_req req;
974 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
976 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
977 conn->info_ident = l2cap_get_ident(conn);
979 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
981 l2cap_send_cmd(conn, conn->info_ident,
982 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check that a retransmission mode is supported by both the local
 * feature mask and the remote feat_mask. Basic mode handling is
 * elided in this view. */
986 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
988 u32 local_feat_mask = l2cap_feat_mask;
990 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
993 case L2CAP_MODE_ERTM:
994 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
995 case L2CAP_MODE_STREAMING:
996 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a disconnect request for the channel (ERTM timers are stopped
 * first), then move it to BT_DISCONN with the given error. A2MP fixed
 * channels have no signalling exchange and just change state. */
1002 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
1004 struct sock *sk = chan->sk;
1005 struct l2cap_disconn_req req;
1010 if (chan->mode == L2CAP_MODE_ERTM) {
1011 __clear_retrans_timer(chan);
1012 __clear_monitor_timer(chan);
1013 __clear_ack_timer(chan);
1016 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1017 __l2cap_state_change(chan, BT_DISCONN);
1021 req.dcid = cpu_to_le16(chan->dcid);
1022 req.scid = cpu_to_le16(chan->scid);
1023 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1024 L2CAP_DISCONN_REQ, sizeof(req), &req);
1027 __l2cap_state_change(chan, BT_DISCONN);
1028 __l2cap_chan_set_err(chan, err);
1032 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on the link and advance its
 * state machine: BT_CONNECT channels (re)send a connect request once
 * security allows (closing channels whose mode the peer cannot
 * support), and BT_CONNECT2 channels answer the pending connect
 * request — success, authorization-pending for deferred setup, or
 * authentication-pending — followed by the first configure request
 * when appropriate. */
1033 static void l2cap_conn_start(struct l2cap_conn *conn)
1035 struct l2cap_chan *chan, *tmp;
1037 BT_DBG("conn %p", conn);
1039 mutex_lock(&conn->chan_lock);
1041 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1042 struct sock *sk = chan->sk;
1044 l2cap_chan_lock(chan);
1046 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1047 l2cap_chan_unlock(chan);
1051 if (chan->state == BT_CONNECT) {
1052 if (!l2cap_chan_check_security(chan) ||
1053 !__l2cap_no_conn_pending(chan)) {
1054 l2cap_chan_unlock(chan);
/* State-2 devices cannot fall back to basic mode: close
 * the channel if the peer lacks the required mode. */
1058 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1059 && test_bit(CONF_STATE2_DEVICE,
1060 &chan->conf_state)) {
1061 l2cap_chan_close(chan, ECONNRESET);
1062 l2cap_chan_unlock(chan);
1066 l2cap_send_conn_req(chan);
1068 } else if (chan->state == BT_CONNECT2) {
1069 struct l2cap_conn_rsp rsp;
/* scid/dcid mirrored: the response describes the peer's view. */
1071 rsp.scid = cpu_to_le16(chan->dcid);
1072 rsp.dcid = cpu_to_le16(chan->scid);
1074 if (l2cap_chan_check_security(chan)) {
1076 if (test_bit(BT_SK_DEFER_SETUP,
1077 &bt_sk(sk)->flags)) {
1078 struct sock *parent = bt_sk(sk)->parent;
1079 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1080 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
/* Wake the listening socket so userspace can accept. */
1082 parent->sk_data_ready(parent, 0);
1085 __l2cap_state_change(chan, BT_CONFIG);
1086 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1087 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1091 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1092 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1095 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1098 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1099 rsp.result != L2CAP_CR_SUCCESS) {
1100 l2cap_chan_unlock(chan);
1104 set_bit(CONF_REQ_SENT, &chan->conf_state);
1105 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1106 l2cap_build_conf_req(chan, buf), buf);
1107 chan->num_conf_req++;
1110 l2cap_chan_unlock(chan);
1113 mutex_unlock(&conn->chan_lock);
1116 /* Find socket with cid and source/destination bdaddr.
1117 * Returns closest match, locked.
/* Exact src+dst matches return immediately; otherwise the closest
 * wildcard (BDADDR_ANY) match found during the scan is kept
 * (assignment to c1 elided in this view). */
1119 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1123 struct l2cap_chan *c, *c1 = NULL;
1125 read_lock(&chan_list_lock);
1127 list_for_each_entry(c, &chan_list, global_l) {
1128 struct sock *sk = c->sk;
1130 if (state && c->state != state)
1133 if (c->scid == cid) {
1134 int src_match, dst_match;
1135 int src_any, dst_any;
1138 src_match = !bacmp(&bt_sk(sk)->src, src);
1139 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1140 if (src_match && dst_match) {
1141 read_unlock(&chan_list_lock);
1146 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1147 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1148 if ((src_match && dst_any) || (src_any && dst_match) ||
1149 (src_any && dst_any))
1154 read_unlock(&chan_list_lock);
/* Incoming LE connection: find a listener on the LE data CID, spawn a
 * child channel via ops->new_connection, bind its addresses to the
 * link, queue it on the accept queue and mark it ready. */
1159 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1161 struct sock *parent, *sk;
1162 struct l2cap_chan *chan, *pchan;
1166 /* Check if we have socket listening on cid */
1167 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1168 conn->src, conn->dst);
1176 chan = pchan->ops->new_connection(pchan);
1182 hci_conn_hold(conn->hcon);
1184 bacpy(&bt_sk(sk)->src, conn->src);
1185 bacpy(&bt_sk(sk)->dst, conn->dst);
1187 bt_accept_enqueue(parent, sk);
1189 l2cap_chan_add(conn, chan);
1191 l2cap_chan_ready(chan);
1194 release_sock(parent);
/* Link-level connection became ready: handle LE accept/security, then
 * walk all channels — LE channels become ready once SMP security is
 * satisfied, non-connection-oriented channels go straight to
 * BT_CONNECTED, and BT_CONNECT channels continue establishment via
 * l2cap_do_start(). */
1197 static void l2cap_conn_ready(struct l2cap_conn *conn)
1199 struct l2cap_chan *chan;
1201 BT_DBG("conn %p", conn);
1203 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
1204 l2cap_le_conn_ready(conn);
1206 if (conn->hcon->out && conn->hcon->type == LE_LINK)
1207 smp_conn_security(conn, conn->hcon->pending_sec_level);
1209 mutex_lock(&conn->chan_lock);
1211 list_for_each_entry(chan, &conn->chan_l, list) {
1213 l2cap_chan_lock(chan);
1215 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1216 l2cap_chan_unlock(chan);
1220 if (conn->hcon->type == LE_LINK) {
1221 if (smp_conn_security(conn, chan->sec_level))
1222 l2cap_chan_ready(chan);
1224 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1225 struct sock *sk = chan->sk;
1226 __clear_chan_timer(chan);
1228 __l2cap_state_change(chan, BT_CONNECTED);
1229 sk->sk_state_change(sk);
1232 } else if (chan->state == BT_CONNECT)
1233 l2cap_do_start(chan);
1235 l2cap_chan_unlock(chan);
1238 mutex_unlock(&conn->chan_lock);
1241 /* Notify sockets that we cannot guaranty reliability anymore */
1242 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1244 struct l2cap_chan *chan;
1246 BT_DBG("conn %p", conn);
1248 mutex_lock(&conn->chan_lock);
/* Only channels that demanded reliability get the error. */
1250 list_for_each_entry(chan, &conn->chan_l, list) {
1251 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1252 __l2cap_chan_set_err(chan, err);
1255 mutex_unlock(&conn->chan_lock);
/* Information-request timer expired: treat the feature exchange as
 * done and resume channel establishment. */
1258 static void l2cap_info_timeout(struct work_struct *work)
1260 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1263 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1264 conn->info_ident = 0;
1266 l2cap_conn_start(conn);
/* Tear down the whole L2CAP connection: delete every channel (holding
 * a temporary ref so ops->close can run after unlock), free the HCI
 * channel, cancel the info/security timers, destroy any in-flight SMP
 * context, and detach from the hci_conn. */
1269 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1271 struct l2cap_conn *conn = hcon->l2cap_data;
1272 struct l2cap_chan *chan, *l;
1277 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled frame. */
1279 kfree_skb(conn->rx_skb);
1281 mutex_lock(&conn->chan_lock);
1284 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1285 l2cap_chan_hold(chan);
1286 l2cap_chan_lock(chan);
1288 l2cap_chan_del(chan, err);
1290 l2cap_chan_unlock(chan);
1292 chan->ops->close(chan);
1293 l2cap_chan_put(chan);
1296 mutex_unlock(&conn->chan_lock);
1298 hci_chan_del(conn->hchan);
1300 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1301 cancel_delayed_work_sync(&conn->info_timer);
1303 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1304 cancel_delayed_work_sync(&conn->security_timer);
1305 smp_chan_destroy(conn);
1308 hcon->l2cap_data = NULL;
/* LE security timer expired: destroy the pending SMP context and tear
 * down the connection with ETIMEDOUT. */
1312 static void security_timeout(struct work_struct *work)
1314 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1315 security_timer.work);
1317 BT_DBG("conn %p", conn);
1319 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1320 smp_chan_destroy(conn);
1321 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) L2CAP connection object for an HCI
 * link: allocate an hci_chan and conn, pick the MTU from the link
 * type, record addresses, init locks/lists and arm the appropriate
 * delayed work (security timer on LE, info timer otherwise). */
1325 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1327 struct l2cap_conn *conn = hcon->l2cap_data;
1328 struct hci_chan *hchan;
1333 hchan = hci_chan_create(hcon);
1337 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
/* Allocation failed: release the hci_chan before bailing out. */
1339 hci_chan_del(hchan);
1343 hcon->l2cap_data = conn;
1345 conn->hchan = hchan;
1347 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1349 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1350 conn->mtu = hcon->hdev->le_mtu;
1352 conn->mtu = hcon->hdev->acl_mtu;
1354 conn->src = &hcon->hdev->bdaddr;
1355 conn->dst = &hcon->dst;
1357 conn->feat_mask = 0;
1359 spin_lock_init(&conn->lock);
1360 mutex_init(&conn->chan_lock);
1362 INIT_LIST_HEAD(&conn->chan_l);
1364 if (hcon->type == LE_LINK)
1365 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1367 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1369 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1374 /* ---- Socket interface ---- */
1376 /* Find socket with psm and source / destination bdaddr.
1377 * Returns closest match.
/* Same matching strategy as l2cap_global_chan_by_scid(): an exact
 * src+dst match wins immediately; wildcard (BDADDR_ANY) matches are
 * kept as the closest candidate (assignment to c1 elided). */
1379 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1383 struct l2cap_chan *c, *c1 = NULL;
1385 read_lock(&chan_list_lock);
1387 list_for_each_entry(c, &chan_list, global_l) {
1388 struct sock *sk = c->sk;
1390 if (state && c->state != state)
1393 if (c->psm == psm) {
1394 int src_match, dst_match;
1395 int src_any, dst_any;
1398 src_match = !bacmp(&bt_sk(sk)->src, src);
1399 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1400 if (src_match && dst_match) {
1401 read_unlock(&chan_list_lock);
1406 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1407 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1408 if ((src_match && dst_any) || (src_any && dst_match) ||
1409 (src_any && dst_any))
1414 read_unlock(&chan_list_lock);
/* Outgoing connect for a channel: validate PSM/CID and mode, route to
 * an HCI device, establish (or reuse) the ACL/LE link with the
 * required security, attach the channel to the connection and start
 * the L2CAP connect sequence. Returns 0 or a negative errno; the
 * device is locked for the duration (unlock at the end). */
1419 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1420 bdaddr_t *dst, u8 dst_type)
1422 struct sock *sk = chan->sk;
1423 bdaddr_t *src = &bt_sk(sk)->src;
1424 struct l2cap_conn *conn;
1425 struct hci_conn *hcon;
1426 struct hci_dev *hdev;
1430 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1431 dst_type, __le16_to_cpu(chan->psm));
1433 hdev = hci_get_route(dst, src);
1435 return -EHOSTUNREACH;
1439 l2cap_chan_lock(chan);
1441 /* PSM must be odd and lsb of upper byte must be 0 */
1442 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1443 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a CID. */
1448 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1453 switch (chan->mode) {
1454 case L2CAP_MODE_BASIC:
1456 case L2CAP_MODE_ERTM:
1457 case L2CAP_MODE_STREAMING:
1466 switch (chan->state) {
1470 /* Already connecting */
1475 /* Already connected */
1489 /* Set destination address and psm */
1491 bacpy(&bt_sk(sk)->dst, dst);
1497 auth_type = l2cap_get_auth_type(chan);
/* LE data CID selects an LE link; everything else goes over ACL. */
1499 if (chan->dcid == L2CAP_CID_LE_DATA)
1500 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1501 chan->sec_level, auth_type);
1503 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1504 chan->sec_level, auth_type);
1507 err = PTR_ERR(hcon);
1511 conn = l2cap_conn_add(hcon, 0);
/* Only one LE data channel per link is allowed. */
1518 if (hcon->type == LE_LINK) {
1521 if (!list_empty(&conn->chan_l)) {
1530 /* Update source addr of the socket */
1531 bacpy(src, conn->src);
/* l2cap_chan_add() takes conn->chan_lock, so drop the channel
 * lock around it to respect lock ordering. */
1533 l2cap_chan_unlock(chan);
1534 l2cap_chan_add(conn, chan);
1535 l2cap_chan_lock(chan);
1537 l2cap_state_change(chan, BT_CONNECT);
1538 __set_chan_timer(chan, sk->sk_sndtimeo);
1540 if (hcon->state == BT_CONNECTED) {
1541 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1542 __clear_chan_timer(chan);
1543 if (l2cap_chan_check_security(chan))
1544 l2cap_state_change(chan, BT_CONNECTED);
1546 l2cap_do_start(chan);
1552 l2cap_chan_unlock(chan);
1553 hci_dev_unlock(hdev);
/* Sleep (interruptibly) until every outstanding ERTM I-frame on the
 * socket's channel has been acknowledged, or the connection drops.
 * Returns 0, a signal-derived error, or a pending socket error.
 * NOTE(review): sampled excerpt -- the timeo initialisation and
 * loop-exit breaks are not visible here.
 */
1558 int __l2cap_wait_ack(struct sock *sk)
1560 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1561 DECLARE_WAITQUEUE(wait, current);
1565 add_wait_queue(sk_sleep(sk), &wait);
1566 set_current_state(TASK_INTERRUPTIBLE);
/* wait while frames remain unacked and the channel is still attached */
1567 while (chan->unacked_frames > 0 && chan->conn) {
/* a pending signal aborts the wait */
1571 if (signal_pending(current)) {
1572 err = sock_intr_errno(timeo);
1577 timeo = schedule_timeout(timeo);
1579 set_current_state(TASK_INTERRUPTIBLE);
1581 err = sock_error(sk);
1585 set_current_state(TASK_RUNNING);
1586 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer expiry (delayed work).  Feeds L2CAP_EV_MONITOR_TO
 * into the tx state machine under the channel lock, then drops the
 * reference the timer held on the channel.  The early unlock/put pair
 * is the bail-out path for a channel that is no longer usable (guard
 * condition not visible in this sampled excerpt).
 */
1590 static void l2cap_monitor_timeout(struct work_struct *work)
1592 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1593 monitor_timer.work);
1595 BT_DBG("chan %p", chan);
1597 l2cap_chan_lock(chan);
/* bail-out path: release lock and the timer's channel reference */
1600 l2cap_chan_unlock(chan);
1601 l2cap_chan_put(chan);
1605 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1607 l2cap_chan_unlock(chan);
1608 l2cap_chan_put(chan);
/* ERTM retransmission timer expiry (delayed work).  Mirrors
 * l2cap_monitor_timeout() but injects L2CAP_EV_RETRANS_TO into the tx
 * state machine; drops the timer's channel reference when done.
 * (Bail-out guard condition not visible in this sampled excerpt.)
 */
1611 static void l2cap_retrans_timeout(struct work_struct *work)
1613 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1614 retrans_timer.work);
1616 BT_DBG("chan %p", chan);
1618 l2cap_chan_lock(chan);
/* bail-out path: release lock and the timer's channel reference */
1621 l2cap_chan_unlock(chan);
1622 l2cap_chan_put(chan);
1626 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1627 l2cap_chan_unlock(chan);
1628 l2cap_chan_put(chan);
/* Transmit queued SDU fragments in streaming mode: splice @skbs onto
 * the tx queue, stamp each frame with the next tx sequence number,
 * append an FCS when CRC16 is configured and send immediately.  No
 * retransmission state is kept (streaming mode is unreliable).
 */
1631 static void l2cap_streaming_send(struct l2cap_chan *chan,
1632 struct sk_buff_head *skbs)
1634 struct sk_buff *skb;
1635 struct l2cap_ctrl *control;
1637 BT_DBG("chan %p, skbs %p", chan, skbs);
1639 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1641 while (!skb_queue_empty(&chan->tx_q)) {
1643 skb = skb_dequeue(&chan->tx_q);
1645 bt_cb(skb)->control.retries = 1;
1646 control = &bt_cb(skb)->control;
/* streaming frames never acknowledge anything: reqseq stays 0 */
1648 control->reqseq = 0;
1649 control->txseq = chan->next_tx_seq;
1651 __pack_control(chan, control, skb);
/* trailing CRC16 over header + payload when FCS is enabled */
1653 if (chan->fcs == L2CAP_FCS_CRC16) {
1654 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1655 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1658 l2cap_do_send(chan, skb);
1660 BT_DBG("Sent txseq %u", control->txseq);
1662 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1663 chan->frames_sent++;
/* Send as many queued I-frames as the remote tx window permits while
 * the channel is connected, the peer is not busy and the tx state is
 * XMIT.  Each frame gets sequence numbers and (optionally) an FCS,
 * is cloned (clones are treated as read-only by lower layers) and
 * handed to HCI; the retransmission timer is armed per frame.
 * Returns the number of frames sent.
 * NOTE(review): sampled excerpt -- the early "return 0/-ENOTCONN"
 * statements and the final return are not visible here.
 */
1667 static int l2cap_ertm_send(struct l2cap_chan *chan)
1669 struct sk_buff *skb, *tx_skb;
1670 struct l2cap_ctrl *control;
1673 BT_DBG("chan %p", chan);
1675 if (chan->state != BT_CONNECTED)
1678 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1681 while (chan->tx_send_head &&
1682 chan->unacked_frames < chan->remote_tx_win &&
1683 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1685 skb = chan->tx_send_head;
1687 bt_cb(skb)->control.retries = 1;
1688 control = &bt_cb(skb)->control;
/* piggyback the F-bit if one is owed to the peer */
1690 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* every I-frame acknowledges everything received so far */
1693 control->reqseq = chan->buffer_seq;
1694 chan->last_acked_seq = chan->buffer_seq;
1695 control->txseq = chan->next_tx_seq;
1697 __pack_control(chan, control, skb);
1699 if (chan->fcs == L2CAP_FCS_CRC16) {
1700 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1701 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1704 /* Clone after data has been modified. Data is assumed to be
1705 read-only (for locking purposes) on cloned sk_buffs.
1707 tx_skb = skb_clone(skb, GFP_KERNEL);
1712 __set_retrans_timer(chan);
1714 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1715 chan->unacked_frames++;
1716 chan->frames_sent++;
/* advance tx_send_head; NULL once the queue tail has been sent */
1719 if (skb_queue_is_last(&chan->tx_q, skb))
1720 chan->tx_send_head = NULL;
1722 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1724 l2cap_do_send(chan, tx_skb);
1725 BT_DBG("Sent txseq %u", control->txseq);
1728 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1729 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every frame whose sequence number is on retrans_list.
 * Enforces the max_tx retry limit (disconnecting on exhaustion),
 * refreshes the reqseq/F-bit in a private copy of the control field,
 * rewrites the packed control word in the skb, recomputes the FCS and
 * resends.  Skips entirely while the remote side is busy.
 * NOTE(review): sampled excerpt -- continue/return statements and some
 * braces are not visible here.
 */
1734 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1736 struct l2cap_ctrl control;
1737 struct sk_buff *skb;
1738 struct sk_buff *tx_skb;
1741 BT_DBG("chan %p", chan);
1743 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1746 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1747 seq = l2cap_seq_list_pop(&chan->retrans_list);
/* the frame may already have been acked and freed */
1749 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1751 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1756 bt_cb(skb)->control.retries++;
1757 control = bt_cb(skb)->control;
/* retry budget exhausted: tear the connection down */
1759 if (chan->max_tx != 0 &&
1760 bt_cb(skb)->control.retries > chan->max_tx) {
1761 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1762 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1763 l2cap_seq_list_clear(&chan->retrans_list);
/* refresh the ack (reqseq) and F-bit for this retransmission */
1767 control.reqseq = chan->buffer_seq;
1768 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1773 if (skb_cloned(skb)) {
1774 /* Cloned sk_buffs are read-only, so we need a
1777 tx_skb = skb_copy(skb, GFP_ATOMIC);
1779 tx_skb = skb_clone(skb, GFP_ATOMIC);
/* allocation failure: give up on this resend batch */
1783 l2cap_seq_list_clear(&chan->retrans_list);
1787 /* Update skb contents */
1788 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1789 put_unaligned_le32(__pack_extended_control(&control),
1790 tx_skb->data + L2CAP_HDR_SIZE);
1792 put_unaligned_le16(__pack_enhanced_control(&control),
1793 tx_skb->data + L2CAP_HDR_SIZE);
/* recompute FCS over the rewritten frame */
1796 if (chan->fcs == L2CAP_FCS_CRC16) {
1797 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1798 put_unaligned_le16(fcs, skb_put(tx_skb,
1802 l2cap_do_send(chan, tx_skb);
1804 BT_DBG("Resent txseq %d", control.txseq);
1806 chan->last_acked_seq = chan->buffer_seq;
1810 static void l2cap_retransmit(struct l2cap_chan *chan,
1811 struct l2cap_ctrl *control)
1813 BT_DBG("chan %p, control %p", chan, control);
1815 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1816 l2cap_ertm_resend(chan);
/* Retransmit all unacknowledged frames starting at control->reqseq
 * (REJ recovery).  Sets the F-bit first when the peer polled us,
 * rebuilds retrans_list by walking the tx queue from the rejected
 * frame up to tx_send_head, then defers to l2cap_ertm_resend().
 * NOTE(review): sampled excerpt -- the poll-bit guard around setting
 * CONN_SEND_FBIT and the early return while remote-busy are not fully
 * visible here.
 */
1819 static void l2cap_retransmit_all(struct l2cap_chan *chan,
1820 struct l2cap_ctrl *control)
1822 struct sk_buff *skb;
1824 BT_DBG("chan %p, control %p", chan, control);
1827 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* start from a clean slate; everything is re-queued below */
1829 l2cap_seq_list_clear(&chan->retrans_list);
1831 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1834 if (chan->unacked_frames) {
/* find the first frame at or after the rejected sequence number */
1835 skb_queue_walk(&chan->tx_q, skb) {
1836 if (bt_cb(skb)->control.txseq == control->reqseq ||
1837 skb == chan->tx_send_head)
/* queue every sent-but-unacked frame for retransmission */
1841 skb_queue_walk_from(&chan->tx_q, skb) {
1842 if (skb == chan->tx_send_head)
1845 l2cap_seq_list_append(&chan->retrans_list,
1846 bt_cb(skb)->control.txseq);
1849 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames.  Sends an RNR when locally busy,
 * otherwise prefers piggybacking the ack on pending outbound I-frames;
 * failing that, sends an explicit RR once the number of unacked
 * received frames reaches ~3/4 of the tx window, and otherwise just
 * (re)arms the ack timer.
 */
1853 static void l2cap_send_ack(struct l2cap_chan *chan)
1855 struct l2cap_ctrl control;
1856 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1857 chan->last_acked_seq);
1860 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1861 chan, chan->last_acked_seq, chan->buffer_seq);
1863 memset(&control, 0, sizeof(control));
/* local receive buffers full: tell the peer via RNR */
1866 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1867 chan->rx_state == L2CAP_RX_STATE_RECV) {
1868 __clear_ack_timer(chan);
1869 control.super = L2CAP_SUPER_RNR;
1870 control.reqseq = chan->buffer_seq;
1871 l2cap_send_sframe(chan, &control);
1873 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1874 l2cap_ertm_send(chan);
1875 /* If any i-frames were sent, they included an ack */
1876 if (chan->buffer_seq == chan->last_acked_seq)
1880 /* Ack now if the tx window is 3/4ths full.
1881 * Calculate without mul or div
/* threshold = tx_win * 3; NOTE(review): the final right-shift that
 * divides by 4 is not visible in this sampled excerpt
 */
1883 threshold = chan->tx_win;
1884 threshold += threshold << 1;
1887 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
1890 if (frames_to_ack >= threshold) {
1891 __clear_ack_timer(chan);
1892 control.super = L2CAP_SUPER_RR;
1893 control.reqseq = chan->buffer_seq;
1894 l2cap_send_sframe(chan, &control);
/* nothing urgent: defer the ack */
1899 __set_ack_timer(chan);
/* Copy user iovec data into @skb: the first @count bytes go into the
 * linear part, and any remainder is spilled into a chain of
 * continuation fragments (each at most conn->mtu bytes) hung off
 * skb_shinfo(skb)->frag_list.  Returns 0 on success or a negative
 * error (copy fault or allocation failure).
 * NOTE(review): sampled excerpt -- the -EFAULT returns, the while-loop
 * header over the remaining length and the len/sent bookkeeping lines
 * are not visible here.
 */
1903 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1904 struct msghdr *msg, int len,
1905 int count, struct sk_buff *skb)
1907 struct l2cap_conn *conn = chan->conn;
1908 struct sk_buff **frag;
1911 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1917 /* Continuation fragments (no L2CAP header) */
1918 frag = &skb_shinfo(skb)->frag_list;
1920 struct sk_buff *tmp;
1922 count = min_t(unsigned int, conn->mtu, len);
/* fragments are allocated via the channel's allocator so MSG_DONTWAIT
 * semantics are honoured
 */
1924 tmp = chan->ops->alloc_skb(chan, count,
1925 msg->msg_flags & MSG_DONTWAIT);
1927 return PTR_ERR(tmp);
1931 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1934 (*frag)->priority = skb->priority;
/* account fragment bytes against the head skb's totals */
1939 skb->len += (*frag)->len;
1940 skb->data_len += (*frag)->len;
1942 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header, the 2-byte
 * PSM, then @len bytes of user data copied from the iovec (spilling
 * into fragments if it exceeds the connection MTU).  Returns the skb
 * or an ERR_PTR on allocation/copy failure.
 */
1948 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1949 struct msghdr *msg, size_t len,
1952 struct l2cap_conn *conn = chan->conn;
1953 struct sk_buff *skb;
1954 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1955 struct l2cap_hdr *lh;
1957 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
/* linear part holds at most one HCI-MTU worth of data */
1959 count = min_t(unsigned int, (conn->mtu - hlen), len);
1961 skb = chan->ops->alloc_skb(chan, count + hlen,
1962 msg->msg_flags & MSG_DONTWAIT);
1966 skb->priority = priority;
1968 /* Create L2CAP header */
1969 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1970 lh->cid = cpu_to_le16(chan->dcid);
/* the PSM counts toward the L2CAP length field */
1971 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1972 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1974 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1975 if (unlikely(err < 0)) {
1977 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: L2CAP header followed by @len
 * bytes of user data copied from the iovec (spilling into fragments if
 * it exceeds the connection MTU).  Returns the skb or an ERR_PTR on
 * allocation/copy failure.
 */
1982 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1983 struct msghdr *msg, size_t len,
1986 struct l2cap_conn *conn = chan->conn;
1987 struct sk_buff *skb;
1989 struct l2cap_hdr *lh;
1991 BT_DBG("chan %p len %zu", chan, len);
1993 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
1995 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
1996 msg->msg_flags & MSG_DONTWAIT);
2000 skb->priority = priority;
2002 /* Create L2CAP header */
2003 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2004 lh->cid = cpu_to_le16(chan->dcid);
2005 lh->len = cpu_to_le16(len);
2007 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2008 if (unlikely(err < 0)) {
2010 return ERR_PTR(err);
/* Build one ERTM/streaming I-frame: L2CAP header, a zeroed placeholder
 * control field (filled in at transmit time), an optional SDU-length
 * field for the first segment of a segmented SDU, then the payload.
 * Room for the FCS is included in the length calculation.  Returns the
 * skb or an ERR_PTR.
 * NOTE(review): sampled excerpt -- the !conn guard body and the sdulen
 * conditionals are only partially visible here.
 */
2015 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2016 struct msghdr *msg, size_t len,
2019 struct l2cap_conn *conn = chan->conn;
2020 struct sk_buff *skb;
2021 int err, count, hlen;
2022 struct l2cap_hdr *lh;
2024 BT_DBG("chan %p len %zu", chan, len);
2027 return ERR_PTR(-ENOTCONN);
/* header size depends on enhanced vs extended control field */
2029 hlen = __ertm_hdr_size(chan);
2032 hlen += L2CAP_SDULEN_SIZE;
2034 if (chan->fcs == L2CAP_FCS_CRC16)
2035 hlen += L2CAP_FCS_SIZE;
2037 count = min_t(unsigned int, (conn->mtu - hlen), len);
2039 skb = chan->ops->alloc_skb(chan, count + hlen,
2040 msg->msg_flags & MSG_DONTWAIT);
2044 /* Create L2CAP header */
2045 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2046 lh->cid = cpu_to_le16(chan->dcid);
2047 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2049 /* Control header is populated later */
2050 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2051 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE))
2053 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
/* SDU length is only present on the first segment (sdulen != 0) */
2056 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2058 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2059 if (unlikely(err < 0)) {
2061 return ERR_PTR(err);
2064 bt_cb(skb)->control.fcs = chan->fcs;
2065 bt_cb(skb)->control.retries = 0;
/* Split an SDU of @len bytes into I-frame PDUs and queue them on
 * @seg_queue with the appropriate SAR marking (UNSEGMENTED, or
 * START/CONTINUE/END for multi-PDU SDUs).  PDU size is bounded by the
 * HCI MTU, the maximum BR/EDR payload and the remote MPS.  On any
 * PDU-creation failure the partially built queue is purged and the
 * error returned.
 * NOTE(review): sampled excerpt -- the loop header, len/sdu_len
 * bookkeeping and the final return are not visible here.
 */
2069 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2070 struct sk_buff_head *seg_queue,
2071 struct msghdr *msg, size_t len)
2073 struct sk_buff *skb;
2078 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2080 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2081 * so fragmented skbs are not used. The HCI layer's handling
2082 * of fragmented skbs is not compatible with ERTM's queueing.
2085 /* PDU size is derived from the HCI MTU */
2086 pdu_len = chan->conn->mtu;
2088 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2090 /* Adjust for largest possible L2CAP overhead. */
2092 pdu_len -= L2CAP_FCS_SIZE;
2094 pdu_len -= __ertm_hdr_size(chan);
2096 /* Remote device may have requested smaller PDUs */
2097 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
/* SDU fits in one PDU: no SAR header needed */
2099 if (len <= pdu_len) {
2100 sar = L2CAP_SAR_UNSEGMENTED;
/* first segment carries the total SDU length */
2104 sar = L2CAP_SAR_START;
2106 pdu_len -= L2CAP_SDULEN_SIZE;
2110 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2113 __skb_queue_purge(seg_queue);
2114 return PTR_ERR(skb);
2117 bt_cb(skb)->control.sar = sar;
2118 __skb_queue_tail(seg_queue, skb);
/* later segments have no SDU-length field, so they can be longer */
2123 pdu_len += L2CAP_SDULEN_SIZE;
2126 if (len <= pdu_len) {
2127 sar = L2CAP_SAR_END;
2130 sar = L2CAP_SAR_CONTINUE;
/* Transmit @len bytes of user data on @chan.  Connectionless channels
 * send a single G-frame; basic mode a single B-frame (bounded by the
 * outgoing MTU); ERTM and streaming modes segment the SDU first and
 * then hand the queue to the tx state machine / streaming sender.
 * Returns the number of bytes queued or a negative error.
 * NOTE(review): sampled excerpt -- returns, breaks and some braces are
 * not visible here.
 */
2137 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2140 struct sk_buff *skb;
2142 struct sk_buff_head seg_queue;
2144 /* Connectionless channel */
2145 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2146 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2148 return PTR_ERR(skb);
2150 l2cap_do_send(chan, skb);
2154 switch (chan->mode) {
2155 case L2CAP_MODE_BASIC:
2156 /* Check outgoing MTU */
2157 if (len > chan->omtu)
2160 /* Create a basic PDU */
2161 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2163 return PTR_ERR(skb);
2165 l2cap_do_send(chan, skb);
2169 case L2CAP_MODE_ERTM:
2170 case L2CAP_MODE_STREAMING:
2171 /* Check outgoing MTU */
2172 if (len > chan->omtu) {
2177 __skb_queue_head_init(&seg_queue);
2179 /* Do segmentation before calling in to the state machine,
2180 * since it's possible to block while waiting for memory
2183 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2185 /* The channel could have been closed while segmenting,
2186 * check that it is still connected.
2188 if (chan->state != BT_CONNECTED) {
2189 __skb_queue_purge(&seg_queue);
2196 if (chan->mode == L2CAP_MODE_ERTM)
2197 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2199 l2cap_streaming_send(chan, &seg_queue);
2203 /* If the skbs were not queued for sending, they'll still be in
2204 * seg_queue and need to be purged.
2206 __skb_queue_purge(&seg_queue);
/* NOTE(review): message says "bad state" but prints chan->mode --
 * likely should read "bad mode"; confirm against upstream.
 */
2210 BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ S-frame for every sequence number between
 * expected_tx_seq and @txseq that is not already buffered in srej_q,
 * recording each requested number on srej_list, then advance
 * expected_tx_seq past @txseq.
 * NOTE(review): sampled excerpt -- the control.sframe initialisation
 * line is not visible here.
 */
2217 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2219 struct l2cap_ctrl control;
2222 BT_DBG("chan %p, txseq %u", chan, txseq);
2224 memset(&control, 0, sizeof(control));
2226 control.super = L2CAP_SUPER_SREJ;
2228 for (seq = chan->expected_tx_seq; seq != txseq;
2229 seq = __next_seq(chan, seq)) {
/* only request frames we have not already received out of order */
2230 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2231 control.reqseq = seq;
2232 l2cap_send_sframe(chan, &control);
2233 l2cap_seq_list_append(&chan->srej_list, seq);
2237 chan->expected_tx_seq = __next_seq(chan, txseq);
2240 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2242 struct l2cap_ctrl control;
2244 BT_DBG("chan %p", chan);
2246 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2249 memset(&control, 0, sizeof(control));
2251 control.super = L2CAP_SUPER_SREJ;
2252 control.reqseq = chan->srej_list.tail;
2253 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every sequence number still on srej_list, up to
 * (and excluding) @txseq.  Entries are popped and re-appended, with
 * the captured initial head guaranteeing exactly one pass over the
 * list even though it is being rebuilt in place.
 */
2256 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2258 struct l2cap_ctrl control;
2262 BT_DBG("chan %p, txseq %u", chan, txseq);
2264 memset(&control, 0, sizeof(control));
2266 control.super = L2CAP_SUPER_SREJ;
2268 /* Capture initial list head to allow only one pass through the list. */
2269 initial_head = chan->srej_list.head;
2272 seq = l2cap_seq_list_pop(&chan->srej_list);
/* stop at the frame that just arrived, or when the list empties */
2273 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2276 control.reqseq = seq;
2277 l2cap_send_sframe(chan, &control);
2278 l2cap_seq_list_append(&chan->srej_list, seq);
2279 } while (chan->srej_list.head != initial_head);
/* Process an incoming acknowledgement: unlink and free every tx-queue
 * frame with a sequence number in [expected_ack_seq, @reqseq), update
 * expected_ack_seq, and stop the retransmission timer once nothing
 * remains unacked.  A no-op when there is nothing outstanding or the
 * ack is a duplicate.
 */
2282 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2284 struct sk_buff *acked_skb;
2287 BT_DBG("chan %p, reqseq %u", chan, reqseq);
2289 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2292 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2293 chan->expected_ack_seq, chan->unacked_frames);
2295 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2296 ackseq = __next_seq(chan, ackseq)) {
/* the frame may have been removed already; only free what we find */
2298 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2300 skb_unlink(acked_skb, &chan->tx_q);
2301 kfree_skb(acked_skb);
2302 chan->unacked_frames--;
2306 chan->expected_ack_seq = reqseq;
2308 if (chan->unacked_frames == 0)
2309 __clear_retrans_timer(chan);
2311 BT_DBG("unacked_frames %u", chan->unacked_frames);
2314 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2316 BT_DBG("chan %p", chan);
2318 chan->expected_tx_seq = chan->buffer_seq;
2319 l2cap_seq_list_clear(&chan->srej_list);
2320 skb_queue_purge(&chan->srej_q);
2321 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM tx state machine, XMIT state.  Queues and sends new data,
 * handles local-busy enter/exit (sending RNR / RR-with-poll as
 * needed), processes acknowledgements, and transitions to WAIT_F after
 * transmitting a poll.
 * NOTE(review): sampled excerpt -- the per-case "break" statements are
 * not visible here.
 */
2324 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2325 struct l2cap_ctrl *control,
2326 struct sk_buff_head *skbs, u8 event)
2328 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2332 case L2CAP_EV_DATA_REQUEST:
/* remember where unsent data starts, then queue and send */
2333 if (chan->tx_send_head == NULL)
2334 chan->tx_send_head = skb_peek(skbs);
2336 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2337 l2cap_ertm_send(chan);
2339 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2340 BT_DBG("Enter LOCAL_BUSY");
2341 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2343 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2344 /* The SREJ_SENT state must be aborted if we are to
2345 * enter the LOCAL_BUSY state.
2347 l2cap_abort_rx_srej_sent(chan);
2350 l2cap_send_ack(chan);
2353 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2354 BT_DBG("Exit LOCAL_BUSY");
2355 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* we previously sent RNR: poll the peer with RR to resume */
2357 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2358 struct l2cap_ctrl local_control;
2360 memset(&local_control, 0, sizeof(local_control));
2361 local_control.sframe = 1;
2362 local_control.super = L2CAP_SUPER_RR;
2363 local_control.poll = 1;
2364 local_control.reqseq = chan->buffer_seq;
2365 l2cap_send_sframe(chan, &local_control);
2367 chan->retry_count = 1;
2368 __set_monitor_timer(chan);
2369 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2372 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2373 l2cap_process_reqseq(chan, control->reqseq);
/* sending a poll moves us to WAIT_F until the F-bit comes back */
2375 case L2CAP_EV_EXPLICIT_POLL:
2376 l2cap_send_rr_or_rnr(chan, 1);
2377 chan->retry_count = 1;
2378 __set_monitor_timer(chan);
2379 __clear_ack_timer(chan);
2380 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2382 case L2CAP_EV_RETRANS_TO:
2383 l2cap_send_rr_or_rnr(chan, 1);
2384 chan->retry_count = 1;
2385 __set_monitor_timer(chan);
2386 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2388 case L2CAP_EV_RECV_FBIT:
2389 /* Nothing to process */
/* ERTM tx state machine, WAIT_F state (a poll is outstanding).  New
 * data is queued but not sent; a received F-bit ends the wait and
 * returns to XMIT; the monitor timer re-polls up to max_tx times
 * before the connection is aborted.
 * NOTE(review): sampled excerpt -- the per-case "break" statements are
 * not visible here.
 */
2396 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2397 struct l2cap_ctrl *control,
2398 struct sk_buff_head *skbs, u8 event)
2400 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2404 case L2CAP_EV_DATA_REQUEST:
2405 if (chan->tx_send_head == NULL)
2406 chan->tx_send_head = skb_peek(skbs);
2407 /* Queue data, but don't send. */
2408 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2410 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2411 BT_DBG("Enter LOCAL_BUSY");
2412 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2414 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2415 /* The SREJ_SENT state must be aborted if we are to
2416 * enter the LOCAL_BUSY state.
2418 l2cap_abort_rx_srej_sent(chan);
2421 l2cap_send_ack(chan);
2424 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2425 BT_DBG("Exit LOCAL_BUSY");
2426 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* we previously sent RNR: poll the peer with RR to resume */
2428 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2429 struct l2cap_ctrl local_control;
2430 memset(&local_control, 0, sizeof(local_control));
2431 local_control.sframe = 1;
2432 local_control.super = L2CAP_SUPER_RR;
2433 local_control.poll = 1;
2434 local_control.reqseq = chan->buffer_seq;
2435 l2cap_send_sframe(chan, &local_control);
2437 chan->retry_count = 1;
2438 __set_monitor_timer(chan);
2439 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2442 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2443 l2cap_process_reqseq(chan, control->reqseq);
/* F-bit received: the poll round-trip is complete, back to XMIT */
2447 case L2CAP_EV_RECV_FBIT:
2448 if (control && control->final) {
2449 __clear_monitor_timer(chan);
2450 if (chan->unacked_frames > 0)
2451 __set_retrans_timer(chan);
2452 chan->retry_count = 0;
2453 chan->tx_state = L2CAP_TX_STATE_XMIT;
/* NOTE(review): "0x2.2%x" looks like a typo for "%2.2x" -- as written
 * the value is printed after a literal "0x2.2"; confirm upstream fix.
 */
2454 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2457 case L2CAP_EV_EXPLICIT_POLL:
/* monitor expiry: re-poll until the retry budget is spent */
2460 case L2CAP_EV_MONITOR_TO:
2461 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2462 l2cap_send_rr_or_rnr(chan, 1);
2463 __set_monitor_timer(chan);
2464 chan->retry_count++;
2466 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
2474 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2475 struct sk_buff_head *skbs, u8 event)
2477 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2478 chan, control, skbs, event, chan->tx_state);
2480 switch (chan->tx_state) {
2481 case L2CAP_TX_STATE_XMIT:
2482 l2cap_tx_state_xmit(chan, control, skbs, event);
2484 case L2CAP_TX_STATE_WAIT_F:
2485 l2cap_tx_state_wait_f(chan, control, skbs, event);
2493 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2494 struct l2cap_ctrl *control)
2496 BT_DBG("chan %p, control %p", chan, control);
2497 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2500 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2501 struct l2cap_ctrl *control)
2503 BT_DBG("chan %p, control %p", chan, control);
2504 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2507 /* Copy frame to all raw sockets on that connection */
2508 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2510 struct sk_buff *nskb;
2511 struct l2cap_chan *chan;
2513 BT_DBG("conn %p", conn);
2515 mutex_lock(&conn->chan_lock);
2517 list_for_each_entry(chan, &conn->chan_l, list) {
2518 struct sock *sk = chan->sk;
2519 if (chan->chan_type != L2CAP_CHAN_RAW)
2522 /* Don't send frame to the socket it came from */
2525 nskb = skb_clone(skb, GFP_ATOMIC);
2529 if (chan->ops->recv(chan, nskb))
2533 mutex_unlock(&conn->chan_lock);
2536 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling command skb: basic L2CAP header
 * (directed at the LE or BR/EDR signalling CID depending on the link
 * type), command header (@code, @ident, @dlen), then @dlen bytes of
 * @data -- fragmented into continuation skbs whenever the total
 * exceeds the connection MTU.  Returns the skb or NULL on allocation
 * failure (error paths not fully visible in this sampled excerpt).
 */
2537 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2538 u8 ident, u16 dlen, void *data)
2540 struct sk_buff *skb, **frag;
2541 struct l2cap_cmd_hdr *cmd;
2542 struct l2cap_hdr *lh;
2545 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2546 conn, code, ident, dlen);
2548 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2549 count = min_t(unsigned int, conn->mtu, len);
2551 skb = bt_skb_alloc(count, GFP_ATOMIC);
2555 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2556 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* signalling CID differs between LE and BR/EDR links */
2558 if (conn->hcon->type == LE_LINK)
2559 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2561 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2563 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2566 cmd->len = cpu_to_le16(dlen);
/* first chunk of the payload goes into the linear part */
2569 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2570 memcpy(skb_put(skb, count), data, count);
2576 /* Continuation fragments (no L2CAP header) */
2577 frag = &skb_shinfo(skb)->frag_list;
2579 count = min_t(unsigned int, conn->mtu, len);
2581 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2585 memcpy(skb_put(*frag, count), data, count);
2590 frag = &(*frag)->next;
/* Decode one configuration option at *ptr.  Stores the option type and
 * length, widens 1/2/4-byte values into *val (with unaligned-safe
 * loads), and for longer options stores a pointer to the raw value
 * instead.  Returns the total encoded size so the caller can advance
 * through the option list.
 * NOTE(review): sampled excerpt -- the switch header on opt->len, the
 * *ptr advance and the return are not visible here.
 */
2600 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2602 struct l2cap_conf_opt *opt = *ptr;
2605 len = L2CAP_CONF_OPT_SIZE + opt->len;
2613 *val = *((u8 *) opt->val);
2617 *val = get_unaligned_le16(opt->val);
2621 *val = get_unaligned_le32(opt->val);
/* variable-length option: hand back a pointer to the raw bytes */
2625 *val = (unsigned long) opt->val;
2629 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr and advance
 * the pointer past it.  1/2/4-byte values are stored with
 * unaligned-safe writes; anything longer is copied from the buffer
 * that @val points to.
 */
2633 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2635 struct l2cap_conf_opt *opt = *ptr;
2637 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2644 *((u8 *) opt->val) = val;
2648 put_unaligned_le16(val, opt->val);
2652 put_unaligned_le32(val, opt->val);
/* variable-length value: val is actually a pointer to the data */
2656 memcpy(opt->val, (void *) val, len);
2660 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option describing the
 * local side of @chan: full local parameters in ERTM mode, best-effort
 * defaults in streaming mode.  Other modes emit nothing (the early
 * return is not visible in this sampled excerpt).
 */
2663 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2665 struct l2cap_conf_efs efs;
2667 switch (chan->mode) {
2668 case L2CAP_MODE_ERTM:
2669 efs.id = chan->local_id;
2670 efs.stype = chan->local_stype;
2671 efs.msdu = cpu_to_le16(chan->local_msdu);
2672 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2673 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2674 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2677 case L2CAP_MODE_STREAMING:
/* streaming gets best-effort service with default id/latency */
2679 efs.stype = L2CAP_SERV_BESTEFFORT;
2680 efs.msdu = cpu_to_le16(chan->local_msdu);
2681 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2690 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2691 (unsigned long) &efs);
/* Ack timer expiry (delayed work): if any received frames are still
 * unacknowledged, send an explicit RR/RNR under the channel lock, then
 * drop the reference the timer held on the channel.
 */
2694 static void l2cap_ack_timeout(struct work_struct *work)
2696 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2700 BT_DBG("chan %p", chan);
2702 l2cap_chan_lock(chan);
/* number of received frames not yet acked to the peer */
2704 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2705 chan->last_acked_seq);
2708 l2cap_send_rr_or_rnr(chan, 0);
2710 l2cap_chan_unlock(chan);
2711 l2cap_chan_put(chan);
/* Reset all ERTM/streaming sequence state for a freshly configured
 * channel.  For ERTM proper, also initialise the rx/tx state machines,
 * the retransmission/monitor/ack timers, the out-of-order receive
 * queue and both sequence lists.  Returns 0 on success or the error
 * from sequence-list allocation (with partial cleanup).
 */
2714 int l2cap_ertm_init(struct l2cap_chan *chan)
2718 chan->next_tx_seq = 0;
2719 chan->expected_tx_seq = 0;
2720 chan->expected_ack_seq = 0;
2721 chan->unacked_frames = 0;
2722 chan->buffer_seq = 0;
2723 chan->frames_sent = 0;
2724 chan->last_acked_seq = 0;
2726 chan->sdu_last_frag = NULL;
2729 skb_queue_head_init(&chan->tx_q);
/* streaming mode needs only the sequence counters above */
2731 if (chan->mode != L2CAP_MODE_ERTM)
2734 chan->rx_state = L2CAP_RX_STATE_RECV;
2735 chan->tx_state = L2CAP_TX_STATE_XMIT;
2737 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2738 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2739 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2741 skb_queue_head_init(&chan->srej_q);
2743 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
2747 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
/* second allocation failed: undo the first */
2749 l2cap_seq_list_free(&chan->srej_list);
2754 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2757 case L2CAP_MODE_STREAMING:
2758 case L2CAP_MODE_ERTM:
2759 if (l2cap_mode_supported(mode, remote_feat_mask))
2763 return L2CAP_MODE_BASIC;
2767 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2769 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2772 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2774 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2777 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2779 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2780 __l2cap_ews_supported(chan)) {
2781 /* use extended control field */
2782 set_bit(FLAG_EXT_CTRL, &chan->flags);
2783 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2785 chan->tx_win = min_t(u16, chan->tx_win,
2786 L2CAP_DEFAULT_TX_WINDOW);
2787 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configure Request for @chan into @data.  On the
 * first request the mode may be downgraded via l2cap_select_mode()
 * according to the peer's feature mask; then MTU, RFC, FCS, EFS and
 * EWS options are appended as applicable for the chosen mode.
 * Returns the total length of the request (ptr - data; final return
 * not visible in this sampled excerpt).
 */
2791 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2793 struct l2cap_conf_req *req = data;
2794 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2795 void *ptr = req->data;
2798 BT_DBG("chan %p", chan);
/* only the very first request may renegotiate the mode */
2800 if (chan->num_conf_req || chan->num_conf_rsp)
2803 switch (chan->mode) {
2804 case L2CAP_MODE_STREAMING:
2805 case L2CAP_MODE_ERTM:
2806 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2809 if (__l2cap_efs_supported(chan))
2810 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2814 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* only send an MTU option when it differs from the default */
2819 if (chan->imtu != L2CAP_DEFAULT_MTU)
2820 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2822 switch (chan->mode) {
2823 case L2CAP_MODE_BASIC:
/* peers without ERTM/streaming don't need an RFC option at all */
2824 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2825 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2828 rfc.mode = L2CAP_MODE_BASIC;
2830 rfc.max_transmit = 0;
2831 rfc.retrans_timeout = 0;
2832 rfc.monitor_timeout = 0;
2833 rfc.max_pdu_size = 0;
2835 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2836 (unsigned long) &rfc);
2839 case L2CAP_MODE_ERTM:
2840 rfc.mode = L2CAP_MODE_ERTM;
2841 rfc.max_transmit = chan->max_tx;
2842 rfc.retrans_timeout = 0;
2843 rfc.monitor_timeout = 0;
/* cap the PDU size so a whole frame fits in one HCI fragment */
2845 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2846 L2CAP_EXT_HDR_SIZE -
2849 rfc.max_pdu_size = cpu_to_le16(size);
2851 l2cap_txwin_setup(chan);
2853 rfc.txwin_size = min_t(u16, chan->tx_win,
2854 L2CAP_DEFAULT_TX_WINDOW);
2856 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2857 (unsigned long) &rfc);
2859 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2860 l2cap_add_opt_efs(&ptr, chan);
2862 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* explicitly offer "no FCS" when we can and want to drop it */
2865 if (chan->fcs == L2CAP_FCS_NONE ||
2866 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2867 chan->fcs = L2CAP_FCS_NONE;
2868 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2871 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2872 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2876 case L2CAP_MODE_STREAMING:
2877 l2cap_txwin_setup(chan);
2878 rfc.mode = L2CAP_MODE_STREAMING;
2880 rfc.max_transmit = 0;
2881 rfc.retrans_timeout = 0;
2882 rfc.monitor_timeout = 0;
2884 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2885 L2CAP_EXT_HDR_SIZE -
2888 rfc.max_pdu_size = cpu_to_le16(size);
2890 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2891 (unsigned long) &rfc);
2893 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2894 l2cap_add_opt_efs(&ptr, chan);
2896 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2899 if (chan->fcs == L2CAP_FCS_NONE ||
2900 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2901 chan->fcs = L2CAP_FCS_NONE;
2902 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2907 req->dcid = cpu_to_le16(chan->dcid);
2908 req->flags = __constant_cpu_to_le16(0);
/* l2cap_parse_conf_req - parse a peer's accumulated Configure Request options
 * (chan->conf_req/conf_len) and build the Configure Response payload in 'data'.
 * Returns the response length via l2cap_build_conf_rsp-style tail (not visible
 * here) or -ECONNREFUSED when the requested mode/EFS/EWS cannot be honored.
 * NOTE(review): this excerpt is line-sampled; braces, 'break's and several
 * statements are missing. Do not treat the visible control flow as complete.
 */
2913 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2915 struct l2cap_conf_rsp *rsp = data;
2916 void *ptr = rsp->data;
2917 void *req = chan->conf_req;
2918 int len = chan->conf_len;
2919 int type, hint, olen;
2921 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2922 struct l2cap_conf_efs efs;
2924 u16 mtu = L2CAP_DEFAULT_MTU;
2925 u16 result = L2CAP_CONF_SUCCESS;
2928 BT_DBG("chan %p", chan);
/* First pass: walk every option in the buffered request. */
2930 while (len >= L2CAP_CONF_OPT_SIZE) {
2931 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint bit set means "ignore if unknown" per the L2CAP spec. */
2933 hint = type & L2CAP_CONF_HINT;
2934 type &= L2CAP_CONF_MASK;
2937 case L2CAP_CONF_MTU:
2941 case L2CAP_CONF_FLUSH_TO:
2942 chan->flush_to = val;
2945 case L2CAP_CONF_QOS:
2948 case L2CAP_CONF_RFC:
2949 if (olen == sizeof(rfc))
2950 memcpy(&rfc, (void *) val, olen);
2953 case L2CAP_CONF_FCS:
2954 if (val == L2CAP_FCS_NONE)
2955 set_bit(CONF_NO_FCS_RECV, &chan->conf_state)
2958 case L2CAP_CONF_EFS:
2960 if (olen == sizeof(efs))
2961 memcpy(&efs, (void *) val, olen);
2964 case L2CAP_CONF_EWS:
/* Extended window option: refuse unless supported (guard not visible). */
2966 return -ECONNREFUSED;
2968 set_bit(FLAG_EXT_CTRL, &chan->flags);
2969 set_bit(CONF_EWS_RECV, &chan->conf_state);
2970 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2971 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back with CONF_UNKNOWN. */
2978 result = L2CAP_CONF_UNKNOWN;
2979 *((u8 *) ptr++) = type;
2984 if (chan->num_conf_rsp || chan->num_conf_req > 1)
/* Mode negotiation: may downgrade our mode on the first exchange only. */
2987 switch (chan->mode) {
2988 case L2CAP_MODE_STREAMING:
2989 case L2CAP_MODE_ERTM:
2990 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2991 chan->mode = l2cap_select_mode(rfc.mode,
2992 chan->conn->feat_mask);
2997 if (__l2cap_efs_supported(chan))
2998 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3000 return -ECONNREFUSED;
3003 if (chan->mode != rfc.mode)
3004 return -ECONNREFUSED;
3010 if (chan->mode != rfc.mode) {
3011 result = L2CAP_CONF_UNACCEPT;
3012 rfc.mode = chan->mode;
/* Peer insisted on an incompatible mode twice: give up. */
3014 if (chan->num_conf_rsp == 1)
3015 return -ECONNREFUSED;
3017 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3018 sizeof(rfc), (unsigned long) &rfc);
3021 if (result == L2CAP_CONF_SUCCESS) {
3022 /* Configure output options and let the other side know
3023 * which ones we don't like. */
3025 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3026 result = L2CAP_CONF_UNACCEPT;
3029 set_bit(CONF_MTU_DONE, &chan->conf_state);
3031 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS: service types must match unless one side is NO TRAFFIC. */
3034 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3035 efs.stype != L2CAP_SERV_NOTRAFIC &&
3036 efs.stype != chan->local_stype) {
3038 result = L2CAP_CONF_UNACCEPT;
3040 if (chan->num_conf_req >= 1)
3041 return -ECONNREFUSED;
3043 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3045 (unsigned long) &efs);
3047 /* Send PENDING Conf Rsp */
3048 result = L2CAP_CONF_PENDING;
3049 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3054 case L2CAP_MODE_BASIC:
3055 chan->fcs = L2CAP_FCS_NONE;
3056 set_bit(CONF_MODE_DONE, &chan->conf_state);
3059 case L2CAP_MODE_ERTM:
3060 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3061 chan->remote_tx_win = rfc.txwin_size;
3063 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3065 chan->remote_max_tx = rfc.max_transmit;
/* Clamp peer's MPS to what fits in one ACL payload on this link. */
3067 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3069 L2CAP_EXT_HDR_SIZE -
3072 rfc.max_pdu_size = cpu_to_le16(size);
3073 chan->remote_mps = size;
3075 rfc.retrans_timeout =
3076 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3077 rfc.monitor_timeout =
3078 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3080 set_bit(CONF_MODE_DONE, &chan->conf_state);
3082 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3083 sizeof(rfc), (unsigned long) &rfc);
/* Record the peer's EFS parameters and echo the option back. */
3085 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3086 chan->remote_id = efs.id;
3087 chan->remote_stype = efs.stype;
3088 chan->remote_msdu = le16_to_cpu(efs.msdu);
3089 chan->remote_flush_to =
3090 le32_to_cpu(efs.flush_to);
3091 chan->remote_acc_lat =
3092 le32_to_cpu(efs.acc_lat);
3093 chan->remote_sdu_itime =
3094 le32_to_cpu(efs.sdu_itime);
3095 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3096 sizeof(efs), (unsigned long) &efs);
3100 case L2CAP_MODE_STREAMING:
3101 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3103 L2CAP_EXT_HDR_SIZE -
3106 rfc.max_pdu_size = cpu_to_le16(size);
3107 chan->remote_mps = size;
3109 set_bit(CONF_MODE_DONE, &chan->conf_state);
3111 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3112 sizeof(rfc), (unsigned long) &rfc);
3117 result = L2CAP_CONF_UNACCEPT;
3119 memset(&rfc, 0, sizeof(rfc));
3120 rfc.mode = chan->mode;
3123 if (result == L2CAP_CONF_SUCCESS)
3124 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3126 rsp->scid = cpu_to_le16(chan->dcid);
3127 rsp->result = cpu_to_le16(result);
3128 rsp->flags = __constant_cpu_to_le16(0);
/* l2cap_parse_conf_rsp - parse the peer's Configure Response ('rsp'/'len')
 * and build a follow-up Configure Request into 'data', adjusting our own
 * parameters (MTU, flush timeout, RFC, EWS, EFS) to what the peer accepted.
 * '*result' may be rewritten (e.g. to UNACCEPT on an undersized MTU).
 * Returns the new request length (tail not visible) or -ECONNREFUSED.
 * NOTE(review): excerpt is line-sampled; some statements/braces are missing.
 */
3133 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
3135 struct l2cap_conf_req *req = data;
3136 void *ptr = req->data;
3139 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3140 struct l2cap_conf_efs efs;
3142 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3144 while (len >= L2CAP_CONF_OPT_SIZE) {
3145 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3148 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: reject, keep minimum. */
3149 if (val < L2CAP_DEFAULT_MIN_MTU) {
3150 *result = L2CAP_CONF_UNACCEPT;
3151 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3154 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3157 case L2CAP_CONF_FLUSH_TO:
3158 chan->flush_to = val;
3159 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3163 case L2CAP_CONF_RFC:
3164 if (olen == sizeof(rfc))
3165 memcpy(&rfc, (void *)val, olen);
/* State-2 devices never renegotiate the mode mid-configuration. */
3167 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3168 rfc.mode != chan->mode)
3169 return -ECONNREFUSED;
3173 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3174 sizeof(rfc), (unsigned long) &rfc);
3177 case L2CAP_CONF_EWS:
3178 chan->tx_win = min_t(u16, val,
3179 L2CAP_DEFAULT_EXT_WINDOW);
3180 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3184 case L2CAP_CONF_EFS:
3185 if (olen == sizeof(efs))
3186 memcpy(&efs, (void *)val, olen);
3188 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3189 efs.stype != L2CAP_SERV_NOTRAFIC &&
3190 efs.stype != chan->local_stype)
3191 return -ECONNREFUSED;
3193 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3194 sizeof(efs), (unsigned long) &efs);
/* Basic mode cannot be traded away once chosen. */
3199 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3200 return -ECONNREFUSED;
3202 chan->mode = rfc.mode;
/* Commit negotiated timers/MPS/EFS once the response is acceptable. */
3204 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3206 case L2CAP_MODE_ERTM:
3207 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3208 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3209 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3211 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3212 chan->local_msdu = le16_to_cpu(efs.msdu);
3213 chan->local_sdu_itime =
3214 le32_to_cpu(efs.sdu_itime);
3215 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3216 chan->local_flush_to =
3217 le32_to_cpu(efs.flush_to);
3221 case L2CAP_MODE_STREAMING:
3222 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3226 req->dcid = cpu_to_le16(chan->dcid);
3227 req->flags = __constant_cpu_to_le16(0);
/* l2cap_build_conf_rsp - fill a bare Configure Response header (scid/result/
 * flags, no options) into 'data'. Return statement not visible in this
 * excerpt; presumably returns the byte length written — verify upstream.
 */
3232 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3234 struct l2cap_conf_rsp *rsp = data;
3235 void *ptr = rsp->data;
3237 BT_DBG("chan %p", chan);
/* Our response carries the peer's channel id, i.e. our dcid. */
3239 rsp->scid = cpu_to_le16(chan->dcid);
3240 rsp->result = cpu_to_le16(result);
3241 rsp->flags = cpu_to_le16(flags);
/* __l2cap_connect_rsp_defer - send the deferred Connect Response (success)
 * for a channel whose setup was previously held back, then kick off
 * configuration by sending our first Configure Request (once only, guarded
 * by CONF_REQ_SENT). NOTE(review): excerpt is line-sampled.
 */
3246 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3248 struct l2cap_conn_rsp rsp;
3249 struct l2cap_conn *conn = chan->conn;
3252 rsp.scid = cpu_to_le16(chan->dcid);
3253 rsp.dcid = cpu_to_le16(chan->scid);
3254 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3255 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3256 l2cap_send_cmd(conn, chan->ident,
3257 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Already sent a config request earlier: nothing more to do. */
3259 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3262 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3263 l2cap_build_conf_req(chan, buf), buf);
3264 chan->num_conf_req++;
/* l2cap_conf_rfc_get - extract the RFC option from a successful Configure
 * Response and latch the negotiated ERTM/streaming timers and MPS into the
 * channel. Falls back to sane defaults when the peer omitted the option.
 * NOTE(review): excerpt is line-sampled; goto labels/breaks are missing.
 */
3267 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3271 struct l2cap_conf_rfc rfc;
3273 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming modes carry RFC-derived parameters. */
3275 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3278 while (len >= L2CAP_CONF_OPT_SIZE) {
3279 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3281 if (type != L2CAP_CONF_RFC)
3284 if (olen != sizeof(rfc))
3287 memcpy(&rfc, (void *)val, olen);
3291 /* Use sane default values in case a misbehaving remote device
3292 * did not send an RFC option.
3294 rfc.mode = chan->mode;
3295 rfc.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3296 rfc.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3297 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
3299 BT_ERR("Expected RFC option was not found, using defaults");
3303 case L2CAP_MODE_ERTM:
3304 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3305 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3306 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3308 case L2CAP_MODE_STREAMING:
3309 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* l2cap_command_rej - handle an incoming Command Reject. If it rejects our
 * pending Information Request (matched by ident), cancel the info timer,
 * mark feature discovery done and resume pending connections.
 */
3313 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3315 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3317 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3320 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3321 cmd->ident == conn->info_ident) {
3322 cancel_delayed_work(&conn->info_timer);
3324 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3325 conn->info_ident = 0;
3327 l2cap_conn_start(conn);
/* l2cap_connect_req - handle an incoming Connect Request: locate a listening
 * channel for the PSM, run security checks, create the child channel, send a
 * Connect Response (success/pend/reject), and, if needed, start feature
 * discovery or the configuration phase.
 * NOTE(review): excerpt is line-sampled; error labels, lock_sock(parent) and
 * several gotos are missing from view.
 */
3333 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3335 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3336 struct l2cap_conn_rsp rsp;
3337 struct l2cap_chan *chan = NULL, *pchan;
3338 struct sock *parent, *sk = NULL;
3339 int result, status = L2CAP_CS_NO_INFO;
3341 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3342 __le16 psm = req->psm;
3344 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3346 /* Check if we have socket listening on psm */
3347 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3349 result = L2CAP_CR_BAD_PSM;
3355 mutex_lock(&conn->chan_lock);
3358 /* Check if the ACL is secure enough (if not SDP) */
3359 if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3360 !hci_conn_check_link_mode(conn->hcon)) {
3361 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3362 result = L2CAP_CR_SEC_BLOCK;
3366 result = L2CAP_CR_NO_MEM;
3368 /* Check if we already have channel with that dcid */
3369 if (__l2cap_get_chan_by_dcid(conn, scid))
3372 chan = pchan->ops->new_connection(pchan);
3378 hci_conn_hold(conn->hcon);
3380 bacpy(&bt_sk(sk)->src, conn->src);
3381 bacpy(&bt_sk(sk)->dst, conn->dst);
3385 bt_accept_enqueue(parent, sk);
3387 __l2cap_chan_add(conn, chan);
3391 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the request ident so a deferred response can reuse it. */
3393 chan->ident = cmd->ident;
3395 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3396 if (l2cap_chan_check_security(chan)) {
3397 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3398 __l2cap_state_change(chan, BT_CONNECT2);
3399 result = L2CAP_CR_PEND;
3400 status = L2CAP_CS_AUTHOR_PEND;
3401 parent->sk_data_ready(parent, 0);
3403 __l2cap_state_change(chan, BT_CONFIG);
3404 result = L2CAP_CR_SUCCESS;
3405 status = L2CAP_CS_NO_INFO;
3408 __l2cap_state_change(chan, BT_CONNECT2);
3409 result = L2CAP_CR_PEND;
3410 status = L2CAP_CS_AUTHEN_PEND;
3413 __l2cap_state_change(chan, BT_CONNECT2);
3414 result = L2CAP_CR_PEND;
3415 status = L2CAP_CS_NO_INFO;
3419 release_sock(parent);
3420 mutex_unlock(&conn->chan_lock);
3423 rsp.scid = cpu_to_le16(scid);
3424 rsp.dcid = cpu_to_le16(dcid);
3425 rsp.result = cpu_to_le16(result);
3426 rsp.status = cpu_to_le16(status);
3427 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Feature mask still unknown: launch an Information Request first. */
3429 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3430 struct l2cap_info_req info;
3431 info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3433 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3434 conn->info_ident = l2cap_get_ident(conn);
3436 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3438 l2cap_send_cmd(conn, conn->info_ident,
3439 L2CAP_INFO_REQ, sizeof(info), &info);
/* Connection accepted: immediately begin configuration. */
3442 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3443 result == L2CAP_CR_SUCCESS) {
3445 set_bit(CONF_REQ_SENT, &chan->conf_state);
3446 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3447 l2cap_build_conf_req(chan, buf), buf);
3448 chan->num_conf_req++;
/* l2cap_connect_rsp - handle the peer's Connect Response: find the channel
 * by scid (or by ident if no dcid was assigned yet) and advance it to
 * configuration on success, mark it pending, or tear it down on refusal.
 * NOTE(review): excerpt is line-sampled; default/PEND case labels missing.
 */
3456 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3458 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3459 u16 scid, dcid, result, status;
3460 struct l2cap_chan *chan;
3462 scid = __le16_to_cpu(rsp->scid);
3463 dcid = __le16_to_cpu(rsp->dcid);
3464 result = __le16_to_cpu(rsp->result);
3465 status = __le16_to_cpu(rsp->status);
3467 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3468 dcid, scid, result, status);
3470 mutex_lock(&conn->chan_lock);
3473 chan = __l2cap_get_chan_by_scid(conn, scid);
/* No scid match (peer rejected before assigning one): match by ident. */
3479 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3488 l2cap_chan_lock(chan);
3491 case L2CAP_CR_SUCCESS:
3492 l2cap_state_change(chan, BT_CONFIG);
3495 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3497 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3500 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3501 l2cap_build_conf_req(chan, req), req);
3502 chan->num_conf_req++;
3506 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result: connection refused, drop the channel. */
3510 l2cap_chan_del(chan, ECONNREFUSED);
3514 l2cap_chan_unlock(chan);
3517 mutex_unlock(&conn->chan_lock);
/* set_default_fcs - pick the channel's FCS setting after configuration:
 * CRC16 for ERTM/streaming unless the peer opted out (CONF_NO_FCS_RECV),
 * otherwise no FCS at all.
 */
3522 static inline void set_default_fcs(struct l2cap_chan *chan)
3524 /* FCS is enabled only in ERTM or streaming mode, if one or both
3527 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3528 chan->fcs = L2CAP_FCS_NONE;
3529 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3530 chan->fcs = L2CAP_FCS_CRC16;
/* l2cap_config_req - handle an incoming Configure Request: accumulate
 * (possibly fragmented) options into chan->conf_req, and once complete,
 * parse them, send the response, and finish channel setup (ERTM init /
 * chan ready) when both directions are configured.
 * NOTE(review): excerpt is line-sampled; buffer declarations, gotos and
 * the conf_len reset are missing from view.
 */
3533 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3535 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3538 struct l2cap_chan *chan;
3541 dcid = __le16_to_cpu(req->dcid);
3542 flags = __le16_to_cpu(req->flags);
3544 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3546 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config only valid in BT_CONFIG/BT_CONNECT2; otherwise reject the CID. */
3550 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3551 struct l2cap_cmd_rej_cid rej;
3553 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3554 rej.scid = cpu_to_le16(chan->scid);
3555 rej.dcid = cpu_to_le16(chan->dcid);
3557 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3562 /* Reject if config buffer is too small. */
3563 len = cmd_len - sizeof(*req);
3564 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3565 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3566 l2cap_build_conf_rsp(chan, rsp,
3567 L2CAP_CONF_REJECT, flags), rsp);
3572 memcpy(chan->conf_req + chan->conf_len, req->data, len);
3573 chan->conf_len += len;
3575 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3576 /* Incomplete config. Send empty response. */
3577 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3578 l2cap_build_conf_rsp(chan, rsp,
3579 L2CAP_CONF_SUCCESS, flags), rsp);
3583 /* Complete config. */
3584 len = l2cap_parse_conf_req(chan, rsp);
3586 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3590 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3591 chan->num_conf_rsp++;
3593 /* Reset config buffer. */
3596 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: finalize FCS/ERTM and go ready. */
3599 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3600 set_default_fcs(chan);
3602 if (chan->mode == L2CAP_MODE_ERTM ||
3603 chan->mode == L2CAP_MODE_STREAMING)
3604 err = l2cap_ertm_init(chan);
3607 l2cap_send_disconn_req(chan->conn, chan, -err);
3609 l2cap_chan_ready(chan);
3614 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3616 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3617 l2cap_build_conf_req(chan, buf), buf);
3618 chan->num_conf_req++;
3621 /* Got Conf Rsp PENDING from remote side and asume we sent
3622 Conf Rsp PENDING in the code above */
3623 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3624 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3626 /* check compatibility */
3628 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3629 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3631 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3632 l2cap_build_conf_rsp(chan, rsp,
3633 L2CAP_CONF_SUCCESS, flags), rsp);
3637 l2cap_chan_unlock(chan);
/* l2cap_config_rsp - handle the peer's Configure Response: on SUCCESS latch
 * negotiated RFC params; on PENDING possibly answer our own pending state;
 * on UNACCEPT rebuild and resend a Configure Request (bounded retries);
 * otherwise disconnect. Completes channel setup when both sides are done.
 * NOTE(review): excerpt is line-sampled; req/buf declarations and several
 * gotos/breaks are missing from view.
 */
3641 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3643 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3644 u16 scid, flags, result;
3645 struct l2cap_chan *chan;
3646 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3649 scid = __le16_to_cpu(rsp->scid);
3650 flags = __le16_to_cpu(rsp->flags);
3651 result = __le16_to_cpu(rsp->result);
3653 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3656 chan = l2cap_get_chan_by_scid(conn, scid);
3661 case L2CAP_CONF_SUCCESS:
3662 l2cap_conf_rfc_get(chan, rsp->data, len);
3663 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3666 case L2CAP_CONF_PENDING:
3667 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3669 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3672 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3675 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3679 /* check compatibility */
3681 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3682 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3684 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3685 l2cap_build_conf_rsp(chan, buf,
3686 L2CAP_CONF_SUCCESS, 0x0000), buf);
3690 case L2CAP_CONF_UNACCEPT:
3691 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard against a renegotiated request overflowing our buffer. */
3694 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3695 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3699 /* throw out any old stored conf requests */
3700 result = L2CAP_CONF_SUCCESS;
3701 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3704 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3708 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3709 L2CAP_CONF_REQ, len, req);
3710 chan->num_conf_req++;
3711 if (result != L2CAP_CONF_SUCCESS)
/* default: unrecoverable config failure — tear the channel down. */
3717 l2cap_chan_set_err(chan, ECONNRESET);
3719 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3720 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3724 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3727 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3729 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3730 set_default_fcs(chan);
3732 if (chan->mode == L2CAP_MODE_ERTM ||
3733 chan->mode == L2CAP_MODE_STREAMING)
3734 err = l2cap_ertm_init(chan);
3737 l2cap_send_disconn_req(chan->conn, chan, -err);
3739 l2cap_chan_ready(chan);
3743 l2cap_chan_unlock(chan);
/* l2cap_disconnect_req - handle an incoming Disconnect Request: echo a
 * Disconnect Response, shut the socket down, and delete the channel.
 * The hold/put pair keeps 'chan' alive across l2cap_chan_del so the
 * ops->close callback still has a valid object.
 */
3747 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3749 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3750 struct l2cap_disconn_rsp rsp;
3752 struct l2cap_chan *chan;
3755 scid = __le16_to_cpu(req->scid);
3756 dcid = __le16_to_cpu(req->dcid);
3758 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3760 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid, so look up by dcid. */
3762 chan = __l2cap_get_chan_by_scid(conn, dcid);
3764 mutex_unlock(&conn->chan_lock);
3768 l2cap_chan_lock(chan);
3772 rsp.dcid = cpu_to_le16(chan->scid);
3773 rsp.scid = cpu_to_le16(chan->dcid);
3774 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3777 sk->sk_shutdown = SHUTDOWN_MASK;
3780 l2cap_chan_hold(chan);
3781 l2cap_chan_del(chan, ECONNRESET);
3783 l2cap_chan_unlock(chan);
3785 chan->ops->close(chan);
3786 l2cap_chan_put(chan);
3788 mutex_unlock(&conn->chan_lock);
/* l2cap_disconnect_rsp - handle the peer's Disconnect Response: the channel
 * we asked to close is confirmed gone, so delete it (err 0 = clean close)
 * and release it via ops->close. Hold/put keeps the object valid.
 */
3793 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3795 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3797 struct l2cap_chan *chan;
3799 scid = __le16_to_cpu(rsp->scid);
3800 dcid = __le16_to_cpu(rsp->dcid);
3802 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3804 mutex_lock(&conn->chan_lock);
3806 chan = __l2cap_get_chan_by_scid(conn, scid);
3808 mutex_unlock(&conn->chan_lock);
3812 l2cap_chan_lock(chan);
3814 l2cap_chan_hold(chan);
3815 l2cap_chan_del(chan, 0);
3817 l2cap_chan_unlock(chan);
3819 chan->ops->close(chan);
3820 l2cap_chan_put(chan);
3822 mutex_unlock(&conn->chan_lock);
/* l2cap_information_req - answer an Information Request: report the local
 * feature mask, the fixed-channel bitmap, or NOT SUPPORTED for anything else.
 * NOTE(review): excerpt is line-sampled; buffer declarations and feature
 * conditionals (e.g. disable_ertm / HS guards) are missing from view.
 */
3827 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3829 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3832 type = __le16_to_cpu(req->type);
3834 BT_DBG("type 0x%4.4x", type);
3836 if (type == L2CAP_IT_FEAT_MASK) {
3838 u32 feat_mask = l2cap_feat_mask;
3839 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3840 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3841 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3843 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3846 feat_mask |= L2CAP_FEAT_EXT_FLOW
3847 | L2CAP_FEAT_EXT_WINDOW;
3849 put_unaligned_le32(feat_mask, rsp->data);
3850 l2cap_send_cmd(conn, cmd->ident,
3851 L2CAP_INFO_RSP, sizeof(buf), buf);
3852 } else if (type == L2CAP_IT_FIXED_CHAN) {
3854 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only when available (guard missing). */
3857 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3859 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3861 rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3862 rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3863 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3864 l2cap_send_cmd(conn, cmd->ident,
3865 L2CAP_INFO_RSP, sizeof(buf), buf);
3867 struct l2cap_info_rsp rsp;
3868 rsp.type = cpu_to_le16(type);
3869 rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3870 l2cap_send_cmd(conn, cmd->ident,
3871 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* l2cap_information_rsp - handle an Information Response matching our
 * pending request: store the feature mask, optionally chase it with a
 * fixed-channel query, then mark discovery done and resume pending
 * connection setup.
 */
3877 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3879 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3882 type = __le16_to_cpu(rsp->type);
3883 result = __le16_to_cpu(rsp->result);
3885 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3887 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3888 if (cmd->ident != conn->info_ident ||
3889 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3892 cancel_delayed_work(&conn->info_timer);
3894 if (result != L2CAP_IR_SUCCESS) {
3895 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3896 conn->info_ident = 0;
3898 l2cap_conn_start(conn);
3904 case L2CAP_IT_FEAT_MASK:
3905 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If fixed channels are supported, query them before finishing. */
3907 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3908 struct l2cap_info_req req;
3909 req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3911 conn->info_ident = l2cap_get_ident(conn);
3913 l2cap_send_cmd(conn, conn->info_ident,
3914 L2CAP_INFO_REQ, sizeof(req), &req);
3916 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3917 conn->info_ident = 0;
3919 l2cap_conn_start(conn);
3923 case L2CAP_IT_FIXED_CHAN:
3924 conn->fixed_chan_mask = rsp->data[0];
3925 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3926 conn->info_ident = 0;
3928 l2cap_conn_start(conn);
/* l2cap_create_channel_req - handle an AMP Create Channel Request.
 * Placeholder implementation: validates the length, then always replies
 * with L2CAP_CR_NO_MEM (rejection) until AMP channel creation is wired up.
 */
3935 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3936 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3939 struct l2cap_create_chan_req *req = data;
3940 struct l2cap_create_chan_rsp rsp;
3943 if (cmd_len != sizeof(*req))
3949 psm = le16_to_cpu(req->psm);
3950 scid = le16_to_cpu(req->scid);
3952 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
3954 /* Placeholder: Always reject */
3956 rsp.scid = cpu_to_le16(scid);
3957 rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3958 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3960 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* l2cap_create_channel_rsp - an AMP Create Channel Response has the same
 * layout and semantics as a Connect Response, so delegate to that handler.
 */
3966 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3967 struct l2cap_cmd_hdr *cmd, void *data)
3969 BT_DBG("conn %p", conn);
3971 return l2cap_connect_rsp(conn, cmd, data);
/* l2cap_send_move_chan_rsp - send a Move Channel Response for 'icid' with
 * the given result code, reusing the requester's ident.
 */
3974 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3975 u16 icid, u16 result)
3977 struct l2cap_move_chan_rsp rsp;
3979 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
3981 rsp.icid = cpu_to_le16(icid);
3982 rsp.result = cpu_to_le16(result);
3984 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* l2cap_send_move_chan_cfm - send a Move Channel Confirm with a freshly
 * allocated ident; the ident is stored on 'chan' so the matching Confirm
 * Response can be correlated. 'chan' may be NULL per the placeholder
 * caller — presumably guarded by missing lines; verify before reuse.
 */
3987 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3988 struct l2cap_chan *chan,
3989 u16 icid, u16 result)
3991 struct l2cap_move_chan_cfm cfm;
3994 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
3996 ident = l2cap_get_ident(conn);
3998 chan->ident = ident;
4000 cfm.icid = cpu_to_le16(icid);
4001 cfm.result = cpu_to_le16(result);
4003 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* l2cap_send_move_chan_cfm_rsp - acknowledge a Move Channel Confirm for
 * 'icid' using the sender's ident.
 */
4006 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4009 struct l2cap_move_chan_cfm_rsp rsp;
4011 BT_DBG("icid 0x%4.4x", icid);
4013 rsp.icid = cpu_to_le16(icid);
4014 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* l2cap_move_channel_req - handle an AMP Move Channel Request.
 * Placeholder implementation: always answers NOT ALLOWED.
 */
4017 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4018 struct l2cap_cmd_hdr *cmd,
4019 u16 cmd_len, void *data)
4021 struct l2cap_move_chan_req *req = data;
4023 u16 result = L2CAP_MR_NOT_ALLOWED;
4025 if (cmd_len != sizeof(*req))
4028 icid = le16_to_cpu(req->icid);
4030 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4035 /* Placeholder: Always refuse */
4036 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* l2cap_move_channel_rsp - handle an AMP Move Channel Response.
 * Placeholder implementation: always confirms with UNCONFIRMED.
 */
4041 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4042 struct l2cap_cmd_hdr *cmd,
4043 u16 cmd_len, void *data)
4045 struct l2cap_move_chan_rsp *rsp = data;
4048 if (cmd_len != sizeof(*rsp))
4051 icid = le16_to_cpu(rsp->icid);
4052 result = le16_to_cpu(rsp->result);
4054 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4056 /* Placeholder: Always unconfirmed */
4057 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* l2cap_move_channel_confirm - handle an AMP Move Channel Confirm: just
 * validate the length and acknowledge with a Confirm Response.
 */
4062 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4063 struct l2cap_cmd_hdr *cmd,
4064 u16 cmd_len, void *data)
4066 struct l2cap_move_chan_cfm *cfm = data;
4069 if (cmd_len != sizeof(*cfm))
4072 icid = le16_to_cpu(cfm->icid);
4073 result = le16_to_cpu(cfm->result);
4075 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4077 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* l2cap_move_channel_confirm_rsp - handle an AMP Move Channel Confirm
 * Response. Currently only validates length and logs the icid.
 */
4082 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4083 struct l2cap_cmd_hdr *cmd,
4084 u16 cmd_len, void *data)
4086 struct l2cap_move_chan_cfm_rsp *rsp = data;
4089 if (cmd_len != sizeof(*rsp))
4092 icid = le16_to_cpu(rsp->icid);
4094 BT_DBG("icid 0x%4.4x", icid);
/* l2cap_check_conn_param - validate LE connection parameter update values
 * (interval min/max, slave latency, supervision timeout multiplier) against
 * the ranges mandated by the LE connection update procedure.
 */
4099 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4104 if (min > max || min < 6 || max > 3200)
4107 if (to_multiplier < 10 || to_multiplier > 3200)
/* Supervision timeout must exceed the maximum connection interval. */
4110 if (max >= to_multiplier * 8)
4113 max_latency = (to_multiplier * 8 / max) - 1;
4114 if (latency > 499 || latency > max_latency)
/* l2cap_conn_param_update_req - handle an LE Connection Parameter Update
 * Request (master role only): validate the parameters, reply with
 * accepted/rejected, and when accepted push the update to the controller.
 */
4120 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4121 struct l2cap_cmd_hdr *cmd, u8 *data)
4123 struct hci_conn *hcon = conn->hcon;
4124 struct l2cap_conn_param_update_req *req;
4125 struct l2cap_conn_param_update_rsp rsp;
4126 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may apply connection parameter updates. */
4129 if (!(hcon->link_mode & HCI_LM_MASTER))
4132 cmd_len = __le16_to_cpu(cmd->len);
4133 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4136 req = (struct l2cap_conn_param_update_req *) data;
4137 min = __le16_to_cpu(req->min);
4138 max = __le16_to_cpu(req->max);
4139 latency = __le16_to_cpu(req->latency);
4140 to_multiplier = __le16_to_cpu(req->to_multiplier);
4142 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4143 min, max, latency, to_multiplier);
4145 memset(&rsp, 0, sizeof(rsp));
4147 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4149 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4151 rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4153 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4157 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* l2cap_bredr_sig_cmd - dispatch one BR/EDR signaling command to its
 * handler. Unknown opcodes log an error and (via the missing tail) return
 * an error so the caller can send a Command Reject.
 */
4162 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4163 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4167 switch (cmd->code) {
4168 case L2CAP_COMMAND_REJ:
4169 l2cap_command_rej(conn, cmd, data);
4172 case L2CAP_CONN_REQ:
4173 err = l2cap_connect_req(conn, cmd, data);
4176 case L2CAP_CONN_RSP:
4177 err = l2cap_connect_rsp(conn, cmd, data);
4180 case L2CAP_CONF_REQ:
4181 err = l2cap_config_req(conn, cmd, cmd_len, data);
4184 case L2CAP_CONF_RSP:
4185 err = l2cap_config_rsp(conn, cmd, data);
4188 case L2CAP_DISCONN_REQ:
4189 err = l2cap_disconnect_req(conn, cmd, data);
4192 case L2CAP_DISCONN_RSP:
4193 err = l2cap_disconnect_rsp(conn, cmd, data);
4196 case L2CAP_ECHO_REQ:
/* Echo simply reflects the payload back with the same ident. */
4197 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4200 case L2CAP_ECHO_RSP:
4203 case L2CAP_INFO_REQ:
4204 err = l2cap_information_req(conn, cmd, data);
4207 case L2CAP_INFO_RSP:
4208 err = l2cap_information_rsp(conn, cmd, data);
4211 case L2CAP_CREATE_CHAN_REQ:
4212 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4215 case L2CAP_CREATE_CHAN_RSP:
4216 err = l2cap_create_channel_rsp(conn, cmd, data);
4219 case L2CAP_MOVE_CHAN_REQ:
4220 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4223 case L2CAP_MOVE_CHAN_RSP:
4224 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4227 case L2CAP_MOVE_CHAN_CFM:
4228 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4231 case L2CAP_MOVE_CHAN_CFM_RSP:
4232 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4236 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* l2cap_le_sig_cmd - dispatch one LE signaling command. Only connection
 * parameter updates are actively handled; rejects and update responses are
 * accepted silently, anything else is logged as unknown.
 */
4244 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4245 struct l2cap_cmd_hdr *cmd, u8 *data)
4247 switch (cmd->code) {
4248 case L2CAP_COMMAND_REJ:
4251 case L2CAP_CONN_PARAM_UPDATE_REQ:
4252 return l2cap_conn_param_update_req(conn, cmd, data);
4254 case L2CAP_CONN_PARAM_UPDATE_RSP:
4258 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* l2cap_sig_channel - parse an skb from the signaling channel: iterate its
 * packed command headers, dispatch each to the LE or BR/EDR handler, and
 * send a Command Reject when a handler fails. Malformed commands (length
 * overrun or zero ident) abort the loop.
 */
4263 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4264 struct sk_buff *skb)
4266 u8 *data = skb->data;
4268 struct l2cap_cmd_hdr cmd;
/* Give raw sockets a copy of the signaling traffic first. */
4271 l2cap_raw_recv(conn, skb);
4273 while (len >= L2CAP_CMD_HDR_SIZE) {
4275 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4276 data += L2CAP_CMD_HDR_SIZE;
4277 len -= L2CAP_CMD_HDR_SIZE;
4279 cmd_len = le16_to_cpu(cmd.len);
4281 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
4283 if (cmd_len > len || !cmd.ident) {
4284 BT_DBG("corrupted command");
4288 if (conn->hcon->type == LE_LINK)
4289 err = l2cap_le_sig_cmd(conn, &cmd, data);
4291 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4294 struct l2cap_cmd_rej_unk rej;
4296 BT_ERR("Wrong link type (%d)", err);
4298 /* FIXME: Map err to a valid reason */
4299 rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4300 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* l2cap_check_fcs - verify and strip the trailing CRC16 FCS of a received
 * ERTM/streaming frame. The CRC is computed over the L2CAP header (which
 * sits just before skb->data, hence the negative offset) plus the payload.
 * Returns nonzero on mismatch (tail not visible in this excerpt).
 */
4310 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4312 u16 our_fcs, rcv_fcs;
4315 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4316 hdr_size = L2CAP_EXT_HDR_SIZE;
4318 hdr_size = L2CAP_ENH_HDR_SIZE;
4320 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off, then read it from just past the new tail. */
4321 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4322 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4323 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4325 if (our_fcs != rcv_fcs)
/* l2cap_send_i_or_rr_or_rnr - convey the F-bit to the peer after a poll:
 * send RNR when locally busy, otherwise flush pending I-frames, and if no
 * frame carried the F-bit, fall back to an explicit RR s-frame.
 */
4331 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4333 struct l2cap_ctrl control;
4335 BT_DBG("chan %p", chan);
4337 memset(&control, 0, sizeof(control));
4340 control.reqseq = chan->buffer_seq;
4341 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4343 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4344 control.super = L2CAP_SUPER_RNR;
4345 l2cap_send_sframe(chan, &control);
/* Peer no longer busy and we still have unacked frames: rearm timer. */
4348 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4349 chan->unacked_frames > 0)
4350 __set_retrans_timer(chan);
4352 /* Send pending iframes */
4353 l2cap_ertm_send(chan);
4355 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4356 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
4357 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4360 control.super = L2CAP_SUPER_RR;
4361 l2cap_send_sframe(chan, &control);
/* append_skb_frag - chain 'new_frag' onto 'skb's frag_list (tracked via
 * *last_frag for O(1) appends) and account its length/truesize on the
 * head skb.
 */
4365 static void append_skb_frag(struct sk_buff *skb,
4366 struct sk_buff *new_frag, struct sk_buff **last_frag)
4368 /* skb->len reflects data in skb as well as all fragments
4369 * skb->data_len reflects only data in fragments
4371 if (!skb_has_frag_list(skb))
4372 skb_shinfo(skb)->frag_list = new_frag;
4374 new_frag->next = NULL;
4376 (*last_frag)->next = new_frag;
4377 *last_frag = new_frag;
/* Keep the head skb's accounting consistent with the new fragment. */
4379 skb->len += new_frag->len;
4380 skb->data_len += new_frag->len;
4381 skb->truesize += new_frag->truesize;
/* l2cap_reassemble_sdu - reassemble an SDU from segmented I-frames based on
 * the SAR bits: unsegmented frames go straight to ops->recv; START frames
 * open a new SDU (length-prefixed); CONTINUE/END frames append fragments
 * and deliver once the full length arrives. Errors free the partial SDU.
 * NOTE(review): excerpt is line-sampled; size checks, gotos and the END
 * case label are missing from view.
 */
4384 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4385 struct l2cap_ctrl *control)
4389 switch (control->sar) {
4390 case L2CAP_SAR_UNSEGMENTED:
4394 err = chan->ops->recv(chan, skb);
4397 case L2CAP_SAR_START:
/* First two bytes of a START frame carry the total SDU length. */
4401 chan->sdu_len = get_unaligned_le16(skb->data);
4402 skb_pull(skb, L2CAP_SDULEN_SIZE);
4404 if (chan->sdu_len > chan->imtu) {
4409 if (skb->len >= chan->sdu_len)
4413 chan->sdu_last_frag = skb;
4419 case L2CAP_SAR_CONTINUE:
4423 append_skb_frag(chan->sdu, skb,
4424 &chan->sdu_last_frag);
/* Overrunning the declared SDU length is a protocol error. */
4427 if (chan->sdu->len >= chan->sdu_len)
4437 append_skb_frag(chan->sdu, skb,
4438 &chan->sdu_last_frag);
4441 if (chan->sdu->len != chan->sdu_len)
4444 err = chan->ops->recv(chan, chan->sdu);
4447 /* Reassembly complete */
4449 chan->sdu_last_frag = NULL;
4457 kfree_skb(chan->sdu);
4459 chan->sdu_last_frag = NULL;
/* Notify the ERTM state machine of a local-busy transition. No-op for
 * channels not in ERTM mode; otherwise feeds LOCAL_BUSY_DETECTED or
 * LOCAL_BUSY_CLEAR into l2cap_tx().
 */
4466 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4470 if (chan->mode != L2CAP_MODE_ERTM)
4473 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4474 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ queue: deliver consecutively-sequenced frames starting
 * at chan->buffer_seq to l2cap_reassemble_sdu() until a gap (or local
 * busy) stops us. When the queue empties, fall back to RECV state and ack.
 */
4477 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4480 /* Pass sequential frames to l2cap_reassemble_sdu()
4481 * until a gap is encountered.
4484 BT_DBG("chan %p", chan);
4486 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4487 struct sk_buff *skb;
4488 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4489 chan->buffer_seq, skb_queue_len(&chan->srej_q));
4491 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
/* Found the next in-order frame: consume it and advance buffer_seq. */
4496 skb_unlink(skb, &chan->srej_q);
4497 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4498 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
4503 if (skb_queue_empty(&chan->srej_q)) {
4504 chan->rx_state = L2CAP_RX_STATE_RECV;
4505 l2cap_send_ack(chan);
/* Handle a received SREJ s-frame: validate reqseq, enforce max_tx retry
 * limit, then retransmit the requested frame. The P/F-bit handling
 * mirrors the ERTM spec: a poll forces an F-bit response; a final bit is
 * matched against a previously saved SREJ-actioned reqseq.
 * NOTE(review): returns/braces between cases are elided in this extract.
 */
4511 static void l2cap_handle_srej(struct l2cap_chan *chan,
4512 struct l2cap_ctrl *control)
4514 struct sk_buff *skb;
4516 BT_DBG("chan %p, control %p", chan, control);
/* reqseq == next_tx_seq would select a frame we never sent. */
4518 if (control->reqseq == chan->next_tx_seq) {
4519 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4520 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4524 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4527 BT_DBG("Seq %d not available for retransmission",
/* Exceeding the negotiated retry limit terminates the channel. */
4532 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4533 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4534 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4538 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4540 if (control->poll) {
4541 l2cap_pass_to_tx(chan, control);
/* Poll set: retransmit with F-bit and remember the reqseq if we are
 * waiting for an F-bit ourselves (WAIT_F).
 */
4543 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4544 l2cap_retransmit(chan, control);
4545 l2cap_ertm_send(chan);
4547 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4548 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4549 chan->srej_save_reqseq = control->reqseq;
4552 l2cap_pass_to_tx_fbit(chan, control);
4554 if (control->final) {
/* Only retransmit on final if this SREJ wasn't already actioned
 * for the same reqseq.
 */
4555 if (chan->srej_save_reqseq != control->reqseq ||
4556 !test_and_clear_bit(CONN_SREJ_ACT,
4558 l2cap_retransmit(chan, control);
4560 l2cap_retransmit(chan, control);
4561 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4562 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4563 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ s-frame: validate reqseq, enforce the max_tx
 * retry limit, then retransmit all unacked frames from reqseq onward.
 * On a final bit, only retransmit if REJ was not already actioned.
 */
4569 static void l2cap_handle_rej(struct l2cap_chan *chan,
4570 struct l2cap_ctrl *control)
4572 struct sk_buff *skb;
4574 BT_DBG("chan %p, control %p", chan, control);
/* reqseq == next_tx_seq refers to a frame we never sent. */
4576 if (control->reqseq == chan->next_tx_seq) {
4577 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4578 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4582 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4584 if (chan->max_tx && skb &&
4585 bt_cb(skb)->control.retries >= chan->max_tx) {
4586 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4587 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4591 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4593 l2cap_pass_to_tx(chan, control);
4595 if (control->final) {
/* Skip the retransmit if this REJ was already acted upon. */
4596 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4597 l2cap_retransmit_all(chan, control);
4599 l2cap_retransmit_all(chan, control);
4600 l2cap_ertm_send(chan);
4601 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
4602 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to the receive window.
 * Returns one of the L2CAP_TXSEQ_* codes: EXPECTED, UNEXPECTED (gap ->
 * needs SREJ), DUPLICATE, the *_SREJ variants while in SREJ_SENT state,
 * or INVALID / INVALID_IGNORE for out-of-window sequence numbers.
 * The INVALID_IGNORE distinction (tx_win <= half of tx_win_max) guards
 * against the "double poll" wraparound described in the comment below.
 */
4606 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4608 BT_DBG("chan %p, txseq %d", chan, txseq);
4610 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4611 chan->expected_tx_seq);
4613 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4614 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4616 /* See notes below regarding "double poll" and
4619 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4620 BT_DBG("Invalid/Ignore - after SREJ");
4621 return L2CAP_TXSEQ_INVALID_IGNORE;
4623 BT_DBG("Invalid - in window after SREJ sent");
4624 return L2CAP_TXSEQ_INVALID;
4628 if (chan->srej_list.head == txseq) {
4629 BT_DBG("Expected SREJ");
4630 return L2CAP_TXSEQ_EXPECTED_SREJ;
4633 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4634 BT_DBG("Duplicate SREJ - txseq already stored");
4635 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4638 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4639 BT_DBG("Unexpected SREJ - not requested");
4640 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4644 if (chan->expected_tx_seq == txseq) {
4645 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4647 BT_DBG("Invalid - txseq outside tx window");
4648 return L2CAP_TXSEQ_INVALID;
4651 return L2CAP_TXSEQ_EXPECTED;
/* txseq before expected_tx_seq (modulo the window) is a re-delivery. */
4655 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4656 __seq_offset(chan, chan->expected_tx_seq,
4657 chan->last_acked_seq)){
4658 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4659 return L2CAP_TXSEQ_DUPLICATE;
4662 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4663 /* A source of invalid packets is a "double poll" condition,
4664 * where delays cause us to send multiple poll packets. If
4665 * the remote stack receives and processes both polls,
4666 * sequence numbers can wrap around in such a way that a
4667 * resent frame has a sequence number that looks like new data
4668 * with a sequence gap. This would trigger an erroneous SREJ
4671 * Fortunately, this is impossible with a tx window that's
4672 * less than half of the maximum sequence number, which allows
4673 * invalid frames to be safely ignored.
4675 * With tx window sizes greater than half of the tx window
4676 * maximum, the frame is invalid and cannot be ignored. This
4677 * causes a disconnect.
4680 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4681 BT_DBG("Invalid/Ignore - txseq outside tx window");
4682 return L2CAP_TXSEQ_INVALID_IGNORE;
4684 BT_DBG("Invalid - txseq outside tx window");
4685 return L2CAP_TXSEQ_INVALID;
4688 BT_DBG("Unexpected - txseq indicates missing frames");
4689 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM receive-state handler for L2CAP_RX_STATE_RECV. Dispatches on the
 * rx event: in-order I-frames are reassembled and acked; a sequence gap
 * queues the frame, sends SREJ and moves to SREJ_SENT state; RR/RNR/REJ/
 * SREJ s-frames update the tx side. skb_in_use tracks whether the skb was
 * queued (and so must not be freed at the end).
 * NOTE(review): break statements and some branch bodies are elided in
 * this extract (gaps in embedded numbering).
 */
4693 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4694 struct l2cap_ctrl *control,
4695 struct sk_buff *skb, u8 event)
4698 bool skb_in_use = 0;
4700 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4704 case L2CAP_EV_RECV_IFRAME:
4705 switch (l2cap_classify_txseq(chan, control->txseq)) {
4706 case L2CAP_TXSEQ_EXPECTED:
4707 l2cap_pass_to_tx(chan, control);
/* In local-busy state expected frames are dropped; the peer will
 * retransmit them once busy clears.
 */
4709 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4710 BT_DBG("Busy, discarding expected seq %d",
4715 chan->expected_tx_seq = __next_seq(chan,
4718 chan->buffer_seq = chan->expected_tx_seq;
4721 err = l2cap_reassemble_sdu(chan, skb, control);
4725 if (control->final) {
4726 if (!test_and_clear_bit(CONN_REJ_ACT,
4727 &chan->conn_state)) {
4729 l2cap_retransmit_all(chan, control);
4730 l2cap_ertm_send(chan);
4734 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4735 l2cap_send_ack(chan);
4737 case L2CAP_TXSEQ_UNEXPECTED:
4738 l2cap_pass_to_tx(chan, control);
4740 /* Can't issue SREJ frames in the local busy state.
4741 * Drop this frame, it will be seen as missing
4742 * when local busy is exited.
4744 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4745 BT_DBG("Busy, discarding unexpected seq %d",
4750 /* There was a gap in the sequence, so an SREJ
4751 * must be sent for each missing frame. The
4752 * current frame is stored for later use.
4754 skb_queue_tail(&chan->srej_q, skb);
4756 BT_DBG("Queued %p (queue len %d)", skb,
4757 skb_queue_len(&chan->srej_q));
4759 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4760 l2cap_seq_list_clear(&chan->srej_list);
4761 l2cap_send_srej(chan, control->txseq);
4763 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
4765 case L2CAP_TXSEQ_DUPLICATE:
4766 l2cap_pass_to_tx(chan, control);
4768 case L2CAP_TXSEQ_INVALID_IGNORE:
4770 case L2CAP_TXSEQ_INVALID:
4772 l2cap_send_disconn_req(chan->conn, chan,
4777 case L2CAP_EV_RECV_RR:
4778 l2cap_pass_to_tx(chan, control);
4779 if (control->final) {
4780 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4782 if (!test_and_clear_bit(CONN_REJ_ACT,
4783 &chan->conn_state)) {
4785 l2cap_retransmit_all(chan, control);
4788 l2cap_ertm_send(chan);
4789 } else if (control->poll) {
4790 l2cap_send_i_or_rr_or_rnr(chan);
4792 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4793 &chan->conn_state) &&
4794 chan->unacked_frames)
4795 __set_retrans_timer(chan);
4797 l2cap_ertm_send(chan);
4800 case L2CAP_EV_RECV_RNR:
4801 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4802 l2cap_pass_to_tx(chan, control);
4803 if (control && control->poll) {
4804 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4805 l2cap_send_rr_or_rnr(chan, 0);
/* Peer is busy: stop retransmitting until it recovers. */
4807 __clear_retrans_timer(chan);
4808 l2cap_seq_list_clear(&chan->retrans_list);
4810 case L2CAP_EV_RECV_REJ:
4811 l2cap_handle_rej(chan, control);
4813 case L2CAP_EV_RECV_SREJ:
4814 l2cap_handle_srej(chan, control);
/* Free the skb unless it was queued (skb_in_use set elsewhere). */
4820 if (skb && !skb_in_use) {
4821 BT_DBG("Freeing %p", skb);
/* ERTM receive-state handler for L2CAP_RX_STATE_SREJ_SENT (one or more
 * SREJs outstanding). Incoming I-frames are queued into srej_q; when the
 * head of the SREJ list arrives, queued in-order frames are drained via
 * l2cap_rx_queued_iframes(). Gaps trigger further SREJs; s-frames update
 * the tx side as in the RECV state.
 * NOTE(review): break statements and some branch bodies are elided in
 * this extract (gaps in embedded numbering).
 */
4828 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4829 struct l2cap_ctrl *control,
4830 struct sk_buff *skb, u8 event)
4833 u16 txseq = control->txseq;
4834 bool skb_in_use = 0;
4836 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4840 case L2CAP_EV_RECV_IFRAME:
4841 switch (l2cap_classify_txseq(chan, txseq)) {
4842 case L2CAP_TXSEQ_EXPECTED:
4843 /* Keep frame for reassembly later */
4844 l2cap_pass_to_tx(chan, control);
4845 skb_queue_tail(&chan->srej_q, skb);
4847 BT_DBG("Queued %p (queue len %d)", skb,
4848 skb_queue_len(&chan->srej_q));
4850 chan->expected_tx_seq = __next_seq(chan, txseq);
4852 case L2CAP_TXSEQ_EXPECTED_SREJ:
/* The frame we SREJ'd for arrived: pop it from the pending list
 * and try to drain any now-contiguous queued frames.
 */
4853 l2cap_seq_list_pop(&chan->srej_list);
4855 l2cap_pass_to_tx(chan, control);
4856 skb_queue_tail(&chan->srej_q, skb);
4858 BT_DBG("Queued %p (queue len %d)", skb,
4859 skb_queue_len(&chan->srej_q));
4861 err = l2cap_rx_queued_iframes(chan);
4866 case L2CAP_TXSEQ_UNEXPECTED:
4867 /* Got a frame that can't be reassembled yet.
4868 * Save it for later, and send SREJs to cover
4869 * the missing frames.
4871 skb_queue_tail(&chan->srej_q, skb);
4873 BT_DBG("Queued %p (queue len %d)", skb,
4874 skb_queue_len(&chan->srej_q));
4876 l2cap_pass_to_tx(chan, control);
4877 l2cap_send_srej(chan, control->txseq);
4879 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4880 /* This frame was requested with an SREJ, but
4881 * some expected retransmitted frames are
4882 * missing. Request retransmission of missing
4885 skb_queue_tail(&chan->srej_q, skb);
4887 BT_DBG("Queued %p (queue len %d)", skb,
4888 skb_queue_len(&chan->srej_q));
4890 l2cap_pass_to_tx(chan, control);
4891 l2cap_send_srej_list(chan, control->txseq);
4893 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4894 /* We've already queued this frame. Drop this copy. */
4895 l2cap_pass_to_tx(chan, control);
4897 case L2CAP_TXSEQ_DUPLICATE:
4898 /* Expecting a later sequence number, so this frame
4899 * was already received. Ignore it completely.
4902 case L2CAP_TXSEQ_INVALID_IGNORE:
4904 case L2CAP_TXSEQ_INVALID:
4906 l2cap_send_disconn_req(chan->conn, chan,
4911 case L2CAP_EV_RECV_RR:
4912 l2cap_pass_to_tx(chan, control);
4913 if (control->final) {
4914 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4916 if (!test_and_clear_bit(CONN_REJ_ACT,
4917 &chan->conn_state)) {
4919 l2cap_retransmit_all(chan, control);
4922 l2cap_ertm_send(chan);
4923 } else if (control->poll) {
4924 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4925 &chan->conn_state) &&
4926 chan->unacked_frames) {
4927 __set_retrans_timer(chan);
/* Poll while SREJs are outstanding: answer with the tail SREJ
 * carrying the F-bit.
 */
4930 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4931 l2cap_send_srej_tail(chan);
4933 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4934 &chan->conn_state) &&
4935 chan->unacked_frames)
4936 __set_retrans_timer(chan);
4938 l2cap_send_ack(chan);
4941 case L2CAP_EV_RECV_RNR:
4942 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4943 l2cap_pass_to_tx(chan, control);
4944 if (control->poll) {
4945 l2cap_send_srej_tail(chan);
4947 struct l2cap_ctrl rr_control;
4948 memset(&rr_control, 0, sizeof(rr_control));
4949 rr_control.sframe = 1;
4950 rr_control.super = L2CAP_SUPER_RR;
4951 rr_control.reqseq = chan->buffer_seq;
4952 l2cap_send_sframe(chan, &rr_control);
4956 case L2CAP_EV_RECV_REJ:
4957 l2cap_handle_rej(chan, control);
4959 case L2CAP_EV_RECV_SREJ:
4960 l2cap_handle_srej(chan, control);
/* Free the skb unless it was queued into srej_q. */
4964 if (skb && !skb_in_use) {
4965 BT_DBG("Freeing %p", skb);
/* Validate an incoming reqseq (ack number): it must acknowledge a frame
 * in the sent-but-unacked range, i.e. lie within
 * [expected_ack_seq, next_tx_seq] modulo the sequence space.
 */
4972 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
4974 /* Make sure reqseq is for a packet that has been sent but not acked */
4977 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
4978 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM rx dispatcher: after validating control->reqseq, route
 * the event to the handler for the current rx_state (RECV or SREJ_SENT).
 * An invalid reqseq is a protocol violation and disconnects the channel.
 */
4981 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
4982 struct sk_buff *skb, u8 event)
4986 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
4987 control, skb, event, chan->rx_state);
4989 if (__valid_reqseq(chan, control->reqseq)) {
4990 switch (chan->rx_state) {
4991 case L2CAP_RX_STATE_RECV:
4992 err = l2cap_rx_state_recv(chan, control, skb, event);
4994 case L2CAP_RX_STATE_SREJ_SENT:
4995 err = l2cap_rx_state_srej_sent(chan, control, skb,
5003 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5004 control->reqseq, chan->next_tx_seq,
5005 chan->expected_ack_seq);
5006 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Streaming-mode receive: only exactly-expected txseq frames are
 * reassembled; anything else is dropped (no retransmission in streaming
 * mode) and a partially assembled SDU is discarded. Sequence state is
 * resynchronized to the received txseq either way.
 */
5012 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5013 struct sk_buff *skb)
5017 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5020 if (l2cap_classify_txseq(chan, control->txseq) ==
5021 L2CAP_TXSEQ_EXPECTED) {
5022 l2cap_pass_to_tx(chan, control);
5024 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5025 __next_seq(chan, chan->buffer_seq));
5027 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5029 l2cap_reassemble_sdu(chan, skb, control);
/* Out-of-sequence frame: drop any partial SDU and the new skb. */
5032 kfree_skb(chan->sdu);
5035 chan->sdu_last_frag = NULL;
5039 BT_DBG("Freeing %p", skb);
/* Resync: treat the received txseq as acked and expect its successor. */
5044 chan->last_acked_seq = control->txseq;
5045 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM/streaming data frames on a channel: unpack the
 * control field, verify FCS, validate length against MPS and the F/P bit
 * rules, then hand I-frames to l2cap_rx()/l2cap_stream_rx() and s-frames
 * (mapped through rx_func_to_event) to l2cap_rx(). Any protocol
 * violation disconnects the channel with ECONNRESET.
 * NOTE(review): several drop/return paths are elided in this extract.
 */
5050 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5052 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5056 __unpack_control(chan, skb);
5061 * We can just drop the corrupted I-frame here.
5062 * Receiver will miss it and start proper recovery
5063 * procedures and ask for retransmission.
5065 if (l2cap_check_fcs(chan, skb))
/* Payload length excludes the SDU-length header and trailing FCS. */
5068 if (!control->sframe && control->sar == L2CAP_SAR_START)
5069 len -= L2CAP_SDULEN_SIZE;
5071 if (chan->fcs == L2CAP_FCS_CRC16)
5072 len -= L2CAP_FCS_SIZE;
5074 if (len > chan->mps) {
5075 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5079 if (!control->sframe) {
5082 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5083 control->sar, control->reqseq, control->final,
5086 /* Validate F-bit - F=0 always valid, F=1 only
5087 * valid in TX WAIT_F
5089 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5092 if (chan->mode != L2CAP_MODE_STREAMING) {
5093 event = L2CAP_EV_RECV_IFRAME;
5094 err = l2cap_rx(chan, control, skb, event);
5096 err = l2cap_stream_rx(chan, control, skb);
5100 l2cap_send_disconn_req(chan->conn, chan,
5103 const u8 rx_func_to_event[4] = {
5104 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5105 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5108 /* Only I-frames are expected in streaming mode */
5109 if (chan->mode == L2CAP_MODE_STREAMING)
5112 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5113 control->reqseq, control->final, control->poll,
5118 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5122 /* Validate F and P bits */
5123 if (control->final && (control->poll ||
5124 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
/* Map the 2-bit supervisory function to the rx event. */
5127 event = rx_func_to_event[control->super];
5128 if (l2cap_rx(chan, control, skb, event))
5129 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Deliver a data frame to the channel identified by @cid. Unknown CIDs
 * are dropped (with special handling for the A2MP CID, where a channel
 * may be created on demand). Dispatch by channel mode: basic-mode frames
 * go straight to chan->ops->recv() after an MTU check; ERTM/streaming
 * frames go through l2cap_data_rcv().
 */
5139 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
5140 struct sk_buff *skb)
5142 struct l2cap_chan *chan;
5144 chan = l2cap_get_chan_by_scid(conn, cid);
5146 if (cid == L2CAP_CID_A2MP) {
5147 chan = a2mp_channel_create(conn, skb);
5153 l2cap_chan_lock(chan);
5155 BT_DBG("unknown cid 0x%4.4x", cid);
5156 /* Drop packet and return */
5162 BT_DBG("chan %p, len %d", chan, skb->len);
5164 if (chan->state != BT_CONNECTED)
5167 switch (chan->mode) {
5168 case L2CAP_MODE_BASIC:
5169 /* If socket recv buffers overflows we drop data here
5170 * which is *bad* because L2CAP has to be reliable.
5171 * But we don't have any other choice. L2CAP doesn't
5172 * provide flow control mechanism. */
5174 if (chan->imtu < skb->len)
5177 if (!chan->ops->recv(chan, skb))
5181 case L2CAP_MODE_ERTM:
5182 case L2CAP_MODE_STREAMING:
5183 l2cap_data_rcv(chan, skb);
5187 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
5195 l2cap_chan_unlock(chan);
/* Deliver a connectionless (CID 0x0002) frame to a channel listening on
 * @psm for this address pair; drop if the channel is in the wrong state
 * or the frame exceeds its MTU.
 */
5198 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5199 struct sk_buff *skb)
5201 struct l2cap_chan *chan;
5203 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5207 BT_DBG("chan %p, len %d", chan, skb->len);
5209 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5212 if (chan->imtu < skb->len)
5215 if (!chan->ops->recv(chan, skb))
/* Deliver an ATT (LE data CID) frame to the global channel registered on
 * @cid for this address pair; drop on bad state or MTU overflow.
 * Structure mirrors l2cap_conless_channel() but looks up by scid.
 */
5222 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5223 struct sk_buff *skb)
5225 struct l2cap_chan *chan;
5227 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5231 BT_DBG("chan %p, len %d", chan, skb->len);
5233 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5236 if (chan->imtu < skb->len)
5239 if (!chan->ops->recv(chan, skb))
/* Demultiplex a complete L2CAP frame: strip the basic header, validate
 * the advertised length against the skb, then route by CID — signaling,
 * connectionless (PSM-prefixed), LE data/ATT, SMP, or a regular data
 * channel.
 */
5246 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5248 struct l2cap_hdr *lh = (void *) skb->data;
5252 skb_pull(skb, L2CAP_HDR_SIZE);
5253 cid = __le16_to_cpu(lh->cid);
5254 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length. */
5256 if (len != skb->len) {
5261 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5264 case L2CAP_CID_LE_SIGNALING:
5265 case L2CAP_CID_SIGNALING:
5266 l2cap_sig_channel(conn, skb);
5269 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM before the payload. */
5270 psm = get_unaligned((__le16 *) skb->data);
5271 skb_pull(skb, L2CAP_PSMLEN_SIZE);
5272 l2cap_conless_channel(conn, psm, skb);
5275 case L2CAP_CID_LE_DATA:
5276 l2cap_att_channel(conn, cid, skb);
/* SMP rejection tears down the whole LE connection. */
5280 if (smp_sig_channel(conn, skb))
5281 l2cap_conn_del(conn->hcon, EACCES);
5285 l2cap_data_channel(conn, cid, skb);
5290 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback for an incoming ACL connection: scan listening channels
 * and build the link-mode mask (ACCEPT, plus MASTER if role switch is
 * allowed). lm1 accumulates exact local-address matches, lm2 wildcard
 * (BDADDR_ANY) listeners; the exact match wins if one was found.
 */
5292 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5294 int exact = 0, lm1 = 0, lm2 = 0;
5295 struct l2cap_chan *c;
5297 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5299 /* Find listening sockets and check their link_mode */
5300 read_lock(&chan_list_lock);
5301 list_for_each_entry(c, &chan_list, global_l) {
5302 struct sock *sk = c->sk;
5304 if (c->state != BT_LISTEN)
5307 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5308 lm1 |= HCI_LM_ACCEPT;
5309 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5310 lm1 |= HCI_LM_MASTER;
5312 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5313 lm2 |= HCI_LM_ACCEPT;
5314 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5315 lm2 |= HCI_LM_MASTER;
5318 read_unlock(&chan_list_lock);
5320 return exact ? lm1 : lm2;
/* HCI connect-complete callback: on success, create/refresh the L2CAP
 * connection and mark it ready; on failure, tear it down with the HCI
 * status translated to an errno.
 */
5323 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5325 struct l2cap_conn *conn;
5327 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5330 conn = l2cap_conn_add(hcon, status);
5332 l2cap_conn_ready(conn);
5334 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI disconnect-indication callback: report the stored disconnect
 * reason, defaulting to REMOTE_USER_TERM when no L2CAP connection state
 * exists.
 */
5339 int l2cap_disconn_ind(struct hci_conn *hcon)
5341 struct l2cap_conn *conn = hcon->l2cap_data;
5343 BT_DBG("hcon %p", hcon);
5346 return HCI_ERROR_REMOTE_USER_TERM;
5347 return conn->disc_reason;
/* HCI disconnect-complete callback: tear down the L2CAP connection,
 * translating the HCI reason code to an errno.
 */
5350 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5352 BT_DBG("hcon %p reason %d", hcon, reason);
5354 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption starts a grace timer (MEDIUM security) or closes the
 * channel outright (HIGH); regaining it clears the grace timer.
 */
5358 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5360 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5363 if (encrypt == 0x00) {
5364 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5365 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5366 } else if (chan->sec_level == BT_SECURITY_HIGH)
5367 l2cap_chan_close(chan, ECONNREFUSED);
5369 if (chan->sec_level == BT_SECURITY_MEDIUM)
5370 __clear_chan_timer(chan);
/* HCI security (authentication/encryption) completion callback. For LE
 * links, hand off to SMP key distribution. For each channel on the
 * connection: LE data channels become ready on success; BT_CONNECT
 * channels proceed to send a connection request; BT_CONNECT2 channels
 * answer the pending connect request (success, pending-authorization, or
 * security block) and kick off configuration.
 * NOTE(review): some branch bodies/returns are elided in this extract.
 */
5374 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5376 struct l2cap_conn *conn = hcon->l2cap_data;
5377 struct l2cap_chan *chan;
5382 BT_DBG("conn %p", conn);
5384 if (hcon->type == LE_LINK) {
5385 if (!status && encrypt)
5386 smp_distribute_keys(conn, 0);
5387 cancel_delayed_work(&conn->security_timer);
5390 mutex_lock(&conn->chan_lock);
5392 list_for_each_entry(chan, &conn->chan_l, list) {
5393 l2cap_chan_lock(chan);
5395 BT_DBG("chan->scid %d", chan->scid);
5397 if (chan->scid == L2CAP_CID_LE_DATA) {
5398 if (!status && encrypt) {
5399 chan->sec_level = hcon->sec_level;
5400 l2cap_chan_ready(chan);
5403 l2cap_chan_unlock(chan);
/* Channels with a pending connect are handled when it completes. */
5407 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5408 l2cap_chan_unlock(chan);
5412 if (!status && (chan->state == BT_CONNECTED ||
5413 chan->state == BT_CONFIG)) {
5414 struct sock *sk = chan->sk;
5416 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5417 sk->sk_state_change(sk);
5419 l2cap_check_encryption(chan, encrypt);
5420 l2cap_chan_unlock(chan);
5424 if (chan->state == BT_CONNECT) {
5426 l2cap_send_conn_req(chan);
5428 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5430 } else if (chan->state == BT_CONNECT2) {
5431 struct sock *sk = chan->sk;
5432 struct l2cap_conn_rsp rsp;
5438 if (test_bit(BT_SK_DEFER_SETUP,
5439 &bt_sk(sk)->flags)) {
/* Deferred setup: tell the peer we are pending and wake
 * the listening parent to authorize.
 */
5440 struct sock *parent = bt_sk(sk)->parent;
5441 res = L2CAP_CR_PEND;
5442 stat = L2CAP_CS_AUTHOR_PEND;
5444 parent->sk_data_ready(parent, 0);
5446 __l2cap_state_change(chan, BT_CONFIG);
5447 res = L2CAP_CR_SUCCESS;
5448 stat = L2CAP_CS_NO_INFO;
/* Security failure: refuse the connection and disconnect. */
5451 __l2cap_state_change(chan, BT_DISCONN);
5452 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5453 res = L2CAP_CR_SEC_BLOCK;
5454 stat = L2CAP_CS_NO_INFO;
5459 rsp.scid = cpu_to_le16(chan->dcid);
5460 rsp.dcid = cpu_to_le16(chan->scid);
5461 rsp.result = cpu_to_le16(res);
5462 rsp.status = cpu_to_le16(stat);
5463 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* On success, immediately start configuration if not done yet. */
5466 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
5467 res == L2CAP_CR_SUCCESS) {
5469 set_bit(CONF_REQ_SENT, &chan->conf_state);
5470 l2cap_send_cmd(conn, l2cap_get_ident(conn),
5472 l2cap_build_conf_req(chan, buf),
5474 chan->num_conf_req++;
5478 l2cap_chan_unlock(chan);
5481 mutex_unlock(&conn->chan_lock);
/* HCI ACL data entry point: reassemble fragmented L2CAP frames. A start
 * fragment (no ACL_CONT) either holds a complete frame (dispatched
 * immediately) or allocates conn->rx_skb sized from the L2CAP header;
 * continuation fragments are appended until conn->rx_len reaches zero.
 * Malformed sequences mark the connection unreliable (ECOMM).
 * NOTE(review): several return/cleanup lines are elided in this extract.
 */
5486 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5488 struct l2cap_conn *conn = hcon->l2cap_data;
5491 conn = l2cap_conn_add(hcon, 0);
5496 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5498 if (!(flags & ACL_CONT)) {
5499 struct l2cap_hdr *hdr;
/* A new start fragment with reassembly in progress means the
 * previous frame was truncated: discard it.
 */
5503 BT_ERR("Unexpected start frame (len %d)", skb->len);
5504 kfree_skb(conn->rx_skb);
5505 conn->rx_skb = NULL;
5507 l2cap_conn_unreliable(conn, ECOMM);
5510 /* Start fragment always begin with Basic L2CAP header */
5511 if (skb->len < L2CAP_HDR_SIZE) {
5512 BT_ERR("Frame is too short (len %d)", skb->len);
5513 l2cap_conn_unreliable(conn, ECOMM);
5517 hdr = (struct l2cap_hdr *) skb->data;
5518 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5520 if (len == skb->len) {
5521 /* Complete frame received */
5522 l2cap_recv_frame(conn, skb);
5526 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5528 if (skb->len > len) {
5529 BT_ERR("Frame is too long (len %d, expected len %d)",
5531 l2cap_conn_unreliable(conn, ECOMM);
5535 /* Allocate skb for the complete frame (with header) */
5536 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5540 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5542 conn->rx_len = len - skb->len;
5544 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with nothing expected is a protocol error. */
5546 if (!conn->rx_len) {
5547 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5548 l2cap_conn_unreliable(conn, ECOMM);
5552 if (skb->len > conn->rx_len) {
5553 BT_ERR("Fragment is too long (len %d, expected %d)",
5554 skb->len, conn->rx_len);
5555 kfree_skb(conn->rx_skb);
5556 conn->rx_skb = NULL;
5558 l2cap_conn_unreliable(conn, ECOMM);
5562 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5564 conn->rx_len -= skb->len;
5566 if (!conn->rx_len) {
5567 /* Complete frame received */
5568 l2cap_recv_frame(conn, conn->rx_skb);
5569 conn->rx_skb = NULL;
/* debugfs seq_file show: dump one line per registered channel
 * (addresses, state, PSM, CIDs, MTUs, security level, mode) under the
 * global channel-list read lock.
 */
5578 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5580 struct l2cap_chan *c;
5582 read_lock(&chan_list_lock);
5584 list_for_each_entry(c, &chan_list, global_l) {
5585 struct sock *sk = c->sk;
5587 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5588 batostr(&bt_sk(sk)->src),
5589 batostr(&bt_sk(sk)->dst),
5590 c->state, __le16_to_cpu(c->psm),
5591 c->scid, c->dcid, c->imtu, c->omtu,
5592 c->sec_level, c->mode);
5595 read_unlock(&chan_list_lock);
/* debugfs open hook: bind the seq_file show routine to this file. */
5600 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5602 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (seq_file based). */
5605 static const struct file_operations l2cap_debugfs_fops = {
5606 .open = l2cap_debugfs_open,
5608 .llseek = seq_lseek,
5609 .release = single_release,
/* dentry for the debugfs "l2cap" file; created in l2cap_init(),
 * removed in l2cap_exit().
 */
5612 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer, then (best effort)
 * create the read-only debugfs "l2cap" channel listing.
 */
5614 int __init l2cap_init(void)
5618 err = l2cap_init_sockets();
5623 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5624 bt_debugfs, NULL, &l2cap_debugfs_fops);
/* debugfs failure is non-fatal: just log it. */
5626 BT_ERR("Failed to create L2CAP debug file");
/* Module teardown: remove the debugfs entry and unregister sockets. */
5632 void l2cap_exit(void)
5634 debugfs_remove(l2cap_debugfs)
5635 l2cap_cleanup_sockets();
/* Module parameter: allow disabling ERTM at load time (writable via
 * sysfs, mode 0644).
 */
5638 module_param(disable_ertm, bool, 0644);
5639 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");