2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
/* Module-wide state and forward declarations for the L2CAP layer. */
56 #define VERSION "2.14"
/* ERTM (Enhanced Retransmission Mode) is opt-in via module parameter. */
58 static int enable_ertm = 0;
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 static const struct proto_ops l2cap_sock_ops;
/* Workqueue backing l2cap_busy_work (local-busy recovery). */
65 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, guarded by its embedded rwlock. */
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71 static void l2cap_busy_work(struct work_struct *work);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
78 u8 code, u8 ident, u16 dlen, void *data);
80 /* ---- L2CAP timers ---- */
/* sk_timer callback: choose an error code from the socket state and close
 * the channel.  NOTE(review): lines are elided in this extract — the
 * default value of 'reason' and the locking around the close are not
 * visible here. */
81 static void l2cap_sock_timeout(unsigned long arg)
83 struct sock *sk = (struct sock *) arg;
86 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* Timing out while connected/configuring, or while connecting beyond the
 * SDP security level, is reported to the user as ECONNREFUSED. */
90 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
91 reason = ECONNREFUSED;
92 else if (sk->sk_state == BT_CONNECT &&
93 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
94 reason = ECONNREFUSED;
98 __l2cap_sock_close(sk, reason);
/* (Re)arm the per-socket timer 'timeout' jiffies from now. */
106 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
108 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
109 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending per-socket timer, if any. */
112 static void l2cap_sock_clear_timer(struct sock *sk)
114 BT_DBG("sock %p state %d", sk, sk->sk_state);
115 sk_stop_timer(sk, &sk->sk_timer);
118 /* ---- L2CAP channels ---- */
/* Linear scan of the per-connection channel list for a matching
 * destination CID.  Caller must hold the channel-list lock. */
119 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->dcid == cid)
/* As above, but match on the source CID. */
129 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
132 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
133 if (l2cap_pi(s)->scid == cid)
139 /* Find channel with given SCID.
140 * Returns locked socket */
141 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
145 s = __l2cap_get_chan_by_scid(l, cid)
/* NOTE(review): the read_lock()/bh_lock_sock() lines are elided here. */
148 read_unlock(&l->lock);
/* Find the channel whose last signalling command used 'ident'. */
152 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
155 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
156 if (l2cap_pi(s)->ident == ident)
162 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
166 s = __l2cap_get_chan_by_ident(l, ident)
169 read_unlock(&l->lock);
/* Allocate the first free dynamic source CID for this connection. */
173 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
175 u16 cid = L2CAP_CID_DYN_START;
177 for (; cid < L2CAP_CID_DYN_END; cid++) {
178 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push sk onto the head of the connection's channel list.
 * Caller holds the channel-list write lock. */
185 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
190 l2cap_pi(l->head)->prev_c = sk;
192 l2cap_pi(sk)->next_c = l->head;
193 l2cap_pi(sk)->prev_c = NULL;
/* Remove sk from the doubly linked channel list, taking the write lock
 * itself (unlike __l2cap_chan_link). */
197 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
199 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
201 write_lock_bh(&l->lock);
206 l2cap_pi(next)->prev_c = prev;
208 l2cap_pi(prev)->next_c = next;
209 write_unlock_bh(&l->lock);
/* Attach sk to conn and assign CIDs according to the socket type.
 * Caller holds the channel-list write lock (see l2cap_chan_add);
 * parent, when non-NULL, is the listening socket that accepts sk. */
214 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
216 struct l2cap_chan_list *l = &conn->chan_list;
218 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
219 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 — presumably HCI "remote user terminated connection"; used as the
 * default disconnect reason.  TODO(review): confirm against hci.h. */
221 conn->disc_reason = 0x13;
223 l2cap_pi(sk)->conn = conn;
225 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
226 /* Alloc CID for connection-oriented socket */
227 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
228 } else if (sk->sk_type == SOCK_DGRAM) {
229 /* Connectionless socket */
230 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
231 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
232 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
234 /* Raw socket can send/recv signalling messages only */
235 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
236 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
237 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
240 __l2cap_chan_link(l, sk);
/* Incoming connection: queue on the listener's accept queue. */
243 bt_accept_enqueue(parent, sk);
247 * Must be called on the locked socket. */
/* Detach the channel from its connection, tear down all ERTM state and
 * mark the socket closed; err becomes the socket error.  NOTE(review):
 * some lines (e.g. where sk->sk_err is assigned) are elided here. */
248 static void l2cap_chan_del(struct sock *sk, int err)
250 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
251 struct sock *parent = bt_sk(sk)->parent;
253 l2cap_sock_clear_timer(sk);
255 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
258 /* Unlink from channel list */
259 l2cap_chan_unlink(&conn->chan_list, sk);
260 l2cap_pi(sk)->conn = NULL;
/* Drop the ACL reference held on behalf of this channel. */
261 hci_conn_put(conn->hcon);
264 sk->sk_state = BT_CLOSED;
265 sock_set_flag(sk, SOCK_ZAPPED);
/* Still pending on a listener: unlink and wake the accepting thread. */
271 bt_accept_unlink(sk);
272 parent->sk_data_ready(parent, 0);
274 sk->sk_state_change(sk);
276 skb_queue_purge(TX_QUEUE(sk));
/* ERTM teardown: stop all three timers, purge SREJ/busy queues and free
 * every outstanding srej_list node. */
278 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
279 struct srej_list *l, *tmp;
281 del_timer(&l2cap_pi(sk)->retrans_timer);
282 del_timer(&l2cap_pi(sk)->monitor_timer);
283 del_timer(&l2cap_pi(sk)->ack_timer);
285 skb_queue_purge(SREJ_QUEUE(sk));
286 skb_queue_purge(BUSY_QUEUE(sk));
288 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
295 /* Service level security */
/* Map the channel's PSM and requested security level to an HCI
 * authentication type, then ask the HCI layer to enforce it.
 * Returns nonzero when the link already satisfies the requirement. */
296 static inline int l2cap_check_security(struct sock *sk)
298 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* PSM 0x0001 is SDP: never require bonding for it. */
301 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
302 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
303 auth_type = HCI_AT_NO_BONDING_MITM;
305 auth_type = HCI_AT_NO_BONDING;
/* Downgrade LOW to SDP level so SDP traffic stays unauthenticated. */
307 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
308 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
310 switch (l2cap_pi(sk)->sec_level) {
311 case BT_SECURITY_HIGH:
312 auth_type = HCI_AT_GENERAL_BONDING_MITM;
314 case BT_SECURITY_MEDIUM:
315 auth_type = HCI_AT_GENERAL_BONDING;
318 auth_type = HCI_AT_NO_BONDING;
323 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel-reserved 1..128 range (see comment below). */
327 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
331 /* Get next available identificator.
332 * 1 - 128 are used by kernel.
333 * 129 - 199 are reserved.
334 * 200 - 254 are used by utilities like l2ping, etc.
337 spin_lock_bh(&conn->lock);
339 if (++conn->tx_ident > 128)
344 spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command and push it on the ACL link.
 * NOTE(review): the NULL check on the built skb is elided here. */
349 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
351 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
353 BT_DBG("code 0x%2.2x", code);
358 hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM S-frame (supervisory frame) carrying
 * 'control'.  Adds F/P bits from pending connection state and an FCS
 * trailer when CRC16 checking is configured. */
361 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
364 struct l2cap_hdr *lh;
365 struct l2cap_conn *conn = pi->conn;
366 struct sock *sk = (struct sock *)pi;
367 int count, hlen = L2CAP_HDR_SIZE + 2;
369 if (sk->sk_state != BT_CONNECTED)
/* FCS adds a 2-byte trailer (hlen += 2 presumably elided here). */
372 if (pi->fcs == L2CAP_FCS_CRC16)
375 BT_DBG("pi %p, control 0x%2.2x", pi, control);
377 count = min_t(unsigned int, conn->mtu, hlen);
378 control |= L2CAP_CTRL_FRAME_TYPE;
/* Consume a pending Final bit, if one is owed to the peer. */
380 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
381 control |= L2CAP_CTRL_FINAL;
382 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Likewise for a pending Poll bit. */
385 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
386 control |= L2CAP_CTRL_POLL;
387 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
390 skb = bt_skb_alloc(count, GFP_ATOMIC);
394 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
395 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
396 lh->cid = cpu_to_le16(pi->dcid);
397 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything except the FCS field itself. */
399 if (pi->fcs == L2CAP_FCS_CRC16) {
400 u16 fcs = crc16(0, (u8 *)lh, count - 2);
401 put_unaligned_le16(fcs, skb_put(skb, 2));
404 hci_send_acl(pi->conn->hcon, skb, 0);
/* Send RR (receiver ready) or RNR (receiver not ready) depending on
 * whether we are locally busy, acknowledging up to buffer_seq. */
407 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
409 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
410 control |= L2CAP_SUPER_RCV_NOT_READY;
411 pi->conn_state |= L2CAP_CONN_RNR_SENT;
413 control |= L2CAP_SUPER_RCV_READY;
415 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
417 l2cap_send_sframe(pi, control);
/* True when no Connect Request is outstanding for this channel. */
420 static inline int __l2cap_no_conn_pending(struct sock *sk)
422 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off connection establishment for sk: if the peer's feature mask
 * is already known, send CONN_REQ (once security allows); otherwise
 * first issue an Information Request for the feature mask. */
425 static void l2cap_do_start(struct sock *sk)
427 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
429 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight — wait for it to finish. */
430 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
433 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
434 struct l2cap_conn_req req;
435 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
436 req.psm = l2cap_pi(sk)->psm;
438 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
439 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
441 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
442 L2CAP_CONN_REQ, sizeof(req), &req);
/* No feature-mask request sent yet: ask for it and arm the timeout. */
445 struct l2cap_info_req req;
446 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
448 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
449 conn->info_ident = l2cap_get_ident(conn);
451 mod_timer(&conn->info_timer, jiffies +
452 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
454 l2cap_send_cmd(conn, conn->info_ident,
455 L2CAP_INFO_REQ, sizeof(req), &req);
/* Nonzero when 'mode' is usable given both the remote feature mask and
 * our local one (ERTM/streaming presumably added when enable_ertm is
 * set — the condition line is elided here). */
459 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
461 u32 local_feat_mask = l2cap_feat_mask;
463 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
466 case L2CAP_MODE_ERTM:
467 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
468 case L2CAP_MODE_STREAMING:
469 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Abort the channel: purge pending TX, stop ERTM timers, send an L2CAP
 * Disconnect Request and move the socket to BT_DISCONN with error err. */
475 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
477 struct l2cap_disconn_req req;
482 skb_queue_purge(TX_QUEUE(sk));
484 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
485 del_timer(&l2cap_pi(sk)->retrans_timer);
486 del_timer(&l2cap_pi(sk)->monitor_timer);
487 del_timer(&l2cap_pi(sk)->ack_timer);
490 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
491 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
492 l2cap_send_cmd(conn, l2cap_get_ident(conn),
493 L2CAP_DISCONN_REQ, sizeof(req), &req);
495 sk->sk_state = BT_DISCONN;
499 /* ---- L2CAP connections ---- */
/* Advance the state machine of every channel on conn: channels in
 * BT_CONNECT get a Connect Request (once security allows), channels in
 * BT_CONNECT2 get a Connect Response.  Channels whose configured mode
 * neither side supports are collected on a local deletion list and
 * closed only after the channel-list read lock has been dropped. */
500 static void l2cap_conn_start(struct l2cap_conn *conn)
502 struct l2cap_chan_list *l = &conn->chan_list;
503 struct sock_del_list del, *tmp1, *tmp2;
506 BT_DBG("conn %p", conn);
508 INIT_LIST_HEAD(&del.list);
512 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
515 if (sk->sk_type != SOCK_SEQPACKET &&
516 sk->sk_type != SOCK_STREAM) {
521 if (sk->sk_state == BT_CONNECT) {
522 if (l2cap_check_security(sk) &&
523 __l2cap_no_conn_pending(sk)) {
524 struct l2cap_conn_req req;
526 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
528 && l2cap_pi(sk)->conf_state &
529 L2CAP_CONF_STATE2_DEVICE) {
/* FIX: size the allocation by the type actually stored on the list.
 * tmp1 is a struct sock_del_list * (tmp1->list / tmp1->sk below);
 * the original sized it by struct srej_list, risking under-allocation. */
530 tmp1 = kzalloc(sizeof(struct sock_del_list),
533 list_add_tail(&tmp1->list, &del.list);
538 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
539 req.psm = l2cap_pi(sk)->psm;
541 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
542 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
544 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
545 L2CAP_CONN_REQ, sizeof(req), &req);
547 } else if (sk->sk_state == BT_CONNECT2) {
548 struct l2cap_conn_rsp rsp;
549 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
550 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
552 if (l2cap_check_security(sk)) {
/* Deferred setup: report "pending / authorization pending" and wake
 * the listener; the final response is sent once userspace accepts. */
553 if (bt_sk(sk)->defer_setup) {
554 struct sock *parent = bt_sk(sk)->parent;
555 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
556 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
557 parent->sk_data_ready(parent, 0);
560 sk->sk_state = BT_CONFIG;
561 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
562 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied: answer "pending / authentication pending". */
565 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
566 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
569 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
570 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
576 read_unlock(&l->lock);
/* Now close the channels collected above, outside the list lock. */
578 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
579 bh_lock_sock(tmp1->sk);
580 __l2cap_sock_close(tmp1->sk, ECONNRESET);
581 bh_unlock_sock(tmp1->sk);
582 list_del(&tmp1->list);
/* ACL link came up: raw/dgram channels become connected immediately;
 * connection-oriented channels in BT_CONNECT continue via l2cap_do_start
 * (call presumably on the elided line after 604). */
587 static void l2cap_conn_ready(struct l2cap_conn *conn)
589 struct l2cap_chan_list *l = &conn->chan_list;
592 BT_DBG("conn %p", conn);
596 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
599 if (sk->sk_type != SOCK_SEQPACKET &&
600 sk->sk_type != SOCK_STREAM) {
601 l2cap_sock_clear_timer(sk);
602 sk->sk_state = BT_CONNECTED;
603 sk->sk_state_change(sk);
604 } else if (sk->sk_state == BT_CONNECT)
610 read_unlock(&l->lock);
613 /* Notify sockets that we cannot guaranty reliability anymore */
/* Deliver err to every channel that asked for reliable delivery
 * (force_reliable); the error-raising line is elided here. */
614 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
616 struct l2cap_chan_list *l = &conn->chan_list;
619 BT_DBG("conn %p", conn);
623 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
624 if (l2cap_pi(sk)->force_reliable)
628 read_unlock(&l->lock);
/* Information Request timed out: mark feature exchange done (with no
 * data) and let pending channels proceed. */
631 static void l2cap_info_timeout(unsigned long arg)
633 struct l2cap_conn *conn = (void *) arg;
635 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
636 conn->info_ident = 0;
638 l2cap_conn_start(conn);
/* Create (or return the existing) L2CAP connection object for an ACL
 * link.  Allocated GFP_ATOMIC since this runs in HCI event context. */
641 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
643 struct l2cap_conn *conn = hcon->l2cap_data;
648 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
652 hcon->l2cap_data = conn;
655 BT_DBG("hcon %p conn %p", hcon, conn);
657 conn->mtu = hcon->hdev->acl_mtu;
658 conn->src = &hcon->hdev->bdaddr;
659 conn->dst = &hcon->dst;
663 spin_lock_init(&conn->lock);
664 rwlock_init(&conn->chan_list.lock);
666 setup_timer(&conn->info_timer, l2cap_info_timeout,
667 (unsigned long) conn);
/* 0x13 — default disconnect reason (see __l2cap_chan_add). */
669 conn->disc_reason = 0x13;
/* ACL link went down: free partial RX state, delete every channel with
 * err, stop the info timer and detach from the hci_conn. */
674 static void l2cap_conn_del(struct hci_conn *hcon, int err)
676 struct l2cap_conn *conn = hcon->l2cap_data;
682 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
684 kfree_skb(conn->rx_skb);
687 while ((sk = conn->chan_list.head)) {
689 l2cap_chan_del(sk, err);
694 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
695 del_timer_sync(&conn->info_timer);
697 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add. */
701 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
703 struct l2cap_chan_list *l = &conn->chan_list;
704 write_lock_bh(&l->lock);
705 __l2cap_chan_add(conn, sk, parent);
706 write_unlock_bh(&l->lock);
709 /* ---- Socket interface ---- */
/* Exact-match lookup: socket bound to (psm, src).  Caller holds
 * l2cap_sk_list.lock. */
710 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
713 struct hlist_node *node;
714 sk_for_each(sk, node, &l2cap_sk_list.head)
715 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
722 /* Find socket with psm and source bdaddr.
723 * Returns closest match.
/* Prefers an exact source-address match; falls back to a socket bound
 * to BDADDR_ANY (kept in sk1).  Caller holds l2cap_sk_list.lock. */
725 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
727 struct sock *sk = NULL, *sk1 = NULL;
728 struct hlist_node *node;
730 sk_for_each(sk, node, &l2cap_sk_list.head) {
731 if (state && sk->sk_state != state)
734 if (l2cap_pi(sk)->psm == psm) {
736 if (!bacmp(&bt_sk(sk)->src, src))
740 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node != NULL means the loop broke on an exact match. */
744 return node ? sk : sk1;
747 /* Find socket with given address (psm, src).
748 * Returns locked socket */
749 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
752 read_lock(&l2cap_sk_list.lock);
753 s = __l2cap_get_sock_by_psm(state, psm, src);
756 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: drop any skbs still queued on the socket. */
760 static void l2cap_sock_destruct(struct sock *sk)
764 skb_queue_purge(&sk->sk_receive_queue);
765 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then zap
 * the listener itself. */
768 static void l2cap_sock_cleanup_listen(struct sock *parent)
772 BT_DBG("parent %p", parent);
774 /* Close not yet accepted channels */
775 while ((sk = bt_accept_dequeue(parent, NULL)))
776 l2cap_sock_close(sk);
778 parent->sk_state = BT_CLOSED;
779 sock_set_flag(parent, SOCK_ZAPPED);
782 /* Kill socket (only if zapped and orphan)
783 * Must be called on unlocked socket.
/* Final put happens on an elided line; after this sk may be freed. */
785 static void l2cap_sock_kill(struct sock *sk)
787 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
790 BT_DBG("sk %p state %d", sk, sk->sk_state);
792 /* Kill poor orphan */
793 bt_sock_unlink(&l2cap_sk_list, sk);
794 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close: listening sockets sweep their accept queue;
 * connected/config channels send a Disconnect Request; BT_CONNECT2
 * channels answer the pending Connect Request with a rejection.
 * Caller holds the socket lock; reason is the error handed to peers. */
798 static void __l2cap_sock_close(struct sock *sk, int reason)
800 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket)
802 switch (sk->sk_state) {
804 l2cap_sock_cleanup_listen(sk);
/* BT_CONNECTED / BT_CONFIG: orderly disconnect with timeout. */
809 if (sk->sk_type == SOCK_SEQPACKET ||
810 sk->sk_type == SOCK_STREAM) {
811 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
813 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
814 l2cap_send_disconn_req(conn, sk, reason);
816 l2cap_chan_del(sk, reason);
/* BT_CONNECT2: a Connect Request is pending — reject it. */
820 if (sk->sk_type == SOCK_SEQPACKET ||
821 sk->sk_type == SOCK_STREAM) {
822 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
823 struct l2cap_conn_rsp rsp;
/* defer_setup rejection reads as "security block" to the peer. */
826 if (bt_sk(sk)->defer_setup)
827 result = L2CAP_CR_SEC_BLOCK;
829 result = L2CAP_CR_BAD_PSM;
831 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
832 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
833 rsp.result = cpu_to_le16(result);
834 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
835 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
836 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
838 l2cap_chan_del(sk, reason);
/* BT_CONNECT / BT_DISCONN: just drop the channel. */
843 l2cap_chan_del(sk, reason);
847 sock_set_flag(sk, SOCK_ZAPPED);
852 /* Must be called on unlocked socket. */
/* Locked wrapper: stop the timer, close with ECONNRESET. */
853 static void l2cap_sock_close(struct sock *sk)
855 l2cap_sock_clear_timer(sk);
857 __l2cap_sock_close(sk, ECONNRESET);
/* Initialise a new L2CAP socket.  With a parent (incoming connection)
 * the child inherits the listener's options; without one, module
 * defaults apply.  Always initialises the ERTM queues and lists. */
862 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
864 struct l2cap_pinfo *pi = l2cap_pi(sk);
869 sk->sk_type = parent->sk_type;
870 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
/* Inherit every channel option from the listening socket. */
872 pi->imtu = l2cap_pi(parent)->imtu;
873 pi->omtu = l2cap_pi(parent)->omtu;
874 pi->conf_state = l2cap_pi(parent)->conf_state;
875 pi->mode = l2cap_pi(parent)->mode;
876 pi->fcs = l2cap_pi(parent)->fcs;
877 pi->max_tx = l2cap_pi(parent)->max_tx;
878 pi->tx_win = l2cap_pi(parent)->tx_win;
879 pi->sec_level = l2cap_pi(parent)->sec_level;
880 pi->role_switch = l2cap_pi(parent)->role_switch;
881 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* No parent: module defaults. */
883 pi->imtu = L2CAP_DEFAULT_MTU;
/* SOCK_STREAM defaults to ERTM when the module parameter enables it. */
885 if (enable_ertm && sk->sk_type == SOCK_STREAM) {
886 pi->mode = L2CAP_MODE_ERTM;
887 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
889 pi->mode = L2CAP_MODE_BASIC;
891 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
892 pi->fcs = L2CAP_FCS_CRC16;
893 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
894 pi->sec_level = BT_SECURITY_LOW;
896 pi->force_reliable = 0;
899 /* Default config options */
901 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
902 skb_queue_head_init(TX_QUEUE(sk));
903 skb_queue_head_init(SREJ_QUEUE(sk));
904 skb_queue_head_init(BUSY_QUEUE(sk));
905 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor: sizes each socket for struct l2cap_pinfo. */
908 static struct proto l2cap_proto = {
910 .owner = THIS_MODULE,
911 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialise a socket: destructor, connect
 * timeout, state, sk_timer, and link it into the global socket list. */
914 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
918 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
922 sock_init_data(sock, sk);
923 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
925 sk->sk_destruct = l2cap_sock_destruct;
926 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
928 sock_reset_flag(sk, SOCK_ZAPPED);
930 sk->sk_protocol = proto;
931 sk->sk_state = BT_OPEN;
933 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
935 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) entry point: validate the socket type, require CAP_NET_RAW
 * for user-created raw sockets, then allocate and initialise. */
939 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
944 BT_DBG("sock %p", sock);
946 sock->state = SS_UNCONNECTED;
948 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
949 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
950 return -ESOCKTNOSUPPORT;
952 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
955 sock->ops = &l2cap_sock_ops;
957 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
961 l2cap_sock_init(sk, NULL);
/* bind(2): copy in the (possibly short) sockaddr_l2, enforce the
 * privileged-PSM rule, reject duplicate (psm, bdaddr) bindings, and
 * record the source address and PSM on the socket. */
965 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
967 struct sock *sk = sock->sk;
968 struct sockaddr_l2 la;
973 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Tolerate short sockaddrs: zero-fill then copy what the caller gave. */
976 memset(&la, 0, sizeof(la));
977 len = min_t(unsigned int, sizeof(la), alen);
978 memcpy(&la, addr, len);
985 if (sk->sk_state != BT_OPEN) {
/* PSMs below 0x1001 are reserved; require CAP_NET_BIND_SERVICE. */
990 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
991 !capable(CAP_NET_BIND_SERVICE)) {
996 write_lock_bh(&l2cap_sk_list.lock);
/* Refuse to double-bind the same PSM on the same source address. */
998 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
1001 /* Save source address */
1002 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
1003 l2cap_pi(sk)->psm = la.l2_psm;
1004 l2cap_pi(sk)->sport = la.l2_psm;
1005 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) need no elevated security. */
1007 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
1008 __le16_to_cpu(la.l2_psm) == 0x0003)
1009 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1012 write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve a route to dst, pick an HCI authentication type from the
 * socket type / PSM / security level, create the ACL link and attach
 * this channel to it.  Completes immediately for raw/dgram sockets if
 * the link is already up; otherwise waits for l2cap_conn_ready. */
1019 static int l2cap_do_connect(struct sock *sk)
1021 bdaddr_t *src = &bt_sk(sk)->src;
1022 bdaddr_t *dst = &bt_sk(sk)->dst;
1023 struct l2cap_conn *conn;
1024 struct hci_conn *hcon;
1025 struct hci_dev *hdev;
1029 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
/* Find the local adapter that can reach dst. */
1032 hdev = hci_get_route(dst, src);
1034 return -EHOSTUNREACH;
1036 hci_dev_lock_bh(hdev);
/* Raw sockets (e.g. pairing tools) use dedicated bonding. */
1040 if (sk->sk_type == SOCK_RAW) {
1041 switch (l2cap_pi(sk)->sec_level) {
1042 case BT_SECURITY_HIGH:
1043 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1045 case BT_SECURITY_MEDIUM:
1046 auth_type = HCI_AT_DEDICATED_BONDING;
1049 auth_type = HCI_AT_NO_BONDING;
/* SDP (PSM 0x0001): no bonding, mirroring l2cap_check_security. */
1052 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1053 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1054 auth_type = HCI_AT_NO_BONDING_MITM;
1056 auth_type = HCI_AT_NO_BONDING;
1058 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
1059 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1061 switch (l2cap_pi(sk)->sec_level) {
1062 case BT_SECURITY_HIGH:
1063 auth_type = HCI_AT_GENERAL_BONDING_MITM;
1065 case BT_SECURITY_MEDIUM:
1066 auth_type = HCI_AT_GENERAL_BONDING;
1069 auth_type = HCI_AT_NO_BONDING;
1074 hcon = hci_connect(hdev, ACL_LINK, dst,
1075 l2cap_pi(sk)->sec_level, auth_type);
1079 conn = l2cap_conn_add(hcon, 0);
1087 /* Update source addr of the socket */
1088 bacpy(src, conn->src);
1090 l2cap_chan_add(conn, sk, NULL);
1092 sk->sk_state = BT_CONNECT;
1093 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Link already up: raw/dgram channels are connected right away. */
1095 if (hcon->state == BT_CONNECTED) {
1096 if (sk->sk_type != SOCK_SEQPACKET &&
1097 sk->sk_type != SOCK_STREAM) {
1098 l2cap_sock_clear_timer(sk);
1099 sk->sk_state = BT_CONNECTED;
1105 hci_dev_unlock_bh(hdev);
/* connect(2): validate the address and the channel mode (ERTM/streaming
 * presumably gated on enable_ertm — the condition line is elided),
 * record dst/psm, start the connect and optionally block until
 * BT_CONNECTED or timeout. */
1110 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1112 struct sock *sk = sock->sk;
1113 struct sockaddr_l2 la;
1116 BT_DBG("sk %p", sk);
1118 if (!addr || alen < sizeof(addr->sa_family) ||
1119 addr->sa_family != AF_BLUETOOTH)
/* Tolerate short sockaddrs, as in bind(). */
1122 memset(&la, 0, sizeof(la));
1123 len = min_t(unsigned int, sizeof(la), alen);
1124 memcpy(&la, addr, len);
/* Connection-oriented sockets must carry a PSM (check partly elided). */
1131 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1137 switch (l2cap_pi(sk)->mode) {
1138 case L2CAP_MODE_BASIC:
1140 case L2CAP_MODE_ERTM:
1141 case L2CAP_MODE_STREAMING:
1150 switch (sk->sk_state) {
1154 /* Already connecting */
1158 /* Already connected */
1171 /* Set destination address and psm */
1172 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1173 l2cap_pi(sk)->psm = la.l2_psm;
1175 err = l2cap_do_connect(sk);
/* Non-blocking callers return here; otherwise wait for completion. */
1180 err = bt_sock_wait_state(sk, BT_CONNECTED,
1181 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET/STREAM sockets may listen; a socket
 * bound without a PSM gets an odd dynamic PSM auto-assigned from the
 * 0x1001..0x10ff range before entering BT_LISTEN. */
1187 static int l2cap_sock_listen(struct socket *sock, int backlog)
1189 struct sock *sk = sock->sk;
1192 BT_DBG("sk %p backlog %d", sk, backlog);
1196 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1197 || sk->sk_state != BT_BOUND) {
/* Same mode gate as connect(): ERTM/streaming need enable_ertm. */
1202 switch (l2cap_pi(sk)->mode) {
1203 case L2CAP_MODE_BASIC:
1205 case L2CAP_MODE_ERTM:
1206 case L2CAP_MODE_STREAMING:
1215 if (!l2cap_pi(sk)->psm) {
1216 bdaddr_t *src = &bt_sk(sk)->src;
1221 write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd — hence the += 2 stride. */
1223 for (psm = 0x1001; psm < 0x1100; psm += 2)
1224 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1225 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1226 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1231 write_unlock_bh(&l2cap_sk_list.lock);
1237 sk->sk_max_ack_backlog = backlog;
1238 sk->sk_ack_backlog = 0;
1239 sk->sk_state = BT_LISTEN;
/* accept(2): sleep (wake-one) on the listener's wait queue until
 * bt_accept_dequeue yields a child, the listener leaves BT_LISTEN,
 * the timeout expires, or a signal arrives. */
1246 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1248 DECLARE_WAITQUEUE(wait, current);
1249 struct sock *sk = sock->sk, *nsk;
1253 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1255 if (sk->sk_state != BT_LISTEN) {
1260 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1262 BT_DBG("sk %p timeo %ld", sk, timeo);
1264 /* Wait for an incoming connection. (wake-one). */
1265 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1266 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1267 set_current_state(TASK_INTERRUPTIBLE);
/* Socket lock is dropped around the sleep (release elided here). */
1274 timeo = schedule_timeout(timeo);
1275 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
/* Listener may have been closed while we slept. */
1277 if (sk->sk_state != BT_LISTEN) {
1282 if (signal_pending(current)) {
1283 err = sock_intr_errno(timeo);
1287 set_current_state(TASK_RUNNING);
1288 remove_wait_queue(sk_sleep(sk), &wait);
1293 newsock->state = SS_CONNECTED;
1295 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with either the peer's
 * (peer != 0) or the local end's psm/bdaddr/cid. */
1302 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1304 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1305 struct sock *sk = sock->sk;
1307 BT_DBG("sock %p, sk %p", sock, sk);
1309 addr->sa_family = AF_BLUETOOTH;
1310 *len = sizeof(struct sockaddr_l2);
/* peer branch: remote psm, dst address, destination CID. */
1313 la->l2_psm = l2cap_pi(sk)->psm;
1314 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1315 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
/* local branch: bound sport, src address, source CID. */
1317 la->l2_psm = l2cap_pi(sk)->sport;
1318 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1319 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* Block until every ERTM frame in flight has been acked, the channel
 * detaches, a timeout elapses, or a signal/socket error interrupts. */
1325 static int __l2cap_wait_ack(struct sock *sk)
1327 DECLARE_WAITQUEUE(wait, current);
1331 add_wait_queue(sk_sleep(sk), &wait);
1332 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1333 set_current_state(TASK_INTERRUPTIBLE);
1338 if (signal_pending(current)) {
1339 err = sock_intr_errno(timeo);
1344 timeo = schedule_timeout(timeo);
1347 err = sock_error(sk);
1351 set_current_state(TASK_RUNNING);
1352 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: the peer has not answered our Poll.  Give up and
 * disconnect after remote_max_tx retries, else poll again (RR/RNR with
 * the P bit) and rearm. */
1356 static void l2cap_monitor_timeout(unsigned long arg)
1358 struct sock *sk = (void *) arg;
1360 BT_DBG("sk %p", sk);
1363 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1364 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1369 l2cap_pi(sk)->retry_count++;
1370 __mod_monitor_timer();
1372 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* ERTM retransmission timer: an I-frame went unacked.  Enter the WAIT_F
 * state and poll the peer; the monitor timer now takes over. */
1376 static void l2cap_retrans_timeout(unsigned long arg)
1378 struct sock *sk = (void *) arg;
1380 BT_DBG("sk %p", sk);
1383 l2cap_pi(sk)->retry_count = 1;
1384 __mod_monitor_timer();
1386 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1388 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Free TX-queue frames acknowledged by the peer, i.e. everything up to
 * (but not including) expected_ack_seq; stop the retransmission timer
 * once nothing is left unacked. */
1392 static void l2cap_drop_acked_frames(struct sock *sk)
1394 struct sk_buff *skb;
1396 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1397 l2cap_pi(sk)->unacked_frames) {
1398 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1401 skb = skb_dequeue(TX_QUEUE(sk));
1404 l2cap_pi(sk)->unacked_frames--;
1407 if (!l2cap_pi(sk)->unacked_frames)
1408 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand a fully built frame to the HCI layer for transmission. */
1411 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1413 struct l2cap_pinfo *pi = l2cap_pi(sk);
1415 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1417 hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode TX: send every queued frame once (clone per send,
 * TxSeq stamped into the control field, optional CRC16 trailer); frames
 * are dequeued and freed immediately — no retransmission. */
1420 static int l2cap_streaming_send(struct sock *sk)
1422 struct sk_buff *skb, *tx_skb;
1423 struct l2cap_pinfo *pi = l2cap_pi(sk);
1426 while ((skb = sk->sk_send_head)) {
1427 tx_skb = skb_clone(skb, GFP_ATOMIC);
1429 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1430 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1431 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS lives in the last two bytes of the frame. */
1433 if (pi->fcs == L2CAP_FCS_CRC16) {
1434 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1435 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1438 l2cap_do_send(sk, tx_skb);
/* TxSeq is modulo-64 per the ERTM/streaming sequence space. */
1440 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1442 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1443 sk->sk_send_head = NULL;
1445 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1447 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the single I-frame with sequence number tx_seq (in answer
 * to an SREJ).  Disconnects instead if remote_max_tx is exhausted. */
1453 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1455 struct l2cap_pinfo *pi = l2cap_pi(sk);
1456 struct sk_buff *skb, *tx_skb;
/* Scan the unacked TX queue for the frame with matching tx_seq. */
1459 skb = skb_peek(TX_QUEUE(sk));
1464 if (bt_cb(skb)->tx_seq == tx_seq)
1467 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1470 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1472 if (pi->remote_max_tx &&
1473 bt_cb(skb)->retries == pi->remote_max_tx) {
1474 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1478 tx_skb = skb_clone(skb, GFP_ATOMIC);
1479 bt_cb(skb)->retries++;
1480 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
/* Consume a pending Final bit, if owed. */
1482 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1483 control |= L2CAP_CTRL_FINAL;
1484 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Refresh ReqSeq (our ack) and restore the original TxSeq. */
1487 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1488 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1490 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1492 if (pi->fcs == L2CAP_FCS_CRC16) {
1493 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1494 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1497 l2cap_do_send(sk, tx_skb);
/* ERTM TX path: transmit queued I-frames while the TX window allows,
 * stamping ReqSeq/TxSeq (plus a pending F bit) into each clone, adding
 * the CRC16 trailer when configured, and arming the retransmission
 * timer.  Frames stay on TX_QUEUE until acked (l2cap_drop_acked_frames).
 * Disconnects if a frame has already been sent remote_max_tx times. */
1500 static int l2cap_ertm_send(struct sock *sk)
1502 struct sk_buff *skb, *tx_skb;
1503 struct l2cap_pinfo *pi = l2cap_pi(sk);
1507 if (sk->sk_state != BT_CONNECTED)
1510 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1512 if (pi->remote_max_tx &&
1513 bt_cb(skb)->retries == pi->remote_max_tx) {
1514 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1518 tx_skb = skb_clone(skb, GFP_ATOMIC);
1520 bt_cb(skb)->retries++;
/* Keep only the SAR bits from the prebuilt control field. */
1522 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1523 control &= L2CAP_CTRL_SAR;
1525 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1526 control |= L2CAP_CTRL_FINAL;
1527 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1529 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1530 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1531 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FIX: compute and store the FCS through the clone (tx_skb), matching
 * l2cap_streaming_send/l2cap_retransmit_one_frame.  The original used
 * skb->data, which only worked because skb_clone shares the buffer. */
1534 if (pi->fcs == L2CAP_FCS_CRC16) {
1535 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1536 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1539 l2cap_do_send(sk, tx_skb);
1541 __mod_retrans_timer();
/* Remember the TxSeq used so acks/SREJs can find this frame. */
1543 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1544 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1546 pi->unacked_frames++;
1549 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1550 sk->sk_send_head = NULL;
1552 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/*
 * Rewind transmission to the first unacknowledged frame and resend.
 * Resets sk_send_head to the front of the TX queue and next_tx_seq to
 * expected_ack_seq, then drives l2cap_ertm_send() under send_lock.
 */
1560 static int l2cap_retransmit_frames(struct sock *sk)
1562 struct l2cap_pinfo *pi = l2cap_pi(sk);
1565 spin_lock_bh(&pi->send_lock);
1567 if (!skb_queue_empty(TX_QUEUE(sk)))
1568 sk->sk_send_head = TX_QUEUE(sk)->next;
1570 pi->next_tx_seq = pi->expected_ack_seq;
1571 ret = l2cap_ertm_send(sk);
1573 spin_unlock_bh(&pi->send_lock);
/*
 * Acknowledge received I-frames.  If the local side is busy, send an
 * RNR S-frame and mark RNR_SENT; otherwise try to piggyback the ack on
 * pending I-frames via l2cap_ertm_send(), falling back to an RR
 * S-frame when nothing was sent.
 */
1578 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1580 struct sock *sk = (struct sock *)pi;
1584 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1586 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1587 control |= L2CAP_SUPER_RCV_NOT_READY;
1588 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1589 l2cap_send_sframe(pi, control);
/* Not busy: prefer acking implicitly through queued I-frames */
1593 spin_lock_bh(&pi->send_lock);
1594 nframes = l2cap_ertm_send(sk);
1595 spin_unlock_bh(&pi->send_lock);
/* No I-frame went out, so send an explicit Receiver Ready */
1600 control |= L2CAP_SUPER_RCV_READY;
1601 l2cap_send_sframe(pi, control);
/*
 * Send a SREJ S-frame with the F-bit set, requesting the sequence
 * number recorded at the tail of the SREJ list.
 */
1604 static void l2cap_send_srejtail(struct sock *sk)
1606 struct srej_list *tail;
1609 control = L2CAP_SUPER_SELECT_REJECT;
1610 control |= L2CAP_CTRL_FINAL;
1612 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1613 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1615 l2cap_send_sframe(l2cap_pi(sk), control);
/*
 * Copy @len bytes of user data from @msg into @skb: @count bytes into
 * the skb head, the remainder into a chain of fragment skbs (each at
 * most conn->mtu) hung off frag_list.  Fragments carry no L2CAP header.
 * Returns 0 on success or a negative errno on copy/alloc failure
 * (error paths elided in this view).
 */
1618 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1620 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1621 struct sk_buff **frag;
1624 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1630 /* Continuation fragments (no L2CAP header) */
1631 frag = &skb_shinfo(skb)->frag_list;
1633 count = min_t(unsigned int, conn->mtu, len);
1635 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1638 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1644 frag = &(*frag)->next;
/*
 * Build a connectionless (SOCK_DGRAM) PDU: L2CAP header + 2-byte PSM,
 * followed by @len bytes of user data gathered from @msg.
 * Returns the skb on success or an ERR_PTR on allocation/copy failure.
 */
1650 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1652 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1653 struct sk_buff *skb;
1654 int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* +2 for the PSM field */
1655 struct l2cap_hdr *lh;
1657 BT_DBG("sk %p len %d", sk, (int)len);
/* Head skb holds at most (mtu - header); rest goes into fragments */
1659 count = min_t(unsigned int, (conn->mtu - hlen), len);
1660 skb = bt_skb_send_alloc(sk, count + hlen,
1661 msg->msg_flags & MSG_DONTWAIT, &err);
1663 return ERR_PTR(-ENOMEM);
1665 /* Create L2CAP header */
1666 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1667 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1668 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1669 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1671 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1672 if (unlikely(err < 0)) {
1674 return ERR_PTR(err);
/*
 * Build a Basic-mode PDU: plain L2CAP header followed by @len bytes of
 * user data from @msg.  Returns the skb or an ERR_PTR on failure.
 */
1679 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1681 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1682 struct sk_buff *skb;
1683 int err, count, hlen = L2CAP_HDR_SIZE;
1684 struct l2cap_hdr *lh;
1686 BT_DBG("sk %p len %d", sk, (int)len);
1688 count = min_t(unsigned int, (conn->mtu - hlen), len);
1689 skb = bt_skb_send_alloc(sk, count + hlen,
1690 msg->msg_flags & MSG_DONTWAIT, &err);
1692 return ERR_PTR(-ENOMEM);
1694 /* Create L2CAP header */
1695 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1696 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1697 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1699 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1700 if (unlikely(err < 0)) {
1702 return ERR_PTR(err);
/*
 * Build an ERTM/Streaming I-frame PDU: L2CAP header, 16-bit control
 * word, optional 2-byte SDU length (for SAR start frames, when
 * @sdulen is nonzero), @len bytes of payload, and a 2-byte FCS
 * placeholder when CRC16 is negotiated (filled in at transmit time).
 * Returns the skb or an ERR_PTR on failure.
 */
1707 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1709 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1710 struct sk_buff *skb;
1711 int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* +2 for control field */
1712 struct l2cap_hdr *lh;
1714 BT_DBG("sk %p len %d", sk, (int)len);
1717 return ERR_PTR(-ENOTCONN);
/* Reserve room for the trailing FCS when CRC16 is in use */
1722 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1725 count = min_t(unsigned int, (conn->mtu - hlen), len);
1726 skb = bt_skb_send_alloc(sk, count + hlen,
1727 msg->msg_flags & MSG_DONTWAIT, &err);
1729 return ERR_PTR(-ENOMEM);
1731 /* Create L2CAP header */
1732 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1733 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1734 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1735 put_unaligned_le16(control, skb_put(skb, 2));
1737 put_unaligned_le16(sdulen, skb_put(skb, 2));
1739 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1740 if (unlikely(err < 0)) {
1742 return ERR_PTR(err);
/* FCS placeholder; real CRC is computed when the frame is sent */
1745 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1746 put_unaligned_le16(0, skb_put(skb, 2));
1748 bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than remote_mps into a chain of I-frame PDUs
 * (SAR start / continue / end), building them on a local queue and
 * splicing onto the socket TX queue only once the whole SDU has been
 * segmented, so a mid-way failure leaves the TX queue untouched.
 */
1752 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1754 struct l2cap_pinfo *pi = l2cap_pi(sk);
1755 struct sk_buff *skb;
1756 struct sk_buff_head sar_queue;
1760 skb_queue_head_init(&sar_queue);
/* First PDU: SDU start, carries the total SDU length */
1761 control = L2CAP_SDU_START;
1762 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1764 return PTR_ERR(skb);
1766 __skb_queue_tail(&sar_queue, skb);
1767 len -= pi->remote_mps;
1768 size += pi->remote_mps;
/* Middle/last PDUs: continue frames until the remainder fits */
1773 if (len > pi->remote_mps) {
1774 control = L2CAP_SDU_CONTINUE;
1775 buflen = pi->remote_mps;
1777 control = L2CAP_SDU_END;
1781 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1783 skb_queue_purge(&sar_queue);
1784 return PTR_ERR(skb);
1787 __skb_queue_tail(&sar_queue, skb);
1791 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1792 spin_lock_bh(&pi->send_lock);
1793 if (sk->sk_send_head == NULL)
1794 sk->sk_send_head = sar_queue.next;
1795 spin_unlock_bh(&pi->send_lock);
/*
 * sendmsg() entry point for L2CAP sockets.  Dispatches on socket type
 * and channel mode: connectionless PDU for SOCK_DGRAM, a single basic
 * PDU in Basic mode, and I-frame(s) (segmented via SAR when the SDU
 * exceeds remote_mps) in ERTM/Streaming modes, after which the send
 * engine is kicked unless the remote is busy while we await an F-bit.
 */
1800 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1802 struct sock *sk = sock->sk;
1803 struct l2cap_pinfo *pi = l2cap_pi(sk);
1804 struct sk_buff *skb;
1808 BT_DBG("sock %p, sk %p", sock, sk);
1810 err = sock_error(sk);
1814 if (msg->msg_flags & MSG_OOB)
1819 if (sk->sk_state != BT_CONNECTED) {
1824 /* Connectionless channel */
1825 if (sk->sk_type == SOCK_DGRAM) {
1826 skb = l2cap_create_connless_pdu(sk, msg, len);
1830 l2cap_do_send(sk, skb);
1837 case L2CAP_MODE_BASIC:
1838 /* Check outgoing MTU */
1839 if (len > pi->omtu) {
1844 /* Create a basic PDU */
1845 skb = l2cap_create_basic_pdu(sk, msg, len);
1851 l2cap_do_send(sk, skb);
1855 case L2CAP_MODE_ERTM:
1856 case L2CAP_MODE_STREAMING:
1857 /* Entire SDU fits into one PDU */
1858 if (len <= pi->remote_mps) {
1859 control = L2CAP_SDU_UNSEGMENTED;
1860 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1865 __skb_queue_tail(TX_QUEUE(sk), skb);
/* send_lock only exists/matters for ERTM; Streaming has no window */
1867 if (pi->mode == L2CAP_MODE_ERTM)
1868 spin_lock_bh(&pi->send_lock);
1870 if (sk->sk_send_head == NULL)
1871 sk->sk_send_head = skb;
1873 if (pi->mode == L2CAP_MODE_ERTM)
1874 spin_unlock_bh(&pi->send_lock);
1876 /* Segment SDU into multiples PDUs */
1877 err = l2cap_sar_segment_sdu(sk, msg, len);
1882 if (pi->mode == L2CAP_MODE_STREAMING) {
1883 err = l2cap_streaming_send(sk);
/* FIX: test the WAIT_F flag with bitwise '&'.  The original used the
 * logical '&&', which is true whenever conn_state is nonzero, so the
 * "hold off while remote is busy and we wait for the F-bit" check
 * misfired and frames were wrongly withheld. */
1885 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1886 pi->conn_state & L2CAP_CONN_WAIT_F) {
1890 spin_lock_bh(&pi->send_lock);
1891 err = l2cap_ertm_send(sk);
1892 spin_unlock_bh(&pi->send_lock);
1900 BT_DBG("bad state %1.1x", pi->mode);
/*
 * recvmsg() entry point.  If the channel is in BT_CONNECT2 with
 * deferred setup, the first read completes the connection by sending
 * the pending CONN_RSP; otherwise delegate to the generic BT receive.
 */
1909 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1911 struct sock *sk = sock->sk;
1915 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1916 struct l2cap_conn_rsp rsp;
1918 sk->sk_state = BT_CONFIG;
1920 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1921 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1922 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1923 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1924 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1925 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1933 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/*
 * Legacy SOL_L2CAP setsockopt handler: L2CAP_OPTIONS (mtu, mode, fcs,
 * tx window, max_tx) and L2CAP_LM link-mode flags mapped onto the
 * newer sec_level scheme.
 */
1936 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1938 struct sock *sk = sock->sk;
1939 struct l2cap_options opts;
1943 BT_DBG("sk %p", sk);
/* Seed opts with current values so a short copy keeps the rest */
1949 opts.imtu = l2cap_pi(sk)->imtu;
1950 opts.omtu = l2cap_pi(sk)->omtu;
1951 opts.flush_to = l2cap_pi(sk)->flush_to;
1952 opts.mode = l2cap_pi(sk)->mode;
1953 opts.fcs = l2cap_pi(sk)->fcs;
1954 opts.max_tx = l2cap_pi(sk)->max_tx;
1955 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1957 len = min_t(unsigned int, sizeof(opts), optlen);
1958 if (copy_from_user((char *) &opts, optval, len)) {
1963 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1968 l2cap_pi(sk)->mode = opts.mode;
1969 switch (l2cap_pi(sk)->mode) {
1970 case L2CAP_MODE_BASIC:
1971 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
1973 case L2CAP_MODE_ERTM:
1974 case L2CAP_MODE_STREAMING:
1983 l2cap_pi(sk)->imtu = opts.imtu;
1984 l2cap_pi(sk)->omtu = opts.omtu;
1985 l2cap_pi(sk)->fcs = opts.fcs;
1986 l2cap_pi(sk)->max_tx = opts.max_tx;
1987 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1991 if (get_user(opt, (u32 __user *) optval)) {
/* Map legacy link-mode bits to security levels (highest bit wins) */
1996 if (opt & L2CAP_LM_AUTH)
1997 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1998 if (opt & L2CAP_LM_ENCRYPT)
1999 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
2000 if (opt & L2CAP_LM_SECURE)
2001 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
2003 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
2004 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/*
 * setsockopt() entry point.  SOL_L2CAP goes to the legacy handler;
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP.
 */
2016 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
2018 struct sock *sk = sock->sk;
2019 struct bt_security sec;
2023 BT_DBG("sk %p", sk);
2025 if (level == SOL_L2CAP)
2026 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
2028 if (level != SOL_BLUETOOTH)
2029 return -ENOPROTOOPT;
/* BT_SECURITY only applies to connection-oriented and raw sockets */
2035 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2036 && sk->sk_type != SOCK_RAW) {
2041 sec.level = BT_SECURITY_LOW;
2043 len = min_t(unsigned int, sizeof(sec), optlen);
2044 if (copy_from_user((char *) &sec, optval, len)) {
2049 if (sec.level < BT_SECURITY_LOW ||
2050 sec.level > BT_SECURITY_HIGH) {
2055 l2cap_pi(sk)->sec_level = sec.level;
2058 case BT_DEFER_SETUP:
/* Deferred setup can only be toggled before the channel connects */
2059 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2064 if (get_user(opt, (u32 __user *) optval)) {
2069 bt_sk(sk)->defer_setup = opt;
/*
 * Legacy SOL_L2CAP getsockopt handler: returns L2CAP_OPTIONS, the
 * link-mode flags reconstructed from sec_level, or L2CAP_CONNINFO
 * (HCI handle + remote device class) for connected channels.
 */
2081 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2083 struct sock *sk = sock->sk;
2084 struct l2cap_options opts;
2085 struct l2cap_conninfo cinfo;
2089 BT_DBG("sk %p", sk);
2091 if (get_user(len, optlen))
2098 opts.imtu = l2cap_pi(sk)->imtu;
2099 opts.omtu = l2cap_pi(sk)->omtu;
2100 opts.flush_to = l2cap_pi(sk)->flush_to;
2101 opts.mode = l2cap_pi(sk)->mode;
2102 opts.fcs = l2cap_pi(sk)->fcs;
2103 opts.max_tx = l2cap_pi(sk)->max_tx;
2104 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2106 len = min_t(unsigned int, len, sizeof(opts));
2107 if (copy_to_user(optval, (char *) &opts, len))
/* Reverse-map sec_level to the legacy L2CAP_LM bit set */
2113 switch (l2cap_pi(sk)->sec_level) {
2114 case BT_SECURITY_LOW:
2115 opt = L2CAP_LM_AUTH;
2117 case BT_SECURITY_MEDIUM:
2118 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2120 case BT_SECURITY_HIGH:
2121 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2129 if (l2cap_pi(sk)->role_switch)
2130 opt |= L2CAP_LM_MASTER;
2132 if (l2cap_pi(sk)->force_reliable)
2133 opt |= L2CAP_LM_RELIABLE;
2135 if (put_user(opt, (u32 __user *) optval))
2139 case L2CAP_CONNINFO:
/* CONNINFO is valid once connected, or in deferred-setup CONNECT2 */
2140 if (sk->sk_state != BT_CONNECTED &&
2141 !(sk->sk_state == BT_CONNECT2 &&
2142 bt_sk(sk)->defer_setup)) {
2147 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2148 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2150 len = min_t(unsigned int, len, sizeof(cinfo));
2151 if (copy_to_user(optval, (char *) &cinfo, len))
/*
 * getsockopt() entry point.  SOL_L2CAP goes to the legacy handler;
 * SOL_BLUETOOTH returns BT_SECURITY and BT_DEFER_SETUP.
 */
2165 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2167 struct sock *sk = sock->sk;
2168 struct bt_security sec;
2171 BT_DBG("sk %p", sk);
2173 if (level == SOL_L2CAP)
2174 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2176 if (level != SOL_BLUETOOTH)
2177 return -ENOPROTOOPT;
2179 if (get_user(len, optlen))
2186 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2187 && sk->sk_type != SOCK_RAW) {
2192 sec.level = l2cap_pi(sk)->sec_level;
2194 len = min_t(unsigned int, len, sizeof(sec));
2195 if (copy_to_user(optval, (char *) &sec, len))
2200 case BT_DEFER_SETUP:
2201 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2206 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/*
 * shutdown() entry point.  For ERTM channels, waits for outstanding
 * acks first, then tears the channel down and optionally lingers
 * until BT_CLOSED per SO_LINGER.
 */
2220 static int l2cap_sock_shutdown(struct socket *sock, int how)
2222 struct sock *sk = sock->sk;
2225 BT_DBG("sock %p, sk %p", sock, sk);
2231 if (!sk->sk_shutdown) {
2232 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2233 err = __l2cap_wait_ack(sk);
2235 sk->sk_shutdown = SHUTDOWN_MASK;
2236 l2cap_sock_clear_timer(sk);
2237 __l2cap_sock_close(sk, 0);
2239 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2240 err = bt_sock_wait_state(sk, BT_CLOSED,
2244 if (!err && sk->sk_err)
/*
 * release() entry point: full shutdown (both directions) followed by
 * killing the socket.
 */
2251 static int l2cap_sock_release(struct socket *sock)
2253 struct sock *sk = sock->sk;
2256 BT_DBG("sock %p, sk %p", sock, sk);
2261 err = l2cap_sock_shutdown(sock, 2);
2264 l2cap_sock_kill(sk);
/*
 * Mark a channel fully configured.  Outgoing channels (no parent)
 * move to BT_CONNECTED and wake the connector; incoming channels
 * notify the listening parent so accept() can complete.
 */
2268 static void l2cap_chan_ready(struct sock *sk)
2270 struct sock *parent = bt_sk(sk)->parent;
2272 BT_DBG("sk %p, parent %p", sk, parent);
2274 l2cap_pi(sk)->conf_state = 0;
2275 l2cap_sock_clear_timer(sk);
2278 /* Outgoing channel.
2279 * Wake up socket sleeping on connect.
2281 sk->sk_state = BT_CONNECTED;
2282 sk->sk_state_change(sk);
2284 /* Incoming channel.
2285 * Wake up socket sleeping on accept.
2287 parent->sk_data_ready(parent, 0);
2291 /* Copy frame to all raw sockets on that connection */
2292 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2294 struct l2cap_chan_list *l = &conn->chan_list;
2295 struct sk_buff *nskb;
2298 BT_DBG("conn %p", conn);
2300 read_lock(&l->lock);
2301 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2302 if (sk->sk_type != SOCK_RAW)
2305 /* Don't send frame to the socket it came from */
/* Clone per socket so each receiver owns an independent skb */
2308 nskb = skb_clone(skb, GFP_ATOMIC);
2312 if (sock_queue_rcv_skb(sk, nskb))
2315 read_unlock(&l->lock);
2318 /* ---- L2CAP signalling commands ---- */
/*
 * Allocate and populate a signalling-channel skb: L2CAP header
 * (CID 0x0001), command header (code/ident/len), and @dlen bytes of
 * @data, spilling into frag_list fragments when @data exceeds the
 * connection MTU.  Returns NULL-ish on alloc failure (elided here).
 */
2319 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2320 u8 code, u8 ident, u16 dlen, void *data)
2322 struct sk_buff *skb, **frag;
2323 struct l2cap_cmd_hdr *cmd;
2324 struct l2cap_hdr *lh;
2327 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2328 conn, code, ident, dlen);
2330 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2331 count = min_t(unsigned int, conn->mtu, len);
2333 skb = bt_skb_alloc(count, GFP_ATOMIC);
2337 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2338 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2339 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2341 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2344 cmd->len = cpu_to_le16(dlen);
2347 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2348 memcpy(skb_put(skb, count), data, count);
2354 /* Continuation fragments (no L2CAP header) */
2355 frag = &skb_shinfo(skb)->frag_list;
2357 count = min_t(unsigned int, conn->mtu, len);
2359 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2363 memcpy(skb_put(*frag, count), data, count);
2368 frag = &(*frag)->next;
/*
 * Decode one configuration option at *ptr: returns its total encoded
 * length (header + payload) and fills @type, @olen and @val.  Values
 * of 1/2/4 bytes are decoded from little-endian; any other length is
 * returned as a pointer to the raw payload.
 */
2378 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2380 struct l2cap_conf_opt *opt = *ptr;
2383 len = L2CAP_CONF_OPT_SIZE + opt->len;
2391 *val = *((u8 *) opt->val);
2395 *val = __le16_to_cpu(*((__le16 *) opt->val));
2399 *val = __le32_to_cpu(*((__le32 *) opt->val));
2403 *val = (unsigned long) opt->val;
2407 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option at *ptr and advance the cursor.
 * 1/2/4-byte values are encoded little-endian; longer values are
 * copied verbatim from the buffer @val points at.
 */
2411 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2413 struct l2cap_conf_opt *opt = *ptr;
2415 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2422 *((u8 *) opt->val) = val;
2426 *((__le16 *) opt->val) = cpu_to_le16(val);
2430 *((__le32 *) opt->val) = cpu_to_le32(val);
2434 memcpy(opt->val, (void *) val, len);
2438 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Ack timer expiry: send a pending acknowledgement for the channel. */
2441 static void l2cap_ack_timeout(unsigned long arg)
2443 struct sock *sk = (void *) arg;
2446 l2cap_send_ack(l2cap_pi(sk));
/*
 * Initialise per-channel ERTM state: sequence counters, the retrans/
 * monitor/ack timers, the SREJ and busy queues, the send lock, and
 * the local-busy work item.
 */
2450 static inline void l2cap_ertm_init(struct sock *sk)
2452 l2cap_pi(sk)->expected_ack_seq = 0;
2453 l2cap_pi(sk)->unacked_frames = 0;
2454 l2cap_pi(sk)->buffer_seq = 0;
2455 l2cap_pi(sk)->num_acked = 0;
2456 l2cap_pi(sk)->frames_sent = 0;
2458 setup_timer(&l2cap_pi(sk)->retrans_timer,
2459 l2cap_retrans_timeout, (unsigned long) sk);
2460 setup_timer(&l2cap_pi(sk)->monitor_timer,
2461 l2cap_monitor_timeout, (unsigned long) sk);
2462 setup_timer(&l2cap_pi(sk)->ack_timer,
2463 l2cap_ack_timeout, (unsigned long) sk);
2465 __skb_queue_head_init(SREJ_QUEUE(sk));
2466 __skb_queue_head_init(BUSY_QUEUE(sk));
2467 spin_lock_init(&l2cap_pi(sk)->send_lock);
2469 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
/*
 * Pick the channel mode to request: keep ERTM/Streaming if the remote
 * feature mask supports it, otherwise fall back to Basic mode.
 */
2472 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2475 case L2CAP_MODE_STREAMING:
2476 case L2CAP_MODE_ERTM:
2477 if (l2cap_mode_supported(mode, remote_feat_mask))
2481 return L2CAP_MODE_BASIC;
/*
 * Build an outgoing Configuration Request for this channel into @data:
 * MTU option (when non-default), an RFC option describing the selected
 * mode (Basic / ERTM / Streaming), and an FCS option when we want to
 * disable the CRC.  Returns the request length (return elided here).
 */
2485 static int l2cap_build_conf_req(struct sock *sk, void *data)
2487 struct l2cap_pinfo *pi = l2cap_pi(sk);
2488 struct l2cap_conf_req *req = data;
2489 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2490 void *ptr = req->data;
2492 BT_DBG("sk %p", sk);
/* On the first request we may still downgrade the mode */
2494 if (pi->num_conf_req || pi->num_conf_rsp)
2498 case L2CAP_MODE_STREAMING:
2499 case L2CAP_MODE_ERTM:
2500 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
2505 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2511 case L2CAP_MODE_BASIC:
2512 if (pi->imtu != L2CAP_DEFAULT_MTU)
2513 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2515 rfc.mode = L2CAP_MODE_BASIC;
2517 rfc.max_transmit = 0;
2518 rfc.retrans_timeout = 0;
2519 rfc.monitor_timeout = 0;
2520 rfc.max_pdu_size = 0;
2524 case L2CAP_MODE_ERTM:
2525 rfc.mode = L2CAP_MODE_ERTM;
2526 rfc.txwin_size = pi->tx_win;
2527 rfc.max_transmit = pi->max_tx;
2528 rfc.retrans_timeout = 0;
2529 rfc.monitor_timeout = 0;
2530 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Clamp MPS so frame + overhead fits the ACL MTU (10 bytes headroom) */
2531 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2532 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2534 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2537 if (pi->fcs == L2CAP_FCS_NONE ||
2538 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2539 pi->fcs = L2CAP_FCS_NONE;
2540 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2544 case L2CAP_MODE_STREAMING:
2545 rfc.mode = L2CAP_MODE_STREAMING;
2547 rfc.max_transmit = 0;
2548 rfc.retrans_timeout = 0;
2549 rfc.monitor_timeout = 0;
2550 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2551 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2552 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2554 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2557 if (pi->fcs == L2CAP_FCS_NONE ||
2558 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2559 pi->fcs = L2CAP_FCS_NONE;
2560 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2565 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2566 (unsigned long) &rfc);
2568 /* FIXME: Need actual value of the flush timeout */
2569 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2570 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2572 req->dcid = cpu_to_le16(pi->dcid);
2573 req->flags = cpu_to_le16(0);
/*
 * Parse the accumulated incoming Configuration Request (pi->conf_req /
 * pi->conf_len) and build our Configuration Response into @data.
 * Unknown non-hint options are echoed back with CONF_UNKNOWN; MTU and
 * RFC options are validated and the accepted values stored in pi.
 * Returns the response length (return elided in this view) or
 * -ECONNREFUSED when no acceptable mode exists.
 */
2578 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2580 struct l2cap_pinfo *pi = l2cap_pi(sk);
2581 struct l2cap_conf_rsp *rsp = data;
2582 void *ptr = rsp->data;
2583 void *req = pi->conf_req;
2584 int len = pi->conf_len;
2585 int type, hint, olen;
2587 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2588 u16 mtu = L2CAP_DEFAULT_MTU;
2589 u16 result = L2CAP_CONF_SUCCESS;
2591 BT_DBG("sk %p", sk);
2593 while (len >= L2CAP_CONF_OPT_SIZE) {
2594 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2596 hint = type & L2CAP_CONF_HINT;
2597 type &= L2CAP_CONF_MASK;
2600 case L2CAP_CONF_MTU:
2604 case L2CAP_CONF_FLUSH_TO:
2608 case L2CAP_CONF_QOS:
2611 case L2CAP_CONF_RFC:
2612 if (olen == sizeof(rfc))
2613 memcpy(&rfc, (void *) val, olen);
2616 case L2CAP_CONF_FCS:
2617 if (val == L2CAP_FCS_NONE)
2618 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown, non-hint option: reject it by echoing the type back */
2626 result = L2CAP_CONF_UNKNOWN;
2627 *((u8 *) ptr++) = type;
2632 if (pi->num_conf_rsp || pi->num_conf_req)
2636 case L2CAP_MODE_STREAMING:
2637 case L2CAP_MODE_ERTM:
2638 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2639 pi->mode = l2cap_select_mode(rfc.mode,
2640 pi->conn->feat_mask);
2644 if (pi->mode != rfc.mode)
2645 return -ECONNREFUSED;
2651 if (pi->mode != rfc.mode) {
2652 result = L2CAP_CONF_UNACCEPT;
2653 rfc.mode = pi->mode;
/* Second attempt with a still-mismatched mode: give up */
2655 if (pi->num_conf_rsp == 1)
2656 return -ECONNREFUSED;
2658 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2659 sizeof(rfc), (unsigned long) &rfc);
2663 if (result == L2CAP_CONF_SUCCESS) {
2664 /* Configure output options and let the other side know
2665 * which ones we don't like. */
2667 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2668 result = L2CAP_CONF_UNACCEPT;
2671 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2673 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2676 case L2CAP_MODE_BASIC:
2677 pi->fcs = L2CAP_FCS_NONE;
2678 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2681 case L2CAP_MODE_ERTM:
2682 pi->remote_tx_win = rfc.txwin_size;
2683 pi->remote_max_tx = rfc.max_transmit;
/* FIX: rfc.max_pdu_size is wire-format (__le16); convert before
 * comparing against the host-order MTU, and store the clamped
 * value with cpu_to_le16() (the original used le16_to_cpu() on a
 * host-order value, which is wrong on big-endian machines). */
2684 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2685 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2687 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* FIX: timeouts are host constants being written into a wire-format
 * struct, so encode with cpu_to_le16(), not le16_to_cpu(). */
2689 rfc.retrans_timeout =
2690 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2691 rfc.monitor_timeout =
2692 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2694 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2696 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2697 sizeof(rfc), (unsigned long) &rfc);
2701 case L2CAP_MODE_STREAMING:
/* Same endianness fix as the ERTM branch above */
2702 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2703 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2705 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2707 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2709 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2710 sizeof(rfc), (unsigned long) &rfc);
2715 result = L2CAP_CONF_UNACCEPT;
2717 memset(&rfc, 0, sizeof(rfc));
2718 rfc.mode = pi->mode;
2721 if (result == L2CAP_CONF_SUCCESS)
2722 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2724 rsp->scid = cpu_to_le16(pi->dcid);
2725 rsp->result = cpu_to_le16(result);
2726 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse a Configuration Response from the remote and build the
 * follow-up Configuration Request into @data, adjusting @result when
 * a proposed value (e.g. a too-small MTU) must be renegotiated.
 * On success, latch the negotiated ERTM/Streaming parameters into pi.
 */
2731 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2733 struct l2cap_pinfo *pi = l2cap_pi(sk);
2734 struct l2cap_conf_req *req = data;
2735 void *ptr = req->data;
2738 struct l2cap_conf_rfc rfc;
2740 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2742 while (len >= L2CAP_CONF_OPT_SIZE) {
2743 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2746 case L2CAP_CONF_MTU:
2747 if (val < L2CAP_DEFAULT_MIN_MTU) {
2748 *result = L2CAP_CONF_UNACCEPT;
2749 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2752 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2755 case L2CAP_CONF_FLUSH_TO:
2757 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2761 case L2CAP_CONF_RFC:
2762 if (olen == sizeof(rfc))
2763 memcpy(&rfc, (void *)val, olen);
/* A locked-mode device refuses any mode change from the remote */
2765 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2766 rfc.mode != pi->mode)
2767 return -ECONNREFUSED;
2771 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2772 sizeof(rfc), (unsigned long) &rfc);
2777 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2778 return -ECONNREFUSED;
2780 pi->mode = rfc.mode;
2782 if (*result == L2CAP_CONF_SUCCESS) {
2784 case L2CAP_MODE_ERTM:
2785 pi->remote_tx_win = rfc.txwin_size;
2786 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2787 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2788 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2790 case L2CAP_MODE_STREAMING:
2791 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2795 req->dcid = cpu_to_le16(pi->dcid);
2796 req->flags = cpu_to_le16(0x0000);
/*
 * Fill in a minimal Configuration Response header (scid/result/flags)
 * into @data.  Returns the response length (return elided here).
 */
2801 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2803 struct l2cap_conf_rsp *rsp = data;
2804 void *ptr = rsp->data;
2806 BT_DBG("sk %p", sk);
2808 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2809 rsp->result = cpu_to_le16(result);
2810 rsp->flags = cpu_to_le16(flags);
/*
 * Extract the RFC option from a successful Configuration Response and
 * latch the remote's ERTM/Streaming parameters (tx window, timeouts,
 * MPS) into pi.  No-op for Basic-mode channels.
 */
2815 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2817 struct l2cap_pinfo *pi = l2cap_pi(sk);
2820 struct l2cap_conf_rfc rfc;
2822 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2824 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2827 while (len >= L2CAP_CONF_OPT_SIZE) {
2828 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2831 case L2CAP_CONF_RFC:
2832 if (olen == sizeof(rfc))
2833 memcpy(&rfc, (void *)val, olen);
2840 case L2CAP_MODE_ERTM:
2841 pi->remote_tx_win = rfc.txwin_size;
2842 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2843 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2844 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2846 case L2CAP_MODE_STREAMING:
2847 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle an incoming Command Reject.  If it rejects our pending
 * Information Request (matching ident), cancel the info timer, mark
 * the feature-mask exchange done and continue connection setup.
 */
2851 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2853 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 = "command not understood"; other reasons are ignored */
2855 if (rej->reason != 0x0000)
2858 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2859 cmd->ident == conn->info_ident) {
2860 del_timer(&conn->info_timer);
2862 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2863 conn->info_ident = 0;
2865 l2cap_conn_start(conn);
/*
 * Handle an incoming Connection Request: find a listener for the PSM,
 * enforce link security (except for SDP, PSM 0x0001), allocate and
 * attach a child socket, then answer with success / pending / reject
 * and, if features are still unknown, kick off an Information Request.
 */
2871 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2873 struct l2cap_chan_list *list = &conn->chan_list;
2874 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2875 struct l2cap_conn_rsp rsp;
2876 struct sock *sk, *parent;
2877 int result, status = L2CAP_CS_NO_INFO;
2879 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2880 __le16 psm = req->psm;
2882 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2884 /* Check if we have socket listening on psm */
2885 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2887 result = L2CAP_CR_BAD_PSM;
2891 /* Check if the ACL is secure enough (if not SDP) */
2892 if (psm != cpu_to_le16(0x0001) &&
2893 !hci_conn_check_link_mode(conn->hcon)) {
2894 conn->disc_reason = 0x05;
2895 result = L2CAP_CR_SEC_BLOCK;
2899 result = L2CAP_CR_NO_MEM;
2901 /* Check for backlog size */
2902 if (sk_acceptq_is_full(parent)) {
2903 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2907 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2911 write_lock_bh(&list->lock);
2913 /* Check if we already have channel with that dcid */
2914 if (__l2cap_get_chan_by_dcid(list, scid)) {
2915 write_unlock_bh(&list->lock);
2916 sock_set_flag(sk, SOCK_ZAPPED);
2917 l2cap_sock_kill(sk);
2921 hci_conn_hold(conn->hcon);
2923 l2cap_sock_init(sk, parent);
2924 bacpy(&bt_sk(sk)->src, conn->src);
2925 bacpy(&bt_sk(sk)->dst, conn->dst);
2926 l2cap_pi(sk)->psm = psm;
2927 l2cap_pi(sk)->dcid = scid;
2929 __l2cap_chan_add(conn, sk, parent);
2930 dcid = l2cap_pi(sk)->scid;
2932 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2934 l2cap_pi(sk)->ident = cmd->ident;
/* Only proceed past PEND once the remote feature mask is known */
2936 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2937 if (l2cap_check_security(sk)) {
2938 if (bt_sk(sk)->defer_setup) {
2939 sk->sk_state = BT_CONNECT2;
2940 result = L2CAP_CR_PEND;
2941 status = L2CAP_CS_AUTHOR_PEND;
2942 parent->sk_data_ready(parent, 0);
2944 sk->sk_state = BT_CONFIG;
2945 result = L2CAP_CR_SUCCESS;
2946 status = L2CAP_CS_NO_INFO;
2949 sk->sk_state = BT_CONNECT2;
2950 result = L2CAP_CR_PEND;
2951 status = L2CAP_CS_AUTHEN_PEND;
2954 sk->sk_state = BT_CONNECT2;
2955 result = L2CAP_CR_PEND;
2956 status = L2CAP_CS_NO_INFO;
2959 write_unlock_bh(&list->lock);
2962 bh_unlock_sock(parent);
2965 rsp.scid = cpu_to_le16(scid);
2966 rsp.dcid = cpu_to_le16(dcid);
2967 rsp.result = cpu_to_le16(result);
2968 rsp.status = cpu_to_le16(status);
2969 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Feature mask still unknown: start the info exchange now */
2971 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2972 struct l2cap_info_req info;
2973 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2975 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2976 conn->info_ident = l2cap_get_ident(conn);
2978 mod_timer(&conn->info_timer, jiffies +
2979 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2981 l2cap_send_cmd(conn, conn->info_ident,
2982 L2CAP_INFO_REQ, sizeof(info), &info);
/*
 * Handle a Connection Response: on success move to BT_CONFIG and send
 * the first Configuration Request; on PEND just mark CONNECT_PEND;
 * any other result tears the channel down with ECONNREFUSED.
 */
2988 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2990 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2991 u16 scid, dcid, result, status;
2995 scid = __le16_to_cpu(rsp->scid);
2996 dcid = __le16_to_cpu(rsp->dcid);
2997 result = __le16_to_cpu(rsp->result);
2998 status = __le16_to_cpu(rsp->status);
3000 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* scid of 0 means the response is matched by ident instead */
3003 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3007 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
3013 case L2CAP_CR_SUCCESS:
3014 sk->sk_state = BT_CONFIG;
3015 l2cap_pi(sk)->ident = 0;
3016 l2cap_pi(sk)->dcid = dcid;
3017 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3018 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
3020 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3021 l2cap_build_conf_req(sk, req), req);
3022 l2cap_pi(sk)->num_conf_req++;
3026 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3030 l2cap_chan_del(sk, ECONNREFUSED);
/*
 * Handle a Configuration Request: accumulate (possibly multi-fragment)
 * option data in pi->conf_req, and once the final fragment arrives,
 * parse it, send our response, and — when both config directions are
 * done — bring the channel up (FCS selection, sequence reset, ERTM
 * init).  Sends our own Configuration Request if not yet sent.
 */
3038 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3040 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3046 dcid = __le16_to_cpu(req->dcid);
3047 flags = __le16_to_cpu(req->flags);
3049 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3051 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Config only valid in BT_CONFIG; otherwise reject (invalid CID) */
3055 if (sk->sk_state != BT_CONFIG) {
3056 struct l2cap_cmd_rej rej;
3058 rej.reason = cpu_to_le16(0x0002);
3059 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3064 /* Reject if config buffer is too small. */
3065 len = cmd_len - sizeof(*req);
3066 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
3067 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3068 l2cap_build_conf_rsp(sk, rsp,
3069 L2CAP_CONF_REJECT, flags), rsp);
3074 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
3075 l2cap_pi(sk)->conf_len += len;
3077 if (flags & 0x0001) {
3078 /* Incomplete config. Send empty response. */
3079 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3080 l2cap_build_conf_rsp(sk, rsp,
3081 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3085 /* Complete config. */
3086 len = l2cap_parse_conf_req(sk, rsp);
3088 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3092 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3093 l2cap_pi(sk)->num_conf_rsp++;
3095 /* Reset config buffer. */
3096 l2cap_pi(sk)->conf_len = 0;
3098 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
3101 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
/* CRC16 unless both sides agreed to drop the FCS */
3102 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3103 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3104 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3106 sk->sk_state = BT_CONNECTED;
3108 l2cap_pi(sk)->next_tx_seq = 0;
3109 l2cap_pi(sk)->expected_tx_seq = 0;
3110 __skb_queue_head_init(TX_QUEUE(sk));
3111 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3112 l2cap_ertm_init(sk);
3114 l2cap_chan_ready(sk);
3118 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3120 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3121 l2cap_build_conf_req(sk, buf), buf);
3122 l2cap_pi(sk)->num_conf_req++;
/*
 * Handle a Configuration Response.  SUCCESS latches the negotiated
 * RFC parameters; UNACCEPT triggers renegotiation (bounded by
 * L2CAP_CONF_MAX_CONF_RSP); anything else disconnects.  When both
 * directions are configured the channel goes to BT_CONNECTED.
 */
3130 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3132 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3133 u16 scid, flags, result;
/* NOTE(review): cmd->len is wire-format (__le16) but is used here
 * without le16_to_cpu() — harmless on little-endian only; verify. */
3135 int len = cmd->len - sizeof(*rsp);
3137 scid = __le16_to_cpu(rsp->scid);
3138 flags = __le16_to_cpu(rsp->flags);
3139 result = __le16_to_cpu(rsp->result);
3141 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3142 scid, flags, result);
3144 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3149 case L2CAP_CONF_SUCCESS:
3150 l2cap_conf_rfc_get(sk, rsp->data, len);
3153 case L2CAP_CONF_UNACCEPT:
3154 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3157 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3158 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3162 /* throw out any old stored conf requests */
3163 result = L2CAP_CONF_SUCCESS;
3164 len = l2cap_parse_conf_rsp(sk, rsp->data,
3167 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3171 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3172 L2CAP_CONF_REQ, len, req);
3173 l2cap_pi(sk)->num_conf_req++;
3174 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable result: flag error and disconnect */
3180 sk->sk_err = ECONNRESET;
3181 l2cap_sock_set_timer(sk, HZ * 5);
3182 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3189 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3191 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3192 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3193 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3194 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3196 sk->sk_state = BT_CONNECTED;
3197 l2cap_pi(sk)->next_tx_seq = 0;
3198 l2cap_pi(sk)->expected_tx_seq = 0;
3199 __skb_queue_head_init(TX_QUEUE(sk));
3200 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3201 l2cap_ertm_init(sk);
3203 l2cap_chan_ready(sk);
3211 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3213 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3214 struct l2cap_disconn_rsp rsp;
3218 scid = __le16_to_cpu(req->scid);
3219 dcid = __le16_to_cpu(req->dcid);
3221 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3223 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3227 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3228 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3229 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3231 sk->sk_shutdown = SHUTDOWN_MASK;
3233 l2cap_chan_del(sk, ECONNRESET);
3236 l2cap_sock_kill(sk);
3240 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3242 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3246 scid = __le16_to_cpu(rsp->scid);
3247 dcid = __le16_to_cpu(rsp->dcid);
3249 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3251 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3255 l2cap_chan_del(sk, 0);
3258 l2cap_sock_kill(sk);
3262 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3264 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3267 type = __le16_to_cpu(req->type);
3269 BT_DBG("type 0x%4.4x", type);
3271 if (type == L2CAP_IT_FEAT_MASK) {
3273 u32 feat_mask = l2cap_feat_mask;
3274 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3275 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3276 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3278 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3280 put_unaligned_le32(feat_mask, rsp->data);
3281 l2cap_send_cmd(conn, cmd->ident,
3282 L2CAP_INFO_RSP, sizeof(buf), buf);
3283 } else if (type == L2CAP_IT_FIXED_CHAN) {
3285 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3286 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3287 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3288 memcpy(buf + 4, l2cap_fixed_chan, 8);
3289 l2cap_send_cmd(conn, cmd->ident,
3290 L2CAP_INFO_RSP, sizeof(buf), buf);
3292 struct l2cap_info_rsp rsp;
3293 rsp.type = cpu_to_le16(type);
3294 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3295 l2cap_send_cmd(conn, cmd->ident,
3296 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3302 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3304 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3307 type = __le16_to_cpu(rsp->type);
3308 result = __le16_to_cpu(rsp->result);
3310 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3312 del_timer(&conn->info_timer);
3314 if (type == L2CAP_IT_FEAT_MASK) {
3315 conn->feat_mask = get_unaligned_le32(rsp->data);
3317 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3318 struct l2cap_info_req req;
3319 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3321 conn->info_ident = l2cap_get_ident(conn);
3323 l2cap_send_cmd(conn, conn->info_ident,
3324 L2CAP_INFO_REQ, sizeof(req), &req);
3326 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3327 conn->info_ident = 0;
3329 l2cap_conn_start(conn);
3331 } else if (type == L2CAP_IT_FIXED_CHAN) {
3332 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3333 conn->info_ident = 0;
3335 l2cap_conn_start(conn);
3341 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3343 u8 *data = skb->data;
3345 struct l2cap_cmd_hdr cmd;
3348 l2cap_raw_recv(conn, skb);
3350 while (len >= L2CAP_CMD_HDR_SIZE) {
3352 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3353 data += L2CAP_CMD_HDR_SIZE;
3354 len -= L2CAP_CMD_HDR_SIZE;
3356 cmd_len = le16_to_cpu(cmd.len);
3358 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3360 if (cmd_len > len || !cmd.ident) {
3361 BT_DBG("corrupted command");
3366 case L2CAP_COMMAND_REJ:
3367 l2cap_command_rej(conn, &cmd, data);
3370 case L2CAP_CONN_REQ:
3371 err = l2cap_connect_req(conn, &cmd, data);
3374 case L2CAP_CONN_RSP:
3375 err = l2cap_connect_rsp(conn, &cmd, data);
3378 case L2CAP_CONF_REQ:
3379 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3382 case L2CAP_CONF_RSP:
3383 err = l2cap_config_rsp(conn, &cmd, data);
3386 case L2CAP_DISCONN_REQ:
3387 err = l2cap_disconnect_req(conn, &cmd, data);
3390 case L2CAP_DISCONN_RSP:
3391 err = l2cap_disconnect_rsp(conn, &cmd, data);
3394 case L2CAP_ECHO_REQ:
3395 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3398 case L2CAP_ECHO_RSP:
3401 case L2CAP_INFO_REQ:
3402 err = l2cap_information_req(conn, &cmd, data);
3405 case L2CAP_INFO_RSP:
3406 err = l2cap_information_rsp(conn, &cmd, data);
3410 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3416 struct l2cap_cmd_rej rej;
3417 BT_DBG("error %d", err);
3419 /* FIXME: Map err to a valid reason */
3420 rej.reason = cpu_to_le16(0);
3421 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3431 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3433 u16 our_fcs, rcv_fcs;
3434 int hdr_size = L2CAP_HDR_SIZE + 2;
3436 if (pi->fcs == L2CAP_FCS_CRC16) {
3437 skb_trim(skb, skb->len - 2);
3438 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3439 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3441 if (our_fcs != rcv_fcs)
3447 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3449 struct l2cap_pinfo *pi = l2cap_pi(sk);
3452 pi->frames_sent = 0;
3454 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3456 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3457 control |= L2CAP_SUPER_RCV_NOT_READY;
3458 l2cap_send_sframe(pi, control);
3459 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3462 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3463 l2cap_retransmit_frames(sk);
3465 spin_lock_bh(&pi->send_lock);
3466 l2cap_ertm_send(sk);
3467 spin_unlock_bh(&pi->send_lock);
3469 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3470 pi->frames_sent == 0) {
3471 control |= L2CAP_SUPER_RCV_READY;
3472 l2cap_send_sframe(pi, control);
3476 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3478 struct sk_buff *next_skb;
3479 struct l2cap_pinfo *pi = l2cap_pi(sk);
3480 int tx_seq_offset, next_tx_seq_offset;
3482 bt_cb(skb)->tx_seq = tx_seq;
3483 bt_cb(skb)->sar = sar;
3485 next_skb = skb_peek(SREJ_QUEUE(sk));
3487 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3491 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3492 if (tx_seq_offset < 0)
3493 tx_seq_offset += 64;
3496 if (bt_cb(next_skb)->tx_seq == tx_seq)
3499 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3500 pi->buffer_seq) % 64;
3501 if (next_tx_seq_offset < 0)
3502 next_tx_seq_offset += 64;
3504 if (next_tx_seq_offset > tx_seq_offset) {
3505 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3509 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3512 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3514 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3519 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3521 struct l2cap_pinfo *pi = l2cap_pi(sk);
3522 struct sk_buff *_skb;
3525 switch (control & L2CAP_CTRL_SAR) {
3526 case L2CAP_SDU_UNSEGMENTED:
3527 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3530 err = sock_queue_rcv_skb(sk, skb);
3536 case L2CAP_SDU_START:
3537 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3540 pi->sdu_len = get_unaligned_le16(skb->data);
3542 if (pi->sdu_len > pi->imtu)
3545 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3549 /* pull sdu_len bytes only after alloc, because of Local Busy
3550 * condition we have to be sure that this will be executed
3551 * only once, i.e., when alloc does not fail */
3554 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3556 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3557 pi->partial_sdu_len = skb->len;
3560 case L2CAP_SDU_CONTINUE:
3561 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3567 pi->partial_sdu_len += skb->len;
3568 if (pi->partial_sdu_len > pi->sdu_len)
3571 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3576 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3582 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3583 pi->partial_sdu_len += skb->len;
3585 if (pi->partial_sdu_len > pi->imtu)
3588 if (pi->partial_sdu_len != pi->sdu_len)
3591 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3594 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3596 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3600 err = sock_queue_rcv_skb(sk, _skb);
3603 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3607 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3608 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3622 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3627 static void l2cap_busy_work(struct work_struct *work)
3629 DECLARE_WAITQUEUE(wait, current);
3630 struct l2cap_pinfo *pi =
3631 container_of(work, struct l2cap_pinfo, busy_work);
3632 struct sock *sk = (struct sock *)pi;
3633 int n_tries = 0, timeo = HZ/5, err;
3634 struct sk_buff *skb;
3639 add_wait_queue(sk_sleep(sk), &wait);
3640 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3641 set_current_state(TASK_INTERRUPTIBLE);
3643 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3645 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3652 if (signal_pending(current)) {
3653 err = sock_intr_errno(timeo);
3658 timeo = schedule_timeout(timeo);
3661 err = sock_error(sk);
3665 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3666 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3667 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3669 skb_queue_head(BUSY_QUEUE(sk), skb);
3673 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3680 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
3683 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3684 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3685 l2cap_send_sframe(pi, control);
3686 l2cap_pi(sk)->retry_count = 1;
3688 del_timer(&pi->retrans_timer);
3689 __mod_monitor_timer();
3691 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3694 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3695 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3697 BT_DBG("sk %p, Exit local busy", sk);
3699 set_current_state(TASK_RUNNING);
3700 remove_wait_queue(sk_sleep(sk), &wait);
3705 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3707 struct l2cap_pinfo *pi = l2cap_pi(sk);
3710 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3711 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3712 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3716 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3718 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3722 /* Busy Condition */
3723 BT_DBG("sk %p, Enter local busy", sk);
3725 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3726 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3727 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3729 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3730 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3731 l2cap_send_sframe(pi, sctrl);
3733 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3735 del_timer(&pi->ack_timer);
3737 queue_work(_busy_wq, &pi->busy_work);
3742 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3744 struct l2cap_pinfo *pi = l2cap_pi(sk);
3745 struct sk_buff *_skb;
3749 * TODO: We have to notify the userland if some data is lost with the
3753 switch (control & L2CAP_CTRL_SAR) {
3754 case L2CAP_SDU_UNSEGMENTED:
3755 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3760 err = sock_queue_rcv_skb(sk, skb);
3766 case L2CAP_SDU_START:
3767 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3772 pi->sdu_len = get_unaligned_le16(skb->data);
3775 if (pi->sdu_len > pi->imtu) {
3780 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3786 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3788 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3789 pi->partial_sdu_len = skb->len;
3793 case L2CAP_SDU_CONTINUE:
3794 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3797 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3799 pi->partial_sdu_len += skb->len;
3800 if (pi->partial_sdu_len > pi->sdu_len)
3808 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3811 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3813 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3814 pi->partial_sdu_len += skb->len;
3816 if (pi->partial_sdu_len > pi->imtu)
3819 if (pi->partial_sdu_len == pi->sdu_len) {
3820 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3821 err = sock_queue_rcv_skb(sk, _skb);
3836 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3838 struct sk_buff *skb;
3841 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3842 if (bt_cb(skb)->tx_seq != tx_seq)
3845 skb = skb_dequeue(SREJ_QUEUE(sk));
3846 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3847 l2cap_ertm_reassembly_sdu(sk, skb, control);
3848 l2cap_pi(sk)->buffer_seq_srej =
3849 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3850 tx_seq = (tx_seq + 1) % 64;
3854 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3856 struct l2cap_pinfo *pi = l2cap_pi(sk);
3857 struct srej_list *l, *tmp;
3860 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3861 if (l->tx_seq == tx_seq) {
3866 control = L2CAP_SUPER_SELECT_REJECT;
3867 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3868 l2cap_send_sframe(pi, control);
3870 list_add_tail(&l->list, SREJ_LIST(sk));
3874 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3876 struct l2cap_pinfo *pi = l2cap_pi(sk);
3877 struct srej_list *new;
3880 while (tx_seq != pi->expected_tx_seq) {
3881 control = L2CAP_SUPER_SELECT_REJECT;
3882 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3883 l2cap_send_sframe(pi, control);
3885 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3886 new->tx_seq = pi->expected_tx_seq;
3887 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3888 list_add_tail(&new->list, SREJ_LIST(sk));
3890 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3893 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3895 struct l2cap_pinfo *pi = l2cap_pi(sk);
3896 u8 tx_seq = __get_txseq(rx_control);
3897 u8 req_seq = __get_reqseq(rx_control);
3898 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3899 int tx_seq_offset, expected_tx_seq_offset;
3900 int num_to_ack = (pi->tx_win/6) + 1;
3903 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3906 if (L2CAP_CTRL_FINAL & rx_control &&
3907 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3908 del_timer(&pi->monitor_timer);
3909 if (pi->unacked_frames > 0)
3910 __mod_retrans_timer();
3911 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3914 pi->expected_ack_seq = req_seq;
3915 l2cap_drop_acked_frames(sk);
3917 if (tx_seq == pi->expected_tx_seq)
3920 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3921 if (tx_seq_offset < 0)
3922 tx_seq_offset += 64;
3924 /* invalid tx_seq */
3925 if (tx_seq_offset >= pi->tx_win) {
3926 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3930 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3933 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3934 struct srej_list *first;
3936 first = list_first_entry(SREJ_LIST(sk),
3937 struct srej_list, list);
3938 if (tx_seq == first->tx_seq) {
3939 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3940 l2cap_check_srej_gap(sk, tx_seq);
3942 list_del(&first->list);
3945 if (list_empty(SREJ_LIST(sk))) {
3946 pi->buffer_seq = pi->buffer_seq_srej;
3947 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3949 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3952 struct srej_list *l;
3954 /* duplicated tx_seq */
3955 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3958 list_for_each_entry(l, SREJ_LIST(sk), list) {
3959 if (l->tx_seq == tx_seq) {
3960 l2cap_resend_srejframe(sk, tx_seq);
3964 l2cap_send_srejframe(sk, tx_seq);
3967 expected_tx_seq_offset =
3968 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3969 if (expected_tx_seq_offset < 0)
3970 expected_tx_seq_offset += 64;
3972 /* duplicated tx_seq */
3973 if (tx_seq_offset < expected_tx_seq_offset)
3976 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3978 BT_DBG("sk %p, Enter SREJ", sk);
3980 INIT_LIST_HEAD(SREJ_LIST(sk));
3981 pi->buffer_seq_srej = pi->buffer_seq;
3983 __skb_queue_head_init(SREJ_QUEUE(sk));
3984 __skb_queue_head_init(BUSY_QUEUE(sk));
3985 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3987 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3989 l2cap_send_srejframe(sk, tx_seq);
3991 del_timer(&pi->ack_timer);
3996 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3998 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3999 bt_cb(skb)->tx_seq = tx_seq;
4000 bt_cb(skb)->sar = sar;
4001 __skb_queue_tail(SREJ_QUEUE(sk), skb);
4005 err = l2cap_push_rx_skb(sk, skb, rx_control);
4009 if (rx_control & L2CAP_CTRL_FINAL) {
4010 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4011 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4013 l2cap_retransmit_frames(sk);
4018 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
4019 if (pi->num_acked == num_to_ack - 1)
4029 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
4031 struct l2cap_pinfo *pi = l2cap_pi(sk);
4033 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
4036 pi->expected_ack_seq = __get_reqseq(rx_control);
4037 l2cap_drop_acked_frames(sk);
4039 if (rx_control & L2CAP_CTRL_POLL) {
4040 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4041 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4042 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4043 (pi->unacked_frames > 0))
4044 __mod_retrans_timer();
4046 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4047 l2cap_send_srejtail(sk);
4049 l2cap_send_i_or_rr_or_rnr(sk);
4052 } else if (rx_control & L2CAP_CTRL_FINAL) {
4053 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4055 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4056 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4058 l2cap_retransmit_frames(sk);
4061 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4062 (pi->unacked_frames > 0))
4063 __mod_retrans_timer();
4065 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4066 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4069 spin_lock_bh(&pi->send_lock);
4070 l2cap_ertm_send(sk);
4071 spin_unlock_bh(&pi->send_lock);
4076 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4078 struct l2cap_pinfo *pi = l2cap_pi(sk);
4079 u8 tx_seq = __get_reqseq(rx_control);
4081 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4083 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4085 pi->expected_ack_seq = tx_seq;
4086 l2cap_drop_acked_frames(sk);
4088 if (rx_control & L2CAP_CTRL_FINAL) {
4089 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4090 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4092 l2cap_retransmit_frames(sk);
4094 l2cap_retransmit_frames(sk);
4096 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4097 pi->conn_state |= L2CAP_CONN_REJ_ACT;
4100 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
4102 struct l2cap_pinfo *pi = l2cap_pi(sk);
4103 u8 tx_seq = __get_reqseq(rx_control);
4105 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4107 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4109 if (rx_control & L2CAP_CTRL_POLL) {
4110 pi->expected_ack_seq = tx_seq;
4111 l2cap_drop_acked_frames(sk);
4113 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4114 l2cap_retransmit_one_frame(sk, tx_seq);
4116 spin_lock_bh(&pi->send_lock);
4117 l2cap_ertm_send(sk);
4118 spin_unlock_bh(&pi->send_lock);
4120 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4121 pi->srej_save_reqseq = tx_seq;
4122 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4124 } else if (rx_control & L2CAP_CTRL_FINAL) {
4125 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4126 pi->srej_save_reqseq == tx_seq)
4127 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4129 l2cap_retransmit_one_frame(sk, tx_seq);
4131 l2cap_retransmit_one_frame(sk, tx_seq);
4132 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4133 pi->srej_save_reqseq = tx_seq;
4134 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4139 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4141 struct l2cap_pinfo *pi = l2cap_pi(sk);
4142 u8 tx_seq = __get_reqseq(rx_control);
4144 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4146 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4147 pi->expected_ack_seq = tx_seq;
4148 l2cap_drop_acked_frames(sk);
4150 if (rx_control & L2CAP_CTRL_POLL)
4151 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4153 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
4154 del_timer(&pi->retrans_timer);
4155 if (rx_control & L2CAP_CTRL_POLL)
4156 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4160 if (rx_control & L2CAP_CTRL_POLL)
4161 l2cap_send_srejtail(sk);
4163 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
4166 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4168 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4170 if (L2CAP_CTRL_FINAL & rx_control &&
4171 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4172 del_timer(&l2cap_pi(sk)->monitor_timer);
4173 if (l2cap_pi(sk)->unacked_frames > 0)
4174 __mod_retrans_timer();
4175 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4178 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4179 case L2CAP_SUPER_RCV_READY:
4180 l2cap_data_channel_rrframe(sk, rx_control);
4183 case L2CAP_SUPER_REJECT:
4184 l2cap_data_channel_rejframe(sk, rx_control);
4187 case L2CAP_SUPER_SELECT_REJECT:
4188 l2cap_data_channel_srejframe(sk, rx_control);
4191 case L2CAP_SUPER_RCV_NOT_READY:
4192 l2cap_data_channel_rnrframe(sk, rx_control);
4200 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4203 struct l2cap_pinfo *pi;
4206 int len, next_tx_seq_offset, req_seq_offset;
4208 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4210 BT_DBG("unknown cid 0x%4.4x", cid);
4216 BT_DBG("sk %p, len %d", sk, skb->len);
4218 if (sk->sk_state != BT_CONNECTED)
4222 case L2CAP_MODE_BASIC:
4223 /* If socket recv buffers overflows we drop data here
4224 * which is *bad* because L2CAP has to be reliable.
4225 * But we don't have any other choice. L2CAP doesn't
4226 * provide flow control mechanism. */
4228 if (pi->imtu < skb->len)
4231 if (!sock_queue_rcv_skb(sk, skb))
4235 case L2CAP_MODE_ERTM:
4236 control = get_unaligned_le16(skb->data);
4241 * We can just drop the corrupted I-frame here.
4242 * Receiver will miss it and start proper recovery
4243 * procedures and ask retransmission.
4245 if (l2cap_check_fcs(pi, skb))
4248 if (__is_sar_start(control) && __is_iframe(control))
4251 if (pi->fcs == L2CAP_FCS_CRC16)
4254 if (len > pi->mps) {
4255 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4259 req_seq = __get_reqseq(control);
4260 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4261 if (req_seq_offset < 0)
4262 req_seq_offset += 64;
4264 next_tx_seq_offset =
4265 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4266 if (next_tx_seq_offset < 0)
4267 next_tx_seq_offset += 64;
4269 /* check for invalid req-seq */
4270 if (req_seq_offset > next_tx_seq_offset) {
4271 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4275 if (__is_iframe(control)) {
4277 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4281 l2cap_data_channel_iframe(sk, control, skb);
4284 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4288 l2cap_data_channel_sframe(sk, control, skb);
4293 case L2CAP_MODE_STREAMING:
4294 control = get_unaligned_le16(skb->data);
4298 if (l2cap_check_fcs(pi, skb))
4301 if (__is_sar_start(control))
4304 if (pi->fcs == L2CAP_FCS_CRC16)
4307 if (len > pi->mps || len < 0 || __is_sframe(control))
4310 tx_seq = __get_txseq(control);
4312 if (pi->expected_tx_seq == tx_seq)
4313 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4315 pi->expected_tx_seq = (tx_seq + 1) % 64;
4317 l2cap_streaming_reassembly_sdu(sk, skb, control);
4322 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
4336 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4340 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4344 BT_DBG("sk %p, len %d", sk, skb->len);
4346 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4349 if (l2cap_pi(sk)->imtu < skb->len)
4352 if (!sock_queue_rcv_skb(sk, skb))
4364 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4366 struct l2cap_hdr *lh = (void *) skb->data;
4370 skb_pull(skb, L2CAP_HDR_SIZE);
4371 cid = __le16_to_cpu(lh->cid);
4372 len = __le16_to_cpu(lh->len);
4374 if (len != skb->len) {
4379 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4382 case L2CAP_CID_SIGNALING:
4383 l2cap_sig_channel(conn, skb);
4386 case L2CAP_CID_CONN_LESS:
4387 psm = get_unaligned_le16(skb->data);
4389 l2cap_conless_channel(conn, psm, skb);
4393 l2cap_data_channel(conn, cid, skb);
4398 /* ---- L2CAP interface with lower layer (HCI) ---- */
4400 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4402 int exact = 0, lm1 = 0, lm2 = 0;
4403 register struct sock *sk;
4404 struct hlist_node *node;
4406 if (type != ACL_LINK)
4409 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4411 /* Find listening sockets and check their link_mode */
4412 read_lock(&l2cap_sk_list.lock);
4413 sk_for_each(sk, node, &l2cap_sk_list.head) {
4414 if (sk->sk_state != BT_LISTEN)
4417 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4418 lm1 |= HCI_LM_ACCEPT;
4419 if (l2cap_pi(sk)->role_switch)
4420 lm1 |= HCI_LM_MASTER;
4422 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4423 lm2 |= HCI_LM_ACCEPT;
4424 if (l2cap_pi(sk)->role_switch)
4425 lm2 |= HCI_LM_MASTER;
4428 read_unlock(&l2cap_sk_list.lock);
4430 return exact ? lm1 : lm2;
4433 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4435 struct l2cap_conn *conn;
4437 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4439 if (hcon->type != ACL_LINK)
4443 conn = l2cap_conn_add(hcon, status);
4445 l2cap_conn_ready(conn);
4447 l2cap_conn_del(hcon, bt_err(status));
4452 static int l2cap_disconn_ind(struct hci_conn *hcon)
4454 struct l2cap_conn *conn = hcon->l2cap_data;
4456 BT_DBG("hcon %p", hcon);
4458 if (hcon->type != ACL_LINK || !conn)
4461 return conn->disc_reason;
4464 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4466 BT_DBG("hcon %p reason %d", hcon, reason);
4468 if (hcon->type != ACL_LINK)
4471 l2cap_conn_del(hcon, bt_err(reason));
4476 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4478 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4481 if (encrypt == 0x00) {
4482 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4483 l2cap_sock_clear_timer(sk);
4484 l2cap_sock_set_timer(sk, HZ * 5);
4485 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4486 __l2cap_sock_close(sk, ECONNREFUSED);
4488 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4489 l2cap_sock_clear_timer(sk);
4493 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4495 struct l2cap_chan_list *l;
4496 struct l2cap_conn *conn = hcon->l2cap_data;
4502 l = &conn->chan_list;
4504 BT_DBG("conn %p", conn);
4506 read_lock(&l->lock);
4508 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
4511 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4516 if (!status && (sk->sk_state == BT_CONNECTED ||
4517 sk->sk_state == BT_CONFIG)) {
4518 l2cap_check_encryption(sk, encrypt);
4523 if (sk->sk_state == BT_CONNECT) {
4525 struct l2cap_conn_req req;
4526 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4527 req.psm = l2cap_pi(sk)->psm;
4529 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4530 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
4532 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4533 L2CAP_CONN_REQ, sizeof(req), &req);
4535 l2cap_sock_clear_timer(sk);
4536 l2cap_sock_set_timer(sk, HZ / 10);
4538 } else if (sk->sk_state == BT_CONNECT2) {
4539 struct l2cap_conn_rsp rsp;
4543 sk->sk_state = BT_CONFIG;
4544 result = L2CAP_CR_SUCCESS;
4546 sk->sk_state = BT_DISCONN;
4547 l2cap_sock_set_timer(sk, HZ / 10);
4548 result = L2CAP_CR_SEC_BLOCK;
4551 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4552 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4553 rsp.result = cpu_to_le16(result);
4554 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4555 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4556 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4562 read_unlock(&l->lock);
4567 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4569 struct l2cap_conn *conn = hcon->l2cap_data;
4571 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4574 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4576 if (flags & ACL_START) {
4577 struct l2cap_hdr *hdr;
4581 BT_ERR("Unexpected start frame (len %d)", skb->len);
4582 kfree_skb(conn->rx_skb);
4583 conn->rx_skb = NULL;
4585 l2cap_conn_unreliable(conn, ECOMM);
4589 BT_ERR("Frame is too short (len %d)", skb->len);
4590 l2cap_conn_unreliable(conn, ECOMM);
4594 hdr = (struct l2cap_hdr *) skb->data;
4595 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4597 if (len == skb->len) {
4598 /* Complete frame received */
4599 l2cap_recv_frame(conn, skb);
4603 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4605 if (skb->len > len) {
4606 BT_ERR("Frame is too long (len %d, expected len %d)",
4608 l2cap_conn_unreliable(conn, ECOMM);
4612 /* Allocate skb for the complete frame (with header) */
4613 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4617 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4619 conn->rx_len = len - skb->len;
4621 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4623 if (!conn->rx_len) {
4624 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4625 l2cap_conn_unreliable(conn, ECOMM);
4629 if (skb->len > conn->rx_len) {
4630 BT_ERR("Fragment is too long (len %d, expected %d)",
4631 skb->len, conn->rx_len);
4632 kfree_skb(conn->rx_skb);
4633 conn->rx_skb = NULL;
4635 l2cap_conn_unreliable(conn, ECOMM);
4639 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4641 conn->rx_len -= skb->len;
4643 if (!conn->rx_len) {
4644 /* Complete frame received */
4645 l2cap_recv_frame(conn, conn->rx_skb);
4646 conn->rx_skb = NULL;
4655 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4658 struct hlist_node *node;
4660 read_lock_bh(&l2cap_sk_list.lock);
4662 sk_for_each(sk, node, &l2cap_sk_list.head) {
4663 struct l2cap_pinfo *pi = l2cap_pi(sk);
4665 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4666 batostr(&bt_sk(sk)->src),
4667 batostr(&bt_sk(sk)->dst),
4668 sk->sk_state, __le16_to_cpu(pi->psm),
4670 pi->imtu, pi->omtu, pi->sec_level);
4673 read_unlock_bh(&l2cap_sk_list.lock);
4678 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4680 return single_open(file, l2cap_debugfs_show, inode->i_private);
4683 static const struct file_operations l2cap_debugfs_fops = {
4684 .open = l2cap_debugfs_open,
4686 .llseek = seq_lseek,
4687 .release = single_release,
4690 static struct dentry *l2cap_debugfs;
4692 static const struct proto_ops l2cap_sock_ops = {
4693 .family = PF_BLUETOOTH,
4694 .owner = THIS_MODULE,
4695 .release = l2cap_sock_release,
4696 .bind = l2cap_sock_bind,
4697 .connect = l2cap_sock_connect,
4698 .listen = l2cap_sock_listen,
4699 .accept = l2cap_sock_accept,
4700 .getname = l2cap_sock_getname,
4701 .sendmsg = l2cap_sock_sendmsg,
4702 .recvmsg = l2cap_sock_recvmsg,
4703 .poll = bt_sock_poll,
4704 .ioctl = bt_sock_ioctl,
4705 .mmap = sock_no_mmap,
4706 .socketpair = sock_no_socketpair,
4707 .shutdown = l2cap_sock_shutdown,
4708 .setsockopt = l2cap_sock_setsockopt,
4709 .getsockopt = l2cap_sock_getsockopt
4712 static const struct net_proto_family l2cap_sock_family_ops = {
4713 .family = PF_BLUETOOTH,
4714 .owner = THIS_MODULE,
4715 .create = l2cap_sock_create,
4718 static struct hci_proto l2cap_hci_proto = {
4720 .id = HCI_PROTO_L2CAP,
4721 .connect_ind = l2cap_connect_ind,
4722 .connect_cfm = l2cap_connect_cfm,
4723 .disconn_ind = l2cap_disconn_ind,
4724 .disconn_cfm = l2cap_disconn_cfm,
4725 .security_cfm = l2cap_security_cfm,
4726 .recv_acldata = l2cap_recv_acldata
4729 static int __init l2cap_init(void)
4733 err = proto_register(&l2cap_proto, 0);
4737 _busy_wq = create_singlethread_workqueue("l2cap");
4741 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4743 BT_ERR("L2CAP socket registration failed");
4747 err = hci_register_proto(&l2cap_hci_proto);
4749 BT_ERR("L2CAP protocol registration failed");
4750 bt_sock_unregister(BTPROTO_L2CAP);
4755 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4756 bt_debugfs, NULL, &l2cap_debugfs_fops);
4758 BT_ERR("Failed to create L2CAP debug file");
4761 BT_INFO("L2CAP ver %s", VERSION);
4762 BT_INFO("L2CAP socket layer initialized");
4767 proto_unregister(&l2cap_proto);
4771 static void __exit l2cap_exit(void)
4773 debugfs_remove(l2cap_debugfs);
4775 flush_workqueue(_busy_wq);
4776 destroy_workqueue(_busy_wq);
4778 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4779 BT_ERR("L2CAP socket unregistration failed");
4781 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4782 BT_ERR("L2CAP protocol unregistration failed");
4784 proto_unregister(&l2cap_proto);
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. Intentionally empty. */
}
EXPORT_SYMBOL(l2cap_load);
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime knob for Enhanced Retransmission Mode, off by default
 * (see the enable_ertm definition near the top of the file).
 * NOTE(review): the variable is declared as int but exposed with the
 * bool param type — confirm this matches module_param expectations
 * for this kernel version. */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
/* Allows the Bluetooth core to request this module by protocol id 0
 * (BTPROTO_L2CAP) when an L2CAP socket is first created. */
MODULE_ALIAS("bt-proto-0");