2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
44 #include <linux/crc16.h>
47 #include <asm/system.h>
48 #include <asm/unaligned.h>
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
52 #include <net/bluetooth/l2cap.h>
54 #define VERSION "2.14"
/* Module-wide state.
 * NOTE(review): this listing has original line numbers baked in and is
 * missing dropped lines (blank/brace/else lines) — compare against
 * upstream net/bluetooth/l2cap.c before editing. */
/* ERTM is opt-in via module parameter; 0 keeps Basic mode only. */
56 static int enable_ertm = 0;
/* Feature mask advertised in Information responses; fixed-channel map
 * (0x02 = L2CAP signalling channel supported). */
58 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
59 static u8 l2cap_fixed_chan[8] = { 0x02, };
61 static const struct proto_ops l2cap_sock_ops;
/* Global list of all L2CAP sockets, guarded by its rwlock. */
63 static struct bt_sock_list l2cap_sk_list = {
64 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for the socket teardown paths and command builder. */
67 static void __l2cap_sock_close(struct sock *sk, int reason);
68 static void l2cap_sock_close(struct sock *sk);
69 static void l2cap_sock_kill(struct sock *sk);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
74 /* ---- L2CAP timers ---- */
/* sk_timer callback: pick an error reason from the socket state and close
 * the channel. Connected/configuring or a post-SDP connect attempt maps to
 * ECONNREFUSED; other states presumably fall back to ETIMEDOUT (the else
 * branch was dropped by the extraction — confirm against upstream). */
75 static void l2cap_sock_timeout(unsigned long arg)
77 struct sock *sk = (struct sock *) arg;
80 BT_DBG("sock %p state %d", sk, sk->sk_state);
84 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
85 reason = ECONNREFUSED;
86 else if (sk->sk_state == BT_CONNECT &&
87 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
88 reason = ECONNREFUSED;
92 __l2cap_sock_close(sk, reason);
/* (Re)arm the socket timer 'timeout' jiffies from now. */
100 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
102 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
103 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timer, dropping its reference on sk. */
106 static void l2cap_sock_clear_timer(struct sock *sk)
108 BT_DBG("sock %p state %d", sk, sk->sk_state);
109 sk_stop_timer(sk, &sk->sk_timer);
112 /* ---- L2CAP channels ---- */
/* Linear scan of a connection's channel list by destination CID.
 * Caller must hold l->lock. */
113 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
116 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
117 if (l2cap_pi(s)->dcid == cid)
/* Same scan keyed on the local (source) CID. Caller holds l->lock. */
123 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
126 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
127 if (l2cap_pi(s)->scid == cid)
133 /* Find channel with given SCID.
134 * Returns locked socket */
135 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
139 s = __l2cap_get_chan_by_scid(l, cid);
142 read_unlock(&l->lock);
/* Lookup by the signalling-command ident currently associated with a
 * channel. Caller must hold l->lock. */
146 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
149 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
150 if (l2cap_pi(s)->ident == ident)
/* Locked wrapper around the ident lookup; returns locked socket. */
156 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
160 s = __l2cap_get_chan_by_ident(l, ident);
163 read_unlock(&l->lock);
/* Allocate the first unused dynamic CID (L2CAP_CID_DYN_START..DYN_END);
 * presumably returns 0 on exhaustion (tail dropped by extraction). */
167 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
169 u16 cid = L2CAP_CID_DYN_START;
171 for (; cid < L2CAP_CID_DYN_END; cid++) {
172 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push sk onto the head of the connection's doubly-linked channel list.
 * Caller must hold l->lock (write side). */
179 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
184 l2cap_pi(l->head)->prev_c = sk;
186 l2cap_pi(sk)->next_c = l->head;
187 l2cap_pi(sk)->prev_c = NULL;
/* Remove sk from the channel list, taking the list lock itself. */
191 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
193 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
195 write_lock_bh(&l->lock);
200 l2cap_pi(next)->prev_c = prev;
202 l2cap_pi(prev)->next_c = next;
203 write_unlock_bh(&l->lock);
/* Attach a socket to a connection: assign CIDs by socket type, link it
 * into the channel list and, for incoming channels, queue it on the
 * listening parent. Caller must hold chan_list lock (write side). */
208 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
210 struct l2cap_chan_list *l = &conn->chan_list;
212 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
213 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: "remote user terminated connection" default disconnect reason. */
215 conn->disc_reason = 0x13;
217 l2cap_pi(sk)->conn = conn;
219 if (sk->sk_type == SOCK_SEQPACKET) {
220 /* Alloc CID for connection-oriented socket */
221 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
222 } else if (sk->sk_type == SOCK_DGRAM) {
223 /* Connectionless socket */
224 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
225 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
226 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
228 /* Raw socket can send/recv signalling messages only */
229 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
230 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
231 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
234 __l2cap_chan_link(l, sk);
237 bt_accept_enqueue(parent, sk);
/* Detach a channel from its connection and mark the socket closed with
 * sk_err = err; wakes the accepting parent if this was an un-accepted
 * child. */
241 * Must be called on the locked socket. */
242 static void l2cap_chan_del(struct sock *sk, int err)
244 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
245 struct sock *parent = bt_sk(sk)->parent;
247 l2cap_sock_clear_timer(sk);
249 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
252 /* Unlink from channel list */
253 l2cap_chan_unlink(&conn->chan_list, sk);
254 l2cap_pi(sk)->conn = NULL;
/* Drop the ACL reference taken when the channel was added. */
255 hci_conn_put(conn->hcon);
258 sk->sk_state = BT_CLOSED;
259 sock_set_flag(sk, SOCK_ZAPPED);
265 bt_accept_unlink(sk);
266 parent->sk_data_ready(parent, 0);
268 sk->sk_state_change(sk);
271 /* Service level security */
/* Map the channel's security level to an HCI authentication type and ask
 * the ACL link to satisfy it. PSM 0x0001 (SDP) never bonds; its LOW level
 * is promoted to the dedicated SDP security level. */
272 static inline int l2cap_check_security(struct sock *sk)
274 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
277 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
278 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
279 auth_type = HCI_AT_NO_BONDING_MITM;
281 auth_type = HCI_AT_NO_BONDING;
283 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
284 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
/* Non-SDP PSMs use general bonding, with MITM for HIGH. */
286 switch (l2cap_pi(sk)->sec_level) {
287 case BT_SECURITY_HIGH:
288 auth_type = HCI_AT_GENERAL_BONDING_MITM;
290 case BT_SECURITY_MEDIUM:
291 auth_type = HCI_AT_GENERAL_BONDING;
294 auth_type = HCI_AT_NO_BONDING;
299 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command ident under conn->lock, wrapping
 * within the kernel's 1..128 range (0 is never valid on the wire). */
303 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
307 /* Get next available identificator.
308 * 1 - 128 are used by kernel.
309 * 129 - 199 are reserved.
310 * 200 - 254 are used by utilities like l2ping, etc.
313 spin_lock_bh(&conn->lock);
315 if (++conn->tx_ident > 128)
320 spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command and push it out on the ACL link. */
325 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
327 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
329 BT_DBG("code 0x%2.2x", code);
334 return hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM S-frame (supervisory frame): L2CAP header +
 * 16-bit control field, plus a trailing CRC16 FCS when negotiated. */
337 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
340 struct l2cap_hdr *lh;
341 struct l2cap_conn *conn = pi->conn;
/* hlen = header + control; +2 more for FCS below when CRC16 is on. */
342 int count, hlen = L2CAP_HDR_SIZE + 2;
344 if (pi->fcs == L2CAP_FCS_CRC16)
347 BT_DBG("pi %p, control 0x%2.2x", pi, control);
349 count = min_t(unsigned int, conn->mtu, hlen);
350 control |= L2CAP_CTRL_FRAME_TYPE;
352 skb = bt_skb_alloc(count, GFP_ATOMIC);
356 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
357 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
358 lh->cid = cpu_to_le16(pi->dcid);
359 put_unaligned_le16(control, skb_put(skb, 2));
361 if (pi->fcs == L2CAP_FCS_CRC16) {
/* FCS covers everything except the FCS field itself. */
362 u16 fcs = crc16(0, (u8 *)lh, count - 2);
363 put_unaligned_le16(fcs, skb_put(skb, 2));
366 return hci_send_acl(pi->conn->hcon, skb, 0);
/* Kick off channel establishment: if the remote's feature mask is already
 * known (or being fetched) send a Connection Request once security is
 * satisfied; otherwise first issue an Information Request for the feature
 * mask, guarded by the info timer. */
369 static void l2cap_do_start(struct sock *sk)
371 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
373 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
374 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
377 if (l2cap_check_security(sk)) {
378 struct l2cap_conn_req req;
379 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
380 req.psm = l2cap_pi(sk)->psm;
382 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
384 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
385 L2CAP_CONN_REQ, sizeof(req), &req);
388 struct l2cap_info_req req;
389 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
391 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
392 conn->info_ident = l2cap_get_ident(conn);
394 mod_timer(&conn->info_timer, jiffies +
395 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
397 l2cap_send_cmd(conn, conn->info_ident,
398 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send a Disconnection Request for this channel's CID pair. */
402 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
404 struct l2cap_disconn_req req;
406 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
407 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
408 l2cap_send_cmd(conn, l2cap_get_ident(conn),
409 L2CAP_DISCONN_REQ, sizeof(req), &req);
412 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and advance its state machine:
 * outgoing channels in BT_CONNECT get a Connection Request (once security
 * passes); incoming channels in BT_CONNECT2 get their deferred Connection
 * Response (success, authorization-pending for defer_setup, or
 * authentication-pending while security is still running). */
413 static void l2cap_conn_start(struct l2cap_conn *conn)
415 struct l2cap_chan_list *l = &conn->chan_list;
418 BT_DBG("conn %p", conn);
422 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented (SEQPACKET) channels negotiate. */
425 if (sk->sk_type != SOCK_SEQPACKET) {
430 if (sk->sk_state == BT_CONNECT) {
431 if (l2cap_check_security(sk)) {
432 struct l2cap_conn_req req;
433 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
434 req.psm = l2cap_pi(sk)->psm;
436 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
438 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
439 L2CAP_CONN_REQ, sizeof(req), &req);
441 } else if (sk->sk_state == BT_CONNECT2) {
442 struct l2cap_conn_rsp rsp;
/* Note the swap: our scid is the peer's dcid and vice versa. */
443 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
444 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
446 if (l2cap_check_security(sk)) {
447 if (bt_sk(sk)->defer_setup) {
448 struct sock *parent = bt_sk(sk)->parent;
449 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
450 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
/* Wake the listener so userspace can accept() and decide. */
451 parent->sk_data_ready(parent, 0);
454 sk->sk_state = BT_CONFIG;
455 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
456 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
459 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
460 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
463 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
464 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
470 read_unlock(&l->lock);
/* ACL link came up: non-SEQPACKET sockets become connected immediately;
 * SEQPACKET channels in BT_CONNECT presumably call l2cap_do_start()
 * (body line dropped by extraction — confirm upstream). */
473 static void l2cap_conn_ready(struct l2cap_conn *conn)
475 struct l2cap_chan_list *l = &conn->chan_list;
478 BT_DBG("conn %p", conn);
482 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
485 if (sk->sk_type != SOCK_SEQPACKET) {
486 l2cap_sock_clear_timer(sk);
487 sk->sk_state = BT_CONNECTED;
488 sk->sk_state_change(sk);
489 } else if (sk->sk_state == BT_CONNECT)
495 read_unlock(&l->lock);
498 /* Notify sockets that we cannot guaranty reliability anymore */
/* Push err into every channel that requested force_reliable. */
499 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
501 struct l2cap_chan_list *l = &conn->chan_list;
504 BT_DBG("conn %p", conn);
508 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
509 if (l2cap_pi(sk)->force_reliable)
513 read_unlock(&l->lock);
/* Info request timed out: mark the feature exchange done (with no data)
 * so pending channels can proceed. */
516 static void l2cap_info_timeout(unsigned long arg)
518 struct l2cap_conn *conn = (void *) arg;
520 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
521 conn->info_ident = 0;
523 l2cap_conn_start(conn);
/* Create (or return the existing) l2cap_conn for an ACL link. Allocated
 * GFP_ATOMIC since this runs from HCI event context. */
526 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
528 struct l2cap_conn *conn = hcon->l2cap_data;
533 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
537 hcon->l2cap_data = conn;
540 BT_DBG("hcon %p conn %p", hcon, conn);
542 conn->mtu = hcon->hdev->acl_mtu;
543 conn->src = &hcon->hdev->bdaddr;
544 conn->dst = &hcon->dst;
548 setup_timer(&conn->info_timer, l2cap_info_timeout,
549 (unsigned long) conn);
551 spin_lock_init(&conn->lock);
552 rwlock_init(&conn->chan_list.lock);
/* 0x13: "remote user terminated connection" default reason. */
554 conn->disc_reason = 0x13;
/* Tear down the l2cap_conn: free any partial reassembly skb, delete every
 * channel with the given error, stop the info timer, detach from hcon. */
559 static void l2cap_conn_del(struct hci_conn *hcon, int err)
561 struct l2cap_conn *conn = hcon->l2cap_data;
567 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
569 kfree_skb(conn->rx_skb);
572 while ((sk = conn->chan_list.head)) {
574 l2cap_chan_del(sk, err);
579 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
580 del_timer_sync(&conn->info_timer);
582 hcon->l2cap_data = NULL;
/* Locked wrapper: add a channel under the chan_list write lock. */
586 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
588 struct l2cap_chan_list *l = &conn->chan_list;
589 write_lock_bh(&l->lock);
590 __l2cap_chan_add(conn, sk, parent);
591 write_unlock_bh(&l->lock);
594 /* ---- Socket interface ---- */
/* Exact-match lookup on (bound PSM, source bdaddr) — used to detect bind
 * conflicts. Caller holds l2cap_sk_list.lock. */
595 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
598 struct hlist_node *node;
599 sk_for_each(sk, node, &l2cap_sk_list.head)
600 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
607 /* Find socket with psm and source bdaddr.
608 * Returns closest match.
/* Prefers an exact source-address match; falls back to a BDADDR_ANY
 * listener (tracked in sk1). Caller holds l2cap_sk_list.lock. */
610 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
612 struct sock *sk = NULL, *sk1 = NULL;
613 struct hlist_node *node;
615 sk_for_each(sk, node, &l2cap_sk_list.head) {
616 if (state && sk->sk_state != state)
619 if (l2cap_pi(sk)->psm == psm) {
621 if (!bacmp(&bt_sk(sk)->src, src))
625 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node non-NULL means the loop broke on an exact match. */
629 return node ? sk : sk1;
632 /* Find socket with given address (psm, src).
633 * Returns locked socket */
634 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
637 read_lock(&l2cap_sk_list.lock);
638 s = __l2cap_get_sock_by_psm(state, psm, src);
641 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct hook: drop any skbs still queued on the socket. */
645 static void l2cap_sock_destruct(struct sock *sk)
649 skb_queue_purge(&sk->sk_receive_queue);
650 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then zap the
 * listener itself. */
653 static void l2cap_sock_cleanup_listen(struct sock *parent)
657 BT_DBG("parent %p", parent);
659 /* Close not yet accepted channels */
660 while ((sk = bt_accept_dequeue(parent, NULL)))
661 l2cap_sock_close(sk);
663 parent->sk_state = BT_CLOSED;
664 sock_set_flag(parent, SOCK_ZAPPED);
667 /* Kill socket (only if zapped and orphan)
668 * Must be called on unlocked socket.
/* No-op unless the socket is both zapped and orphaned (no struct socket
 * attached); then unlink from the global list and drop the last ref. */
670 static void l2cap_sock_kill(struct sock *sk)
672 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
675 BT_DBG("sk %p state %d", sk, sk->sk_state);
677 /* Kill poor orphan */
678 bt_sock_unlink(&l2cap_sk_list, sk);
679 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close. Listening sockets reap their accept queue.
 * Connected/configuring SEQPACKET channels send a Disconnect Request and
 * wait in BT_DISCONN under the send timeout; BT_CONNECT2 channels that
 * were never accepted answer the pending Connection Request with a
 * refusal first. Everything else is just deleted/zapped.
 * Caller must hold the socket lock. */
683 static void __l2cap_sock_close(struct sock *sk, int reason)
685 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
687 switch (sk->sk_state) {
689 l2cap_sock_cleanup_listen(sk);
694 if (sk->sk_type == SOCK_SEQPACKET) {
695 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
697 sk->sk_state = BT_DISCONN;
698 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
699 l2cap_send_disconn_req(conn, sk);
701 l2cap_chan_del(sk, reason);
705 if (sk->sk_type == SOCK_SEQPACKET) {
706 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
707 struct l2cap_conn_rsp rsp;
/* Refuse the still-pending incoming connection: security block if the
 * app deferred setup, otherwise claim an unknown PSM. */
710 if (bt_sk(sk)->defer_setup)
711 result = L2CAP_CR_SEC_BLOCK;
713 result = L2CAP_CR_BAD_PSM;
715 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
716 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
717 rsp.result = cpu_to_le16(result);
718 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
719 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
720 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
722 l2cap_chan_del(sk, reason);
727 l2cap_chan_del(sk, reason);
731 sock_set_flag(sk, SOCK_ZAPPED);
736 /* Must be called on unlocked socket. */
/* Locking wrapper: stop the timer then close with ECONNRESET. */
737 static void l2cap_sock_close(struct sock *sk)
739 l2cap_sock_clear_timer(sk);
741 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize per-socket L2CAP state: children inherit their listener's
 * settings, fresh sockets get the defaults (basic mode, CRC16 FCS,
 * default MTU and flush timeout, low security). */
746 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
748 struct l2cap_pinfo *pi = l2cap_pi(sk);
753 sk->sk_type = parent->sk_type;
754 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
756 pi->imtu = l2cap_pi(parent)->imtu;
757 pi->omtu = l2cap_pi(parent)->omtu;
758 pi->mode = l2cap_pi(parent)->mode;
759 pi->fcs = l2cap_pi(parent)->fcs;
760 pi->sec_level = l2cap_pi(parent)->sec_level;
761 pi->role_switch = l2cap_pi(parent)->role_switch;
762 pi->force_reliable = l2cap_pi(parent)->force_reliable;
764 pi->imtu = L2CAP_DEFAULT_MTU;
766 pi->mode = L2CAP_MODE_BASIC;
767 pi->fcs = L2CAP_FCS_CRC16;
768 pi->sec_level = BT_SECURITY_LOW;
770 pi->force_reliable = 0;
773 /* Default config options */
775 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Protocol descriptor registered with the Bluetooth socket layer. */
778 static struct proto l2cap_proto = {
780 .owner = THIS_MODULE,
781 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialize an L2CAP sock: destructor, connect
 * timeout, timer, and linkage into the global socket list. */
784 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
788 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
792 sock_init_data(sock, sk);
793 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
795 sk->sk_destruct = l2cap_sock_destruct;
796 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
798 sock_reset_flag(sk, SOCK_ZAPPED);
800 sk->sk_protocol = proto;
801 sk->sk_state = BT_OPEN;
803 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
805 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: only SEQPACKET/DGRAM/RAW types are supported and
 * RAW requires CAP_NET_RAW. */
809 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
813 BT_DBG("sock %p", sock);
815 sock->state = SS_UNCONNECTED;
817 if (sock->type != SOCK_SEQPACKET &&
818 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
819 return -ESOCKTNOSUPPORT;
821 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
824 sock->ops = &l2cap_sock_ops;
826 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
830 l2cap_sock_init(sk, NULL);
/* bind(2): copy in a possibly-short sockaddr_l2, enforce that privileged
 * PSMs (< 0x1001) need CAP_NET_BIND_SERVICE, reject duplicate
 * (psm, bdaddr) bindings, then record source address and PSM. */
834 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
836 struct sock *sk = sock->sk;
837 struct sockaddr_l2 la;
842 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Tolerate short sockaddr from old userspace: zero-fill then copy. */
845 memset(&la, 0, sizeof(la));
846 len = min_t(unsigned int, sizeof(la), alen);
847 memcpy(&la, addr, len);
854 if (sk->sk_state != BT_OPEN) {
859 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
860 !capable(CAP_NET_BIND_SERVICE)) {
865 write_lock_bh(&l2cap_sk_list.lock);
867 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
870 /* Save source address */
871 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
872 l2cap_pi(sk)->psm = la.l2_psm;
873 l2cap_pi(sk)->sport = la.l2_psm;
874 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) start at the SDP security level. */
876 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
877 __le16_to_cpu(la.l2_psm) == 0x0003)
878 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
881 write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve a route to dst, pick an HCI auth type from socket type / PSM /
 * security level, create or reuse the ACL link, attach this channel to
 * it, and either finish immediately (link already up) or arm the connect
 * timer. Returns 0 or a negative errno. */
888 static int l2cap_do_connect(struct sock *sk)
890 bdaddr_t *src = &bt_sk(sk)->src;
891 bdaddr_t *dst = &bt_sk(sk)->dst;
892 struct l2cap_conn *conn;
893 struct hci_conn *hcon;
894 struct hci_dev *hdev;
898 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
901 hdev = hci_get_route(dst, src);
903 return -EHOSTUNREACH;
905 hci_dev_lock_bh(hdev);
/* RAW sockets (pairing helpers) use dedicated bonding. */
909 if (sk->sk_type == SOCK_RAW) {
910 switch (l2cap_pi(sk)->sec_level) {
911 case BT_SECURITY_HIGH:
912 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
914 case BT_SECURITY_MEDIUM:
915 auth_type = HCI_AT_DEDICATED_BONDING;
918 auth_type = HCI_AT_NO_BONDING;
/* SDP PSM never bonds; same mapping as l2cap_check_security(). */
921 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
922 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
923 auth_type = HCI_AT_NO_BONDING_MITM;
925 auth_type = HCI_AT_NO_BONDING;
927 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
928 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
930 switch (l2cap_pi(sk)->sec_level) {
931 case BT_SECURITY_HIGH:
932 auth_type = HCI_AT_GENERAL_BONDING_MITM;
934 case BT_SECURITY_MEDIUM:
935 auth_type = HCI_AT_GENERAL_BONDING;
938 auth_type = HCI_AT_NO_BONDING;
943 hcon = hci_connect(hdev, ACL_LINK, dst,
944 l2cap_pi(sk)->sec_level, auth_type);
948 conn = l2cap_conn_add(hcon, 0);
956 /* Update source addr of the socket */
957 bacpy(src, conn->src);
959 l2cap_chan_add(conn, sk, NULL);
961 sk->sk_state = BT_CONNECT;
962 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
964 if (hcon->state == BT_CONNECTED) {
/* Non-SEQPACKET sockets need no channel setup: connected now. */
965 if (sk->sk_type != SOCK_SEQPACKET) {
966 l2cap_sock_clear_timer(sk);
967 sk->sk_state = BT_CONNECTED;
973 hci_dev_unlock_bh(hdev);
/* connect(2): validate the address and channel mode (ERTM/streaming are
 * presumably gated on enable_ertm — gating line dropped by extraction),
 * reject connects in bad states, store (dst, psm), start the connection
 * and optionally block until BT_CONNECTED. */
978 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
980 struct sock *sk = sock->sk;
981 struct sockaddr_l2 la;
986 if (!addr || addr->sa_family != AF_BLUETOOTH)
989 memset(&la, 0, sizeof(la));
990 len = min_t(unsigned int, sizeof(la), alen);
991 memcpy(&la, addr, len);
/* Connection-oriented sockets must name a PSM. */
998 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
1003 switch (l2cap_pi(sk)->mode) {
1004 case L2CAP_MODE_BASIC:
1006 case L2CAP_MODE_ERTM:
1007 case L2CAP_MODE_STREAMING:
1016 switch (sk->sk_state) {
1020 /* Already connecting */
1024 /* Already connected */
1037 /* Set destination address and psm */
1038 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1039 l2cap_pi(sk)->psm = la.l2_psm;
1041 err = l2cap_do_connect(sk);
1046 err = bt_sock_wait_state(sk, BT_CONNECTED,
1047 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET sockets may listen; if no PSM was bound
 * yet, autobind the first free odd PSM in the dynamic range
 * 0x1001..0x10ff (valid PSMs have the low bit of the low byte set). */
1053 static int l2cap_sock_listen(struct socket *sock, int backlog)
1055 struct sock *sk = sock->sk;
1058 BT_DBG("sk %p backlog %d", sk, backlog);
1062 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1067 switch (l2cap_pi(sk)->mode) {
1068 case L2CAP_MODE_BASIC:
1070 case L2CAP_MODE_ERTM:
1071 case L2CAP_MODE_STREAMING:
1080 if (!l2cap_pi(sk)->psm) {
1081 bdaddr_t *src = &bt_sk(sk)->src;
1086 write_lock_bh(&l2cap_sk_list.lock);
1088 for (psm = 0x1001; psm < 0x1100; psm += 2)
1089 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1090 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1091 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1096 write_unlock_bh(&l2cap_sk_list.lock);
1102 sk->sk_max_ack_backlog = backlog;
1103 sk->sk_ack_backlog = 0;
1104 sk->sk_state = BT_LISTEN;
/* accept(2): standard wake-one wait loop on the listener's accept queue.
 * The socket lock is dropped around schedule_timeout() (release presumably
 * in a dropped line) and re-taken afterwards; the loop re-checks the
 * listen state, timeout and signals each iteration. */
1111 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1113 DECLARE_WAITQUEUE(wait, current);
1114 struct sock *sk = sock->sk, *nsk;
1118 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1120 if (sk->sk_state != BT_LISTEN) {
1125 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1127 BT_DBG("sk %p timeo %ld", sk, timeo);
1129 /* Wait for an incoming connection. (wake-one). */
1130 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1131 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1132 set_current_state(TASK_INTERRUPTIBLE);
1139 timeo = schedule_timeout(timeo);
1140 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1142 if (sk->sk_state != BT_LISTEN) {
1147 if (signal_pending(current)) {
1148 err = sock_intr_errno(timeo);
1152 set_current_state(TASK_RUNNING);
1153 remove_wait_queue(sk->sk_sleep, &wait);
1158 newsock->state = SS_CONNECTED;
1160 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with either the peer's
 * (psm, bdaddr, dcid) or the local (sport, bdaddr, scid). */
1167 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1169 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1170 struct sock *sk = sock->sk;
1172 BT_DBG("sock %p, sk %p", sock, sk);
1174 addr->sa_family = AF_BLUETOOTH;
1175 *len = sizeof(struct sockaddr_l2);
1178 la->l2_psm = l2cap_pi(sk)->psm;
1179 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1180 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1182 la->l2_psm = l2cap_pi(sk)->sport;
1183 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1184 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* ERTM monitor timer: the peer stopped acknowledging. Give up and
 * disconnect once retry_count reaches the peer's MaxTransmit; otherwise
 * re-arm the monitor and poll with an RR S-frame (P bit set). */
1190 static void l2cap_monitor_timeout(unsigned long arg)
1192 struct sock *sk = (void *) arg;
1196 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1197 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1201 l2cap_pi(sk)->retry_count++;
1202 __mod_monitor_timer();
1204 control = L2CAP_CTRL_POLL;
1205 control |= L2CAP_SUPER_RCV_READY;
1206 l2cap_send_sframe(l2cap_pi(sk), control);
/* ERTM retransmission timer: first ack timeout — switch to the monitor
 * timer, enter the WAIT_F state and poll the peer with RR + P bit. */
1210 static void l2cap_retrans_timeout(unsigned long arg)
1212 struct sock *sk = (void *) arg;
1216 l2cap_pi(sk)->retry_count = 1;
1217 __mod_monitor_timer();
1219 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1221 control = L2CAP_CTRL_POLL;
1222 control |= L2CAP_SUPER_RCV_READY;
1223 l2cap_send_sframe(l2cap_pi(sk), control);
/* Free frames from the head of the TX queue up to (not including) the
 * expected_ack_seq just acknowledged by the peer; stop the retransmission
 * timer once nothing is outstanding. */
1227 static void l2cap_drop_acked_frames(struct sock *sk)
1229 struct sk_buff *skb;
1231 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1232 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1235 skb = skb_dequeue(TX_QUEUE(sk));
1238 l2cap_pi(sk)->unacked_frames--;
1241 if (!l2cap_pi(sk)->unacked_frames)
1242 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand one fully-built PDU to the ACL layer. */
1247 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1249 struct l2cap_pinfo *pi = l2cap_pi(sk);
1252 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1254 err = hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode transmit: send every queued frame in order, stamping
 * TxSeq (mod 64) into each clone's control field and recomputing the
 * trailing CRC16 FCS when enabled; no retransmission, frames are dequeued
 * and freed immediately after sending. */
1261 static int l2cap_streaming_send(struct sock *sk)
1263 struct sk_buff *skb, *tx_skb;
1264 struct l2cap_pinfo *pi = l2cap_pi(sk);
1268 while ((skb = sk->sk_send_head)) {
1269 tx_skb = skb_clone(skb, GFP_ATOMIC);
1271 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1272 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1273 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1275 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
/* FCS covers the frame minus its own 2 trailing bytes. */
1276 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1277 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1280 err = l2cap_do_send(sk, tx_skb);
1282 l2cap_send_disconn_req(pi->conn, sk);
1286 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1288 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1289 sk->sk_send_head = NULL;
1291 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1293 skb = skb_dequeue(TX_QUEUE(sk));
/* ERTM retransmission of a single I-frame: walk the TX queue to find the
 * frame with the requested tx_seq, disconnect if it already hit the
 * peer's MaxTransmit, otherwise clone it, restamp ReqSeq/TxSeq and the
 * CRC16 FCS in the clone, and resend. */
1299 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1301 struct l2cap_pinfo *pi = l2cap_pi(sk);
1302 struct sk_buff *skb, *tx_skb;
1306 skb = skb_peek(TX_QUEUE(sk));
1308 if (bt_cb(skb)->tx_seq != tx_seq) {
1309 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1311 skb = skb_queue_next(TX_QUEUE(sk), skb);
1315 if (pi->remote_max_tx &&
1316 bt_cb(skb)->retries == pi->remote_max_tx) {
1317 l2cap_send_disconn_req(pi->conn, sk);
1321 tx_skb = skb_clone(skb, GFP_ATOMIC);
1322 bt_cb(skb)->retries++;
1323 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1324 control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1325 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1326 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1328 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1329 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1330 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1333 err = l2cap_do_send(sk, tx_skb);
1335 l2cap_send_disconn_req(pi->conn, sk);
1343 static int l2cap_ertm_send(struct sock *sk)
1345 struct sk_buff *skb, *tx_skb;
1346 struct l2cap_pinfo *pi = l2cap_pi(sk);
1350 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1353 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1354 tx_skb = skb_clone(skb, GFP_ATOMIC);
1356 if (pi->remote_max_tx &&
1357 bt_cb(skb)->retries == pi->remote_max_tx) {
1358 l2cap_send_disconn_req(pi->conn, sk);
1362 bt_cb(skb)->retries++;
1364 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1365 control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1366 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1367 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1370 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1371 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1372 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1375 err = l2cap_do_send(sk, tx_skb);
1377 l2cap_send_disconn_req(pi->conn, sk);
1380 __mod_retrans_timer();
1382 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1383 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1385 pi->unacked_frames++;
1387 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1388 sk->sk_send_head = NULL;
1390 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Copy a user iovec into skb: 'count' bytes into the headed skb, then the
 * remainder as MTU-sized continuation fragments chained on frag_list
 * (no L2CAP header on fragments). Returns total copied or -EFAULT. */
1396 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1398 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1399 struct sk_buff **frag;
1402 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1409 /* Continuation fragments (no L2CAP header) */
1410 frag = &skb_shinfo(skb)->frag_list;
1412 count = min_t(unsigned int, conn->mtu, len);
1414 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1417 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1423 frag = &(*frag)->next;
/* Build a connectionless-channel PDU: L2CAP header + 2-byte PSM prefix,
 * then the user data (fragmented via l2cap_skbuff_fromiovec). Returns the
 * skb or an ERR_PTR. */
1429 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1431 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1432 struct sk_buff *skb;
1433 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1434 struct l2cap_hdr *lh;
1436 BT_DBG("sk %p len %d", sk, (int)len);
1438 count = min_t(unsigned int, (conn->mtu - hlen), len);
1439 skb = bt_skb_send_alloc(sk, count + hlen,
1440 msg->msg_flags & MSG_DONTWAIT, &err);
1442 return ERR_PTR(-ENOMEM);
1444 /* Create L2CAP header */
1445 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1446 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1447 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1448 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1450 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1451 if (unlikely(err < 0)) {
1453 return ERR_PTR(err);
/* Build a basic-mode PDU: plain L2CAP header + user data. */
1458 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1460 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1461 struct sk_buff *skb;
1462 int err, count, hlen = L2CAP_HDR_SIZE;
1463 struct l2cap_hdr *lh;
1465 BT_DBG("sk %p len %d", sk, (int)len);
1467 count = min_t(unsigned int, (conn->mtu - hlen), len);
1468 skb = bt_skb_send_alloc(sk, count + hlen,
1469 msg->msg_flags & MSG_DONTWAIT, &err);
1471 return ERR_PTR(-ENOMEM);
1473 /* Create L2CAP header */
1474 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1475 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1476 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1478 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1479 if (unlikely(err < 0)) {
1481 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header + 16-bit control
 * field, an optional 2-byte SDU length (SAR start frames, sdulen != 0),
 * the payload, and a 2-byte FCS placeholder (filled in at send time).
 * The real control/FCS values are stamped by the send paths. */
1486 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1488 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1489 struct sk_buff *skb;
1490 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1491 struct l2cap_hdr *lh;
1493 BT_DBG("sk %p len %d", sk, (int)len);
1498 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1501 count = min_t(unsigned int, (conn->mtu - hlen), len);
1502 skb = bt_skb_send_alloc(sk, count + hlen,
1503 msg->msg_flags & MSG_DONTWAIT, &err);
1505 return ERR_PTR(-ENOMEM);
1507 /* Create L2CAP header */
1508 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1509 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1510 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1511 put_unaligned_le16(control, skb_put(skb, 2));
1513 put_unaligned_le16(sdulen, skb_put(skb, 2));
1515 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1516 if (unlikely(err < 0)) {
1518 return ERR_PTR(err);
/* Reserve space for the FCS; the send path overwrites it. */
1521 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1522 put_unaligned_le16(0, skb_put(skb, 2));
1524 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than max_pdu_size into a START frame followed by
 * CONTINUE frames and a final END frame, built on a private queue and
 * spliced onto the TX queue only once all segments allocate successfully.
 * The START frame carries the total SDU length. */
1528 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1530 struct l2cap_pinfo *pi = l2cap_pi(sk);
1531 struct sk_buff *skb;
1532 struct sk_buff_head sar_queue;
1536 __skb_queue_head_init(&sar_queue);
1537 control = L2CAP_SDU_START;
1538 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1540 return PTR_ERR(skb);
1542 __skb_queue_tail(&sar_queue, skb);
1543 len -= pi->max_pdu_size;
1544 size +=pi->max_pdu_size;
1550 if (len > pi->max_pdu_size) {
1551 control |= L2CAP_SDU_CONTINUE;
1552 buflen = pi->max_pdu_size;
1554 control |= L2CAP_SDU_END;
1558 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* Allocation failure mid-SDU: drop all segments built so far. */
1560 skb_queue_purge(&sar_queue);
1561 return PTR_ERR(skb);
1564 __skb_queue_tail(&sar_queue, skb);
1569 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1570 if (sk->sk_send_head == NULL)
1571 sk->sk_send_head = sar_queue.next;
/* sendmsg(2): reject OOB, enforce the outgoing MTU in basic mode, and
 * dispatch by channel type/mode — connectionless PDU for DGRAM sockets,
 * a single basic PDU in basic mode, or (ERTM/streaming) either one
 * unsegmented I-frame or a SAR-segmented train queued for the mode's
 * transmit routine. */
1576 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1578 struct sock *sk = sock->sk;
1579 struct l2cap_pinfo *pi = l2cap_pi(sk);
1580 struct sk_buff *skb;
1584 BT_DBG("sock %p, sk %p", sock, sk);
1586 err = sock_error(sk);
1590 if (msg->msg_flags & MSG_OOB)
1593 /* Check outgoing MTU */
1594 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC
1600 if (sk->sk_state != BT_CONNECTED) {
1605 /* Connectionless channel */
1606 if (sk->sk_type == SOCK_DGRAM) {
1607 skb = l2cap_create_connless_pdu(sk, msg, len);
1608 err = l2cap_do_send(sk, skb);
1613 case L2CAP_MODE_BASIC:
1614 /* Create a basic PDU */
1615 skb = l2cap_create_basic_pdu(sk, msg, len);
1621 err = l2cap_do_send(sk, skb);
1626 case L2CAP_MODE_ERTM:
1627 case L2CAP_MODE_STREAMING:
1628 /* Entire SDU fits into one PDU */
1629 if (len <= pi->max_pdu_size) {
1630 control = L2CAP_SDU_UNSEGMENTED;
1631 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1636 __skb_queue_tail(TX_QUEUE(sk), skb);
1637 if (sk->sk_send_head == NULL)
1638 sk->sk_send_head = skb;
1640 /* Segment SDU into multiples PDUs */
1641 err = l2cap_sar_segment_sdu(sk, msg, len);
1646 if (pi->mode == L2CAP_MODE_STREAMING)
1647 err = l2cap_streaming_send(sk);
1649 err = l2cap_ertm_send(sk);
1656 BT_DBG("bad state %1.1x", pi->mode);
/*
 * recvmsg() entry point.  With deferred setup enabled, the first read on
 * a BT_CONNECT2 socket completes the pending connection: move to
 * BT_CONFIG and send the delayed L2CAP connect response.  Actual data
 * reception is delegated to the generic bt_sock_recvmsg().
 */
1665 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1667 struct sock *sk = sock->sk;
1671 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1672 struct l2cap_conn_rsp rsp;
1674 sk->sk_state = BT_CONFIG;
1676 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1677 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1678 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1679 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1680 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1681 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1689 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/*
 * Legacy SOL_L2CAP setsockopt handler (switch labels elided in this
 * extract).  One branch updates channel options (MTU/mode/FCS) from a
 * user-supplied struct l2cap_options; another maps L2CAP_LM_* link-mode
 * bits onto sec_level/role_switch/force_reliable.
 */
1692 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
1694 struct sock *sk = sock->sk;
1695 struct l2cap_options opts;
1699 BT_DBG("sk %p", sk);
/* Seed 'opts' with current values so a short copy keeps the rest. */
1705 opts.imtu = l2cap_pi(sk)->imtu;
1706 opts.omtu = l2cap_pi(sk)->omtu;
1707 opts.flush_to = l2cap_pi(sk)->flush_to;
1708 opts.mode = l2cap_pi(sk)->mode;
1709 opts.fcs = l2cap_pi(sk)->fcs;
1711 len = min_t(unsigned int, sizeof(opts), optlen);
1712 if (copy_from_user((char *) &opts, optval, len)) {
1717 l2cap_pi(sk)->imtu = opts.imtu;
1718 l2cap_pi(sk)->omtu = opts.omtu;
1719 l2cap_pi(sk)->mode = opts.mode;
1720 l2cap_pi(sk)->fcs = opts.fcs;
1724 if (get_user(opt, (u32 __user *) optval)) {
/* Highest matching L2CAP_LM_* flag wins for the security level. */
1729 if (opt & L2CAP_LM_AUTH)
1730 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1731 if (opt & L2CAP_LM_ENCRYPT)
1732 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1733 if (opt & L2CAP_LM_SECURE)
1734 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1736 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1737 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/*
 * SOL_BLUETOOTH setsockopt handler.  SOL_L2CAP is forwarded to the
 * legacy handler; other levels are rejected.  Handles BT_SECURITY
 * (bounded security level) and BT_DEFER_SETUP (only valid while bound
 * or listening).
 */
1749 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1751 struct sock *sk = sock->sk;
1752 struct bt_security sec;
1756 BT_DBG("sk %p", sk);
1758 if (level == SOL_L2CAP)
1759 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1761 if (level != SOL_BLUETOOTH)
1762 return -ENOPROTOOPT;
/* BT_SECURITY only applies to seqpacket and raw L2CAP sockets. */
1768 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1773 sec.level = BT_SECURITY_LOW;
1775 len = min_t(unsigned int, sizeof(sec), optlen);
1776 if (copy_from_user((char *) &sec, optval, len)) {
1781 if (sec.level < BT_SECURITY_LOW ||
1782 sec.level > BT_SECURITY_HIGH) {
1787 l2cap_pi(sk)->sec_level = sec.level;
1790 case BT_DEFER_SETUP:
1791 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1796 if (get_user(opt, (u32 __user *) optval)) {
1801 bt_sk(sk)->defer_setup = opt;
/*
 * Legacy SOL_L2CAP getsockopt handler (switch labels elided in this
 * extract).  Branches return the current l2cap_options, the L2CAP_LM_*
 * bitmask derived from sec_level/role_switch/force_reliable, and
 * L2CAP_CONNINFO (HCI handle + device class) for connected channels.
 */
1813 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1815 struct sock *sk = sock->sk;
1816 struct l2cap_options opts;
1817 struct l2cap_conninfo cinfo;
1821 BT_DBG("sk %p", sk);
1823 if (get_user(len, optlen))
1830 opts.imtu = l2cap_pi(sk)->imtu;
1831 opts.omtu = l2cap_pi(sk)->omtu;
1832 opts.flush_to = l2cap_pi(sk)->flush_to;
1833 opts.mode = l2cap_pi(sk)->mode;
1834 opts.fcs = l2cap_pi(sk)->fcs;
1836 len = min_t(unsigned int, len, sizeof(opts));
1837 if (copy_to_user(optval, (char *) &opts, len))
/* Map the internal security level back onto legacy LM flags. */
1843 switch (l2cap_pi(sk)->sec_level) {
1844 case BT_SECURITY_LOW:
1845 opt = L2CAP_LM_AUTH;
1847 case BT_SECURITY_MEDIUM:
1848 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1850 case BT_SECURITY_HIGH:
1851 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1859 if (l2cap_pi(sk)->role_switch)
1860 opt |= L2CAP_LM_MASTER;
1862 if (l2cap_pi(sk)->force_reliable)
1863 opt |= L2CAP_LM_RELIABLE;
1865 if (put_user(opt, (u32 __user *) optval))
1869 case L2CAP_CONNINFO:
/* CONNINFO is valid when connected, or in deferred-setup CONNECT2. */
1870 if (sk->sk_state != BT_CONNECTED &&
1871 !(sk->sk_state == BT_CONNECT2 &&
1872 bt_sk(sk)->defer_setup)) {
1877 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1878 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1880 len = min_t(unsigned int, len, sizeof(cinfo));
1881 if (copy_to_user(optval, (char *) &cinfo, len))
/*
 * SOL_BLUETOOTH getsockopt handler.  SOL_L2CAP is forwarded to the
 * legacy handler; other levels are rejected.  Returns BT_SECURITY
 * (current sec_level) and BT_DEFER_SETUP (bound/listening only).
 */
1895 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1897 struct sock *sk = sock->sk;
1898 struct bt_security sec;
1901 BT_DBG("sk %p", sk);
1903 if (level == SOL_L2CAP)
1904 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1906 if (level != SOL_BLUETOOTH)
1907 return -ENOPROTOOPT;
1909 if (get_user(len, optlen))
1916 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1921 sec.level = l2cap_pi(sk)->sec_level;
1923 len = min_t(unsigned int, len, sizeof(sec));
1924 if (copy_to_user(optval, (char *) &sec, len))
1929 case BT_DEFER_SETUP:
1930 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1935 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/*
 * shutdown() entry point: on the first call, mark both directions shut,
 * cancel the channel timer and start closing the channel; if SO_LINGER
 * is set, wait (up to sk_lingertime) for the socket to reach BT_CLOSED.
 */
1949 static int l2cap_sock_shutdown(struct socket *sock, int how)
1951 struct sock *sk = sock->sk;
1954 BT_DBG("sock %p, sk %p", sock, sk);
1960 if (!sk->sk_shutdown) {
1961 sk->sk_shutdown = SHUTDOWN_MASK;
1962 l2cap_sock_clear_timer(sk);
1963 __l2cap_sock_close(sk, 0);
1965 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1966 err = bt_sock_wait_state(sk, BT_CLOSED,
1973 static int l2cap_sock_release(struct socket *sock)
1975 struct sock *sk = sock->sk;
1978 BT_DBG("sock %p, sk %p", sock, sk);
1983 err = l2cap_sock_shutdown(sock, 2);
1986 l2cap_sock_kill(sk);
1990 static void l2cap_chan_ready(struct sock *sk)
1992 struct sock *parent = bt_sk(sk)->parent;
1994 BT_DBG("sk %p, parent %p", sk, parent);
1996 l2cap_pi(sk)->conf_state = 0;
1997 l2cap_sock_clear_timer(sk);
2000 /* Outgoing channel.
2001 * Wake up socket sleeping on connect.
2003 sk->sk_state = BT_CONNECTED;
2004 sk->sk_state_change(sk);
2006 /* Incoming channel.
2007 * Wake up socket sleeping on accept.
2009 parent->sk_data_ready(parent, 0);
2013 /* Copy frame to all raw sockets on that connection */
2014 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2016 struct l2cap_chan_list *l = &conn->chan_list;
2017 struct sk_buff *nskb;
2020 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under the read lock and hand a
 * clone of the frame to every SOCK_RAW socket. */
2022 read_lock(&l->lock);
2023 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2024 if (sk->sk_type != SOCK_RAW)
2027 /* Don't send frame to the socket it came from */
2030 nskb = skb_clone(skb, GFP_ATOMIC);
/* If the receive queue rejects the clone it is simply dropped. */
2034 if (sock_queue_rcv_skb(sk, nskb))
2037 read_unlock(&l->lock);
2040 /* ---- L2CAP signalling commands ---- */
/*
 * Build a signalling-channel command skb: basic L2CAP header (CID
 * L2CAP_CID_SIGNALING) + command header + 'dlen' bytes of payload.
 * Payload larger than the connection MTU continues in fragment skbs
 * chained through frag_list (no L2CAP header on continuations).
 */
2041 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2042 u8 code, u8 ident, u16 dlen, void *data)
2044 struct sk_buff *skb, **frag;
2045 struct l2cap_cmd_hdr *cmd;
2046 struct l2cap_hdr *lh;
2049 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2050 conn, code, ident, dlen);
2052 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2053 count = min_t(unsigned int, conn->mtu, len);
2055 skb = bt_skb_alloc(count, GFP_ATOMIC);
2059 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2060 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2061 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2063 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2066 cmd->len = cpu_to_le16(dlen);
/* First fragment carries whatever payload fits after the headers. */
2069 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2070 memcpy(skb_put(skb, count), data, count);
2076 /* Continuation fragments (no L2CAP header) */
2077 frag = &skb_shinfo(skb)->frag_list;
2079 count = min_t(unsigned int, conn->mtu, len);
2081 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2085 memcpy(skb_put(*frag, count), data, count);
2090 frag = &(*frag)->next;
/*
 * Decode one configuration option at *ptr: returns its total encoded
 * length and fills *type/*olen/*val.  1/2/4-byte values are copied (LE
 * converted); anything else is returned as a pointer into the buffer.
 * NOTE(review): no bounds check against the remaining buffer is visible
 * in this extract — presumably the caller limits 'len'; verify.
 */
2100 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2102 struct l2cap_conf_opt *opt = *ptr;
2105 len = L2CAP_CONF_OPT_SIZE + opt->len;
2113 *val = *((u8 *) opt->val);
2117 *val = __le16_to_cpu(*((__le16 *) opt->val));
2121 *val = __le32_to_cpu(*((__le32 *) opt->val));
2125 *val = (unsigned long) opt->val;
2129 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option at *ptr and advance the cursor.
 * 1/2/4-byte values are stored inline (LE converted); other lengths
 * treat 'val' as a pointer to 'len' bytes to copy.
 */
2133 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2135 struct l2cap_conf_opt *opt = *ptr;
2137 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2144 *((u8 *) opt->val) = val;
2148 *((__le16 *) opt->val) = cpu_to_le16(val);
2152 *((__le32 *) opt->val) = cpu_to_le32(val);
2156 memcpy(opt->val, (void *) val, len);
2160 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * Check whether 'mode' is supported by both peers: intersects the remote
 * feature mask with our local one (ERTM/streaming bits are added to the
 * local mask when the module enables them).
 */
2163 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2165 u32 local_feat_mask = l2cap_feat_mask;
2167 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2170 case L2CAP_MODE_ERTM:
2171 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2172 case L2CAP_MODE_STREAMING:
2173 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/*
 * Pick the channel mode to request: keep the preferred ERTM/streaming
 * mode when the remote supports it, otherwise fall back to basic mode.
 */
2179 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2182 case L2CAP_MODE_STREAMING:
2183 case L2CAP_MODE_ERTM:
2184 if (l2cap_mode_supported(mode, remote_feat_mask))
2188 return L2CAP_MODE_BASIC;
/*
 * Build an outgoing configuration request into 'data'.  Selects the
 * channel mode (first request only), then adds the mode-specific
 * options: MTU for basic mode, or an RFC option (plus optional FCS
 * opt-out) for ERTM/streaming.  Returns the encoded request length.
 */
2192 static int l2cap_build_conf_req(struct sock *sk, void *data)
2194 struct l2cap_pinfo *pi = l2cap_pi(sk);
2195 struct l2cap_conf_req *req = data;
2196 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM };
2197 void *ptr = req->data;
2199 BT_DBG("sk %p", sk);
/* Mode selection happens only before any config exchange took place. */
2201 if (pi->num_conf_req || pi->num_conf_rsp)
2205 case L2CAP_MODE_STREAMING:
2206 case L2CAP_MODE_ERTM:
2207 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2208 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2209 l2cap_send_disconn_req(pi->conn, sk);
2212 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2218 case L2CAP_MODE_BASIC:
/* Only a non-default MTU needs to be advertised explicitly. */
2219 if (pi->imtu != L2CAP_DEFAULT_MTU)
2220 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2223 case L2CAP_MODE_ERTM:
2224 rfc.mode = L2CAP_MODE_ERTM;
2225 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2226 rfc.max_transmit = L2CAP_DEFAULT_MAX_TX;
/* Timeouts in the request are 0; the acceptor fills in real values. */
2227 rfc.retrans_timeout = 0;
2228 rfc.monitor_timeout = 0;
2229 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2231 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2232 sizeof(rfc), (unsigned long) &rfc);
2234 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
/* Request no-FCS only when allowed locally or offered by the peer. */
2237 if (pi->fcs == L2CAP_FCS_NONE ||
2238 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2239 pi->fcs = L2CAP_FCS_NONE;
2240 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2244 case L2CAP_MODE_STREAMING:
2245 rfc.mode = L2CAP_MODE_STREAMING;
2247 rfc.max_transmit = 0;
2248 rfc.retrans_timeout = 0;
2249 rfc.monitor_timeout = 0;
2250 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2252 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2253 sizeof(rfc), (unsigned long) &rfc);
2255 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2258 if (pi->fcs == L2CAP_FCS_NONE ||
2259 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2260 pi->fcs = L2CAP_FCS_NONE;
2261 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2266 /* FIXME: Need actual value of the flush timeout */
2267 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2268 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2270 req->dcid = cpu_to_le16(pi->dcid);
2271 req->flags = cpu_to_le16(0);
/*
 * Parse the accumulated peer configuration request (pi->conf_req) and
 * build our response into 'data'.  Walks the option list, records
 * MTU/RFC/FCS requests, validates the requested mode against our own,
 * then emits the accepted (or corrected) options.  Returns the encoded
 * response length, or -ECONNREFUSED when the modes cannot be agreed.
 */
2276 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2278 struct l2cap_pinfo *pi = l2cap_pi(sk);
2279 struct l2cap_conf_rsp *rsp = data;
2280 void *ptr = rsp->data;
2281 void *req = pi->conf_req;
2282 int len = pi->conf_len;
2283 int type, hint, olen;
2285 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2286 u16 mtu = L2CAP_DEFAULT_MTU;
2287 u16 result = L2CAP_CONF_SUCCESS;
2289 BT_DBG("sk %p", sk);
2291 while (len >= L2CAP_CONF_OPT_SIZE) {
2292 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; only non-hint unknowns are rejected. */
2294 hint = type & L2CAP_CONF_HINT;
2295 type &= L2CAP_CONF_MASK;
2298 case L2CAP_CONF_MTU:
2302 case L2CAP_CONF_FLUSH_TO:
2306 case L2CAP_CONF_QOS:
2309 case L2CAP_CONF_RFC:
2310 if (olen == sizeof(rfc))
2311 memcpy(&rfc, (void *) val, olen);
2314 case L2CAP_CONF_FCS:
2315 if (val == L2CAP_FCS_NONE)
2316 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: echo its type back with CONF_UNKNOWN. */
2324 result = L2CAP_CONF_UNKNOWN;
2325 *((u8 *) ptr++) = type;
/* Mode negotiation, first exchange only. */
2330 if (pi->num_conf_rsp || pi->num_conf_req)
2334 case L2CAP_MODE_STREAMING:
2335 case L2CAP_MODE_ERTM:
2336 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2337 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2338 return -ECONNREFUSED;
2341 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2346 if (pi->mode != rfc.mode) {
2347 result = L2CAP_CONF_UNACCEPT;
2348 rfc.mode = pi->mode;
/* Second failed mode proposal means no agreement is possible. */
2350 if (pi->num_conf_rsp == 1)
2351 return -ECONNREFUSED;
2353 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2354 sizeof(rfc), (unsigned long) &rfc);
2358 if (result == L2CAP_CONF_SUCCESS) {
2359 /* Configure output options and let the other side know
2360 * which ones we don't like. */
2362 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2363 result = L2CAP_CONF_UNACCEPT;
2366 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2368 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2371 case L2CAP_MODE_BASIC:
2372 pi->fcs = L2CAP_FCS_NONE;
2373 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2376 case L2CAP_MODE_ERTM:
/* Adopt the peer's transmit parameters, supply our timeouts. */
2377 pi->remote_tx_win = rfc.txwin_size;
2378 pi->remote_max_tx = rfc.max_transmit;
2379 pi->max_pdu_size = rfc.max_pdu_size;
2381 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2382 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2384 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2387 case L2CAP_MODE_STREAMING:
2388 pi->remote_tx_win = rfc.txwin_size;
2389 pi->max_pdu_size = rfc.max_pdu_size;
2391 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2395 result = L2CAP_CONF_UNACCEPT;
2397 memset(&rfc, 0, sizeof(rfc));
2398 rfc.mode = pi->mode;
2401 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2402 sizeof(rfc), (unsigned long) &rfc);
2404 if (result == L2CAP_CONF_SUCCESS)
2405 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2407 rsp->scid = cpu_to_le16(pi->dcid);
2408 rsp->result = cpu_to_le16(result);
2409 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse a peer configuration response and build the follow-up request
 * into 'data'.  Adjusts MTU/flush-timeout/RFC options the peer pushed
 * back, updating *result; on success adopts the negotiated ERTM or
 * streaming parameters.  Returns the encoded request length or
 * -ECONNREFUSED if the peer tries to change an already-fixed mode.
 */
2414 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2416 struct l2cap_pinfo *pi = l2cap_pi(sk);
2417 struct l2cap_conf_req *req = data;
2418 void *ptr = req->data;
2421 struct l2cap_conf_rfc rfc;
2423 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2425 while (len >= L2CAP_CONF_OPT_SIZE) {
2426 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2429 case L2CAP_CONF_MTU:
/* Clamp an under-minimum MTU and mark the response unacceptable. */
2430 if (val < L2CAP_DEFAULT_MIN_MTU) {
2431 *result = L2CAP_CONF_UNACCEPT;
2432 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2435 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2438 case L2CAP_CONF_FLUSH_TO:
2440 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2444 case L2CAP_CONF_RFC:
2445 if (olen == sizeof(rfc))
2446 memcpy(&rfc, (void *)val, olen);
/* A device-fixed mode (STATE2_DEVICE) must not be renegotiated. */
2448 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2449 rfc.mode != pi->mode)
2450 return -ECONNREFUSED;
2452 pi->mode = rfc.mode;
2455 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2456 sizeof(rfc), (unsigned long) &rfc);
2461 if (*result == L2CAP_CONF_SUCCESS) {
2463 case L2CAP_MODE_ERTM:
2464 pi->remote_tx_win = rfc.txwin_size;
2465 pi->retrans_timeout = rfc.retrans_timeout;
2466 pi->monitor_timeout = rfc.monitor_timeout;
2467 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2469 case L2CAP_MODE_STREAMING:
2470 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2475 req->dcid = cpu_to_le16(pi->dcid);
2476 req->flags = cpu_to_le16(0x0000);
/*
 * Fill in a minimal configuration response header (source CID, result,
 * continuation flags) and return its encoded length.
 */
2481 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2483 struct l2cap_conf_rsp *rsp = data;
2484 void *ptr = rsp->data;
2486 BT_DBG("sk %p", sk);
2488 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2489 rsp->result = cpu_to_le16(result);
2490 rsp->flags = cpu_to_le16(flags);
/*
 * Handle an incoming Command Reject.  If it answers our outstanding
 * information request (matching ident), treat the feature-mask exchange
 * as finished and resume starting the queued connections.
 */
2495 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2497 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2499 if (rej->reason != 0x0000)
2502 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2503 cmd->ident == conn->info_ident) {
2504 del_timer(&conn->info_timer);
2506 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2507 conn->info_ident = 0;
2509 l2cap_conn_start(conn);
/*
 * Handle an incoming Connection Request: find a listener for the PSM,
 * enforce link security (except for SDP), allocate and initialise a
 * child socket, add it to the connection's channel list, then answer
 * with success/pending/refused.  When the response is pending with no
 * info yet, also kick off the feature-mask information exchange.
 */
2515 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2517 struct l2cap_chan_list *list = &conn->chan_list;
2518 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2519 struct l2cap_conn_rsp rsp;
2520 struct sock *sk, *parent;
2521 int result, status = L2CAP_CS_NO_INFO;
2523 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2524 __le16 psm = req->psm;
2526 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2528 /* Check if we have socket listening on psm */
2529 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2531 result = L2CAP_CR_BAD_PSM;
2535 /* Check if the ACL is secure enough (if not SDP) */
2536 if (psm != cpu_to_le16(0x0001) &&
2537 !hci_conn_check_link_mode(conn->hcon)) {
2538 conn->disc_reason = 0x05;
2539 result = L2CAP_CR_SEC_BLOCK;
2543 result = L2CAP_CR_NO_MEM;
2545 /* Check for backlog size */
2546 if (sk_acceptq_is_full(parent)) {
2547 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2551 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2555 write_lock_bh(&list->lock);
2557 /* Check if we already have channel with that dcid */
2558 if (__l2cap_get_chan_by_dcid(list, scid)) {
2559 write_unlock_bh(&list->lock);
2560 sock_set_flag(sk, SOCK_ZAPPED);
2561 l2cap_sock_kill(sk);
2565 hci_conn_hold(conn->hcon);
/* Initialise the child from the listener and bind it to this link. */
2567 l2cap_sock_init(sk, parent);
2568 bacpy(&bt_sk(sk)->src, conn->src);
2569 bacpy(&bt_sk(sk)->dst, conn->dst);
2570 l2cap_pi(sk)->psm = psm;
2571 l2cap_pi(sk)->dcid = scid;
2573 __l2cap_chan_add(conn, sk, parent);
2574 dcid = l2cap_pi(sk)->scid;
2576 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2578 l2cap_pi(sk)->ident = cmd->ident;
2580 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2581 if (l2cap_check_security(sk)) {
2582 if (bt_sk(sk)->defer_setup) {
/* Defer-setup: hold at CONNECT2 until userspace accepts/reads. */
2583 sk->sk_state = BT_CONNECT2;
2584 result = L2CAP_CR_PEND;
2585 status = L2CAP_CS_AUTHOR_PEND;
2586 parent->sk_data_ready(parent, 0);
2588 sk->sk_state = BT_CONFIG;
2589 result = L2CAP_CR_SUCCESS;
2590 status = L2CAP_CS_NO_INFO;
2593 sk->sk_state = BT_CONNECT2;
2594 result = L2CAP_CR_PEND;
2595 status = L2CAP_CS_AUTHEN_PEND;
2598 sk->sk_state = BT_CONNECT2;
2599 result = L2CAP_CR_PEND;
2600 status = L2CAP_CS_NO_INFO;
2603 write_unlock_bh(&list->lock);
2606 bh_unlock_sock(parent);
2609 rsp.scid = cpu_to_le16(scid);
2610 rsp.dcid = cpu_to_le16(dcid);
2611 rsp.result = cpu_to_le16(result);
2612 rsp.status = cpu_to_le16(status);
2613 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2615 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2616 struct l2cap_info_req info;
2617 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2619 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2620 conn->info_ident = l2cap_get_ident(conn);
2622 mod_timer(&conn->info_timer, jiffies +
2623 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2625 l2cap_send_cmd(conn, conn->info_ident,
2626 L2CAP_INFO_REQ, sizeof(info), &info);
/*
 * Handle a Connection Response for one of our outgoing requests: look
 * up the channel by source CID (or by ident while still pending).  On
 * success record the remote CID and start configuration; on pending
 * just flag the channel; otherwise tear the channel down.
 */
2632 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2634 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2635 u16 scid, dcid, result, status;
2639 scid = __le16_to_cpu(rsp->scid);
2640 dcid = __le16_to_cpu(rsp->dcid);
2641 result = __le16_to_cpu(rsp->result);
2642 status = __le16_to_cpu(rsp->status);
2644 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2647 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
/* No CID yet (pending response): fall back to the command ident. */
2651 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2657 case L2CAP_CR_SUCCESS:
2658 sk->sk_state = BT_CONFIG;
2659 l2cap_pi(sk)->ident = 0;
2660 l2cap_pi(sk)->dcid = dcid;
2661 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2663 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2665 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2666 l2cap_build_conf_req(sk, req), req);
2667 l2cap_pi(sk)->num_conf_req++;
2671 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2675 l2cap_chan_del(sk, ECONNREFUSED);
/*
 * Handle a Configuration Request.  Options may arrive split across
 * several requests (continuation flag 0x0001); they are accumulated in
 * pi->conf_req.  Once complete, parse them, answer, and — when both
 * directions are configured — arm the ERTM timers and move the channel
 * to BT_CONNECTED.  Sends our own config request if not yet done.
 */
2683 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2685 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2691 dcid = __le16_to_cpu(req->dcid);
2692 flags = __le16_to_cpu(req->flags);
2694 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2696 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2700 if (sk->sk_state == BT_DISCONN)
2703 /* Reject if config buffer is too small. */
2704 len = cmd_len - sizeof(*req);
2705 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2706 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2707 l2cap_build_conf_rsp(sk, rsp,
2708 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment of the option list. */
2713 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2714 l2cap_pi(sk)->conf_len += len;
2716 if (flags & 0x0001) {
2717 /* Incomplete config. Send empty response. */
2718 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2719 l2cap_build_conf_rsp(sk, rsp,
2720 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2724 /* Complete config. */
2725 len = l2cap_parse_conf_req(sk, rsp);
2727 l2cap_send_disconn_req(conn, sk);
2731 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2732 l2cap_pi(sk)->num_conf_rsp++;
2734 /* Reset config buffer. */
2735 l2cap_pi(sk)->conf_len = 0;
2737 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2740 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
/* FCS defaults to CRC16 unless both sides agreed to drop it. */
2741 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
2742 || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2743 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2745 sk->sk_state = BT_CONNECTED;
2746 l2cap_pi(sk)->next_tx_seq = 0;
2747 l2cap_pi(sk)->expected_ack_seq = 0;
2748 l2cap_pi(sk)->unacked_frames = 0;
2750 setup_timer(&l2cap_pi(sk)->retrans_timer,
2751 l2cap_retrans_timeout, (unsigned long) sk);
2752 setup_timer(&l2cap_pi(sk)->monitor_timer,
2753 l2cap_monitor_timeout, (unsigned long) sk);
2755 __skb_queue_head_init(TX_QUEUE(sk));
2756 __skb_queue_head_init(SREJ_QUEUE(sk));
2757 l2cap_chan_ready(sk);
2761 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2763 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2764 l2cap_build_conf_req(sk, buf), buf);
2765 l2cap_pi(sk)->num_conf_req++;
/*
 * Handle a Configuration Response.  UNACCEPT within the retry budget
 * re-parses the pushed-back options and re-sends an adjusted request;
 * any other failure schedules a disconnect.  When the input side
 * completes and the output side is already done, finish channel setup
 * (FCS choice, RX state reset) and signal the channel ready.
 */
2773 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2775 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2776 u16 scid, flags, result;
2779 scid = __le16_to_cpu(rsp->scid);
2780 flags = __le16_to_cpu(rsp->flags);
2781 result = __le16_to_cpu(rsp->result);
2783 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2784 scid, flags, result);
2786 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2791 case L2CAP_CONF_SUCCESS:
2794 case L2CAP_CONF_UNACCEPT:
2795 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2796 int len = cmd->len - sizeof(*rsp);
2799 /* throw out any old stored conf requests */
2800 result = L2CAP_CONF_SUCCESS;
2801 len = l2cap_parse_conf_rsp(sk, rsp->data,
2804 l2cap_send_disconn_req(conn, sk);
2808 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2809 L2CAP_CONF_REQ, len, req);
2810 l2cap_pi(sk)->num_conf_req++;
2811 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable result: flag the error and disconnect shortly. */
2817 sk->sk_state = BT_DISCONN;
2818 sk->sk_err = ECONNRESET;
2819 l2cap_sock_set_timer(sk, HZ * 5);
2820 l2cap_send_disconn_req(conn, sk);
2827 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2829 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2830 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
2831 || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2832 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2834 sk->sk_state = BT_CONNECTED;
2835 l2cap_pi(sk)->expected_tx_seq = 0;
2836 l2cap_pi(sk)->buffer_seq = 0;
2837 l2cap_pi(sk)->num_to_ack = 0;
2838 __skb_queue_head_init(TX_QUEUE(sk));
2839 __skb_queue_head_init(SREJ_QUEUE(sk));
2840 l2cap_chan_ready(sk);
/*
 * Handle a Disconnection Request: acknowledge it with a Disconnection
 * Response, flush the ERTM queues and timers, remove the channel and
 * kill the socket with ECONNRESET.
 */
2848 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2850 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2851 struct l2cap_disconn_rsp rsp;
2855 scid = __le16_to_cpu(req->scid);
2856 dcid = __le16_to_cpu(req->dcid);
2858 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2860 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2864 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2865 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2866 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2868 sk->sk_shutdown = SHUTDOWN_MASK;
/* Drop any pending ERTM traffic and stop the retransmit machinery. */
2870 skb_queue_purge(TX_QUEUE(sk));
2871 skb_queue_purge(SREJ_QUEUE(sk));
2872 del_timer(&l2cap_pi(sk)->retrans_timer);
2873 del_timer(&l2cap_pi(sk)->monitor_timer);
2875 l2cap_chan_del(sk, ECONNRESET);
2878 l2cap_sock_kill(sk);
/*
 * Handle a Disconnection Response to our own request: flush the ERTM
 * queues and timers, remove the channel (no error) and kill the socket.
 */
2882 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2884 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2888 scid = __le16_to_cpu(rsp->scid);
2889 dcid = __le16_to_cpu(rsp->dcid);
2891 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2893 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2897 skb_queue_purge(TX_QUEUE(sk));
2898 skb_queue_purge(SREJ_QUEUE(sk));
2899 del_timer(&l2cap_pi(sk)->retrans_timer);
2900 del_timer(&l2cap_pi(sk)->monitor_timer);
2902 l2cap_chan_del(sk, 0);
2905 l2cap_sock_kill(sk);
/*
 * Handle an Information Request: answer FEAT_MASK with our feature
 * bitmap (ERTM/streaming bits added when enabled), FIXED_CHAN with the
 * fixed-channel map, and anything else with NOTSUPP.
 */
2909 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2911 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2914 type = __le16_to_cpu(req->type);
2916 BT_DBG("type 0x%4.4x", type);
2918 if (type == L2CAP_IT_FEAT_MASK) {
2920 u32 feat_mask = l2cap_feat_mask;
2921 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2922 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2923 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2925 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2927 put_unaligned_le32(feat_mask, rsp->data);
2928 l2cap_send_cmd(conn, cmd->ident,
2929 L2CAP_INFO_RSP, sizeof(buf), buf);
2930 } else if (type == L2CAP_IT_FIXED_CHAN) {
2932 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2933 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2934 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Fixed-channel bitmap follows the 4-byte response header. */
2935 memcpy(buf + 4, l2cap_fixed_chan, 8);
2936 l2cap_send_cmd(conn, cmd->ident,
2937 L2CAP_INFO_RSP, sizeof(buf), buf);
2939 struct l2cap_info_rsp rsp;
2940 rsp.type = cpu_to_le16(type);
2941 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2942 l2cap_send_cmd(conn, cmd->ident,
2943 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an Information Response.  A FEAT_MASK answer stores the remote
 * feature bitmap and, if fixed channels are supported, chains a
 * FIXED_CHAN request; otherwise (or after the FIXED_CHAN answer) the
 * info exchange is marked done and queued connections are started.
 */
2949 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2951 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2954 type = __le16_to_cpu(rsp->type);
2955 result = __le16_to_cpu(rsp->result);
2957 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2959 del_timer(&conn->info_timer);
2961 if (type == L2CAP_IT_FEAT_MASK) {
2962 conn->feat_mask = get_unaligned_le32(rsp->data);
2964 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2965 struct l2cap_info_req req;
2966 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2968 conn->info_ident = l2cap_get_ident(conn);
2970 l2cap_send_cmd(conn, conn->info_ident,
2971 L2CAP_INFO_REQ, sizeof(req), &req);
2973 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2974 conn->info_ident = 0;
2976 l2cap_conn_start(conn);
2978 } else if (type == L2CAP_IT_FIXED_CHAN) {
2979 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2980 conn->info_ident = 0;
2982 l2cap_conn_start(conn);
/*
 * Demultiplex the signalling channel: mirror the frame to raw sockets,
 * then iterate over the packed commands in the skb and dispatch each to
 * its handler.  A handler error (or unknown command code) is answered
 * with a Command Reject.
 */
2988 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
2990 u8 *data = skb->data;
2992 struct l2cap_cmd_hdr cmd;
2995 l2cap_raw_recv(conn, skb);
2997 while (len >= L2CAP_CMD_HDR_SIZE) {
2999 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3000 data += L2CAP_CMD_HDR_SIZE;
3001 len -= L2CAP_CMD_HDR_SIZE;
3003 cmd_len = le16_to_cpu(cmd.len);
3005 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command running past the buffer, or ident 0, is malformed. */
3007 if (cmd_len > len || !cmd.ident) {
3008 BT_DBG("corrupted command");
3013 case L2CAP_COMMAND_REJ:
3014 l2cap_command_rej(conn, &cmd, data);
3017 case L2CAP_CONN_REQ:
3018 err = l2cap_connect_req(conn, &cmd, data);
3021 case L2CAP_CONN_RSP:
3022 err = l2cap_connect_rsp(conn, &cmd, data);
3025 case L2CAP_CONF_REQ:
3026 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3029 case L2CAP_CONF_RSP:
3030 err = l2cap_config_rsp(conn, &cmd, data);
3033 case L2CAP_DISCONN_REQ:
3034 err = l2cap_disconnect_req(conn, &cmd, data);
3037 case L2CAP_DISCONN_RSP:
3038 err = l2cap_disconnect_rsp(conn, &cmd, data);
3041 case L2CAP_ECHO_REQ:
3042 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3045 case L2CAP_ECHO_RSP:
3048 case L2CAP_INFO_REQ:
3049 err = l2cap_information_req(conn, &cmd, data);
3052 case L2CAP_INFO_RSP:
3053 err = l2cap_information_rsp(conn, &cmd, data);
3057 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3063 struct l2cap_cmd_rej rej;
3064 BT_DBG("error %d", err);
3066 /* FIXME: Map err to a valid reason */
3067 rej.reason = cpu_to_le16(0);
3068 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/*
 * Verify the trailing CRC16 FCS of a received frame when enabled.  The
 * CRC covers the L2CAP header (hdr_size bytes before skb->data) plus
 * the payload.  Returns 0 on match (or when FCS is off).
 */
3078 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3080 u16 our_fcs, rcv_fcs;
3081 int hdr_size = L2CAP_HDR_SIZE + 2;
3083 if (pi->fcs == L2CAP_FCS_CRC16) {
/* After the trim, skb->data + skb->len still points at the two
 * received FCS bytes, which remain in the buffer. */
3084 skb_trim(skb, skb->len - 2);
3085 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3086 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3088 if (our_fcs != rcv_fcs)
/*
 * Insert an out-of-sequence I-frame into the SREJ queue, keeping it
 * ordered by tx_seq: scan from the head and queue the skb before the
 * first entry with a larger sequence number, or at the tail.
 */
3094 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3096 struct sk_buff *next_skb;
3098 bt_cb(skb)->tx_seq = tx_seq;
3099 bt_cb(skb)->sar = sar;
3101 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue: just append. */
3103 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3108 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3109 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3113 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3116 } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3118 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/*
 * Reassemble a segmented SDU from incoming I-frames based on the SAR
 * bits of 'control': UNSEGMENTED frames are delivered directly; START
 * allocates pi->sdu from the announced length; CONTINUE/END append to
 * it (checking against overrun), and END delivers the completed SDU.
 */
3121 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3123 struct l2cap_pinfo *pi = l2cap_pi(sk);
3124 struct sk_buff *_skb;
3127 switch (control & L2CAP_CTRL_SAR) {
3128 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame while reassembly is in progress is invalid. */
3129 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3134 err = sock_queue_rcv_skb(sk, skb);
3140 case L2CAP_SDU_START:
3141 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* Start frame carries the total SDU length first. */
3146 pi->sdu_len = get_unaligned_le16(skb->data);
3149 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3155 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3157 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3158 pi->partial_sdu_len = skb->len;
3162 case L2CAP_SDU_CONTINUE:
3163 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3166 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3168 pi->partial_sdu_len += skb->len;
/* More data than announced in the start frame is an error. */
3169 if (pi->partial_sdu_len > pi->sdu_len)
3177 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3180 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3182 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3183 pi->partial_sdu_len += skb->len;
3185 if (pi->partial_sdu_len == pi->sdu_len) {
3186 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3187 err = sock_queue_rcv_skb(sk, _skb);
/* After a SREJ'd frame finally arrives, flush the now-contiguous run
 * of frames from the SREJ queue up the stack, advancing
 * buffer_seq_srej (modulo 64) for each delivered frame.
 */
3201 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3203 struct sk_buff *skb;
3206 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Stop at the first remaining gap in the sequence. */
3207 if (bt_cb(skb)->tx_seq != tx_seq)
3210 skb = skb_dequeue(SREJ_QUEUE(sk));
/* Rebuild the SAR bits so reassembly sees the original segmentation. */
3211 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3212 l2cap_sar_reassembly_sdu(sk, skb, control);
3213 l2cap_pi(sk)->buffer_seq_srej =
3214 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
/* Re-send the SREJ S-frame for tx_seq and move its srej_list entry to
 * the tail, keeping the pending-SREJ list in request order.
 */
3219 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3221 struct l2cap_pinfo *pi = l2cap_pi(sk);
3222 struct srej_list *l, *tmp;
3225 list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
3226 if (l->tx_seq == tx_seq) {
3231 control = L2CAP_SUPER_SELECT_REJECT;
3232 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3233 l2cap_send_sframe(pi, control);
/* The entry is unlinked on an elided line; re-queue it at the tail. */
3235 list_add_tail(&l->list, SREJ_LIST(sk));
/* Send one SREJ S-frame for every missing sequence number between
 * expected_tx_seq and the received tx_seq, recording each request in
 * SREJ_LIST so the retransmissions can be matched later.
 */
3239 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3241 struct l2cap_pinfo *pi = l2cap_pi(sk);
3242 struct srej_list *new;
3245 while (tx_seq != pi->expected_tx_seq) {
3246 control = L2CAP_SUPER_SELECT_REJECT;
3247 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
/* Piggy-back the Poll bit on the first SREJ when requested. */
3248 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
3249 control |= L2CAP_CTRL_POLL;
3250 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
3252 l2cap_send_sframe(pi, control);
/* NOTE(review): kzalloc(GFP_ATOMIC) can return NULL and is
 * dereferenced unchecked on the next line -- oops under memory
 * pressure; upstream later restructured this to check the result. */
3254 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
/* NOTE(review): these increments lack the '% 64' wrap used for
 * sequence numbers elsewhere in this file -- confirm. */
3255 new->tx_seq = pi->expected_tx_seq++;
3256 list_add_tail(&new->list, SREJ_LIST(sk));
/* Finally step over the frame that actually arrived. */
3258 pi->expected_tx_seq++;
/* Handle a received ERTM I-frame: in-sequence frames are delivered and
 * periodically acknowledged; out-of-sequence frames drive the SREJ
 * (selective reject) recovery machinery.  Returns 0 or a negative
 * error.
 */
3261 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3263 struct l2cap_pinfo *pi = l2cap_pi(sk);
3264 u8 tx_seq = __get_txseq(rx_control);
3266 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3269 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* Fast path: exactly the frame we were expecting. */
3271 if (tx_seq == pi->expected_tx_seq)
/* Out of sequence while SREJs are already outstanding. */
3274 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3275 struct srej_list *first;
3277 first = list_first_entry(SREJ_LIST(sk),
3278 struct srej_list, list);
/* Retransmission for our oldest SREJ: queue it and flush the run of
 * frames that is now contiguous. */
3279 if (tx_seq == first->tx_seq) {
3280 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3281 l2cap_check_srej_gap(sk, tx_seq);
3283 list_del(&first->list);
/* All SREJs answered: fall back to normal receive state. */
3286 if (list_empty(SREJ_LIST(sk))) {
3287 pi->buffer_seq = pi->buffer_seq_srej;
3288 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3291 struct srej_list *l;
/* Answer for a later SREJ: store it, and (re)issue SREJs for
 * whatever is still missing before it. */
3292 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3294 list_for_each_entry(l, SREJ_LIST(sk), list) {
3295 if (l->tx_seq == tx_seq) {
3296 l2cap_resend_srejframe(sk, tx_seq);
3300 l2cap_send_srejframe(sk, tx_seq);
/* First hole detected: enter SREJ_SENT state and start queueing. */
3303 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3305 INIT_LIST_HEAD(SREJ_LIST(sk));
3306 pi->buffer_seq_srej = pi->buffer_seq;
3308 __skb_queue_head_init(SREJ_QUEUE(sk));
3309 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3311 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3313 l2cap_send_srejframe(sk, tx_seq);
/* In-sequence delivery path (label elided): advance the window. */
3318 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3320 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3321 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3325 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3327 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
/* Acknowledge with RR every L2CAP_DEFAULT_NUM_TO_ACK-th frame. */
3331 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
3332 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
3333 tx_control |= L2CAP_SUPER_RCV_READY;
3334 tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3335 l2cap_send_sframe(pi, tx_control);
/* Handle a received ERTM S-frame (RR / REJ / SREJ / RNR), driving the
 * retransmission state machine.
 *
 * Fix: when an SREJ with the Final bit matches the saved request
 * (srej_save_reqseq), the SREJ_ACT flag must be cleared from
 * pi->conn_state.  The original code masked ~L2CAP_CONN_SREJ_ACT out
 * of pi->srej_save_reqseq -- a sequence number, not the state word --
 * leaving the flag set forever and corrupting the saved sequence
 * number.  (Matches the later upstream correction of this line.)
 */
3340 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3342 struct l2cap_pinfo *pi = l2cap_pi(sk);
3343 u8 tx_seq = __get_reqseq(rx_control);
3345 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3347 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3348 case L2CAP_SUPER_RCV_READY:
/* RR with Poll: answer immediately with RR + Final. */
3349 if (rx_control & L2CAP_CTRL_POLL) {
3350 u16 control = L2CAP_CTRL_FINAL;
3351 control |= L2CAP_SUPER_RCV_READY;
3352 l2cap_send_sframe(l2cap_pi(sk), control);
/* RR with Final: the answer to our Poll; stop the monitor timer. */
3353 } else if (rx_control & L2CAP_CTRL_FINAL) {
3354 if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
3357 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3358 del_timer(&pi->monitor_timer);
3360 if (pi->unacked_frames > 0)
3361 __mod_retrans_timer();
/* Plain RR: acknowledgement up to tx_seq; resume transmission. */
3363 pi->expected_ack_seq = tx_seq;
3364 l2cap_drop_acked_frames(sk);
3365 if (pi->unacked_frames > 0)
3366 __mod_retrans_timer();
3367 l2cap_ertm_send(sk);
/* REJ: rewind to the rejected sequence number and retransmit. */
3371 case L2CAP_SUPER_REJECT:
3372 pi->expected_ack_seq = __get_reqseq(rx_control);
3373 l2cap_drop_acked_frames(sk);
3375 sk->sk_send_head = TX_QUEUE(sk)->next;
3376 pi->next_tx_seq = pi->expected_ack_seq;
3378 l2cap_ertm_send(sk);
3382 case L2CAP_SUPER_SELECT_REJECT:
/* SREJ with Poll: retransmit the requested frame, and remember the
 * request so the later Final frame can be recognised. */
3383 if (rx_control & L2CAP_CTRL_POLL) {
3384 l2cap_retransmit_frame(sk, tx_seq);
3385 pi->expected_ack_seq = tx_seq;
3386 l2cap_drop_acked_frames(sk);
3387 l2cap_ertm_send(sk);
3388 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3389 pi->srej_save_reqseq = tx_seq;
3390 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3392 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* SREJ with Final matching the saved request: already retransmitted
 * above, so only clear the pending-SREJ flag (the fix). */
3393 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3394 pi->srej_save_reqseq == tx_seq)
3395 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3397 l2cap_retransmit_frame(sk, tx_seq);
/* Plain SREJ: retransmit the requested frame. */
3400 l2cap_retransmit_frame(sk, tx_seq);
3401 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3402 pi->srej_save_reqseq = tx_seq;
3403 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* RNR: peer receiver busy (handling elided in this view). */
3408 case L2CAP_SUPER_RCV_NOT_READY:
/* Demultiplex an inbound frame to the channel identified by cid and
 * process it according to the channel mode (basic / ERTM / streaming).
 */
3415 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3418 struct l2cap_pinfo *pi;
3423 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3425 BT_DBG("unknown cid 0x%4.4x", cid);
3431 BT_DBG("sk %p, len %d", sk, skb->len);
3433 if (sk->sk_state != BT_CONNECTED)
3437 case L2CAP_MODE_BASIC:
3438 /* If socket recv buffers overflows we drop data here
3439 * which is *bad* because L2CAP has to be reliable.
3440 * But we don't have any other choice. L2CAP doesn't
3441 * provide flow control mechanism. */
3443 if (pi->imtu < skb->len)
3446 if (!sock_queue_rcv_skb(sk, skb))
3450 case L2CAP_MODE_ERTM:
/* First two payload bytes are the ERTM control field. */
3451 control = get_unaligned_le16(skb->data);
3455 if (__is_sar_start(control))
3458 if (pi->fcs == L2CAP_FCS_CRC16)
3462 * We can just drop the corrupted I-frame here.
3463 * Receiver will miss it and start proper recovery
3464 * procedures and ask retransmission.
3466 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
3469 if (l2cap_check_fcs(pi, skb))
3472 if (__is_iframe(control))
3473 err = l2cap_data_channel_iframe(sk, control, skb);
3475 err = l2cap_data_channel_sframe(sk, control, skb);
3481 case L2CAP_MODE_STREAMING:
3482 control = get_unaligned_le16(skb->data);
3486 if (__is_sar_start(control))
3489 if (pi->fcs == L2CAP_FCS_CRC16)
/* Streaming mode has no retransmission: oversized PDUs and S-frames
 * are simply dropped. */
3492 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
3495 if (l2cap_check_fcs(pi, skb))
3498 tx_seq = __get_txseq(control);
/* Streaming resynchronises on whatever sequence number arrives.
 * NOTE(review): the resync assignment below lacks the '% 64' wrap
 * used in the in-sequence branch -- confirm. */
3500 if (pi->expected_tx_seq == tx_seq)
3501 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3503 pi->expected_tx_seq = tx_seq + 1;
3505 err = l2cap_sar_reassembly_sdu(sk, skb, control);
3510 BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
/* Deliver a connectionless (G-frame) payload to a socket bound to the
 * given PSM, subject to socket-state and incoming-MTU checks.
 */
3524 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3528 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3532 BT_DBG("sk %p, len %d", sk, skb->len);
3534 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3537 if (l2cap_pi(sk)->imtu < skb->len)
3540 if (!sock_queue_rcv_skb(sk, skb))
/* Top-level dispatch of one complete L2CAP frame: strip the basic
 * header and route by CID (signalling / connectionless / data channel).
 */
3552 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3554 struct l2cap_hdr *lh = (void *) skb->data;
3558 skb_pull(skb, L2CAP_HDR_SIZE);
3559 cid = __le16_to_cpu(lh->cid);
3560 len = __le16_to_cpu(lh->len);
/* The advertised header length must match what actually arrived. */
3562 if (len != skb->len) {
3567 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3570 case L2CAP_CID_SIGNALING:
3571 l2cap_sig_channel(conn, skb);
3574 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM in their first two bytes. */
3575 psm = get_unaligned_le16(skb->data);
3577 l2cap_conless_channel(conn, psm, skb);
3581 l2cap_data_channel(conn, cid, skb);
3586 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection by
 * scanning listening L2CAP sockets.  An exact local-address match
 * takes precedence over wildcard (BDADDR_ANY) listeners.
 */
3588 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3590 int exact = 0, lm1 = 0, lm2 = 0;
3591 register struct sock *sk;
3592 struct hlist_node *node;
3594 if (type != ACL_LINK)
3597 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3599 /* Find listening sockets and check their link_mode */
3600 read_lock(&l2cap_sk_list.lock);
3601 sk_for_each(sk, node, &l2cap_sk_list.head) {
3602 if (sk->sk_state != BT_LISTEN)
/* Listener bound exactly to this adapter's address. */
3605 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3606 lm1 |= HCI_LM_ACCEPT;
3607 if (l2cap_pi(sk)->role_switch)
3608 lm1 |= HCI_LM_MASTER;
/* Wildcard listener. */
3610 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3611 lm2 |= HCI_LM_ACCEPT;
3612 if (l2cap_pi(sk)->role_switch)
3613 lm2 |= HCI_LM_MASTER;
3616 read_unlock(&l2cap_sk_list.lock);
/* 'exact' is presumably set on an elided line of the exact-match
 * branch -- confirm. */
3618 return exact ? lm1 : lm2;
/* HCI callback: an ACL link setup completed.  On success attach an
 * L2CAP connection object and kick pending channels; on failure tear
 * down with the HCI status translated to an errno.
 */
3621 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3623 struct l2cap_conn *conn;
3625 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3627 if (hcon->type != ACL_LINK)
3631 conn = l2cap_conn_add(hcon, status);
3633 l2cap_conn_ready(conn);
3635 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: report which disconnect reason L2CAP chose while its
 * channels were being shut down.
 */
3640 static int l2cap_disconn_ind(struct hci_conn *hcon)
3642 struct l2cap_conn *conn = hcon->l2cap_data;
3644 BT_DBG("hcon %p", hcon);
3646 if (hcon->type != ACL_LINK || !conn)
3649 return conn->disc_reason;
/* HCI callback: the ACL link went down -- destroy the L2CAP connection
 * with the HCI reason translated to an errno.
 */
3652 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3654 BT_DBG("hcon %p reason %d", hcon, reason);
3656 if (hcon->type != ACL_LINK)
3659 l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a connected channel.  Losing
 * encryption on a MEDIUM-security channel starts a grace timer; on a
 * HIGH-security channel the socket is closed immediately.
 */
3664 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3666 if (sk->sk_type != SOCK_SEQPACKET)
3669 if (encrypt == 0x00) {
3670 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
/* Give the link 5 seconds to re-encrypt before timing out. */
3671 l2cap_sock_clear_timer(sk);
3672 l2cap_sock_set_timer(sk, HZ * 5);
3673 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3674 __l2cap_sock_close(sk, ECONNREFUSED);
/* Encryption (re)enabled: cancel any pending grace timer. */
3676 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3677 l2cap_sock_clear_timer(sk);
/* HCI callback: a security procedure (authentication/encryption) has
 * finished.  Walk every channel on the connection and advance those
 * that were waiting on the outcome.
 */
3681 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3683 struct l2cap_chan_list *l;
3684 struct l2cap_conn *conn = hcon->l2cap_data;
3690 l = &conn->chan_list;
3692 BT_DBG("conn %p", conn);
3694 read_lock(&l->lock);
3696 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Channels still mid-connect are handled elsewhere. */
3699 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
/* Already-established channels only need the encryption check. */
3704 if (!status && (sk->sk_state == BT_CONNECTED ||
3705 sk->sk_state == BT_CONFIG)) {
3706 l2cap_check_encryption(sk, encrypt);
/* Security passed for our pending outgoing connect: send the
 * Connection Request now. */
3711 if (sk->sk_state == BT_CONNECT) {
3713 struct l2cap_conn_req req;
3714 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3715 req.psm = l2cap_pi(sk)->psm;
3717 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3719 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3720 L2CAP_CONN_REQ, sizeof(req), &req);
3722 l2cap_sock_clear_timer(sk);
3723 l2cap_sock_set_timer(sk, HZ / 10);
/* Incoming connection that was gated on security: answer the held
 * Connection Request with success or a security block. */
3725 } else if (sk->sk_state == BT_CONNECT2) {
3726 struct l2cap_conn_rsp rsp;
3730 sk->sk_state = BT_CONFIG;
3731 result = L2CAP_CR_SUCCESS;
3733 sk->sk_state = BT_DISCONN;
3734 l2cap_sock_set_timer(sk, HZ / 10);
3735 result = L2CAP_CR_SEC_BLOCK;
3738 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3739 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3740 rsp.result = cpu_to_le16(result);
3741 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3742 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3743 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3749 read_unlock(&l->lock);
/* HCI callback: reassemble L2CAP frames from ACL fragments.  An
 * ACL_START fragment carries the L2CAP header (and possibly a whole
 * frame); continuation fragments are appended to conn->rx_skb until
 * conn->rx_len reaches zero.
 */
3754 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3756 struct l2cap_conn *conn = hcon->l2cap_data;
3758 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
3761 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3763 if (flags & ACL_START) {
3764 struct l2cap_hdr *hdr;
/* A start fragment while a previous frame is incomplete: drop the
 * partial frame and mark the connection unreliable. */
3768 BT_ERR("Unexpected start frame (len %d)", skb->len);
3769 kfree_skb(conn->rx_skb);
3770 conn->rx_skb = NULL;
3772 l2cap_conn_unreliable(conn, ECOMM);
3776 BT_ERR("Frame is too short (len %d)", skb->len);
3777 l2cap_conn_unreliable(conn, ECOMM);
3781 hdr = (struct l2cap_hdr *) skb->data;
3782 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3784 if (len == skb->len) {
3785 /* Complete frame received */
3786 l2cap_recv_frame(conn, skb);
3790 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3792 if (skb->len > len) {
3793 BT_ERR("Frame is too long (len %d, expected len %d)",
3795 l2cap_conn_unreliable(conn, ECOMM);
3799 /* Allocate skb for the complete frame (with header) */
3800 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
3804 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3806 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
3808 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
3810 if (!conn->rx_len) {
3811 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
3812 l2cap_conn_unreliable(conn, ECOMM);
3816 if (skb->len > conn->rx_len) {
3817 BT_ERR("Fragment is too long (len %d, expected %d)",
3818 skb->len, conn->rx_len);
3819 kfree_skb(conn->rx_skb);
3820 conn->rx_skb = NULL;
3822 l2cap_conn_unreliable(conn, ECOMM);
3826 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3828 conn->rx_len -= skb->len;
3830 if (!conn->rx_len) {
3831 /* Complete frame received */
3832 l2cap_recv_frame(conn, conn->rx_skb);
3833 conn->rx_skb = NULL;
/* sysfs 'l2cap' class attribute: dump one line per L2CAP socket.
 * NOTE(review): this sprintf()s into the fixed sysfs page with no
 * bound check -- many sockets could overrun PAGE_SIZE; upstream later
 * moved this listing to debugfs/seq_file.  Confirm against elided
 * lines.
 */
3842 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
3845 struct hlist_node *node;
3848 read_lock_bh(&l2cap_sk_list.lock);
3850 sk_for_each(sk, node, &l2cap_sk_list.head) {
3851 struct l2cap_pinfo *pi = l2cap_pi(sk);
3853 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3854 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
3855 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
3856 pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
3859 read_unlock_bh(&l2cap_sk_list.lock);
/* Read-only class attribute exposing the socket list. */
3864 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* Socket-layer operations for L2CAP sockets; poll and ioctl use the
 * generic Bluetooth helpers, mmap/socketpair are unsupported.
 */
3866 static const struct proto_ops l2cap_sock_ops = {
3867 .family = PF_BLUETOOTH,
3868 .owner = THIS_MODULE,
3869 .release = l2cap_sock_release,
3870 .bind = l2cap_sock_bind,
3871 .connect = l2cap_sock_connect,
3872 .listen = l2cap_sock_listen,
3873 .accept = l2cap_sock_accept,
3874 .getname = l2cap_sock_getname,
3875 .sendmsg = l2cap_sock_sendmsg,
3876 .recvmsg = l2cap_sock_recvmsg,
3877 .poll = bt_sock_poll,
3878 .ioctl = bt_sock_ioctl,
3879 .mmap = sock_no_mmap,
3880 .socketpair = sock_no_socketpair,
3881 .shutdown = l2cap_sock_shutdown,
3882 .setsockopt = l2cap_sock_setsockopt,
3883 .getsockopt = l2cap_sock_getsockopt
/* PF_BLUETOOTH socket-creation hook for the BTPROTO_L2CAP protocol. */
3886 static struct net_proto_family l2cap_sock_family_ops = {
3887 .family = PF_BLUETOOTH,
3888 .owner = THIS_MODULE,
3889 .create = l2cap_sock_create,
/* Registration with the HCI core: L2CAP consumes ACL data and the
 * connect/disconnect/security link events defined above.
 */
3892 static struct hci_proto l2cap_hci_proto = {
3894 .id = HCI_PROTO_L2CAP,
3895 .connect_ind = l2cap_connect_ind,
3896 .connect_cfm = l2cap_connect_cfm,
3897 .disconn_ind = l2cap_disconn_ind,
3898 .disconn_cfm = l2cap_disconn_cfm,
3899 .security_cfm = l2cap_security_cfm,
3900 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, then the socket family, then the
 * HCI protocol, unwinding in reverse order on failure.  The sysfs
 * info file is best-effort only.
 */
3903 static int __init l2cap_init(void)
3907 err = proto_register(&l2cap_proto, 0);
3911 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
3913 BT_ERR("L2CAP socket registration failed");
3917 err = hci_register_proto(&l2cap_hci_proto);
3919 BT_ERR("L2CAP protocol registration failed");
3920 bt_sock_unregister(BTPROTO_L2CAP);
/* Non-fatal: the module works without the sysfs info file. */
3924 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
3925 BT_ERR("Failed to create L2CAP info file");
3927 BT_INFO("L2CAP ver %s", VERSION);
3928 BT_INFO("L2CAP socket layer initialized");
/* Error-path target (label elided): undo proto_register(). */
3933 proto_unregister(&l2cap_proto);
/* Module exit: tear down in reverse registration order; unregister
 * failures are logged but cannot be recovered from here.
 */
3937 static void __exit l2cap_exit(void)
3939 class_remove_file(bt_class, &class_attr_l2cap);
3941 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
3942 BT_ERR("L2CAP socket unregistration failed");
3944 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
3945 BT_ERR("L2CAP protocol unregistration failed");
3947 proto_unregister(&l2cap_proto);
/* Empty exported hook: dependent modules reference this symbol so the
 * module loader pulls L2CAP in automatically.
 */
3950 void l2cap_load(void)
3952 /* Dummy function to trigger automatic L2CAP module loading by
3953 * other modules that use L2CAP sockets but don't use any other
3954 * symbols from it. */
3957 EXPORT_SYMBOL(l2cap_load);
3959 module_init(l2cap_init);
3960 module_exit(l2cap_exit);
/* NOTE(review): enable_ertm is declared 'static int' above but
 * registered as a bool parameter -- tolerated by this kernel's
 * moduleparam code but a hard error on later kernels; confirm whether
 * the declaration should be 'bool'. */
3962 module_param(enable_ertm, bool, 0644);
3963 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
3965 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
3966 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
3967 MODULE_VERSION(VERSION);
3968 MODULE_LICENSE("GPL");
/* Autoload alias: protocol 0 of the bt-proto family (BTPROTO_L2CAP). */
3969 MODULE_ALIAS("bt-proto-0");