2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
46 #include <asm/system.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #define VERSION "2.14"
55 static int enable_ertm = 0;
57 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
58 static u8 l2cap_fixed_chan[8] = { 0x02, };
60 static const struct proto_ops l2cap_sock_ops;
62 static struct bt_sock_list l2cap_sk_list = {
63 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
66 static void __l2cap_sock_close(struct sock *sk, int reason);
67 static void l2cap_sock_close(struct sock *sk);
68 static void l2cap_sock_kill(struct sock *sk);
70 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
71 u8 code, u8 ident, u16 dlen, void *data);
73 /* ---- L2CAP timers ---- */
/* sk_timer callback: derive an error reason from the socket state and
 * close the channel.  NOTE(review): interior lines (locking, the default
 * reason, closing brace) are missing from this extraction. */
74 static void l2cap_sock_timeout(unsigned long arg)
76 	struct sock *sk = (struct sock *) arg;
79 	BT_DBG("sock %p state %d", sk, sk->sk_state);
/* A timeout while connected/configuring, or while connecting above SDP
 * security level, is reported to the user as ECONNREFUSED. */
83 	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
84 		reason = ECONNREFUSED;
85 	else if (sk->sk_state == BT_CONNECT &&
86 			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
87 		reason = ECONNREFUSED;
91 	__l2cap_sock_close(sk, reason);
/* (Re)arm the socket timer to fire @timeout jiffies from now. */
99 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
101 	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
102 	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timer. */
105 static void l2cap_sock_clear_timer(struct sock *sk)
107 	BT_DBG("sock %p state %d", sk, sk->sk_state);
108 	sk_stop_timer(sk, &sk->sk_timer);
111 /* ---- L2CAP channels ---- */
/* Linear search of the connection's channel list by destination CID.
 * Double-underscore variant: caller presumably holds the list lock —
 * see the locked wrappers below. */
112 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
115 	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
116 		if (l2cap_pi(s)->dcid == cid)
/* Linear search of the connection's channel list by source CID.
 * Caller presumably holds the list lock (see locked wrapper below). */
122 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
125 	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
126 		if (l2cap_pi(s)->scid == cid)
132 /* Find channel with given SCID.
133  * Returns locked socket */
/* Lock-taking wrapper around __l2cap_get_chan_by_scid(); the read_lock
 * acquisition line is missing from this extraction. */
134 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
138 	s = __l2cap_get_chan_by_scid(l, cid);
141 	read_unlock(&l->lock);
/* Linear search of the channel list by pending signalling command ident. */
145 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
148 	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
149 		if (l2cap_pi(s)->ident == ident)
/* Lock-taking wrapper around __l2cap_get_chan_by_ident(). */
155 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
159 	s = __l2cap_get_chan_by_ident(l, ident);
162 	read_unlock(&l->lock);
/* Pick the first unused source CID in the dynamic range
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). */
166 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
168 	u16 cid = L2CAP_CID_DYN_START;
170 	for (; cid < L2CAP_CID_DYN_END; cid++) {
171 		if (!__l2cap_get_chan_by_scid(l, cid))
/* Push @sk onto the head of the connection's doubly linked channel list.
 * Locking lines are missing from this extraction. */
178 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
183 		l2cap_pi(l->head)->prev_c = sk;
185 	l2cap_pi(sk)->next_c = l->head;
186 	l2cap_pi(sk)->prev_c = NULL;
/* Remove @sk from the channel list under the list write lock, fixing up
 * its neighbours' prev/next pointers. */
190 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
192 	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
194 	write_lock_bh(&l->lock);
199 		l2cap_pi(next)->prev_c = prev;
201 		l2cap_pi(prev)->next_c = next;
202 	write_unlock_bh(&l->lock);
/* Attach @sk to @conn: allocate/assign CIDs according to socket type and
 * link it into the connection's channel list.  If @parent is given the
 * socket is queued on the parent's accept queue. */
207 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
209 	struct l2cap_chan_list *l = &conn->chan_list;
211 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
212 			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: HCI "Remote User Terminated Connection" default disconnect reason */
214 	conn->disc_reason = 0x13;
216 	l2cap_pi(sk)->conn = conn;
218 	if (sk->sk_type == SOCK_SEQPACKET) {
219 		/* Alloc CID for connection-oriented socket */
220 		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
221 	} else if (sk->sk_type == SOCK_DGRAM) {
222 		/* Connectionless socket */
223 		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
224 		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
225 		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
227 		/* Raw socket can send/recv signalling messages only */
228 		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
229 		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
230 		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
233 	__l2cap_chan_link(l, sk);
236 		bt_accept_enqueue(parent, sk);
240  * Must be called on the locked socket. */
/* Detach the channel from its connection, mark the socket closed/zapped,
 * and wake whoever waits on it (parent listener or the socket itself). */
241 static void l2cap_chan_del(struct sock *sk, int err)
243 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
244 	struct sock *parent = bt_sk(sk)->parent;
246 	l2cap_sock_clear_timer(sk);
248 	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
251 		/* Unlink from channel list */
252 		l2cap_chan_unlink(&conn->chan_list, sk);
253 		l2cap_pi(sk)->conn = NULL;
/* Drop the reference on the underlying ACL link taken at connect time */
254 		hci_conn_put(conn->hcon);
257 	sk->sk_state = BT_CLOSED;
258 	sock_set_flag(sk, SOCK_ZAPPED);
264 		bt_accept_unlink(sk);
265 		parent->sk_data_ready(parent, 0);
267 		sk->sk_state_change(sk);
270 /* Service level security */
/* Map the channel's PSM and security level to an HCI authentication
 * requirement and ask the HCI layer to enforce it on the ACL link.
 * PSM 0x0001 (SDP) gets no-bonding treatment and is demoted to the
 * SDP security level. */
271 static inline int l2cap_check_security(struct sock *sk)
273 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
276 	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
277 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
278 			auth_type = HCI_AT_NO_BONDING_MITM;
280 			auth_type = HCI_AT_NO_BONDING;
282 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
283 			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
285 		switch (l2cap_pi(sk)->sec_level) {
286 		case BT_SECURITY_HIGH:
287 			auth_type = HCI_AT_GENERAL_BONDING_MITM;
289 		case BT_SECURITY_MEDIUM:
290 			auth_type = HCI_AT_GENERAL_BONDING;
293 			auth_type = HCI_AT_NO_BONDING;
298 	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling command identifier for @conn, wrapping
 * within the kernel-reserved range under conn->lock. */
302 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
306 	/* Get next available identificator.
307 	 *    1 - 128 are used by kernel.
308 	 *  129 - 199 are reserved.
309 	 *  200 - 254 are used by utilities like l2ping, etc.
312 	spin_lock_bh(&conn->lock);
/* Wrap-around handling after 128 is in lines missing from this extraction */
314 	if (++conn->tx_ident > 128)
319 	spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command skb and queue it on the ACL link. */
324 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
326 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
328 	BT_DBG("code 0x%2.2x", code);
333 	return hci_send_acl(conn->hcon, skb, 0);
/* Kick off channel establishment: once the peer's feature mask is known
 * (and security passes) send a Connection Request; otherwise first issue
 * an Information Request for the feature mask and start its timer. */
336 static void l2cap_do_start(struct sock *sk)
338 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
340 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature-mask exchange still in flight: wait for it to finish */
341 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
344 		if (l2cap_check_security(sk)) {
345 			struct l2cap_conn_req req;
346 			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
347 			req.psm  = l2cap_pi(sk)->psm;
349 			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
351 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
352 					L2CAP_CONN_REQ, sizeof(req), &req);
355 		struct l2cap_info_req req;
356 		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
358 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
359 		conn->info_ident = l2cap_get_ident(conn);
361 		mod_timer(&conn->info_timer, jiffies +
362 					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
364 		l2cap_send_cmd(conn, conn->info_ident,
365 					L2CAP_INFO_REQ, sizeof(req), &req);
369 /* ---- L2CAP connections ---- */
/* Walk every SEQPACKET channel on @conn and advance its state machine:
 * BT_CONNECT channels get a Connection Request sent (security permitting),
 * BT_CONNECT2 channels get a Connection Response whose result depends on
 * security and the defer_setup option. */
370 static void l2cap_conn_start(struct l2cap_conn *conn)
372 	struct l2cap_chan_list *l = &conn->chan_list;
375 	BT_DBG("conn %p", conn);
379 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
382 		if (sk->sk_type != SOCK_SEQPACKET) {
387 		if (sk->sk_state == BT_CONNECT) {
388 			if (l2cap_check_security(sk)) {
389 				struct l2cap_conn_req req;
390 				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
391 				req.psm  = l2cap_pi(sk)->psm;
393 				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
395 				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
396 					L2CAP_CONN_REQ, sizeof(req), &req);
398 		} else if (sk->sk_state == BT_CONNECT2) {
399 			struct l2cap_conn_rsp rsp;
400 			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
401 			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
403 			if (l2cap_check_security(sk)) {
404 				if (bt_sk(sk)->defer_setup) {
405 					struct sock *parent = bt_sk(sk)->parent;
/* Deferred setup: report "pending, authorization" and let userspace
 * accept() decide before the channel completes */
406 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
407 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
408 					parent->sk_data_ready(parent, 0);
411 					sk->sk_state = BT_CONFIG;
412 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
413 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
416 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
417 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
420 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
421 				L2CAP_CONN_RSP, sizeof(rsp), &rsp);
427 	read_unlock(&l->lock);
/* ACL link came up: mark non-SEQPACKET channels connected immediately;
 * SEQPACKET channels in BT_CONNECT proceed via l2cap_do_start (the call
 * itself is in a line missing from this extraction). */
430 static void l2cap_conn_ready(struct l2cap_conn *conn)
432 	struct l2cap_chan_list *l = &conn->chan_list;
435 	BT_DBG("conn %p", conn);
439 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
442 		if (sk->sk_type != SOCK_SEQPACKET) {
443 			l2cap_sock_clear_timer(sk);
444 			sk->sk_state = BT_CONNECTED;
445 			sk->sk_state_change(sk);
446 		} else if (sk->sk_state == BT_CONNECT)
452 	read_unlock(&l->lock);
455 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate @err to every channel that requested force_reliable; the
 * error-assignment line is missing from this extraction. */
456 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
458 	struct l2cap_chan_list *l = &conn->chan_list;
461 	BT_DBG("conn %p", conn);
465 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
466 		if (l2cap_pi(sk)->force_reliable)
470 	read_unlock(&l->lock);
/* Information Request timed out: treat the feature-mask exchange as done
 * (with no answer) and restart pending channels. */
473 static void l2cap_info_timeout(unsigned long arg)
475 	struct l2cap_conn *conn = (void *) arg;
477 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
478 	conn->info_ident = 0;
480 	l2cap_conn_start(conn);
/* Get-or-create the L2CAP connection object attached to an HCI ACL link.
 * Allocated GFP_ATOMIC since this can run in HCI event context.
 * NOTE(review): the early-return for an existing conn and the NULL-alloc
 * check are in lines missing from this extraction. */
483 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
485 	struct l2cap_conn *conn = hcon->l2cap_data;
490 	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
494 	hcon->l2cap_data = conn;
497 	BT_DBG("hcon %p conn %p", hcon, conn);
499 	conn->mtu = hcon->hdev->acl_mtu;
500 	conn->src = &hcon->hdev->bdaddr;
501 	conn->dst = &hcon->dst;
505 	setup_timer(&conn->info_timer, l2cap_info_timeout,
506 						(unsigned long) conn);
508 	spin_lock_init(&conn->lock);
509 	rwlock_init(&conn->chan_list.lock);
/* 0x13: HCI "Remote User Terminated Connection" default disconnect reason */
511 	conn->disc_reason = 0x13;
/* Tear down the L2CAP connection: free any partial reassembly skb, delete
 * every remaining channel with @err, stop the info timer, and detach from
 * the HCI link.  Per-channel locking lines are missing from this extraction. */
516 static void l2cap_conn_del(struct hci_conn *hcon, int err)
518 	struct l2cap_conn *conn = hcon->l2cap_data;
524 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
526 	kfree_skb(conn->rx_skb);
529 	while ((sk = conn->chan_list.head)) {
531 		l2cap_chan_del(sk, err);
536 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
537 		del_timer_sync(&conn->info_timer);
539 	hcon->l2cap_data = NULL;
/* Locked wrapper: add @sk to @conn under the channel-list write lock. */
543 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
545 	struct l2cap_chan_list *l = &conn->chan_list;
546 	write_lock_bh(&l->lock);
547 	__l2cap_chan_add(conn, sk, parent);
548 	write_unlock_bh(&l->lock);
551 /* ---- Socket interface ---- */
/* Find a socket bound to exactly (@psm, @src); used to detect bind
 * conflicts.  Caller presumably holds l2cap_sk_list.lock. */
552 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
555 	struct hlist_node *node;
556 	sk_for_each(sk, node, &l2cap_sk_list.head)
557 		if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
564 /* Find socket with psm and source bdaddr.
565  * Returns closest match.
/* Prefers an exact source-address match; remembers a BDADDR_ANY wildcard
 * match in sk1 as the fallback. */
567 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
569 	struct sock *sk = NULL, *sk1 = NULL;
570 	struct hlist_node *node;
572 	sk_for_each(sk, node, &l2cap_sk_list.head) {
573 		if (state && sk->sk_state != state)
576 		if (l2cap_pi(sk)->psm == psm) {
578 			if (!bacmp(&bt_sk(sk)->src, src))
582 			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node non-NULL means the loop broke on an exact match */
586 	return node ? sk : sk1;
589 /* Find socket with given address (psm, src).
590  * Returns locked socket */
591 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
594 	read_lock(&l2cap_sk_list.lock);
595 	s = __l2cap_get_sock_by_psm(state, psm, src);
598 	read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: drop any skbs still queued on the socket. */
602 static void l2cap_sock_destruct(struct sock *sk)
606 	skb_queue_purge(&sk->sk_receive_queue);
607 	skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark the
 * parent closed and zapped. */
610 static void l2cap_sock_cleanup_listen(struct sock *parent)
614 	BT_DBG("parent %p", parent);
616 	/* Close not yet accepted channels */
617 	while ((sk = bt_accept_dequeue(parent, NULL)))
618 		l2cap_sock_close(sk);
620 	parent->sk_state = BT_CLOSED;
621 	sock_set_flag(parent, SOCK_ZAPPED);
624 /* Kill socket (only if zapped and orphan)
625  * Must be called on unlocked socket.
627 static void l2cap_sock_kill(struct sock *sk)
/* Bail unless the socket is zapped AND detached from any struct socket */
629 	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
632 	BT_DBG("sk %p state %d", sk, sk->sk_state);
634 	/* Kill poor orphan */
635 	bt_sock_unlink(&l2cap_sk_list, sk);
636 	sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close: listening sockets tear down their accept queue;
 * connected SEQPACKET channels send a Disconnect Request and wait;
 * half-open (CONNECT2) channels answer with a negative Connection
 * Response; everything else is deleted directly.  Several case labels
 * and the default branch are in lines missing from this extraction. */
640 static void __l2cap_sock_close(struct sock *sk, int reason)
642 	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
644 	switch (sk->sk_state) {
646 		l2cap_sock_cleanup_listen(sk);
651 		if (sk->sk_type == SOCK_SEQPACKET) {
652 			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
653 			struct l2cap_disconn_req req;
655 			sk->sk_state = BT_DISCONN;
656 			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
658 			req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
659 			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
660 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
661 					L2CAP_DISCONN_REQ, sizeof(req), &req);
663 			l2cap_chan_del(sk, reason);
667 		if (sk->sk_type == SOCK_SEQPACKET) {
668 			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
669 			struct l2cap_conn_rsp rsp;
/* defer_setup rejections report "security block", otherwise "bad PSM" */
672 			if (bt_sk(sk)->defer_setup)
673 				result = L2CAP_CR_SEC_BLOCK;
675 				result = L2CAP_CR_BAD_PSM;
677 			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
678 			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
679 			rsp.result = cpu_to_le16(result);
680 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
681 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
682 					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
684 			l2cap_chan_del(sk, reason);
689 		l2cap_chan_del(sk, reason);
693 		sock_set_flag(sk, SOCK_ZAPPED);
698 /* Must be called on unlocked socket. */
/* Cancel the timer and close with ECONNRESET; socket locking lines are
 * missing from this extraction. */
699 static void l2cap_sock_close(struct sock *sk)
701 	l2cap_sock_clear_timer(sk);
703 	__l2cap_sock_close(sk, ECONNRESET);
/* Initialize per-socket L2CAP options: a child accepted from @parent
 * inherits the parent's settings, otherwise defaults are applied. */
708 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
710 	struct l2cap_pinfo *pi = l2cap_pi(sk);
715 		sk->sk_type = parent->sk_type;
716 		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
718 		pi->imtu = l2cap_pi(parent)->imtu;
719 		pi->omtu = l2cap_pi(parent)->omtu;
720 		pi->mode = l2cap_pi(parent)->mode;
721 		pi->fcs  = l2cap_pi(parent)->fcs;
722 		pi->sec_level = l2cap_pi(parent)->sec_level;
723 		pi->role_switch = l2cap_pi(parent)->role_switch;
724 		pi->force_reliable = l2cap_pi(parent)->force_reliable;
726 		pi->imtu = L2CAP_DEFAULT_MTU;
728 		pi->mode = L2CAP_MODE_BASIC;
729 		pi->fcs  = L2CAP_FCS_CRC16;
730 		pi->sec_level = BT_SECURITY_LOW;
732 		pi->force_reliable = 0;
735 	/* Default config options */
737 	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Protocol descriptor registered with the BT socket core; obj_size makes
 * sk_alloc reserve room for struct l2cap_pinfo. */
740 static struct proto l2cap_proto = {
742 	.owner		= THIS_MODULE,
743 	.obj_size	= sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialize an L2CAP socket: hook the destructor,
 * set the connect timeout, arm the per-socket timer, and link the sock
 * into the global l2cap_sk_list. */
746 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
750 	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
754 	sock_init_data(sock, sk);
755 	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
757 	sk->sk_destruct = l2cap_sock_destruct;
758 	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
760 	sock_reset_flag(sk, SOCK_ZAPPED);
762 	sk->sk_protocol = proto;
763 	sk->sk_state = BT_OPEN;
765 	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
767 	bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: only SEQPACKET/DGRAM/RAW types are supported, RAW
 * requires CAP_NET_RAW.  The permission-denied return value is in a line
 * missing from this extraction. */
771 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
775 	BT_DBG("sock %p", sock);
777 	sock->state = SS_UNCONNECTED;
779 	if (sock->type != SOCK_SEQPACKET &&
780 			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
781 		return -ESOCKTNOSUPPORT;
783 	if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
786 	sock->ops = &l2cap_sock_ops;
788 	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
792 	l2cap_sock_init(sk, NULL);
/* bind(2): copy in the (possibly short) sockaddr_l2, require BT_OPEN
 * state, enforce CAP_NET_BIND_SERVICE for privileged PSMs (< 0x1001),
 * reject duplicate (psm, bdaddr) bindings, then record source address
 * and PSM.  Error-assignment lines are missing from this extraction. */
796 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
798 	struct sock *sk = sock->sk;
799 	struct sockaddr_l2 la;
804 	if (!addr || addr->sa_family != AF_BLUETOOTH)
807 	memset(&la, 0, sizeof(la));
808 	len = min_t(unsigned int, sizeof(la), alen);
809 	memcpy(&la, addr, len);
816 	if (sk->sk_state != BT_OPEN) {
821 	if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
822 				!capable(CAP_NET_BIND_SERVICE)) {
827 	write_lock_bh(&l2cap_sk_list.lock);
829 	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
832 		/* Save source address */
833 		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
834 		l2cap_pi(sk)->psm   = la.l2_psm;
835 		l2cap_pi(sk)->sport = la.l2_psm;
836 		sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) PSMs start at SDP security level */
838 		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
839 					__le16_to_cpu(la.l2_psm) == 0x0003)
840 			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
843 	write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve a route to the destination, pick an HCI authentication type
 * from socket type / PSM / security level, create (or reuse) the ACL
 * link, attach the channel, and either start channel setup immediately
 * (link already up) or leave it pending on the connect timer.
 * NOTE(review): error paths and the l2cap_do_start call are in lines
 * missing from this extraction. */
850 static int l2cap_do_connect(struct sock *sk)
852 	bdaddr_t *src = &bt_sk(sk)->src;
853 	bdaddr_t *dst = &bt_sk(sk)->dst;
854 	struct l2cap_conn *conn;
855 	struct hci_conn *hcon;
856 	struct hci_dev *hdev;
860 	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
863 	hdev = hci_get_route(dst, src);
865 		return -EHOSTUNREACH;
867 	hci_dev_lock_bh(hdev);
/* RAW sockets are used for dedicated bonding; map security level to a
 * dedicated-bonding auth type */
871 	if (sk->sk_type == SOCK_RAW) {
872 		switch (l2cap_pi(sk)->sec_level) {
873 		case BT_SECURITY_HIGH:
874 			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
876 		case BT_SECURITY_MEDIUM:
877 			auth_type = HCI_AT_DEDICATED_BONDING;
880 			auth_type = HCI_AT_NO_BONDING;
883 	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
884 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
885 			auth_type = HCI_AT_NO_BONDING_MITM;
887 			auth_type = HCI_AT_NO_BONDING;
889 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
890 			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
892 		switch (l2cap_pi(sk)->sec_level) {
893 		case BT_SECURITY_HIGH:
894 			auth_type = HCI_AT_GENERAL_BONDING_MITM;
896 		case BT_SECURITY_MEDIUM:
897 			auth_type = HCI_AT_GENERAL_BONDING;
900 			auth_type = HCI_AT_NO_BONDING;
905 	hcon = hci_connect(hdev, ACL_LINK, dst,
906 					l2cap_pi(sk)->sec_level, auth_type);
910 	conn = l2cap_conn_add(hcon, 0);
918 	/* Update source addr of the socket */
919 	bacpy(src, conn->src);
921 	l2cap_chan_add(conn, sk, NULL);
923 	sk->sk_state = BT_CONNECT;
924 	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
926 	if (hcon->state == BT_CONNECTED) {
927 		if (sk->sk_type != SOCK_SEQPACKET) {
928 			l2cap_sock_clear_timer(sk);
929 			sk->sk_state = BT_CONNECTED;
935 	hci_dev_unlock_bh(hdev);
/* connect(2): validate address and PSM (SEQPACKET requires one), check
 * the configured mode (ERTM allowed only when enabled — the enable_ertm
 * check is in lines missing from this extraction), record the destination,
 * start the connection and optionally wait for BT_CONNECTED. */
940 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
942 	struct sock *sk = sock->sk;
943 	struct sockaddr_l2 la;
948 	if (!addr || addr->sa_family != AF_BLUETOOTH)
951 	memset(&la, 0, sizeof(la));
952 	len = min_t(unsigned int, sizeof(la), alen);
953 	memcpy(&la, addr, len);
960 	if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
965 	switch (l2cap_pi(sk)->mode) {
966 	case L2CAP_MODE_BASIC:
968 	case L2CAP_MODE_ERTM:
977 	switch (sk->sk_state) {
981 		/* Already connecting */
985 		/* Already connected */
998 	/* Set destination address and psm */
999 	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1000 	l2cap_pi(sk)->psm = la.l2_psm;
1002 	err = l2cap_do_connect(sk);
1007 	err = bt_sock_wait_state(sk, BT_CONNECTED,
1008 			sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET sockets may listen; if no PSM was bound
 * yet, auto-assign the first free odd PSM in [0x1001, 0x1100) for this
 * source address, then enter BT_LISTEN. */
1014 static int l2cap_sock_listen(struct socket *sock, int backlog)
1016 	struct sock *sk = sock->sk;
1019 	BT_DBG("sk %p backlog %d", sk, backlog);
1023 	if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1028 	switch (l2cap_pi(sk)->mode) {
1029 	case L2CAP_MODE_BASIC:
1031 	case L2CAP_MODE_ERTM:
1040 	if (!l2cap_pi(sk)->psm) {
1041 		bdaddr_t *src = &bt_sk(sk)->src;
1046 		write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd; step by 2 */
1048 		for (psm = 0x1001; psm < 0x1100; psm += 2)
1049 			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1050 				l2cap_pi(sk)->psm   = cpu_to_le16(psm);
1051 				l2cap_pi(sk)->sport = cpu_to_le16(psm);
1056 		write_unlock_bh(&l2cap_sk_list.lock);
1062 	sk->sk_max_ack_backlog = backlog;
1063 	sk->sk_ack_backlog = 0;
1064 	sk->sk_state = BT_LISTEN;
/* accept(2): classic wake-one wait loop — sleep on the listener's wait
 * queue until bt_accept_dequeue yields a child, honouring the receive
 * timeout and pending signals.  The release_sock before schedule and the
 * non-blocking/timeout break are in lines missing from this extraction. */
1071 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1073 	DECLARE_WAITQUEUE(wait, current);
1074 	struct sock *sk = sock->sk, *nsk;
1078 	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1080 	if (sk->sk_state != BT_LISTEN) {
1085 	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1087 	BT_DBG("sk %p timeo %ld", sk, timeo);
1089 	/* Wait for an incoming connection. (wake-one). */
1090 	add_wait_queue_exclusive(sk->sk_sleep, &wait);
1091 	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1092 		set_current_state(TASK_INTERRUPTIBLE);
1099 		timeo = schedule_timeout(timeo);
1100 		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
/* Listener may have been closed while we slept */
1102 		if (sk->sk_state != BT_LISTEN) {
1107 		if (signal_pending(current)) {
1108 			err = sock_intr_errno(timeo);
1112 	set_current_state(TASK_RUNNING);
1113 	remove_wait_queue(sk->sk_sleep, &wait);
1118 	newsock->state = SS_CONNECTED;
1120 	BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with either the peer
 * (psm/dst/dcid) or local (sport/src/scid) identity depending on @peer.
 * NOTE(review): no memset of *la is visible here — padding bytes may be
 * left uninitialized; later kernels zero the structure first. */
1127 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1129 	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1130 	struct sock *sk = sock->sk;
1132 	BT_DBG("sock %p, sk %p", sock, sk);
1134 	addr->sa_family = AF_BLUETOOTH;
1135 	*len = sizeof(struct sockaddr_l2);
1138 		la->l2_psm = l2cap_pi(sk)->psm;
1139 		bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1140 		la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1142 		la->l2_psm = l2cap_pi(sk)->sport;
1143 		bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1144 		la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* Copy user data from @msg into an skb chain (first fragment carries the
 * L2CAP header, and for DGRAM sockets also the 2-byte PSM) limited by the
 * ACL MTU, then hand the chain to hci_send_acl().  Error-path frees and
 * the remaining-length bookkeeping are in lines missing from this
 * extraction. */
1150 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
1152 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1153 	struct sk_buff *skb, **frag;
1154 	int err, hlen, count, sent = 0;
1155 	struct l2cap_hdr *lh;
1157 	BT_DBG("sk %p len %d", sk, len);
1159 	/* First fragment (with L2CAP header) */
1160 	if (sk->sk_type == SOCK_DGRAM)
1161 		hlen = L2CAP_HDR_SIZE + 2;
1163 		hlen = L2CAP_HDR_SIZE;
1165 	count = min_t(unsigned int, (conn->mtu - hlen), len);
1167 	skb = bt_skb_send_alloc(sk, hlen + count,
1168 			msg->msg_flags & MSG_DONTWAIT, &err);
1172 	/* Create L2CAP header */
1173 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1174 	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1175 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1177 	if (sk->sk_type == SOCK_DGRAM)
1178 		put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
1180 	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1188 	/* Continuation fragments (no L2CAP header) */
1189 	frag = &skb_shinfo(skb)->frag_list;
1191 		count = min_t(unsigned int, conn->mtu, len);
1193 		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1197 		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
1205 		frag = &(*frag)->next;
1207 	err = hci_send_acl(conn->hcon, skb, 0);
/* sendmsg(2): reject MSG_OOB, enforce the outgoing MTU for non-RAW
 * sockets, and forward to l2cap_do_send when connected.  Socket locking
 * and the not-connected error path are in lines missing from this
 * extraction. */
1218 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1220 	struct sock *sk = sock->sk;
1223 	BT_DBG("sock %p, sk %p", sock, sk);
1225 	err = sock_error(sk);
1229 	if (msg->msg_flags & MSG_OOB)
1232 	/* Check outgoing MTU */
1233 	if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1238 	if (sk->sk_state == BT_CONNECTED)
1239 		err = l2cap_do_send(sk, msg, len);
/* recvmsg(2): the first read on a defer_setup socket in BT_CONNECT2
 * completes the deferred accept by sending the success Connection
 * Response, then data is received via the generic BT helper. */
1247 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1249 	struct sock *sk = sock->sk;
1253 	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1254 		struct l2cap_conn_rsp rsp;
1256 		sk->sk_state = BT_CONFIG;
1258 		rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
1259 		rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
1260 		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1261 		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1262 		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1263 					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1271 	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt: L2CAP_OPTIONS copies in struct
 * l2cap_options (current values serve as defaults for short copies);
 * L2CAP_LM maps link-mode flags onto sec_level / role_switch /
 * force_reliable.  Case labels and locking are in lines missing from
 * this extraction. */
1274 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
1276 	struct sock *sk = sock->sk;
1277 	struct l2cap_options opts;
1281 	BT_DBG("sk %p", sk);
1287 		opts.imtu     = l2cap_pi(sk)->imtu;
1288 		opts.omtu     = l2cap_pi(sk)->omtu;
1289 		opts.flush_to = l2cap_pi(sk)->flush_to;
1290 		opts.mode     = l2cap_pi(sk)->mode;
1292 		len = min_t(unsigned int, sizeof(opts), optlen);
1293 		if (copy_from_user((char *) &opts, optval, len)) {
1298 		l2cap_pi(sk)->imtu = opts.imtu;
1299 		l2cap_pi(sk)->omtu = opts.omtu;
1300 		l2cap_pi(sk)->mode = opts.mode;
1304 		if (get_user(opt, (u32 __user *) optval)) {
/* Later flags override earlier ones; SECURE wins over ENCRYPT over AUTH */
1309 		if (opt & L2CAP_LM_AUTH)
1310 			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1311 		if (opt & L2CAP_LM_ENCRYPT)
1312 			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1313 		if (opt & L2CAP_LM_SECURE)
1314 			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1316 		l2cap_pi(sk)->role_switch    = (opt & L2CAP_LM_MASTER);
1317 		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* SOL_BLUETOOTH setsockopt: BT_SECURITY sets the security level (range
 * checked, SEQPACKET/RAW only); BT_DEFER_SETUP toggles deferred accept
 * on bound/listening sockets.  SOL_L2CAP falls through to the legacy
 * handler. */
1329 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1331 	struct sock *sk = sock->sk;
1332 	struct bt_security sec;
1336 	BT_DBG("sk %p", sk);
1338 	if (level == SOL_L2CAP)
1339 		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1341 	if (level != SOL_BLUETOOTH)
1342 		return -ENOPROTOOPT;
1348 		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1353 		sec.level = BT_SECURITY_LOW;
1355 		len = min_t(unsigned int, sizeof(sec), optlen);
1356 		if (copy_from_user((char *) &sec, optval, len)) {
1361 		if (sec.level < BT_SECURITY_LOW ||
1362 					sec.level > BT_SECURITY_HIGH) {
1367 		l2cap_pi(sk)->sec_level = sec.level;
1370 	case BT_DEFER_SETUP:
1371 		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1376 		if (get_user(opt, (u32 __user *) optval)) {
1381 		bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt: L2CAP_OPTIONS returns current channel
 * options; L2CAP_LM reconstructs link-mode flags from sec_level and the
 * role_switch/force_reliable bits; L2CAP_CONNINFO exposes the HCI handle
 * and remote device class (connected sockets only).  Case labels and
 * locking are in lines missing from this extraction. */
1393 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1395 	struct sock *sk = sock->sk;
1396 	struct l2cap_options opts;
1397 	struct l2cap_conninfo cinfo;
1401 	BT_DBG("sk %p", sk);
1403 	if (get_user(len, optlen))
1410 		opts.imtu     = l2cap_pi(sk)->imtu;
1411 		opts.omtu     = l2cap_pi(sk)->omtu;
1412 		opts.flush_to = l2cap_pi(sk)->flush_to;
1413 		opts.mode     = l2cap_pi(sk)->mode;
1415 		len = min_t(unsigned int, len, sizeof(opts));
1416 		if (copy_to_user(optval, (char *) &opts, len))
1422 		switch (l2cap_pi(sk)->sec_level) {
1423 		case BT_SECURITY_LOW:
1424 			opt = L2CAP_LM_AUTH;
1426 		case BT_SECURITY_MEDIUM:
1427 			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1429 		case BT_SECURITY_HIGH:
1430 			opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1438 		if (l2cap_pi(sk)->role_switch)
1439 			opt |= L2CAP_LM_MASTER;
1441 		if (l2cap_pi(sk)->force_reliable)
1442 			opt |= L2CAP_LM_RELIABLE;
1444 		if (put_user(opt, (u32 __user *) optval))
1448 	case L2CAP_CONNINFO:
/* Also allowed on a deferred-setup socket still in BT_CONNECT2 */
1449 		if (sk->sk_state != BT_CONNECTED &&
1450 					!(sk->sk_state == BT_CONNECT2 &&
1451 						bt_sk(sk)->defer_setup)) {
1456 		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1457 		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1459 		len = min_t(unsigned int, len, sizeof(cinfo));
1460 		if (copy_to_user(optval, (char *) &cinfo, len))
/* SOL_BLUETOOTH getsockopt: BT_SECURITY reports the security level
 * (SEQPACKET/RAW only); BT_DEFER_SETUP reports the deferred-accept flag
 * on bound/listening sockets.  SOL_L2CAP falls through to the legacy
 * handler. */
1474 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1476 	struct sock *sk = sock->sk;
1477 	struct bt_security sec;
1480 	BT_DBG("sk %p", sk);
1482 	if (level == SOL_L2CAP)
1483 		return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1485 	if (level != SOL_BLUETOOTH)
1486 		return -ENOPROTOOPT;
1488 	if (get_user(len, optlen))
1495 		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1500 		sec.level = l2cap_pi(sk)->sec_level;
1502 		len = min_t(unsigned int, len, sizeof(sec));
1503 		if (copy_to_user(optval, (char *) &sec, len))
1508 	case BT_DEFER_SETUP:
1509 		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1514 		if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown(2): first shutdown closes the channel (timer cleared, close
 * with reason 0); with SO_LINGER set, wait for BT_CLOSED up to the linger
 * time.  Socket locking lines are missing from this extraction. */
1528 static int l2cap_sock_shutdown(struct socket *sock, int how)
1530 	struct sock *sk = sock->sk;
1533 	BT_DBG("sock %p, sk %p", sock, sk);
1539 	if (!sk->sk_shutdown) {
1540 		sk->sk_shutdown = SHUTDOWN_MASK;
1541 		l2cap_sock_clear_timer(sk);
1542 		__l2cap_sock_close(sk, 0);
1544 		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1545 			err = bt_sock_wait_state(sk, BT_CLOSED,
/* release(2): full shutdown followed by killing the (now orphan) socket. */
1552 static int l2cap_sock_release(struct socket *sock)
1554 	struct sock *sk = sock->sk;
1557 	BT_DBG("sock %p, sk %p", sock, sk);
1562 	err = l2cap_sock_shutdown(sock, 2);
1565 	l2cap_sock_kill(sk);
/* Configuration finished: reset conf state and wake the waiter — the
 * connecting socket for outgoing channels, the accept()ing parent for
 * incoming ones.  The parent NULL-check branch is in lines missing from
 * this extraction. */
1569 static void l2cap_chan_ready(struct sock *sk)
1571 	struct sock *parent = bt_sk(sk)->parent;
1573 	BT_DBG("sk %p, parent %p", sk, parent);
1575 	l2cap_pi(sk)->conf_state = 0;
1576 	l2cap_sock_clear_timer(sk);
1579 		/* Outgoing channel.
1580 		 * Wake up socket sleeping on connect.
1582 		sk->sk_state = BT_CONNECTED;
1583 		sk->sk_state_change(sk);
1585 		/* Incoming channel.
1586 		 * Wake up socket sleeping on accept.
1588 		parent->sk_data_ready(parent, 0);
1592 /* Copy frame to all raw sockets on that connection */
/* Clone @skb once per RAW socket on the connection and queue it; clone
 * or queue failure for one socket doesn't affect the others. */
1593 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1595 	struct l2cap_chan_list *l = &conn->chan_list;
1596 	struct sk_buff *nskb;
1599 	BT_DBG("conn %p", conn);
1601 	read_lock(&l->lock);
1602 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1603 		if (sk->sk_type != SOCK_RAW)
1606 		/* Don't send frame to the socket it came from */
1609 		nskb = skb_clone(skb, GFP_ATOMIC);
1613 		if (sock_queue_rcv_skb(sk, nskb))
1616 	read_unlock(&l->lock);
1619 /* ---- L2CAP signalling commands ---- */
/* Build a signalling PDU on the signalling CID: L2CAP header + command
 * header + @dlen payload bytes, fragmented across continuation skbs when
 * it exceeds the ACL MTU.  Allocation-failure cleanup and the length
 * bookkeeping are in lines missing from this extraction. */
1620 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1621 				u8 code, u8 ident, u16 dlen, void *data)
1623 	struct sk_buff *skb, **frag;
1624 	struct l2cap_cmd_hdr *cmd;
1625 	struct l2cap_hdr *lh;
1628 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1629 			conn, code, ident, dlen);
1631 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1632 	count = min_t(unsigned int, conn->mtu, len);
1634 	skb = bt_skb_alloc(count, GFP_ATOMIC);
1638 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1639 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1640 	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1642 	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1645 	cmd->len = cpu_to_le16(dlen);
1648 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1649 		memcpy(skb_put(skb, count), data, count);
1655 	/* Continuation fragments (no L2CAP header) */
1656 	frag = &skb_shinfo(skb)->frag_list;
1658 		count = min_t(unsigned int, conn->mtu, len);
1660 		*frag = bt_skb_alloc(count, GFP_ATOMIC);
1664 		memcpy(skb_put(*frag, count), data, count);
1669 		frag = &(*frag)->next;
/* Decode one configuration option at *ptr: report its type and length,
 * read the value according to the option length (1/2/4 bytes, else a
 * pointer to the raw bytes), and return the bytes consumed.  Case labels
 * and the *ptr advance are in lines missing from this extraction. */
1679 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1681 	struct l2cap_conf_opt *opt = *ptr;
1684 	len = L2CAP_CONF_OPT_SIZE + opt->len;
1692 		*val = *((u8 *) opt->val);
1696 		*val = __le16_to_cpu(*((__le16 *) opt->val));
1700 		*val = __le32_to_cpu(*((__le32 *) opt->val));
1704 		*val = (unsigned long) opt->val;
1708 	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr, encoding @val per @len
 * (1/2/4 bytes direct, otherwise @val is a pointer to @len raw bytes),
 * then advance *ptr past the option. */
1712 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1714 	struct l2cap_conf_opt *opt = *ptr;
1716 	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1723 		*((u8 *) opt->val)  = val;
1727 		*((__le16 *) opt->val) = cpu_to_le16(val);
1731 		*((__le32 *) opt->val) = cpu_to_le32(val);
1735 		memcpy(opt->val, (void *) val, len);
1739 	*ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * Fill 'data' with our outgoing Configure Request: only a non-default
 * incoming MTU is advertised as an option here.
 * NOTE(review): elided extract — the return (presumably the built
 * length, ptr - data) is not visible here.
 */
1742 static int l2cap_build_conf_req(struct sock *sk, void *data)
1744 struct l2cap_pinfo *pi = l2cap_pi(sk);
1745 struct l2cap_conf_req *req = data;
1746 void *ptr = req->data;
1748 BT_DBG("sk %p", sk);
/* Only send the MTU option when it differs from the default. */
1750 if (pi->imtu != L2CAP_DEFAULT_MTU)
1751 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1753 /* FIXME: Need actual value of the flush timeout */
1754 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1755 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1757 req->dcid = cpu_to_le16(pi->dcid);
1758 req->flags = cpu_to_le16(0)
/*
 * Parse the peer's buffered Configure Request (pi->conf_req/conf_len)
 * and build the Configure Response into 'data'.  Unknown non-hint
 * options are echoed back with result L2CAP_CONF_UNKNOWN; only basic
 * mode RFC is accepted.
 * NOTE(review): elided extract — the switch header, several option
 * handlers, hint handling and the return are not visible here.
 */
1763 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1765 struct l2cap_pinfo *pi = l2cap_pi(sk);
1766 struct l2cap_conf_rsp *rsp = data;
1767 void *ptr = rsp->data;
/* Peer's accumulated config request, buffered by l2cap_config_req(). */
1768 void *req = pi->conf_req;
1769 int len = pi->conf_len;
1770 int type, hint, olen;
1772 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1773 u16 mtu = L2CAP_DEFAULT_MTU;
1774 u16 result = L2CAP_CONF_SUCCESS;
1776 BT_DBG("sk %p", sk);
/* Walk the option list one TLV at a time. */
1778 while (len >= L2CAP_CONF_OPT_SIZE) {
1779 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* High bit marks a hint: unknown hints are silently skipped. */
1781 hint = type & L2CAP_CONF_HINT;
1782 type &= L2CAP_CONF_MASK;
1785 case L2CAP_CONF_MTU:
1789 case L2CAP_CONF_FLUSH_TO:
1793 case L2CAP_CONF_QOS:
1796 case L2CAP_CONF_RFC:
1797 if (olen == sizeof(rfc))
1798 memcpy(&rfc, (void *) val, olen);
/* Unknown mandatory option: reject and echo its type back. */
1805 result = L2CAP_CONF_UNKNOWN;
1806 *((u8 *) ptr++) = type;
1811 if (result == L2CAP_CONF_SUCCESS) {
1812 /* Configure output options and let the other side know
1813 * which ones we don't like. */
/* Only basic mode is supported on this path. */
1815 if (rfc.mode == L2CAP_MODE_BASIC) {
1817 result = L2CAP_CONF_UNACCEPT;
1820 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1823 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1825 result = L2CAP_CONF_UNACCEPT;
/* Counter-propose plain basic mode RFC. */
1827 memset(&rfc, 0, sizeof(rfc));
1828 rfc.mode = L2CAP_MODE_BASIC;
1830 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1831 sizeof(rfc), (unsigned long) &rfc);
1835 rsp->scid = cpu_to_le16(pi->dcid);
1836 rsp->result = cpu_to_le16(result);
1837 rsp->flags = cpu_to_le16(0x0000);
/*
 * Fill 'data' with a bare Configure Response carrying the given result
 * and flags (no options).  Used for reject / incomplete-config replies.
 * NOTE(review): elided extract — the length return is not visible here.
 */
1842 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1844 struct l2cap_conf_rsp *rsp = data;
1845 void *ptr = rsp->data;
1847 BT_DBG("sk %p", sk);
1849 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1850 rsp->result = cpu_to_le16(result);
1851 rsp->flags = cpu_to_le16(flags);
/*
 * Handle an incoming Command Reject.  If it rejects our outstanding
 * Information Request (matched by ident), cancel the info timer, mark
 * the feature-mask exchange done and kick off pending connections.
 */
1856 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1858 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* Only "command not understood" (0x0000) rejects are handled. */
1860 if (rej->reason != 0x0000)
1863 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1864 cmd->ident == conn->info_ident) {
1865 del_timer(&conn->info_timer);
1867 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1868 conn->info_ident = 0;
/* Peer doesn't support Information Request: proceed anyway. */
1870 l2cap_conn_start(conn);
/*
 * Handle an incoming Connection Request: find a listening socket for
 * the PSM, enforce link security (except for SDP), allocate and attach
 * a child socket, then send a Connection Response.  If the peer's
 * feature mask is still unknown, defer with CR_PEND/CS_NO_INFO and
 * issue an Information Request first.
 * NOTE(review): elided extract — several error-exit labels, lock
 * acquisition around 'parent' and the final return are not visible.
 */
1876 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1878 struct l2cap_chan_list *list = &conn->chan_list;
1879 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1880 struct l2cap_conn_rsp rsp;
1881 struct sock *sk, *parent;
1882 int result, status = L2CAP_CS_NO_INFO;
1884 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1885 __le16 psm = req->psm;
1887 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1889 /* Check if we have socket listening on psm */
1890 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1892 result = L2CAP_CR_BAD_PSM;
1896 /* Check if the ACL is secure enough (if not SDP) */
/* 0x0001 is the SDP PSM, which is exempt from the security check. */
1897 if (psm != cpu_to_le16(0x0001) &&
1898 !hci_conn_check_link_mode(conn->hcon)) {
1899 conn->disc_reason = 0x05;
1900 result = L2CAP_CR_SEC_BLOCK;
1904 result = L2CAP_CR_NO_MEM;
1906 /* Check for backlog size */
1907 if (sk_acceptq_is_full(parent)) {
1908 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1912 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1916 write_lock_bh(&list->lock);
1918 /* Check if we already have channel with that dcid */
1919 if (__l2cap_get_chan_by_dcid(list, scid)) {
1920 write_unlock_bh(&list->lock);
1921 sock_set_flag(sk, SOCK_ZAPPED);
1922 l2cap_sock_kill(sk);
1926 hci_conn_hold(conn->hcon);
/* Initialize the child from the listening parent. */
1928 l2cap_sock_init(sk, parent);
1929 bacpy(&bt_sk(sk)->src, conn->src);
1930 bacpy(&bt_sk(sk)->dst, conn->dst);
1931 l2cap_pi(sk)->psm = psm;
1932 l2cap_pi(sk)->dcid = scid;
1934 __l2cap_chan_add(conn, sk, parent);
/* Our locally allocated CID becomes the peer's destination CID. */
1935 dcid = l2cap_pi(sk)->scid;
1937 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1939 l2cap_pi(sk)->ident = cmd->ident;
1941 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
1942 if (l2cap_check_security(sk)) {
/* Deferred setup: leave decision to userspace, reply "pending". */
1943 if (bt_sk(sk)->defer_setup) {
1944 sk->sk_state = BT_CONNECT2;
1945 result = L2CAP_CR_PEND;
1946 status = L2CAP_CS_AUTHOR_PEND;
1947 parent->sk_data_ready(parent, 0);
1949 sk->sk_state = BT_CONFIG;
1950 result = L2CAP_CR_SUCCESS;
1951 status = L2CAP_CS_NO_INFO;
/* Security pending: authentication still in progress. */
1954 sk->sk_state = BT_CONNECT2;
1955 result = L2CAP_CR_PEND;
1956 status = L2CAP_CS_AUTHEN_PEND;
/* Feature mask not yet known: answer "pending" for now. */
1959 sk->sk_state = BT_CONNECT2;
1960 result = L2CAP_CR_PEND;
1961 status = L2CAP_CS_NO_INFO;
1964 write_unlock_bh(&list->lock);
1967 bh_unlock_sock(parent);
1970 rsp.scid = cpu_to_le16(scid);
1971 rsp.dcid = cpu_to_le16(dcid);
1972 rsp.result = cpu_to_le16(result);
1973 rsp.status = cpu_to_le16(status);
1974 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First connection on this link: ask the peer for its feature mask. */
1976 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
1977 struct l2cap_info_req info;
1978 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1980 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1981 conn->info_ident = l2cap_get_ident(conn);
1983 mod_timer(&conn->info_timer, jiffies +
1984 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
1986 l2cap_send_cmd(conn, conn->info_ident,
1987 L2CAP_INFO_REQ, sizeof(info), &info);
/*
 * Handle an incoming Connection Response.  Look up the channel by scid
 * (or by the request ident for a pending response), then: on success
 * move to BT_CONFIG and send our Configure Request; on "pending" just
 * mark CONNECT_PEND; otherwise tear the channel down.
 * NOTE(review): elided extract — the switch header, CR_PEND case label,
 * socket locking and returns are not visible here.
 */
1993 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1995 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1996 u16 scid, dcid, result, status;
2000 scid = __le16_to_cpu(rsp->scid);
2001 dcid = __le16_to_cpu(rsp->dcid);
2002 result = __le16_to_cpu(rsp->result);
2003 status = __le16_to_cpu(rsp->status);
2005 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2008 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
/* Fallback: match by the ident of our original request. */
2012 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2018 case L2CAP_CR_SUCCESS:
2019 sk->sk_state = BT_CONFIG;
2020 l2cap_pi(sk)->ident = 0;
2021 l2cap_pi(sk)->dcid = dcid;
2022 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2024 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2026 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2027 l2cap_build_conf_req(sk, req), req);
/* Peer answered "pending": wait for the final response. */
2031 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
/* Refused: drop the channel. */
2035 l2cap_chan_del(sk, ECONNREFUSED);
/*
 * Handle an incoming Configure Request.  Options may be split across
 * several requests (continuation flag 0x0001): each chunk is appended
 * to pi->conf_req until the final one, which is parsed and answered.
 * When both directions are configured the channel becomes connected.
 * NOTE(review): elided extract — 'unlock' labels, negative-len handling
 * from l2cap_parse_conf_req and the return are not visible here.
 */
2043 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2045 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2051 dcid = __le16_to_cpu(req->dcid);
2052 flags = __le16_to_cpu(req->flags);
2054 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2056 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Ignore config while the channel is being torn down. */
2060 if (sk->sk_state == BT_DISCONN)
2063 /* Reject if config buffer is too small. */
2064 len = cmd_len - sizeof(*req);
2065 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2066 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2067 l2cap_build_conf_rsp(sk, rsp,
2068 L2CAP_CONF_REJECT, flags), rsp);
/* Buffer this chunk of options. */
2073 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2074 l2cap_pi(sk)->conf_len += len;
2076 if (flags & 0x0001) {
2077 /* Incomplete config. Send empty response. */
2078 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2079 l2cap_build_conf_rsp(sk, rsp,
2080 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2084 /* Complete config. */
2085 len = l2cap_parse_conf_req(sk, rsp);
2089 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2091 /* Reset config buffer. */
2092 l2cap_pi(sk)->conf_len = 0;
2094 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both sides configured: the channel is up. */
2097 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2098 sk->sk_state = BT_CONNECTED;
2099 l2cap_chan_ready(sk);
/* We never sent our own Configure Request yet: do it now. */
2103 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2105 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2106 l2cap_build_conf_req(sk, buf), buf);
/*
 * Handle an incoming Configure Response.  On UNACCEPT we retry the
 * same request up to L2CAP_CONF_MAX_RETRIES; on persistent failure we
 * disconnect the channel.  On success with both directions done the
 * channel becomes connected.
 * NOTE(review): elided extract — switch/default arms, continuation
 * flag handling and the unlock/return are not visible here.
 */
2116 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2117 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2120 u16 scid, flags, result;
2121 scid = __le16_to_cpu(rsp->scid);
2122 flags = __le16_to_cpu(rsp->flags);
2124 result = __le16_to_cpu(rsp->result);
2125 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2127 scid, flags, result);
2132 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2135 case L2CAP_CONF_SUCCESS:
2136 case L2CAP_CONF_UNACCEPT:
2138 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
2139 /* It does not make sense to adjust L2CAP parameters
2140 * that are currently defined in the spec. We simply
2141 * resend config request that we sent earlier. It is
2142 * stupid, but it helps qualification testing which
2143 * expects at least some response from us. */
2144 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2149 l2cap_build_conf_req(sk, req), req);
/* Retries exhausted: give up and disconnect the channel. */
2150 sk->sk_state = BT_DISCONN;
2151 sk->sk_err = ECONNRESET;
2153 l2cap_sock_set_timer(sk, HZ * 5);
2154 struct l2cap_disconn_req req;
2155 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
2156 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2157 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2165 L2CAP_DISCONN_REQ, sizeof(req), &req);
/* Inbound direction configured. */
2167 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2168 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2169 sk->sk_state = BT_CONNECTED;
2177 l2cap_chan_ready(sk);
2177 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2179 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2180 struct l2cap_disconn_rsp rsp;
2184 scid = __le16_to_cpu(req->scid);
2185 dcid = __le16_to_cpu(req->dcid);
2187 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2189 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2193 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2194 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2195 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2197 sk->sk_shutdown = SHUTDOWN_MASK;
2199 l2cap_chan_del(sk, ECONNRESET);
2202 l2cap_sock_kill(sk);
/*
 * Handle an incoming Disconnection Response: the peer confirmed our
 * disconnect, so remove the channel (no error) and kill the socket.
 */
2206 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2208 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2212 scid = __le16_to_cpu(rsp->scid);
2213 dcid = __le16_to_cpu(rsp->dcid);
2215 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2217 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2221 l2cap_chan_del(sk, 0);
2224 l2cap_sock_kill(sk);
/*
 * Handle an incoming Information Request.  Answer the feature mask
 * (adding ERTM when enabled) or the fixed-channel map; any other type
 * gets an L2CAP_IR_NOTSUPP response.
 * NOTE(review): elided extract — local buf declarations, the
 * enable_ertm condition guarding the ERTM bit and the return are not
 * visible here.
 */
2228 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2230 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2233 type = __le16_to_cpu(req->type);
2235 BT_DBG("type 0x%4.4x", type);
2237 if (type == L2CAP_IT_FEAT_MASK) {
2239 u32 feat_mask = l2cap_feat_mask;
2240 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2241 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2242 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2244 feat_mask |= L2CAP_FEAT_ERTM;
/* rsp->data may be unaligned inside buf, hence put_unaligned. */
2245 put_unaligned(cpu_to_le32(feat_mask), (__le32 *) rsp->data);
2246 l2cap_send_cmd(conn, cmd->ident,
2247 L2CAP_INFO_RSP, sizeof(buf), buf);
2248 } else if (type == L2CAP_IT_FIXED_CHAN) {
2250 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2251 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2252 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Fixed-channel map follows the 4-byte response header. */
2253 memcpy(buf + 4, l2cap_fixed_chan, 8);
2254 l2cap_send_cmd(conn, cmd->ident,
2255 L2CAP_INFO_RSP, sizeof(buf), buf);
2257 struct l2cap_info_rsp rsp;
2258 rsp.type = cpu_to_le16(type);
2259 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2260 l2cap_send_cmd(conn, cmd->ident,
2261 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an incoming Information Response.  After a feature-mask reply,
 * chase it with a fixed-channel request if the peer supports fixed
 * channels; otherwise (and after the fixed-channel reply) mark the
 * exchange complete and start any pending channels.
 */
2267 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2269 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2272 type = __le16_to_cpu(rsp->type);
2273 result = __le16_to_cpu(rsp->result);
2275 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2277 del_timer(&conn->info_timer);
2279 if (type == L2CAP_IT_FEAT_MASK) {
2280 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer advertises fixed channels: query the channel map next. */
2282 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2283 struct l2cap_info_req req;
2284 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2286 conn->info_ident = l2cap_get_ident(conn);
2288 l2cap_send_cmd(conn, conn->info_ident,
2289 L2CAP_INFO_REQ, sizeof(req), &req);
2291 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2292 conn->info_ident = 0;
2294 l2cap_conn_start(conn);
2296 } else if (type == L2CAP_IT_FIXED_CHAN) {
2297 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2298 conn->info_ident = 0;
2300 l2cap_conn_start(conn);
/*
 * Demultiplex the signalling channel: deliver the raw frame to raw
 * sockets, then walk the packet, dispatching each embedded command to
 * its handler.  A handler error triggers a Command Reject back to the
 * peer.
 * NOTE(review): elided extract — len/err initialization, 'break'
 * statements between cases, loop advance past cmd_len and kfree_skb
 * are not visible here.
 */
2306 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
2308 u8 *data = skb->data;
2310 struct l2cap_cmd_hdr cmd;
/* Mirror the whole signalling frame to any raw sockets first. */
2313 l2cap_raw_recv(conn, skb);
2315 while (len >= L2CAP_CMD_HDR_SIZE) {
2317 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2318 data += L2CAP_CMD_HDR_SIZE;
2319 len -= L2CAP_CMD_HDR_SIZE;
2321 cmd_len = le16_to_cpu(cmd.len);
2323 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command must fit in what's left and carry a non-zero ident. */
2325 if (cmd_len > len || !cmd.ident) {
2326 BT_DBG("corrupted command");
2331 case L2CAP_COMMAND_REJ:
2332 l2cap_command_rej(conn, &cmd, data);
2335 case L2CAP_CONN_REQ:
2336 err = l2cap_connect_req(conn, &cmd, data);
2339 case L2CAP_CONN_RSP:
2340 err = l2cap_connect_rsp(conn, &cmd, data);
2343 case L2CAP_CONF_REQ:
2344 err = l2cap_config_req(conn, &cmd, cmd_len, data);
2347 case L2CAP_CONF_RSP:
2348 err = l2cap_config_rsp(conn, &cmd, data);
2351 case L2CAP_DISCONN_REQ:
2352 err = l2cap_disconnect_req(conn, &cmd, data);
2355 case L2CAP_DISCONN_RSP:
2356 err = l2cap_disconnect_rsp(conn, &cmd, data);
2359 case L2CAP_ECHO_REQ:
/* Echo the request payload straight back. */
2360 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2363 case L2CAP_ECHO_RSP:
2366 case L2CAP_INFO_REQ:
2367 err = l2cap_information_req(conn, &cmd, data);
2370 case L2CAP_INFO_RSP:
2371 err = l2cap_information_rsp(conn, &cmd, data);
2375 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
2381 struct l2cap_cmd_rej rej;
2382 BT_DBG("error %d", err);
2384 /* FIXME: Map err to a valid reason */
2385 rej.reason = cpu_to_le16(0);
2386 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/*
 * Deliver a data frame to the connection-oriented channel identified
 * by 'cid'.  Frames for unknown CIDs, non-connected channels or frames
 * larger than the negotiated incoming MTU are dropped.
 * NOTE(review): elided extract — the drop/done labels, socket unlock
 * and return are not visible here.
 */
2396 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
2400 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
2402 BT_DBG("unknown cid 0x%4.4x", cid);
2406 BT_DBG("sk %p, len %d", sk, skb->len);
2408 if (sk->sk_state != BT_CONNECTED)
/* Enforce the negotiated incoming MTU. */
2411 if (l2cap_pi(sk)->imtu < skb->len)
2414 /* If socket recv buffers overflows we drop data here
2415 * which is *bad* because L2CAP has to be reliable.
2416 * But we don't have any other choice. L2CAP doesn't
2417 * provide flow control mechanism. */
2419 if (!sock_queue_rcv_skb(sk, skb))
/*
 * Deliver a connectionless (group) frame to a socket bound to the
 * given PSM.  MTU-oversized frames are dropped.
 * NOTE(review): elided extract — drop label and return not visible.
 */
2432 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
2436 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
2440 BT_DBG("sk %p, len %d", sk, skb->len);
/* Bound or connected sockets may receive connectionless data. */
2442 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
2445 if (l2cap_pi(sk)->imtu < skb->len)
2448 if (!sock_queue_rcv_skb(sk, skb))
/*
 * Route one complete, reassembled L2CAP frame by CID: signalling,
 * connectionless (PSM-prefixed payload) or connection-oriented data.
 */
2460 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2462 struct l2cap_hdr *lh = (void *) skb->data;
/* Strip the basic L2CAP header; lh still points at the old bytes. */
2466 skb_pull(skb, L2CAP_HDR_SIZE);
2467 cid = __le16_to_cpu(lh->cid);
2468 len = __le16_to_cpu(lh->len);
2470 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2473 case L2CAP_CID_SIGNALING:
2474 l2cap_sig_channel(conn, skb);
2477 case L2CAP_CID_CONN_LESS:
/* Connectionless payload starts with a 2-byte PSM. */
2478 psm = get_unaligned((__le16 *) skb->data);
2480 l2cap_conless_channel(conn, psm, skb);
2484 l2cap_data_channel(conn, cid, skb);
2489 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * HCI callback: should we accept an incoming ACL connection from
 * 'bdaddr'?  Scan listening sockets; an exact local-address match
 * (lm1) takes precedence over BDADDR_ANY listeners (lm2).  The
 * returned link-mode mask may include HCI_LM_MASTER for role switch.
 * NOTE(review): elided extract — the 'exact++' increment for exact
 * matches is among the missing lines.
 */
2491 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2493 int exact = 0, lm1 = 0, lm2 = 0;
2494 register struct sock *sk;
2495 struct hlist_node *node;
/* Only ACL links carry L2CAP. */
2497 if (type != ACL_LINK)
2500 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2502 /* Find listening sockets and check their link_mode */
2503 read_lock(&l2cap_sk_list.lock);
2504 sk_for_each(sk, node, &l2cap_sk_list.head) {
2505 if (sk->sk_state != BT_LISTEN)
2508 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2509 lm1 |= HCI_LM_ACCEPT;
2510 if (l2cap_pi(sk)->role_switch)
2511 lm1 |= HCI_LM_MASTER;
2513 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
2514 lm2 |= HCI_LM_ACCEPT;
2515 if (l2cap_pi(sk)->role_switch)
2516 lm2 |= HCI_LM_MASTER;
2519 read_unlock(&l2cap_sk_list.lock);
2521 return exact ? lm1 : lm2;
/*
 * HCI callback: ACL connect completed.  On success attach an L2CAP
 * connection object and mark it ready; on failure tear everything
 * down with the mapped error.
 */
2524 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2526 struct l2cap_conn *conn;
2528 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2530 if (hcon->type != ACL_LINK)
2534 conn = l2cap_conn_add(hcon, status);
2536 l2cap_conn_ready(conn);
/* Non-zero HCI status: fail all channels on this link. */
2538 l2cap_conn_del(hcon, bt_err(status));
/*
 * HCI callback: return the reason code to use when disconnecting this
 * ACL link (set earlier, e.g. on a security block).
 */
2543 static int l2cap_disconn_ind(struct hci_conn *hcon)
2545 struct l2cap_conn *conn = hcon->l2cap_data;
2547 BT_DBG("hcon %p", hcon);
2549 if (hcon->type != ACL_LINK || !conn)
2552 return conn->disc_reason;
/*
 * HCI callback: ACL link went down — delete the L2CAP connection and
 * error out all of its channels with the mapped reason.
 */
2555 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
2557 BT_DBG("hcon %p reason %d", hcon, reason);
2559 if (hcon->type != ACL_LINK)
2562 l2cap_conn_del(hcon, bt_err(reason));
/*
 * React to an encryption change on a SEQPACKET channel: when encryption
 * drops, MEDIUM-security channels get a 5 s grace timer and HIGH ones
 * are closed immediately; when it comes up, clear the pending timer.
 */
2567 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
2569 if (sk->sk_type != SOCK_SEQPACKET)
2572 if (encrypt == 0x00) {
2573 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
/* Give re-encryption a chance before killing the channel. */
2574 l2cap_sock_clear_timer(sk);
2575 l2cap_sock_set_timer(sk, HZ * 5);
2576 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
2577 __l2cap_sock_close(sk, ECONNREFUSED);
2579 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
2580 l2cap_sock_clear_timer(sk);
/*
 * HCI callback: authentication/encryption procedure finished with
 * 'status'.  For each channel on the connection: re-check encryption
 * on established channels, send the deferred Connection Request for
 * channels in BT_CONNECT, and answer pending inbound connects
 * (BT_CONNECT2) with success or a security block.
 * NOTE(review): elided extract — per-socket bh_lock/unlock, the
 * CONNECT_PEND clearing and status checks around the BT_CONNECT2
 * result selection are not visible here.
 */
2584 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2586 struct l2cap_chan_list *l;
2587 struct l2cap_conn *conn = hcon->l2cap_data;
2593 l = &conn->chan_list;
2595 BT_DBG("conn %p", conn);
2597 read_lock(&l->lock);
2599 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2602 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
2607 if (!status && (sk->sk_state == BT_CONNECTED ||
2608 sk->sk_state == BT_CONFIG)) {
2609 l2cap_check_encryption(sk, encrypt);
/* Outbound channel waiting on security: send the request now. */
2614 if (sk->sk_state == BT_CONNECT) {
2616 struct l2cap_conn_req req;
2617 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2618 req.psm = l2cap_pi(sk)->psm;
2620 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2622 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2623 L2CAP_CONN_REQ, sizeof(req), &req);
2625 l2cap_sock_clear_timer(sk);
2626 l2cap_sock_set_timer(sk, HZ / 10);
/* Inbound channel waiting on security: finalize the response. */
2628 } else if (sk->sk_state == BT_CONNECT2) {
2629 struct l2cap_conn_rsp rsp;
2633 sk->sk_state = BT_CONFIG;
2634 result = L2CAP_CR_SUCCESS;
2636 sk->sk_state = BT_DISCONN;
2637 l2cap_sock_set_timer(sk, HZ / 10);
2638 result = L2CAP_CR_SEC_BLOCK;
2641 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2642 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2643 rsp.result = cpu_to_le16(result);
2644 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2645 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2646 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2652 read_unlock(&l->lock);
/*
 * HCI callback: incoming ACL data.  Reassemble L2CAP frames from ACL
 * fragments: an ACL_START fragment carries the L2CAP header giving the
 * total length; continuation fragments are appended to conn->rx_skb
 * until rx_len reaches zero, then the complete frame is dispatched.
 * Any framing violation marks the connection unreliable (ECOMM).
 * NOTE(review): elided extract — several guard conditions (e.g. the
 * leftover-rx_skb check, minimum-length check), drop labels, the
 * kfree_skb of the input and the return are not visible here.
 */
2657 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2659 struct l2cap_conn *conn = hcon->l2cap_data;
/* Create the L2CAP connection object on first data if needed. */
2661 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2664 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2666 if (flags & ACL_START) {
2667 struct l2cap_hdr *hdr;
/* A start fragment while reassembly is in progress is an error. */
2671 BT_ERR("Unexpected start frame (len %d)", skb->len);
2672 kfree_skb(conn->rx_skb);
2673 conn->rx_skb = NULL;
2675 l2cap_conn_unreliable(conn, ECOMM);
2679 BT_ERR("Frame is too short (len %d)", skb->len);
2680 l2cap_conn_unreliable(conn, ECOMM);
2684 hdr = (struct l2cap_hdr *) skb->data;
2685 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2687 if (len == skb->len) {
2688 /* Complete frame received */
2689 l2cap_recv_frame(conn, skb);
2693 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2695 if (skb->len > len) {
2696 BT_ERR("Frame is too long (len %d, expected len %d)",
2698 l2cap_conn_unreliable(conn, ECOMM);
2702 /* Allocate skb for the complete frame (with header) */
2703 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
2707 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Bytes still expected from continuation fragments. */
2709 conn->rx_len = len - skb->len;
2711 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2713 if (!conn->rx_len) {
2714 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2715 l2cap_conn_unreliable(conn, ECOMM);
2719 if (skb->len > conn->rx_len) {
2720 BT_ERR("Fragment is too long (len %d, expected %d)",
2721 skb->len, conn->rx_len);
2722 kfree_skb(conn->rx_skb);
2723 conn->rx_skb = NULL;
2725 l2cap_conn_unreliable(conn, ECOMM);
2729 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2731 conn->rx_len -= skb->len;
2733 if (!conn->rx_len) {
2734 /* Complete frame received */
2735 l2cap_recv_frame(conn, conn->rx_skb);
2736 conn->rx_skb = NULL;
/*
 * sysfs class attribute: dump one line per L2CAP socket (addresses,
 * state, PSM, CIDs, MTUs, security level) into 'buf'.
 * NOTE(review): unbounded sprintf into a PAGE_SIZE sysfs buffer —
 * presumably safe only while the socket list stays small; later
 * kernels converted this to debugfs/seq_file.  Elided extract: 'str'
 * init and the length return are not visible here.
 */
2745 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2748 struct hlist_node *node;
2751 read_lock_bh(&l2cap_sk_list.lock);
2753 sk_for_each(sk, node, &l2cap_sk_list.head) {
2754 struct l2cap_pinfo *pi = l2cap_pi(sk);
2756 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
2757 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2758 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
2759 pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
2762 read_unlock_bh(&l2cap_sk_list.lock);
/* Read-only /sys/class/bluetooth/l2cap attribute backed by the show
 * function above. */
2767 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* Socket operations for AF_BLUETOOTH/BTPROTO_L2CAP sockets; mmap and
 * socketpair are unsupported, poll/ioctl use the generic bt_sock
 * helpers. */
2769 static const struct proto_ops l2cap_sock_ops = {
2770 .family = PF_BLUETOOTH,
2771 .owner = THIS_MODULE,
2772 .release = l2cap_sock_release,
2773 .bind = l2cap_sock_bind,
2774 .connect = l2cap_sock_connect,
2775 .listen = l2cap_sock_listen,
2776 .accept = l2cap_sock_accept,
2777 .getname = l2cap_sock_getname,
2778 .sendmsg = l2cap_sock_sendmsg,
2779 .recvmsg = l2cap_sock_recvmsg,
2780 .poll = bt_sock_poll,
2781 .ioctl = bt_sock_ioctl,
2782 .mmap = sock_no_mmap,
2783 .socketpair = sock_no_socketpair,
2784 .shutdown = l2cap_sock_shutdown,
2785 .setsockopt = l2cap_sock_setsockopt,
2786 .getsockopt = l2cap_sock_getsockopt
/* Registered with bt_sock_register() so socket(PF_BLUETOOTH,
 * BTPROTO_L2CAP) creates L2CAP sockets. */
2789 static struct net_proto_family l2cap_sock_family_ops = {
2790 .family = PF_BLUETOOTH,
2791 .owner = THIS_MODULE,
2792 .create = l2cap_sock_create,
/* Hooks registered with the HCI core: connection accept/confirm,
 * disconnect, security results and inbound ACL data. */
2795 static struct hci_proto l2cap_hci_proto = {
2797 .id = HCI_PROTO_L2CAP,
2798 .connect_ind = l2cap_connect_ind,
2799 .connect_cfm = l2cap_connect_cfm,
2800 .disconn_ind = l2cap_disconn_ind,
2801 .disconn_cfm = l2cap_disconn_cfm,
2802 .security_cfm = l2cap_security_cfm,
2803 .recv_acldata = l2cap_recv_acldata
/*
 * Module init: register the proto, the Bluetooth socket family and the
 * HCI protocol hooks; each failure unwinds the previous registrations.
 * The sysfs file is best-effort only.
 * NOTE(review): elided extract — error-label structure and the success
 * return are not visible here.
 */
2806 static int __init l2cap_init(void)
2810 err = proto_register(&l2cap_proto, 0);
2814 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2816 BT_ERR("L2CAP socket registration failed");
2820 err = hci_register_proto(&l2cap_hci_proto);
2822 BT_ERR("L2CAP protocol registration failed");
2823 bt_sock_unregister(BTPROTO_L2CAP);
/* Non-fatal: module still works without the sysfs info file. */
2827 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2828 BT_ERR("Failed to create L2CAP info file");
2830 BT_INFO("L2CAP ver %s", VERSION);
2831 BT_INFO("L2CAP socket layer initialized");
2836 proto_unregister(&l2cap_proto);
/*
 * Module exit: tear down in reverse order of l2cap_init — sysfs file,
 * socket family, HCI protocol hooks, then the proto itself.
 */
2840 static void __exit l2cap_exit(void)
2842 class_remove_file(bt_class, &class_attr_l2cap);
2844 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2845 BT_ERR("L2CAP socket unregistration failed");
2847 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2848 BT_ERR("L2CAP protocol unregistration failed");
2850 proto_unregister(&l2cap_proto);
/* Exported no-op: referencing this symbol from another module pulls in
 * (and auto-loads) the L2CAP module. */
2853 void l2cap_load(void)
2855 /* Dummy function to trigger automatic L2CAP module loading by
2856 * other modules that use L2CAP sockets but don't use any other
2857 * symbols from it. */
2860 EXPORT_SYMBOL(l2cap_load);
2862 module_init(l2cap_init);
2863 module_exit(l2cap_exit);
/* NOTE(review): enable_ertm is declared 'static int' (see top of file)
 * but exposed with the 'bool' param type — works on this kernel
 * generation, but newer kernels require the variable itself to be
 * bool; verify against the target kernel version. */
2865 module_param(enable_ertm, bool, 0644);
2866 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
2868 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
2869 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2870 MODULE_VERSION(VERSION);
2871 MODULE_LICENSE("GPL");
/* Alias so the socket layer can request this module by protocol id. */
2872 MODULE_ALIAS("bt-proto-0");