2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
46 #include <asm/system.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
#define VERSION "2.14"

/* Module parameter: Enhanced Retransmission Mode support (disabled by default). */
static int enable_ertm = 0;

/* Locally advertised L2CAP feature mask and fixed-channel bitmap. */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };

static const struct proto_ops l2cap_sock_ops;

/* Global list of all L2CAP sockets; .lock serializes walks and updates. */
static struct bt_sock_list l2cap_sk_list = {
.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)

/* Forward declarations for routines used before their definitions below. */
static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
u8 code, u8 ident, u16 dlen, void *data);
/* ---- L2CAP timers ---- */
/*
 * sk_timer callback: a pending operation on @sk timed out.  Chooses an
 * errno from the socket state and forces the channel closed.
 * NOTE(review): declaration/default of 'reason', socket locking and the
 * final kill/put appear elided in this extract — confirm in full file.
 */
static void l2cap_sock_timeout(unsigned long arg)
struct sock *sk = (struct sock *) arg;

BT_DBG("sock %p state %d", sk, sk->sk_state);

/* Connected/configuring channels and secured connect attempts report
 * ECONNREFUSED on timeout (SDP-level connects presumably differ). */
if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
reason = ECONNREFUSED;
else if (sk->sk_state == BT_CONNECT &&
l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
reason = ECONNREFUSED;

__l2cap_sock_close(sk, reason);
/* (Re)arm the socket timer to fire @timeout jiffies from now. */
static void l2cap_sock_set_timer(struct sock *sk, long timeout)
BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timer, if any. */
static void l2cap_sock_clear_timer(struct sock *sk)
BT_DBG("sock %p state %d", sk, sk->sk_state);
sk_stop_timer(sk, &sk->sk_timer);
/* ---- L2CAP channels ---- */
/* Walk the connection's channel list for the channel whose destination
 * CID matches @cid.  Caller must hold the list lock. */
static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
for (s = l->head; s; s = l2cap_pi(s)->next_c) {
if (l2cap_pi(s)->dcid == cid)
/* Walk the connection's channel list for the channel whose source CID
 * matches @cid.  Caller must hold the list lock. */
static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
for (s = l->head; s; s = l2cap_pi(s)->next_c) {
if (l2cap_pi(s)->scid == cid)
/* Find channel with given SCID.
 * Returns locked socket */
/* Locking wrapper: takes the list read lock, looks up by SCID, and
 * (per the comment above) returns with the found socket locked. */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
s = __l2cap_get_chan_by_scid(l, cid);
read_unlock(&l->lock);
/* Look up the channel waiting on signalling identifier @ident.
 * Caller must hold the list lock. */
static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
for (s = l->head; s; s = l2cap_pi(s)->next_c) {
if (l2cap_pi(s)->ident == ident)
/* Locking wrapper around __l2cap_get_chan_by_ident(). */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
s = __l2cap_get_chan_by_ident(l, ident);
read_unlock(&l->lock);
/* Allocate the first free dynamic channel ID in
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END).  Caller must hold the
 * channel-list lock so the CID stays unique. */
static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
u16 cid = L2CAP_CID_DYN_START;

for (; cid < L2CAP_CID_DYN_END; cid++) {
if (!__l2cap_get_chan_by_scid(l, cid))
/* Push @sk onto the head of the connection's doubly linked channel
 * list.  Caller must hold the list write lock. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
l2cap_pi(l->head)->prev_c = sk;

l2cap_pi(sk)->next_c = l->head;
l2cap_pi(sk)->prev_c = NULL;
/* Remove @sk from the connection's channel list, fixing up neighbour
 * links under the list write lock (bh-safe variant). */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

write_lock_bh(&l->lock);
l2cap_pi(next)->prev_c = prev;
l2cap_pi(prev)->next_c = next;
write_unlock_bh(&l->lock);
/*
 * Attach @sk to @conn and assign its channel IDs based on socket type:
 * SEQPACKET gets a fresh dynamic SCID, DGRAM uses the connectionless
 * CID, and raw sockets are pinned to the signalling CID.  If @parent is
 * set the new socket is queued on the parent's accept queue.
 * Caller must hold the channel-list write lock (see l2cap_chan_add).
 */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
struct l2cap_chan_list *l = &conn->chan_list;

BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

/* 0x13 = "remote user terminated connection" default disconnect reason. */
conn->disc_reason = 0x13;

l2cap_pi(sk)->conn = conn;

if (sk->sk_type == SOCK_SEQPACKET) {
/* Alloc CID for connection-oriented socket */
l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
} else if (sk->sk_type == SOCK_DGRAM) {
/* Connectionless socket */
l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
/* Raw socket can send/recv signalling messages only */
l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;

__l2cap_chan_link(l, sk);

bt_accept_enqueue(parent, sk);
 * Must be called on the locked socket. */
/*
 * Detach @sk from its connection and mark it closed with @err.
 * Drops the hci_conn reference taken when the channel was added and
 * wakes either the accepting parent or the socket itself.
 */
static void l2cap_chan_del(struct sock *sk, int err)
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sock *parent = bt_sk(sk)->parent;

l2cap_sock_clear_timer(sk);

BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

/* Unlink from channel list */
l2cap_chan_unlink(&conn->chan_list, sk);
l2cap_pi(sk)->conn = NULL;
hci_conn_put(conn->hcon);

sk->sk_state = BT_CLOSED;
sock_set_flag(sk, SOCK_ZAPPED);

/* Unaccepted child: drop from parent's queue and notify the parent;
 * otherwise just signal the state change on the socket itself. */
bt_accept_unlink(sk);
parent->sk_data_ready(parent, 0);
sk->sk_state_change(sk);
/* Service level security */
/*
 * Map the socket's security level to an HCI authentication type and ask
 * the HCI layer to enforce it on the underlying ACL link.  PSM 0x0001
 * (SDP) never requires bonding; other PSMs request general bonding
 * according to sec_level.
 */
static inline int l2cap_check_security(struct sock *sk)
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
auth_type = HCI_AT_NO_BONDING_MITM;
auth_type = HCI_AT_NO_BONDING;

/* SDP connections are downgraded to the dedicated SDP level. */
if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
switch (l2cap_pi(sk)->sec_level) {
case BT_SECURITY_HIGH:
auth_type = HCI_AT_GENERAL_BONDING_MITM;
case BT_SECURITY_MEDIUM:
auth_type = HCI_AT_GENERAL_BONDING;
auth_type = HCI_AT_NO_BONDING;

return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling command identifier for @conn.
 * Wraps within the kernel-reserved 1-128 range (see comment below). */
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
/* Get next available identificator.
 * 1 - 128 are used by kernel.
 * 129 - 199 are reserved.
 * 200 - 254 are used by utilities like l2ping, etc.
spin_lock_bh(&conn->lock);
if (++conn->tx_ident > 128)
spin_unlock_bh(&conn->lock);
/* Build a signalling PDU (code/ident/payload) and queue it on the ACL
 * link.  Returns the hci_send_acl() result. */
static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);

BT_DBG("code 0x%2.2x", code);

return hci_send_acl(conn->hcon, skb, 0);
/*
 * Kick off channel establishment on @sk.  If the remote feature mask is
 * already known (or being fetched) a Connection Request is sent once
 * security allows; otherwise an Information Request for the feature
 * mask is issued first, guarded by the info timer.
 */
static void l2cap_do_start(struct sock *sk)
struct l2cap_conn *conn = l2cap_pi(sk)->conn;

if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight — wait for its completion. */
if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))

if (l2cap_check_security(sk)) {
struct l2cap_conn_req req;
req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
req.psm = l2cap_pi(sk)->psm;

l2cap_pi(sk)->ident = l2cap_get_ident(conn);

l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
L2CAP_CONN_REQ, sizeof(req), &req);
struct l2cap_info_req req;
req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);

/* Bound the feature-mask exchange with the info timer. */
mod_timer(&conn->info_timer, jiffies +
msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

l2cap_send_cmd(conn, conn->info_ident,
L2CAP_INFO_REQ, sizeof(req), &req);
/* Send an L2CAP Disconnection Request for the channel behind @sk. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
struct l2cap_disconn_req req;

req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
l2cap_send_cmd(conn, l2cap_get_ident(conn),
L2CAP_DISCONN_REQ, sizeof(req), &req);
/* ---- L2CAP connections ---- */
/*
 * Walk every channel on @conn after the feature exchange finishes and
 * advance its state machine: BT_CONNECT channels send their Connection
 * Request (security permitting); BT_CONNECT2 channels answer the peer's
 * pending Connection Request with success, authorization-pending (when
 * defer_setup is on) or authentication-pending.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
struct l2cap_chan_list *l = &conn->chan_list;

BT_DBG("conn %p", conn);

for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {

/* Only connection-oriented channels take part. */
if (sk->sk_type != SOCK_SEQPACKET) {

if (sk->sk_state == BT_CONNECT) {
if (l2cap_check_security(sk)) {
struct l2cap_conn_req req;
req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
req.psm = l2cap_pi(sk)->psm;

l2cap_pi(sk)->ident = l2cap_get_ident(conn);

l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
L2CAP_CONN_REQ, sizeof(req), &req);
} else if (sk->sk_state == BT_CONNECT2) {
struct l2cap_conn_rsp rsp;
rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

if (l2cap_check_security(sk)) {
if (bt_sk(sk)->defer_setup) {
/* Userspace must accept() before we complete setup. */
struct sock *parent = bt_sk(sk)->parent;
rsp.result = cpu_to_le16(L2CAP_CR_PEND);
rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
parent->sk_data_ready(parent, 0);
sk->sk_state = BT_CONFIG;
rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
rsp.result = cpu_to_le16(L2CAP_CR_PEND);
rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);

l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
L2CAP_CONN_RSP, sizeof(rsp), &rsp);
read_unlock(&l->lock);
/*
 * ACL link came up: mark non-SEQPACKET channels connected immediately
 * and start channel establishment for SEQPACKET channels in BT_CONNECT.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
struct l2cap_chan_list *l = &conn->chan_list;

BT_DBG("conn %p", conn);

for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
if (sk->sk_type != SOCK_SEQPACKET) {
l2cap_sock_clear_timer(sk);
sk->sk_state = BT_CONNECTED;
sk->sk_state_change(sk);
} else if (sk->sk_state == BT_CONNECT)
read_unlock(&l->lock);
/* Notify sockets that we cannot guaranty reliability anymore */
/* Walks the channel list and reports @err on every socket that asked
 * for reliable delivery (force_reliable). */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
struct l2cap_chan_list *l = &conn->chan_list;

BT_DBG("conn %p", conn);

for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
if (l2cap_pi(sk)->force_reliable)
read_unlock(&l->lock);
/* Info-request timer expired: give up on the feature-mask exchange,
 * mark it done and let pending channels proceed without it. */
static void l2cap_info_timeout(unsigned long arg)
struct l2cap_conn *conn = (void *) arg;

conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;

l2cap_conn_start(conn);
/*
 * Find or create the L2CAP connection object for @hcon.  Allocates with
 * GFP_ATOMIC (may run in bottom-half context), caches the ACL MTU and
 * addresses, and initializes the info timer and locks.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
struct l2cap_conn *conn = hcon->l2cap_data;
conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
hcon->l2cap_data = conn;

BT_DBG("hcon %p conn %p", hcon, conn);

conn->mtu = hcon->hdev->acl_mtu;
conn->src = &hcon->hdev->bdaddr;
conn->dst = &hcon->dst;
setup_timer(&conn->info_timer, l2cap_info_timeout,
(unsigned long) conn);

spin_lock_init(&conn->lock);
rwlock_init(&conn->chan_list.lock);

/* Default disconnect reason: remote user terminated connection. */
conn->disc_reason = 0x13;
/*
 * Tear down the L2CAP connection on @hcon: free any partial reassembly
 * buffer, close every remaining channel with @err, stop the info timer
 * and detach from the hci_conn.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
struct l2cap_conn *conn = hcon->l2cap_data;
BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

kfree_skb(conn->rx_skb);

/* Kill channels one by one off the head of the list. */
while ((sk = conn->chan_list.head)) {
l2cap_chan_del(sk, err);
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
del_timer_sync(&conn->info_timer);

hcon->l2cap_data = NULL;
/* Locked wrapper: add @sk to @conn under the channel-list write lock. */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
struct l2cap_chan_list *l = &conn->chan_list;
write_lock_bh(&l->lock);
__l2cap_chan_add(conn, sk, parent);
write_unlock_bh(&l->lock);
/* ---- Socket interface ---- */
/* Find the socket bound to exactly (@psm, @src).  Caller must hold
 * l2cap_sk_list.lock. */
static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
struct hlist_node *node;
sk_for_each(sk, node, &l2cap_sk_list.head)
if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
/* Find socket with psm and source bdaddr.
 * Returns closest match.
/* Exact (@psm, @src) match wins; a BDADDR_ANY wildcard binding is kept
 * as the fallback (sk1).  @state of 0 matches any socket state. */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
struct sock *sk = NULL, *sk1 = NULL;
struct hlist_node *node;

sk_for_each(sk, node, &l2cap_sk_list.head) {
if (state && sk->sk_state != state)

if (l2cap_pi(sk)->psm == psm) {
/* Exact match */
if (!bacmp(&bt_sk(sk)->src, src))

/* Closest match */
if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))

/* node != NULL means the loop broke on an exact match. */
return node ? sk : sk1;
/* Find socket with given address (psm, src).
 * Returns locked socket */
/* Locking wrapper over __l2cap_get_sock_by_psm(). */
static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
read_lock(&l2cap_sk_list.lock);
s = __l2cap_get_sock_by_psm(state, psm, src);
read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: drop any queued receive/transmit skbs. */
static void l2cap_sock_destruct(struct sock *sk)
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
/* Shut down a listening socket: close every not-yet-accepted child,
 * then mark the parent closed and zapped. */
static void l2cap_sock_cleanup_listen(struct sock *parent)
BT_DBG("parent %p", parent);

/* Close not yet accepted channels */
while ((sk = bt_accept_dequeue(parent, NULL)))
l2cap_sock_close(sk);

parent->sk_state = BT_CLOSED;
sock_set_flag(parent, SOCK_ZAPPED);
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
static void l2cap_sock_kill(struct sock *sk)
/* Still attached to a struct socket, or not zapped: not ours to free. */
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)

BT_DBG("sk %p state %d", sk, sk->sk_state);

/* Kill poor orphan */
bt_sock_unlink(&l2cap_sk_list, sk);
sock_set_flag(sk, SOCK_DEAD);
/*
 * State-machine close for @sk with @reason.  Listening sockets sweep
 * their accept queue; established SEQPACKET channels send a Disconnect
 * Request and wait in BT_DISCONN; a deferred/unaccepted incoming
 * channel (BT_CONNECT2 path) answers the peer's Connection Request with
 * a reject before deletion.  Caller holds the socket lock.
 */
static void __l2cap_sock_close(struct sock *sk, int reason)
BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

switch (sk->sk_state) {
l2cap_sock_cleanup_listen(sk);
if (sk->sk_type == SOCK_SEQPACKET) {
struct l2cap_conn *conn = l2cap_pi(sk)->conn;

/* Graceful disconnect: bounded by the send timeout. */
sk->sk_state = BT_DISCONN;
l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
l2cap_send_disconn_req(conn, sk);
l2cap_chan_del(sk, reason);
if (sk->sk_type == SOCK_SEQPACKET) {
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct l2cap_conn_rsp rsp;

/* Pick the reject code the peer will see. */
if (bt_sk(sk)->defer_setup)
result = L2CAP_CR_SEC_BLOCK;
result = L2CAP_CR_BAD_PSM;

rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
rsp.result = cpu_to_le16(result);
rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
L2CAP_CONN_RSP, sizeof(rsp), &rsp);
l2cap_chan_del(sk, reason);
l2cap_chan_del(sk, reason);
sock_set_flag(sk, SOCK_ZAPPED);
/* Must be called on unlocked socket. */
/* Stop the timer and close with ECONNRESET (locking of the socket
 * around __l2cap_sock_close appears elided in this extract). */
static void l2cap_sock_close(struct sock *sk)
l2cap_sock_clear_timer(sk);
__l2cap_sock_close(sk, ECONNRESET);
/*
 * Initialize a new L2CAP socket.  With @parent set (incoming child of a
 * listener) the child inherits type, defer_setup and all per-channel
 * options from the parent; otherwise sane defaults are installed.
 */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
struct l2cap_pinfo *pi = l2cap_pi(sk);
sk->sk_type = parent->sk_type;
bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

pi->imtu = l2cap_pi(parent)->imtu;
pi->omtu = l2cap_pi(parent)->omtu;
pi->mode = l2cap_pi(parent)->mode;
pi->fcs = l2cap_pi(parent)->fcs;
pi->sec_level = l2cap_pi(parent)->sec_level;
pi->role_switch = l2cap_pi(parent)->role_switch;
pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* Fresh socket: defaults. */
pi->imtu = L2CAP_DEFAULT_MTU;
pi->mode = L2CAP_MODE_BASIC;
pi->fcs = L2CAP_FCS_CRC16;
pi->sec_level = BT_SECURITY_LOW;
pi->force_reliable = 0;

/* Default config options */
pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Protocol descriptor: sizes sk_alloc() for struct l2cap_pinfo. */
static struct proto l2cap_proto = {
.owner = THIS_MODULE,
.obj_size = sizeof(struct l2cap_pinfo)
/*
 * Allocate and minimally initialize an L2CAP struct sock: generic sock
 * init, accept queue, destructor, connect timeout, sk_timer, and link
 * into the global socket list.
 */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
sock_init_data(sock, sk);
INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

sk->sk_destruct = l2cap_sock_destruct;
sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

sock_reset_flag(sk, SOCK_ZAPPED);

sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;

setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

bt_sock_link(&l2cap_sk_list, sk);
/*
 * socket(2) backend: accept SEQPACKET/DGRAM/RAW only, require
 * CAP_NET_RAW for raw sockets, then allocate and init the sock.
 */
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
BT_DBG("sock %p", sock);

sock->state = SS_UNCONNECTED;

if (sock->type != SOCK_SEQPACKET &&
sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;

if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))

sock->ops = &l2cap_sock_ops;

sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
l2cap_sock_init(sk, NULL);
/*
 * bind(2): copy in the (possibly short) sockaddr_l2, require privilege
 * for reserved PSMs (< 0x1001), reject duplicate (psm, bdaddr)
 * bindings, record the source address/PSM and move to BT_BOUND.
 * SDP (0x0001) and RFCOMM (0x0003) PSMs get the SDP security level.
 */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
struct sock *sk = sock->sk;
struct sockaddr_l2 la;
if (!addr || addr->sa_family != AF_BLUETOOTH)

/* Tolerate short sockaddrs: copy only what the caller supplied. */
memset(&la, 0, sizeof(la));
len = min_t(unsigned int, sizeof(la), alen);
memcpy(&la, addr, len);
if (sk->sk_state != BT_OPEN) {
/* Reserved PSM range requires CAP_NET_BIND_SERVICE. */
if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
!capable(CAP_NET_BIND_SERVICE)) {
write_lock_bh(&l2cap_sk_list.lock);

if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
/* Save source address */
bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
l2cap_pi(sk)->psm = la.l2_psm;
l2cap_pi(sk)->sport = la.l2_psm;
sk->sk_state = BT_BOUND;

if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
__le16_to_cpu(la.l2_psm) == 0x0003)
l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
write_unlock_bh(&l2cap_sk_list.lock);
/*
 * Establish (or reuse) the ACL link toward the destination and attach
 * this channel to it.  Chooses an HCI auth type from socket type, PSM
 * and security level, then calls hci_connect().  If the link is already
 * up, non-SEQPACKET sockets become connected immediately; SEQPACKET
 * channels continue via the signalling state machine under the timer.
 */
static int l2cap_do_connect(struct sock *sk)
bdaddr_t *src = &bt_sk(sk)->src;
bdaddr_t *dst = &bt_sk(sk)->dst;
struct l2cap_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
/* Pick the local adapter that routes to dst. */
hdev = hci_get_route(dst, src);
return -EHOSTUNREACH;

hci_dev_lock_bh(hdev);
if (sk->sk_type == SOCK_RAW) {
/* Raw sockets: dedicated bonding per security level. */
switch (l2cap_pi(sk)->sec_level) {
case BT_SECURITY_HIGH:
auth_type = HCI_AT_DEDICATED_BONDING_MITM;
case BT_SECURITY_MEDIUM:
auth_type = HCI_AT_DEDICATED_BONDING;
auth_type = HCI_AT_NO_BONDING;
} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
/* SDP: never bond; MITM only for high security. */
if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
auth_type = HCI_AT_NO_BONDING_MITM;
auth_type = HCI_AT_NO_BONDING;

if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
switch (l2cap_pi(sk)->sec_level) {
case BT_SECURITY_HIGH:
auth_type = HCI_AT_GENERAL_BONDING_MITM;
case BT_SECURITY_MEDIUM:
auth_type = HCI_AT_GENERAL_BONDING;
auth_type = HCI_AT_NO_BONDING;
hcon = hci_connect(hdev, ACL_LINK, dst,
l2cap_pi(sk)->sec_level, auth_type);
conn = l2cap_conn_add(hcon, 0);
/* Update source addr of the socket */
bacpy(src, conn->src);

l2cap_chan_add(conn, sk, NULL);

sk->sk_state = BT_CONNECT;
l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

if (hcon->state == BT_CONNECTED) {
if (sk->sk_type != SOCK_SEQPACKET) {
l2cap_sock_clear_timer(sk);
sk->sk_state = BT_CONNECTED;
hci_dev_unlock_bh(hdev);
/*
 * connect(2): validate the address (SEQPACKET requires a PSM), check
 * that the configured channel mode is usable (ERTM/streaming only when
 * enabled), reject already-connecting/connected states, record the
 * destination, start l2cap_do_connect() and wait for BT_CONNECTED.
 */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
struct sock *sk = sock->sk;
struct sockaddr_l2 la;
if (!addr || addr->sa_family != AF_BLUETOOTH)

memset(&la, 0, sizeof(la));
len = min_t(unsigned int, sizeof(la), alen);
memcpy(&la, addr, len);
if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
switch (l2cap_pi(sk)->mode) {
case L2CAP_MODE_BASIC:
case L2CAP_MODE_ERTM:
case L2CAP_MODE_STREAMING:
switch (sk->sk_state) {
/* Already connecting */
/* Already connected */
/* Set destination address and psm */
bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
l2cap_pi(sk)->psm = la.l2_psm;

err = l2cap_do_connect(sk);
err = bt_sock_wait_state(sk, BT_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
/*
 * listen(2): only bound SEQPACKET sockets may listen, and only in a
 * supported channel mode.  A socket bound without a PSM gets one
 * auto-assigned from the odd dynamic range 0x1001-0x10ff, then the
 * backlog is set and state moves to BT_LISTEN.
 */
static int l2cap_sock_listen(struct socket *sock, int backlog)
struct sock *sk = sock->sk;
BT_DBG("sk %p backlog %d", sk, backlog);
if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
switch (l2cap_pi(sk)->mode) {
case L2CAP_MODE_BASIC:
case L2CAP_MODE_ERTM:
case L2CAP_MODE_STREAMING:
if (!l2cap_pi(sk)->psm) {
bdaddr_t *src = &bt_sk(sk)->src;
write_lock_bh(&l2cap_sk_list.lock);

/* Valid dynamic PSMs are odd (LSB of upper octet clear). */
for (psm = 0x1001; psm < 0x1100; psm += 2)
if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
l2cap_pi(sk)->psm = cpu_to_le16(psm);
l2cap_pi(sk)->sport = cpu_to_le16(psm);
write_unlock_bh(&l2cap_sk_list.lock);
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = BT_LISTEN;
/*
 * accept(2): wake-one wait on the listener's accept queue.  Sleeps
 * interruptibly (dropping the socket lock across schedule_timeout)
 * until a child arrives, the timeout elapses, a signal fires, or the
 * socket leaves BT_LISTEN.
 */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *nsk;
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

if (sk->sk_state != BT_LISTEN) {
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

BT_DBG("sk %p timeo %ld", sk, timeo);

/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk->sk_sleep, &wait);
while (!(nsk = bt_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
timeo = schedule_timeout(timeo);
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

if (sk->sk_state != BT_LISTEN) {
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
set_current_state(TASK_RUNNING);
remove_wait_queue(sk->sk_sleep, &wait);
newsock->state = SS_CONNECTED;

BT_DBG("new socket %p", nsk);
/*
 * getsockname(2)/getpeername(2): fill a sockaddr_l2 with either the
 * peer tuple (psm, dst bdaddr, dcid) when @peer is set, or the local
 * tuple (sport, src bdaddr, scid) otherwise.
 */
static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
struct sock *sk = sock->sk;

BT_DBG("sock %p, sk %p", sock, sk);

addr->sa_family = AF_BLUETOOTH;
*len = sizeof(struct sockaddr_l2);
la->l2_psm = l2cap_pi(sk)->psm;
bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
la->l2_psm = l2cap_pi(sk)->sport;
bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/*
 * Copy @len bytes of user data from @msg into a fragmented skb chain
 * sized to the ACL MTU and hand it to hci_send_acl().  The first
 * fragment carries the L2CAP basic header (plus the 2-byte PSM for
 * connectionless DGRAM sockets); continuation fragments are raw data
 * hung off frag_list.
 */
static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sk_buff *skb, **frag;
int err, hlen, count, sent = 0;
struct l2cap_hdr *lh;

BT_DBG("sk %p len %d", sk, len);

/* First fragment (with L2CAP header) */
if (sk->sk_type == SOCK_DGRAM)
hlen = L2CAP_HDR_SIZE + 2;
hlen = L2CAP_HDR_SIZE;

count = min_t(unsigned int, (conn->mtu - hlen), len);

skb = bt_skb_send_alloc(sk, hlen + count,
msg->msg_flags & MSG_DONTWAIT, &err);
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

if (sk->sk_type == SOCK_DGRAM)
put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));

if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
count = min_t(unsigned int, conn->mtu, len);

*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
frag = &(*frag)->next;
err = hci_send_acl(conn->hcon, skb, 0);
/*
 * sendmsg(2): reject pending socket errors and MSG_OOB, enforce the
 * outgoing MTU for non-raw sockets, and forward to l2cap_do_send()
 * while connected.
 */
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);

err = sock_error(sk);
if (msg->msg_flags & MSG_OOB)

/* Check outgoing MTU */
if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
if (sk->sk_state == BT_CONNECTED)
err = l2cap_do_send(sk, msg, len);
/*
 * recvmsg(2): the first read on a defer_setup socket still in
 * BT_CONNECT2 completes the deferred accept — the success Connection
 * Response is sent here and the state advances to BT_CONFIG — before
 * delegating to the generic Bluetooth recvmsg.
 */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
struct sock *sk = sock->sk;
if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
struct l2cap_conn_rsp rsp;

sk->sk_state = BT_CONFIG;

rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
L2CAP_CONN_RSP, sizeof(rsp), &rsp);
return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/*
 * Legacy SOL_L2CAP setsockopt: L2CAP_OPTIONS copies a (possibly short)
 * l2cap_options over the current values; L2CAP_LM maps link-mode flag
 * bits onto sec_level / role_switch / force_reliable.
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
struct sock *sk = sock->sk;
struct l2cap_options opts;
BT_DBG("sk %p", sk);
/* Seed with current values so a short copy keeps the rest. */
opts.imtu = l2cap_pi(sk)->imtu;
opts.omtu = l2cap_pi(sk)->omtu;
opts.flush_to = l2cap_pi(sk)->flush_to;
opts.mode = l2cap_pi(sk)->mode;

len = min_t(unsigned int, sizeof(opts), optlen);
if (copy_from_user((char *) &opts, optval, len)) {
l2cap_pi(sk)->imtu = opts.imtu;
l2cap_pi(sk)->omtu = opts.omtu;
l2cap_pi(sk)->mode = opts.mode;
if (get_user(opt, (u32 __user *) optval)) {
/* Highest matching LM flag wins (checked low to high). */
if (opt & L2CAP_LM_AUTH)
l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
if (opt & L2CAP_LM_ENCRYPT)
l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
if (opt & L2CAP_LM_SECURE)
l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/*
 * setsockopt(2): SOL_L2CAP goes to the legacy handler; SOL_BLUETOOTH
 * supports BT_SECURITY (SEQPACKET/RAW only, level range-checked) and
 * BT_DEFER_SETUP (only before/while listening).
 */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
struct sock *sk = sock->sk;
struct bt_security sec;
BT_DBG("sk %p", sk);

if (level == SOL_L2CAP)
return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

if (level != SOL_BLUETOOTH)
return -ENOPROTOOPT;
if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
sec.level = BT_SECURITY_LOW;

len = min_t(unsigned int, sizeof(sec), optlen);
if (copy_from_user((char *) &sec, optval, len)) {
if (sec.level < BT_SECURITY_LOW ||
sec.level > BT_SECURITY_HIGH) {
l2cap_pi(sk)->sec_level = sec.level;
case BT_DEFER_SETUP:
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
if (get_user(opt, (u32 __user *) optval)) {
bt_sk(sk)->defer_setup = opt;
/*
 * Legacy SOL_L2CAP getsockopt: returns L2CAP_OPTIONS, the link-mode
 * flag word derived from sec_level/role_switch/force_reliable, or
 * L2CAP_CONNINFO (handle + device class; only valid once connected or
 * in a deferred-setup BT_CONNECT2 state).
 */
static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
struct sock *sk = sock->sk;
struct l2cap_options opts;
struct l2cap_conninfo cinfo;
BT_DBG("sk %p", sk);

if (get_user(len, optlen))
opts.imtu = l2cap_pi(sk)->imtu;
opts.omtu = l2cap_pi(sk)->omtu;
opts.flush_to = l2cap_pi(sk)->flush_to;
opts.mode = l2cap_pi(sk)->mode;

len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *) &opts, len))
/* Reconstruct LM bits from the stored security level. */
switch (l2cap_pi(sk)->sec_level) {
case BT_SECURITY_LOW:
opt = L2CAP_LM_AUTH;
case BT_SECURITY_MEDIUM:
opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
case BT_SECURITY_HIGH:
opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
if (l2cap_pi(sk)->role_switch)
opt |= L2CAP_LM_MASTER;

if (l2cap_pi(sk)->force_reliable)
opt |= L2CAP_LM_RELIABLE;

if (put_user(opt, (u32 __user *) optval))
case L2CAP_CONNINFO:
if (sk->sk_state != BT_CONNECTED &&
!(sk->sk_state == BT_CONNECT2 &&
bt_sk(sk)->defer_setup)) {
cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);

len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
/*
 * getsockopt(2): SOL_L2CAP goes to the legacy handler; SOL_BLUETOOTH
 * reports BT_SECURITY (SEQPACKET/RAW only) and BT_DEFER_SETUP (bound
 * or listening sockets only).
 */
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
struct sock *sk = sock->sk;
struct bt_security sec;
BT_DBG("sk %p", sk);

if (level == SOL_L2CAP)
return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);

if (level != SOL_BLUETOOTH)
return -ENOPROTOOPT;

if (get_user(len, optlen))
if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
sec.level = l2cap_pi(sk)->sec_level;

len = min_t(unsigned int, len, sizeof(sec));
if (copy_to_user(optval, (char *) &sec, len))
case BT_DEFER_SETUP:
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/*
 * shutdown(2): idempotently mark the socket fully shut down, stop the
 * timer and close the channel; with SO_LINGER set, wait up to the
 * linger time for BT_CLOSED.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk->sk_shutdown) {
sk->sk_shutdown = SHUTDOWN_MASK;
l2cap_sock_clear_timer(sk);
__l2cap_sock_close(sk, 0);

if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
err = bt_sock_wait_state(sk, BT_CLOSED,
/* close(2): shutdown both directions, then free the orphaned sock. */
static int l2cap_sock_release(struct socket *sock)
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
err = l2cap_sock_shutdown(sock, 2);
l2cap_sock_kill(sk);
/*
 * Channel configuration completed on both sides.  Outgoing channels
 * (no parent) wake the connect()-er; incoming channels wake the
 * listening parent's accept() instead.
 */
static void l2cap_chan_ready(struct sock *sk)
struct sock *parent = bt_sk(sk)->parent;

BT_DBG("sk %p, parent %p", sk, parent);

l2cap_pi(sk)->conf_state = 0;
l2cap_sock_clear_timer(sk);
/* Outgoing channel.
 * Wake up socket sleeping on connect.
sk->sk_state = BT_CONNECTED;
sk->sk_state_change(sk);
/* Incoming channel.
 * Wake up socket sleeping on accept.
parent->sk_data_ready(parent, 0);
/* Copy frame to all raw sockets on that connection */
/* Clones @skb to every raw socket on @conn (sniffing of signalling
 * traffic), skipping the socket the frame originated from. */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
struct l2cap_chan_list *l = &conn->chan_list;
struct sk_buff *nskb;

BT_DBG("conn %p", conn);

read_lock(&l->lock);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
if (sk->sk_type != SOCK_RAW)

/* Don't send frame to the socket it came from */
nskb = skb_clone(skb, GFP_ATOMIC);
if (sock_queue_rcv_skb(sk, nskb))
read_unlock(&l->lock);
/* ---- L2CAP signalling commands ---- */
/*
 * Build a signalling PDU skb: basic L2CAP header on the signalling CID,
 * command header (code/ident/len), then @dlen bytes of @data.  Payload
 * beyond the ACL MTU is split into continuation skbs on frag_list.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
u8 code, u8 ident, u16 dlen, void *data)
struct sk_buff *skb, **frag;
struct l2cap_cmd_hdr *cmd;
struct l2cap_hdr *lh;

BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
conn, code, ident, dlen);

len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
count = min_t(unsigned int, conn->mtu, len);

skb = bt_skb_alloc(count, GFP_ATOMIC);
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
cmd->len = cpu_to_le16(dlen);
/* First chunk of payload that fits after the two headers. */
count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
memcpy(skb_put(skb, count), data, count);
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
count = min_t(unsigned int, conn->mtu, len);

*frag = bt_skb_alloc(count, GFP_ATOMIC);
memcpy(skb_put(*frag, count), data, count);
frag = &(*frag)->next;
/*
 * Parse one configuration option at *@ptr: returns its type/length and
 * decodes 1/2/4-byte values (little-endian) into *@val; larger options
 * are returned as a pointer to the raw bytes.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
struct l2cap_conf_opt *opt = *ptr;
len = L2CAP_CONF_OPT_SIZE + opt->len;
*val = *((u8 *) opt->val);
*val = __le16_to_cpu(*((__le16 *) opt->val));
*val = __le32_to_cpu(*((__le32 *) opt->val));
/* Oversized option: hand back a pointer instead of a value. */
*val = (unsigned long) opt->val;

BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option (type/len/val) at *@ptr, encoding
 * 1/2/4-byte values little-endian and copying larger values verbatim;
 * advances *@ptr past the written option.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
struct l2cap_conf_opt *opt = *ptr;

BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
*((u8 *) opt->val) = val;
*((__le16 *) opt->val) = cpu_to_le16(val);
*((__le32 *) opt->val) = cpu_to_le32(val);
memcpy(opt->val, (void *) val, len);
*ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * l2cap_mode_supported - check whether a channel mode is usable, i.e.
 * advertised by both the local feature mask and the remote one passed
 * in 'feat_mask'.  The guard around enabling ERTM locally is elided
 * in this chunk.
 */
static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
	u32 local_feat_mask = l2cap_feat_mask;

	local_feat_mask |= L2CAP_FEAT_ERTM;

	case L2CAP_MODE_ERTM:
		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
	case L2CAP_MODE_STREAMING:
		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/*
 * l2cap_select_mode - pick the channel mode to actually use: keep the
 * requested ERTM/streaming mode only when the remote supports it,
 * otherwise fall back to Basic mode.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (l2cap_mode_supported(mode, remote_feat_mask))
	/* Default: Basic mode is always available */
	return L2CAP_MODE_BASIC;
/*
 * l2cap_build_conf_req - build the body of a Configuration Request for
 * this channel into 'data'.  On the first exchange the channel mode may
 * still be (re)negotiated against the remote feature mask; then an MTU
 * option (Basic mode, non-default MTU only) and/or an RFC option
 * (ERTM/streaming) are appended.  Switch headers and the final length
 * computation are elided in this chunk.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode is only negotiated on the very first config exchange */
	if (pi->num_conf_req || pi->num_conf_rsp)

	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		/* Requested mode not supported by both sides: give up */
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			l2cap_send_disconn_req(pi->conn, sk);

	pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);

	case L2CAP_MODE_BASIC:
		/* MTU option is only needed when it differs from the default */
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
		rfc.max_transmit = L2CAP_DEFAULT_MAX_RECEIVE;
		/* Timeouts are chosen by the acceptor; request zero here */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_RX_APDU);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		/* Streaming mode has no retransmissions */
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_RX_APDU);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);
/*
 * l2cap_parse_conf_req - parse the accumulated Configuration Request
 * stored in pi->conf_req and build the Configuration Response body in
 * 'data'.  Known options (MTU, flush timeout, QoS, RFC) are extracted;
 * unknown non-hint options are echoed back with result UNKNOWN.  The
 * channel mode is then validated (refusing the connection if the peers
 * cannot agree), and accepted output options are written.  Returns the
 * response length (tail elided) or -ECONNREFUSED.  Several switch
 * headers and closing braces are elided in this chunk.
 */
static int l2cap_parse_conf_req(struct sock *sk, void *data)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;

	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	/* Walk every option in the buffered request */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be silently ignored if unknown */
		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		case L2CAP_CONF_MTU:

		case L2CAP_CONF_FLUSH_TO:

		case L2CAP_CONF_QOS:

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);

			/* Unknown non-hint option: reject and echo its type */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;

	/* Mode negotiation only happens on the first exchange */
	if (pi->num_conf_rsp || pi->num_conf_req)

	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			return -ECONNREFUSED;

	pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);

	if (pi->mode != rfc.mode) {
		/* Counter-propose our own mode */
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = pi->mode;

		/* Already counter-proposed once: refuse the connection */
		if (pi->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;

		pi->conf_state |= L2CAP_CONF_MTU_DONE;
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		case L2CAP_MODE_BASIC:
			/* Basic mode never uses an FCS */
			pi->fcs = L2CAP_FCS_NONE;
			pi->conf_state |= L2CAP_CONF_MODE_DONE;

		case L2CAP_MODE_ERTM:
			pi->remote_tx_win = rfc.txwin_size;
			pi->remote_max_tx = rfc.max_transmit;
			pi->max_pdu_size = rfc.max_pdu_size;

			/* As acceptor we fill in the actual timeouts */
			rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
			rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

		case L2CAP_MODE_STREAMING:
			pi->remote_tx_win = rfc.txwin_size;
			pi->max_pdu_size = rfc.max_pdu_size;

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			/* Any other mode is refused */
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = pi->mode;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

	if (result == L2CAP_CONF_SUCCESS)
		pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;

	rsp->scid = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);
/*
 * l2cap_parse_conf_rsp - parse a (non-success) Configuration Response
 * and build a new Configuration Request in 'data' with adjusted options.
 * On overall success the negotiated ERTM/streaming parameters are
 * stored in the channel state.  *result may be downgraded to UNACCEPT
 * (e.g. peer proposed an MTU below the minimum).  Switch headers and
 * the final length computation are elided in this chunk.
 */
static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;

	struct l2cap_conf_rfc rfc;

	BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		case L2CAP_CONF_MTU:
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				/* Too small: counter with the minimum MTU */
				*result = L2CAP_CONF_UNACCEPT;
				pi->omtu = L2CAP_DEFAULT_MIN_MTU;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		case L2CAP_CONF_FLUSH_TO:
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* Peer may not change a locally-mandated mode */
			if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
							rfc.mode != pi->mode)
				return -ECONNREFUSED;

			pi->mode = rfc.mode;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

	if (*result == L2CAP_CONF_SUCCESS) {
		/* Commit negotiated parameters to the channel state */
		case L2CAP_MODE_ERTM:
			pi->remote_tx_win = rfc.txwin_size;
			pi->retrans_timeout = rfc.retrans_timeout;
			pi->monitor_timeout = rfc.monitor_timeout;
			pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);

		case L2CAP_MODE_STREAMING:
			pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0x0000);
/*
 * l2cap_build_conf_rsp - fill a minimal Configuration Response (no
 * options) with the given result and flags; the scid field carries
 * the remote side's channel id (our dcid).
 */
static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("sk %p", sk);

	rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);
/*
 * l2cap_command_rej - handle a Command Reject.  If it answers our
 * pending Information Request (matched by ident), treat the feature
 * mask exchange as done and kick off any channels waiting on it.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;

	/* Only "command not understood" (0x0000) is handled here */
	if (rej->reason != 0x0000)

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
					cmd->ident == conn->info_ident) {
		del_timer(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
/*
 * l2cap_connect_req - handle an incoming Connection Request.
 * Finds a listening socket for the PSM, checks link security (SDP,
 * PSM 0x0001, is exempt), allocates and initializes a child socket,
 * adds it to the channel list, and sends a Connection Response —
 * either success, pending (security/authorization outstanding), or an
 * error result.  If the feature-mask exchange has not happened yet,
 * an Information Request is also sent.  Error/exit paths are elided
 * in this chunk.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);

	result = L2CAP_CR_BAD_PSM;

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		/* 0x05: HCI authentication-failure disconnect reason */
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);

	/* Keep the ACL alive while the channel exists */
	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	/* Our locally-allocated channel id becomes the peer's dcid */
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace must authorize via accept() */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);

				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;

			/* Security procedure still running */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;

		/* Feature exchange not finished yet: answer "pending" */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;

	write_unlock_bh(&list->lock);

	bh_unlock_sock(parent);

	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
/*
 * l2cap_connect_rsp - handle a Connection Response to our request.
 * The channel is found by scid (or, failing that, by the command
 * ident).  On success the channel moves to BT_CONFIG and a first
 * Configuration Request is sent; on pending the CONNECT_PEND flag is
 * set; otherwise the channel is torn down with ECONNREFUSED.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);

	/* Fall back to matching by command identifier */
	sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);

	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;

		/* Pending: remember we are still waiting */
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

		/* Any other result: connection refused */
		l2cap_chan_del(sk, ECONNREFUSED);
/*
 * l2cap_config_req - handle a Configuration Request.  Option data is
 * accumulated in pi->conf_req across continuation packets (flag bit
 * 0x0001); once complete, it is parsed and a Configuration Response
 * is sent.  When both directions are configured the channel becomes
 * BT_CONNECTED; if we have not sent our own request yet, one is sent
 * now.  Error/exit paths are elided in this chunk.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);

	/* Ignore config requests on a dying channel */
	if (sk->sk_state == BT_DISCONN)

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);

	/* Store fragment for later parsing */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	/* Parse error: drop the channel */
	l2cap_send_disconn_req(conn, sk);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions configured: channel is up */
		sk->sk_state = BT_CONNECTED;
		l2cap_chan_ready(sk);

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
/*
 * l2cap_config_rsp - handle a Configuration Response.  Unacceptable
 * responses trigger a bounded renegotiation (up to
 * L2CAP_CONF_MAX_CONF_RSP attempts); anything else disconnects the
 * channel.  On success, once output configuration is also done the
 * channel becomes BT_CONNECTED.  Switch/default structure is elided
 * in this chunk.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
			scid, flags, result);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);

	case L2CAP_CONF_SUCCESS:

	case L2CAP_CONF_UNACCEPT:
		if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			int len = cmd->len - sizeof(*rsp);

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(sk, rsp->data,
			/* Negative length means renegotiation failed */
			l2cap_send_disconn_req(conn, sk);

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			l2cap_pi(sk)->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)

		/* Unrecoverable: reset and disconnect the channel */
		sk->sk_state = BT_DISCONN;
		sk->sk_err = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
		l2cap_send_disconn_req(conn, sk);

	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		sk->sk_state = BT_CONNECTED;
		l2cap_chan_ready(sk);
/*
 * l2cap_disconnect_req - handle a Disconnection Request: acknowledge
 * with a Disconnection Response, shut the socket down, remove the
 * channel and kill the (now zapped) socket.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our local channel id */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	l2cap_chan_del(sk, ECONNRESET);

	l2cap_sock_kill(sk);
/*
 * l2cap_disconnect_rsp - handle a Disconnection Response to our own
 * request: remove the channel and kill the socket.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);

	/* Clean teardown: no error reported to the socket */
	l2cap_chan_del(sk, 0);

	l2cap_sock_kill(sk);
/*
 * l2cap_information_req - answer an Information Request.  Supported
 * queries are the extended feature mask and the fixed channel list;
 * anything else gets a "not supported" response.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM support (guard elided in this chunk) */
		feat_mask |= L2CAP_FEAT_ERTM;
		put_unaligned(cpu_to_le32(feat_mask), (__le32 *) rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Fixed channel bitmap follows the 4-byte rsp header */
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);

		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * l2cap_information_rsp - handle an Information Response.  After the
 * feature mask arrives, a follow-up fixed-channel query is issued when
 * the peer advertises fixed channels; otherwise (and after the fixed
 * channel reply) the info exchange is marked done and pending channels
 * are started.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	del_timer(&conn->info_timer);

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer has fixed channels: ask which ones */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);

			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
/*
 * l2cap_sig_channel - demultiplex a signalling-channel skb.  The frame
 * is first mirrored to raw sockets, then each embedded command is
 * validated (length and non-zero ident) and dispatched to its handler.
 * Any handler error is answered with a Command Reject.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
	u8 *data = skb->data;

	struct l2cap_cmd_hdr cmd;

	/* Let raw (monitor) sockets see the signalling traffic */
	l2cap_raw_recv(conn, skb);

	/* A single frame may carry several commands back to back */
	while (len >= L2CAP_CMD_HDR_SIZE) {
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Length overrun or reserved ident 0: drop the frame */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");

		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);

		case L2CAP_ECHO_REQ:
			/* Echo the payload straight back */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);

		case L2CAP_ECHO_RSP:

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);

			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);

			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/*
 * l2cap_data_channel - deliver a data frame to the connection-oriented
 * channel identified by cid.  Frames for unknown channels, channels
 * that are not connected, or frames larger than the incoming MTU are
 * dropped (drop paths elided in this chunk).
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	BT_DBG("unknown cid 0x%4.4x", cid);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_CONNECTED)

	if (l2cap_pi(sk)->imtu < skb->len)

	/* If socket recv buffers overflows we drop data here
	 * which is *bad* because L2CAP has to be reliable.
	 * But we don't have any other choice. L2CAP doesn't
	 * provide flow control mechanism. */

	if (!sock_queue_rcv_skb(sk, skb))
/*
 * l2cap_conless_channel - deliver a connectionless frame to a socket
 * bound to the PSM.  Accepted only in BT_BOUND/BT_CONNECTED state and
 * when the frame fits in the incoming MTU (drop paths elided).
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
	sk = l2cap_get_sock_by_psm(0, psm, conn->src);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)

	if (l2cap_pi(sk)->imtu < skb->len)

	if (!sock_queue_rcv_skb(sk, skb))
/*
 * l2cap_recv_frame - route one reassembled L2CAP frame by its CID:
 * signalling channel, connectionless channel (PSM prefix in payload),
 * or a connection-oriented data channel.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
	struct l2cap_hdr *lh = (void *) skb->data;

	/* Strip the basic L2CAP header before dispatch */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);

	case L2CAP_CID_CONN_LESS:
		/* Connectionless payload starts with the PSM */
		psm = get_unaligned((__le16 *) skb->data);

		l2cap_conless_channel(conn, psm, skb);

		l2cap_data_channel(conn, cid, skb);
/* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * l2cap_connect_ind - HCI asks whether to accept an incoming ACL
 * connection.  Scans the listening sockets: an exact source-address
 * match (lm1) takes precedence over wildcard BDADDR_ANY listeners
 * (lm2).  Returns the accept/role-switch link-mode bits, or 0.
 */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
	int exact = 0, lm1 = 0, lm2 = 0;
	register struct sock *sk;
	struct hlist_node *node;

	/* Only ACL links carry L2CAP */
	if (type != ACL_LINK)

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)

		/* Exact match on the local adapter's address */
		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm1 |= HCI_LM_MASTER;

		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm2 |= HCI_LM_MASTER;

	read_unlock(&l2cap_sk_list.lock);

	return exact ? lm1 : lm2;
/*
 * l2cap_connect_cfm - HCI connect-complete callback: create the L2CAP
 * connection object and start it on success, or tear it down with the
 * translated error on failure.
 */
static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
	struct l2cap_conn *conn;

	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);

	if (hcon->type != ACL_LINK)

	conn = l2cap_conn_add(hcon, status);

	l2cap_conn_ready(conn);

	/* Failure path: map the HCI status to an errno */
	l2cap_conn_del(hcon, bt_err(status));
/*
 * l2cap_disconn_ind - HCI asks for the disconnect reason to send for
 * this link; return the reason recorded on the L2CAP connection.
 */
static int l2cap_disconn_ind(struct hci_conn *hcon)
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (hcon->type != ACL_LINK || !conn)

	return conn->disc_reason;
/*
 * l2cap_disconn_cfm - HCI disconnect-complete callback: tear down the
 * whole L2CAP connection with the translated error.
 */
static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
	BT_DBG("hcon %p reason %d", hcon, reason);

	if (hcon->type != ACL_LINK)

	l2cap_conn_del(hcon, bt_err(reason));
/*
 * l2cap_check_encryption - react to an encryption change on a
 * SOCK_SEQPACKET channel.  Losing encryption gives MEDIUM-security
 * channels a 5 s grace timer and closes HIGH-security channels
 * outright; regaining it clears the grace timer.
 */
static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
	if (sk->sk_type != SOCK_SEQPACKET)

	if (encrypt == 0x00) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ * 5);
		} else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			__l2cap_sock_close(sk, ECONNREFUSED);

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
			l2cap_sock_clear_timer(sk);
/*
 * l2cap_security_cfm - HCI security-procedure-complete callback.
 * Walks every channel on the connection: established channels get an
 * encryption check; channels in BT_CONNECT proceed with the pending
 * Connection Request; channels in BT_CONNECT2 are answered with the
 * deferred Connection Response (success or security block).
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {

		/* Connection still pending: skip, handled elsewhere */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {

		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);

		if (sk->sk_state == BT_CONNECT) {
			/* Security done: send the deferred Connection Request */
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);

			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 10);

		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;

			/* Security outcome decides accept vs. block */
			sk->sk_state = BT_CONFIG;
			result = L2CAP_CR_SUCCESS;

			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, HZ / 10);
			result = L2CAP_CR_SEC_BLOCK;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	read_unlock(&l->lock);
/*
 * l2cap_recv_acldata - HCI ACL data callback; reassembles L2CAP frames
 * from ACL fragments.  An ACL_START fragment carries the L2CAP header
 * giving the total length: complete frames are dispatched immediately,
 * otherwise a reassembly skb (conn->rx_skb) is allocated.  Continuation
 * fragments are appended until conn->rx_len reaches zero.  All the
 * malformed-input cases (unexpected start/continuation, short or
 * overlong frames) are logged and mark the connection unreliable.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Create the connection object lazily on first data */
	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;

		/* A start while reassembly is in progress: drop partial frame */
		BT_ERR("Unexpected start frame (len %d)", skb->len);
		kfree_skb(conn->rx_skb);
		conn->rx_skb = NULL;

		l2cap_conn_unreliable(conn, ECOMM);

		BT_ERR("Frame is too short (len %d)", skb->len);
		l2cap_conn_unreliable(conn, ECOMM);

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			l2cap_conn_unreliable(conn, ECOMM);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
		/* Bytes still expected in continuation fragments */
		conn->rx_len = len - skb->len;

		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;

			l2cap_conn_unreliable(conn, ECOMM);

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
/*
 * l2cap_sysfs_show - dump one line per L2CAP socket (addresses, state,
 * PSM, channel ids, MTUs, security level) into the sysfs buffer.
 */
static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
				batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
				sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
				pi->dcid, pi->imtu, pi->omtu, pi->sec_level);

	read_unlock_bh(&l2cap_sk_list.lock);
/* Read-only sysfs attribute exposing the socket list via l2cap_sysfs_show. */
static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);

/* Socket-layer operations for L2CAP sockets; generic bt_sock_* helpers
 * cover poll/ioctl, unsupported ops use the sock_no_* stubs. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt

/* Registered with the Bluetooth core so socket(PF_BLUETOOTH,
 * BTPROTO_L2CAP) creates L2CAP sockets. */
static struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,

/* Callbacks registered with the HCI core for L2CAP over ACL links. */
static struct hci_proto l2cap_hci_proto = {
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
/*
 * l2cap_init - module init: register the proto, the Bluetooth socket
 * family and the HCI protocol handler; earlier registrations are
 * rolled back on failure (error labels elided in this chunk).  The
 * sysfs file is best-effort only.
 */
static int __init l2cap_init(void)
	err = proto_register(&l2cap_proto, 0);

	err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
	BT_ERR("L2CAP socket registration failed");

	err = hci_register_proto(&l2cap_hci_proto);
	BT_ERR("L2CAP protocol registration failed");
	/* Undo the socket registration before bailing out */
	bt_sock_unregister(BTPROTO_L2CAP);

	/* Non-fatal: only the info file is missing */
	if (class_create_file(bt_class, &class_attr_l2cap) < 0)
		BT_ERR("Failed to create L2CAP info file");

	BT_INFO("L2CAP ver %s", VERSION);
	BT_INFO("L2CAP socket layer initialized");

	proto_unregister(&l2cap_proto);
/*
 * l2cap_exit - module teardown: unregister everything l2cap_init
 * registered, in reverse order.
 */
static void __exit l2cap_exit(void)
	class_remove_file(bt_class, &class_attr_l2cap);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
void l2cap_load(void)
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
EXPORT_SYMBOL(l2cap_load);

module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime knob: enhanced retransmission mode is off by default */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");