2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm = 0;
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 static const struct proto_ops l2cap_sock_ops;
65 static struct workqueue_struct *_busy_wq;
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71 static void l2cap_busy_work(struct work_struct *work);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
78 u8 code, u8 ident, u16 dlen, void *data);
80 /* ---- L2CAP timers ---- */
/*
 * Socket guard-timer callback: a connect/config/teardown step timed out.
 * Chooses an errno from the socket state and closes the channel.
 * NOTE(review): this listing has dropped lines (locking, the default
 * reason, socket release); the original line numbers are baked into
 * the text below.
 */
81 static void l2cap_sock_timeout(unsigned long arg)
83 struct sock *sk = (struct sock *) arg;
86 BT_DBG("sock %p state %d", sk, sk->sk_state);
90 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
91 reason = ECONNREFUSED;
92 else if (sk->sk_state == BT_CONNECT &&
93 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
94 reason = ECONNREFUSED;
98 __l2cap_sock_close(sk, reason);
106 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
108 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
109 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
112 static void l2cap_sock_clear_timer(struct sock *sk)
114 BT_DBG("sock %p state %d", sk, sk->sk_state);
115 sk_stop_timer(sk, &sk->sk_timer);
118 /* ---- L2CAP channels ---- */
119 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->dcid == cid)
129 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
132 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
133 if (l2cap_pi(s)->scid == cid)
139 /* Find channel with given SCID.
140 * Returns locked socket */
141 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
145 s = __l2cap_get_chan_by_scid(l, cid);
148 read_unlock(&l->lock);
152 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
155 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
156 if (l2cap_pi(s)->ident == ident)
162 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
166 s = __l2cap_get_chan_by_ident(l, ident);
169 read_unlock(&l->lock);
173 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
175 u16 cid = L2CAP_CID_DYN_START;
177 for (; cid < L2CAP_CID_DYN_END; cid++) {
178 if (!__l2cap_get_chan_by_scid(l, cid))
185 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
190 l2cap_pi(l->head)->prev_c = sk;
192 l2cap_pi(sk)->next_c = l->head;
193 l2cap_pi(sk)->prev_c = NULL;
197 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
199 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
201 write_lock_bh(&l->lock);
206 l2cap_pi(next)->prev_c = prev;
208 l2cap_pi(prev)->next_c = next;
209 write_unlock_bh(&l->lock);
/*
 * Attach a socket to a connection: assign CIDs by socket type, link it
 * into the channel list and, when a parent is given, queue it on the
 * parent's accept queue.  Caller holds the channel-list write lock.
 * NOTE(review): this listing has dropped lines; original line numbers
 * are baked into the text.
 */
214 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
216 struct l2cap_chan_list *l = &conn->chan_list;
218 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
219 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: "remote user terminated connection" default disconnect reason */
221 conn->disc_reason = 0x13;
223 l2cap_pi(sk)->conn = conn;
225 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
226 /* Alloc CID for connection-oriented socket */
227 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
228 } else if (sk->sk_type == SOCK_DGRAM) {
229 /* Connectionless socket */
230 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
231 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
232 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
234 /* Raw socket can send/recv signalling messages only */
235 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
236 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
237 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
240 __l2cap_chan_link(l, sk);
243 bt_accept_enqueue(parent, sk);
/*
 * Tear down a channel: unlink it from the connection, drop the hcon
 * reference, mark the socket closed/zapped, flush all ERTM queues and
 * stop the ERTM timers.  NOTE(review): listing has dropped lines
 * (parent handling branch, srej list free, closing braces).
 */
247 * Must be called on the locked socket. */
248 static void l2cap_chan_del(struct sock *sk, int err)
250 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
251 struct sock *parent = bt_sk(sk)->parent;
253 l2cap_sock_clear_timer(sk);
255 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
258 /* Unlink from channel list */
259 l2cap_chan_unlink(&conn->chan_list, sk);
260 l2cap_pi(sk)->conn = NULL;
261 hci_conn_put(conn->hcon);
264 sk->sk_state = BT_CLOSED;
265 sock_set_flag(sk, SOCK_ZAPPED);
271 bt_accept_unlink(sk);
272 parent->sk_data_ready(parent, 0);
274 sk->sk_state_change(sk);
276 skb_queue_purge(TX_QUEUE(sk));
278 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
279 struct srej_list *l, *tmp;
281 del_timer(&l2cap_pi(sk)->retrans_timer);
282 del_timer(&l2cap_pi(sk)->monitor_timer);
283 del_timer(&l2cap_pi(sk)->ack_timer);
285 skb_queue_purge(SREJ_QUEUE(sk));
286 skb_queue_purge(BUSY_QUEUE(sk));
288 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
/*
 * Map the channel's security level to an HCI authentication type and
 * ask the HCI layer to enforce it.  PSM 0x0001 is SDP, which never
 * bonds.  NOTE(review): listing dropped the switch's break/default
 * lines; numbers are baked into the text.
 */
295 /* Service level security */
296 static inline int l2cap_check_security(struct sock *sk)
298 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
301 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
302 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
303 auth_type = HCI_AT_NO_BONDING_MITM;
305 auth_type = HCI_AT_NO_BONDING;
307 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
308 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
310 switch (l2cap_pi(sk)->sec_level) {
311 case BT_SECURITY_HIGH:
312 auth_type = HCI_AT_GENERAL_BONDING_MITM;
314 case BT_SECURITY_MEDIUM:
315 auth_type = HCI_AT_GENERAL_BONDING;
318 auth_type = HCI_AT_NO_BONDING;
323 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
327 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
331 /* Get next available identificator.
332 * 1 - 128 are used by kernel.
333 * 129 - 199 are reserved.
334 * 200 - 254 are used by utilities like l2ping, etc.
337 spin_lock_bh(&conn->lock);
339 if (++conn->tx_ident > 128)
344 spin_unlock_bh(&conn->lock);
349 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
351 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
353 BT_DBG("code 0x%2.2x", code);
358 hci_send_acl(conn->hcon, skb, 0);
/*
 * Build and send a single ERTM supervisory frame carrying "control",
 * consuming any pending F/P bits and appending a CRC16 FCS when the
 * channel uses it.  NOTE(review): listing dropped lines (skb decl,
 * "hlen += 2" for FCS, early returns); numbers baked into the text.
 */
361 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
364 struct l2cap_hdr *lh;
365 struct l2cap_conn *conn = pi->conn;
366 struct sock *sk = (struct sock *)pi;
367 int count, hlen = L2CAP_HDR_SIZE + 2;
369 if (sk->sk_state != BT_CONNECTED)
372 if (pi->fcs == L2CAP_FCS_CRC16)
375 BT_DBG("pi %p, control 0x%2.2x", pi, control);
377 count = min_t(unsigned int, conn->mtu, hlen);
378 control |= L2CAP_CTRL_FRAME_TYPE;
380 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
381 control |= L2CAP_CTRL_FINAL;
382 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
385 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
386 control |= L2CAP_CTRL_POLL;
387 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
390 skb = bt_skb_alloc(count, GFP_ATOMIC);
394 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
395 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
396 lh->cid = cpu_to_le16(pi->dcid);
397 put_unaligned_le16(control, skb_put(skb, 2));
399 if (pi->fcs == L2CAP_FCS_CRC16) {
400 u16 fcs = crc16(0, (u8 *)lh, count - 2);
401 put_unaligned_le16(fcs, skb_put(skb, 2));
404 hci_send_acl(pi->conn->hcon, skb, 0);
407 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
409 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
410 control |= L2CAP_SUPER_RCV_NOT_READY;
411 pi->conn_state |= L2CAP_CONN_RNR_SENT;
413 control |= L2CAP_SUPER_RCV_READY;
415 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
417 l2cap_send_sframe(pi, control);
420 static inline int __l2cap_no_conn_pending(struct sock *sk)
422 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/*
 * Kick off channel establishment: if the feature-mask exchange is done,
 * send a Connect Request (subject to security); otherwise start the
 * Information Request exchange first.  NOTE(review): listing dropped
 * lines (early return, else branch brace); numbers baked into text.
 */
425 static void l2cap_do_start(struct sock *sk)
427 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
429 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
430 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
433 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
434 struct l2cap_conn_req req;
435 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
436 req.psm = l2cap_pi(sk)->psm;
438 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
439 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
441 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
442 L2CAP_CONN_REQ, sizeof(req), &req);
445 struct l2cap_info_req req;
446 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
448 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
449 conn->info_ident = l2cap_get_ident(conn);
451 mod_timer(&conn->info_timer, jiffies +
452 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
454 l2cap_send_cmd(conn, conn->info_ident,
455 L2CAP_INFO_REQ, sizeof(req), &req);
/*
 * Abort a channel: flush the TX queue, stop ERTM timers, send a
 * Disconnect Request and move the socket to BT_DISCONN.
 * NOTE(review): listing dropped lines (conn NULL check, sk_err = err).
 */
459 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
461 struct l2cap_disconn_req req;
466 skb_queue_purge(TX_QUEUE(sk));
468 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
469 del_timer(&l2cap_pi(sk)->retrans_timer);
470 del_timer(&l2cap_pi(sk)->monitor_timer);
471 del_timer(&l2cap_pi(sk)->ack_timer);
474 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
475 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
476 l2cap_send_cmd(conn, l2cap_get_ident(conn),
477 L2CAP_DISCONN_REQ, sizeof(req), &req);
479 sk->sk_state = BT_DISCONN;
483 /* ---- L2CAP connections ---- */
/*
 * Walk every channel on the connection and advance its state machine:
 * BT_CONNECT channels emit a Connect Request (once security passes),
 * BT_CONNECT2 channels answer the pending Connect Request, honouring
 * defer_setup.  NOTE(review): listing dropped the per-socket
 * lock/unlock and continue lines; numbers baked into the text.
 */
484 static void l2cap_conn_start(struct l2cap_conn *conn)
486 struct l2cap_chan_list *l = &conn->chan_list;
489 BT_DBG("conn %p", conn);
493 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
496 if (sk->sk_type != SOCK_SEQPACKET &&
497 sk->sk_type != SOCK_STREAM) {
502 if (sk->sk_state == BT_CONNECT) {
503 if (l2cap_check_security(sk) &&
504 __l2cap_no_conn_pending(sk)) {
505 struct l2cap_conn_req req;
506 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
507 req.psm = l2cap_pi(sk)->psm;
509 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
510 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
512 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
513 L2CAP_CONN_REQ, sizeof(req), &req);
515 } else if (sk->sk_state == BT_CONNECT2) {
516 struct l2cap_conn_rsp rsp;
517 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
518 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
520 if (l2cap_check_security(sk)) {
521 if (bt_sk(sk)->defer_setup) {
522 struct sock *parent = bt_sk(sk)->parent;
523 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
524 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
525 parent->sk_data_ready(parent, 0);
528 sk->sk_state = BT_CONFIG;
529 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
530 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
533 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
534 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
537 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
538 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
544 read_unlock(&l->lock);
/*
 * The ACL link is up: raw/dgram channels go straight to BT_CONNECTED,
 * connection-oriented ones in BT_CONNECT proceed via l2cap_do_start().
 * NOTE(review): listing dropped per-socket locking and the
 * l2cap_do_start() call line.
 */
547 static void l2cap_conn_ready(struct l2cap_conn *conn)
549 struct l2cap_chan_list *l = &conn->chan_list;
552 BT_DBG("conn %p", conn);
556 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
559 if (sk->sk_type != SOCK_SEQPACKET &&
560 sk->sk_type != SOCK_STREAM) {
561 l2cap_sock_clear_timer(sk);
562 sk->sk_state = BT_CONNECTED;
563 sk->sk_state_change(sk);
564 } else if (sk->sk_state == BT_CONNECT)
570 read_unlock(&l->lock);
573 /* Notify sockets that we cannot guarantee reliability anymore */
574 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
576 struct l2cap_chan_list *l = &conn->chan_list;
579 BT_DBG("conn %p", conn);
583 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
584 if (l2cap_pi(sk)->force_reliable)
588 read_unlock(&l->lock);
591 static void l2cap_info_timeout(unsigned long arg)
593 struct l2cap_conn *conn = (void *) arg;
595 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
596 conn->info_ident = 0;
598 l2cap_conn_start(conn);
/*
 * Return the L2CAP state attached to an ACL link, allocating and
 * initialising it (MTU from the HCI device, locks, info timer, default
 * disconnect reason 0x13) on first use.  NOTE(review): listing dropped
 * lines (existing-conn early return, alloc failure check, return).
 */
601 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
603 struct l2cap_conn *conn = hcon->l2cap_data;
608 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
612 hcon->l2cap_data = conn;
615 BT_DBG("hcon %p conn %p", hcon, conn);
617 conn->mtu = hcon->hdev->acl_mtu;
618 conn->src = &hcon->hdev->bdaddr;
619 conn->dst = &hcon->dst;
623 spin_lock_init(&conn->lock);
624 rwlock_init(&conn->chan_list.lock);
626 setup_timer(&conn->info_timer, l2cap_info_timeout,
627 (unsigned long) conn);
629 conn->disc_reason = 0x13;
/*
 * Tear down the L2CAP state of an ACL link: free any partial RX frame,
 * close every remaining channel with "err", stop the info timer and
 * detach from the hcon.  NOTE(review): listing dropped the per-socket
 * lock/kill lines and the kfree(conn).
 */
634 static void l2cap_conn_del(struct hci_conn *hcon, int err)
636 struct l2cap_conn *conn = hcon->l2cap_data;
642 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
644 kfree_skb(conn->rx_skb);
647 while ((sk = conn->chan_list.head)) {
649 l2cap_chan_del(sk, err);
654 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
655 del_timer_sync(&conn->info_timer);
657 hcon->l2cap_data = NULL;
661 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
663 struct l2cap_chan_list *l = &conn->chan_list;
664 write_lock_bh(&l->lock);
665 __l2cap_chan_add(conn, sk, parent);
666 write_unlock_bh(&l->lock);
669 /* ---- Socket interface ---- */
670 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
673 struct hlist_node *node;
674 sk_for_each(sk, node, &l2cap_sk_list.head)
675 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
682 /* Find socket with psm and source bdaddr.
683 * Returns closest match.
685 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
687 struct sock *sk = NULL, *sk1 = NULL;
688 struct hlist_node *node;
690 sk_for_each(sk, node, &l2cap_sk_list.head) {
691 if (state && sk->sk_state != state)
694 if (l2cap_pi(sk)->psm == psm) {
696 if (!bacmp(&bt_sk(sk)->src, src))
700 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
704 return node ? sk : sk1;
707 /* Find socket with given address (psm, src).
708 * Returns locked socket */
709 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
712 read_lock(&l2cap_sk_list.lock);
713 s = __l2cap_get_sock_by_psm(state, psm, src);
716 read_unlock(&l2cap_sk_list.lock);
720 static void l2cap_sock_destruct(struct sock *sk)
724 skb_queue_purge(&sk->sk_receive_queue);
725 skb_queue_purge(&sk->sk_write_queue);
728 static void l2cap_sock_cleanup_listen(struct sock *parent)
732 BT_DBG("parent %p", parent);
734 /* Close not yet accepted channels */
735 while ((sk = bt_accept_dequeue(parent, NULL)))
736 l2cap_sock_close(sk);
738 parent->sk_state = BT_CLOSED;
739 sock_set_flag(parent, SOCK_ZAPPED);
742 /* Kill socket (only if zapped and orphan)
743 * Must be called on unlocked socket.
745 static void l2cap_sock_kill(struct sock *sk)
747 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
750 BT_DBG("sk %p state %d", sk, sk->sk_state);
752 /* Kill poor orphan */
753 bt_sock_unlink(&l2cap_sk_list, sk);
754 sock_set_flag(sk, SOCK_DEAD);
/*
 * State-dependent close: listening sockets clean up their accept queue;
 * connected channels send a Disconnect Request; half-open incoming
 * channels (BT_CONNECT2) refuse the pending Connect Request; everything
 * else is just deleted and zapped.  Caller holds the socket lock.
 * NOTE(review): listing dropped the case labels/breaks; numbers are
 * baked into the text.
 */
758 static void __l2cap_sock_close(struct sock *sk, int reason)
760 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
762 switch (sk->sk_state) {
764 l2cap_sock_cleanup_listen(sk);
769 if (sk->sk_type == SOCK_SEQPACKET ||
770 sk->sk_type == SOCK_STREAM) {
771 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
773 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
774 l2cap_send_disconn_req(conn, sk, reason);
776 l2cap_chan_del(sk, reason);
780 if (sk->sk_type == SOCK_SEQPACKET ||
781 sk->sk_type == SOCK_STREAM) {
782 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
783 struct l2cap_conn_rsp rsp;
786 if (bt_sk(sk)->defer_setup)
787 result = L2CAP_CR_SEC_BLOCK;
789 result = L2CAP_CR_BAD_PSM;
791 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
792 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
793 rsp.result = cpu_to_le16(result);
794 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
795 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
796 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
798 l2cap_chan_del(sk, reason);
803 l2cap_chan_del(sk, reason);
807 sock_set_flag(sk, SOCK_ZAPPED);
812 /* Must be called on unlocked socket. */
813 static void l2cap_sock_close(struct sock *sk)
815 l2cap_sock_clear_timer(sk);
817 __l2cap_sock_close(sk, ECONNRESET);
/*
 * Initialise per-socket L2CAP state: children inherit the parent's
 * configuration, fresh sockets get the defaults (ERTM for SOCK_STREAM
 * only when enable_ertm is set).  NOTE(review): listing dropped the
 * if/else structure lines; numbers are baked into the text.
 */
822 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
824 struct l2cap_pinfo *pi = l2cap_pi(sk);
829 sk->sk_type = parent->sk_type;
830 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
832 pi->imtu = l2cap_pi(parent)->imtu;
833 pi->omtu = l2cap_pi(parent)->omtu;
834 pi->conf_state = l2cap_pi(parent)->conf_state;
835 pi->mode = l2cap_pi(parent)->mode;
836 pi->fcs = l2cap_pi(parent)->fcs;
837 pi->max_tx = l2cap_pi(parent)->max_tx;
838 pi->tx_win = l2cap_pi(parent)->tx_win;
839 pi->sec_level = l2cap_pi(parent)->sec_level;
840 pi->role_switch = l2cap_pi(parent)->role_switch;
841 pi->force_reliable = l2cap_pi(parent)->force_reliable;
843 pi->imtu = L2CAP_DEFAULT_MTU;
845 if (enable_ertm && sk->sk_type == SOCK_STREAM) {
846 pi->mode = L2CAP_MODE_ERTM;
847 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
849 pi->mode = L2CAP_MODE_BASIC;
851 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
852 pi->fcs = L2CAP_FCS_CRC16;
853 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
854 pi->sec_level = BT_SECURITY_LOW;
856 pi->force_reliable = 0;
859 /* Default config options */
861 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
862 skb_queue_head_init(TX_QUEUE(sk));
863 skb_queue_head_init(SREJ_QUEUE(sk));
864 skb_queue_head_init(BUSY_QUEUE(sk));
865 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor for L2CAP sockets; obj_size makes sk_alloc()
 * reserve room for struct l2cap_pinfo.  NOTE(review): the .name
 * initializer line was dropped from this listing. */
868 static struct proto l2cap_proto = {
870 .owner = THIS_MODULE,
871 .obj_size = sizeof(struct l2cap_pinfo)
874 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
878 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
882 sock_init_data(sock, sk);
883 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
885 sk->sk_destruct = l2cap_sock_destruct;
886 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
888 sock_reset_flag(sk, SOCK_ZAPPED);
890 sk->sk_protocol = proto;
891 sk->sk_state = BT_OPEN;
893 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
895 bt_sock_link(&l2cap_sk_list, sk);
899 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
904 BT_DBG("sock %p", sock);
906 sock->state = SS_UNCONNECTED;
908 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
909 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
910 return -ESOCKTNOSUPPORT;
912 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
915 sock->ops = &l2cap_sock_ops;
917 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
921 l2cap_sock_init(sk, NULL);
/*
 * bind(2): copy in the (possibly short) sockaddr_l2, reject privileged
 * PSMs (< 0x1001) without CAP_NET_BIND_SERVICE, refuse double binds of
 * the same (psm, bdaddr), then record source address and PSM.  PSMs
 * 0x0001 (SDP) and 0x0003 (RFCOMM) get BT_SECURITY_SDP.
 * NOTE(review): listing dropped lock_sock/err lines.
 */
925 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
927 struct sock *sk = sock->sk;
928 struct sockaddr_l2 la;
933 if (!addr || addr->sa_family != AF_BLUETOOTH)
936 memset(&la, 0, sizeof(la));
937 len = min_t(unsigned int, sizeof(la), alen);
938 memcpy(&la, addr, len);
945 if (sk->sk_state != BT_OPEN) {
950 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
951 !capable(CAP_NET_BIND_SERVICE)) {
956 write_lock_bh(&l2cap_sk_list.lock);
958 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
961 /* Save source address */
962 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
963 l2cap_pi(sk)->psm = la.l2_psm;
964 l2cap_pi(sk)->sport = la.l2_psm;
965 sk->sk_state = BT_BOUND;
967 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
968 __le16_to_cpu(la.l2_psm) == 0x0003)
969 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
972 write_unlock_bh(&l2cap_sk_list.lock);
/*
 * Establish (or reuse) the ACL link to the destination, derive the HCI
 * auth type from socket type / PSM / security level, attach the channel
 * to the connection and either wait (timer armed) or proceed directly
 * when the link is already up.  NOTE(review): listing dropped error
 * paths, break/default lines and the final return.
 */
979 static int l2cap_do_connect(struct sock *sk)
981 bdaddr_t *src = &bt_sk(sk)->src;
982 bdaddr_t *dst = &bt_sk(sk)->dst;
983 struct l2cap_conn *conn;
984 struct hci_conn *hcon;
985 struct hci_dev *hdev;
989 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
992 hdev = hci_get_route(dst, src);
994 return -EHOSTUNREACH;
996 hci_dev_lock_bh(hdev);
1000 if (sk->sk_type == SOCK_RAW) {
1001 switch (l2cap_pi(sk)->sec_level) {
1002 case BT_SECURITY_HIGH:
1003 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1005 case BT_SECURITY_MEDIUM:
1006 auth_type = HCI_AT_DEDICATED_BONDING;
1009 auth_type = HCI_AT_NO_BONDING;
1012 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1013 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1014 auth_type = HCI_AT_NO_BONDING_MITM;
1016 auth_type = HCI_AT_NO_BONDING;
1018 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
1019 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1021 switch (l2cap_pi(sk)->sec_level) {
1022 case BT_SECURITY_HIGH:
1023 auth_type = HCI_AT_GENERAL_BONDING_MITM;
1025 case BT_SECURITY_MEDIUM:
1026 auth_type = HCI_AT_GENERAL_BONDING;
1029 auth_type = HCI_AT_NO_BONDING;
1034 hcon = hci_connect(hdev, ACL_LINK, dst,
1035 l2cap_pi(sk)->sec_level, auth_type);
1039 conn = l2cap_conn_add(hcon, 0);
1047 /* Update source addr of the socket */
1048 bacpy(src, conn->src);
1050 l2cap_chan_add(conn, sk, NULL);
1052 sk->sk_state = BT_CONNECT;
1053 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1055 if (hcon->state == BT_CONNECTED) {
1056 if (sk->sk_type != SOCK_SEQPACKET &&
1057 sk->sk_type != SOCK_STREAM) {
1058 l2cap_sock_clear_timer(sk);
1059 sk->sk_state = BT_CONNECTED;
1065 hci_dev_unlock_bh(hdev);
/*
 * connect(2): validate address and PSM, reject ERTM/streaming modes
 * unless enable_ertm is set, record destination, start the connect and
 * optionally wait for BT_CONNECTED.  NOTE(review): listing dropped the
 * error returns, lock/release and the state-switch bodies.
 */
1070 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1072 struct sock *sk = sock->sk;
1073 struct sockaddr_l2 la;
1076 BT_DBG("sk %p", sk);
1078 if (!addr || alen < sizeof(addr->sa_family) ||
1079 addr->sa_family != AF_BLUETOOTH)
1082 memset(&la, 0, sizeof(la));
1083 len = min_t(unsigned int, sizeof(la), alen);
1084 memcpy(&la, addr, len);
1091 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1097 switch (l2cap_pi(sk)->mode) {
1098 case L2CAP_MODE_BASIC:
1100 case L2CAP_MODE_ERTM:
1101 case L2CAP_MODE_STREAMING:
1110 switch (sk->sk_state) {
1114 /* Already connecting */
1118 /* Already connected */
1131 /* Set destination address and psm */
1132 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1133 l2cap_pi(sk)->psm = la.l2_psm;
1135 err = l2cap_do_connect(sk);
1140 err = bt_sock_wait_state(sk, BT_CONNECTED,
1141 sock_sndtimeo(sk, flags & O_NONBLOCK));
/*
 * listen(2): only bound SEQPACKET/STREAM sockets may listen; ERTM and
 * streaming modes require enable_ertm.  A socket without a PSM gets an
 * odd PSM auto-assigned from the dynamic range 0x1001..0x10ff.
 * NOTE(review): listing dropped lock/err/goto lines.
 */
1147 static int l2cap_sock_listen(struct socket *sock, int backlog)
1149 struct sock *sk = sock->sk;
1152 BT_DBG("sk %p backlog %d", sk, backlog);
1156 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1157 || sk->sk_state != BT_BOUND) {
1162 switch (l2cap_pi(sk)->mode) {
1163 case L2CAP_MODE_BASIC:
1165 case L2CAP_MODE_ERTM:
1166 case L2CAP_MODE_STREAMING:
1175 if (!l2cap_pi(sk)->psm) {
1176 bdaddr_t *src = &bt_sk(sk)->src;
1181 write_lock_bh(&l2cap_sk_list.lock);
1183 for (psm = 0x1001; psm < 0x1100; psm += 2)
1184 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1185 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1186 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1191 write_unlock_bh(&l2cap_sk_list.lock);
1197 sk->sk_max_ack_backlog = backlog;
1198 sk->sk_ack_backlog = 0;
1199 sk->sk_state = BT_LISTEN;
/*
 * accept(2): standard wake-one wait loop on the listening socket's
 * accept queue, honouring O_NONBLOCK, signals and socket-state changes.
 * NOTE(review): listing dropped the timeout/EAGAIN checks, release_sock
 * around schedule_timeout, and the done/error exits.
 */
1206 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1208 DECLARE_WAITQUEUE(wait, current);
1209 struct sock *sk = sock->sk, *nsk;
1213 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1215 if (sk->sk_state != BT_LISTEN) {
1220 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1222 BT_DBG("sk %p timeo %ld", sk, timeo);
1224 /* Wait for an incoming connection. (wake-one). */
1225 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1226 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1227 set_current_state(TASK_INTERRUPTIBLE);
1234 timeo = schedule_timeout(timeo);
1235 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1237 if (sk->sk_state != BT_LISTEN) {
1242 if (signal_pending(current)) {
1243 err = sock_intr_errno(timeo);
1247 set_current_state(TASK_RUNNING);
1248 remove_wait_queue(sk_sleep(sk), &wait);
1253 newsock->state = SS_CONNECTED;
1255 BT_DBG("new socket %p", nsk);
/*
 * getsockname/getpeername: fill a sockaddr_l2 with either the peer
 * (psm, dst, dcid) or the local (sport, src, scid) identity.
 * NOTE(review): the if (peer) / else lines were dropped from this
 * listing; the two triples below are the two branches.
 */
1262 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1264 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1265 struct sock *sk = sock->sk;
1267 BT_DBG("sock %p, sk %p", sock, sk);
1269 addr->sa_family = AF_BLUETOOTH;
1270 *len = sizeof(struct sockaddr_l2);
1273 la->l2_psm = l2cap_pi(sk)->psm;
1274 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1275 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1277 la->l2_psm = l2cap_pi(sk)->sport;
1278 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1279 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/*
 * Block until every transmitted ERTM I-frame has been acknowledged (or
 * the channel/connection goes away), honouring signals and socket
 * errors.  NOTE(review): listing dropped timeo initialisation, the
 * release_sock/lock_sock around schedule_timeout and the breaks.
 */
1285 static int __l2cap_wait_ack(struct sock *sk)
1287 DECLARE_WAITQUEUE(wait, current);
1291 add_wait_queue(sk_sleep(sk), &wait);
1292 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1293 set_current_state(TASK_INTERRUPTIBLE);
1298 if (signal_pending(current)) {
1299 err = sock_intr_errno(timeo);
1304 timeo = schedule_timeout(timeo);
1307 err = sock_error(sk);
1311 set_current_state(TASK_RUNNING);
1312 remove_wait_queue(sk_sleep(sk), &wait);
/*
 * ERTM monitor timer: the peer stopped answering our poll.  Give up
 * once retry_count reaches remote_max_tx, otherwise re-poll with an
 * RR/RNR carrying the P bit.  NOTE(review): listing dropped the
 * bh_lock/unlock and return lines.
 */
1316 static void l2cap_monitor_timeout(unsigned long arg)
1318 struct sock *sk = (void *) arg;
1320 BT_DBG("sk %p", sk);
1323 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1324 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1329 l2cap_pi(sk)->retry_count++;
1330 __mod_monitor_timer();
1332 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/*
 * ERTM retransmission timer: no ack arrived in time, so enter the
 * WAIT_F state, start the monitor timer and poll the peer (P bit set).
 * NOTE(review): listing dropped the bh_lock/unlock lines.
 */
1336 static void l2cap_retrans_timeout(unsigned long arg)
1338 struct sock *sk = (void *) arg;
1340 BT_DBG("sk %p", sk);
1343 l2cap_pi(sk)->retry_count = 1;
1344 __mod_monitor_timer();
1346 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1348 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/*
 * Drop sent-but-unacked frames from the head of the TX queue up to
 * expected_ack_seq; stop the retransmission timer once nothing remains
 * outstanding.  NOTE(review): listing dropped the break/kfree_skb and
 * return lines.
 */
1352 static void l2cap_drop_acked_frames(struct sock *sk)
1354 struct sk_buff *skb;
1356 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1357 l2cap_pi(sk)->unacked_frames) {
1358 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1361 skb = skb_dequeue(TX_QUEUE(sk));
1364 l2cap_pi(sk)->unacked_frames--;
1367 if (!l2cap_pi(sk)->unacked_frames)
1368 del_timer(&l2cap_pi(sk)->retrans_timer);
1371 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1373 struct l2cap_pinfo *pi = l2cap_pi(sk);
1375 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1377 hci_send_acl(pi->conn->hcon, skb, 0);
/*
 * Streaming mode TX: clone each queued frame, stamp TxSeq (mod 64) and
 * optional CRC16 FCS into the clone, send it, then drop the original —
 * streaming mode never retransmits.  NOTE(review): listing dropped
 * local decls (control, fcs), kfree_skb and the return.
 */
1380 static int l2cap_streaming_send(struct sock *sk)
1382 struct sk_buff *skb, *tx_skb;
1383 struct l2cap_pinfo *pi = l2cap_pi(sk);
1386 while ((skb = sk->sk_send_head)) {
1387 tx_skb = skb_clone(skb, GFP_ATOMIC);
1389 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1390 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1391 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1393 if (pi->fcs == L2CAP_FCS_CRC16) {
1394 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1395 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1398 l2cap_do_send(sk, tx_skb);
1400 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1402 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1403 sk->sk_send_head = NULL;
1405 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1407 skb = skb_dequeue(TX_QUEUE(sk));
/*
 * Retransmit the single I-frame with sequence number tx_seq (SREJ
 * recovery): find it in the TX queue, bail out by disconnecting when
 * remote_max_tx is exhausted, rebuild its control field (F bit,
 * current ReqSeq) and FCS, and send a fresh clone.
 * NOTE(review): listing dropped the do{ opener and return lines.
 */
1413 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1415 struct l2cap_pinfo *pi = l2cap_pi(sk);
1416 struct sk_buff *skb, *tx_skb;
1419 skb = skb_peek(TX_QUEUE(sk));
1424 if (bt_cb(skb)->tx_seq == tx_seq)
1427 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1430 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1432 if (pi->remote_max_tx &&
1433 bt_cb(skb)->retries == pi->remote_max_tx) {
1434 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1438 tx_skb = skb_clone(skb, GFP_ATOMIC);
1439 bt_cb(skb)->retries++;
1440 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1442 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1443 control |= L2CAP_CTRL_FINAL;
1444 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1447 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1448 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1450 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1452 if (pi->fcs == L2CAP_FCS_CRC16) {
1453 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1454 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1457 l2cap_do_send(sk, tx_skb);
/*
 * ERTM TX path: while the window is open, clone the next queued frame,
 * stamp TxSeq/ReqSeq (and F bit) plus FCS, send it, arm the
 * retransmission timer and advance the bookkeeping.
 * NOTE(review): listing dropped local decls and return lines.
 */
1460 static int l2cap_ertm_send(struct sock *sk)
1462 struct sk_buff *skb, *tx_skb;
1463 struct l2cap_pinfo *pi = l2cap_pi(sk);
1467 if (sk->sk_state != BT_CONNECTED)
1470 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1472 if (pi->remote_max_tx &&
1473 bt_cb(skb)->retries == pi->remote_max_tx) {
1474 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1478 tx_skb = skb_clone(skb, GFP_ATOMIC);
1480 bt_cb(skb)->retries++;
1482 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1483 control &= L2CAP_CTRL_SAR;
1485 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1486 control |= L2CAP_CTRL_FINAL;
1487 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1489 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1490 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1491 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): the two lines below use skb->data where every sibling
 * TX path (streaming_send, retransmit_one_frame) uses tx_skb->data.
 * With skb_clone() the data buffer is shared so the result is the
 * same, but this was fixed upstream to tx_skb for consistency —
 * recommend the same here. */
1494 if (pi->fcs == L2CAP_FCS_CRC16) {
1495 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1496 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1499 l2cap_do_send(sk, tx_skb);
1501 __mod_retrans_timer();
1503 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1504 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1506 pi->unacked_frames++;
1509 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1510 sk->sk_send_head = NULL;
1512 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/*
 * Rewind the send pointer to the oldest unacked frame and resend the
 * whole window (REJ recovery), serialized by the channel's send lock.
 * NOTE(review): listing dropped the ret decl and return line.
 */
1520 static int l2cap_retransmit_frames(struct sock *sk)
1522 struct l2cap_pinfo *pi = l2cap_pi(sk);
1525 spin_lock_bh(&pi->send_lock);
1527 if (!skb_queue_empty(TX_QUEUE(sk)))
1528 sk->sk_send_head = TX_QUEUE(sk)->next;
1530 pi->next_tx_seq = pi->expected_ack_seq;
1531 ret = l2cap_ertm_send(sk);
1533 spin_unlock_bh(&pi->send_lock);
/*
 * Acknowledge received I-frames: send RNR when locally busy; otherwise
 * try to piggyback the ack on pending I-frames via l2cap_ertm_send(),
 * falling back to an explicit RR when nothing was sent.
 * NOTE(review): listing dropped the control/nframes decls, the early
 * return after RNR and the nframes > 0 check.
 */
1538 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1540 struct sock *sk = (struct sock *)pi;
1544 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1546 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1547 control |= L2CAP_SUPER_RCV_NOT_READY;
1548 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1549 l2cap_send_sframe(pi, control);
1553 spin_lock_bh(&pi->send_lock);
1554 nframes = l2cap_ertm_send(sk);
1555 spin_unlock_bh(&pi->send_lock);
1560 control |= L2CAP_SUPER_RCV_READY;
1561 l2cap_send_sframe(pi, control);
/*
 * Send a SREJ supervisory frame with the Final bit set, requesting
 * retransmission of the sequence number recorded at the tail of the
 * channel's SREJ list.
 */
1564 static void l2cap_send_srejtail(struct sock *sk)
1566 struct srej_list *tail;
1569 control = L2CAP_SUPER_SELECT_REJECT;
1570 control |= L2CAP_CTRL_FINAL;
/* prev of the list head is the most recently queued SREJ entry. */
1572 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1573 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1575 l2cap_send_sframe(l2cap_pi(sk), control);
/*
 * Copy user data from msg into skb: the first 'count' bytes go into
 * the linear part, the remaining 'len - count' bytes are split into
 * frag_list skbs of at most conn->mtu bytes each.
 * Returns 0 on success or a negative errno (copy/alloc failure).
 */
1578 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1580 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1581 struct sk_buff **frag;
1584 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1590 /* Continuation fragments (no L2CAP header) */
1591 frag = &skb_shinfo(skb)->frag_list;
1593 count = min_t(unsigned int, conn->mtu, len);
1595 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1598 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1604 frag = &(*frag)->next;
/*
 * Build a connectionless (SOCK_DGRAM) PDU: basic L2CAP header, the
 * 2-byte PSM, then the user payload copied from msg.
 * Returns the skb or ERR_PTR() on allocation/copy failure.
 */
1610 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1612 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1613 struct sk_buff *skb;
/* hlen: L2CAP header plus the 2-byte PSM field. */
1614 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1615 struct l2cap_hdr *lh;
1617 BT_DBG("sk %p len %d", sk, (int)len);
/* Linear part bounded by the ACL connection MTU; rest goes to frags. */
1619 count = min_t(unsigned int, (conn->mtu - hlen), len);
1620 skb = bt_skb_send_alloc(sk, count + hlen,
1621 msg->msg_flags & MSG_DONTWAIT, &err);
1623 return ERR_PTR(-ENOMEM);
1625 /* Create L2CAP header */
1626 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1627 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1628 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1629 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1631 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1632 if (unlikely(err < 0)) {
1634 return ERR_PTR(err);
/*
 * Build a basic-mode PDU: plain L2CAP header followed by the user
 * payload copied from msg.  Returns the skb or ERR_PTR() on failure.
 */
1639 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1641 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1642 struct sk_buff *skb;
1643 int err, count, hlen = L2CAP_HDR_SIZE;
1644 struct l2cap_hdr *lh;
1646 BT_DBG("sk %p len %d", sk, (int)len);
1648 count = min_t(unsigned int, (conn->mtu - hlen), len);
1649 skb = bt_skb_send_alloc(sk, count + hlen,
1650 msg->msg_flags & MSG_DONTWAIT, &err);
1652 return ERR_PTR(-ENOMEM);
1654 /* Create L2CAP header */
1655 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1656 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1657 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1659 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1660 if (unlikely(err < 0)) {
1662 return ERR_PTR(err);
/*
 * Build an ERTM/streaming I-frame PDU: L2CAP header, 16-bit control
 * field, an optional 2-byte SDU length (only written for SAR start
 * frames, i.e. non-zero sdulen) and, when CRC16 FCS is in use, a
 * 2-byte FCS placeholder filled in at transmit time.
 */
1667 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1669 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1670 struct sk_buff *skb;
/* hlen: L2CAP header + 2-byte control; sdulen/FCS added below. */
1671 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1672 struct l2cap_hdr *lh;
1674 BT_DBG("sk %p len %d", sk, (int)len);
1677 return ERR_PTR(-ENOTCONN);
1682 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1685 count = min_t(unsigned int, (conn->mtu - hlen), len);
1686 skb = bt_skb_send_alloc(sk, count + hlen,
1687 msg->msg_flags & MSG_DONTWAIT, &err);
1689 return ERR_PTR(-ENOMEM);
1691 /* Create L2CAP header */
1692 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1693 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1694 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1695 put_unaligned_le16(control, skb_put(skb, 2));
1697 put_unaligned_le16(sdulen, skb_put(skb, 2));
1699 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1700 if (unlikely(err < 0)) {
1702 return ERR_PTR(err);
/* FCS placeholder; the real CRC is computed when the frame is sent. */
1705 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1706 put_unaligned_le16(0, skb_put(skb, 2));
1708 bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than remote_mps into a chain of I-frames
 * (SAR start / continue / end) on a private queue, then splice the
 * complete chain onto the socket's TX queue so the transmit path
 * never sees a half-built SDU.  Returns 0 or a negative errno.
 */
1712 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1714 struct l2cap_pinfo *pi = l2cap_pi(sk);
1715 struct sk_buff *skb;
1716 struct sk_buff_head sar_queue;
1720 skb_queue_head_init(&sar_queue);
1721 control = L2CAP_SDU_START;
/* SAR start frame carries the total SDU length in its sdulen field. */
1722 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1724 return PTR_ERR(skb);
1726 __skb_queue_tail(&sar_queue, skb);
1727 len -= pi->remote_mps;
1728 size += pi->remote_mps;
1733 if (len > pi->remote_mps) {
1734 control = L2CAP_SDU_CONTINUE;
1735 buflen = pi->remote_mps;
1737 control = L2CAP_SDU_END;
/* On failure drop every frame built so far for this SDU. */
1741 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1743 skb_queue_purge(&sar_queue);
1744 return PTR_ERR(skb);
1747 __skb_queue_tail(&sar_queue, skb);
1751 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1752 spin_lock_bh(&pi->send_lock);
1753 if (sk->sk_send_head == NULL)
1754 sk->sk_send_head = sar_queue.next;
1755 spin_unlock_bh(&pi->send_lock);
1760 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1762 struct sock *sk = sock->sk;
1763 struct l2cap_pinfo *pi = l2cap_pi(sk);
1764 struct sk_buff *skb;
1768 BT_DBG("sock %p, sk %p", sock, sk);
1770 err = sock_error(sk);
1774 if (msg->msg_flags & MSG_OOB)
1779 if (sk->sk_state != BT_CONNECTED) {
1784 /* Connectionless channel */
1785 if (sk->sk_type == SOCK_DGRAM) {
1786 skb = l2cap_create_connless_pdu(sk, msg, len);
1790 l2cap_do_send(sk, skb);
1797 case L2CAP_MODE_BASIC:
1798 /* Check outgoing MTU */
1799 if (len > pi->omtu) {
1804 /* Create a basic PDU */
1805 skb = l2cap_create_basic_pdu(sk, msg, len);
1811 l2cap_do_send(sk, skb);
1815 case L2CAP_MODE_ERTM:
1816 case L2CAP_MODE_STREAMING:
1817 /* Entire SDU fits into one PDU */
1818 if (len <= pi->remote_mps) {
1819 control = L2CAP_SDU_UNSEGMENTED;
1820 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1825 __skb_queue_tail(TX_QUEUE(sk), skb);
1827 if (pi->mode == L2CAP_MODE_ERTM)
1828 spin_lock_bh(&pi->send_lock);
1830 if (sk->sk_send_head == NULL)
1831 sk->sk_send_head = skb;
1833 if (pi->mode == L2CAP_MODE_ERTM)
1834 spin_unlock_bh(&pi->send_lock);
1836 /* Segment SDU into multiples PDUs */
1837 err = l2cap_sar_segment_sdu(sk, msg, len);
1842 if (pi->mode == L2CAP_MODE_STREAMING) {
1843 err = l2cap_streaming_send(sk);
1845 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1846 pi->conn_state && L2CAP_CONN_WAIT_F) {
1850 spin_lock_bh(&pi->send_lock);
1851 err = l2cap_ertm_send(sk);
1852 spin_unlock_bh(&pi->send_lock);
1860 BT_DBG("bad state %1.1x", pi->mode);
/*
 * recvmsg: if an incoming connection was held pending by defer_setup
 * (state BT_CONNECT2), accept it now by sending the deferred connect
 * response, then fall through to the generic Bluetooth receive path.
 */
1869 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1871 struct sock *sk = sock->sk;
1875 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1876 struct l2cap_conn_rsp rsp;
1878 sk->sk_state = BT_CONFIG;
/* Our scid/dcid are swapped into the peer's view of the channel. */
1880 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1881 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1882 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1883 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1884 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1885 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1893 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/*
 * Legacy SOL_L2CAP setsockopt.
 * L2CAP_OPTIONS: copy in channel parameters (MTUs, flush timeout,
 * mode, FCS, max_tx, tx window), validating the tx window size.
 * L2CAP_LM: map link-mode flags onto sec_level, role_switch and
 * force_reliable.
 */
1896 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1898 struct sock *sk = sock->sk;
1899 struct l2cap_options opts;
1903 BT_DBG("sk %p", sk);
/* Seed opts with current values so a short user copy keeps the rest. */
1909 opts.imtu = l2cap_pi(sk)->imtu;
1910 opts.omtu = l2cap_pi(sk)->omtu;
1911 opts.flush_to = l2cap_pi(sk)->flush_to;
1912 opts.mode = l2cap_pi(sk)->mode;
1913 opts.fcs = l2cap_pi(sk)->fcs;
1914 opts.max_tx = l2cap_pi(sk)->max_tx;
1915 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1917 len = min_t(unsigned int, sizeof(opts), optlen);
1918 if (copy_from_user((char *) &opts, optval, len)) {
1923 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1928 l2cap_pi(sk)->mode = opts.mode;
1929 switch (l2cap_pi(sk)->mode) {
1930 case L2CAP_MODE_BASIC:
1931 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
1933 case L2CAP_MODE_ERTM:
1934 case L2CAP_MODE_STREAMING:
1943 l2cap_pi(sk)->imtu = opts.imtu;
1944 l2cap_pi(sk)->omtu = opts.omtu;
1945 l2cap_pi(sk)->fcs = opts.fcs;
1946 l2cap_pi(sk)->max_tx = opts.max_tx;
1947 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1951 if (get_user(opt, (u32 __user *) optval)) {
/* Highest requested link-mode flag wins for the security level. */
1956 if (opt & L2CAP_LM_AUTH)
1957 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1958 if (opt & L2CAP_LM_ENCRYPT)
1959 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1960 if (opt & L2CAP_LM_SECURE)
1961 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1963 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1964 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/*
 * setsockopt entry point: SOL_L2CAP is routed to the legacy handler;
 * SOL_BLUETOOTH supports BT_SECURITY (sec_level, with range check and
 * socket-type restriction) and BT_DEFER_SETUP (only while bound or
 * listening).  Other levels return -ENOPROTOOPT.
 */
1976 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1978 struct sock *sk = sock->sk;
1979 struct bt_security sec;
1983 BT_DBG("sk %p", sk);
1985 if (level == SOL_L2CAP)
1986 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1988 if (level != SOL_BLUETOOTH)
1989 return -ENOPROTOOPT;
1995 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
1996 && sk->sk_type != SOCK_RAW) {
2001 sec.level = BT_SECURITY_LOW;
2003 len = min_t(unsigned int, sizeof(sec), optlen);
2004 if (copy_from_user((char *) &sec, optval, len)) {
2009 if (sec.level < BT_SECURITY_LOW ||
2010 sec.level > BT_SECURITY_HIGH) {
2015 l2cap_pi(sk)->sec_level = sec.level;
2018 case BT_DEFER_SETUP:
2019 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2024 if (get_user(opt, (u32 __user *) optval)) {
2029 bt_sk(sk)->defer_setup = opt;
/*
 * Legacy SOL_L2CAP getsockopt: L2CAP_OPTIONS returns the current
 * channel parameters, L2CAP_LM reconstructs link-mode flags from
 * sec_level/role_switch/force_reliable, L2CAP_CONNINFO returns the
 * HCI handle and remote device class (only once connected or in
 * deferred-setup state).
 */
2041 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2043 struct sock *sk = sock->sk;
2044 struct l2cap_options opts;
2045 struct l2cap_conninfo cinfo;
2049 BT_DBG("sk %p", sk);
2051 if (get_user(len, optlen))
2058 opts.imtu = l2cap_pi(sk)->imtu;
2059 opts.omtu = l2cap_pi(sk)->omtu;
2060 opts.flush_to = l2cap_pi(sk)->flush_to;
2061 opts.mode = l2cap_pi(sk)->mode;
2062 opts.fcs = l2cap_pi(sk)->fcs;
2063 opts.max_tx = l2cap_pi(sk)->max_tx;
2064 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2066 len = min_t(unsigned int, len, sizeof(opts));
2067 if (copy_to_user(optval, (char *) &opts, len))
/* Map sec_level back to the cumulative legacy link-mode bits. */
2073 switch (l2cap_pi(sk)->sec_level) {
2074 case BT_SECURITY_LOW:
2075 opt = L2CAP_LM_AUTH;
2077 case BT_SECURITY_MEDIUM:
2078 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2080 case BT_SECURITY_HIGH:
2081 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2089 if (l2cap_pi(sk)->role_switch)
2090 opt |= L2CAP_LM_MASTER;
2092 if (l2cap_pi(sk)->force_reliable)
2093 opt |= L2CAP_LM_RELIABLE;
2095 if (put_user(opt, (u32 __user *) optval))
2099 case L2CAP_CONNINFO:
2100 if (sk->sk_state != BT_CONNECTED &&
2101 !(sk->sk_state == BT_CONNECT2 &&
2102 bt_sk(sk)->defer_setup)) {
2107 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2108 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2110 len = min_t(unsigned int, len, sizeof(cinfo));
2111 if (copy_to_user(optval, (char *) &cinfo, len))
/*
 * getsockopt entry point: SOL_L2CAP goes to the legacy handler;
 * SOL_BLUETOOTH supports BT_SECURITY and BT_DEFER_SETUP (the latter
 * only while bound or listening).
 */
2125 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2127 struct sock *sk = sock->sk;
2128 struct bt_security sec;
2131 BT_DBG("sk %p", sk);
2133 if (level == SOL_L2CAP)
2134 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2136 if (level != SOL_BLUETOOTH)
2137 return -ENOPROTOOPT;
2139 if (get_user(len, optlen))
2146 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2147 && sk->sk_type != SOCK_RAW) {
2152 sec.level = l2cap_pi(sk)->sec_level;
2154 len = min_t(unsigned int, len, sizeof(sec));
2155 if (copy_to_user(optval, (char *) &sec, len))
2160 case BT_DEFER_SETUP:
2161 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2166 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/*
 * Shut down the socket: for ERTM channels first wait for outstanding
 * acks, then mark both directions closed, cancel the channel timer and
 * close the channel.  With SO_LINGER set, wait for BT_CLOSED up to the
 * linger time.
 */
2180 static int l2cap_sock_shutdown(struct socket *sock, int how)
2182 struct sock *sk = sock->sk;
2185 BT_DBG("sock %p, sk %p", sock, sk);
2191 if (!sk->sk_shutdown) {
2192 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2193 err = __l2cap_wait_ack(sk);
2195 sk->sk_shutdown = SHUTDOWN_MASK;
2196 l2cap_sock_clear_timer(sk);
2197 __l2cap_sock_close(sk, 0);
2199 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2200 err = bt_sock_wait_state(sk, BT_CLOSED,
2204 if (!err && sk->sk_err)
/*
 * Release the socket: perform a full shutdown, then kill the sock.
 */
2211 static int l2cap_sock_release(struct socket *sock)
2213 struct sock *sk = sock->sk;
2216 BT_DBG("sock %p, sk %p", sock, sk);
2221 err = l2cap_sock_shutdown(sock, 2);
2224 l2cap_sock_kill(sk);
/*
 * Channel configuration finished: clear config state and the channel
 * timer, then wake whoever is waiting - the connect()er for outgoing
 * channels, the accept()er (via the parent socket) for incoming ones.
 */
2228 static void l2cap_chan_ready(struct sock *sk)
2230 struct sock *parent = bt_sk(sk)->parent;
2232 BT_DBG("sk %p, parent %p", sk, parent);
2234 l2cap_pi(sk)->conf_state = 0;
2235 l2cap_sock_clear_timer(sk);
2238 /* Outgoing channel.
2239 * Wake up socket sleeping on connect.
2241 sk->sk_state = BT_CONNECTED;
2242 sk->sk_state_change(sk);
2244 /* Incoming channel.
2245 * Wake up socket sleeping on accept.
2247 parent->sk_data_ready(parent, 0);
2251 /* Copy frame to all raw sockets on that connection */
2252 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2254 struct l2cap_chan_list *l = &conn->chan_list;
2255 struct sk_buff *nskb;
2258 BT_DBG("conn %p", conn);
/* Walk the connection's channel list and clone into each raw socket. */
2260 read_lock(&l->lock);
2261 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2262 if (sk->sk_type != SOCK_RAW)
2265 /* Don't send frame to the socket it came from */
2268 nskb = skb_clone(skb, GFP_ATOMIC);
/* Queue failure drops the clone for this socket only. */
2272 if (sock_queue_rcv_skb(sk, nskb))
2275 read_unlock(&l->lock);
2278 /* ---- L2CAP signalling commands ---- */
/*
 * Build a signalling-channel command skb: L2CAP header addressed to
 * the signalling CID, command header (code/ident/len), then the
 * payload; data exceeding the ACL MTU continues in frag_list skbs.
 */
2279 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2280 u8 code, u8 ident, u16 dlen, void *data)
2282 struct sk_buff *skb, **frag;
2283 struct l2cap_cmd_hdr *cmd;
2284 struct l2cap_hdr *lh;
2287 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2288 conn, code, ident, dlen);
2290 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2291 count = min_t(unsigned int, conn->mtu, len);
2293 skb = bt_skb_alloc(count, GFP_ATOMIC);
2297 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2298 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2299 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2301 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2304 cmd->len = cpu_to_le16(dlen);
2307 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2308 memcpy(skb_put(skb, count), data, count);
2314 /* Continuation fragments (no L2CAP header) */
2315 frag = &skb_shinfo(skb)->frag_list;
2317 count = min_t(unsigned int, conn->mtu, len);
2319 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2323 memcpy(skb_put(*frag, count), data, count);
2328 frag = &(*frag)->next;
/*
 * Decode one configuration option at *ptr: store its type and length,
 * read 1/2/4-byte values in host order, or return a pointer for
 * variable-length options.  Returns the total bytes consumed.
 */
2338 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2340 struct l2cap_conf_opt *opt = *ptr;
2343 len = L2CAP_CONF_OPT_SIZE + opt->len;
2351 *val = *((u8 *) opt->val);
2355 *val = __le16_to_cpu(*((__le16 *) opt->val));
2359 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Variable-length option: hand back a pointer, not a value. */
2363 *val = (unsigned long) opt->val;
2367 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option at *ptr, encoding 1/2/4-byte values
 * little-endian and memcpy'ing longer ones, then advance *ptr past the
 * option.  Mirror of l2cap_get_conf_opt().
 */
2371 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2373 struct l2cap_conf_opt *opt = *ptr;
2375 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2382 *((u8 *) opt->val) = val;
2386 *((__le16 *) opt->val) = cpu_to_le16(val);
2390 *((__le32 *) opt->val) = cpu_to_le32(val);
/* Longer options pass a pointer in 'val'. */
2394 memcpy(opt->val, (void *) val, len);
2398 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * ERTM ack timer callback: the delayed-ack window expired, send the
 * pending acknowledgement for the socket passed as timer argument.
 */
2401 static void l2cap_ack_timeout(unsigned long arg)
2403 struct sock *sk = (void *) arg;
2406 l2cap_send_ack(l2cap_pi(sk));
/*
 * Initialise per-channel ERTM state: reset sequence counters, set up
 * the retransmission/monitor/ack timers, the SREJ and busy queues,
 * the send_lock and the local-busy work item.
 */
2410 static inline void l2cap_ertm_init(struct sock *sk)
2412 l2cap_pi(sk)->expected_ack_seq = 0;
2413 l2cap_pi(sk)->unacked_frames = 0;
2414 l2cap_pi(sk)->buffer_seq = 0;
2415 l2cap_pi(sk)->num_acked = 0;
2416 l2cap_pi(sk)->frames_sent = 0;
2418 setup_timer(&l2cap_pi(sk)->retrans_timer,
2419 l2cap_retrans_timeout, (unsigned long) sk);
2420 setup_timer(&l2cap_pi(sk)->monitor_timer,
2421 l2cap_monitor_timeout, (unsigned long) sk);
2422 setup_timer(&l2cap_pi(sk)->ack_timer,
2423 l2cap_ack_timeout, (unsigned long) sk);
2425 __skb_queue_head_init(SREJ_QUEUE(sk));
2426 __skb_queue_head_init(BUSY_QUEUE(sk));
2427 spin_lock_init(&l2cap_pi(sk)->send_lock);
2429 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
/*
 * Check whether 'mode' is usable: ERTM/streaming must be advertised in
 * both the remote feat_mask and our local feature mask (extended with
 * ERTM/streaming bits; the elided line before 2436 presumably gates
 * this on the enable_ertm module parameter - confirm in full source).
 */
2432 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2434 u32 local_feat_mask = l2cap_feat_mask;
2436 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2439 case L2CAP_MODE_ERTM:
2440 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2441 case L2CAP_MODE_STREAMING:
2442 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/*
 * Pick the channel mode: keep the requested ERTM/streaming mode when
 * both sides support it, otherwise fall back to basic mode.
 */
2448 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2451 case L2CAP_MODE_STREAMING:
2452 case L2CAP_MODE_ERTM:
2453 if (l2cap_mode_supported(mode, remote_feat_mask))
2457 return L2CAP_MODE_BASIC;
/*
 * Build an outgoing configuration request into 'data': select the
 * channel mode (downgrading on the first request if the peer lacks
 * ERTM/streaming support), then emit MTU, RFC and FCS options as
 * appropriate for the chosen mode.  Returns the request length.
 */
2461 static int l2cap_build_conf_req(struct sock *sk, void *data)
2463 struct l2cap_pinfo *pi = l2cap_pi(sk);
2464 struct l2cap_conf_req *req = data;
2465 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2466 void *ptr = req->data;
2468 BT_DBG("sk %p", sk);
/* Mode (re)selection only happens before the first config exchange. */
2470 if (pi->num_conf_req || pi->num_conf_rsp)
2474 case L2CAP_MODE_STREAMING:
2475 case L2CAP_MODE_ERTM:
2476 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2477 pi->mode = l2cap_select_mode(rfc.mode,
2478 pi->conn->feat_mask);
/* Mode was pinned by the user; if unsupported, tear down. */
2482 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2483 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
2486 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2492 case L2CAP_MODE_BASIC:
2493 if (pi->imtu != L2CAP_DEFAULT_MTU)
2494 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2496 rfc.mode = L2CAP_MODE_BASIC;
2498 rfc.max_transmit = 0;
2499 rfc.retrans_timeout = 0;
2500 rfc.monitor_timeout = 0;
2501 rfc.max_pdu_size = 0;
2505 case L2CAP_MODE_ERTM:
2506 rfc.mode = L2CAP_MODE_ERTM;
2507 rfc.txwin_size = pi->tx_win;
2508 rfc.max_transmit = pi->max_tx;
2509 rfc.retrans_timeout = 0;
2510 rfc.monitor_timeout = 0;
2511 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Cap MPS so header+control+FCS still fit in the ACL MTU. */
2512 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2513 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2515 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2518 if (pi->fcs == L2CAP_FCS_NONE ||
2519 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2520 pi->fcs = L2CAP_FCS_NONE;
2521 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2525 case L2CAP_MODE_STREAMING:
2526 rfc.mode = L2CAP_MODE_STREAMING;
2528 rfc.max_transmit = 0;
2529 rfc.retrans_timeout = 0;
2530 rfc.monitor_timeout = 0;
2531 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2532 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2533 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2535 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2538 if (pi->fcs == L2CAP_FCS_NONE ||
2539 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2540 pi->fcs = L2CAP_FCS_NONE;
2541 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2546 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2547 (unsigned long) &rfc);
2549 /* FIXME: Need actual value of the flush timeout */
2550 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2551 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2553 req->dcid = cpu_to_le16(pi->dcid);
2554 req->flags = cpu_to_le16(0);
/*
 * Parse the peer's buffered configuration request (pi->conf_req /
 * conf_len) and build our response into 'data': walk every option,
 * record MTU/RFC/FCS, reject unknown non-hint options, negotiate the
 * mode, then emit the accepted output options.  Returns the response
 * length or -ECONNREFUSED when the modes cannot be reconciled.
 */
2559 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2561 struct l2cap_pinfo *pi = l2cap_pi(sk);
2562 struct l2cap_conf_rsp *rsp = data;
2563 void *ptr = rsp->data;
2564 void *req = pi->conf_req;
2565 int len = pi->conf_len;
2566 int type, hint, olen;
2568 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2569 u16 mtu = L2CAP_DEFAULT_MTU;
2570 u16 result = L2CAP_CONF_SUCCESS;
2572 BT_DBG("sk %p", sk);
2574 while (len >= L2CAP_CONF_OPT_SIZE) {
2575 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored; others must be known. */
2577 hint = type & L2CAP_CONF_HINT;
2578 type &= L2CAP_CONF_MASK;
2581 case L2CAP_CONF_MTU:
2585 case L2CAP_CONF_FLUSH_TO:
2589 case L2CAP_CONF_QOS:
2592 case L2CAP_CONF_RFC:
2593 if (olen == sizeof(rfc))
2594 memcpy(&rfc, (void *) val, olen);
2597 case L2CAP_CONF_FCS:
2598 if (val == L2CAP_FCS_NONE)
2599 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown option: list its type in the UNKNOWN response. */
2607 result = L2CAP_CONF_UNKNOWN;
2608 *((u8 *) ptr++) = type;
2613 if (pi->num_conf_rsp || pi->num_conf_req)
2617 case L2CAP_MODE_STREAMING:
2618 case L2CAP_MODE_ERTM:
2619 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2620 pi->mode = l2cap_select_mode(rfc.mode,
2621 pi->conn->feat_mask);
2625 if (pi->mode != rfc.mode)
2626 return -ECONNREFUSED;
2632 if (pi->mode != rfc.mode) {
2633 result = L2CAP_CONF_UNACCEPT;
2634 rfc.mode = pi->mode;
/* Second disagreement on mode means configuration has failed. */
2636 if (pi->num_conf_rsp == 1)
2637 return -ECONNREFUSED;
2639 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2640 sizeof(rfc), (unsigned long) &rfc);
2644 if (result == L2CAP_CONF_SUCCESS) {
2645 /* Configure output options and let the other side know
2646 * which ones we don't like. */
2648 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2649 result = L2CAP_CONF_UNACCEPT;
2652 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2654 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2657 case L2CAP_MODE_BASIC:
2658 pi->fcs = L2CAP_FCS_NONE;
2659 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2662 case L2CAP_MODE_ERTM:
2663 pi->remote_tx_win = rfc.txwin_size;
2664 pi->remote_max_tx = rfc.max_transmit;
/* NOTE(review): rfc.max_pdu_size is __le16 but is compared against a
 * host-order value, and the clamp uses le16_to_cpu() on a host-order
 * expression where cpu_to_le16() is required.  Harmless only on
 * little-endian hosts; fixed upstream - confirm against the full
 * source before relying on big-endian behavior. */
2665 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2666 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2668 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): same endianness issue - these stores into __le16
 * fields should use cpu_to_le16(). */
2670 rfc.retrans_timeout =
2671 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2672 rfc.monitor_timeout =
2673 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2675 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2677 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2678 sizeof(rfc), (unsigned long) &rfc);
2682 case L2CAP_MODE_STREAMING:
2683 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2684 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2686 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2688 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2690 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2691 sizeof(rfc), (unsigned long) &rfc);
2696 result = L2CAP_CONF_UNACCEPT;
2698 memset(&rfc, 0, sizeof(rfc));
2699 rfc.mode = pi->mode;
2702 if (result == L2CAP_CONF_SUCCESS)
2703 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2705 rsp->scid = cpu_to_le16(pi->dcid);
2706 rsp->result = cpu_to_le16(result);
2707 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse a configuration response from the peer and build the follow-up
 * request into 'data': re-propose MTU/flush-to/RFC values we can live
 * with, adopt the negotiated ERTM/streaming parameters on success, and
 * refuse the connection when the peer forces an incompatible mode.
 * Returns the new request length or -ECONNREFUSED.
 */
2712 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2714 struct l2cap_pinfo *pi = l2cap_pi(sk);
2715 struct l2cap_conf_req *req = data;
2716 void *ptr = req->data;
2719 struct l2cap_conf_rfc rfc;
2721 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2723 while (len >= L2CAP_CONF_OPT_SIZE) {
2724 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2727 case L2CAP_CONF_MTU:
/* Peer offered too small an MTU: counter with our minimum. */
2728 if (val < L2CAP_DEFAULT_MIN_MTU) {
2729 *result = L2CAP_CONF_UNACCEPT;
2730 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2733 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2736 case L2CAP_CONF_FLUSH_TO:
2738 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2742 case L2CAP_CONF_RFC:
2743 if (olen == sizeof(rfc))
2744 memcpy(&rfc, (void *)val, olen);
/* User pinned the mode; the peer may not change it. */
2746 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2747 rfc.mode != pi->mode)
2748 return -ECONNREFUSED;
2752 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2753 sizeof(rfc), (unsigned long) &rfc);
2758 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2759 return -ECONNREFUSED;
2761 pi->mode = rfc.mode;
2763 if (*result == L2CAP_CONF_SUCCESS) {
2765 case L2CAP_MODE_ERTM:
2766 pi->remote_tx_win = rfc.txwin_size;
2767 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2768 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2769 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2771 case L2CAP_MODE_STREAMING:
2772 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2776 req->dcid = cpu_to_le16(pi->dcid);
2777 req->flags = cpu_to_le16(0x0000);
/*
 * Build a minimal configuration response (scid, result, flags) into
 * 'data' and return its length.
 */
2782 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2784 struct l2cap_conf_rsp *rsp = data;
2785 void *ptr = rsp->data;
2787 BT_DBG("sk %p", sk);
2789 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2790 rsp->result = cpu_to_le16(result);
2791 rsp->flags = cpu_to_le16(flags);
/*
 * Extract the RFC option from a successful configuration response and
 * adopt its ERTM/streaming parameters (tx window, timeouts, MPS).
 * No-op for basic-mode channels.
 */
2796 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2798 struct l2cap_pinfo *pi = l2cap_pi(sk);
2801 struct l2cap_conf_rfc rfc;
2803 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2805 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2808 while (len >= L2CAP_CONF_OPT_SIZE) {
2809 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2812 case L2CAP_CONF_RFC:
2813 if (olen == sizeof(rfc))
2814 memcpy(&rfc, (void *)val, olen);
2821 case L2CAP_MODE_ERTM:
2822 pi->remote_tx_win = rfc.txwin_size;
2823 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2824 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2825 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2827 case L2CAP_MODE_STREAMING:
2828 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle a Command Reject: if it answers our outstanding information
 * request (reason "command not understood"), stop the info timer, mark
 * feature discovery done and kick pending channel setup.
 */
2832 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2834 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2836 if (rej->reason != 0x0000)
2839 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2840 cmd->ident == conn->info_ident) {
2841 del_timer(&conn->info_timer);
2843 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2844 conn->info_ident = 0;
2846 l2cap_conn_start(conn);
/*
 * Handle an incoming Connection Request: find a listener for the PSM,
 * enforce link security (except for SDP), check the accept backlog,
 * allocate and register a child socket, then answer with success,
 * pending (authentication or defer_setup) or a refusal.  If our
 * feature-mask exchange has not completed yet, start it.
 */
2852 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2854 struct l2cap_chan_list *list = &conn->chan_list;
2855 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2856 struct l2cap_conn_rsp rsp;
2857 struct sock *sk, *parent;
2858 int result, status = L2CAP_CS_NO_INFO;
2860 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2861 __le16 psm = req->psm;
2863 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2865 /* Check if we have socket listening on psm */
2866 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2868 result = L2CAP_CR_BAD_PSM;
2872 /* Check if the ACL is secure enough (if not SDP) */
2873 if (psm != cpu_to_le16(0x0001) &&
2874 !hci_conn_check_link_mode(conn->hcon)) {
2875 conn->disc_reason = 0x05;
2876 result = L2CAP_CR_SEC_BLOCK;
2880 result = L2CAP_CR_NO_MEM;
2882 /* Check for backlog size */
2883 if (sk_acceptq_is_full(parent)) {
2884 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2888 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2892 write_lock_bh(&list->lock);
2894 /* Check if we already have channel with that dcid */
2895 if (__l2cap_get_chan_by_dcid(list, scid)) {
2896 write_unlock_bh(&list->lock);
2897 sock_set_flag(sk, SOCK_ZAPPED);
2898 l2cap_sock_kill(sk);
2902 hci_conn_hold(conn->hcon);
2904 l2cap_sock_init(sk, parent);
2905 bacpy(&bt_sk(sk)->src, conn->src);
2906 bacpy(&bt_sk(sk)->dst, conn->dst);
2907 l2cap_pi(sk)->psm = psm;
2908 l2cap_pi(sk)->dcid = scid;
2910 __l2cap_chan_add(conn, sk, parent);
2911 dcid = l2cap_pi(sk)->scid;
2913 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Remember the request ident for a deferred response. */
2915 l2cap_pi(sk)->ident = cmd->ident;
2917 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2918 if (l2cap_check_security(sk)) {
2919 if (bt_sk(sk)->defer_setup) {
2920 sk->sk_state = BT_CONNECT2;
2921 result = L2CAP_CR_PEND;
2922 status = L2CAP_CS_AUTHOR_PEND;
2923 parent->sk_data_ready(parent, 0);
2925 sk->sk_state = BT_CONFIG;
2926 result = L2CAP_CR_SUCCESS;
2927 status = L2CAP_CS_NO_INFO;
2930 sk->sk_state = BT_CONNECT2;
2931 result = L2CAP_CR_PEND;
2932 status = L2CAP_CS_AUTHEN_PEND;
2935 sk->sk_state = BT_CONNECT2;
2936 result = L2CAP_CR_PEND;
2937 status = L2CAP_CS_NO_INFO;
2940 write_unlock_bh(&list->lock);
2943 bh_unlock_sock(parent);
2946 rsp.scid = cpu_to_le16(scid);
2947 rsp.dcid = cpu_to_le16(dcid);
2948 rsp.result = cpu_to_le16(result);
2949 rsp.status = cpu_to_le16(status);
2950 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* PEND + NO_INFO means we still need the peer's feature mask. */
2952 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2953 struct l2cap_info_req info;
2954 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2956 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2957 conn->info_ident = l2cap_get_ident(conn);
2959 mod_timer(&conn->info_timer, jiffies +
2960 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2962 l2cap_send_cmd(conn, conn->info_ident,
2963 L2CAP_INFO_REQ, sizeof(info), &info);
/*
 * Handle a Connection Response: locate the channel by scid (or by the
 * request ident for pending responses); on success record the remote
 * CID and kick off configuration, on pending just flag CONNECT_PEND,
 * otherwise tear the channel down with ECONNREFUSED.
 */
2969 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2971 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2972 u16 scid, dcid, result, status;
2976 scid = __le16_to_cpu(rsp->scid);
2977 dcid = __le16_to_cpu(rsp->dcid);
2978 result = __le16_to_cpu(rsp->result);
2979 status = __le16_to_cpu(rsp->status);
2981 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2984 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
/* A pending response carries scid 0; fall back to ident lookup. */
2988 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2994 case L2CAP_CR_SUCCESS:
2995 sk->sk_state = BT_CONFIG;
2996 l2cap_pi(sk)->ident = 0;
2997 l2cap_pi(sk)->dcid = dcid;
2998 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2999 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
3001 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3002 l2cap_build_conf_req(sk, req), req);
3003 l2cap_pi(sk)->num_conf_req++;
3007 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3011 l2cap_chan_del(sk, ECONNREFUSED);
/*
 * Handle a Configuration Request: accumulate (possibly multi-fragment)
 * option data in the per-channel buffer, reject oversized requests,
 * and once the final fragment arrives parse the options and reply.
 * When both directions are configured, finish channel setup (FCS
 * selection, sequence counters, ERTM init) and mark the channel ready;
 * send our own config request if we have not yet done so.
 */
3019 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3021 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3027 dcid = __le16_to_cpu(req->dcid);
3028 flags = __le16_to_cpu(req->flags);
3030 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3032 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3036 if (sk->sk_state == BT_DISCONN)
3039 /* Reject if config buffer is too small. */
3040 len = cmd_len - sizeof(*req);
3041 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
3042 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3043 l2cap_build_conf_rsp(sk, rsp,
3044 L2CAP_CONF_REJECT, flags), rsp);
3049 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
3050 l2cap_pi(sk)->conf_len += len;
3052 if (flags & 0x0001) {
3053 /* Incomplete config. Send empty response. */
3054 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3055 l2cap_build_conf_rsp(sk, rsp,
3056 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3060 /* Complete config. */
3061 len = l2cap_parse_conf_req(sk, rsp);
3063 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3067 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3068 l2cap_pi(sk)->num_conf_rsp++;
3070 /* Reset config buffer. */
3071 l2cap_pi(sk)->conf_len = 0;
3073 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
3076 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
/* CRC16 unless both sides agreed on no FCS. */
3077 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3078 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3079 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3081 sk->sk_state = BT_CONNECTED;
3083 l2cap_pi(sk)->next_tx_seq = 0;
3084 l2cap_pi(sk)->expected_tx_seq = 0;
3085 __skb_queue_head_init(TX_QUEUE(sk));
3086 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3087 l2cap_ertm_init(sk);
3089 l2cap_chan_ready(sk);
3093 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3095 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3096 l2cap_build_conf_req(sk, buf), buf);
3097 l2cap_pi(sk)->num_conf_req++;
/*
 * Handle a Configuration Response: on success adopt the peer's RFC
 * parameters; on UNACCEPT retry with an adjusted request up to the
 * retry limit; anything else (or retry exhaustion) disconnects the
 * channel.  When both directions are done, finish setup as in
 * l2cap_config_req().
 */
3105 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3107 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3108 u16 scid, flags, result;
/* NOTE(review): cmd->len is a __le16 field; this arithmetic appears to
 * lack __le16_to_cpu() and would be wrong on big-endian - confirm
 * against the full source. */
3110 int len = cmd->len - sizeof(*rsp);
3112 scid = __le16_to_cpu(rsp->scid);
3113 flags = __le16_to_cpu(rsp->flags);
3114 result = __le16_to_cpu(rsp->result);
3116 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3117 scid, flags, result);
3119 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3124 case L2CAP_CONF_SUCCESS:
3125 l2cap_conf_rfc_get(sk, rsp->data, len);
3128 case L2CAP_CONF_UNACCEPT:
3129 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3132 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3133 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3137 /* throw out any old stored conf requests */
3138 result = L2CAP_CONF_SUCCESS;
3139 len = l2cap_parse_conf_rsp(sk, rsp->data,
3142 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3146 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3147 L2CAP_CONF_REQ, len, req);
3148 l2cap_pi(sk)->num_conf_req++;
3149 if (result != L2CAP_CONF_SUCCESS)
3155 sk->sk_err = ECONNRESET;
3156 l2cap_sock_set_timer(sk, HZ * 5);
3157 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3164 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3166 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
/* CRC16 unless both sides agreed on no FCS. */
3167 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3168 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3169 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3171 sk->sk_state = BT_CONNECTED;
3172 l2cap_pi(sk)->next_tx_seq = 0;
3173 l2cap_pi(sk)->expected_tx_seq = 0;
3174 __skb_queue_head_init(TX_QUEUE(sk));
3175 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3176 l2cap_ertm_init(sk);
3178 l2cap_chan_ready(sk);
/* Handle a peer-initiated Disconnection Request: look the channel up by
 * our DCID, echo a Disconnection Response, then delete and kill the
 * socket. (Fragmented extract — intermediate lines elided.) */
3186 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3188 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3189 struct l2cap_disconn_rsp rsp;
3193 scid = __le16_to_cpu(req->scid);
3194 dcid = __le16_to_cpu(req->dcid);
3196 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's DCID is our SCID, so look up by dcid. */
3198 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Response carries the IDs swapped back into the peer's view. */
3202 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3203 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3204 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3206 sk->sk_shutdown = SHUTDOWN_MASK;
3208 l2cap_chan_del(sk, ECONNRESET);
3211 l2cap_sock_kill(sk);
/* Handle a Disconnection Response to our own request: channel is gone,
 * delete with no error and kill the socket. */
3215 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3217 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3221 scid = __le16_to_cpu(rsp->scid);
3222 dcid = __le16_to_cpu(rsp->dcid);
3224 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3226 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3230 l2cap_chan_del(sk, 0);
3233 l2cap_sock_kill(sk);
/* Answer an Information Request: report the feature mask (optionally
 * advertising ERTM/streaming), the fixed-channel map, or NOTSUPP for
 * unknown types. (Fragmented extract — intermediate lines elided.) */
3237 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3239 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3242 type = __le16_to_cpu(req->type);
3244 BT_DBG("type 0x%4.4x", type);
3246 if (type == L2CAP_IT_FEAT_MASK) {
3248 u32 feat_mask = l2cap_feat_mask;
3249 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3250 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3251 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Presumably gated on enable_ertm in an elided condition — confirm. */
3253 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3255 put_unaligned_le32(feat_mask, rsp->data);
3256 l2cap_send_cmd(conn, cmd->ident,
3257 L2CAP_INFO_RSP, sizeof(buf), buf);
3258 } else if (type == L2CAP_IT_FIXED_CHAN) {
3260 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3261 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3262 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Copy the 8-byte fixed-channel bitmap after the 4-byte rsp header. */
3263 memcpy(buf + 4, l2cap_fixed_chan, 8);
3264 l2cap_send_cmd(conn, cmd->ident,
3265 L2CAP_INFO_RSP, sizeof(buf), buf);
/* Unknown info type: reply NOTSUPP with no payload. */
3267 struct l2cap_info_rsp rsp;
3268 rsp.type = cpu_to_le16(type);
3269 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3270 l2cap_send_cmd(conn, cmd->ident,
3271 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response to our own request. Caches the remote
 * feature mask; if the peer supports fixed channels, chain a second
 * request for the fixed-channel map, otherwise finish the info exchange
 * and kick off pending channel setup via l2cap_conn_start(). */
3277 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3279 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3282 type = __le16_to_cpu(rsp->type);
3283 result = __le16_to_cpu(rsp->result);
3285 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* Response arrived in time: stop the info-request guard timer. */
3287 del_timer(&conn->info_timer);
3289 if (type == L2CAP_IT_FEAT_MASK) {
3290 conn->feat_mask = get_unaligned_le32(rsp->data);
3292 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3293 struct l2cap_info_req req;
3294 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3296 conn->info_ident = l2cap_get_ident(conn);
3298 l2cap_send_cmd(conn, conn->info_ident,
3299 L2CAP_INFO_REQ, sizeof(req), &req);
3301 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3302 conn->info_ident = 0;
3304 l2cap_conn_start(conn);
3306 } else if (type == L2CAP_IT_FIXED_CHAN) {
3307 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3308 conn->info_ident = 0;
3310 l2cap_conn_start(conn);
/* Demultiplex the L2CAP signalling channel (CID 0x0001): iterate over
 * the concatenated commands in the skb, dispatch each to its handler,
 * and send a Command Reject when a handler fails. The raw frame is also
 * mirrored to raw sockets first. (Fragmented extract — lines elided.) */
3316 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3318 u8 *data = skb->data;
3320 struct l2cap_cmd_hdr cmd;
3323 l2cap_raw_recv(conn, skb);
3325 while (len >= L2CAP_CMD_HDR_SIZE) {
3327 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3328 data += L2CAP_CMD_HDR_SIZE;
3329 len -= L2CAP_CMD_HDR_SIZE;
3331 cmd_len = le16_to_cpu(cmd.len);
3333 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Per spec, ident 0 is illegal; also bail if the claimed length
 * exceeds the bytes remaining in this skb. */
3335 if (cmd_len > len || !cmd.ident) {
3336 BT_DBG("corrupted command");
3341 case L2CAP_COMMAND_REJ:
3342 l2cap_command_rej(conn, &cmd, data);
3345 case L2CAP_CONN_REQ:
3346 err = l2cap_connect_req(conn, &cmd, data);
3349 case L2CAP_CONN_RSP:
3350 err = l2cap_connect_rsp(conn, &cmd, data);
3353 case L2CAP_CONF_REQ:
3354 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3357 case L2CAP_CONF_RSP:
3358 err = l2cap_config_rsp(conn, &cmd, data);
3361 case L2CAP_DISCONN_REQ:
3362 err = l2cap_disconnect_req(conn, &cmd, data);
3365 case L2CAP_DISCONN_RSP:
3366 err = l2cap_disconnect_rsp(conn, &cmd, data);
3369 case L2CAP_ECHO_REQ:
/* Echo payload is bounced back verbatim. */
3370 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3373 case L2CAP_ECHO_RSP:
3376 case L2CAP_INFO_REQ:
3377 err = l2cap_information_req(conn, &cmd, data);
3380 case L2CAP_INFO_RSP:
3381 err = l2cap_information_rsp(conn, &cmd, data);
3385 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* Handler failed: answer with a Command Reject. */
3391 struct l2cap_cmd_rej rej;
3392 BT_DBG("error %d", err);
3394 /* FIXME: Map err to a valid reason */
3395 rej.reason = cpu_to_le16(0);
3396 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the trailing 2-byte CRC16 FCS of an ERTM/streaming frame.
 * The CRC covers the L2CAP header (which has already been pulled, hence
 * skb->data - hdr_size) plus the payload. Returns nonzero on mismatch.
 * NOTE(review): the FCS is read at skb->data + skb->len *after* the
 * 2-byte skb_trim — this relies on the trimmed bytes still being in the
 * linear buffer; confirm ordering against upstream fixes. */
3406 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3408 u16 our_fcs, rcv_fcs;
3409 int hdr_size = L2CAP_HDR_SIZE + 2;
3411 if (pi->fcs == L2CAP_FCS_CRC16) {
3412 skb_trim(skb, skb->len - 2);
3413 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3414 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3416 if (our_fcs != rcv_fcs)
/* Acknowledge the peer after local conditions change: send RNR while
 * locally busy, retransmit if the remote was busy, try to push queued
 * I-frames, and fall back to an RR if nothing was sent. */
3422 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3424 struct l2cap_pinfo *pi = l2cap_pi(sk);
3427 pi->frames_sent = 0;
3429 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3431 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3432 control |= L2CAP_SUPER_RCV_NOT_READY;
3433 l2cap_send_sframe(pi, control);
3434 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3437 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3438 l2cap_retransmit_frames(sk);
/* send_lock serialises ERTM transmit with other senders. */
3440 spin_lock_bh(&pi->send_lock);
3441 l2cap_ertm_send(sk);
3442 spin_unlock_bh(&pi->send_lock);
/* Nothing went out and we are not busy: still ack with a plain RR. */
3444 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3445 pi->frames_sent == 0) {
3446 control |= L2CAP_SUPER_RCV_READY;
3447 l2cap_send_sframe(pi, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, kept ordered
 * by tx_seq distance from buffer_seq (modulo-64 sequence space).
 * Returns <0 (via the elided duplicate path) when tx_seq is already
 * queued. (Fragmented extract — intermediate lines elided.) */
3451 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3453 struct sk_buff *next_skb;
3454 struct l2cap_pinfo *pi = l2cap_pi(sk);
3455 int tx_seq_offset, next_tx_seq_offset;
3457 bt_cb(skb)->tx_seq = tx_seq;
3458 bt_cb(skb)->sar = sar;
3460 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue: just append. */
3462 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Normalise sequence distance into [0, 63]. */
3466 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3467 if (tx_seq_offset < 0)
3468 tx_seq_offset += 64;
/* Duplicate tx_seq already queued. */
3471 if (bt_cb(next_skb)->tx_seq == tx_seq)
3474 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3475 pi->buffer_seq) % 64;
3476 if (next_tx_seq_offset < 0)
3477 next_tx_seq_offset += 64;
/* Found the first queued frame that sorts after us: insert before. */
3479 if (next_tx_seq_offset > tx_seq_offset) {
3480 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3484 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3487 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
/* Sorts after everything currently queued. */
3489 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble an SDU from ERTM I-frames according to the SAR bits:
 * UNSEGMENTED frames are delivered directly; START allocates pi->sdu
 * and records sdu_len; CONTINUE/END append, with END delivering a clone
 * to the socket (SAR_RETRY handles a previously failed delivery).
 * On protocol violation the elided tail disconnects the channel. */
3494 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3496 struct l2cap_pinfo *pi = l2cap_pi(sk);
3497 struct sk_buff *_skb;
3500 switch (control & L2CAP_CTRL_SAR) {
3501 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame while mid-SDU is a protocol violation. */
3502 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3505 err = sock_queue_rcv_skb(sk, skb);
3511 case L2CAP_SDU_START:
3512 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
/* First two payload bytes of a START frame carry total SDU length. */
3515 pi->sdu_len = get_unaligned_le16(skb->data);
3517 if (pi->sdu_len > pi->imtu)
3520 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3524 /* pull sdu_len bytes only after alloc, because of Local Busy
3525 * condition we have to be sure that this will be executed
3526 * only once, i.e., when alloc does not fail */
3529 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3531 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3532 pi->partial_sdu_len = skb->len;
3535 case L2CAP_SDU_CONTINUE:
3536 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3542 pi->partial_sdu_len += skb->len;
3543 if (pi->partial_sdu_len > pi->sdu_len)
3546 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* END segment (default case): finish and deliver the SDU. */
3551 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
/* Skip the length/copy bookkeeping when retrying a failed
 * delivery — it was already accounted on the first attempt. */
3557 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3558 pi->partial_sdu_len += skb->len;
3560 if (pi->partial_sdu_len > pi->imtu)
3563 if (pi->partial_sdu_len != pi->sdu_len)
3566 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3569 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3571 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3575 err = sock_queue_rcv_skb(sk, _skb);
3578 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3582 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3583 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3597 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* Workqueue handler that drains the local-busy backlog: waits (in
 * 200 ms slices, up to L2CAP_LOCAL_BUSY_TRIES) for room, replays the
 * queued frames through reassembly, then signals "busy cleared" to the
 * peer with an RR+Poll and clears LOCAL_BUSY/RNR_SENT.
 * (Fragmented extract — intermediate lines elided.) */
3602 static void l2cap_busy_work(struct work_struct *work)
3604 DECLARE_WAITQUEUE(wait, current);
3605 struct l2cap_pinfo *pi =
3606 container_of(work, struct l2cap_pinfo, busy_work);
/* struct sock is the first member of l2cap_pinfo, so this cast works. */
3607 struct sock *sk = (struct sock *)pi;
3608 int n_tries = 0, timeo = HZ/5, err;
3609 struct sk_buff *skb;
3614 add_wait_queue(sk_sleep(sk), &wait);
3615 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3616 set_current_state(TASK_INTERRUPTIBLE);
/* Give up and disconnect after too many busy retries. */
3618 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3620 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3627 if (signal_pending(current)) {
3628 err = sock_intr_errno(timeo);
3633 timeo = schedule_timeout(timeo);
3636 err = sock_error(sk);
/* Replay every backlogged frame through SDU reassembly; on failure
 * the frame is put back and we go around again. */
3640 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3641 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3642 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3644 skb_queue_head(BUSY_QUEUE(sk), skb);
3648 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3655 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
/* Tell the peer we are ready again: RR with the Poll bit, then arm
 * the monitor timer and wait for the Final bit. */
3658 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3659 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3660 l2cap_send_sframe(pi, control);
3661 l2cap_pi(sk)->retry_count = 1;
3663 del_timer(&pi->retrans_timer);
3664 __mod_monitor_timer();
3666 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3669 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3670 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3672 BT_DBG("sk %p, Exit local busy", sk);
3674 set_current_state(TASK_RUNNING);
3675 remove_wait_queue(sk_sleep(sk), &wait);
/* Feed a received I-frame to reassembly, or — when delivery fails /
 * we are already busy — queue it, declare local busy to the peer via
 * RNR, and schedule l2cap_busy_work to drain the backlog. */
3680 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3682 struct l2cap_pinfo *pi = l2cap_pi(sk);
3685 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3686 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3687 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3691 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3693 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3697 /* Busy Condition */
3698 BT_DBG("sk %p, Enter local busy", sk);
3700 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3701 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3702 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3704 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3705 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3706 l2cap_send_sframe(pi, sctrl);
3708 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3710 del_timer(&pi->ack_timer);
3712 queue_work(_busy_wq, &pi->busy_work);
/* Streaming-mode SDU reassembly. Like the ERTM variant but lossy:
 * frames that violate SAR state are dropped rather than causing a
 * disconnect, since streaming mode tolerates loss.
 * (Fragmented extract — intermediate lines elided.) */
3717 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3719 struct l2cap_pinfo *pi = l2cap_pi(sk);
3720 struct sk_buff *_skb;
3724 * TODO: We have to notify the userland if some data is lost with the
3728 switch (control & L2CAP_CTRL_SAR) {
3729 case L2CAP_SDU_UNSEGMENTED:
3730 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3735 err = sock_queue_rcv_skb(sk, skb);
3741 case L2CAP_SDU_START:
3742 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* First two payload bytes carry the total SDU length. */
3747 pi->sdu_len = get_unaligned_le16(skb->data);
3750 if (pi->sdu_len > pi->imtu) {
3755 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3761 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3763 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3764 pi->partial_sdu_len = skb->len;
3768 case L2CAP_SDU_CONTINUE:
3769 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3772 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3774 pi->partial_sdu_len += skb->len;
3775 if (pi->partial_sdu_len > pi->sdu_len)
/* END segment (default case): append and deliver if complete. */
3783 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3786 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3788 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3789 pi->partial_sdu_len += skb->len;
3791 if (pi->partial_sdu_len > pi->imtu)
3794 if (pi->partial_sdu_len == pi->sdu_len) {
3795 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3796 err = sock_queue_rcv_skb(sk, _skb);
/* Drain consecutive frames from the SREJ queue once the missing frame
 * arrives, delivering each through reassembly and advancing
 * buffer_seq_srej / tx_seq in the modulo-64 space. */
3811 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3813 struct sk_buff *skb;
3816 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3817 if (bt_cb(skb)->tx_seq != tx_seq)
3820 skb = skb_dequeue(SREJ_QUEUE(sk));
3821 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3822 l2cap_ertm_reassembly_sdu(sk, skb, control);
3823 l2cap_pi(sk)->buffer_seq_srej =
3824 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3825 tx_seq = (tx_seq + 1) % 64;
/* Re-send the SREJ S-frame for tx_seq and rotate its list entry to the
 * back of SREJ_LIST so outstanding SREJs stay ordered. */
3829 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3831 struct l2cap_pinfo *pi = l2cap_pi(sk);
3832 struct srej_list *l, *tmp;
3835 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3836 if (l->tx_seq == tx_seq) {
3841 control = L2CAP_SUPER_SELECT_REJECT;
3842 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3843 l2cap_send_sframe(pi, control);
3845 list_add_tail(&l->list, SREJ_LIST(sk));
/* Send one SREJ per missing sequence number between expected_tx_seq
 * and the received tx_seq, recording each in SREJ_LIST.
 * NOTE(review): kzalloc(GFP_ATOMIC) result is dereferenced without a
 * NULL check — confirm against upstream fixes. */
3849 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3851 struct l2cap_pinfo *pi = l2cap_pi(sk);
3852 struct srej_list *new;
3855 while (tx_seq != pi->expected_tx_seq) {
3856 control = L2CAP_SUPER_SELECT_REJECT;
3857 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3858 l2cap_send_sframe(pi, control);
3860 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3861 new->tx_seq = pi->expected_tx_seq;
3862 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3863 list_add_tail(&new->list, SREJ_LIST(sk));
/* Skip past the frame that actually arrived. */
3865 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* ERTM receive path for I-frames: acknowledge req_seq, validate tx_seq
 * against the receive window, handle out-of-order frames via the SREJ
 * machinery, deliver in-order frames, and ack every num_to_ack frames.
 * (Fragmented extract — intermediate lines elided.) */
3868 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3870 struct l2cap_pinfo *pi = l2cap_pi(sk);
3871 u8 tx_seq = __get_txseq(rx_control);
3872 u8 req_seq = __get_reqseq(rx_control);
3873 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3874 int tx_seq_offset, expected_tx_seq_offset;
/* Ack roughly every sixth of the tx window. */
3875 int num_to_ack = (pi->tx_win/6) + 1;
3878 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
/* Final bit answers our earlier Poll: stop the monitor timer. */
3881 if (L2CAP_CTRL_FINAL & rx_control &&
3882 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3883 del_timer(&pi->monitor_timer);
3884 if (pi->unacked_frames > 0)
3885 __mod_retrans_timer();
3886 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3889 pi->expected_ack_seq = req_seq;
3890 l2cap_drop_acked_frames(sk);
3892 if (tx_seq == pi->expected_tx_seq)
3895 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3896 if (tx_seq_offset < 0)
3897 tx_seq_offset += 64;
3899 /* invalid tx_seq */
3900 if (tx_seq_offset >= pi->tx_win) {
3901 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* NOTE(review): '==' compares the whole conn_state word against a
 * single flag; elsewhere this flag is tested with '&'. Looks like it
 * should be a bitmask test — confirm against upstream. */
3905 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3908 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3909 struct srej_list *first;
3911 first = list_first_entry(SREJ_LIST(sk),
3912 struct srej_list, list);
/* This is the frame we SREJ'd first: queue it and close the gap. */
3913 if (tx_seq == first->tx_seq) {
3914 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3915 l2cap_check_srej_gap(sk, tx_seq);
3917 list_del(&first->list);
3920 if (list_empty(SREJ_LIST(sk))) {
3921 pi->buffer_seq = pi->buffer_seq_srej;
3922 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3924 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3927 struct srej_list *l;
3929 /* duplicated tx_seq */
3930 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3933 list_for_each_entry(l, SREJ_LIST(sk), list) {
3934 if (l->tx_seq == tx_seq) {
3935 l2cap_resend_srejframe(sk, tx_seq);
3939 l2cap_send_srejframe(sk, tx_seq);
3942 expected_tx_seq_offset =
3943 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3944 if (expected_tx_seq_offset < 0)
3945 expected_tx_seq_offset += 64;
3947 /* duplicated tx_seq */
3948 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-sequence frame: enter SREJ_SENT state. */
3951 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3953 BT_DBG("sk %p, Enter SREJ", sk);
3955 INIT_LIST_HEAD(SREJ_LIST(sk));
3956 pi->buffer_seq_srej = pi->buffer_seq;
3958 __skb_queue_head_init(SREJ_QUEUE(sk));
3959 __skb_queue_head_init(BUSY_QUEUE(sk));
3960 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3962 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3964 l2cap_send_srejframe(sk, tx_seq);
3966 del_timer(&pi->ack_timer);
/* Expected-sequence path: advance and deliver. */
3971 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3973 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3974 bt_cb(skb)->tx_seq = tx_seq;
3975 bt_cb(skb)->sar = sar;
3976 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3980 err = l2cap_push_rx_skb(sk, skb, rx_control);
3984 if (rx_control & L2CAP_CTRL_FINAL) {
3985 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3986 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3988 l2cap_retransmit_frames(sk);
3993 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3994 if (pi->num_acked == num_to_ack - 1)
/* RR (Receiver Ready) S-frame: the peer acks up to req_seq. A Poll bit
 * demands an immediate Final-bit answer; a Final bit may trigger
 * retransmission after REJ recovery; otherwise resume normal sending.
 * (Fragmented extract — intermediate lines elided.) */
4004 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
4006 struct l2cap_pinfo *pi = l2cap_pi(sk);
4008 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
4011 pi->expected_ack_seq = __get_reqseq(rx_control);
4012 l2cap_drop_acked_frames(sk);
4014 if (rx_control & L2CAP_CTRL_POLL) {
4015 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4016 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4017 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4018 (pi->unacked_frames > 0))
4019 __mod_retrans_timer();
4021 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4022 l2cap_send_srejtail(sk);
4024 l2cap_send_i_or_rr_or_rnr(sk);
4027 } else if (rx_control & L2CAP_CTRL_FINAL) {
4028 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4030 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4031 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4033 l2cap_retransmit_frames(sk);
4036 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4037 (pi->unacked_frames > 0))
4038 __mod_retrans_timer();
4040 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4041 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4044 spin_lock_bh(&pi->send_lock);
4045 l2cap_ertm_send(sk);
4046 spin_unlock_bh(&pi->send_lock);
/* REJ S-frame: the peer rejects from req_seq; ack the earlier frames
 * and retransmit. REJ_ACT suppresses a second retransmit when the
 * Final-bit answer to our Poll arrives later. */
4051 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4053 struct l2cap_pinfo *pi = l2cap_pi(sk);
4054 u8 tx_seq = __get_reqseq(rx_control);
4056 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4058 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4060 pi->expected_ack_seq = tx_seq;
4061 l2cap_drop_acked_frames(sk);
4063 if (rx_control & L2CAP_CTRL_FINAL) {
4064 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4065 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4067 l2cap_retransmit_frames(sk);
4069 l2cap_retransmit_frames(sk);
4071 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4072 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* SREJ S-frame: the peer selectively rejects one frame; retransmit
 * just that tx_seq. SREJ_ACT/srej_save_reqseq pair a Poll-carrying
 * SREJ with its later Final-bit duplicate to avoid resending twice. */
4075 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
4077 struct l2cap_pinfo *pi = l2cap_pi(sk);
4078 u8 tx_seq = __get_reqseq(rx_control);
4080 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4082 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4084 if (rx_control & L2CAP_CTRL_POLL) {
4085 pi->expected_ack_seq = tx_seq;
4086 l2cap_drop_acked_frames(sk);
4088 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4089 l2cap_retransmit_one_frame(sk, tx_seq);
4091 spin_lock_bh(&pi->send_lock);
4092 l2cap_ertm_send(sk);
4093 spin_unlock_bh(&pi->send_lock);
4095 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4096 pi->srej_save_reqseq = tx_seq;
4097 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4099 } else if (rx_control & L2CAP_CTRL_FINAL) {
4100 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4101 pi->srej_save_reqseq == tx_seq)
4102 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4104 l2cap_retransmit_one_frame(sk, tx_seq);
4106 l2cap_retransmit_one_frame(sk, tx_seq);
4107 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4108 pi->srej_save_reqseq = tx_seq;
4109 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* RNR S-frame: the peer is busy; mark REMOTE_BUSY, stop retransmitting
 * and answer a Poll with Final as required. */
4114 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4116 struct l2cap_pinfo *pi = l2cap_pi(sk);
4117 u8 tx_seq = __get_reqseq(rx_control);
4119 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4121 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4122 pi->expected_ack_seq = tx_seq;
4123 l2cap_drop_acked_frames(sk);
4125 if (rx_control & L2CAP_CTRL_POLL)
4126 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4128 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
4129 del_timer(&pi->retrans_timer);
4130 if (rx_control & L2CAP_CTRL_POLL)
4131 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4135 if (rx_control & L2CAP_CTRL_POLL)
4136 l2cap_send_srejtail(sk);
4138 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Dispatch a received S-frame to the per-type handler above, first
 * clearing WAIT_F when the Final bit answers our Poll. */
4141 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4143 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4145 if (L2CAP_CTRL_FINAL & rx_control &&
4146 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4147 del_timer(&l2cap_pi(sk)->monitor_timer);
4148 if (l2cap_pi(sk)->unacked_frames > 0)
4149 __mod_retrans_timer();
4150 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4153 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4154 case L2CAP_SUPER_RCV_READY:
4155 l2cap_data_channel_rrframe(sk, rx_control);
4158 case L2CAP_SUPER_REJECT:
4159 l2cap_data_channel_rejframe(sk, rx_control);
4162 case L2CAP_SUPER_SELECT_REJECT:
4163 l2cap_data_channel_srejframe(sk, rx_control);
4166 case L2CAP_SUPER_RCV_NOT_READY:
4167 l2cap_data_channel_rnrframe(sk, rx_control);
/* Entry point for data on a connection-oriented CID. Looks up the
 * channel and processes the payload per channel mode: BASIC queues the
 * skb directly; ERTM validates FCS/req_seq and routes I- vs S-frames;
 * STREAMING resyncs expected_tx_seq and reassembles.
 * (Fragmented extract — intermediate lines elided.) */
4175 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4178 struct l2cap_pinfo *pi;
4181 int len, next_tx_seq_offset, req_seq_offset;
4183 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4185 BT_DBG("unknown cid 0x%4.4x", cid);
4191 BT_DBG("sk %p, len %d", sk, skb->len);
4193 if (sk->sk_state != BT_CONNECTED)
4197 case L2CAP_MODE_BASIC:
4198 /* If socket recv buffers overflows we drop data here
4199 * which is *bad* because L2CAP has to be reliable.
4200 * But we don't have any other choice. L2CAP doesn't
4201 * provide flow control mechanism. */
4203 if (pi->imtu < skb->len)
4206 if (!sock_queue_rcv_skb(sk, skb))
4210 case L2CAP_MODE_ERTM:
/* First two payload bytes are the ERTM control field. */
4211 control = get_unaligned_le16(skb->data);
4216 * We can just drop the corrupted I-frame here.
4217 * Receiver will miss it and start proper recovery
4218 * procedures and ask retransmission.
4220 if (l2cap_check_fcs(pi, skb))
4223 if (__is_sar_start(control) && __is_iframe(control))
4226 if (pi->fcs == L2CAP_FCS_CRC16)
/* Payload must fit within the negotiated MPS. */
4229 if (len > pi->mps) {
4230 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* req_seq is invalid if it acknowledges frames we never sent,
 * i.e. lies outside [expected_ack_seq, next_tx_seq] mod 64. */
4234 req_seq = __get_reqseq(control);
4235 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4236 if (req_seq_offset < 0)
4237 req_seq_offset += 64;
4239 next_tx_seq_offset =
4240 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4241 if (next_tx_seq_offset < 0)
4242 next_tx_seq_offset += 64;
4244 /* check for invalid req-seq */
4245 if (req_seq_offset > next_tx_seq_offset) {
4246 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4250 if (__is_iframe(control)) {
4252 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4256 l2cap_data_channel_iframe(sk, control, skb);
4259 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4263 l2cap_data_channel_sframe(sk, control, skb);
4268 case L2CAP_MODE_STREAMING:
4269 control = get_unaligned_le16(skb->data);
4273 if (l2cap_check_fcs(pi, skb))
4276 if (__is_sar_start(control))
4279 if (pi->fcs == L2CAP_FCS_CRC16)
4282 if (len > pi->mps || len < 0 || __is_sframe(control))
/* Streaming is lossy: just resync on the received tx_seq. */
4285 tx_seq = __get_txseq(control);
4287 if (pi->expected_tx_seq == tx_seq)
4288 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4290 pi->expected_tx_seq = (tx_seq + 1) % 64;
4292 l2cap_streaming_reassembly_sdu(sk, skb, control);
4297 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Connectionless (G-frame) delivery: find a socket bound to the PSM
 * and queue the payload if it fits the socket's MTU. */
4311 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4315 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4319 BT_DBG("sk %p, len %d", sk, skb->len);
4321 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4324 if (l2cap_pi(sk)->imtu < skb->len)
4327 if (!sock_queue_rcv_skb(sk, skb))
/* Top-level demux of a complete L2CAP frame by CID: signalling,
 * connectionless, or connection-oriented data channel. */
4339 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4341 struct l2cap_hdr *lh = (void *) skb->data;
4345 skb_pull(skb, L2CAP_HDR_SIZE);
4346 cid = __le16_to_cpu(lh->cid);
4347 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length. */
4349 if (len != skb->len) {
4354 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4357 case L2CAP_CID_SIGNALING:
4358 l2cap_sig_channel(conn, skb);
4361 case L2CAP_CID_CONN_LESS:
4362 psm = get_unaligned_le16(skb->data);
4364 l2cap_conless_channel(conn, psm, skb);
4368 l2cap_data_channel(conn, cid, skb);
4373 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI asks whether to accept an incoming ACL connection: scan listening
 * sockets; an exact local-address match wins over a wildcard (BDADDR_ANY)
 * match, and role_switch adds HCI_LM_MASTER to the returned link mode. */
4375 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4377 int exact = 0, lm1 = 0, lm2 = 0;
4378 register struct sock *sk;
4379 struct hlist_node *node;
4381 if (type != ACL_LINK)
4384 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4386 /* Find listening sockets and check their link_mode */
4387 read_lock(&l2cap_sk_list.lock);
4388 sk_for_each(sk, node, &l2cap_sk_list.head) {
4389 if (sk->sk_state != BT_LISTEN)
4392 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4393 lm1 |= HCI_LM_ACCEPT;
4394 if (l2cap_pi(sk)->role_switch)
4395 lm1 |= HCI_LM_MASTER;
4397 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4398 lm2 |= HCI_LM_ACCEPT;
4399 if (l2cap_pi(sk)->role_switch)
4400 lm2 |= HCI_LM_MASTER;
4403 read_unlock(&l2cap_sk_list.lock);
4405 return exact ? lm1 : lm2;
/* HCI connection-complete callback: on success create/ready the L2CAP
 * connection object, otherwise tear it down with the mapped error. */
4408 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4410 struct l2cap_conn *conn;
4412 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4414 if (hcon->type != ACL_LINK)
4418 conn = l2cap_conn_add(hcon, status);
4420 l2cap_conn_ready(conn);
4422 l2cap_conn_del(hcon, bt_err(status));
/* HCI asks for our preferred disconnect reason for this link. */
4427 static int l2cap_disconn_ind(struct hci_conn *hcon)
4429 struct l2cap_conn *conn = hcon->l2cap_data;
4431 BT_DBG("hcon %p", hcon);
4433 if (hcon->type != ACL_LINK || !conn)
4436 return conn->disc_reason;
/* HCI disconnect-complete callback: drop the L2CAP connection. */
4439 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4441 BT_DBG("hcon %p reason %d", hcon, reason);
4443 if (hcon->type != ACL_LINK)
4446 l2cap_conn_del(hcon, bt_err(reason));
/* React to encryption being dropped on a connected channel: MEDIUM
 * security gets a 5 s grace timer, HIGH security closes immediately;
 * re-encryption cancels the pending timer. Raw/dgram sockets ignore it. */
4451 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4453 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4456 if (encrypt == 0x00) {
4457 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4458 l2cap_sock_clear_timer(sk);
4459 l2cap_sock_set_timer(sk, HZ * 5);
4460 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4461 __l2cap_sock_close(sk, ECONNREFUSED);
4463 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4464 l2cap_sock_clear_timer(sk);
/* HCI security (auth/encrypt) result for the link: walk every channel
 * on the connection; established channels get an encryption check,
 * BT_CONNECT channels proceed to send their Connect Request, and
 * BT_CONNECT2 channels answer the pending Connect Response with
 * SUCCESS or SEC_BLOCK. (Fragmented extract — lines elided.) */
4468 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4470 struct l2cap_chan_list *l;
4471 struct l2cap_conn *conn = hcon->l2cap_data;
4477 l = &conn->chan_list;
4479 BT_DBG("conn %p", conn);
4481 read_lock(&l->lock);
4483 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* A connect is already pending for this channel; skip it. */
4486 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4491 if (!status && (sk->sk_state == BT_CONNECTED ||
4492 sk->sk_state == BT_CONFIG)) {
4493 l2cap_check_encryption(sk, encrypt);
4498 if (sk->sk_state == BT_CONNECT) {
4500 struct l2cap_conn_req req;
4501 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4502 req.psm = l2cap_pi(sk)->psm;
4504 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4505 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
4507 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4508 L2CAP_CONN_REQ, sizeof(req), &req);
/* Failure path (elided condition): short timer to tear down. */
4510 l2cap_sock_clear_timer(sk);
4511 l2cap_sock_set_timer(sk, HZ / 10);
4513 } else if (sk->sk_state == BT_CONNECT2) {
4514 struct l2cap_conn_rsp rsp;
4518 sk->sk_state = BT_CONFIG;
4519 result = L2CAP_CR_SUCCESS;
4521 sk->sk_state = BT_DISCONN;
4522 l2cap_sock_set_timer(sk, HZ / 10);
4523 result = L2CAP_CR_SEC_BLOCK;
4526 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4527 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4528 rsp.result = cpu_to_le16(result);
4529 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4530 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4531 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4537 read_unlock(&l->lock);
/* Reassemble L2CAP frames from fragmented ACL data. ACL_START packets
 * carry the L2CAP header; if the fragment already contains the whole
 * frame it is processed directly, otherwise conn->rx_skb accumulates
 * continuation fragments until rx_len reaches zero. Any framing
 * inconsistency marks the connection unreliable (ECOMM).
 * (Fragmented extract — intermediate lines elided.) */
4542 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4544 struct l2cap_conn *conn = hcon->l2cap_data;
4546 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4549 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4551 if (flags & ACL_START) {
4552 struct l2cap_hdr *hdr;
/* A START while reassembly is in progress: discard stale state. */
4556 BT_ERR("Unexpected start frame (len %d)", skb->len);
4557 kfree_skb(conn->rx_skb);
4558 conn->rx_skb = NULL;
4560 l2cap_conn_unreliable(conn, ECOMM);
4564 BT_ERR("Frame is too short (len %d)", skb->len);
4565 l2cap_conn_unreliable(conn, ECOMM);
4569 hdr = (struct l2cap_hdr *) skb->data;
4570 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4572 if (len == skb->len) {
4573 /* Complete frame received */
4574 l2cap_recv_frame(conn, skb);
4578 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4580 if (skb->len > len) {
4581 BT_ERR("Frame is too long (len %d, expected len %d)",
4583 l2cap_conn_unreliable(conn, ECOMM);
4587 /* Allocate skb for the complete frame (with header) */
4588 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4592 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many bytes are still expected. */
4594 conn->rx_len = len - skb->len;
4596 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4598 if (!conn->rx_len) {
4599 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4600 l2cap_conn_unreliable(conn, ECOMM);
4604 if (skb->len > conn->rx_len) {
4605 BT_ERR("Fragment is too long (len %d, expected %d)",
4606 skb->len, conn->rx_len);
4607 kfree_skb(conn->rx_skb);
4608 conn->rx_skb = NULL;
4610 l2cap_conn_unreliable(conn, ECOMM);
4614 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4616 conn->rx_len -= skb->len;
4618 if (!conn->rx_len) {
4619 /* Complete frame received */
4620 l2cap_recv_frame(conn, conn->rx_skb);
4621 conn->rx_skb = NULL;
/* debugfs dump: one line per L2CAP socket with addresses, state, PSM,
 * CIDs, MTUs and security level. */
4630 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4633 struct hlist_node *node;
4635 read_lock_bh(&l2cap_sk_list.lock);
4637 sk_for_each(sk, node, &l2cap_sk_list.head) {
4638 struct l2cap_pinfo *pi = l2cap_pi(sk);
4640 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4641 batostr(&bt_sk(sk)->src),
4642 batostr(&bt_sk(sk)->dst),
4643 sk->sk_state, __le16_to_cpu(pi->psm),
4645 pi->imtu, pi->omtu, pi->sec_level);
4648 read_unlock_bh(&l2cap_sk_list.lock);
/* single_open wrapper for the debugfs file. */
4653 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4655 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* seq_file operations for /sys/kernel/debug/bluetooth/l2cap. */
4658 static const struct file_operations l2cap_debugfs_fops = {
4659 .open = l2cap_debugfs_open,
4661 .llseek = seq_lseek,
4662 .release = single_release,
4665 static struct dentry *l2cap_debugfs;
/* BSD-socket operations exposed for PF_BLUETOOTH/BTPROTO_L2CAP. */
4667 static const struct proto_ops l2cap_sock_ops = {
4668 .family = PF_BLUETOOTH,
4669 .owner = THIS_MODULE,
4670 .release = l2cap_sock_release,
4671 .bind = l2cap_sock_bind,
4672 .connect = l2cap_sock_connect,
4673 .listen = l2cap_sock_listen,
4674 .accept = l2cap_sock_accept,
4675 .getname = l2cap_sock_getname,
4676 .sendmsg = l2cap_sock_sendmsg,
4677 .recvmsg = l2cap_sock_recvmsg,
4678 .poll = bt_sock_poll,
4679 .ioctl = bt_sock_ioctl,
4680 .mmap = sock_no_mmap,
4681 .socketpair = sock_no_socketpair,
4682 .shutdown = l2cap_sock_shutdown,
4683 .setsockopt = l2cap_sock_setsockopt,
4684 .getsockopt = l2cap_sock_getsockopt
4687 static const struct net_proto_family l2cap_sock_family_ops = {
4688 .family = PF_BLUETOOTH,
4689 .owner = THIS_MODULE,
4690 .create = l2cap_sock_create,
/* Registration hooks connecting L2CAP into the HCI core. */
4693 static struct hci_proto l2cap_hci_proto = {
4695 .id = HCI_PROTO_L2CAP,
4696 .connect_ind = l2cap_connect_ind,
4697 .connect_cfm = l2cap_connect_cfm,
4698 .disconn_ind = l2cap_disconn_ind,
4699 .disconn_cfm = l2cap_disconn_cfm,
4700 .security_cfm = l2cap_security_cfm,
4701 .recv_acldata = l2cap_recv_acldata
4704 static int __init l2cap_init(void)
4708 err = proto_register(&l2cap_proto, 0);
4712 _busy_wq = create_singlethread_workqueue("l2cap");
4716 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4718 BT_ERR("L2CAP socket registration failed");
4722 err = hci_register_proto(&l2cap_hci_proto);
4724 BT_ERR("L2CAP protocol registration failed");
4725 bt_sock_unregister(BTPROTO_L2CAP);
4730 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4731 bt_debugfs, NULL, &l2cap_debugfs_fops);
4733 BT_ERR("Failed to create L2CAP debug file");
4736 BT_INFO("L2CAP ver %s", VERSION);
4737 BT_INFO("L2CAP socket layer initialized");
4742 proto_unregister(&l2cap_proto);
4746 static void __exit l2cap_exit(void)
4748 debugfs_remove(l2cap_debugfs);
4750 flush_workqueue(_busy_wq);
4751 destroy_workqueue(_busy_wq);
4753 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4754 BT_ERR("L2CAP socket unregistration failed");
4756 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4757 BT_ERR("L2CAP protocol unregistration failed");
4759 proto_unregister(&l2cap_proto);
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
module_init(l2cap_init);
module_exit(l2cap_exit);

/* Runtime toggle for Enhanced Retransmission Mode; defaults to off
 * (see enable_ertm at the top of the file), writable via sysfs (0644). */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
/* Auto-load this module when a BTPROTO_L2CAP (proto 0) socket is requested */
MODULE_ALIAS("bt-proto-0");