2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm = 0;
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 static const struct proto_ops l2cap_sock_ops;
65 static struct workqueue_struct *_busy_wq;
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71 static void l2cap_busy_work(struct work_struct *work);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
78 u8 code, u8 ident, u16 dlen, void *data);
80 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
82 /* ---- L2CAP timers ---- */
/* Socket timer expiry handler: derive an error reason from the socket
 * state and tear the channel down.
 * NOTE(review): 'reason' is declared and given its default on lines
 * elided from this extraction (presumably ETIMEDOUT) — confirm against
 * the full source; the locking around the close is also elided. */
83 static void l2cap_sock_timeout(unsigned long arg)
85 struct sock *sk = (struct sock *) arg;
88 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* A timeout while connected/configuring, or while connecting above the
 * SDP security level, is reported as a refused connection. */
92 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
93 reason = ECONNREFUSED;
94 else if (sk->sk_state == BT_CONNECT &&
95 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
96 reason = ECONNREFUSED;
100 __l2cap_sock_close(sk, reason);
/* Arm the per-socket timer to fire 'timeout' jiffies from now. */
108 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
110 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
111 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel the per-socket timer (drops the timer's socket reference). */
114 static void l2cap_sock_clear_timer(struct sock *sk)
116 BT_DBG("sock %p state %d", sk, sk->sk_state);
117 sk_stop_timer(sk, &sk->sk_timer);
120 /* ---- L2CAP channels ---- */
/* Linear scan of the connection's channel list for a matching destination
 * CID.  Caller must hold the list lock; returns the sock or NULL
 * (the return statements are on lines elided from this extraction). */
121 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
124 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
125 if (l2cap_pi(s)->dcid == cid)
/* Like __l2cap_get_chan_by_dcid() but keyed on the source CID.
 * Caller must hold the list lock. */
131 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
134 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
135 if (l2cap_pi(s)->scid == cid)
141 /* Find channel with given SCID.
142 * Returns locked socket */
/* Locking wrapper around __l2cap_get_chan_by_scid().
 * NOTE(review): the matching read_lock(&l->lock) and the bh_lock_sock()
 * that makes the "returns locked socket" contract true are on elided
 * lines — verify the pairing in the full source. */
143 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
147 s = __l2cap_get_chan_by_scid(l, cid);
150 read_unlock(&l->lock);
/* Find the channel whose last signalling request used 'ident'.
 * Caller must hold the list lock. */
154 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
157 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
158 if (l2cap_pi(s)->ident == ident)
/* Locking wrapper around __l2cap_get_chan_by_ident(); the read_lock and
 * socket-locking lines are elided here — see l2cap_get_chan_by_scid(). */
164 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
168 s = __l2cap_get_chan_by_ident(l, ident);
171 read_unlock(&l->lock);
/* Allocate the first free dynamic source CID in
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END) by probing the channel list.
 * Caller must hold the list lock (uses the unlocked lookup). */
175 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
177 u16 cid = L2CAP_CID_DYN_START;
179 for (; cid < L2CAP_CID_DYN_END; cid++) {
180 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push 'sk' onto the head of the doubly linked channel list.
 * Caller must hold the list write lock. */
187 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
/* Old head (if any) now points back at the new entry. */
192 l2cap_pi(l->head)->prev_c = sk;
194 l2cap_pi(sk)->next_c = l->head;
195 l2cap_pi(sk)->prev_c = NULL;
/* Unlink 'sk' from the channel list under the list write lock.
 * The head-update and NULL checks for prev/next sit on elided lines. */
199 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
201 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
203 write_lock_bh(&l->lock);
208 l2cap_pi(next)->prev_c = prev;
210 l2cap_pi(prev)->next_c = next;
211 write_unlock_bh(&l->lock);
/* Attach socket 'sk' to 'conn' and assign CIDs by socket type:
 * SEQPACKET/STREAM get a fresh dynamic SCID, DGRAM uses the
 * connectionless CID, anything else (raw) uses the signalling CID.
 * Caller must hold the channel-list write lock (see l2cap_chan_add). */
216 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
218 struct l2cap_chan_list *l = &conn->chan_list;
220 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
221 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 = "remote user terminated connection" default disconnect reason. */
223 conn->disc_reason = 0x13;
225 l2cap_pi(sk)->conn = conn;
227 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
228 /* Alloc CID for connection-oriented socket */
229 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
230 } else if (sk->sk_type == SOCK_DGRAM) {
231 /* Connectionless socket */
232 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
233 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
234 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
236 /* Raw socket can send/recv signalling messages only */
237 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
238 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
239 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
242 __l2cap_chan_link(l, sk);
/* Queue on the listening parent's accept queue when created from one. */
245 bt_accept_enqueue(parent, sk);
249 * Must be called on the locked socket. */
/* Detach the channel from its connection and mark the socket closed.
 * Must be called with the socket locked.  Stops timers, drops the hcon
 * reference, purges every ERTM queue and frees the SREJ list. */
250 static void l2cap_chan_del(struct sock *sk, int err)
252 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
253 struct sock *parent = bt_sk(sk)->parent;
255 l2cap_sock_clear_timer(sk);
257 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
260 /* Unlink from channel list */
261 l2cap_chan_unlink(&conn->chan_list, sk);
262 l2cap_pi(sk)->conn = NULL;
/* Release the ACL connection reference taken when the channel attached. */
263 hci_conn_put(conn->hcon);
266 sk->sk_state = BT_CLOSED;
267 sock_set_flag(sk, SOCK_ZAPPED);
/* If still on a listening parent's accept queue, remove and wake it. */
273 bt_accept_unlink(sk);
274 parent->sk_data_ready(parent, 0);
276 sk->sk_state_change(sk);
278 skb_queue_purge(TX_QUEUE(sk));
280 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
281 struct srej_list *l, *tmp;
283 del_timer(&l2cap_pi(sk)->retrans_timer);
284 del_timer(&l2cap_pi(sk)->monitor_timer);
285 del_timer(&l2cap_pi(sk)->ack_timer);
287 skb_queue_purge(SREJ_QUEUE(sk));
288 skb_queue_purge(BUSY_QUEUE(sk));
/* Free any outstanding SREJ bookkeeping entries (kfree on elided line). */
290 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
297 /* Service level security */
/* Map the channel's PSM and security level to an HCI authentication
 * type and request it on the underlying ACL link.
 * PSM 0x0001 (SDP) never bonds; everything else uses general bonding
 * graded by sec_level.  Returns hci_conn_security()'s verdict. */
298 static inline int l2cap_check_security(struct sock *sk)
300 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
303 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
304 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
305 auth_type = HCI_AT_NO_BONDING_MITM;
307 auth_type = HCI_AT_NO_BONDING;
/* SDP traffic is downgraded so it never triggers pairing. */
309 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
310 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
312 switch (l2cap_pi(sk)->sec_level) {
313 case BT_SECURITY_HIGH:
314 auth_type = HCI_AT_GENERAL_BONDING_MITM;
316 case BT_SECURITY_MEDIUM:
317 auth_type = HCI_AT_GENERAL_BONDING;
/* default: no bonding required. */
320 auth_type = HCI_AT_NO_BONDING;
325 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel-reserved range under conn->lock. */
329 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
333 /* Get next available identificator.
334 * 1 - 128 are used by kernel.
335 * 335 - 199 are reserved.
336 * 200 - 254 are used by utilities like l2ping, etc.
339 spin_lock_bh(&conn->lock);
/* Wrap back to the start of the kernel range (reset on elided line). */
341 if (++conn->tx_ident > 128)
346 spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command and push it out on the ACL link.
 * The NULL check on the built skb is on an elided line. */
351 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
353 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
355 BT_DBG("code 0x%2.2x", code);
360 hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM supervisory (S-)frame carrying 'control',
 * folding in any pending Final/Poll bits and an optional CRC16 FCS.
 * NOTE(review): the 'hlen += 2' for the FCS case and the allocation
 * failure check after bt_skb_alloc() are on elided lines — confirm. */
363 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
366 struct l2cap_hdr *lh;
367 struct l2cap_conn *conn = pi->conn;
368 struct sock *sk = (struct sock *)pi;
369 int count, hlen = L2CAP_HDR_SIZE + 2;
371 if (sk->sk_state != BT_CONNECTED)
374 if (pi->fcs == L2CAP_FCS_CRC16)
377 BT_DBG("pi %p, control 0x%2.2x", pi, control);
379 count = min_t(unsigned int, conn->mtu, hlen);
380 control |= L2CAP_CTRL_FRAME_TYPE;
/* Piggy-back a pending F-bit, then clear the request. */
382 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
383 control |= L2CAP_CTRL_FINAL;
384 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Likewise for a pending P-bit. */
387 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
388 control |= L2CAP_CTRL_POLL;
389 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
392 skb = bt_skb_alloc(count, GFP_ATOMIC);
396 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
397 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
398 lh->cid = cpu_to_le16(pi->dcid);
399 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers the whole frame except the FCS field itself. */
401 if (pi->fcs == L2CAP_FCS_CRC16) {
402 u16 fcs = crc16(0, (u8 *)lh, count - 2);
403 put_unaligned_le16(fcs, skb_put(skb, 2));
406 hci_send_acl(pi->conn->hcon, skb, 0);
/* Send RNR when locally busy (remembering that we advertised busy),
 * otherwise RR, always acknowledging up to buffer_seq. */
409 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
411 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
412 control |= L2CAP_SUPER_RCV_NOT_READY;
413 pi->conn_state |= L2CAP_CONN_RNR_SENT;
415 control |= L2CAP_SUPER_RCV_READY;
417 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
419 l2cap_send_sframe(pi, control);
/* True when no Connect Request is already outstanding for this channel. */
422 static inline int __l2cap_no_conn_pending(struct sock *sk)
424 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment: if the peer's feature mask is already
 * known (or being fetched), send a Connect Request once security allows;
 * otherwise first issue an Information Request for the feature mask. */
427 static void l2cap_do_start(struct sock *sk)
429 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
431 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange in flight but not finished: wait for its reply. */
432 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
435 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
436 struct l2cap_conn_req req;
437 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
438 req.psm = l2cap_pi(sk)->psm;
440 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
441 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
443 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
444 L2CAP_CONN_REQ, sizeof(req), &req);
/* else-branch: feature mask never requested yet — ask for it first. */
447 struct l2cap_info_req req;
448 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
450 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
451 conn->info_ident = l2cap_get_ident(conn);
/* Bound the wait for the info response. */
453 mod_timer(&conn->info_timer, jiffies +
454 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
456 l2cap_send_cmd(conn, conn->info_ident,
457 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether 'mode' is supported both locally and by the peer
 * ('feat_mask').  ERTM/streaming bits are only OR-ed into the local
 * mask under a condition on an elided line (presumably enable_ertm). */
461 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
463 u32 local_feat_mask = l2cap_feat_mask;
465 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
468 case L2CAP_MODE_ERTM:
469 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
470 case L2CAP_MODE_STREAMING:
471 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Abort the channel: drop pending TX, stop ERTM timers, send a
 * Disconnect Request and move the socket to BT_DISCONN.
 * 'err' is stored as sk_err on an elided line — confirm. */
477 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
479 struct l2cap_disconn_req req;
484 skb_queue_purge(TX_QUEUE(sk));
486 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
487 del_timer(&l2cap_pi(sk)->retrans_timer);
488 del_timer(&l2cap_pi(sk)->monitor_timer);
489 del_timer(&l2cap_pi(sk)->ack_timer);
492 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
493 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
494 l2cap_send_cmd(conn, l2cap_get_ident(conn),
495 L2CAP_DISCONN_REQ, sizeof(req), &req);
497 sk->sk_state = BT_DISCONN;
501 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection once features are known:
 * BT_CONNECT channels get a Connect Request (or are queued for closing
 * when their mode is unsupported and state2-device is set);
 * BT_CONNECT2 channels get a Connect Response based on security.
 * Channels to be closed are collected on a local list and closed after
 * the channel-list read lock is released, to avoid lock inversion. */
502 static void l2cap_conn_start(struct l2cap_conn *conn)
504 struct l2cap_chan_list *l = &conn->chan_list;
505 struct sock_del_list del, *tmp1, *tmp2;
508 BT_DBG("conn %p", conn);
510 INIT_LIST_HEAD(&del.list);
514 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented sockets participate. */
517 if (sk->sk_type != SOCK_SEQPACKET &&
518 sk->sk_type != SOCK_STREAM) {
523 if (sk->sk_state == BT_CONNECT) {
524 if (l2cap_check_security(sk) &&
525 __l2cap_no_conn_pending(sk)) {
526 struct l2cap_conn_req req;
528 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
530 && l2cap_pi(sk)->conf_state &
531 L2CAP_CONF_STATE2_DEVICE) {
/* NOTE(review): tmp1 is a struct sock_del_list * but the allocation is
 * sized with sizeof(struct srej_list) — if the structs differ in size
 * this under/over-allocates; should be sizeof(struct sock_del_list)
 * (or sizeof(*tmp1)).  Verify against the full source. */
532 tmp1 = kzalloc(sizeof(struct srej_list),
535 list_add_tail(&tmp1->list, &del.list);
540 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
541 req.psm = l2cap_pi(sk)->psm;
543 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
544 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
546 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
547 L2CAP_CONN_REQ, sizeof(req), &req);
549 } else if (sk->sk_state == BT_CONNECT2) {
550 struct l2cap_conn_rsp rsp;
551 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
552 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
554 if (l2cap_check_security(sk)) {
/* Deferred setup: answer "pending, authorization pending" and let the
 * listening owner decide via accept(). */
555 if (bt_sk(sk)->defer_setup) {
556 struct sock *parent = bt_sk(sk)->parent;
557 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
558 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
559 parent->sk_data_ready(parent, 0);
562 sk->sk_state = BT_CONFIG;
563 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
564 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied: report authentication pending. */
567 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
568 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
571 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
572 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
578 read_unlock(&l->lock);
/* Now close the channels collected above, outside the list lock. */
580 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
581 bh_lock_sock(tmp1->sk);
582 __l2cap_sock_close(tmp1->sk, ECONNRESET);
583 bh_unlock_sock(tmp1->sk);
584 list_del(&tmp1->list);
/* ACL link is up: raw/DGRAM sockets become connected immediately;
 * connection-oriented sockets in BT_CONNECT proceed with channel setup
 * (the l2cap_do_start call sits on an elided line). */
589 static void l2cap_conn_ready(struct l2cap_conn *conn)
591 struct l2cap_chan_list *l = &conn->chan_list;
594 BT_DBG("conn %p", conn);
598 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
601 if (sk->sk_type != SOCK_SEQPACKET &&
602 sk->sk_type != SOCK_STREAM) {
603 l2cap_sock_clear_timer(sk);
604 sk->sk_state = BT_CONNECTED;
605 sk->sk_state_change(sk);
606 } else if (sk->sk_state == BT_CONNECT)
612 read_unlock(&l->lock);
615 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate a link-reliability failure: sockets that demanded
 * reliability get 'err' reported (sk_err assignment on elided line). */
616 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
618 struct l2cap_chan_list *l = &conn->chan_list;
621 BT_DBG("conn %p", conn);
625 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
626 if (l2cap_pi(sk)->force_reliable)
630 read_unlock(&l->lock);
/* Info-request timer expired: give up on the feature-mask exchange and
 * proceed with channel setup using defaults. */
633 static void l2cap_info_timeout(unsigned long arg)
635 struct l2cap_conn *conn = (void *) arg;
637 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
638 conn->info_ident = 0;
640 l2cap_conn_start(conn);
/* Get-or-create the L2CAP connection object for an ACL link.
 * Returns the existing conn if present (early-return on elided line),
 * otherwise allocates and initializes a new one bound to 'hcon'. */
643 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
645 struct l2cap_conn *conn = hcon->l2cap_data;
650 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
654 hcon->l2cap_data = conn;
657 BT_DBG("hcon %p conn %p", hcon, conn);
/* Inherit the controller's ACL MTU and the link's addresses. */
659 conn->mtu = hcon->hdev->acl_mtu;
660 conn->src = &hcon->hdev->bdaddr;
661 conn->dst = &hcon->dst;
665 spin_lock_init(&conn->lock);
666 rwlock_init(&conn->chan_list.lock);
668 setup_timer(&conn->info_timer, l2cap_info_timeout,
669 (unsigned long) conn);
/* 0x13 = "remote user terminated connection" default disconnect reason. */
671 conn->disc_reason = 0x13;
/* Tear down the L2CAP connection: free any partial reassembly skb,
 * delete every remaining channel with 'err', cancel the info timer and
 * free the conn (kfree on an elided line).  Per-channel locking inside
 * the while loop is also elided — confirm against the full source. */
676 static void l2cap_conn_del(struct hci_conn *hcon, int err)
678 struct l2cap_conn *conn = hcon->l2cap_data;
684 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
686 kfree_skb(conn->rx_skb);
689 while ((sk = conn->chan_list.head)) {
691 l2cap_chan_del(sk, err);
696 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
697 del_timer_sync(&conn->info_timer);
699 hcon->l2cap_data = NULL;
/* Locked wrapper: attach 'sk' to 'conn' under the channel-list
 * write lock. */
703 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
705 struct l2cap_chan_list *l = &conn->chan_list;
706 write_lock_bh(&l->lock);
707 __l2cap_chan_add(conn, sk, parent);
708 write_unlock_bh(&l->lock);
711 /* ---- Socket interface ---- */
/* Find a socket bound to exactly (psm, src).  Used for bind() conflict
 * detection; caller must hold l2cap_sk_list.lock. */
712 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
715 struct hlist_node *node;
716 sk_for_each(sk, node, &l2cap_sk_list.head)
717 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
724 /* Find socket with psm and source bdaddr.
725 * Returns closest match.
/* Find the best listener for (psm, src): an exact source-address match
 * wins; a BDADDR_ANY wildcard match (remembered in sk1) is the
 * fallback.  'state' of 0 matches any socket state.
 * Caller must hold l2cap_sk_list.lock. */
727 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
729 struct sock *sk = NULL, *sk1 = NULL;
730 struct hlist_node *node;
732 sk_for_each(sk, node, &l2cap_sk_list.head) {
733 if (state && sk->sk_state != state)
736 if (l2cap_pi(sk)->psm == psm) {
738 if (!bacmp(&bt_sk(sk)->src, src))
/* Remember a wildcard-bound candidate (sk1 assignment elided). */
742 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node != NULL means the loop broke on an exact match. */
746 return node ? sk : sk1;
749 /* Find socket with given address (psm, src).
750 * Returns locked socket */
/* Locking wrapper around __l2cap_get_sock_by_psm(); the bh_lock_sock()
 * that backs the "returns locked socket" contract is elided here. */
751 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
754 read_lock(&l2cap_sk_list.lock);
755 s = __l2cap_get_sock_by_psm(state, psm, src);
758 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: release any skbs still queued on the socket. */
762 static void l2cap_sock_destruct(struct sock *sk)
766 skb_queue_purge(&sk->sk_receive_queue);
767 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent closed and zapped. */
770 static void l2cap_sock_cleanup_listen(struct sock *parent)
774 BT_DBG("parent %p", parent);
776 /* Close not yet accepted channels */
777 while ((sk = bt_accept_dequeue(parent, NULL)))
778 l2cap_sock_close(sk);
780 parent->sk_state = BT_CLOSED;
781 sock_set_flag(parent, SOCK_ZAPPED);
784 /* Kill socket (only if zapped and orphan)
785 * Must be called on unlocked socket.
/* Free a socket, but only once it is both zapped and orphaned
 * (no associated struct socket).  Must be called on an unlocked
 * socket; the final sock_put() is on an elided line. */
787 static void l2cap_sock_kill(struct sock *sk)
789 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
792 BT_DBG("sk %p state %d", sk, sk->sk_state);
794 /* Kill poor orphan */
795 bt_sock_unlink(&l2cap_sk_list, sk);
796 sock_set_flag(sk, SOCK_DEAD);
/* State-machine close.  The switch-case labels (BT_LISTEN, BT_CONNECTED
 * /BT_CONFIG, BT_CONNECT2, etc.) sit on lines elided from this
 * extraction; the visible bodies are, in order: listening cleanup,
 * graceful disconnect of an established channel, rejecting a half-open
 * incoming channel, and plain channel deletion. */
800 static void __l2cap_sock_close(struct sock *sk, int reason)
802 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
804 switch (sk->sk_state) {
806 l2cap_sock_cleanup_listen(sk);
/* Established/configuring: send Disconnect Request and wait with the
 * socket send timeout; non-connection-oriented types just delete. */
811 if (sk->sk_type == SOCK_SEQPACKET ||
812 sk->sk_type == SOCK_STREAM) {
813 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
815 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
816 l2cap_send_disconn_req(conn, sk, reason);
818 l2cap_chan_del(sk, reason);
/* BT_CONNECT2: refuse the pending incoming connection. */
822 if (sk->sk_type == SOCK_SEQPACKET ||
823 sk->sk_type == SOCK_STREAM) {
824 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
825 struct l2cap_conn_rsp rsp;
/* Deferred-setup rejects read as "security block" to the peer. */
828 if (bt_sk(sk)->defer_setup)
829 result = L2CAP_CR_SEC_BLOCK;
831 result = L2CAP_CR_BAD_PSM;
833 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
834 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
835 rsp.result = cpu_to_le16(result);
836 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
837 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
838 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
840 l2cap_chan_del(sk, reason);
845 l2cap_chan_del(sk, reason);
/* default: just zap the socket. */
849 sock_set_flag(sk, SOCK_ZAPPED);
854 /* Must be called on unlocked socket. */
/* Public close: stop the timer and run the state-machine close with
 * ECONNRESET.  Must be called on an unlocked socket (the lock/unlock
 * and the final l2cap_sock_kill are on elided lines). */
855 static void l2cap_sock_close(struct sock *sk)
857 l2cap_sock_clear_timer(sk);
859 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize per-socket L2CAP state.  With a 'parent' (accept() path)
 * the child inherits the listener's settings; otherwise defaults are
 * installed, with ERTM pre-selected for SOCK_STREAM when the module
 * parameter enable_ertm is set. */
864 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
866 struct l2cap_pinfo *pi = l2cap_pi(sk);
871 sk->sk_type = parent->sk_type;
872 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
/* Inherit the full configuration of the listening socket. */
874 pi->imtu = l2cap_pi(parent)->imtu;
875 pi->omtu = l2cap_pi(parent)->omtu;
876 pi->conf_state = l2cap_pi(parent)->conf_state;
877 pi->mode = l2cap_pi(parent)->mode;
878 pi->fcs = l2cap_pi(parent)->fcs;
879 pi->max_tx = l2cap_pi(parent)->max_tx;
880 pi->tx_win = l2cap_pi(parent)->tx_win;
881 pi->sec_level = l2cap_pi(parent)->sec_level;
882 pi->role_switch = l2cap_pi(parent)->role_switch;
883 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* No parent: install defaults. */
885 pi->imtu = L2CAP_DEFAULT_MTU;
887 if (enable_ertm && sk->sk_type == SOCK_STREAM) {
888 pi->mode = L2CAP_MODE_ERTM;
/* STATE2_DEVICE forces the configured mode (no fallback to basic). */
889 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
891 pi->mode = L2CAP_MODE_BASIC;
893 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
894 pi->fcs = L2CAP_FCS_CRC16;
895 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
896 pi->sec_level = BT_SECURITY_LOW;
898 pi->force_reliable = 0;
901 /* Default config options */
903 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Initialize the ERTM queues and the SREJ bookkeeping list. */
904 skb_queue_head_init(TX_QUEUE(sk));
905 skb_queue_head_init(SREJ_QUEUE(sk));
906 skb_queue_head_init(BUSY_QUEUE(sk));
907 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor: sizes sock allocations to hold l2cap_pinfo. */
910 static struct proto l2cap_proto = {
912 .owner = THIS_MODULE,
913 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialize an L2CAP socket: hook destructor,
 * set the connect timeout, arm the state timer and register the sock
 * in the global list.  NULL check after sk_alloc is elided. */
916 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
920 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
924 sock_init_data(sock, sk);
925 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
927 sk->sk_destruct = l2cap_sock_destruct;
928 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
930 sock_reset_flag(sk, SOCK_ZAPPED);
932 sk->sk_protocol = proto;
933 sk->sk_state = BT_OPEN;
935 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
937 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) entry: validate the socket type, gate SOCK_RAW behind
 * CAP_NET_RAW for non-kernel callers, then allocate and initialize.
 * The EPERM/ENOMEM returns are on elided lines. */
941 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
946 BT_DBG("sock %p", sock);
948 sock->state = SS_UNCONNECTED;
950 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
951 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
952 return -ESOCKTNOSUPPORT;
954 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
957 sock->ops = &l2cap_sock_ops;
959 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
963 l2cap_sock_init(sk, NULL);
/* bind(2): copy in the (possibly short) sockaddr_l2, require BT_OPEN,
 * enforce CAP_NET_BIND_SERVICE for privileged PSMs (< 0x1001), reject
 * duplicate (psm, bdaddr) bindings, then record source address/PSM.
 * lock_sock/release_sock and error-path jumps are on elided lines. */
967 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
969 struct sock *sk = sock->sk;
970 struct sockaddr_l2 la;
975 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Tolerate short sockaddrs: copy only what the caller supplied. */
978 memset(&la, 0, sizeof(la));
979 len = min_t(unsigned int, sizeof(la), alen);
980 memcpy(&la, addr, len);
987 if (sk->sk_state != BT_OPEN) {
/* Odd PSMs below 0x1001 are reserved/well-known — privileged. */
992 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
993 !capable(CAP_NET_BIND_SERVICE)) {
998 write_lock_bh(&l2cap_sk_list.lock);
1000 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
1003 /* Save source address */
1004 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
1005 l2cap_pi(sk)->psm = la.l2_psm;
1006 l2cap_pi(sk)->sport = la.l2_psm;
1007 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) default to SDP security level. */
1009 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
1010 __le16_to_cpu(la.l2_psm) == 0x0003)
1011 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1014 write_unlock_bh(&l2cap_sk_list.lock);
/* Establish (or reuse) an ACL link to the destination, pick the HCI
 * authentication type from socket type / PSM / security level, attach
 * the channel and start the connect timer.  Error paths, the
 * l2cap_do_start call for an already-connected link, and hdev put are
 * on elided lines. */
1021 static int l2cap_do_connect(struct sock *sk)
1023 bdaddr_t *src = &bt_sk(sk)->src;
1024 bdaddr_t *dst = &bt_sk(sk)->dst;
1025 struct l2cap_conn *conn;
1026 struct hci_conn *hcon;
1027 struct hci_dev *hdev;
1031 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1034 hdev = hci_get_route(dst, src);
1036 return -EHOSTUNREACH;
1038 hci_dev_lock_bh(hdev);
/* Raw sockets (e.g. dedicated-bonding tools) map sec_level directly. */
1042 if (sk->sk_type == SOCK_RAW) {
1043 switch (l2cap_pi(sk)->sec_level) {
1044 case BT_SECURITY_HIGH:
1045 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1047 case BT_SECURITY_MEDIUM:
1048 auth_type = HCI_AT_DEDICATED_BONDING;
1051 auth_type = HCI_AT_NO_BONDING;
/* SDP never bonds; mirrors l2cap_check_security(). */
1054 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1055 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1056 auth_type = HCI_AT_NO_BONDING_MITM;
1058 auth_type = HCI_AT_NO_BONDING;
1060 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
1061 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1063 switch (l2cap_pi(sk)->sec_level) {
1064 case BT_SECURITY_HIGH:
1065 auth_type = HCI_AT_GENERAL_BONDING_MITM;
1067 case BT_SECURITY_MEDIUM:
1068 auth_type = HCI_AT_GENERAL_BONDING;
1071 auth_type = HCI_AT_NO_BONDING;
1076 hcon = hci_connect(hdev, ACL_LINK, dst,
1077 l2cap_pi(sk)->sec_level, auth_type);
1081 conn = l2cap_conn_add(hcon, 0);
1089 /* Update source addr of the socket */
1090 bacpy(src, conn->src);
1092 l2cap_chan_add(conn, sk, NULL);
1094 sk->sk_state = BT_CONNECT;
1095 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Link already up: non-connection-oriented sockets finish right away. */
1097 if (hcon->state == BT_CONNECTED) {
1098 if (sk->sk_type != SOCK_SEQPACKET &&
1099 sk->sk_type != SOCK_STREAM) {
1100 l2cap_sock_clear_timer(sk);
1101 sk->sk_state = BT_CONNECTED;
1107 hci_dev_unlock_bh(hdev);
/* connect(2): validate address and channel mode, check the socket
 * state, record destination, start l2cap_do_connect() and optionally
 * wait for BT_CONNECTED.  The PSM-required check, ERTM gating, state
 * switch-cases and lock/release are on elided lines. */
1112 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1114 struct sock *sk = sock->sk;
1115 struct sockaddr_l2 la;
1118 BT_DBG("sk %p", sk);
1120 if (!addr || alen < sizeof(addr->sa_family) ||
1121 addr->sa_family != AF_BLUETOOTH)
1124 memset(&la, 0, sizeof(la));
1125 len = min_t(unsigned int, sizeof(la), alen);
1126 memcpy(&la, addr, len);
/* Connection-oriented sockets need a PSM (condition tail elided). */
1133 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1139 switch (l2cap_pi(sk)->mode) {
1140 case L2CAP_MODE_BASIC:
1142 case L2CAP_MODE_ERTM:
1143 case L2CAP_MODE_STREAMING:
1152 switch (sk->sk_state) {
1156 /* Already connecting */
1160 /* Already connected */
1174 /* Set destination address and psm */
1175 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1176 l2cap_pi(sk)->psm = la.l2_psm;
1178 err = l2cap_do_connect(sk);
/* Block (honoring O_NONBLOCK) until the channel is established. */
1183 err = bt_sock_wait_state(sk, BT_CONNECTED,
1184 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET/STREAM sockets may listen; ERTM and
 * streaming modes are gated (condition elided, presumably enable_ertm).
 * An unbound-PSM socket gets an automatic odd PSM from the dynamic
 * range 0x1001..0x10ff. */
1190 static int l2cap_sock_listen(struct socket *sock, int backlog)
1192 struct sock *sk = sock->sk;
1195 BT_DBG("sk %p backlog %d", sk, backlog);
1199 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1200 || sk->sk_state != BT_BOUND) {
1205 switch (l2cap_pi(sk)->mode) {
1206 case L2CAP_MODE_BASIC:
1208 case L2CAP_MODE_ERTM:
1209 case L2CAP_MODE_STREAMING:
/* No PSM bound yet: auto-assign the first free odd dynamic PSM. */
1218 if (!l2cap_pi(sk)->psm) {
1219 bdaddr_t *src = &bt_sk(sk)->src;
1224 write_lock_bh(&l2cap_sk_list.lock);
1226 for (psm = 0x1001; psm < 0x1100; psm += 2)
1227 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1228 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1229 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1234 write_unlock_bh(&l2cap_sk_list.lock);
1240 sk->sk_max_ack_backlog = backlog;
1241 sk->sk_ack_backlog = 0;
1242 sk->sk_state = BT_LISTEN;
/* accept(2): wake-one wait loop on the accept queue.  Non-blocking and
 * timeout handling (EAGAIN), release_sock around the schedule, and the
 * final 'done:' label are on elided lines. */
1249 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1251 DECLARE_WAITQUEUE(wait, current);
1252 struct sock *sk = sock->sk, *nsk;
1256 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1258 if (sk->sk_state != BT_LISTEN) {
1263 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1265 BT_DBG("sk %p timeo %ld", sk, timeo);
1267 /* Wait for an incoming connection. (wake-one). */
1268 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1269 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1270 set_current_state(TASK_INTERRUPTIBLE);
1277 timeo = schedule_timeout(timeo);
1278 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
/* Listener was closed while we slept. */
1280 if (sk->sk_state != BT_LISTEN) {
1285 if (signal_pending(current)) {
1286 err = sock_intr_errno(timeo);
1290 set_current_state(TASK_RUNNING);
1291 remove_wait_queue(sk_sleep(sk), &wait);
1296 newsock->state = SS_CONNECTED;
1298 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with either the peer
 * (psm/dst/dcid) or local (sport/src/scid) endpoint; the 'if (peer)'
 * line itself is elided between the two visible branches. */
1305 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1307 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1308 struct sock *sk = sock->sk;
1310 BT_DBG("sock %p, sk %p", sock, sk);
1312 addr->sa_family = AF_BLUETOOTH;
1313 *len = sizeof(struct sockaddr_l2);
1316 la->l2_psm = l2cap_pi(sk)->psm;
1317 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1318 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1320 la->l2_psm = l2cap_pi(sk)->sport;
1321 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1322 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* Block until every outstanding ERTM I-frame has been acknowledged or
 * the channel detaches.  'timeo' initialization, the zero-timeout
 * ETIMEDOUT handling and release/lock around the sleep are elided. */
1328 static int __l2cap_wait_ack(struct sock *sk)
1330 DECLARE_WAITQUEUE(wait, current);
1334 add_wait_queue(sk_sleep(sk), &wait);
1335 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1336 set_current_state(TASK_INTERRUPTIBLE);
1341 if (signal_pending(current)) {
1342 err = sock_intr_errno(timeo);
1347 timeo = schedule_timeout(timeo);
/* Bail out on any asynchronous socket error. */
1350 err = sock_error(sk);
1354 set_current_state(TASK_RUNNING);
1355 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: after remote_max_tx unanswered polls, abort the
 * channel; otherwise re-arm and poll the peer again with a P-bit RR/RNR.
 * bh_lock_sock/unlock around the body are on elided lines. */
1359 static void l2cap_monitor_timeout(unsigned long arg)
1361 struct sock *sk = (void *) arg;
1363 BT_DBG("sk %p", sk);
1366 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1367 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1372 l2cap_pi(sk)->retry_count++;
1373 __mod_monitor_timer();
1375 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* ERTM retransmission timer: start the monitor sequence (retry 1),
 * enter the WAIT_F state and poll the peer with a P-bit frame. */
1379 static void l2cap_retrans_timeout(unsigned long arg)
1381 struct sock *sk = (void *) arg;
1383 BT_DBG("sk %p", sk);
1386 l2cap_pi(sk)->retry_count = 1;
1387 __mod_monitor_timer();
1389 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1391 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Release transmitted I-frames acknowledged up to expected_ack_seq from
 * the head of the TX queue; stop the retransmission timer once nothing
 * is outstanding.  The loop-break on reaching the ack boundary and the
 * kfree_skb of each dequeued buffer are on elided lines. */
1395 static void l2cap_drop_acked_frames(struct sock *sk)
1397 struct sk_buff *skb;
1399 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1400 l2cap_pi(sk)->unacked_frames) {
1401 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1404 skb = skb_dequeue(TX_QUEUE(sk));
1407 l2cap_pi(sk)->unacked_frames--;
1410 if (!l2cap_pi(sk)->unacked_frames)
1411 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand a fully built L2CAP frame to the HCI layer on this channel's
 * ACL connection. */
1414 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1416 struct l2cap_pinfo *pi = l2cap_pi(sk);
1418 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1420 hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode transmit: clone each queued frame, stamp the TxSeq
 * into its control field, recompute the FCS over the clone, send it
 * and drop the original (no retransmission in streaming mode).
 * 'control'/'fcs' declarations and kfree_skb are on elided lines. */
1423 static int l2cap_streaming_send(struct sock *sk)
1425 struct sk_buff *skb, *tx_skb;
1426 struct l2cap_pinfo *pi = l2cap_pi(sk);
1429 while ((skb = sk->sk_send_head)) {
1430 tx_skb = skb_clone(skb, GFP_ATOMIC);
1432 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1433 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1434 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS covers everything except the trailing 2 FCS bytes themselves. */
1436 if (pi->fcs == L2CAP_FCS_CRC16) {
1437 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1438 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1441 l2cap_do_send(sk, tx_skb);
/* TxSeq is modulo-64 per the ERTM/streaming control field. */
1443 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1445 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1446 sk->sk_send_head = NULL;
1448 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1450 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the queued I-frame with the given TxSeq (SREJ recovery):
 * locate it in the TX queue, bail out if the retry limit is reached,
 * clone it, refresh ReqSeq/F-bit in the control field, recompute FCS
 * over the clone and resend. */
1456 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1458 struct l2cap_pinfo *pi = l2cap_pi(sk);
1459 struct sk_buff *skb, *tx_skb;
1462 skb = skb_peek(TX_QUEUE(sk));
/* Scan the queue for the frame carrying tx_seq. */
1467 if (bt_cb(skb)->tx_seq == tx_seq)
1470 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1473 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
/* Too many retries: the peer is unresponsive, abort the channel. */
1475 if (pi->remote_max_tx &&
1476 bt_cb(skb)->retries == pi->remote_max_tx) {
1477 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1481 tx_skb = skb_clone(skb, GFP_ATOMIC);
1482 bt_cb(skb)->retries++;
1483 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1485 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1486 control |= L2CAP_CTRL_FINAL;
1487 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1490 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1491 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1493 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1495 if (pi->fcs == L2CAP_FCS_CRC16) {
1496 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1497 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1500 l2cap_do_send(sk, tx_skb);
/* ERTM transmit path: while the TX window has room, clone the next
 * queued I-frame, stamp ReqSeq/TxSeq (and a pending F-bit) into the
 * clone's control field, send it, arm the retransmission timer and
 * track it as unacked.  Originals stay queued for retransmission. */
1503 static int l2cap_ertm_send(struct sock *sk)
1505 struct sk_buff *skb, *tx_skb;
1506 struct l2cap_pinfo *pi = l2cap_pi(sk);
1510 if (sk->sk_state != BT_CONNECTED)
1513 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1515 if (pi->remote_max_tx &&
1516 bt_cb(skb)->retries == pi->remote_max_tx) {
1517 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1521 tx_skb = skb_clone(skb, GFP_ATOMIC);
1523 bt_cb(skb)->retries++;
/* Keep only the SAR bits from the pre-built control field. */
1525 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1526 control &= L2CAP_CTRL_SAR;
1528 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1529 control |= L2CAP_CTRL_FINAL;
1530 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1532 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1533 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1534 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): the FCS below is computed from and written into 'skb'
 * (the queued original) while the frame actually transmitted is the
 * clone 'tx_skb'.  Both l2cap_streaming_send() and
 * l2cap_retransmit_one_frame() use tx_skb->data here — this looks like
 * a bug (sending a stale FCS after the control-field update above);
 * upstream later changed both uses to tx_skb.  Verify and fix. */
1537 if (pi->fcs == L2CAP_FCS_CRC16) {
1538 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1539 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1542 l2cap_do_send(sk, tx_skb);
1544 __mod_retrans_timer();
1546 bt_cb(skb)->tx_seq = pi->next_tx_seq;
/* Modulo-64 sequence space. */
1547 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1549 pi->unacked_frames++;
1552 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1553 sk->sk_send_head = NULL;
1555 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* REJ recovery: rewind the send pointer to the oldest unacked frame and
 * resend everything from expected_ack_seq via l2cap_ertm_send(). */
1563 static int l2cap_retransmit_frames(struct sock *sk)
1565 struct l2cap_pinfo *pi = l2cap_pi(sk);
1568 if (!skb_queue_empty(TX_QUEUE(sk)))
1569 sk->sk_send_head = TX_QUEUE(sk)->next;
1571 pi->next_tx_seq = pi->expected_ack_seq;
1572 ret = l2cap_ertm_send(sk);
/* Acknowledge received I-frames.  While locally busy an RNR S-frame is
 * sent (and noted via L2CAP_CONN_RNR_SENT); otherwise try to piggyback
 * the ack on pending outbound data, falling back to an RR S-frame. */
1576 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1578 struct sock *sk = (struct sock *)pi;
/* ReqSeq acknowledges everything up to buffer_seq */
1581 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1583 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1584 control |= L2CAP_SUPER_RCV_NOT_READY;
1585 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1586 l2cap_send_sframe(pi, control);
/* If at least one I-frame went out it already carried the ReqSeq ack */
1590 if (l2cap_ertm_send(sk) > 0)
1593 control |= L2CAP_SUPER_RCV_READY;
1594 l2cap_send_sframe(pi, control);
/* Send a SREJ S-frame with the F-bit set, requesting retransmission of
 * the sequence number recorded in the tail entry of the SREJ list. */
1597 static void l2cap_send_srejtail(struct sock *sk)
1599 struct srej_list *tail;
1602 control = L2CAP_SUPER_SELECT_REJECT;
1603 control |= L2CAP_CTRL_FINAL;
/* Last element of the list == most recently detected missing frame */
1605 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1606 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1608 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy 'len' bytes of user data from msg's iovec into skb.  'count'
 * bytes go into the head skb; the remainder is split into MTU-sized
 * continuation fragments chained on skb's frag_list.  Returns 0 or a
 * negative errno (copy fault or allocation failure). */
1611 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1613 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1614 struct sk_buff **frag;
1617 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1623 /* Continuation fragments (no L2CAP header) */
1624 frag = &skb_shinfo(skb)->frag_list;
1626 count = min_t(unsigned int, conn->mtu, len);
1628 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1631 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1637 frag = &(*frag)->next;
/* Build a connectionless (SOCK_DGRAM) PDU: basic L2CAP header followed
 * by the 2-byte PSM (hence hlen = L2CAP_HDR_SIZE + 2), then user data.
 * Returns the skb or an ERR_PTR on allocation/copy failure. */
1643 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1645 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1646 struct sk_buff *skb;
1647 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1648 struct l2cap_hdr *lh;
1650 BT_DBG("sk %p len %d", sk, (int)len);
/* Head skb holds at most one HCI-sized chunk; rest goes to fragments */
1652 count = min_t(unsigned int, (conn->mtu - hlen), len);
1653 skb = bt_skb_send_alloc(sk, count + hlen,
1654 msg->msg_flags & MSG_DONTWAIT, &err);
1656 return ERR_PTR(-ENOMEM);
1658 /* Create L2CAP header */
1659 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1660 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1661 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1662 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1664 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1665 if (unlikely(err < 0)) {
1667 return ERR_PTR(err);
/* Build a basic-mode PDU: plain L2CAP header plus user data, no control
 * or FCS fields.  Returns the skb or an ERR_PTR on failure. */
1672 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1674 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1675 struct sk_buff *skb;
1676 int err, count, hlen = L2CAP_HDR_SIZE;
1677 struct l2cap_hdr *lh;
1679 BT_DBG("sk %p len %d", sk, (int)len);
1681 count = min_t(unsigned int, (conn->mtu - hlen), len);
1682 skb = bt_skb_send_alloc(sk, count + hlen,
1683 msg->msg_flags & MSG_DONTWAIT, &err);
1685 return ERR_PTR(-ENOMEM);
1687 /* Create L2CAP header */
1688 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1689 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1690 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1692 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1693 if (unlikely(err < 0)) {
1695 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame: L2CAP header + 16-bit control word,
 * optionally a 2-byte SDU-length field (SAR start frames, when sdulen
 * is non-zero) and a trailing 2-byte FCS placeholder when CRC16 is in
 * use.  The FCS bytes are zero here; the real value is filled in at
 * transmit time.  Returns the skb or an ERR_PTR. */
1700 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1702 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1703 struct sk_buff *skb;
1704 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1705 struct l2cap_hdr *lh;
1707 BT_DBG("sk %p len %d", sk, (int)len);
1710 return ERR_PTR(-ENOTCONN);
/* Account for the trailing FCS in the header/trailer budget */
1715 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1718 count = min_t(unsigned int, (conn->mtu - hlen), len);
1719 skb = bt_skb_send_alloc(sk, count + hlen,
1720 msg->msg_flags & MSG_DONTWAIT, &err);
1722 return ERR_PTR(-ENOMEM);
1724 /* Create L2CAP header */
1725 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1726 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1727 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1728 put_unaligned_le16(control, skb_put(skb, 2));
/* SAR start frames carry the total SDU length after the control field */
1730 put_unaligned_le16(sdulen, skb_put(skb, 2));
1732 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1733 if (unlikely(err < 0)) {
1735 return ERR_PTR(err);
/* Reserve space for the FCS; computed later when the frame is sent */
1738 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1739 put_unaligned_le16(0, skb_put(skb, 2));
1741 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START frame (which
 * carries the total SDU length), zero or more CONTINUE frames, and an
 * END frame.  Frames are built on a local queue and only spliced onto
 * the socket's TX queue once the whole SDU segmented successfully. */
1745 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1747 struct l2cap_pinfo *pi = l2cap_pi(sk);
1748 struct sk_buff *skb;
1749 struct sk_buff_head sar_queue;
1753 skb_queue_head_init(&sar_queue);
1754 control = L2CAP_SDU_START;
/* START frame: payload is one MPS, sdulen argument is the full length */
1755 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1757 return PTR_ERR(skb);
1759 __skb_queue_tail(&sar_queue, skb);
1760 len -= pi->remote_mps;
1761 size += pi->remote_mps;
/* Middle frames take a full MPS; the final remainder becomes the END */
1766 if (len > pi->remote_mps) {
1767 control = L2CAP_SDU_CONTINUE;
1768 buflen = pi->remote_mps;
1770 control = L2CAP_SDU_END;
1774 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* On failure drop all frames built so far — all or nothing */
1776 skb_queue_purge(&sar_queue);
1777 return PTR_ERR(skb);
1780 __skb_queue_tail(&sar_queue, skb);
1784 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1785 if (sk->sk_send_head == NULL)
1786 sk->sk_send_head = sar_queue.next;
/* sendmsg() handler: dispatches on socket type and channel mode.
 * SOCK_DGRAM sends a connectionless PDU immediately; basic mode sends
 * a single PDU bounded by the outgoing MTU; ERTM/streaming queue one
 * unsegmented I-frame or SAR-segment the SDU, then kick the transmit
 * engine (unless the peer is busy or we are waiting for an F-bit). */
1791 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1793 struct sock *sk = sock->sk;
1794 struct l2cap_pinfo *pi = l2cap_pi(sk);
1795 struct sk_buff *skb;
1799 BT_DBG("sock %p, sk %p", sock, sk);
1801 err = sock_error(sk);
1805 if (msg->msg_flags & MSG_OOB)
1810 if (sk->sk_state != BT_CONNECTED) {
1815 /* Connectionless channel */
1816 if (sk->sk_type == SOCK_DGRAM) {
1817 skb = l2cap_create_connless_pdu(sk, msg, len)
1821 l2cap_do_send(sk, skb);
1828 case L2CAP_MODE_BASIC:
1829 /* Check outgoing MTU */
1830 if (len > pi->omtu) {
1835 /* Create a basic PDU */
1836 skb = l2cap_create_basic_pdu(sk, msg, len);
1842 l2cap_do_send(sk, skb);
1846 case L2CAP_MODE_ERTM:
1847 case L2CAP_MODE_STREAMING:
1848 /* Entire SDU fits into one PDU */
1849 if (len <= pi->remote_mps) {
1850 control = L2CAP_SDU_UNSEGMENTED;
1851 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1856 __skb_queue_tail(TX_QUEUE(sk), skb);
1858 if (sk->sk_send_head == NULL)
1859 sk->sk_send_head = skb;
1862 /* Segment SDU into multiple PDUs */
1863 err = l2cap_sar_segment_sdu(sk, msg, len);
1868 if (pi->mode == L2CAP_MODE_STREAMING) {
1869 err = l2cap_streaming_send(sk);
/* Fix: L2CAP_CONN_WAIT_F must be tested with bitwise AND; the
 * original logical && made the test "conn_state is non-zero",
 * so frames were wrongly held back whenever REMOTE_BUSY was set. */
1871 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1872 pi->conn_state & L2CAP_CONN_WAIT_F) {
1876 err = l2cap_ertm_send(sk);
1884 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg() handler.  For a connection deferred via BT_DEFER_SETUP the
 * first read acts as acceptance: send the pending connect response and
 * move to BT_CONFIG before delegating to the generic bt_sock_recvmsg(). */
1893 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1895 struct sock *sk = sock->sk;
1899 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1900 struct l2cap_conn_rsp rsp;
1902 sk->sk_state = BT_CONFIG;
/* In the response, our dcid is the peer's source CID and vice versa */
1904 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1905 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1906 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1907 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1908 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1909 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1917 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler: L2CAP_OPTIONS (mtu/mode/fcs/
 * tx window etc.) and L2CAP_LM link-mode flags.  Current values are
 * loaded first so a short copy_from_user leaves the rest unchanged. */
1920 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1922 struct sock *sk = sock->sk;
1923 struct l2cap_options opts;
1927 BT_DBG("sk %p", sk);
/* Pre-fill with current settings; user may supply a partial struct */
1933 opts.imtu = l2cap_pi(sk)->imtu;
1934 opts.omtu = l2cap_pi(sk)->omtu;
1935 opts.flush_to = l2cap_pi(sk)->flush_to;
1936 opts.mode = l2cap_pi(sk)->mode;
1937 opts.fcs = l2cap_pi(sk)->fcs;
1938 opts.max_tx = l2cap_pi(sk)->max_tx;
1939 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1941 len = min_t(unsigned int, sizeof(opts), optlen);
1942 if (copy_from_user((char *) &opts, optval, len)) {
/* Reject TX windows larger than the spec maximum */
1947 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1952 l2cap_pi(sk)->mode = opts.mode;
1953 switch (l2cap_pi(sk)->mode) {
1954 case L2CAP_MODE_BASIC:
1955 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
1957 case L2CAP_MODE_ERTM:
1958 case L2CAP_MODE_STREAMING:
1967 l2cap_pi(sk)->imtu = opts.imtu;
1968 l2cap_pi(sk)->omtu = opts.omtu;
1969 l2cap_pi(sk)->fcs = opts.fcs;
1970 l2cap_pi(sk)->max_tx = opts.max_tx;
1971 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1975 if (get_user(opt, (u32 __user *) optval)) {
/* Map legacy link-mode bits onto the security-level model */
1980 if (opt & L2CAP_LM_AUTH)
1981 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1982 if (opt & L2CAP_LM_ENCRYPT)
1983 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1984 if (opt & L2CAP_LM_SECURE)
1985 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1987 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1988 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* setsockopt() entry point.  SOL_L2CAP is routed to the legacy handler;
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP here. */
2000 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
2002 struct sock *sk = sock->sk;
2003 struct bt_security sec;
2007 BT_DBG("sk %p", sk);
2009 if (level == SOL_L2CAP)
2010 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
2012 if (level != SOL_BLUETOOTH)
2013 return -ENOPROTOOPT;
/* BT_SECURITY only makes sense for connection-oriented or raw sockets */
2019 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2020 && sk->sk_type != SOCK_RAW) {
2025 sec.level = BT_SECURITY_LOW;
2027 len = min_t(unsigned int, sizeof(sec), optlen);
2028 if (copy_from_user((char *) &sec, optval, len)) {
2033 if (sec.level < BT_SECURITY_LOW ||
2034 sec.level > BT_SECURITY_HIGH) {
2039 l2cap_pi(sk)->sec_level = sec.level;
2042 case BT_DEFER_SETUP:
/* Deferred setup can only be toggled before the socket is connected */
2043 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2048 if (get_user(opt, (u32 __user *) optval)) {
2053 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt handler: L2CAP_OPTIONS, L2CAP_LM (derived
 * from the stored security level and role/reliability flags) and
 * L2CAP_CONNINFO (HCI handle + device class of the underlying ACL). */
2065 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2067 struct sock *sk = sock->sk;
2068 struct l2cap_options opts;
2069 struct l2cap_conninfo cinfo;
2073 BT_DBG("sk %p", sk);
2075 if (get_user(len, optlen))
2082 opts.imtu = l2cap_pi(sk)->imtu;
2083 opts.omtu = l2cap_pi(sk)->omtu;
2084 opts.flush_to = l2cap_pi(sk)->flush_to;
2085 opts.mode = l2cap_pi(sk)->mode;
2086 opts.fcs = l2cap_pi(sk)->fcs;
2087 opts.max_tx = l2cap_pi(sk)->max_tx;
2088 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2090 len = min_t(unsigned int, len, sizeof(opts));
2091 if (copy_to_user(optval, (char *) &opts, len))
/* Translate sec_level back into the legacy link-mode bitmask */
2097 switch (l2cap_pi(sk)->sec_level) {
2098 case BT_SECURITY_LOW:
2099 opt = L2CAP_LM_AUTH;
2101 case BT_SECURITY_MEDIUM:
2102 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2104 case BT_SECURITY_HIGH:
2105 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2113 if (l2cap_pi(sk)->role_switch)
2114 opt |= L2CAP_LM_MASTER;
2116 if (l2cap_pi(sk)->force_reliable)
2117 opt |= L2CAP_LM_RELIABLE;
2119 if (put_user(opt, (u32 __user *) optval))
2123 case L2CAP_CONNINFO:
/* Connection info is valid once connected, or for a deferred accept */
2124 if (sk->sk_state != BT_CONNECTED &&
2125 !(sk->sk_state == BT_CONNECT2 &&
2126 bt_sk(sk)->defer_setup)) {
2131 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2132 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2134 len = min_t(unsigned int, len, sizeof(cinfo));
2135 if (copy_to_user(optval, (char *) &cinfo, len))
/* getsockopt() entry point.  SOL_L2CAP is routed to the legacy handler;
 * SOL_BLUETOOTH returns BT_SECURITY and BT_DEFER_SETUP. */
2149 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2151 struct sock *sk = sock->sk;
2152 struct bt_security sec;
2155 BT_DBG("sk %p", sk);
2157 if (level == SOL_L2CAP)
2158 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2160 if (level != SOL_BLUETOOTH)
2161 return -ENOPROTOOPT;
2163 if (get_user(len, optlen))
/* BT_SECURITY applies to connection-oriented or raw sockets only */
2170 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2171 && sk->sk_type != SOCK_RAW) {
2176 sec.level = l2cap_pi(sk)->sec_level;
2178 len = min_t(unsigned int, len, sizeof(sec));
2179 if (copy_to_user(optval, (char *) &sec, len))
2184 case BT_DEFER_SETUP:
2185 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2190 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown() handler.  For ERTM, first wait for outstanding frames to
 * be acknowledged, then close the channel; honour SO_LINGER by waiting
 * for BT_CLOSED within the configured linger time. */
2204 static int l2cap_sock_shutdown(struct socket *sock, int how)
2206 struct sock *sk = sock->sk;
2209 BT_DBG("sock %p, sk %p", sock, sk);
2215 if (!sk->sk_shutdown) {
/* Drain unacked ERTM frames before tearing the channel down */
2216 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2217 err = __l2cap_wait_ack(sk);
2219 sk->sk_shutdown = SHUTDOWN_MASK;
2220 l2cap_sock_clear_timer(sk);
2221 __l2cap_sock_close(sk, 0);
2223 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2224 err = bt_sock_wait_state(sk, BT_CLOSED,
2228 if (!err && sk->sk_err)
/* release() handler: full shutdown (both directions) then kill the
 * socket.  The shutdown result is propagated to the caller. */
2235 static int l2cap_sock_release(struct socket *sock)
2237 struct sock *sk = sock->sk;
2240 BT_DBG("sock %p, sk %p", sock, sk);
2245 err = l2cap_sock_shutdown(sock, 2);
2248 l2cap_sock_kill(sk);
/* Called when channel configuration completes.  Clears config state and
 * wakes whoever is waiting: the connect()er for an outgoing channel, or
 * the listening parent's accept() for an incoming one. */
2252 static void l2cap_chan_ready(struct sock *sk)
2254 struct sock *parent = bt_sk(sk)->parent;
2256 BT_DBG("sk %p, parent %p", sk, parent);
2258 l2cap_pi(sk)->conf_state = 0;
2259 l2cap_sock_clear_timer(sk);
2262 /* Outgoing channel.
2263 * Wake up socket sleeping on connect.
2265 sk->sk_state = BT_CONNECTED;
2266 sk->sk_state_change(sk);
2268 /* Incoming channel.
2269 * Wake up socket sleeping on accept.
2271 parent->sk_data_ready(parent, 0);
2275 /* Copy frame to all raw sockets on that connection */
2276 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2278 struct l2cap_chan_list *l = &conn->chan_list;
2279 struct sk_buff *nskb;
2282 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under its read lock */
2284 read_lock(&l->lock);
2285 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2286 if (sk->sk_type != SOCK_RAW)
2289 /* Don't send frame to the socket it came from */
/* Each raw socket gets its own clone; drop silently on OOM */
2292 nskb = skb_clone(skb, GFP_ATOMIC);
2296 if (sock_queue_rcv_skb(sk, nskb))
2299 read_unlock(&l->lock);
2302 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling-channel PDU: L2CAP header (CID 0x0001),
 * command header (code/ident/len) and dlen bytes of payload, fragmented
 * onto frag_list when it exceeds the connection MTU.  Returns NULL on
 * allocation failure. */
2303 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2304 u8 code, u8 ident, u16 dlen, void *data)
2306 struct sk_buff *skb, **frag;
2307 struct l2cap_cmd_hdr *cmd;
2308 struct l2cap_hdr *lh;
2311 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2312 conn, code, ident, dlen);
2314 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2315 count = min_t(unsigned int, conn->mtu, len);
2317 skb = bt_skb_alloc(count, GFP_ATOMIC);
2321 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2322 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2323 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2325 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2328 cmd->len = cpu_to_le16(dlen);
/* First chunk of payload goes into the head skb */
2331 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2332 memcpy(skb_put(skb, count), data, count);
2338 /* Continuation fragments (no L2CAP header) */
2339 frag = &skb_shinfo(skb)->frag_list;
2341 count = min_t(unsigned int, conn->mtu, len);
2343 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2347 memcpy(skb_put(*frag, count), data, count);
2352 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: returns its total length
 * (header + value) and reports type, value length and the value itself
 * (converted from little-endian for 1/2/4-byte options; a pointer into
 * the buffer for anything longer). */
2362 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2364 struct l2cap_conf_opt *opt = *ptr;
2367 len = L2CAP_CONF_OPT_SIZE + opt->len;
2375 *val = *((u8 *) opt->val);
2379 *val = __le16_to_cpu(*((__le16 *) opt->val));
2383 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Variable-length option: hand back a pointer instead of a value */
2387 *val = (unsigned long) opt->val;
2391 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr, encoding 1/2/4-byte values
 * in little-endian and memcpy'ing longer ones (val is then a pointer).
 * Advances *ptr past the option. */
2395 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2397 struct l2cap_conf_opt *opt = *ptr;
2399 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2406 *((u8 *) opt->val) = val;
2410 *((__le16 *) opt->val) = cpu_to_le16(val);
2414 *((__le32 *) opt->val) = cpu_to_le32(val);
2418 memcpy(opt->val, (void *) val, len);
2422 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Ack timer callback: sends a (possibly delayed) acknowledgement for
 * received I-frames when no outbound frame carried one in time. */
2425 static void l2cap_ack_timeout(unsigned long arg)
2427 struct sock *sk = (void *) arg;
2430 l2cap_send_ack(l2cap_pi(sk));
/* Initialise per-channel ERTM state: sequence counters, the retransmit/
 * monitor/ack timers, the SREJ and busy queues, and the deferred busy
 * worker.  Also reroutes backlog processing to the ERTM data handler. */
2434 static inline void l2cap_ertm_init(struct sock *sk)
2436 l2cap_pi(sk)->expected_ack_seq = 0;
2437 l2cap_pi(sk)->unacked_frames = 0;
2438 l2cap_pi(sk)->buffer_seq = 0;
2439 l2cap_pi(sk)->num_acked = 0;
2440 l2cap_pi(sk)->frames_sent = 0;
2442 setup_timer(&l2cap_pi(sk)->retrans_timer,
2443 l2cap_retrans_timeout, (unsigned long) sk);
2444 setup_timer(&l2cap_pi(sk)->monitor_timer,
2445 l2cap_monitor_timeout, (unsigned long) sk);
2446 setup_timer(&l2cap_pi(sk)->ack_timer,
2447 l2cap_ack_timeout, (unsigned long) sk);
2449 __skb_queue_head_init(SREJ_QUEUE(sk));
2450 __skb_queue_head_init(BUSY_QUEUE(sk));
2452 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
2454 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/* Pick the channel mode to use: keep ERTM/streaming only if the remote
 * feature mask supports it, otherwise fall back to basic mode. */
2457 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2460 case L2CAP_MODE_STREAMING:
2461 case L2CAP_MODE_ERTM:
2462 if (l2cap_mode_supported(mode, remote_feat_mask))
2466 return L2CAP_MODE_BASIC;
/* Build our outgoing configuration request into 'data': MTU option when
 * non-default, an RFC option describing the chosen mode, and an FCS
 * option when both sides allow disabling it.  On the first request the
 * mode may be downgraded based on the remote feature mask.  Returns the
 * total request length. */
2470 static int l2cap_build_conf_req(struct sock *sk, void *data)
2472 struct l2cap_pinfo *pi = l2cap_pi(sk);
2473 struct l2cap_conf_req *req = data;
2474 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2475 void *ptr = req->data;
2477 BT_DBG("sk %p", sk);
/* Only renegotiate the mode on the very first config exchange */
2479 if (pi->num_conf_req || pi->num_conf_rsp)
2483 case L2CAP_MODE_STREAMING:
2484 case L2CAP_MODE_ERTM:
2485 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
2490 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2496 case L2CAP_MODE_BASIC:
2497 if (pi->imtu != L2CAP_DEFAULT_MTU)
2498 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2500 rfc.mode = L2CAP_MODE_BASIC;
2502 rfc.max_transmit = 0;
2503 rfc.retrans_timeout = 0;
2504 rfc.monitor_timeout = 0;
2505 rfc.max_pdu_size = 0;
2509 case L2CAP_MODE_ERTM:
2510 rfc.mode = L2CAP_MODE_ERTM;
2511 rfc.txwin_size = pi->tx_win;
2512 rfc.max_transmit = pi->max_tx;
2513 rfc.retrans_timeout = 0;
2514 rfc.monitor_timeout = 0;
2515 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Cap the PDU size so a full I-frame (+overhead) fits the ACL MTU */
2516 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2517 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2519 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
/* Offer to drop the FCS if we don't want it and the peer agreed */
2522 if (pi->fcs == L2CAP_FCS_NONE ||
2523 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2524 pi->fcs = L2CAP_FCS_NONE;
2525 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2529 case L2CAP_MODE_STREAMING:
2530 rfc.mode = L2CAP_MODE_STREAMING;
2532 rfc.max_transmit = 0;
2533 rfc.retrans_timeout = 0;
2534 rfc.monitor_timeout = 0;
2535 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2536 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2537 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2539 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2542 if (pi->fcs == L2CAP_FCS_NONE ||
2543 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2544 pi->fcs = L2CAP_FCS_NONE;
2545 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2550 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2551 (unsigned long) &rfc);
2553 /* FIXME: Need actual value of the flush timeout */
2554 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2555 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2557 req->dcid = cpu_to_le16(pi->dcid);
2558 req->flags = cpu_to_le16(0);
/* Parse the peer's buffered configuration request (pi->conf_req) and
 * build our response into 'data'.  Known options (MTU, flush timeout,
 * QoS, RFC, FCS) are consumed; unknown non-hint options are echoed back
 * with L2CAP_CONF_UNKNOWN.  On success the agreed mode parameters are
 * stored and the RFC option is returned to the peer.  Returns the
 * response length or -ECONNREFUSED when modes cannot be reconciled. */
2563 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2565 struct l2cap_pinfo *pi = l2cap_pi(sk);
2566 struct l2cap_conf_rsp *rsp = data;
2567 void *ptr = rsp->data;
2568 void *req = pi->conf_req;
2569 int len = pi->conf_len;
2570 int type, hint, olen;
2572 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2573 u16 mtu = L2CAP_DEFAULT_MTU;
2574 u16 result = L2CAP_CONF_SUCCESS;
2576 BT_DBG("sk %p", sk);
2578 while (len >= L2CAP_CONF_OPT_SIZE) {
2579 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2581 hint = type & L2CAP_CONF_HINT;
2582 type &= L2CAP_CONF_MASK;
2585 case L2CAP_CONF_MTU:
2589 case L2CAP_CONF_FLUSH_TO:
2593 case L2CAP_CONF_QOS:
2596 case L2CAP_CONF_RFC:
2597 if (olen == sizeof(rfc))
2598 memcpy(&rfc, (void *) val, olen);
2601 case L2CAP_CONF_FCS:
2602 if (val == L2CAP_FCS_NONE)
2603 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: reject it by listing its type */
2611 result = L2CAP_CONF_UNKNOWN;
2612 *((u8 *) ptr++) = type;
2617 if (pi->num_conf_rsp || pi->num_conf_req)
2621 case L2CAP_MODE_STREAMING:
2622 case L2CAP_MODE_ERTM:
2623 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2624 pi->mode = l2cap_select_mode(rfc.mode,
2625 pi->conn->feat_mask);
2629 if (pi->mode != rfc.mode)
2630 return -ECONNREFUSED;
2636 if (pi->mode != rfc.mode) {
2637 result = L2CAP_CONF_UNACCEPT;
2638 rfc.mode = pi->mode;
/* Second refusal of our mode -> give up on the channel */
2640 if (pi->num_conf_rsp == 1)
2641 return -ECONNREFUSED;
2643 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2644 sizeof(rfc), (unsigned long) &rfc);
2648 if (result == L2CAP_CONF_SUCCESS) {
2649 /* Configure output options and let the other side know
2650 * which ones we don't like. */
2652 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2653 result = L2CAP_CONF_UNACCEPT;
2656 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2658 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2661 case L2CAP_MODE_BASIC:
2662 pi->fcs = L2CAP_FCS_NONE;
2663 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2666 case L2CAP_MODE_ERTM:
2667 pi->remote_tx_win = rfc.txwin_size;
2668 pi->remote_max_tx = rfc.max_transmit;
/* Fix: rfc.max_pdu_size is a little-endian wire field.  Convert
 * to host order for the comparison, and use cpu_to_le16() (not
 * le16_to_cpu()) when storing the clamped host value back. */
2669 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2670 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2672 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* Fix: timeouts are sent on the wire, so host constants must be
 * converted with cpu_to_le16(), not le16_to_cpu(). */
2674 rfc.retrans_timeout =
2675 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2676 rfc.monitor_timeout =
2677 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2679 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2681 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2682 sizeof(rfc), (unsigned long) &rfc);
2686 case L2CAP_MODE_STREAMING:
/* Fix: same endianness correction as the ERTM case above */
2687 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2688 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2690 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2692 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2694 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2695 sizeof(rfc), (unsigned long) &rfc);
2700 result = L2CAP_CONF_UNACCEPT;
2702 memset(&rfc, 0, sizeof(rfc));
2703 rfc.mode = pi->mode;
2706 if (result == L2CAP_CONF_SUCCESS)
2707 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2709 rsp->scid = cpu_to_le16(pi->dcid);
2710 rsp->result = cpu_to_le16(result);
2711 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's configuration response and build a follow-up request
 * into 'data'.  Options the peer adjusted (MTU, flush timeout, RFC) are
 * re-proposed; on success the negotiated ERTM/streaming parameters are
 * stored.  *result may be downgraded to UNACCEPT; returns the request
 * length or -ECONNREFUSED on an irreconcilable mode. */
2716 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2718 struct l2cap_pinfo *pi = l2cap_pi(sk);
2719 struct l2cap_conf_req *req = data;
2720 void *ptr = req->data;
2723 struct l2cap_conf_rfc rfc;
2725 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2727 while (len >= L2CAP_CONF_OPT_SIZE) {
2728 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2731 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: reject, counter with min */
2732 if (val < L2CAP_DEFAULT_MIN_MTU) {
2733 *result = L2CAP_CONF_UNACCEPT;
2734 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2737 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2740 case L2CAP_CONF_FLUSH_TO:
2742 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2746 case L2CAP_CONF_RFC:
2747 if (olen == sizeof(rfc))
2748 memcpy(&rfc, (void *)val, olen);
/* A mode-locked device cannot accept a different mode from the peer */
2750 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2751 rfc.mode != pi->mode)
2752 return -ECONNREFUSED;
2756 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2757 sizeof(rfc), (unsigned long) &rfc);
2762 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2763 return -ECONNREFUSED;
2765 pi->mode = rfc.mode;
2767 if (*result == L2CAP_CONF_SUCCESS) {
2769 case L2CAP_MODE_ERTM:
2770 pi->remote_tx_win = rfc.txwin_size;
2771 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2772 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2773 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2775 case L2CAP_MODE_STREAMING:
2776 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2780 req->dcid = cpu_to_le16(pi->dcid);
2781 req->flags = cpu_to_le16(0x0000);
/* Fill a minimal configuration response (scid/result/flags, no options)
 * into 'data' and return its length. */
2786 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2788 struct l2cap_conf_rsp *rsp = data;
2789 void *ptr = rsp->data;
2791 BT_DBG("sk %p", sk);
2793 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2794 rsp->result = cpu_to_le16(result);
2795 rsp->flags = cpu_to_le16(flags);
/* Extract the RFC option from a successful configuration response and
 * store the negotiated ERTM/streaming parameters.  A no-op for basic
 * mode channels. */
2800 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2802 struct l2cap_pinfo *pi = l2cap_pi(sk);
2805 struct l2cap_conf_rfc rfc;
2807 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2809 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2812 while (len >= L2CAP_CONF_OPT_SIZE) {
2813 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2816 case L2CAP_CONF_RFC:
2817 if (olen == sizeof(rfc))
2818 memcpy(&rfc, (void *)val, olen);
2825 case L2CAP_MODE_ERTM:
2826 pi->remote_tx_win = rfc.txwin_size;
2827 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2828 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2829 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2831 case L2CAP_MODE_STREAMING:
2832 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it answers our outstanding
 * feature-mask information request, treat the request as done (the peer
 * doesn't support it) and continue bringing channels up. */
2836 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2838 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* Only "command not understood" (0x0000) is of interest here */
2840 if (rej->reason != 0x0000)
2843 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2844 cmd->ident == conn->info_ident) {
2845 del_timer(&conn->info_timer);
2847 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2848 conn->info_ident = 0;
2850 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listener for the PSM,
 * enforce link security (except for SDP), allocate and register a child
 * socket, then answer with success/pending/refusal.  If the feature
 * exchange has not run yet, a pending response is sent and an info
 * request is started. */
2856 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2858 struct l2cap_chan_list *list = &conn->chan_list;
2859 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2860 struct l2cap_conn_rsp rsp;
2861 struct sock *sk, *parent;
2862 int result, status = L2CAP_CS_NO_INFO;
2864 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2865 __le16 psm = req->psm;
2867 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2869 /* Check if we have socket listening on psm */
2870 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2872 result = L2CAP_CR_BAD_PSM;
2876 /* Check if the ACL is secure enough (if not SDP) */
2877 if (psm != cpu_to_le16(0x0001) &&
2878 !hci_conn_check_link_mode(conn->hcon)) {
2879 conn->disc_reason = 0x05;
2880 result = L2CAP_CR_SEC_BLOCK;
2884 result = L2CAP_CR_NO_MEM;
2886 /* Check for backlog size */
2887 if (sk_acceptq_is_full(parent)) {
2888 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2892 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2896 write_lock_bh(&list->lock);
2898 /* Check if we already have channel with that dcid */
2899 if (__l2cap_get_chan_by_dcid(list, scid)) {
2900 write_unlock_bh(&list->lock);
2901 sock_set_flag(sk, SOCK_ZAPPED);
2902 l2cap_sock_kill(sk);
/* Keep the ACL alive while the child channel exists */
2906 hci_conn_hold(conn->hcon);
2908 l2cap_sock_init(sk, parent);
2909 bacpy(&bt_sk(sk)->src, conn->src);
2910 bacpy(&bt_sk(sk)->dst, conn->dst);
2911 l2cap_pi(sk)->psm = psm;
2912 l2cap_pi(sk)->dcid = scid;
2914 __l2cap_chan_add(conn, sk, parent);
2915 dcid = l2cap_pi(sk)->scid;
2917 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2919 l2cap_pi(sk)->ident = cmd->ident;
2921 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2922 if (l2cap_check_security(sk)) {
/* With BT_DEFER_SETUP the final response waits for userspace */
2923 if (bt_sk(sk)->defer_setup) {
2924 sk->sk_state = BT_CONNECT2;
2925 result = L2CAP_CR_PEND;
2926 status = L2CAP_CS_AUTHOR_PEND;
2927 parent->sk_data_ready(parent, 0);
2929 sk->sk_state = BT_CONFIG;
2930 result = L2CAP_CR_SUCCESS;
2931 status = L2CAP_CS_NO_INFO;
2934 sk->sk_state = BT_CONNECT2;
2935 result = L2CAP_CR_PEND;
2936 status = L2CAP_CS_AUTHEN_PEND;
2939 sk->sk_state = BT_CONNECT2;
2940 result = L2CAP_CR_PEND;
2941 status = L2CAP_CS_NO_INFO;
2944 write_unlock_bh(&list->lock);
2947 bh_unlock_sock(parent);
2950 rsp.scid = cpu_to_le16(scid);
2951 rsp.dcid = cpu_to_le16(dcid);
2952 rsp.result = cpu_to_le16(result);
2953 rsp.status = cpu_to_le16(status);
2954 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Feature mask still unknown: query it before finishing the connect */
2956 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2957 struct l2cap_info_req info;
2958 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2960 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2961 conn->info_ident = l2cap_get_ident(conn);
2963 mod_timer(&conn->info_timer, jiffies +
2964 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2966 l2cap_send_cmd(conn, conn->info_ident,
2967 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle a Connection Response to our earlier request.  On success the
 * channel moves to BT_CONFIG and our first configuration request is
 * sent; on pending we just note it; anything else tears the channel
 * down with ECONNREFUSED. */
2973 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2975 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2976 u16 scid, dcid, result, status;
2980 scid = __le16_to_cpu(rsp->scid);
2981 dcid = __le16_to_cpu(rsp->dcid);
2982 result = __le16_to_cpu(rsp->result);
2983 status = __le16_to_cpu(rsp->status);
2985 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* A pending response (scid 0) is matched by ident instead of CID */
2988 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2992 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2998 case L2CAP_CR_SUCCESS:
2999 sk->sk_state = BT_CONFIG;
3000 l2cap_pi(sk)->ident = 0;
3001 l2cap_pi(sk)->dcid = dcid;
3002 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3003 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
3005 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3006 l2cap_build_conf_req(sk, req), req);
3007 l2cap_pi(sk)->num_conf_req++;
3011 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3015 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle an incoming Configuration Request.  Option data may arrive in
 * several fragments (continuation flag 0x0001); fragments are buffered
 * in conf_req until complete, then parsed and answered.  Once both
 * directions are configured the channel becomes connected, initialising
 * ERTM state if needed. */
3023 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3025 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3031 dcid = __le16_to_cpu(req->dcid);
3032 flags = __le16_to_cpu(req->flags);
3034 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3036 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Config is only legal in BT_CONFIG; otherwise reject (invalid CID) */
3040 if (sk->sk_state != BT_CONFIG) {
3041 struct l2cap_cmd_rej rej;
3043 rej.reason = cpu_to_le16(0x0002);
3044 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3049 /* Reject if config buffer is too small. */
3050 len = cmd_len - sizeof(*req);
3051 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
3052 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3053 l2cap_build_conf_rsp(sk, rsp,
3054 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the buffered request */
3059 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
3060 l2cap_pi(sk)->conf_len += len;
3062 if (flags & 0x0001) {
3063 /* Incomplete config. Send empty response. */
3064 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3065 l2cap_build_conf_rsp(sk, rsp,
3066 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3070 /* Complete config. */
3071 len = l2cap_parse_conf_req(sk, rsp);
3073 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3077 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3078 l2cap_pi(sk)->num_conf_rsp++;
3080 /* Reset config buffer. */
3081 l2cap_pi(sk)->conf_len = 0;
3083 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both sides done: enable FCS unless both agreed to drop it */
3086 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
3087 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3088 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3089 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3091 sk->sk_state = BT_CONNECTED;
3093 l2cap_pi(sk)->next_tx_seq = 0;
3094 l2cap_pi(sk)->expected_tx_seq = 0;
3095 __skb_queue_head_init(TX_QUEUE(sk));
3096 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3097 l2cap_ertm_init(sk);
3099 l2cap_chan_ready(sk);
/* Our own config request hasn't been sent yet: send it now */
3103 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3105 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3106 l2cap_build_conf_req(sk, buf), buf);
3107 l2cap_pi(sk)->num_conf_req++;
/* Handle an incoming Configuration Response.  On success the negotiated
 * RFC values are recorded; on UNACCEPT we re-parse and resend a bounded
 * number of times; any other result resets the connection.  When both
 * config directions are done the channel becomes connected. */
3115 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3117 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3118 u16 scid, flags, result;
/* Fix: cmd->len is a little-endian wire field (__le16); convert to
 * host order before doing arithmetic on it. */
3120 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3122 scid = __le16_to_cpu(rsp->scid);
3123 flags = __le16_to_cpu(rsp->flags);
3124 result = __le16_to_cpu(rsp->result);
3126 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3127 scid, flags, result);
3129 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3134 case L2CAP_CONF_SUCCESS:
3135 l2cap_conf_rfc_get(sk, rsp->data, len);
3138 case L2CAP_CONF_UNACCEPT:
/* Retry with adjusted options, but only a bounded number of times */
3139 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3142 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3143 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3147 /* throw out any old stored conf requests */
3148 result = L2CAP_CONF_SUCCESS;
3149 len = l2cap_parse_conf_rsp(sk, rsp->data,
3152 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3156 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3157 L2CAP_CONF_REQ, len, req);
3158 l2cap_pi(sk)->num_conf_req++;
3159 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable result: reset the connection */
3165 sk->sk_err = ECONNRESET;
3166 l2cap_sock_set_timer(sk, HZ * 5);
3167 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3174 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3176 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
/* Enable FCS unless both sides agreed to drop it */
3177 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3178 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3179 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3181 sk->sk_state = BT_CONNECTED;
3182 l2cap_pi(sk)->next_tx_seq = 0;
3183 l2cap_pi(sk)->expected_tx_seq = 0;
3184 __skb_queue_head_init(TX_QUEUE(sk));
3185 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3186 l2cap_ertm_init(sk);
3188 l2cap_chan_ready(sk);
/*
 * Handle an incoming L2CAP Disconnection Request: acknowledge with a
 * Disconnection Response, shut down and delete the channel, then kill
 * the socket.  (Extraction elides some original lines; comments only.)
 */
3196 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3198 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3199 struct l2cap_disconn_rsp rsp;
3203 scid = __le16_to_cpu(req->scid);
3204 dcid = __le16_to_cpu(req->dcid);
3206 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our local (source) channel id. */
3208 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Echo the CIDs back, swapped to the responder's point of view. */
3212 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3213 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3214 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3216 sk->sk_shutdown = SHUTDOWN_MASK;
3218 l2cap_chan_del(sk, ECONNRESET);
3221 l2cap_sock_kill(sk);
/*
 * Handle an incoming L2CAP Disconnection Response: our earlier request
 * was acknowledged, so remove the channel (no error) and kill the socket.
 * (Extraction elides some original lines; comments only.)
 */
3225 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3227 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3231 scid = __le16_to_cpu(rsp->scid);
3232 dcid = __le16_to_cpu(rsp->dcid);
3234 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3236 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
/* err == 0: clean local-initiated disconnect. */
3240 l2cap_chan_del(sk, 0);
3243 l2cap_sock_kill(sk);
/*
 * Answer an incoming L2CAP Information Request.  Supported query types:
 * feature mask (augmented with ERTM/streaming bits) and fixed-channel
 * map; anything else gets a NOT_SUPPORTED response.
 * (Extraction elides some original lines; comments only.)
 */
3247 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3249 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3252 type = __le16_to_cpu(req->type);
3254 BT_DBG("type 0x%4.4x", type);
3256 if (type == L2CAP_IT_FEAT_MASK) {
3258 u32 feat_mask = l2cap_feat_mask;
3259 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3260 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3261 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming support on top of the static mask. */
3263 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* put_unaligned: rsp->data has no alignment guarantee. */
3265 put_unaligned_le32(feat_mask, rsp->data);
3266 l2cap_send_cmd(conn, cmd->ident,
3267 L2CAP_INFO_RSP, sizeof(buf), buf);
3268 } else if (type == L2CAP_IT_FIXED_CHAN) {
3270 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3271 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3272 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Fixed-channel bitmap follows the 4-byte info-rsp header. */
3273 memcpy(buf + 4, l2cap_fixed_chan, 8);
3274 l2cap_send_cmd(conn, cmd->ident,
3275 L2CAP_INFO_RSP, sizeof(buf), buf);
/* Unknown info type: reply NOT_SUPPORTED, echoing the type back. */
3277 struct l2cap_info_rsp rsp;
3278 rsp.type = cpu_to_le16(type);
3279 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3280 l2cap_send_cmd(conn, cmd->ident,
3281 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an incoming L2CAP Information Response.  Records the peer's
 * feature mask, optionally chains a fixed-channel query, and once the
 * info exchange is complete kicks pending channels via
 * l2cap_conn_start().  (Extraction elides some lines; comments only.)
 */
3287 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3289 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3292 type = __le16_to_cpu(rsp->type);
3293 result = __le16_to_cpu(rsp->result);
3295 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* Response arrived, so the info-request guard timer is no longer needed. */
3297 del_timer(&conn->info_timer);
3299 if (type == L2CAP_IT_FEAT_MASK) {
3300 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: follow up with a second query. */
3302 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3303 struct l2cap_info_req req;
3304 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3306 conn->info_ident = l2cap_get_ident(conn);
3308 l2cap_send_cmd(conn, conn->info_ident,
3309 L2CAP_INFO_REQ, sizeof(req), &req);
/* No fixed channels: the info exchange is finished. */
3311 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3312 conn->info_ident = 0;
3314 l2cap_conn_start(conn);
3316 } else if (type == L2CAP_IT_FIXED_CHAN) {
3317 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3318 conn->info_ident = 0;
3320 l2cap_conn_start(conn);
/*
 * Demultiplex the L2CAP signalling channel (CID 0x0001): iterate over
 * the command headers packed into one skb and dispatch each to its
 * handler; on handler error send a Command Reject.
 * (Extraction elides some original lines; comments only.)
 */
3326 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3328 u8 *data = skb->data;
3330 struct l2cap_cmd_hdr cmd;
/* Mirror raw signalling traffic to any raw L2CAP sockets. */
3333 l2cap_raw_recv(conn, skb);
3335 while (len >= L2CAP_CMD_HDR_SIZE) {
3337 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3338 data += L2CAP_CMD_HDR_SIZE;
3339 len -= L2CAP_CMD_HDR_SIZE;
3341 cmd_len = le16_to_cpu(cmd.len);
3343 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Reject truncated commands and the reserved ident 0. */
3345 if (cmd_len > len || !cmd.ident) {
3346 BT_DBG("corrupted command");
3351 case L2CAP_COMMAND_REJ:
3352 l2cap_command_rej(conn, &cmd, data);
3355 case L2CAP_CONN_REQ:
3356 err = l2cap_connect_req(conn, &cmd, data);
3359 case L2CAP_CONN_RSP:
3360 err = l2cap_connect_rsp(conn, &cmd, data);
3363 case L2CAP_CONF_REQ:
3364 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3367 case L2CAP_CONF_RSP:
3368 err = l2cap_config_rsp(conn, &cmd, data);
3371 case L2CAP_DISCONN_REQ:
3372 err = l2cap_disconnect_req(conn, &cmd, data);
3375 case L2CAP_DISCONN_RSP:
3376 err = l2cap_disconnect_rsp(conn, &cmd, data);
3379 case L2CAP_ECHO_REQ:
/* Echo: bounce the payload straight back with the same ident. */
3380 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3383 case L2CAP_ECHO_RSP:
3386 case L2CAP_INFO_REQ:
3387 err = l2cap_information_req(conn, &cmd, data);
3390 case L2CAP_INFO_RSP:
3391 err = l2cap_information_rsp(conn, &cmd, data);
3395 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3401 struct l2cap_cmd_rej rej;
3402 BT_DBG("error %d", err);
3404 /* FIXME: Map err to a valid reason */
3405 rej.reason = cpu_to_le16(0);
3406 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/*
 * Verify the 2-byte CRC16 FCS trailer of a received ERTM/streaming
 * frame.  The trailer is trimmed off first; reading at skb->data +
 * skb->len afterwards is safe because the bytes remain in the buffer.
 * Returns 0 when the FCS matches or FCS is disabled.
 * (Extraction elides the mismatch-return line; comments only added.)
 */
3416 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3418 u16 our_fcs, rcv_fcs;
/* CRC covers the basic L2CAP header plus the 2-byte control field. */
3419 int hdr_size = L2CAP_HDR_SIZE + 2;
3421 if (pi->fcs == L2CAP_FCS_CRC16) {
3422 skb_trim(skb, skb->len - 2);
3423 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3424 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3426 if (our_fcs != rcv_fcs)
/*
 * After a poll (P-bit) we must answer with an F-bit frame: send RNR if
 * locally busy, else retransmit/send pending I-frames, and fall back to
 * an RR if nothing at all was sent.  (Extraction elides some lines.)
 */
3432 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3434 struct l2cap_pinfo *pi = l2cap_pi(sk);
3437 pi->frames_sent = 0;
3439 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3441 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
/* Local busy: tell the peer to stop sending (RNR). */
3442 control |= L2CAP_SUPER_RCV_NOT_READY;
3443 l2cap_send_sframe(pi, control);
3444 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3447 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3448 l2cap_retransmit_frames(sk);
3450 l2cap_ertm_send(sk);
/* Nothing was transmitted above: acknowledge with a plain RR. */
3452 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3453 pi->frames_sent == 0) {
3454 control |= L2CAP_SUPER_RCV_READY;
3455 l2cap_send_sframe(pi, control);
/*
 * Insert an out-of-sequence I-frame into the SREJ reorder queue, keeping
 * the queue sorted by tx_seq distance from buffer_seq (modulo-64
 * sequence space).  Duplicate tx_seq values are rejected by the caller
 * path at line 3479.  (Extraction elides some lines; comments only.)
 */
3459 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3461 struct sk_buff *next_skb;
3462 struct l2cap_pinfo *pi = l2cap_pi(sk);
3463 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequencing metadata in the skb control block. */
3465 bt_cb(skb)->tx_seq = tx_seq;
3466 bt_cb(skb)->sar = sar;
3468 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue: just append. */
3470 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Normalise the modulo-64 distance to a non-negative offset. */
3474 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3475 if (tx_seq_offset < 0)
3476 tx_seq_offset += 64;
/* Same tx_seq already queued: treated as a duplicate. */
3479 if (bt_cb(next_skb)->tx_seq == tx_seq)
3482 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3483 pi->buffer_seq) % 64;
3484 if (next_tx_seq_offset < 0)
3485 next_tx_seq_offset += 64;
/* Found the first queued frame that sequences after us: insert before. */
3487 if (next_tx_seq_offset > tx_seq_offset) {
3488 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3492 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3495 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
/* Sequences after everything queued: append at the tail. */
3497 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/*
 * Reassemble an SDU from ERTM I-frames according to the SAR bits of the
 * control field (unsegmented / start / continue / end) and deliver the
 * completed SDU to the socket receive queue.  Any SAR protocol
 * violation disconnects the channel (line 3605).
 * (Extraction elides some original lines; comments only added.)
 */
3502 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3504 struct l2cap_pinfo *pi = l2cap_pi(sk);
3505 struct sk_buff *_skb;
3508 switch (control & L2CAP_CTRL_SAR) {
3509 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame must not arrive mid-reassembly. */
3510 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3513 err = sock_queue_rcv_skb(sk, skb);
3519 case L2CAP_SDU_START:
3520 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
/* SDU length travels in the first two payload bytes of the start frame. */
3523 pi->sdu_len = get_unaligned_le16(skb->data);
3525 if (pi->sdu_len > pi->imtu)
3528 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3532 /* pull sdu_len bytes only after alloc, because of Local Busy
3533 * condition we have to be sure that this will be executed
3534 * only once, i.e., when alloc does not fail */
3537 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3539 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3540 pi->partial_sdu_len = skb->len;
3543 case L2CAP_SDU_CONTINUE:
/* A continuation frame requires an open reassembly. */
3544 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3550 pi->partial_sdu_len += skb->len;
3551 if (pi->partial_sdu_len > pi->sdu_len)
3554 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* L2CAP_SDU_END path below. */
3559 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
/* SAR_RETRY: the final append already happened on a previous attempt
 * that failed only at the queue-to-socket step, so skip re-appending. */
3565 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3566 pi->partial_sdu_len += skb->len;
3568 if (pi->partial_sdu_len > pi->imtu)
3571 if (pi->partial_sdu_len != pi->sdu_len)
3574 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3577 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
/* Clone failed: remember to retry delivery later. */
3579 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3583 err = sock_queue_rcv_skb(sk, _skb);
3586 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
/* Delivered: clear reassembly state for the next SDU. */
3590 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3591 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
/* Protocol violation in the SAR sequence: drop the channel. */
3605 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/*
 * Drain the local-busy backlog: push queued frames through ERTM
 * reassembly until one fails (requeue it and report busy) or the queue
 * empties, then leave the local-busy state, poll the peer with
 * RR+P-bit, and start the monitor timer.
 * (Extraction elides some original lines; comments only added.)
 */
3610 static int l2cap_try_push_rx_skb(struct sock *sk)
3612 struct l2cap_pinfo *pi = l2cap_pi(sk);
3613 struct sk_buff *skb;
3617 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3618 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3619 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
/* Still busy: put the frame back at the head and bail out. */
3621 skb_queue_head(BUSY_QUEUE(sk), skb);
/* Frame consumed: advance the modulo-64 receive window. */
3625 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3628 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
/* We previously sent RNR: poll the peer so it resumes transmission. */
3631 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3632 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3633 l2cap_send_sframe(pi, control);
3634 l2cap_pi(sk)->retry_count = 1;
/* Switch from retransmission timer to monitor timer while waiting
 * for the F-bit answer to our poll. */
3636 del_timer(&pi->retrans_timer);
3637 __mod_monitor_timer();
3639 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3642 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3643 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3645 BT_DBG("sk %p, Exit local busy", sk);
/*
 * Workqueue handler for the receive-side busy condition: repeatedly try
 * to push the backlog, sleeping ~HZ/5 between attempts, and give up
 * (disconnect with EBUSY) after L2CAP_LOCAL_BUSY_TRIES.  Also aborts on
 * pending signals or socket errors.
 * (Extraction elides some original lines; comments only added.)
 */
3650 static void l2cap_busy_work(struct work_struct *work)
3652 DECLARE_WAITQUEUE(wait, current);
3653 struct l2cap_pinfo *pi =
3654 container_of(work, struct l2cap_pinfo, busy_work);
/* The socket embeds l2cap_pinfo first, so the cast is valid here. */
3655 struct sock *sk = (struct sock *)pi;
3656 int n_tries = 0, timeo = HZ/5, err;
3657 struct sk_buff *skb;
3661 add_wait_queue(sk_sleep(sk), &wait);
3662 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3663 set_current_state(TASK_INTERRUPTIBLE);
/* Too many attempts: the receiver is stuck, tear the channel down. */
3665 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3667 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3674 if (signal_pending(current)) {
3675 err = sock_intr_errno(timeo);
3680 timeo = schedule_timeout(timeo);
3683 err = sock_error(sk);
/* Backlog fully drained: done. */
3687 if (l2cap_try_push_rx_skb(sk) == 0)
3691 set_current_state(TASK_RUNNING);
3692 remove_wait_queue(sk_sleep(sk), &wait);
/*
 * Deliver a received in-sequence I-frame.  If already locally busy,
 * append to the backlog and retry the drain; if reassembly reports
 * busy (non-zero), enter the local-busy state: queue the frame, send
 * RNR, stop the ack timer and kick the busy workqueue.
 * (Extraction elides some original lines; comments only added.)
 */
3697 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3699 struct l2cap_pinfo *pi = l2cap_pi(sk);
3702 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3703 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3704 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3705 return l2cap_try_push_rx_skb(sk);
3710 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
/* Accepted: advance the modulo-64 receive window. */
3712 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3716 /* Busy Condition */
3717 BT_DBG("sk %p, Enter local busy", sk);
3719 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3720 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3721 __skb_queue_tail(BUSY_QUEUE(sk), skb);
/* Tell the peer to stop sending until we recover. */
3723 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3724 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3725 l2cap_send_sframe(pi, sctrl);
3727 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3729 del_timer(&pi->ack_timer);
3731 queue_work(_busy_wq, &pi->busy_work);
/*
 * Reassemble an SDU in streaming mode.  Unlike ERTM, SAR violations
 * here do not disconnect — frames are simply dropped (streaming is
 * best-effort).  (Extraction elides some lines; comments only added.)
 */
3736 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3738 struct l2cap_pinfo *pi = l2cap_pi(sk);
3739 struct sk_buff *_skb;
3743 * TODO: We have to notify the userland if some data is lost with the
3747 switch (control & L2CAP_CTRL_SAR) {
3748 case L2CAP_SDU_UNSEGMENTED:
/* Abandon any half-built SDU when an unsegmented frame arrives. */
3749 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3754 err = sock_queue_rcv_skb(sk, skb);
3760 case L2CAP_SDU_START:
3761 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* Total SDU length is carried in the first two payload bytes. */
3766 pi->sdu_len = get_unaligned_le16(skb->data);
3769 if (pi->sdu_len > pi->imtu) {
3774 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3780 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3782 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3783 pi->partial_sdu_len = skb->len;
3787 case L2CAP_SDU_CONTINUE:
3788 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3791 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3793 pi->partial_sdu_len += skb->len;
3794 if (pi->partial_sdu_len > pi->sdu_len)
/* L2CAP_SDU_END path below. */
3802 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3805 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3807 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3808 pi->partial_sdu_len += skb->len;
3810 if (pi->partial_sdu_len > pi->imtu)
/* Only deliver if the accumulated length matches the announced one. */
3813 if (pi->partial_sdu_len == pi->sdu_len) {
3814 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
/* NOTE(review): _skb is passed on without a NULL check; if the
 * atomic clone fails this queues NULL — verify against the
 * elided lines / upstream fix history. */
3815 err = sock_queue_rcv_skb(sk, _skb);
/*
 * After a missing frame was recovered, flush the run of now-contiguous
 * frames from the SREJ reorder queue into reassembly, advancing both
 * buffer_seq_srej and the expected tx_seq (modulo 64) as we go.
 * (Extraction elides some original lines; comments only added.)
 */
3830 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3832 struct sk_buff *skb;
3835 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Stop at the first gap in the sequence. */
3836 if (bt_cb(skb)->tx_seq != tx_seq)
3839 skb = skb_dequeue(SREJ_QUEUE(sk));
3840 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3841 l2cap_ertm_reassembly_sdu(sk, skb, control);
3842 l2cap_pi(sk)->buffer_seq_srej =
3843 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3844 tx_seq = (tx_seq + 1) % 64;
/*
 * A duplicate of an already-SREJed frame arrived: drop the satisfied
 * entry from the SREJ list and re-send SREJ S-frames for the entries
 * still outstanding, rotating them to the list tail.
 * (Extraction elides some original lines; comments only added.)
 */
3848 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3850 struct l2cap_pinfo *pi = l2cap_pi(sk);
3851 struct srej_list *l, *tmp;
3854 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
/* This sequence number has now been received: retire its entry. */
3855 if (l->tx_seq == tx_seq) {
3860 control = L2CAP_SUPER_SELECT_REJECT;
3861 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3862 l2cap_send_sframe(pi, control);
/* Move the still-missing entry to the tail, preserving order. */
3864 list_add_tail(&l->list, SREJ_LIST(sk));
/*
 * A frame arrived beyond the expected sequence number: send one SREJ
 * S-frame per missing sequence number between expected_tx_seq and
 * tx_seq, recording each in the SREJ list, then step past the received
 * frame itself.  (Extraction elides some lines; comments only added.)
 */
3868 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3870 struct l2cap_pinfo *pi = l2cap_pi(sk);
3871 struct srej_list *new;
3874 while (tx_seq != pi->expected_tx_seq) {
3875 control = L2CAP_SUPER_SELECT_REJECT;
3876 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3877 l2cap_send_sframe(pi, control);
3879 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
/* NOTE(review): kzalloc result is dereferenced without a NULL
 * check — GFP_ATOMIC can fail; later upstream kernels restructured
 * this to handle allocation failure.  Confirm against upstream. */
3880 new->tx_seq = pi->expected_tx_seq;
3881 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3882 list_add_tail(&new->list, SREJ_LIST(sk));
/* Finally skip over the frame that triggered the SREJ run. */
3884 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/*
 * Core ERTM receive path for I-frames: handles F-bit poll completion,
 * acknowledgement bookkeeping, in-sequence delivery, out-of-sequence
 * SREJ handling (entering/leaving SREJ_SENT), duplicate detection,
 * REJ-triggered retransmission, and periodic acking every
 * tx_win/6 + 1 frames.
 * (Extraction elides many original lines; comments only added.)
 */
3887 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3889 struct l2cap_pinfo *pi = l2cap_pi(sk);
3890 u8 tx_seq = __get_txseq(rx_control);
3891 u8 req_seq = __get_reqseq(rx_control);
3892 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3893 int tx_seq_offset, expected_tx_seq_offset;
/* Ack threshold: acknowledge after roughly a sixth of the window. */
3894 int num_to_ack = (pi->tx_win/6) + 1;
3897 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
/* F-bit answers our earlier poll: stop the monitor timer. */
3900 if (L2CAP_CTRL_FINAL & rx_control &&
3901 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3902 del_timer(&pi->monitor_timer);
3903 if (pi->unacked_frames > 0)
3904 __mod_retrans_timer();
3905 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
/* The piggybacked ReqSeq acknowledges our transmitted frames. */
3908 pi->expected_ack_seq = req_seq;
3909 l2cap_drop_acked_frames(sk);
3911 if (tx_seq == pi->expected_tx_seq)
3914 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3915 if (tx_seq_offset < 0)
3916 tx_seq_offset += 64;
3918 /* invalid tx_seq */
3919 if (tx_seq_offset >= pi->tx_win) {
3920 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* NOTE(review): '==' compares the whole conn_state bitmask against
 * one flag; upstream later changed this to a '&' bit test — confirm
 * against upstream history before relying on this behaviour. */
3924 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3927 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3928 struct srej_list *first;
3930 first = list_first_entry(SREJ_LIST(sk),
3931 struct srej_list, list);
/* The earliest missing frame arrived: fill the gap. */
3932 if (tx_seq == first->tx_seq) {
3933 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3934 l2cap_check_srej_gap(sk, tx_seq);
3936 list_del(&first->list);
/* All gaps filled: leave SREJ_SENT recovery. */
3939 if (list_empty(SREJ_LIST(sk))) {
3940 pi->buffer_seq = pi->buffer_seq_srej;
3941 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3943 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3946 struct srej_list *l;
3948 /* duplicated tx_seq */
3949 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3952 list_for_each_entry(l, SREJ_LIST(sk), list) {
3953 if (l->tx_seq == tx_seq) {
3954 l2cap_resend_srejframe(sk, tx_seq);
3958 l2cap_send_srejframe(sk, tx_seq);
3961 expected_tx_seq_offset =
3962 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3963 if (expected_tx_seq_offset < 0)
3964 expected_tx_seq_offset += 64;
3966 /* duplicated tx_seq */
3967 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-sequence frame: enter SREJ recovery. */
3970 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3972 BT_DBG("sk %p, Enter SREJ", sk);
3974 INIT_LIST_HEAD(SREJ_LIST(sk));
3975 pi->buffer_seq_srej = pi->buffer_seq;
3977 __skb_queue_head_init(SREJ_QUEUE(sk));
3978 __skb_queue_head_init(BUSY_QUEUE(sk));
3979 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3981 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3983 l2cap_send_srejframe(sk, tx_seq);
3985 del_timer(&pi->ack_timer);
/* Expected (in-sequence) frame path. */
3990 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3992 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3993 bt_cb(skb)->tx_seq = tx_seq;
3994 bt_cb(skb)->sar = sar;
3995 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3999 err = l2cap_push_rx_skb(sk, skb, rx_control);
4003 if (rx_control & L2CAP_CTRL_FINAL) {
4004 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4005 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4007 l2cap_retransmit_frames(sk);
/* Ack periodically rather than per frame. */
4012 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
4013 if (pi->num_acked == num_to_ack - 1)
/*
 * Handle a Receiver Ready (RR) S-frame: process the acknowledgement,
 * then branch on P-bit (answer the poll), F-bit (complete our poll,
 * possibly retransmit), or plain RR (resume sending).
 * (Extraction elides some original lines; comments only added.)
 */
4023 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
4025 struct l2cap_pinfo *pi = l2cap_pi(sk);
4027 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
4030 pi->expected_ack_seq = __get_reqseq(rx_control);
4031 l2cap_drop_acked_frames(sk);
4033 if (rx_control & L2CAP_CTRL_POLL) {
/* Peer polled us: we owe an F-bit response. */
4034 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4035 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4036 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4037 (pi->unacked_frames > 0))
4038 __mod_retrans_timer();
4040 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4041 l2cap_send_srejtail(sk);
4043 l2cap_send_i_or_rr_or_rnr(sk);
4046 } else if (rx_control & L2CAP_CTRL_FINAL) {
4047 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* F-bit closes a REJ exchange; otherwise retransmit now. */
4049 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4050 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4052 l2cap_retransmit_frames(sk);
4055 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4056 (pi->unacked_frames > 0))
4057 __mod_retrans_timer();
4059 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4060 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
/* Peer is ready again: push any pending I-frames. */
4063 l2cap_ertm_send(sk);
/*
 * Handle a Reject (REJ) S-frame: everything up to ReqSeq is acked and
 * the rest must be retransmitted.  REJ_ACT suppresses a duplicate
 * retransmission when the F-bit closes an exchange we already acted on.
 * (Extraction elides some original lines; comments only added.)
 */
4068 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4070 struct l2cap_pinfo *pi = l2cap_pi(sk);
4071 u8 tx_seq = __get_reqseq(rx_control);
4073 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4075 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4077 pi->expected_ack_seq = tx_seq;
4078 l2cap_drop_acked_frames(sk);
4080 if (rx_control & L2CAP_CTRL_FINAL) {
/* Final bit: only retransmit if we have not already done so. */
4081 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4082 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4084 l2cap_retransmit_frames(sk);
4086 l2cap_retransmit_frames(sk);
/* If we are mid-poll, remember we already serviced this REJ. */
4088 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4089 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/*
 * Handle a Selective Reject (SREJ) S-frame: retransmit exactly the
 * requested frame.  P-bit additionally acks up to ReqSeq and demands an
 * F-bit answer; F-bit may close a previously recorded SREJ exchange
 * (SREJ_ACT + srej_save_reqseq).
 * (Extraction elides some original lines; comments only added.)
 */
4092 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
4094 struct l2cap_pinfo *pi = l2cap_pi(sk);
4095 u8 tx_seq = __get_reqseq(rx_control);
4097 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4099 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4101 if (rx_control & L2CAP_CTRL_POLL) {
4102 pi->expected_ack_seq = tx_seq;
4103 l2cap_drop_acked_frames(sk);
4105 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4106 l2cap_retransmit_one_frame(sk, tx_seq);
4108 l2cap_ertm_send(sk);
/* Record the SREJ so a later F-bit can be matched to it. */
4110 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4111 pi->srej_save_reqseq = tx_seq;
4112 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4114 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* F-bit matching a recorded SREJ: already handled, just clear. */
4115 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4116 pi->srej_save_reqseq == tx_seq)
4117 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4119 l2cap_retransmit_one_frame(sk, tx_seq);
4121 l2cap_retransmit_one_frame(sk, tx_seq);
4122 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4123 pi->srej_save_reqseq = tx_seq;
4124 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/*
 * Handle a Receiver Not Ready (RNR) S-frame: mark the peer busy, ack up
 * to ReqSeq, stop retransmitting (unless in SREJ recovery), and answer
 * a P-bit with the appropriate final frame.
 * (Extraction elides some original lines; comments only added.)
 */
4129 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4131 struct l2cap_pinfo *pi = l2cap_pi(sk);
4132 u8 tx_seq = __get_reqseq(rx_control);
4134 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4136 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4137 pi->expected_ack_seq = tx_seq;
4138 l2cap_drop_acked_frames(sk);
4140 if (rx_control & L2CAP_CTRL_POLL)
4141 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4143 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
/* Peer is busy: no point running the retransmission timer. */
4144 del_timer(&pi->retrans_timer);
4145 if (rx_control & L2CAP_CTRL_POLL)
4146 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
/* In SREJ recovery, a poll is answered with the SREJ tail. */
4150 if (rx_control & L2CAP_CTRL_POLL)
4151 l2cap_send_srejtail(sk);
4153 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/*
 * Dispatch a received S-frame (RR/REJ/SREJ/RNR) to its handler after
 * servicing an F-bit that answers our outstanding poll.
 * (Extraction elides some original lines; comments only added.)
 */
4156 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4158 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* F-bit completes our poll: swap monitor timer for retrans timer. */
4160 if (L2CAP_CTRL_FINAL & rx_control &&
4161 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4162 del_timer(&l2cap_pi(sk)->monitor_timer);
4163 if (l2cap_pi(sk)->unacked_frames > 0)
4164 __mod_retrans_timer();
4165 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4168 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4169 case L2CAP_SUPER_RCV_READY:
4170 l2cap_data_channel_rrframe(sk, rx_control);
4173 case L2CAP_SUPER_REJECT:
4174 l2cap_data_channel_rejframe(sk, rx_control);
4177 case L2CAP_SUPER_SELECT_REJECT:
4178 l2cap_data_channel_srejframe(sk, rx_control);
4181 case L2CAP_SUPER_RCV_NOT_READY:
4182 l2cap_data_channel_rnrframe(sk, rx_control);
/*
 * Entry point for a received ERTM frame: validate FCS, strip optional
 * SAR length and FCS bytes from the payload length, sanity-check MPS
 * and ReqSeq, then route to the I-frame or S-frame handler.  Invalid
 * frames disconnect the channel.
 * (Extraction elides some original lines; comments only added.)
 */
4190 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4192 struct l2cap_pinfo *pi = l2cap_pi(sk);
4195 int len, next_tx_seq_offset, req_seq_offset;
4197 control = get_unaligned_le16(skb->data);
4202 * We can just drop the corrupted I-frame here.
4203 * Receiver will miss it and start proper recovery
4204 * procedures and ask retransmission.
4206 if (l2cap_check_fcs(pi, skb))
/* SAR start frames carry a 2-byte SDU length not counted in MPS. */
4209 if (__is_sar_start(control) && __is_iframe(control))
4212 if (pi->fcs == L2CAP_FCS_CRC16)
4215 if (len > pi->mps) {
4216 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4220 req_seq = __get_reqseq(control);
/* Modulo-64 distances, normalised to non-negative. */
4221 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4222 if (req_seq_offset < 0)
4223 req_seq_offset += 64;
4225 next_tx_seq_offset =
4226 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4227 if (next_tx_seq_offset < 0)
4228 next_tx_seq_offset += 64;
4230 /* check for invalid req-seq */
4231 if (req_seq_offset > next_tx_seq_offset) {
4232 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4236 if (__is_iframe(control)) {
/* I-frame with zero payload is a protocol violation. */
4238 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4242 l2cap_data_channel_iframe(sk, control, skb);
/* S-frame with nonzero payload is a protocol violation. */
4246 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4250 l2cap_data_channel_sframe(sk, control, skb);
/*
 * Deliver a frame to a connection-oriented data channel looked up by
 * CID, dispatching by channel mode: BASIC queues straight to the
 * socket, ERTM goes through l2cap_ertm_data_rcv (or the socket backlog
 * if the socket is owned by a user context), STREAMING resynchronises
 * expected_tx_seq and reassembles best-effort.
 * (Extraction elides some original lines; comments only added.)
 */
4260 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4263 struct l2cap_pinfo *pi;
4268 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4270 BT_DBG("unknown cid 0x%4.4x", cid);
4276 BT_DBG("sk %p, len %d", sk, skb->len);
4278 if (sk->sk_state != BT_CONNECTED)
4282 case L2CAP_MODE_BASIC:
4283 /* If socket recv buffers overflows we drop data here
4284 * which is *bad* because L2CAP has to be reliable.
4285 * But we don't have any other choice. L2CAP doesn't
4286 * provide flow control mechanism. */
4288 if (pi->imtu < skb->len)
4291 if (!sock_queue_rcv_skb(sk, skb))
4295 case L2CAP_MODE_ERTM:
/* If a user holds the socket lock, defer via the backlog. */
4296 if (!sock_owned_by_user(sk)) {
4297 l2cap_ertm_data_rcv(sk, skb);
4299 if (sk_add_backlog(sk, skb))
4305 case L2CAP_MODE_STREAMING:
4306 control = get_unaligned_le16(skb->data);
4310 if (l2cap_check_fcs(pi, skb))
4313 if (__is_sar_start(control))
4316 if (pi->fcs == L2CAP_FCS_CRC16)
4319 if (len > pi->mps || len < 0 || __is_sframe(control))
4322 tx_seq = __get_txseq(control);
/* Streaming tolerates loss: just resync the expected sequence. */
4324 if (pi->expected_tx_seq == tx_seq)
4325 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4327 pi->expected_tx_seq = (tx_seq + 1) % 64;
4329 l2cap_streaming_reassembly_sdu(sk, skb, control);
4334 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/*
 * Deliver a connectionless (CID 0x0002) frame to the socket bound to
 * the given PSM on our source address, subject to state and MTU checks.
 * (Extraction elides some original lines; comments only added.)
 */
4348 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4352 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4356 BT_DBG("sk %p, len %d", sk, skb->len);
4358 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4361 if (l2cap_pi(sk)->imtu < skb->len)
4364 if (!sock_queue_rcv_skb(sk, skb))
/*
 * Top-level demultiplexer for one complete L2CAP frame: strip the basic
 * header, validate the announced length against the skb, then route by
 * CID to the signalling, connectionless, or data-channel handler.
 * (Extraction elides some original lines; comments only added.)
 */
4376 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4378 struct l2cap_hdr *lh = (void *) skb->data;
4382 skb_pull(skb, L2CAP_HDR_SIZE);
4383 cid = __le16_to_cpu(lh->cid);
4384 len = __le16_to_cpu(lh->len);
/* Header length must match the remaining payload exactly. */
4386 if (len != skb->len) {
4391 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4394 case L2CAP_CID_SIGNALING:
4395 l2cap_sig_channel(conn, skb);
4398 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM first in the payload. */
4399 psm = get_unaligned_le16(skb->data);
4401 l2cap_conless_channel(conn, psm, skb);
4405 l2cap_data_channel(conn, cid, skb);
4410 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * HCI callback: should we accept an incoming ACL connection from
 * bdaddr?  Scans listening L2CAP sockets; an exact local-address match
 * (lm1) takes precedence over a wildcard (BDADDR_ANY, lm2).  Returns
 * the accept/role link-mode bits, 0 to refuse.
 * (Extraction elides some original lines; comments only added.)
 */
4412 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4414 int exact = 0, lm1 = 0, lm2 = 0;
4415 register struct sock *sk;
4416 struct hlist_node *node;
/* Only ACL links carry L2CAP. */
4418 if (type != ACL_LINK)
4421 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4423 /* Find listening sockets and check their link_mode */
4424 read_lock(&l2cap_sk_list.lock);
4425 sk_for_each(sk, node, &l2cap_sk_list.head) {
4426 if (sk->sk_state != BT_LISTEN)
4429 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4430 lm1 |= HCI_LM_ACCEPT;
4431 if (l2cap_pi(sk)->role_switch)
4432 lm1 |= HCI_LM_MASTER;
4434 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4435 lm2 |= HCI_LM_ACCEPT;
4436 if (l2cap_pi(sk)->role_switch)
4437 lm2 |= HCI_LM_MASTER;
4440 read_unlock(&l2cap_sk_list.lock);
4442 return exact ? lm1 : lm2;
/*
 * HCI callback for ACL connect completion: on success attach/ready an
 * L2CAP connection, on failure tear it down with the mapped errno.
 * (Extraction elides some original lines; comments only added.)
 */
4445 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4447 struct l2cap_conn *conn;
4449 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4451 if (hcon->type != ACL_LINK)
4455 conn = l2cap_conn_add(hcon, status);
4457 l2cap_conn_ready(conn);
/* bt_err() maps the HCI status code to a kernel errno. */
4459 l2cap_conn_del(hcon, bt_err(status));
/*
 * HCI callback: report the disconnect reason previously recorded on the
 * L2CAP connection (used in the HCI disconnect handshake).
 */
4464 static int l2cap_disconn_ind(struct hci_conn *hcon)
4466 struct l2cap_conn *conn = hcon->l2cap_data;
4468 BT_DBG("hcon %p", hcon);
4470 if (hcon->type != ACL_LINK || !conn)
4473 return conn->disc_reason;
/*
 * HCI callback for ACL disconnect completion: tear down the whole L2CAP
 * connection, mapping the HCI reason to an errno via bt_err().
 */
4476 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4478 BT_DBG("hcon %p reason %d", hcon, reason);
4480 if (hcon->type != ACL_LINK)
4483 l2cap_conn_del(hcon, bt_err(reason));
/*
 * React to a change in link encryption for one channel: losing
 * encryption on a MEDIUM-security channel arms a 5s grace timer, on a
 * HIGH-security channel closes the socket immediately; regaining it
 * clears the grace timer.  Only SEQPACKET/STREAM sockets are affected.
 * (Extraction elides some original lines; comments only added.)
 */
4488 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4490 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4493 if (encrypt == 0x00) {
4494 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4495 l2cap_sock_clear_timer(sk);
4496 l2cap_sock_set_timer(sk, HZ * 5);
4497 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4498 __l2cap_sock_close(sk, ECONNREFUSED);
4500 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4501 l2cap_sock_clear_timer(sk);
/*
 * HCI callback after an authentication/encryption procedure finishes.
 * Walks every channel on the connection: established channels get
 * their encryption state re-checked; channels waiting in BT_CONNECT
 * now send their Connection Request; channels in BT_CONNECT2 answer
 * the pending request with success or a security block.
 * (Extraction elides some original lines; comments only added.)
 */
4505 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4507 struct l2cap_chan_list *l;
4508 struct l2cap_conn *conn = hcon->l2cap_data;
4514 l = &conn->chan_list;
4516 BT_DBG("conn %p", conn);
4518 read_lock(&l->lock);
4520 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Skip channels whose connect is still pending elsewhere. */
4523 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4528 if (!status && (sk->sk_state == BT_CONNECTED ||
4529 sk->sk_state == BT_CONFIG)) {
4530 l2cap_check_encryption(sk, encrypt);
4535 if (sk->sk_state == BT_CONNECT) {
/* Security is now in place: fire the deferred Connection Request. */
4537 struct l2cap_conn_req req;
4538 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4539 req.psm = l2cap_pi(sk)->psm;
4541 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4542 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
4544 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4545 L2CAP_CONN_REQ, sizeof(req), &req);
4547 l2cap_sock_clear_timer(sk);
4548 l2cap_sock_set_timer(sk, HZ / 10);
4550 } else if (sk->sk_state == BT_CONNECT2) {
4551 struct l2cap_conn_rsp rsp;
/* Security succeeded: proceed to configuration ... */
4555 sk->sk_state = BT_CONFIG;
4556 result = L2CAP_CR_SUCCESS;
/* ... or refuse with a security block and schedule teardown. */
4558 sk->sk_state = BT_DISCONN;
4559 l2cap_sock_set_timer(sk, HZ / 10);
4560 result = L2CAP_CR_SEC_BLOCK;
4563 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4564 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4565 rsp.result = cpu_to_le16(result);
4566 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4567 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4568 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4574 read_unlock(&l->lock);
/*
 * HCI callback receiving raw ACL data.  ACL_START fragments either hold
 * a complete L2CAP frame (processed immediately) or begin reassembly
 * into conn->rx_skb; continuation fragments are appended until rx_len
 * reaches zero.  Any framing inconsistency marks the connection
 * unreliable (ECOMM) and resets reassembly state.
 * (Extraction elides some original lines; comments only added.)
 */
4579 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4581 struct l2cap_conn *conn = hcon->l2cap_data;
4583 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4586 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4588 if (flags & ACL_START) {
4589 struct l2cap_hdr *hdr;
/* A new start while reassembly is in progress: discard the old frame. */
4593 BT_ERR("Unexpected start frame (len %d)", skb->len);
4594 kfree_skb(conn->rx_skb);
4595 conn->rx_skb = NULL;
4597 l2cap_conn_unreliable(conn, ECOMM);
4601 BT_ERR("Frame is too short (len %d)", skb->len);
4602 l2cap_conn_unreliable(conn, ECOMM);
4606 hdr = (struct l2cap_hdr *) skb->data;
4607 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4609 if (len == skb->len) {
4610 /* Complete frame received */
4611 l2cap_recv_frame(conn, skb);
4615 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4617 if (skb->len > len) {
4618 BT_ERR("Frame is too long (len %d, expected len %d)",
4620 l2cap_conn_unreliable(conn, ECOMM);
4624 /* Allocate skb for the complete frame (with header) */
4625 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4629 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4631 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4633 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4635 if (!conn->rx_len) {
4636 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4637 l2cap_conn_unreliable(conn, ECOMM);
4641 if (skb->len > conn->rx_len) {
4642 BT_ERR("Fragment is too long (len %d, expected %d)",
4643 skb->len, conn->rx_len);
4644 kfree_skb(conn->rx_skb);
4645 conn->rx_skb = NULL;
4647 l2cap_conn_unreliable(conn, ECOMM);
4651 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4653 conn->rx_len -= skb->len;
4655 if (!conn->rx_len) {
4656 /* Complete frame received */
4657 l2cap_recv_frame(conn, conn->rx_skb);
4658 conn->rx_skb = NULL;
/*
 * seq_file show callback for /sys/kernel/debug/bluetooth/l2cap: dump
 * one line per L2CAP socket (addresses, state, PSM, CIDs, MTUs,
 * security level) under the socket-list read lock.
 * (Extraction elides some original lines; comments only added.)
 */
4667 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4670 struct hlist_node *node;
4672 read_lock_bh(&l2cap_sk_list.lock);
4674 sk_for_each(sk, node, &l2cap_sk_list.head) {
4675 struct l2cap_pinfo *pi = l2cap_pi(sk);
4677 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4678 batostr(&bt_sk(sk)->src),
4679 batostr(&bt_sk(sk)->dst),
4680 sk->sk_state, __le16_to_cpu(pi->psm),
4682 pi->imtu, pi->omtu, pi->sec_level);
4685 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open callback: standard single_open() wrapper for the show fn. */
4690 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4692 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (seq_file based). */
4695 static const struct file_operations l2cap_debugfs_fops = {
4696 .open = l2cap_debugfs_open,
4698 .llseek = seq_lseek,
4699 .release = single_release,
/* Dentry of the debugfs file, created in l2cap_init(). */
4702 static struct dentry *l2cap_debugfs;
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets. */
4704 static const struct proto_ops l2cap_sock_ops = {
4705 .family = PF_BLUETOOTH,
4706 .owner = THIS_MODULE,
4707 .release = l2cap_sock_release,
4708 .bind = l2cap_sock_bind,
4709 .connect = l2cap_sock_connect,
4710 .listen = l2cap_sock_listen,
4711 .accept = l2cap_sock_accept,
4712 .getname = l2cap_sock_getname,
4713 .sendmsg = l2cap_sock_sendmsg,
4714 .recvmsg = l2cap_sock_recvmsg,
4715 .poll = bt_sock_poll,
4716 .ioctl = bt_sock_ioctl,
4717 .mmap = sock_no_mmap,
4718 .socketpair = sock_no_socketpair,
4719 .shutdown = l2cap_sock_shutdown,
4720 .setsockopt = l2cap_sock_setsockopt,
4721 .getsockopt = l2cap_sock_getsockopt
4724 static const struct net_proto_family l2cap_sock_family_ops = {
4725 .family = PF_BLUETOOTH,
4726 .owner = THIS_MODULE,
4727 .create = l2cap_sock_create,
4730 static struct hci_proto l2cap_hci_proto = {
4732 .id = HCI_PROTO_L2CAP,
4733 .connect_ind = l2cap_connect_ind,
4734 .connect_cfm = l2cap_connect_cfm,
4735 .disconn_ind = l2cap_disconn_ind,
4736 .disconn_cfm = l2cap_disconn_cfm,
4737 .security_cfm = l2cap_security_cfm,
4738 .recv_acldata = l2cap_recv_acldata
4741 static int __init l2cap_init(void)
4745 err = proto_register(&l2cap_proto, 0);
4749 _busy_wq = create_singlethread_workqueue("l2cap");
4753 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4755 BT_ERR("L2CAP socket registration failed");
4759 err = hci_register_proto(&l2cap_hci_proto);
4761 BT_ERR("L2CAP protocol registration failed");
4762 bt_sock_unregister(BTPROTO_L2CAP);
4767 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4768 bt_debugfs, NULL, &l2cap_debugfs_fops);
4770 BT_ERR("Failed to create L2CAP debug file");
4773 BT_INFO("L2CAP ver %s", VERSION);
4774 BT_INFO("L2CAP socket layer initialized");
4779 proto_unregister(&l2cap_proto);
4783 static void __exit l2cap_exit(void)
4785 debugfs_remove(l2cap_debugfs);
4787 flush_workqueue(_busy_wq);
4788 destroy_workqueue(_busy_wq);
4790 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4791 BT_ERR("L2CAP socket unregistration failed");
4793 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4794 BT_ERR("L2CAP protocol unregistration failed");
4796 proto_unregister(&l2cap_proto);
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
/* Exported so dependent modules (RFCOMM, BNEP, ...) can reference it and
 * thereby pull this module in via symbol dependency. */
EXPORT_SYMBOL(l2cap_load);

module_init(l2cap_init);
module_exit(l2cap_exit);

/* NOTE(review): enable_ertm is declared "static int" above but registered
 * with type "bool" here; tolerated on this kernel version, but newer
 * kernels require the backing variable to actually be bool — confirm
 * against the target kernel's moduleparam rules. */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
/* Matches the MODULE_ALIAS used by bt_sock_create() request_module()
 * for BTPROTO_L2CAP (protocol number 0). */
MODULE_ALIAS("bt-proto-0");