2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/slab.h>
34 #include <linux/poll.h>
35 #include <linux/fcntl.h>
36 #include <linux/init.h>
37 #include <linux/skbuff.h>
38 #include <linux/workqueue.h>
39 #include <linux/interrupt.h>
40 #include <linux/compat.h>
41 #include <linux/socket.h>
42 #include <linux/ioctl.h>
45 #include <linux/uaccess.h>
46 #include <asm/unaligned.h>
48 #include <net/bluetooth/bluetooth.h>
49 #include <net/bluetooth/hci_core.h>
50 #include <net/bluetooth/hci_mon.h>
/* Number of sockets currently bound to HCI_CHANNEL_MONITOR; monitor
 * traffic is only generated while this is non-zero (see
 * hci_send_to_monitor() and hci_sock_dev_event()). */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
54 /* ----- HCI socket interface ----- */
56 static inline int hci_test_bit(int nr, void *addr)
58 return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
/* Default security filter applied to sockets without CAP_NET_RAW: it
 * bounds which events and commands unprivileged sockets may see/send
 * (consulted in setsockopt and sendmsg below). */
static struct hci_sec_filter hci_sec_filter = {
	/* Allowed events: bitmap of 2 x 32-bit words */
	{ 0x1000d9fe, 0x0000b00c },
	/* Allowed commands: one OCF bitmap per OGF, indexed by OGF
	 * (labels inferred from standard OGF numbering and the
	 * OGF_STATUS_PARAM label below - TODO confirm) */
	/* OGF_LINK_CTL */
	{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
	/* OGF_LINK_POLICY */
	{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
	/* OGF_HOST_CTL */
	{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
	/* OGF_INFO_PARAM */
	{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
	/* OGF_STATUS_PARAM */
	{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
/* Global list of all HCI sockets, protected by its embedded rwlock. */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
/* Broadcast an HCI frame to every RAW-channel socket bound to 'hdev',
 * honouring each socket's packet-type / event / opcode filter.  A
 * single private copy with the packet-type byte pushed in front is
 * built lazily and then cloned per receiver.
 * NOTE(review): several lines (declarations, continues, braces) appear
 * lost in this extract - compare against the upstream file. */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;	/* lazily-built copy, shared by all receivers */

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct hci_filter *flt;
		struct sk_buff *nskb;

		/* Only sockets bound to this device */
		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)

		/* Don't send frame to the socket it came from */

		if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)

		/* Apply the socket's filter; vendor packets are matched
		 * against type bit 0 */
		flt = &hci_pi(sk)->filter;

		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
			      0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),

		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
			/* First event byte selects a bit in the event mask */
			register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

			if (!hci_test_bit(evt, &flt->event_mask))

			/* Opcode filter: CMD_COMPLETE carries the opcode at
			 * offset 3, CMD_STATUS at offset 4 */
			((evt == HCI_EV_CMD_COMPLETE &&
			  get_unaligned((__le16 *)(skb->data + 3))) ||
			 (evt == HCI_EV_CMD_STATUS &&
			  get_unaligned((__le16 *)(skb->data + 4)))))

		/* Create a private copy with headroom */
		skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);

		/* Put type byte before the data */
		memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);

		nskb = skb_clone(skb_copy, GFP_ATOMIC);

		/* Queue to the receiver; on failure the clone is dropped */
		if (sock_queue_rcv_skb(sk, nskb))

	read_unlock(&hci_sk_list.lock);
/* Broadcast a management frame to every bound HCI_CHANNEL_CONTROL
 * socket, skipping 'skip_sk' (the originator). */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
	struct hlist_node *node;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Skip the original socket */

		if (sk->sk_state != BT_BOUND)

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)

		nskb = skb_clone(skb, GFP_ATOMIC);

		/* Drop the clone if the receive queue rejects it */
		if (sock_queue_rcv_skb(sk, nskb))

	read_unlock(&hci_sk_list.lock);
/* Mirror an HCI frame to every bound monitor socket, prefixed with a
 * hci_mon_hdr carrying a direction-aware opcode, the device index and
 * the payload length.  Cheap no-op while no monitors are attached.
 * NOTE(review): case labels / break statements appear lost in this
 * extract. */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
	struct hlist_node *node;
	struct sk_buff *skb_copy = NULL;

	/* Fast path: nobody is listening */
	if (!atomic_read(&monitor_promisc))

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (+ direction for data packets) onto a monitor
	 * opcode */
	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
		opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
			opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
			opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)

			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom */
			skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC);

			/* Put header before the data */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);

		nskb = skb_clone(skb_copy, GFP_ATOMIC);

		if (sock_queue_rcv_skb(sk, nskb))

	read_unlock(&hci_sk_list.lock);
/* Deliver an already-formatted monitor event (hci_mon_hdr included)
 * to every bound monitor socket. */
static void send_monitor_event(struct sk_buff *skb)
	struct hlist_node *node;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, node, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)

		nskb = skb_clone(skb, GFP_ATOMIC);

		/* Drop the clone if the receive queue rejects it */
		if (sock_queue_rcv_skb(sk, nskb))

	read_unlock(&hci_sk_list.lock);
/* Build a monitor-channel event skb for 'event' on 'hdev'.  Presumably
 * HCI_DEV_REG yields a NEW_INDEX record (type/addr/name) and the other
 * path a bare DEL_INDEX - the event dispatch lines are missing from
 * this extract, TODO confirm.  A timestamped hci_mon_hdr is pushed in
 * front of the payload. */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;

	skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);

	ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
	ni->type = hdev->dev_type;
	bacpy(&ni->bdaddr, &hdev->bdaddr);
	memcpy(ni->name, hdev->name, 8);	/* fixed 8-byte name field */

	opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);

	skb = bt_skb_alloc(0, GFP_ATOMIC);	/* DEL_INDEX carries no payload */

	opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);

	__net_timestamp(skb);

	/* Monitor header: opcode, device index, payload length */
	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
/* Replay a HCI_DEV_REG monitor event for every registered device to a
 * freshly bound monitor socket, so it learns the current device set. */
static void send_monitor_replay(struct sock *sk)
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {

		skb = create_monitor_event(hdev, HCI_DEV_REG);

		/* Free the event if the socket will not take it */
		if (sock_queue_rcv_skb(sk, skb))

	read_unlock(&hci_dev_list_lock);
/* Generate an internal stack event: fabricate an HCI_EV_STACK_INTERNAL
 * event frame carrying 'type' plus 'dlen' bytes of 'data', mark it as
 * incoming, and push it through the normal RAW-socket delivery path. */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	memcpy(ev->data, data, dlen);

	/* Mark as incoming so direction-aware consumers see it right */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;
	hci_send_to_sock(hdev, skb);
/* Handle device state notifications: forward the event to monitor
 * sockets, raise an HCI_EV_SI_DEVICE stack event for RAW sockets, and
 * on HCI_DEV_UNREG detach every socket still bound to the device. */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {

		skb = create_monitor_event(hdev, event);
		send_monitor_event(skb);

	/* Send event to sockets */
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct hlist_node *node;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, node, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				/* Mark unbound and wake any sleepers */
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);
		read_unlock(&hci_sk_list.lock);
/* Release an HCI socket: drop monitor accounting, unlink it from the
 * global list, release the bound device's promiscuity reference and
 * purge the pending queues. */
static int hci_sock_release(struct socket *sock)
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	/* Bound sockets hold a promiscuity reference on the device */
	atomic_dec(&hdev->promisc);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
/* Copy a bdaddr_t from userspace and add it to the device blacklist
 * (under the device lock - the hci_dev_lock() line appears missing
 * from this extract). */
static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))

	err = hci_blacklist_add(hdev, &bdaddr, 0);

	hci_dev_unlock(hdev);
/* Copy a bdaddr_t from userspace and remove it from the device
 * blacklist (mirror of hci_sock_blacklist_add). */
static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))

	err = hci_blacklist_del(hdev, &bdaddr, 0);

	hci_dev_unlock(hdev);
/* Ioctls that require a bound socket: raw-mode toggling, connection /
 * auth info queries, blacklist editing, and driver-specific commands.
 * NOTE(review): the switch/case labels are missing from this extract. */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	/* Raw-mode toggle is privileged and refused on quirky devices */
	if (!capable(CAP_NET_ADMIN))

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))

	set_bit(HCI_RAW, &hdev->flags);
	clear_bit(HCI_RAW, &hdev->flags);

	return hci_get_conn_info(hdev, (void __user *) arg);

	return hci_get_auth_info(hdev, (void __user *) arg);

	/* Blacklist editing requires CAP_NET_ADMIN */
	if (!capable(CAP_NET_ADMIN))
	return hci_sock_blacklist_add(hdev, (void __user *) arg);

	if (!capable(CAP_NET_ADMIN))
	return hci_sock_blacklist_del(hdev, (void __user *) arg);

	/* Anything else goes to the driver's ioctl handler, if any */
	return hdev->ioctl(hdev, cmd, arg);
/* Device-independent ioctls (dev/conn listing, dev up/down/reset,
 * inquiry); anything unknown falls through to the bound-socket handler.
 * NOTE(review): the switch/case labels are missing from this extract. */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *) arg;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	return hci_get_dev_list(argp);

	return hci_get_dev_info(argp);

	return hci_get_conn_list(argp);

	/* Device open/close/reset and raw commands are privileged */
	if (!capable(CAP_NET_ADMIN))
	return hci_dev_open(arg);

	if (!capable(CAP_NET_ADMIN))
	return hci_dev_close(arg);

	if (!capable(CAP_NET_ADMIN))
	return hci_dev_reset(arg);

	if (!capable(CAP_NET_ADMIN))
	return hci_dev_reset_stat(arg);

	if (!capable(CAP_NET_ADMIN))
	return hci_dev_cmd(cmd, argp);

	return hci_inquiry(argp);

	/* Everything else needs a bound socket */
	err = hci_sock_bound_ioctl(sk, cmd, arg);
/* Bind a socket to a channel (RAW / CONTROL / MONITOR) and, for RAW,
 * optionally to a specific device.  CONTROL needs CAP_NET_ADMIN and
 * MONITOR needs CAP_NET_RAW; both are device-less.
 * NOTE(review): error returns and break statements appear missing
 * from this extract. */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;

	BT_DBG("sock %p sk %p", sock, sk);

	/* Copy only as much of the address as the caller supplied */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)

	/* Rebinding an already-bound socket is rejected */
	if (sk->sk_state == BT_BOUND) {

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);

			/* Bound device is held promiscuous while the
			 * socket exists (dropped in hci_sock_release) */
			atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;

	case HCI_CHANNEL_CONTROL:
		/* Control channel is device-less, CAP_NET_ADMIN only */
		if (haddr.hci_dev != HCI_DEV_NONE) {

		if (!capable(CAP_NET_ADMIN)) {

	case HCI_CHANNEL_MONITOR:
		/* Monitor channel is device-less, CAP_NET_RAW only */
		if (haddr.hci_dev != HCI_DEV_NONE) {

		if (!capable(CAP_NET_RAW)) {

		/* Tell the new monitor about existing devices */
		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;
/* Report the socket's address: AF_BLUETOOTH plus the bound device id. */
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
/* Attach the ancillary data requested via the socket's cmsg mask
 * (frame direction and/or receive timestamp) to a received message. */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming);

	if (mask & HCI_CMSG_TSTAMP) {
		struct compat_timeval ctv;

		skb_get_timestamp(skb, &tv);

		/* 32-bit compat callers get a compat_timeval layout */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
/* Dequeue one datagram for the caller.  RAW sockets additionally get
 * cmsg metadata; control and monitor sockets a plain receive
 * timestamp.  Returns the error, or the number of bytes copied. */
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;

	BT_DBG("sock %p, sk %p", sock, sk);

	/* Out-of-band data is not supported */
	if (flags & (MSG_OOB))

	if (sk->sk_state == BT_CLOSED)

	skb = skb_recv_datagram(sk, flags, noblock, &err);

	msg->msg_namelen = 0;

	/* Short reads report truncation */
	msg->msg_flags |= MSG_TRUNC;

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);

	skb_free_datagram(sk, skb);

	return err ? : copied;
/* Transmit path.  Control-channel messages are handed to mgmt; RAW
 * frames are copied from userspace, security-filtered for
 * unprivileged senders, and queued on the device's cmd or raw queue.
 * NOTE(review): error returns, break statements and some braces
 * appear missing from this extract. */
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))

	/* Need at least type byte + opcode + length byte */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
	case HCI_CHANNEL_MONITOR:
		/* monitor channel is read-only */

	hdev = hci_pi(sk)->hdev;

	if (!test_bit(HCI_UP, &hdev->flags)) {

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {

	/* First byte of the frame is the HCI packet type */
	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb->dev = (void *) hdev;

	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Unprivileged senders may only issue commands allowed
		 * by the security filter */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {

		/* OGF 0x3f (vendor-specific) and raw-mode devices
		 * bypass the command queue */
		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);

		/* Data packets require CAP_NET_RAW */
		if (!capable(CAP_NET_RAW)) {

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
/* SOL_HCI socket options: toggle the direction/timestamp cmsg bits and
 * install the packet filter (clamped by hci_sec_filter for sockets
 * without CAP_NET_RAW).  RAW channel only.
 * NOTE(review): case labels and braces appear missing from this
 * extract. */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	/* Only the RAW channel supports these options */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {

	if (get_user(opt, (int __user *)optval)) {

	hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
	hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;

	if (get_user(opt, (int __user *)optval)) {

	hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
	hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;

		struct hci_filter *f = &hci_pi(sk)->filter;

		/* Seed from the current filter so a short write keeps
		 * the remaining fields */
		uf.type_mask = f->type_mask;
		uf.opcode = f->opcode;
		uf.event_mask[0] = *((u32 *) f->event_mask + 0);
		uf.event_mask[1] = *((u32 *) f->event_mask + 1);

	len = min_t(unsigned int, len, sizeof(uf));
	if (copy_from_user(&uf, optval, len)) {

	/* Unprivileged sockets cannot widen beyond the security filter */
	if (!capable(CAP_NET_RAW)) {
		uf.type_mask &= hci_sec_filter.type_mask;
		uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
		uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);

		struct hci_filter *f = &hci_pi(sk)->filter;

		/* Commit the validated filter */
		f->type_mask = uf.type_mask;
		f->opcode = uf.opcode;
		*((u32 *) f->event_mask + 0) = uf.event_mask[0];
		*((u32 *) f->event_mask + 1) = uf.event_mask[1];
/* Read back SOL_HCI options: the cmsg direction/timestamp bits and a
 * snapshot of the current packet filter.  RAW channel only. */
static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))

	/* Only the RAW channel supports these options */
	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {

	if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)

	if (put_user(opt, optval))

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)

		if (put_user(opt, optval))

		struct hci_filter *f = &hci_pi(sk)->filter;

		/* Snapshot the kernel filter into the user layout */
		uf.type_mask = f->type_mask;
		uf.opcode = f->opcode;
		uf.event_mask[0] = *((u32 *) f->event_mask + 0);
		uf.event_mask[1] = *((u32 *) f->event_mask + 1);

	len = min_t(unsigned int, len, sizeof(uf));
	if (copy_to_user(optval, &uf, len))
/* Datagram-style proto_ops for HCI sockets; unsupported operations
 * use the sock_no_* stubs. */
static const struct proto_ops hci_sock_ops = {
	.family = PF_BLUETOOTH,
	.owner = THIS_MODULE,
	.release = hci_sock_release,
	.bind = hci_sock_bind,
	.getname = hci_sock_getname,
	.sendmsg = hci_sock_sendmsg,
	.recvmsg = hci_sock_recvmsg,
	.ioctl = hci_sock_ioctl,
	.poll = datagram_poll,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = hci_sock_setsockopt,
	.getsockopt = hci_sock_getsockopt,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.mmap = sock_no_mmap
/* Backing proto; per-socket state is carried in struct hci_pinfo. */
static struct proto hci_sk_proto = {
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct hci_pinfo)
/* Create a new HCI socket.  Only SOCK_RAW is supported; the socket
 * starts unbound (BT_OPEN) and is linked into the global list. */
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
/* PF_BLUETOOTH family hook used to create BTPROTO_HCI sockets. */
static const struct net_proto_family hci_sock_family_ops = {
	.family = PF_BLUETOOTH,
	.owner = THIS_MODULE,
	.create = hci_sock_create,
/* Register the HCI socket proto and the BTPROTO_HCI address family;
 * on family registration failure the proto registration is rolled
 * back (error-path labels appear missing from this extract). */
int __init hci_sock_init(void)
	err = proto_register(&hci_sk_proto, 0);

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);

	BT_INFO("HCI socket layer initialized");

	BT_ERR("HCI socket registration failed");
	proto_unregister(&hci_sk_proto);
1127 void hci_sock_cleanup(void)
1129 if (bt_sock_unregister(BTPROTO_HCI) < 0)
1130 BT_ERR("HCI socket unregistration failed");
1132 proto_unregister(&hci_sk_proto);