2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/slab.h>
34 #include <linux/poll.h>
35 #include <linux/fcntl.h>
36 #include <linux/init.h>
37 #include <linux/skbuff.h>
38 #include <linux/workqueue.h>
39 #include <linux/interrupt.h>
40 #include <linux/compat.h>
41 #include <linux/socket.h>
42 #include <linux/ioctl.h>
45 #include <linux/uaccess.h>
46 #include <asm/unaligned.h>
48 #include <net/bluetooth/bluetooth.h>
49 #include <net/bluetooth/hci_core.h>
50 #include <net/bluetooth/hci_mon.h>
/* Count of sockets currently bound to the HCI monitor channel; the
 * monitor send path bails out cheaply when this is zero. */
52 static atomic_t monitor_promisc = ATOMIC_INIT(0);
54 /* ----- HCI socket interface ----- */
/* Test bit @nr in the bitmask starting at @addr.
 *
 * The mask is treated as an array of 32-bit words: word nr >> 5,
 * bit nr & 31.  Returns non-zero when the bit is set, zero otherwise.
 * The shifted one is an unsigned 32-bit constant, so shifting into
 * bit 31 is well defined (no signed-overflow UB).
 */
static inline int hci_test_bit(int nr, void *addr)
{
	return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
/* Security filter applied to unprivileged (no CAP_NET_RAW) RAW
 * sockets: bitmasks of the packet types, events and per-OGF command
 * OCFs such sockets may send/receive.  hci_test_bit() indexes these
 * as arrays of 32-bit words.
 * NOTE(review): the struct's field names and several initializer
 * lines are not visible in this excerpt -- confirm which mask is
 * which against struct hci_sec_filter before editing values. */
62 static struct hci_sec_filter hci_sec_filter = {
66 { 0x1000d9fe, 0x0000b00c },
71 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
73 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
75 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
77 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
78 /* OGF_STATUS_PARAM */
79 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
/* Global list of all open HCI sockets, protected by its rwlock. */
83 static struct bt_sock_list hci_sk_list = {
84 .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
87 /* Send frame to RAW socket */
/* Deliver @skb to every bound RAW-channel socket attached to @hdev
 * whose per-socket filter accepts it.  A private copy with the
 * packet-type byte pushed in front is built lazily once, then cloned
 * for each receiver.  (Several skip/error lines are not visible in
 * this excerpt.) */
88 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
91 struct hlist_node *node;
92 struct sk_buff *skb_copy = NULL;
94 BT_DBG("hdev %p len %d", hdev, skb->len);
96 read_lock(&hci_sk_list.lock);
98 sk_for_each(sk, node, &hci_sk_list.head) {
99 struct hci_filter *flt;
100 struct sk_buff *nskb;
/* Only sockets bound to this very device are considered */
102 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
105 /* Don't send frame to the socket it came from */
109 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
/* Per-socket packet-type filter; vendor packets map to bit 0 */
113 flt = &hci_pi(sk)->filter;
115 if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
116 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
/* Events are additionally checked against the event mask and, for
 * Command Complete / Command Status, the opcode filter (opcode sits
 * at offset 3 resp. 4 in the event payload). */
120 if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
121 int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
123 if (!hci_test_bit(evt, &flt->event_mask))
127 ((evt == HCI_EV_CMD_COMPLETE &&
129 get_unaligned((__le16 *)(skb->data + 3))) ||
130 (evt == HCI_EV_CMD_STATUS &&
132 get_unaligned((__le16 *)(skb->data + 4)))))
137 /* Create a private copy with headroom */
138 skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
142 /* Put type byte before the data */
143 memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
/* Each receiver gets its own clone of the shared copy */
146 nskb = skb_clone(skb_copy, GFP_ATOMIC);
150 if (sock_queue_rcv_skb(sk, nskb))
154 read_unlock(&hci_sk_list.lock);
159 /* Send frame to control socket */
/* Broadcast @skb to every bound HCI_CHANNEL_CONTROL socket except
 * @skip_sk (the originator); each receiver gets its own clone. */
160 void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
163 struct hlist_node *node;
165 BT_DBG("len %d", skb->len);
167 read_lock(&hci_sk_list.lock);
169 sk_for_each(sk, node, &hci_sk_list.head) {
170 struct sk_buff *nskb;
172 /* Skip the original socket */
176 if (sk->sk_state != BT_BOUND)
179 if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
182 nskb = skb_clone(skb, GFP_ATOMIC);
186 if (sock_queue_rcv_skb(sk, nskb))
190 read_unlock(&hci_sk_list.lock);
193 /* Send frame to monitor socket */
/* Mirror traffic of @hdev to all HCI_CHANNEL_MONITOR sockets, with a
 * hci_mon_hdr (opcode/index/len) pushed in front of the payload. */
194 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
197 struct hlist_node *node;
198 struct sk_buff *skb_copy = NULL;
/* Cheap exit when nobody is monitoring */
201 if (!atomic_read(&monitor_promisc))
204 BT_DBG("hdev %p len %d", hdev, skb->len);
/* Map packet type (+ direction for data packets) to monitor opcode */
206 switch (bt_cb(skb)->pkt_type) {
207 case HCI_COMMAND_PKT:
208 opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
211 opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
213 case HCI_ACLDATA_PKT:
214 if (bt_cb(skb)->incoming)
215 opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
217 opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
219 case HCI_SCODATA_PKT:
220 if (bt_cb(skb)->incoming)
221 opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
223 opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
229 read_lock(&hci_sk_list.lock);
231 sk_for_each(sk, node, &hci_sk_list.head) {
232 struct sk_buff *nskb;
234 if (sk->sk_state != BT_BOUND)
237 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
241 struct hci_mon_hdr *hdr;
243 /* Create a private copy with headroom */
244 skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
249 /* Put header before the data */
250 hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
251 hdr->opcode = opcode;
252 hdr->index = cpu_to_le16(hdev->id);
253 hdr->len = cpu_to_le16(skb->len);
/* The header-carrying copy is shared; clone it per receiver */
256 nskb = skb_clone(skb_copy, GFP_ATOMIC);
260 if (sock_queue_rcv_skb(sk, nskb))
264 read_unlock(&hci_sk_list.lock);
/* Queue a clone of @skb (an already-formatted monitor packet) to
 * every bound monitor-channel socket. */
269 static void send_monitor_event(struct sk_buff *skb)
272 struct hlist_node *node;
274 BT_DBG("len %d", skb->len);
276 read_lock(&hci_sk_list.lock);
278 sk_for_each(sk, node, &hci_sk_list.head) {
279 struct sk_buff *nskb;
281 if (sk->sk_state != BT_BOUND)
284 if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
287 nskb = skb_clone(skb, GFP_ATOMIC);
291 if (sock_queue_rcv_skb(sk, nskb))
295 read_unlock(&hci_sk_list.lock);
/* Build a monitor packet describing a device lifecycle @event.
 * HCI_DEV_REG yields a NEW_INDEX record carrying type, bdaddr and the
 * first 8 bytes of the device name; the other visible branch yields
 * an empty DEL_INDEX record.  A hci_mon_hdr is prepended and the skb
 * timestamped.  NOTE(review): the branch/return/error lines between
 * the fragments below are not visible in this excerpt. */
298 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
300 struct hci_mon_hdr *hdr;
301 struct hci_mon_new_index *ni;
307 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
311 ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
312 ni->type = hdev->dev_type;
314 bacpy(&ni->bdaddr, &hdev->bdaddr);
315 memcpy(ni->name, hdev->name, 8);
317 opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
321 skb = bt_skb_alloc(0, GFP_ATOMIC);
325 opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
/* Timestamp and prepend the common monitor header */
332 __net_timestamp(skb);
334 hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
335 hdr->opcode = opcode;
336 hdr->index = cpu_to_le16(hdev->id);
337 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
/* Replay a HCI_DEV_REG monitor event for every registered device so a
 * freshly bound monitor socket learns the current index set. */
342 static void send_monitor_replay(struct sock *sk)
344 struct hci_dev *hdev;
346 read_lock(&hci_dev_list_lock);
348 list_for_each_entry(hdev, &hci_dev_list, list) {
351 skb = create_monitor_event(hdev, HCI_DEV_REG);
355 if (sock_queue_rcv_skb(sk, skb))
359 read_unlock(&hci_dev_list_lock);
362 /* Generate internal stack event */
/* Synthesize an HCI_EV_STACK_INTERNAL event carrying @dlen bytes of
 * @data (stack event @type) and feed it to RAW sockets via
 * hci_send_to_sock().  @hdev may be NULL for device-less events (see
 * the HCI_EV_SI_DEVICE caller). */
363 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
365 struct hci_event_hdr *hdr;
366 struct hci_ev_stack_internal *ev;
369 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
373 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
374 hdr->evt = HCI_EV_STACK_INTERNAL;
375 hdr->plen = sizeof(*ev) + dlen;
377 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
379 memcpy(ev->data, data, dlen);
/* Mark as incoming so direction-aware consumers classify it right */
381 bt_cb(skb)->incoming = 1;
382 __net_timestamp(skb);
384 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
385 skb->dev = (void *) hdev;
386 hci_send_to_sock(hdev, skb);
/* Propagate a device lifecycle @event to monitor sockets (as a
 * monitor packet) and to RAW sockets (as a stack-internal
 * HCI_EV_SI_DEVICE event).  On HCI_DEV_UNREG, every socket still
 * bound to @hdev is detached and dropped back to BT_OPEN. */
390 void hci_sock_dev_event(struct hci_dev *hdev, int event)
392 struct hci_ev_si_device ev;
394 BT_DBG("hdev %s event %d", hdev->name, event);
396 /* Send event to monitor */
397 if (atomic_read(&monitor_promisc)) {
400 skb = create_monitor_event(hdev, event);
402 send_monitor_event(skb);
407 /* Send event to sockets */
409 ev.dev_id = hdev->id;
410 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
412 if (event == HCI_DEV_UNREG) {
414 struct hlist_node *node;
416 /* Detach sockets from device */
417 read_lock(&hci_sk_list.lock);
418 sk_for_each(sk, node, &hci_sk_list.head) {
419 bh_lock_sock_nested(sk);
420 if (hci_pi(sk)->hdev == hdev) {
421 hci_pi(sk)->hdev = NULL;
/* Wake the owner: the socket is unbound from now on */
423 sk->sk_state = BT_OPEN;
424 sk->sk_state_change(sk);
430 read_unlock(&hci_sk_list.lock);
/* Release an HCI socket: drop monitor accounting, unlink it from the
 * global socket list, drop the bound device's promisc count and purge
 * the receive/write queues.  NOTE(review): the NULL checks and
 * hci_dev_put around the promisc decrement are on lines not visible
 * in this excerpt. */
434 static int hci_sock_release(struct socket *sock)
436 struct sock *sk = sock->sk;
437 struct hci_dev *hdev;
439 BT_DBG("sock %p sk %p", sock, sk);
444 hdev = hci_pi(sk)->hdev;
446 if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
447 atomic_dec(&monitor_promisc);
449 bt_sock_unlink(&hci_sk_list, sk);
452 atomic_dec(&hdev->promisc);
458 skb_queue_purge(&sk->sk_receive_queue);
459 skb_queue_purge(&sk->sk_write_queue);
/* Ioctl helper: copy a Bluetooth address from userspace and add it to
 * @hdev's blacklist under the device lock. */
465 static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
470 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
475 err = hci_blacklist_add(hdev, &bdaddr, 0);
477 hci_dev_unlock(hdev);
/* Ioctl helper: copy a Bluetooth address from userspace and remove it
 * from @hdev's blacklist under the device lock. */
482 static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
487 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
492 err = hci_blacklist_del(hdev, &bdaddr, 0);
494 hci_dev_unlock(hdev);
499 /* Ioctls that require bound socket */
/* Dispatch device-specific ioctls for a socket already bound to a
 * device.  Privileged commands require CAP_NET_ADMIN; anything
 * unrecognized falls through to the driver's ->ioctl hook. */
500 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
503 struct hci_dev *hdev = hci_pi(sk)->hdev;
510 if (!capable(CAP_NET_ADMIN))
/* Raw-mode toggle is refused for quirk-raw devices */
513 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
517 set_bit(HCI_RAW, &hdev->flags);
519 clear_bit(HCI_RAW, &hdev->flags);
524 return hci_get_conn_info(hdev, (void __user *) arg);
527 return hci_get_auth_info(hdev, (void __user *) arg);
530 if (!capable(CAP_NET_ADMIN))
532 return hci_sock_blacklist_add(hdev, (void __user *) arg);
535 if (!capable(CAP_NET_ADMIN))
537 return hci_sock_blacklist_del(hdev, (void __user *) arg);
541 return hdev->ioctl(hdev, cmd, arg);
/* Top-level ioctl dispatcher.  Device/connection list and info
 * queries need no privilege; open/close/reset/reset-stat and command
 * injection require CAP_NET_ADMIN.  Unhandled commands are forwarded
 * to hci_sock_bound_ioctl(). */
546 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
549 struct sock *sk = sock->sk;
550 void __user *argp = (void __user *) arg;
553 BT_DBG("cmd %x arg %lx", cmd, arg);
557 return hci_get_dev_list(argp);
560 return hci_get_dev_info(argp);
563 return hci_get_conn_list(argp);
566 if (!capable(CAP_NET_ADMIN))
568 return hci_dev_open(arg);
571 if (!capable(CAP_NET_ADMIN))
573 return hci_dev_close(arg);
576 if (!capable(CAP_NET_ADMIN))
578 return hci_dev_reset(arg);
581 if (!capable(CAP_NET_ADMIN))
583 return hci_dev_reset_stat(arg);
593 if (!capable(CAP_NET_ADMIN))
595 return hci_dev_cmd(cmd, argp);
598 return hci_inquiry(argp);
602 err = hci_sock_bound_ioctl(sk, cmd, arg);
/* Bind an HCI socket to a channel (RAW / CONTROL / MONITOR) and, for
 * RAW, optionally to a specific device.  CONTROL requires
 * CAP_NET_ADMIN; MONITOR requires CAP_NET_RAW and triggers a replay
 * of all existing device indexes.  Rebinding an already-bound socket
 * is rejected. */
608 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
611 struct sockaddr_hci haddr;
612 struct sock *sk = sock->sk;
613 struct hci_dev *hdev = NULL;
616 BT_DBG("sock %p sk %p", sock, sk);
/* Accept short addresses: copy at most sizeof(haddr), rest stays
 * zeroed from the memset */
621 memset(&haddr, 0, sizeof(haddr));
622 len = min_t(unsigned int, sizeof(haddr), addr_len);
623 memcpy(&haddr, addr, len);
625 if (haddr.hci_family != AF_BLUETOOTH)
630 if (sk->sk_state == BT_BOUND) {
635 switch (haddr.hci_channel) {
636 case HCI_CHANNEL_RAW:
637 if (hci_pi(sk)->hdev) {
642 if (haddr.hci_dev != HCI_DEV_NONE) {
643 hdev = hci_dev_get(haddr.hci_dev);
/* Device traffic is mirrored to this socket from now on */
649 atomic_inc(&hdev->promisc);
652 hci_pi(sk)->hdev = hdev;
655 case HCI_CHANNEL_CONTROL:
656 if (haddr.hci_dev != HCI_DEV_NONE) {
661 if (!capable(CAP_NET_ADMIN)) {
668 case HCI_CHANNEL_MONITOR:
669 if (haddr.hci_dev != HCI_DEV_NONE) {
674 if (!capable(CAP_NET_RAW)) {
/* Tell the new monitor about every registered device */
679 send_monitor_replay(sk);
681 atomic_inc(&monitor_promisc);
690 hci_pi(sk)->channel = haddr.hci_channel;
691 sk->sk_state = BT_BOUND;
/* Report the socket's bound address: AF_BLUETOOTH plus the device id.
 * NOTE(review): no NULL check on hdev is visible in this excerpt;
 * confirm the hidden lines (706-711) guard against an unbound socket
 * before hdev->id is dereferenced below. */
698 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
699 int *addr_len, int peer)
701 struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
702 struct sock *sk = sock->sk;
703 struct hci_dev *hdev = hci_pi(sk)->hdev;
705 BT_DBG("sock %p sk %p", sock, sk);
712 *addr_len = sizeof(*haddr);
713 haddr->hci_family = AF_BLUETOOTH;
714 haddr->hci_dev = hdev->id;
/* Attach ancillary data to a received message according to the
 * socket's cmsg mask: packet direction (HCI_CMSG_DIR) and/or receive
 * timestamp (HCI_CMSG_TSTAMP). */
720 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
723 __u32 mask = hci_pi(sk)->cmsg_mask;
725 if (mask & HCI_CMSG_DIR) {
726 int incoming = bt_cb(skb)->incoming;
727 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
731 if (mask & HCI_CMSG_TSTAMP) {
733 struct compat_timeval ctv;
739 skb_get_timestamp(skb, &tv);
/* 32-bit compat receivers get a compat_timeval layout instead */
744 if (!COMPAT_USE_64BIT_TIME &&
745 (msg->msg_flags & MSG_CMSG_COMPAT)) {
746 ctv.tv_sec = tv.tv_sec;
747 ctv.tv_usec = tv.tv_usec;
753 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
/* Receive one queued frame.  MSG_OOB is not supported; frames longer
 * than @len are truncated and flagged MSG_TRUNC.  RAW sockets get the
 * dir/timestamp cmsgs via hci_sock_cmsg(); control and monitor
 * sockets get a plain socket timestamp.  Returns the number of bytes
 * copied, or a negative errno. */
757 static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
758 struct msghdr *msg, size_t len, int flags)
760 int noblock = flags & MSG_DONTWAIT;
761 struct sock *sk = sock->sk;
765 BT_DBG("sock %p, sk %p", sock, sk);
767 if (flags & (MSG_OOB))
770 if (sk->sk_state == BT_CLOSED)
773 skb = skb_recv_datagram(sk, flags, noblock, &err);
777 msg->msg_namelen = 0;
781 msg->msg_flags |= MSG_TRUNC;
785 skb_reset_transport_header(skb);
786 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
788 switch (hci_pi(sk)->channel) {
789 case HCI_CHANNEL_RAW:
790 hci_sock_cmsg(sk, msg, skb);
792 case HCI_CHANNEL_CONTROL:
793 case HCI_CHANNEL_MONITOR:
794 sock_recv_timestamp(msg, sk, skb);
798 skb_free_datagram(sk, skb);
800 return err ? : copied;
/* Transmit a frame from userspace.  The first payload byte is the
 * HCI packet type.  Control-channel writes are routed to
 * mgmt_control(); the monitor channel cannot send.  On the RAW
 * channel, commands from tasks without CAP_NET_RAW are validated
 * against hci_sec_filter, and non-command packets require
 * CAP_NET_RAW outright. */
803 static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
804 struct msghdr *msg, size_t len)
806 struct sock *sk = sock->sk;
807 struct hci_dev *hdev;
811 BT_DBG("sock %p sk %p", sock, sk);
813 if (msg->msg_flags & MSG_OOB)
816 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
/* Minimum: type byte plus a command/data header */
819 if (len < 4 || len > HCI_MAX_FRAME_SIZE)
824 switch (hci_pi(sk)->channel) {
825 case HCI_CHANNEL_RAW:
827 case HCI_CHANNEL_CONTROL:
828 err = mgmt_control(sk, msg, len);
830 case HCI_CHANNEL_MONITOR:
838 hdev = hci_pi(sk)->hdev;
844 if (!test_bit(HCI_UP, &hdev->flags)) {
849 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
853 if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
/* Record the leading type byte */
858 bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
860 skb->dev = (void *) hdev;
862 if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
863 u16 opcode = get_unaligned_le16(skb->data);
864 u16 ogf = hci_opcode_ogf(opcode);
865 u16 ocf = hci_opcode_ocf(opcode);
/* Unprivileged senders may only issue whitelisted commands */
867 if (((ogf > HCI_SFLT_MAX_OGF) ||
868 !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
869 &hci_sec_filter.ocf_mask[ogf])) &&
870 !capable(CAP_NET_RAW)) {
/* Vendor commands (OGF 0x3f) and raw-mode devices bypass the
 * command queue and go straight to the transmit path */
875 if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
876 skb_queue_tail(&hdev->raw_q, skb);
877 queue_work(hdev->workqueue, &hdev->tx_work);
879 skb_queue_tail(&hdev->cmd_q, skb);
880 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Non-command packets require full raw capability */
883 if (!capable(CAP_NET_RAW)) {
888 skb_queue_tail(&hdev->raw_q, skb);
889 queue_work(hdev->workqueue, &hdev->tx_work);
/* SOL_HCI socket options: toggle the direction / timestamp cmsg
 * flags and install the per-socket packet filter.  Only RAW-channel
 * sockets may use these; for tasks without CAP_NET_RAW the requested
 * filter is clamped to hci_sec_filter. */
903 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
904 char __user *optval, unsigned int len)
906 struct hci_ufilter uf = { .opcode = 0 };
907 struct sock *sk = sock->sk;
908 int err = 0, opt = 0;
910 BT_DBG("sk %p, opt %d", sk, optname);
914 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
921 if (get_user(opt, (int __user *)optval)) {
927 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
929 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
933 if (get_user(opt, (int __user *)optval)) {
939 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
941 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
/* HCI_FILTER: seed uf with the current filter so a short write
 * leaves the remaining fields unchanged */
946 struct hci_filter *f = &hci_pi(sk)->filter;
948 uf.type_mask = f->type_mask;
949 uf.opcode = f->opcode;
950 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
951 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
954 len = min_t(unsigned int, len, sizeof(uf));
955 if (copy_from_user(&uf, optval, len)) {
/* Without CAP_NET_RAW the filter may not exceed the security mask */
960 if (!capable(CAP_NET_RAW)) {
961 uf.type_mask &= hci_sec_filter.type_mask;
962 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
963 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
967 struct hci_filter *f = &hci_pi(sk)->filter;
969 f->type_mask = uf.type_mask;
970 f->opcode = uf.opcode;
971 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
972 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
/* Read back SOL_HCI options (data-dir flag, timestamp flag, packet
 * filter), mirroring hci_sock_setsockopt().  At most the
 * caller-supplied length of the filter is copied out. */
986 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
987 char __user *optval, int __user *optlen)
989 struct hci_ufilter uf;
990 struct sock *sk = sock->sk;
991 int len, opt, err = 0;
993 BT_DBG("sk %p, opt %d", sk, optname);
995 if (get_user(len, optlen))
1000 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1007 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1012 if (put_user(opt, optval))
1016 case HCI_TIME_STAMP:
1017 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1022 if (put_user(opt, optval))
1028 struct hci_filter *f = &hci_pi(sk)->filter;
1030 uf.type_mask = f->type_mask;
1031 uf.opcode = f->opcode;
1032 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1033 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1036 len = min_t(unsigned int, len, sizeof(uf));
1037 if (copy_to_user(optval, &uf, len))
/* proto_ops for HCI sockets: datagram-style, no connection semantics
 * (listen/connect/accept/shutdown/mmap are all sock_no_*). */
1051 static const struct proto_ops hci_sock_ops = {
1052 .family = PF_BLUETOOTH,
1053 .owner = THIS_MODULE,
1054 .release = hci_sock_release,
1055 .bind = hci_sock_bind,
1056 .getname = hci_sock_getname,
1057 .sendmsg = hci_sock_sendmsg,
1058 .recvmsg = hci_sock_recvmsg,
1059 .ioctl = hci_sock_ioctl,
1060 .poll = datagram_poll,
1061 .listen = sock_no_listen,
1062 .shutdown = sock_no_shutdown,
1063 .setsockopt = hci_sock_setsockopt,
1064 .getsockopt = hci_sock_getsockopt,
1065 .connect = sock_no_connect,
1066 .socketpair = sock_no_socketpair,
1067 .accept = sock_no_accept,
1068 .mmap = sock_no_mmap
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for the
 * per-socket struct hci_pinfo. */
1071 static struct proto hci_sk_proto = {
1073 .owner = THIS_MODULE,
1074 .obj_size = sizeof(struct hci_pinfo)
/* Create a new PF_BLUETOOTH HCI socket.  Only SOCK_RAW is supported.
 * The socket starts unbound (BT_OPEN) and is linked into the global
 * hci_sk_list. */
1077 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1082 BT_DBG("sock %p", sock);
1084 if (sock->type != SOCK_RAW)
1085 return -ESOCKTNOSUPPORT;
1087 sock->ops = &hci_sock_ops;
1089 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
1093 sock_init_data(sock, sk);
1095 sock_reset_flag(sk, SOCK_ZAPPED);
1097 sk->sk_protocol = protocol;
1099 sock->state = SS_UNCONNECTED;
1100 sk->sk_state = BT_OPEN;
1102 bt_sock_link(&hci_sk_list, sk);
/* Family operations registered for BTPROTO_HCI socket creation. */
1106 static const struct net_proto_family hci_sock_family_ops = {
1107 .family = PF_BLUETOOTH,
1108 .owner = THIS_MODULE,
1109 .create = hci_sock_create,
/* Register the HCI protocol and the BTPROTO_HCI socket family.  On
 * family-registration failure the proto is unregistered again (error
 * path at the bottom). */
1112 int __init hci_sock_init(void)
1116 err = proto_register(&hci_sk_proto, 0);
1120 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1124 BT_INFO("HCI socket layer initialized");
1129 BT_ERR("HCI socket registration failed");
1130 proto_unregister(&hci_sk_proto);
/* Tear down in reverse order of hci_sock_init(). */
1134 void hci_sock_cleanup(void)
1136 if (bt_sock_unregister(BTPROTO_HCI) < 0)
1137 BT_ERR("HCI socket unregistration failed")&#59;