/*
 * net/bluetooth/hci_core.c
 * (exported from git web view; commit: "Bluetooth: Guarantee BR-EDR device
 *  will be registered as hci0")
 */
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
24
25 /* Bluetooth HCI core. */
26
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
30
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
46 #include <net/sock.h>
47
48 #include <asm/system.h>
49 #include <linux/uaccess.h>
50 #include <asm/unaligned.h>
51
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54
55 #define AUTO_OFF_TIMEOUT 2000
56
57 static void hci_cmd_task(unsigned long arg);
58 static void hci_rx_task(unsigned long arg);
59 static void hci_tx_task(unsigned long arg);
60
61 static DEFINE_RWLOCK(hci_task_lock);
62
63 /* HCI device list */
64 LIST_HEAD(hci_dev_list);
65 DEFINE_RWLOCK(hci_dev_list_lock);
66
67 /* HCI callback list */
68 LIST_HEAD(hci_cb_list);
69 DEFINE_RWLOCK(hci_cb_list_lock);
70
71 /* HCI protocols */
72 #define HCI_MAX_PROTO   2
73 struct hci_proto *hci_proto[HCI_MAX_PROTO];
74
75 /* HCI notifiers list */
76 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
77
78 /* ---- HCI notifications ---- */
79
/* Subscribe @nb to HCI device events delivered via hci_notify().
 * Returns the atomic_notifier_chain_register() result (0 on success). */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
84
/* Remove @nb from the HCI event notifier chain. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
89
/* Broadcast @event for @hdev to all registered HCI notifiers. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
94
95 /* ---- HCI requests ---- */
96
/* Called from the event path when command @cmd finished with @result;
 * wakes any __hci_request() waiter.  During HCI_INIT only the last
 * queued init command (init_last_cmd) may complete the request. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
113
114 static void hci_req_cancel(struct hci_dev *hdev, int err)
115 {
116         BT_DBG("%s err 0x%2.2x", hdev->name, err);
117
118         if (hdev->req_status == HCI_REQ_PEND) {
119                 hdev->req_result = err;
120                 hdev->req_status = HCI_REQ_CANCELED;
121                 wake_up_interruptible(&hdev->req_wait_q);
122         }
123 }
124
/* Execute request and wait for completion.
 *
 * Runs @req on @hdev and sleeps interruptibly until hci_req_complete()
 * or hci_req_cancel() fires, or @timeout jiffies elapse.  Callers hold
 * the request lock (hci_req_lock()) or are serialized by init context.
 *
 * Returns 0 on success, a negative errno mapped from the controller
 * status, the negated cancel reason, -EINTR, or -ETIMEDOUT. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves on the wait queue before issuing the request so
	 * the completion wake-up cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on a pending signal we return -EINTR while
	 * req_status is still HCI_REQ_PEND, so a late completion could
	 * satisfy a later request - confirm callers tolerate this. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code -> negative errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* hci_req_cancel() stored a positive errno in req_result. */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
167
168 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
169                                         unsigned long opt, __u32 timeout)
170 {
171         int ret;
172
173         if (!test_bit(HCI_UP, &hdev->flags))
174                 return -ENETDOWN;
175
176         /* Serialize all requests */
177         hci_req_lock(hdev);
178         ret = __hci_request(hdev, req, opt, timeout);
179         hci_req_unlock(hdev);
180
181         return ret;
182 }
183
/* Request callback: issue HCI Reset.  HCI_RESET is set first so the
 * event path knows a reset is in flight. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
192
/* HCI_INIT request callback: bring a freshly opened controller to a
 * usable state.  First flushes driver-queued vendor commands, then
 * issues the mandatory reads, then optional setup.  Command order is
 * deliberate; runs under __hci_request() with HCI_INIT set. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands: drain the driver's pre-queued vendor-specific
	 * command skbs into the normal command queue. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset (skipped for controllers whose driver sets the no-reset
	 * quirk) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
			set_bit(HCI_RESET, &hdev->flags);
			hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 baseband slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Delete all stored link keys (BDADDR_ANY + delete_all). */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
269
/* HCI_INIT request callback for LE-capable controllers: query the LE
 * ACL buffer parameters. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
277
278 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
279 {
280         __u8 scan = opt;
281
282         BT_DBG("%s %x", hdev->name, scan);
283
284         /* Inquiry and Page scans */
285         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
286 }
287
288 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
289 {
290         __u8 auth = opt;
291
292         BT_DBG("%s %x", hdev->name, auth);
293
294         /* Authentication */
295         hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
296 }
297
298 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
299 {
300         __u8 encrypt = opt;
301
302         BT_DBG("%s %x", hdev->name, encrypt);
303
304         /* Encryption */
305         hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
306 }
307
308 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
309 {
310         __le16 policy = cpu_to_le16(opt);
311
312         BT_DBG("%s %x", hdev->name, policy);
313
314         /* Default link policy */
315         hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
316 }
317
318 /* Get HCI device by index.
319  * Device is held on return. */
320 struct hci_dev *hci_dev_get(int index)
321 {
322         struct hci_dev *hdev = NULL, *d;
323
324         BT_DBG("%d", index);
325
326         if (index < 0)
327                 return NULL;
328
329         read_lock(&hci_dev_list_lock);
330         list_for_each_entry(d, &hci_dev_list, list) {
331                 if (d->id == index) {
332                         hdev = hci_dev_hold(d);
333                         break;
334                 }
335         }
336         read_unlock(&hci_dev_list_lock);
337         return hdev;
338 }
339
340 /* ---- Inquiry support ---- */
341 static void inquiry_cache_flush(struct hci_dev *hdev)
342 {
343         struct inquiry_cache *cache = &hdev->inq_cache;
344         struct inquiry_entry *next  = cache->list, *e;
345
346         BT_DBG("cache %p", cache);
347
348         cache->list = NULL;
349         while ((e = next)) {
350                 next = e->next;
351                 kfree(e);
352         }
353 }
354
355 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
356 {
357         struct inquiry_cache *cache = &hdev->inq_cache;
358         struct inquiry_entry *e;
359
360         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
361
362         for (e = cache->list; e; e = e->next)
363                 if (!bacmp(&e->data.bdaddr, bdaddr))
364                         break;
365         return e;
366 }
367
368 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
369 {
370         struct inquiry_cache *cache = &hdev->inq_cache;
371         struct inquiry_entry *ie;
372
373         BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
374
375         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
376         if (!ie) {
377                 /* Entry not in the cache. Add new one. */
378                 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
379                 if (!ie)
380                         return;
381
382                 ie->next = cache->list;
383                 cache->list = ie;
384         }
385
386         memcpy(&ie->data, data, sizeof(*data));
387         ie->timestamp = jiffies;
388         cache->timestamp = jiffies;
389 }
390
391 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
392 {
393         struct inquiry_cache *cache = &hdev->inq_cache;
394         struct inquiry_info *info = (struct inquiry_info *) buf;
395         struct inquiry_entry *e;
396         int copied = 0;
397
398         for (e = cache->list; e && copied < num; e = e->next, copied++) {
399                 struct inquiry_data *data = &e->data;
400                 bacpy(&info->bdaddr, &data->bdaddr);
401                 info->pscan_rep_mode    = data->pscan_rep_mode;
402                 info->pscan_period_mode = data->pscan_period_mode;
403                 info->pscan_mode        = data->pscan_mode;
404                 memcpy(info->dev_class, data->dev_class, 3);
405                 info->clock_offset      = data->clock_offset;
406                 info++;
407         }
408
409         BT_DBG("cache %p, copied %d", cache, copied);
410         return copied;
411 }
412
413 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
414 {
415         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
416         struct hci_cp_inquiry cp;
417
418         BT_DBG("%s", hdev->name);
419
420         if (test_bit(HCI_INQUIRY, &hdev->flags))
421                 return;
422
423         /* Start Inquiry */
424         memcpy(&cp.lap, &ir->lap, 3);
425         cp.length  = ir->length;
426         cp.num_rsp = ir->num_rsp;
427         hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
428 }
429
/* HCIINQUIRY ioctl backend.  Copies a hci_inquiry_req from userspace,
 * re-runs the inquiry when the cache is stale/empty/flush-requested,
 * then copies the (possibly updated) request header plus up to
 * ir.num_rsp cached results back to @arg.
 *
 * Returns 0 or a negative errno (-EFAULT, -ENODEV, -ENOMEM, or an
 * hci_request() error). */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Decide under the device lock whether the cache must be rebuilt. */
	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* ir.length is in 1.28s units on the wire; wait ~2s per unit. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the header (with the real num_rsp) then the results. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
495
496 /* ---- HCI ioctl helpers ---- */
497
/* Power on HCI device @dev: open the driver transport and, unless the
 * device is raw, run the HCI (and LE, if host-LE-capable) init
 * request sequences.  On init failure everything is torn back down.
 *
 * Returns 0, or -ENODEV/-ERFKILL/-EALREADY/-EIO, or an init-request
 * error. */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power on while rfkill-blocked. */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		/* Request lock already held, so call __hci_request()
		 * directly instead of hci_request(). */
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Don't report "powered" to mgmt while still in setup. */
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
580
/* Power off @hdev: cancel any pending request, kill the tasklets,
 * flush caches/queues, reset the controller (non-raw devices), close
 * the transport and drop the reference taken in hci_dev_open().
 * Teardown order matters.  Always returns 0. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just make sure the command timer is stopped. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Balances the hci_dev_hold() done when the device came up. */
	hci_dev_put(hdev);
	return 0;
}
647
648 int hci_dev_close(__u16 dev)
649 {
650         struct hci_dev *hdev;
651         int err;
652
653         hdev = hci_dev_get(dev);
654         if (!hdev)
655                 return -ENODEV;
656         err = hci_dev_do_close(hdev);
657         hci_dev_put(hdev);
658         return err;
659 }
660
/* HCIDEVRESET ioctl backend: drop queued traffic, flush the inquiry
 * cache and connection hash, zero the flow-control counters, and (for
 * non-raw devices) send HCI Reset.  The TX tasklet is disabled for the
 * duration so nothing is transmitted mid-reset. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	/* Nothing to reset when the device is down (still returns 0). */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore command credit and clear per-link-type packet counts. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
701
702 int hci_dev_reset_stat(__u16 dev)
703 {
704         struct hci_dev *hdev;
705         int ret = 0;
706
707         hdev = hci_dev_get(dev);
708         if (!hdev)
709                 return -ENODEV;
710
711         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
712
713         hci_dev_put(hdev);
714
715         return ret;
716 }
717
/* Dispatcher for the HCISET* ioctls: copies a hci_dev_req from
 * userspace and applies the setting named by @cmd, either by issuing
 * an HCI request or by updating hdev fields directly. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Host-side setting only; no command goes to the controller. */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two __u16 halves: [0] = pkt count,
		 * [1] = MTU.  NOTE(review): this punning assumes the
		 * historical hci ioctl ABI layout - confirm on
		 * big-endian before touching. */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same two-halves packing as HCISETACLMTU. */
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
792
/* HCIGETDEVLIST ioctl backend: read the requested entry count from
 * userspace, snapshot up to that many registered devices (id + flags)
 * under the list lock, and copy the shrunk-to-fit list back. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the allocation at two pages' worth of entries. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Legacy-ioctl access cancels any pending auto-power-off. */
		hci_del_off_timer(hdev);

		/* Devices not managed via mgmt stay pairable by default. */
		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Copy back only the entries actually filled. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
838
/* HCIGETDEVINFO ioctl backend: fill a hci_dev_info snapshot for the
 * device named by di.dev_id and copy it back to userspace. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy-ioctl access cancels any pending auto-power-off. */
	hci_del_off_timer(hdev);

	/* Devices not managed via mgmt stay pairable by default. */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	/* NOTE(review): unbounded strcpy - assumes hdev->name always fits
	 * in di.name (both are fixed "hciN" style buffers); confirm the
	 * struct sizes before reuse. */
	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* Low nibble = transport bus, high nibble = controller type. */
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
879
880 /* ---- Interface to HCI drivers ---- */
881
882 static int hci_rfkill_set_block(void *data, bool blocked)
883 {
884         struct hci_dev *hdev = data;
885
886         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
887
888         if (!blocked)
889                 return 0;
890
891         hci_dev_do_close(hdev);
892
893         return 0;
894 }
895
/* rfkill callbacks registered for each HCI device. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
899
900 /* Alloc HCI device */
901 struct hci_dev *hci_alloc_dev(void)
902 {
903         struct hci_dev *hdev;
904
905         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
906         if (!hdev)
907                 return NULL;
908
909         hci_init_sysfs(hdev);
910         skb_queue_head_init(&hdev->driver_init);
911
912         return hdev;
913 }
914 EXPORT_SYMBOL(hci_alloc_dev);
915
/* Free HCI device: drop queued driver-init skbs and release the
 * embedded struct device reference (actual kfree happens in the
 * device release callback). */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
925
926 static void hci_power_on(struct work_struct *work)
927 {
928         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
929
930         BT_DBG("%s", hdev->name);
931
932         if (hci_dev_open(hdev->id) < 0)
933                 return;
934
935         if (test_bit(HCI_AUTO_OFF, &hdev->flags))
936                 mod_timer(&hdev->off_timer,
937                                 jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
938
939         if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
940                 mgmt_index_added(hdev->id);
941 }
942
/* Workqueue handler: power the device down (queued by hci_auto_off()
 * and mgmt). */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
951
952 static void hci_auto_off(unsigned long data)
953 {
954         struct hci_dev *hdev = (struct hci_dev *) data;
955
956         BT_DBG("%s", hdev->name);
957
958         clear_bit(HCI_AUTO_OFF, &hdev->flags);
959
960         queue_work(hdev->workqueue, &hdev->power_off);
961 }
962
/* Cancel a pending auto-power-off: clear the flag and stop the timer
 * (called when something starts using the device). */
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
970
971 int hci_uuids_clear(struct hci_dev *hdev)
972 {
973         struct list_head *p, *n;
974
975         list_for_each_safe(p, n, &hdev->uuids) {
976                 struct bt_uuid *uuid;
977
978                 uuid = list_entry(p, struct bt_uuid, list);
979
980                 list_del(p);
981                 kfree(uuid);
982         }
983
984         return 0;
985 }
986
987 int hci_link_keys_clear(struct hci_dev *hdev)
988 {
989         struct list_head *p, *n;
990
991         list_for_each_safe(p, n, &hdev->link_keys) {
992                 struct link_key *key;
993
994                 key = list_entry(p, struct link_key, list);
995
996                 list_del(p);
997                 kfree(key);
998         }
999
1000         return 0;
1001 }
1002
1003 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1004 {
1005         struct link_key *k;
1006
1007         list_for_each_entry(k, &hdev->link_keys, list)
1008                 if (bacmp(bdaddr, &k->bdaddr) == 0)
1009                         return k;
1010
1011         return NULL;
1012 }
1013
/* Decide whether a link key should be stored persistently (1) or kept
 * only for the current session (0).
 *
 * NOTE(review): the magic values (key_type < 0x03, auth_type 0x02/0x03,
 * old_key_type 0xff) appear to follow the Bluetooth Core spec link-key
 * type and authentication-requirement encodings - confirm against the
 * spec before changing any threshold. */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1049
1050 struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1051 {
1052         struct link_key *k;
1053
1054         list_for_each_entry(k, &hdev->link_keys, list) {
1055                 struct key_master_id *id;
1056
1057                 if (k->type != HCI_LK_SMP_LTK)
1058                         continue;
1059
1060                 if (k->dlen != sizeof(*id))
1061                         continue;
1062
1063                 id = (void *) &k->data;
1064                 if (id->ediv == ediv &&
1065                                 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1066                         return k;
1067         }
1068
1069         return NULL;
1070 }
1071 EXPORT_SYMBOL(hci_find_ltk);
1072
1073 struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1074                                         bdaddr_t *bdaddr, u8 type)
1075 {
1076         struct link_key *k;
1077
1078         list_for_each_entry(k, &hdev->link_keys, list)
1079                 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1080                         return k;
1081
1082         return NULL;
1083 }
1084 EXPORT_SYMBOL(hci_find_link_key_type);
1085
/* Store (or update in place) the BR/EDR link key for @bdaddr.
 *
 * @new_key: non-zero when the controller just generated this key (as
 *           opposed to it being loaded from storage); only then is
 *           mgmt notified and the persistence decision applied.
 *
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse the existing entry for this address if there is one. */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for hci_persistent_key(). */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);	/* link keys are always 16 bytes */
	key->pin_len = pin_len;

	/* A changed combination key keeps the original key's type. */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_key(hdev->id, key, persistent);

	/* Non-persistent keys are only announced, not kept in the list. */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1140
/* Store (or update) an SMP Long Term Key for @bdaddr.
 *
 * The LTK is kept in the common link_keys list with type
 * HCI_LK_SMP_LTK; the ediv/rand master identification is appended in
 * the entry's variable-length data area.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		/* Allocate the entry plus room for the master-id trailer. */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;	/* no previous key */
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;	/* field reused to carry the key size */

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	/* NOTE(review): hci_add_link_key() passes a "persistent" flag as
	 * the third mgmt_new_key() argument; here old_key_type is passed
	 * instead — confirm this is intentional. */
	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	return 0;
}
1178
1179 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1180 {
1181         struct link_key *key;
1182
1183         key = hci_find_link_key(hdev, bdaddr);
1184         if (!key)
1185                 return -ENOENT;
1186
1187         BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1188
1189         list_del(&key->list);
1190         kfree(key);
1191
1192         return 0;
1193 }
1194
1195 /* HCI command timer function */
1196 static void hci_cmd_timer(unsigned long arg)
1197 {
1198         struct hci_dev *hdev = (void *) arg;
1199
1200         BT_ERR("%s command tx timeout", hdev->name);
1201         atomic_set(&hdev->cmd_cnt, 1);
1202         tasklet_schedule(&hdev->cmd_task);
1203 }
1204
1205 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1206                                                         bdaddr_t *bdaddr)
1207 {
1208         struct oob_data *data;
1209
1210         list_for_each_entry(data, &hdev->remote_oob_data, list)
1211                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1212                         return data;
1213
1214         return NULL;
1215 }
1216
1217 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1218 {
1219         struct oob_data *data;
1220
1221         data = hci_find_remote_oob_data(hdev, bdaddr);
1222         if (!data)
1223                 return -ENOENT;
1224
1225         BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1226
1227         list_del(&data->list);
1228         kfree(data);
1229
1230         return 0;
1231 }
1232
1233 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1234 {
1235         struct oob_data *data, *n;
1236
1237         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1238                 list_del(&data->list);
1239                 kfree(data);
1240         }
1241
1242         return 0;
1243 }
1244
1245 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1246                                                                 u8 *randomizer)
1247 {
1248         struct oob_data *data;
1249
1250         data = hci_find_remote_oob_data(hdev, bdaddr);
1251
1252         if (!data) {
1253                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1254                 if (!data)
1255                         return -ENOMEM;
1256
1257                 bacpy(&data->bdaddr, bdaddr);
1258                 list_add(&data->list, &hdev->remote_oob_data);
1259         }
1260
1261         memcpy(data->hash, hash, sizeof(data->hash));
1262         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1263
1264         BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1265
1266         return 0;
1267 }
1268
1269 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1270                                                 bdaddr_t *bdaddr)
1271 {
1272         struct bdaddr_list *b;
1273
1274         list_for_each_entry(b, &hdev->blacklist, list)
1275                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1276                         return b;
1277
1278         return NULL;
1279 }
1280
1281 int hci_blacklist_clear(struct hci_dev *hdev)
1282 {
1283         struct list_head *p, *n;
1284
1285         list_for_each_safe(p, n, &hdev->blacklist) {
1286                 struct bdaddr_list *b;
1287
1288                 b = list_entry(p, struct bdaddr_list, list);
1289
1290                 list_del(p);
1291                 kfree(b);
1292         }
1293
1294         return 0;
1295 }
1296
1297 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1298 {
1299         struct bdaddr_list *entry;
1300
1301         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1302                 return -EBADF;
1303
1304         if (hci_blacklist_lookup(hdev, bdaddr))
1305                 return -EEXIST;
1306
1307         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1308         if (!entry)
1309                 return -ENOMEM;
1310
1311         bacpy(&entry->bdaddr, bdaddr);
1312
1313         list_add(&entry->list, &hdev->blacklist);
1314
1315         return mgmt_device_blocked(hdev->id, bdaddr);
1316 }
1317
1318 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1319 {
1320         struct bdaddr_list *entry;
1321
1322         if (bacmp(bdaddr, BDADDR_ANY) == 0) {
1323                 return hci_blacklist_clear(hdev);
1324         }
1325
1326         entry = hci_blacklist_lookup(hdev, bdaddr);
1327         if (!entry) {
1328                 return -ENOENT;
1329         }
1330
1331         list_del(&entry->list);
1332         kfree(entry);
1333
1334         return mgmt_device_unblocked(hdev->id, bdaddr);
1335 }
1336
/* Advertising-cache expiry timer: flush all cached entries under the
 * device lock. */
static void hci_clear_adv_cache(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_dev_lock(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);
}
1347
1348 int hci_adv_entries_clear(struct hci_dev *hdev)
1349 {
1350         struct adv_entry *entry, *tmp;
1351
1352         list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1353                 list_del(&entry->list);
1354                 kfree(entry);
1355         }
1356
1357         BT_DBG("%s adv cache cleared", hdev->name);
1358
1359         return 0;
1360 }
1361
1362 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1363 {
1364         struct adv_entry *entry;
1365
1366         list_for_each_entry(entry, &hdev->adv_entries, list)
1367                 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1368                         return entry;
1369
1370         return NULL;
1371 }
1372
1373 static inline int is_connectable_adv(u8 evt_type)
1374 {
1375         if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1376                 return 1;
1377
1378         return 0;
1379 }
1380
1381 int hci_add_adv_entry(struct hci_dev *hdev,
1382                                         struct hci_ev_le_advertising_info *ev)
1383 {
1384         struct adv_entry *entry;
1385
1386         if (!is_connectable_adv(ev->evt_type))
1387                 return -EINVAL;
1388
1389         /* Only new entries should be added to adv_entries. So, if
1390          * bdaddr was found, don't add it. */
1391         if (hci_find_adv_entry(hdev, &ev->bdaddr))
1392                 return 0;
1393
1394         entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1395         if (!entry)
1396                 return -ENOMEM;
1397
1398         bacpy(&entry->bdaddr, &ev->bdaddr);
1399         entry->bdaddr_type = ev->bdaddr_type;
1400
1401         list_add(&entry->list, &hdev->adv_entries);
1402
1403         BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1404                                 batostr(&entry->bdaddr), entry->bdaddr_type);
1405
1406         return 0;
1407 }
1408
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* A driver must provide at least these three callbacks. */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	/* NOTE(review): this scan assumes hci_dev_list stays sorted by
	 * id so the first gap can be taken — verify the insertion below
	 * preserves that for mixed BR/EDR and AMP registrations. */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	/* Default controller parameters until the init sequence
	 * discovers the real capabilities. */
	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
						(unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best effort: on failure the device
	 * stays usable, just without rfkill support. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power the device on for setup; the auto-off timer switches
	 * it back off unless userspace keeps it up. */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above. */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1531
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Remove from the global list first so no new users find it. */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames. */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only tell mgmt about the removal if the index was announced
	 * as added in the first place (setup/init had finished). */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	/* Stop pending timers before the workqueue goes away. */
	hci_del_off_timer(hdev);
	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	/* Release all per-device caches and key material. */
	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1577
1578 /* Suspend HCI device */
1579 int hci_suspend_dev(struct hci_dev *hdev)
1580 {
1581         hci_notify(hdev, HCI_DEV_SUSPEND);
1582         return 0;
1583 }
1584 EXPORT_SYMBOL(hci_suspend_dev);
1585
1586 /* Resume HCI device */
1587 int hci_resume_dev(struct hci_dev *hdev)
1588 {
1589         hci_notify(hdev, HCI_DEV_RESUME);
1590         return 0;
1591 }
1592 EXPORT_SYMBOL(hci_resume_dev);
1593
1594 /* Receive frame from HCI drivers */
1595 int hci_recv_frame(struct sk_buff *skb)
1596 {
1597         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1598         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1599                                 && !test_bit(HCI_INIT, &hdev->flags))) {
1600                 kfree_skb(skb);
1601                 return -ENXIO;
1602         }
1603
1604         /* Incomming skb */
1605         bt_cb(skb)->incoming = 1;
1606
1607         /* Time stamp */
1608         __net_timestamp(skb);
1609
1610         /* Queue frame for rx task */
1611         skb_queue_tail(&hdev->rx_q, skb);
1612         tasklet_schedule(&hdev->rx_task);
1613
1614         return 0;
1615 }
1616 EXPORT_SYMBOL(hci_recv_frame);
1617
/* Incrementally reassemble an HCI frame from a driver byte stream.
 *
 * @type:  packet type (HCI_ACLDATA_PKT, HCI_SCODATA_PKT or
 *         HCI_EVENT_PKT)
 * @data:  input bytes
 * @count: number of bytes available at @data
 * @index: which hdev->reassembly[] slot to use
 *
 * Returns the number of unconsumed bytes (>= 0) or a negative errno.
 * When a full frame has been assembled it is handed to
 * hci_recv_frame() and the slot is reset to NULL.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new frame: allocate a buffer sized for the
		 * largest packet of this type and expect the header
		 * bytes first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		/* Copy at most the number of bytes still expected. */
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length
		 * from it; give up if the buffer cannot hold the
		 * advertised payload. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len  == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1726
1727 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1728 {
1729         int rem = 0;
1730
1731         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1732                 return -EILSEQ;
1733
1734         while (count) {
1735                 rem = hci_reassembly(hdev, type, data, count, type - 1);
1736                 if (rem < 0)
1737                         return rem;
1738
1739                 data += (count - rem);
1740                 count = rem;
1741         }
1742
1743         return rem;
1744 }
1745 EXPORT_SYMBOL(hci_recv_fragment);
1746
1747 #define STREAM_REASSEMBLY 0
1748
1749 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1750 {
1751         int type;
1752         int rem = 0;
1753
1754         while (count) {
1755                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1756
1757                 if (!skb) {
1758                         struct { char type; } *pkt;
1759
1760                         /* Start of the frame */
1761                         pkt = data;
1762                         type = pkt->type;
1763
1764                         data++;
1765                         count--;
1766                 } else
1767                         type = bt_cb(skb)->pkt_type;
1768
1769                 rem = hci_reassembly(hdev, type, data, count,
1770                                                         STREAM_REASSEMBLY);
1771                 if (rem < 0)
1772                         return rem;
1773
1774                 data += (count - rem);
1775                 count = rem;
1776         }
1777
1778         return rem;
1779 }
1780 EXPORT_SYMBOL(hci_recv_stream_fragment);
1781
1782 /* ---- Interface to upper protocols ---- */
1783
1784 /* Register/Unregister protocols.
1785  * hci_task_lock is used to ensure that no tasks are running. */
1786 int hci_register_proto(struct hci_proto *hp)
1787 {
1788         int err = 0;
1789
1790         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1791
1792         if (hp->id >= HCI_MAX_PROTO)
1793                 return -EINVAL;
1794
1795         write_lock_bh(&hci_task_lock);
1796
1797         if (!hci_proto[hp->id])
1798                 hci_proto[hp->id] = hp;
1799         else
1800                 err = -EEXIST;
1801
1802         write_unlock_bh(&hci_task_lock);
1803
1804         return err;
1805 }
1806 EXPORT_SYMBOL(hci_register_proto);
1807
1808 int hci_unregister_proto(struct hci_proto *hp)
1809 {
1810         int err = 0;
1811
1812         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1813
1814         if (hp->id >= HCI_MAX_PROTO)
1815                 return -EINVAL;
1816
1817         write_lock_bh(&hci_task_lock);
1818
1819         if (hci_proto[hp->id])
1820                 hci_proto[hp->id] = NULL;
1821         else
1822                 err = -ENOENT;
1823
1824         write_unlock_bh(&hci_task_lock);
1825
1826         return err;
1827 }
1828 EXPORT_SYMBOL(hci_unregister_proto);
1829
1830 int hci_register_cb(struct hci_cb *cb)
1831 {
1832         BT_DBG("%p name %s", cb, cb->name);
1833
1834         write_lock_bh(&hci_cb_list_lock);
1835         list_add(&cb->list, &hci_cb_list);
1836         write_unlock_bh(&hci_cb_list_lock);
1837
1838         return 0;
1839 }
1840 EXPORT_SYMBOL(hci_register_cb);
1841
1842 int hci_unregister_cb(struct hci_cb *cb)
1843 {
1844         BT_DBG("%p name %s", cb, cb->name);
1845
1846         write_lock_bh(&hci_cb_list_lock);
1847         list_del(&cb->list);
1848         write_unlock_bh(&hci_cb_list_lock);
1849
1850         return 0;
1851 }
1852 EXPORT_SYMBOL(hci_unregister_cb);
1853
1854 static int hci_send_frame(struct sk_buff *skb)
1855 {
1856         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1857
1858         if (!hdev) {
1859                 kfree_skb(skb);
1860                 return -ENODEV;
1861         }
1862
1863         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1864
1865         if (atomic_read(&hdev->promisc)) {
1866                 /* Time stamp */
1867                 __net_timestamp(skb);
1868
1869                 hci_send_to_sock(hdev, skb, NULL);
1870         }
1871
1872         /* Get rid of skb owner, prior to sending to the driver. */
1873         skb_orphan(skb);
1874
1875         return hdev->send(skb);
1876 }
1877
1878 /* Send HCI command */
1879 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1880 {
1881         int len = HCI_COMMAND_HDR_SIZE + plen;
1882         struct hci_command_hdr *hdr;
1883         struct sk_buff *skb;
1884
1885         BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1886
1887         skb = bt_skb_alloc(len, GFP_ATOMIC);
1888         if (!skb) {
1889                 BT_ERR("%s no memory for command", hdev->name);
1890                 return -ENOMEM;
1891         }
1892
1893         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1894         hdr->opcode = cpu_to_le16(opcode);
1895         hdr->plen   = plen;
1896
1897         if (plen)
1898                 memcpy(skb_put(skb, plen), param, plen);
1899
1900         BT_DBG("skb len %d", skb->len);
1901
1902         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1903         skb->dev = (void *) hdev;
1904
1905         if (test_bit(HCI_INIT, &hdev->flags))
1906                 hdev->init_last_cmd = opcode;
1907
1908         skb_queue_tail(&hdev->cmd_q, skb);
1909         tasklet_schedule(&hdev->cmd_task);
1910
1911         return 0;
1912 }
1913
1914 /* Get data from the previously sent command */
1915 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1916 {
1917         struct hci_command_hdr *hdr;
1918
1919         if (!hdev->sent_cmd)
1920                 return NULL;
1921
1922         hdr = (void *) hdev->sent_cmd->data;
1923
1924         if (hdr->opcode != cpu_to_le16(opcode))
1925                 return NULL;
1926
1927         BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1928
1929         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1930 }
1931
1932 /* Send ACL data */
1933 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1934 {
1935         struct hci_acl_hdr *hdr;
1936         int len = skb->len;
1937
1938         skb_push(skb, HCI_ACL_HDR_SIZE);
1939         skb_reset_transport_header(skb);
1940         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1941         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1942         hdr->dlen   = cpu_to_le16(len);
1943 }
1944
/* Queue an ACL skb (plus any fragments on its frag_list) on @queue.
 *
 * Fragments get their own ACL header with the boundary flags switched
 * from ACL_START to ACL_CONT, and all pieces are queued under the
 * queue lock so the TX task never observes a partially queued frame.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the chain; each fragment is queued below as an
		 * independent skb. */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
1985
1986 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
1987 {
1988         struct hci_conn *conn = chan->conn;
1989         struct hci_dev *hdev = conn->hdev;
1990
1991         BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
1992
1993         skb->dev = (void *) hdev;
1994         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1995         hci_add_acl_hdr(skb, conn->handle, flags);
1996
1997         hci_queue_acl(conn, &chan->data_q, skb, flags);
1998
1999         tasklet_schedule(&hdev->tx_task);
2000 }
2001 EXPORT_SYMBOL(hci_send_acl);
2002
2003 /* Send SCO data */
2004 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2005 {
2006         struct hci_dev *hdev = conn->hdev;
2007         struct hci_sco_hdr hdr;
2008
2009         BT_DBG("%s len %d", hdev->name, skb->len);
2010
2011         hdr.handle = cpu_to_le16(conn->handle);
2012         hdr.dlen   = skb->len;
2013
2014         skb_push(skb, HCI_SCO_HDR_SIZE);
2015         skb_reset_transport_header(skb);
2016         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2017
2018         skb->dev = (void *) hdev;
2019         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2020
2021         skb_queue_tail(&conn->data_q, skb);
2022         tasklet_schedule(&hdev->tx_task);
2023 }
2024 EXPORT_SYMBOL(hci_send_sco);
2025
2026 /* ---- HCI TX task (outgoing data) ---- */
2027
/* HCI Connection scheduler */
/*
 * Pick the connection of the given link type that should transmit next:
 * the one with queued data and the fewest in-flight packets.  *quote
 * receives that connection's fair share of controller buffer credits
 * (at least 1), or 0 when nothing is eligible.  Returns the chosen
 * connection or NULL.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL, *c;
        int num = 0, min = ~0;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each_entry(c, &h->list, list) {
                /* Skip connections of the wrong type or with nothing
                 * queued to send. */
                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                /* Track the eligible connection with the fewest
                 * in-flight packets, for fairness. */
                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }

                /* Stop early once every connection of this type has
                 * been examined. */
                if (hci_conn_num(hdev, type) == num)
                        break;
        }

        if (conn) {
                int cnt, q;

                /* Buffer-credit pool that matches the winner's link
                 * type. */
                switch (conn->type) {
                case ACL_LINK:
                        cnt = hdev->acl_cnt;
                        break;
                case SCO_LINK:
                case ESCO_LINK:
                        cnt = hdev->sco_cnt;
                        break;
                case LE_LINK:
                        /* No dedicated LE buffers (le_mtu == 0):
                         * share the ACL pool. */
                        cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                        break;
                default:
                        cnt = 0;
                        BT_ERR("Unknown link type");
                }

                /* Even share per eligible connection; always grant at
                 * least one packet. */
                q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}
2082
2083 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2084 {
2085         struct hci_conn_hash *h = &hdev->conn_hash;
2086         struct hci_conn *c;
2087
2088         BT_ERR("%s link tx timeout", hdev->name);
2089
2090         /* Kill stalled connections */
2091         list_for_each_entry(c, &h->list, list) {
2092                 if (c->type == type && c->sent) {
2093                         BT_ERR("%s killing stalled connection %s",
2094                                 hdev->name, batostr(&c->dst));
2095                         hci_acl_disconn(c, 0x13);
2096                 }
2097         }
2098 }
2099
/*
 * Pick the channel of the given link type that should transmit next.
 *
 * A channel is eligible when its connection matches @type, is connected
 * (or still configuring) and the channel has queued data.  Only the
 * channels whose head skb carries the highest priority seen compete;
 * among those, the channel on the connection with the fewest in-flight
 * packets wins.  *quote receives the winner's fair share of controller
 * buffer credits (at least 1).  Returns NULL when nothing is ready.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
                                                int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_chan *chan = NULL;
        int num = 0, min = ~0, cur_prio = 0;
        struct hci_conn *conn;
        int cnt, q, conn_num = 0;

        BT_DBG("%s", hdev->name);

        list_for_each_entry(conn, &h->list, list) {
                struct hci_chan_hash *ch;
                struct hci_chan *tmp;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                conn_num++;

                ch = &conn->chan_hash;

                list_for_each_entry(tmp, &ch->list, list) {
                        struct sk_buff *skb;

                        if (skb_queue_empty(&tmp->data_q))
                                continue;

                        skb = skb_peek(&tmp->data_q);
                        /* Channels below the best priority seen so far
                         * are out of the running. */
                        if (skb->priority < cur_prio)
                                continue;

                        /* A strictly higher priority restarts the
                         * competition from scratch. */
                        if (skb->priority > cur_prio) {
                                num = 0;
                                min = ~0;
                                cur_prio = skb->priority;
                        }

                        num++;

                        /* Among equal-priority channels, prefer the
                         * one on the least-loaded connection. */
                        if (conn->sent < min) {
                                min  = conn->sent;
                                chan = tmp;
                        }
                }

                /* All connections of this type have been visited. */
                if (hci_conn_num(hdev, type) == conn_num)
                        break;
        }

        if (!chan)
                return NULL;

        /* Buffer-credit pool for the winning link type. */
        switch (chan->conn->type) {
        case ACL_LINK:
                cnt = hdev->acl_cnt;
                break;
        case SCO_LINK:
        case ESCO_LINK:
                cnt = hdev->sco_cnt;
                break;
        case LE_LINK:
                /* No dedicated LE buffers: fall back to the ACL pool. */
                cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                break;
        default:
                cnt = 0;
                BT_ERR("Unknown link type");
        }

        /* Even share per competing channel, minimum one packet. */
        q = cnt / num;
        *quote = q ? q : 1;
        BT_DBG("chan %p quote %d", chan, *quote);
        return chan;
}
2177
/*
 * Age channel priorities after a scheduling round for link type @type.
 *
 * Channels that transmitted this round just have their per-round
 * counter cleared.  Channels that were starved get the head skb of
 * their queue promoted to HCI_PRIO_MAX - 1 so they eventually win
 * against persistently higher-priority traffic.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn;
        int num = 0;

        BT_DBG("%s", hdev->name);

        list_for_each_entry(conn, &h->list, list) {
                struct hci_chan_hash *ch;
                struct hci_chan *chan;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                num++;

                ch = &conn->chan_hash;
                list_for_each_entry(chan, &ch->list, list) {
                        struct sk_buff *skb;

                        /* This channel was serviced: clear its counter
                         * and leave its priority alone. */
                        if (chan->sent) {
                                chan->sent = 0;
                                continue;
                        }

                        if (skb_queue_empty(&chan->data_q))
                                continue;

                        skb = skb_peek(&chan->data_q);
                        /* Already at the promotion ceiling. */
                        if (skb->priority >= HCI_PRIO_MAX - 1)
                                continue;

                        /* Starved channel: promote the head skb so the
                         * next round favours it. */
                        skb->priority = HCI_PRIO_MAX - 1;

                        BT_DBG("chan %p skb %p promoted to %d", chan, skb,
                                                                skb->priority);
                }

                /* Every connection of this type has been visited. */
                if (hci_conn_num(hdev, type) == num)
                        break;
        }
}
2224
/*
 * Schedule ACL traffic: while ACL buffer credits (acl_cnt) remain,
 * repeatedly pick the best channel via hci_chan_sent() and drain up to
 * its quota of equal-priority packets into the controller.
 */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;
        unsigned int cnt;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, ACL_LINK))
                return;

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
                        hci_link_tx_to(hdev, ACL_LINK);
        }

        /* Remember the starting credit count so we can tell below
         * whether anything was actually transmitted. */
        cnt = hdev->acl_cnt;

        while (hdev->acl_cnt &&
                        (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                                        skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        /* Honour the packet's force_active hint before
                         * transmitting. */
                        hci_conn_enter_active_mode(chan->conn,
                                                bt_cb(skb)->force_active);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        /* One controller credit consumed; count the
                         * packet against channel and connection. */
                        hdev->acl_cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        /* If anything went out, age priorities for the next round. */
        if (cnt != hdev->acl_cnt)
                hci_prio_recalculate(hdev, ACL_LINK);
}
2274
2275 /* Schedule SCO */
2276 static inline void hci_sched_sco(struct hci_dev *hdev)
2277 {
2278         struct hci_conn *conn;
2279         struct sk_buff *skb;
2280         int quote;
2281
2282         BT_DBG("%s", hdev->name);
2283
2284         if (!hci_conn_num(hdev, SCO_LINK))
2285                 return;
2286
2287         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2288                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2289                         BT_DBG("skb %p len %d", skb, skb->len);
2290                         hci_send_frame(skb);
2291
2292                         conn->sent++;
2293                         if (conn->sent == ~0)
2294                                 conn->sent = 0;
2295                 }
2296         }
2297 }
2298
2299 static inline void hci_sched_esco(struct hci_dev *hdev)
2300 {
2301         struct hci_conn *conn;
2302         struct sk_buff *skb;
2303         int quote;
2304
2305         BT_DBG("%s", hdev->name);
2306
2307         if (!hci_conn_num(hdev, ESCO_LINK))
2308                 return;
2309
2310         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2311                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2312                         BT_DBG("skb %p len %d", skb, skb->len);
2313                         hci_send_frame(skb);
2314
2315                         conn->sent++;
2316                         if (conn->sent == ~0)
2317                                 conn->sent = 0;
2318                 }
2319         }
2320 }
2321
/*
 * Schedule LE traffic.  Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) borrow ACL credits; the remaining count is written
 * back to whichever pool it was taken from.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote, cnt, tmp;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, LE_LINK))
                return;

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* LE tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->le_cnt && hdev->le_pkts &&
                                time_after(jiffies, hdev->le_last_tx + HZ * 45))
                        hci_link_tx_to(hdev, LE_LINK);
        }

        /* NOTE(review): the buffer-pool choice keys off le_pkts here
         * but off le_mtu in hci_chan_sent()/hci_low_sent() — confirm
         * both fields are always set together. */
        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
        tmp = cnt;
        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                                        skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_send_frame(skb);
                        hdev->le_last_tx = jiffies;

                        /* One credit consumed; count the packet against
                         * channel and connection. */
                        cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        /* Return the remaining credits to the pool they came from. */
        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;

        /* Age priorities if anything was transmitted this round. */
        if (cnt != tmp)
                hci_prio_recalculate(hdev, LE_LINK);
}
2372
2373 static void hci_tx_task(unsigned long arg)
2374 {
2375         struct hci_dev *hdev = (struct hci_dev *) arg;
2376         struct sk_buff *skb;
2377
2378         read_lock(&hci_task_lock);
2379
2380         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2381                 hdev->sco_cnt, hdev->le_cnt);
2382
2383         /* Schedule queues and send stuff to HCI driver */
2384
2385         hci_sched_acl(hdev);
2386
2387         hci_sched_sco(hdev);
2388
2389         hci_sched_esco(hdev);
2390
2391         hci_sched_le(hdev);
2392
2393         /* Send next queued raw (unknown type) packet */
2394         while ((skb = skb_dequeue(&hdev->raw_q)))
2395                 hci_send_frame(skb);
2396
2397         read_unlock(&hci_task_lock);
2398 }
2399
2400 /* ----- HCI RX task (incoming data processing) ----- */
2401
2402 /* ACL data packet */
2403 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2404 {
2405         struct hci_acl_hdr *hdr = (void *) skb->data;
2406         struct hci_conn *conn;
2407         __u16 handle, flags;
2408
2409         skb_pull(skb, HCI_ACL_HDR_SIZE);
2410
2411         handle = __le16_to_cpu(hdr->handle);
2412         flags  = hci_flags(handle);
2413         handle = hci_handle(handle);
2414
2415         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2416
2417         hdev->stat.acl_rx++;
2418
2419         hci_dev_lock(hdev);
2420         conn = hci_conn_hash_lookup_handle(hdev, handle);
2421         hci_dev_unlock(hdev);
2422
2423         if (conn) {
2424                 register struct hci_proto *hp;
2425
2426                 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
2427
2428                 /* Send to upper protocol */
2429                 hp = hci_proto[HCI_PROTO_L2CAP];
2430                 if (hp && hp->recv_acldata) {
2431                         hp->recv_acldata(conn, skb, flags);
2432                         return;
2433                 }
2434         } else {
2435                 BT_ERR("%s ACL packet for unknown connection handle %d",
2436                         hdev->name, handle);
2437         }
2438
2439         kfree_skb(skb);
2440 }
2441
2442 /* SCO data packet */
2443 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2444 {
2445         struct hci_sco_hdr *hdr = (void *) skb->data;
2446         struct hci_conn *conn;
2447         __u16 handle;
2448
2449         skb_pull(skb, HCI_SCO_HDR_SIZE);
2450
2451         handle = __le16_to_cpu(hdr->handle);
2452
2453         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2454
2455         hdev->stat.sco_rx++;
2456
2457         hci_dev_lock(hdev);
2458         conn = hci_conn_hash_lookup_handle(hdev, handle);
2459         hci_dev_unlock(hdev);
2460
2461         if (conn) {
2462                 register struct hci_proto *hp;
2463
2464                 /* Send to upper protocol */
2465                 hp = hci_proto[HCI_PROTO_SCO];
2466                 if (hp && hp->recv_scodata) {
2467                         hp->recv_scodata(conn, skb);
2468                         return;
2469                 }
2470         } else {
2471                 BT_ERR("%s SCO packet for unknown connection handle %d",
2472                         hdev->name, handle);
2473         }
2474
2475         kfree_skb(skb);
2476 }
2477
2478 static void hci_rx_task(unsigned long arg)
2479 {
2480         struct hci_dev *hdev = (struct hci_dev *) arg;
2481         struct sk_buff *skb;
2482
2483         BT_DBG("%s", hdev->name);
2484
2485         read_lock(&hci_task_lock);
2486
2487         while ((skb = skb_dequeue(&hdev->rx_q))) {
2488                 if (atomic_read(&hdev->promisc)) {
2489                         /* Send copy to the sockets */
2490                         hci_send_to_sock(hdev, skb, NULL);
2491                 }
2492
2493                 if (test_bit(HCI_RAW, &hdev->flags)) {
2494                         kfree_skb(skb);
2495                         continue;
2496                 }
2497
2498                 if (test_bit(HCI_INIT, &hdev->flags)) {
2499                         /* Don't process data packets in this states. */
2500                         switch (bt_cb(skb)->pkt_type) {
2501                         case HCI_ACLDATA_PKT:
2502                         case HCI_SCODATA_PKT:
2503                                 kfree_skb(skb);
2504                                 continue;
2505                         }
2506                 }
2507
2508                 /* Process frame */
2509                 switch (bt_cb(skb)->pkt_type) {
2510                 case HCI_EVENT_PKT:
2511                         hci_event_packet(hdev, skb);
2512                         break;
2513
2514                 case HCI_ACLDATA_PKT:
2515                         BT_DBG("%s ACL data packet", hdev->name);
2516                         hci_acldata_packet(hdev, skb);
2517                         break;
2518
2519                 case HCI_SCODATA_PKT:
2520                         BT_DBG("%s SCO data packet", hdev->name);
2521                         hci_scodata_packet(hdev, skb);
2522                         break;
2523
2524                 default:
2525                         kfree_skb(skb);
2526                         break;
2527                 }
2528         }
2529
2530         read_unlock(&hci_task_lock);
2531 }
2532
/*
 * Command tasklet: transmit the next queued HCI command when the
 * controller has command credits (cmd_cnt) available.  A clone of the
 * outgoing command is kept in hdev->sent_cmd.
 */
static void hci_cmd_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                /* Release the previously kept command, if any. */
                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        /* No command watchdog while a reset is in
                         * flight; otherwise (re)arm the timeout. */
                        if (test_bit(HCI_RESET, &hdev->flags))
                                del_timer(&hdev->cmd_timer);
                        else
                                mod_timer(&hdev->cmd_timer,
                                  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
                } else {
                        /* Clone failed (out of memory): put the command
                         * back and retry on the next scheduling pass. */
                        skb_queue_head(&hdev->cmd_q, skb);
                        tasklet_schedule(&hdev->cmd_task);
                }
        }
}