1  /*
2     BlueZ - Bluetooth protocol stack for Linux
3     Copyright (C) 2000-2001 Qualcomm Incorporated
4     Copyright (C) 2011 ProFUSION Embedded Systems
5  
6     Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7  
8     This program is free software; you can redistribute it and/or modify
9     it under the terms of the GNU General Public License version 2 as
10     published by the Free Software Foundation;
11  
12     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13     OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14     FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15     IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16     CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17     WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18     ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19     OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  
21     ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22     COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23     SOFTWARE IS DISCLAIMED.
24  */
25  
26  /* Bluetooth HCI core. */
27  
28  #include <linux/export.h>
29  #include <linux/rfkill.h>
30  #include <linux/debugfs.h>
31  #include <linux/crypto.h>
32  #include <linux/kcov.h>
33  #include <linux/property.h>
34  #include <linux/suspend.h>
35  #include <linux/wait.h>
36  #include <linux/unaligned.h>
37  
38  #include <net/bluetooth/bluetooth.h>
39  #include <net/bluetooth/hci_core.h>
40  #include <net/bluetooth/l2cap.h>
41  #include <net/bluetooth/mgmt.h>
42  
43  #include "hci_debugfs.h"
44  #include "smp.h"
45  #include "leds.h"
46  #include "msft.h"
47  #include "aosp.h"
48  #include "hci_codec.h"
49  
50  static void hci_rx_work(struct work_struct *work);
51  static void hci_cmd_work(struct work_struct *work);
52  static void hci_tx_work(struct work_struct *work);
53  
54  /* HCI device list */
55  LIST_HEAD(hci_dev_list);
56  DEFINE_RWLOCK(hci_dev_list_lock);
57  
58  /* HCI callback list */
59  LIST_HEAD(hci_cb_list);
60  DEFINE_MUTEX(hci_cb_list_lock);
61  
62  /* HCI ID Numbering */
63  static DEFINE_IDA(hci_index_ida);
64  
65  /* Get HCI device by index.
66   * Device is held on return. */
67  struct hci_dev *hci_dev_get(int index)
68  {
69  	struct hci_dev *hdev = NULL, *d;
70  
71  	BT_DBG("%d", index);
72  
73  	if (index < 0)
74  		return NULL;
75  
76  	read_lock(&hci_dev_list_lock);
77  	list_for_each_entry(d, &hci_dev_list, list) {
78  		if (d->id == index) {
79  			hdev = hci_dev_hold(d);
80  			break;
81  		}
82  	}
83  	read_unlock(&hci_dev_list_lock);
84  	return hdev;
85  }
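
/* Minimal usage sketch (illustrative, hypothetical caller): the reference
 * taken by hci_dev_get() must be dropped with hci_dev_put() when done:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		(use hdev here)
 *		hci_dev_put(hdev);
 *	}
 */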
86  
87  /* ---- Inquiry support ---- */
88  
89  bool hci_discovery_active(struct hci_dev *hdev)
90  {
91  	struct discovery_state *discov = &hdev->discovery;
92  
93  	switch (discov->state) {
94  	case DISCOVERY_FINDING:
95  	case DISCOVERY_RESOLVING:
96  		return true;
97  
98  	default:
99  		return false;
100  	}
101  }
102  
103  void hci_discovery_set_state(struct hci_dev *hdev, int state)
104  {
105  	int old_state = hdev->discovery.state;
106  
107  	if (old_state == state)
108  		return;
109  
110  	hdev->discovery.state = state;
111  
112  	switch (state) {
113  	case DISCOVERY_STOPPED:
114  		hci_update_passive_scan(hdev);
115  
116  		if (old_state != DISCOVERY_STARTING)
117  			mgmt_discovering(hdev, 0);
118  		break;
119  	case DISCOVERY_STARTING:
120  		break;
121  	case DISCOVERY_FINDING:
122  		mgmt_discovering(hdev, 1);
123  		break;
124  	case DISCOVERY_RESOLVING:
125  		break;
126  	case DISCOVERY_STOPPING:
127  		break;
128  	}
129  
130  	bt_dev_dbg(hdev, "state %u -> %u", old_state, state);
131  }
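
/* Summary of the state machine above (derived from the switch cases): the
 * usual progression driven by mgmt is STOPPED -> STARTING -> FINDING
 * [-> RESOLVING] -> STOPPING -> STOPPED; mgmt_discovering() is only
 * notified when entering FINDING and when returning to STOPPED from a
 * state other than STARTING.
 */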
132  
133  void hci_inquiry_cache_flush(struct hci_dev *hdev)
134  {
135  	struct discovery_state *cache = &hdev->discovery;
136  	struct inquiry_entry *p, *n;
137  
138  	list_for_each_entry_safe(p, n, &cache->all, all) {
139  		list_del(&p->all);
140  		kfree(p);
141  	}
142  
143  	INIT_LIST_HEAD(&cache->unknown);
144  	INIT_LIST_HEAD(&cache->resolve);
145  }
146  
147  struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
148  					       bdaddr_t *bdaddr)
149  {
150  	struct discovery_state *cache = &hdev->discovery;
151  	struct inquiry_entry *e;
152  
153  	BT_DBG("cache %p, %pMR", cache, bdaddr);
154  
155  	list_for_each_entry(e, &cache->all, all) {
156  		if (!bacmp(&e->data.bdaddr, bdaddr))
157  			return e;
158  	}
159  
160  	return NULL;
161  }
162  
163  struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
164  						       bdaddr_t *bdaddr)
165  {
166  	struct discovery_state *cache = &hdev->discovery;
167  	struct inquiry_entry *e;
168  
169  	BT_DBG("cache %p, %pMR", cache, bdaddr);
170  
171  	list_for_each_entry(e, &cache->unknown, list) {
172  		if (!bacmp(&e->data.bdaddr, bdaddr))
173  			return e;
174  	}
175  
176  	return NULL;
177  }
178  
179  struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
180  						       bdaddr_t *bdaddr,
181  						       int state)
182  {
183  	struct discovery_state *cache = &hdev->discovery;
184  	struct inquiry_entry *e;
185  
186  	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
187  
188  	list_for_each_entry(e, &cache->resolve, list) {
189  		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
190  			return e;
191  		if (!bacmp(&e->data.bdaddr, bdaddr))
192  			return e;
193  	}
194  
195  	return NULL;
196  }
197  
198  void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
199  				      struct inquiry_entry *ie)
200  {
201  	struct discovery_state *cache = &hdev->discovery;
202  	struct list_head *pos = &cache->resolve;
203  	struct inquiry_entry *p;
204  
205  	list_del(&ie->list);
206  
207  	list_for_each_entry(p, &cache->resolve, list) {
208  		if (p->name_state != NAME_PENDING &&
209  		    abs(p->data.rssi) >= abs(ie->data.rssi))
210  			break;
211  		pos = &p->list;
212  	}
213  
214  	list_add(&ie->list, pos);
215  }
216  
217  u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
218  			     bool name_known)
219  {
220  	struct discovery_state *cache = &hdev->discovery;
221  	struct inquiry_entry *ie;
222  	u32 flags = 0;
223  
224  	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
225  
226  	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
227  
228  	if (!data->ssp_mode)
229  		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
230  
231  	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
232  	if (ie) {
233  		if (!ie->data.ssp_mode)
234  			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
235  
236  		if (ie->name_state == NAME_NEEDED &&
237  		    data->rssi != ie->data.rssi) {
238  			ie->data.rssi = data->rssi;
239  			hci_inquiry_cache_update_resolve(hdev, ie);
240  		}
241  
242  		goto update;
243  	}
244  
245  	/* Entry not in the cache. Add new one. */
246  	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
247  	if (!ie) {
248  		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
249  		goto done;
250  	}
251  
252  	list_add(&ie->all, &cache->all);
253  
254  	if (name_known) {
255  		ie->name_state = NAME_KNOWN;
256  	} else {
257  		ie->name_state = NAME_NOT_KNOWN;
258  		list_add(&ie->list, &cache->unknown);
259  	}
260  
261  update:
262  	if (name_known && ie->name_state != NAME_KNOWN &&
263  	    ie->name_state != NAME_PENDING) {
264  		ie->name_state = NAME_KNOWN;
265  		list_del(&ie->list);
266  	}
267  
268  	memcpy(&ie->data, data, sizeof(*data));
269  	ie->timestamp = jiffies;
270  	cache->timestamp = jiffies;
271  
272  	if (ie->name_state == NAME_NOT_KNOWN)
273  		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
274  
275  done:
276  	return flags;
277  }
278  
279  static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
280  {
281  	struct discovery_state *cache = &hdev->discovery;
282  	struct inquiry_info *info = (struct inquiry_info *) buf;
283  	struct inquiry_entry *e;
284  	int copied = 0;
285  
286  	list_for_each_entry(e, &cache->all, all) {
287  		struct inquiry_data *data = &e->data;
288  
289  		if (copied >= num)
290  			break;
291  
292  		bacpy(&info->bdaddr, &data->bdaddr);
293  		info->pscan_rep_mode	= data->pscan_rep_mode;
294  		info->pscan_period_mode	= data->pscan_period_mode;
295  		info->pscan_mode	= data->pscan_mode;
296  		memcpy(info->dev_class, data->dev_class, 3);
297  		info->clock_offset	= data->clock_offset;
298  
299  		info++;
300  		copied++;
301  	}
302  
303  	BT_DBG("cache %p, copied %d", cache, copied);
304  	return copied;
305  }
306  
307  int hci_inquiry(void __user *arg)
308  {
309  	__u8 __user *ptr = arg;
310  	struct hci_inquiry_req ir;
311  	struct hci_dev *hdev;
312  	int err = 0, do_inquiry = 0, max_rsp;
313  	__u8 *buf;
314  
315  	if (copy_from_user(&ir, ptr, sizeof(ir)))
316  		return -EFAULT;
317  
318  	hdev = hci_dev_get(ir.dev_id);
319  	if (!hdev)
320  		return -ENODEV;
321  
322  	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
323  		err = -EBUSY;
324  		goto done;
325  	}
326  
327  	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
328  		err = -EOPNOTSUPP;
329  		goto done;
330  	}
331  
332  	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
333  		err = -EOPNOTSUPP;
334  		goto done;
335  	}
336  
337  	/* Restrict maximum inquiry length to 60 seconds */
338  	if (ir.length > 60) {
339  		err = -EINVAL;
340  		goto done;
341  	}
342  
343  	hci_dev_lock(hdev);
344  	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
345  	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
346  		hci_inquiry_cache_flush(hdev);
347  		do_inquiry = 1;
348  	}
349  	hci_dev_unlock(hdev);
350  
351  	if (do_inquiry) {
352  		hci_req_sync_lock(hdev);
353  		err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp);
354  		hci_req_sync_unlock(hdev);
355  
356  		if (err < 0)
357  			goto done;
358  
359  		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
360  		 * cleared). If it is interrupted by a signal, return -EINTR.
361  		 */
362  		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
363  				TASK_INTERRUPTIBLE)) {
364  			err = -EINTR;
365  			goto done;
366  		}
367  	}
368  
369  	/* For an unlimited number of responses we use a buffer with
370  	 * 255 entries
371  	 */
372  	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
373  
374  	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
375  	 * copy it to the user space.
376  	 */
377  	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
378  	if (!buf) {
379  		err = -ENOMEM;
380  		goto done;
381  	}
382  
383  	hci_dev_lock(hdev);
384  	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
385  	hci_dev_unlock(hdev);
386  
387  	BT_DBG("num_rsp %d", ir.num_rsp);
388  
389  	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
390  		ptr += sizeof(ir);
391  		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
392  				 ir.num_rsp))
393  			err = -EFAULT;
394  	} else
395  		err = -EFAULT;
396  
397  	kfree(buf);
398  
399  done:
400  	hci_dev_put(hdev);
401  	return err;
402  }
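
/* Rough user-space sketch of driving this ioctl (illustrative only; real
 * code should size the buffer from num_rsp and check all return values):
 *
 *	struct hci_inquiry_req *ir;
 *	char buf[sizeof(*ir) + 255 * sizeof(struct inquiry_info)];
 *
 *	ir = (struct hci_inquiry_req *)buf;
 *	memset(ir, 0, sizeof(*ir));
 *	ir->dev_id  = 0;			(hci0)
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->lap[0]  = 0x33;			(GIAC 0x9e8b33)
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;
 *	ir->num_rsp = 255;
 *
 *	if (ioctl(dd, HCIINQUIRY, buf) >= 0)
 *		(buf + sizeof(*ir) holds ir->num_rsp struct inquiry_info)
 *
 * where dd is a raw HCI socket descriptor.
 */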
403  
404  static int hci_dev_do_open(struct hci_dev *hdev)
405  {
406  	int ret = 0;
407  
408  	BT_DBG("%s %p", hdev->name, hdev);
409  
410  	hci_req_sync_lock(hdev);
411  
412  	ret = hci_dev_open_sync(hdev);
413  
414  	hci_req_sync_unlock(hdev);
415  	return ret;
416  }
417  
418  /* ---- HCI ioctl helpers ---- */
419  
420  int hci_dev_open(__u16 dev)
421  {
422  	struct hci_dev *hdev;
423  	int err;
424  
425  	hdev = hci_dev_get(dev);
426  	if (!hdev)
427  		return -ENODEV;
428  
429  	/* Devices that are marked as unconfigured can only be powered
430  	 * up as user channel. Trying to bring them up as normal devices
431  	 * will result in a failure. Only user channel operation is
432  	 * possible.
433  	 *
434  	 * When this function is called for a user channel, the flag
435  	 * HCI_USER_CHANNEL will be set first before attempting to
436  	 * open the device.
437  	 */
438  	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
439  	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
440  		err = -EOPNOTSUPP;
441  		goto done;
442  	}
443  
444  	/* We need to ensure that no other power on/off work is pending
445  	 * before proceeding to call hci_dev_do_open. This is
446  	 * particularly important if the setup procedure has not yet
447  	 * completed.
448  	 */
449  	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
450  		cancel_delayed_work(&hdev->power_off);
451  
452  	/* After this call it is guaranteed that the setup procedure
453  	 * has finished. This means that error conditions like RFKILL
454  	 * or no valid public or static random address apply.
455  	 */
456  	flush_workqueue(hdev->req_workqueue);
457  
458  	/* For controllers not using the management interface and that
459  	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
460  	 * so that pairing works for them. Once the management interface
461  	 * is in use this bit will be cleared again and userspace has
462  	 * to explicitly enable it.
463  	 */
464  	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
465  	    !hci_dev_test_flag(hdev, HCI_MGMT))
466  		hci_dev_set_flag(hdev, HCI_BONDABLE);
467  
468  	err = hci_dev_do_open(hdev);
469  
470  done:
471  	hci_dev_put(hdev);
472  	return err;
473  }
474  
475  int hci_dev_do_close(struct hci_dev *hdev)
476  {
477  	int err;
478  
479  	BT_DBG("%s %p", hdev->name, hdev);
480  
481  	hci_req_sync_lock(hdev);
482  
483  	err = hci_dev_close_sync(hdev);
484  
485  	hci_req_sync_unlock(hdev);
486  
487  	return err;
488  }
489  
490  int hci_dev_close(__u16 dev)
491  {
492  	struct hci_dev *hdev;
493  	int err;
494  
495  	hdev = hci_dev_get(dev);
496  	if (!hdev)
497  		return -ENODEV;
498  
499  	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
500  		err = -EBUSY;
501  		goto done;
502  	}
503  
504  	cancel_work_sync(&hdev->power_on);
505  	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
506  		cancel_delayed_work(&hdev->power_off);
507  
508  	err = hci_dev_do_close(hdev);
509  
510  done:
511  	hci_dev_put(hdev);
512  	return err;
513  }
514  
515  static int hci_dev_do_reset(struct hci_dev *hdev)
516  {
517  	int ret;
518  
519  	BT_DBG("%s %p", hdev->name, hdev);
520  
521  	hci_req_sync_lock(hdev);
522  
523  	/* Drop queues */
524  	skb_queue_purge(&hdev->rx_q);
525  	skb_queue_purge(&hdev->cmd_q);
526  
527  	/* Cancel these to avoid queueing non-chained pending work */
528  	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
529  	/* Wait for
530  	 *
531  	 *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
532  	 *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
533  	 *
534  	 * inside RCU section to see the flag or complete scheduling.
535  	 */
536  	synchronize_rcu();
537  	/* Explicitly cancel works in case scheduled after setting the flag. */
538  	cancel_delayed_work(&hdev->cmd_timer);
539  	cancel_delayed_work(&hdev->ncmd_timer);
540  
541  	/* Avoid potential lockdep warnings from the *_flush() calls by
542  	 * ensuring the workqueue is empty up front.
543  	 */
544  	drain_workqueue(hdev->workqueue);
545  
546  	hci_dev_lock(hdev);
547  	hci_inquiry_cache_flush(hdev);
548  	hci_conn_hash_flush(hdev);
549  	hci_dev_unlock(hdev);
550  
551  	if (hdev->flush)
552  		hdev->flush(hdev);
553  
554  	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
555  
556  	atomic_set(&hdev->cmd_cnt, 1);
557  	hdev->acl_cnt = 0;
558  	hdev->sco_cnt = 0;
559  	hdev->le_cnt = 0;
560  	hdev->iso_cnt = 0;
561  
562  	ret = hci_reset_sync(hdev);
563  
564  	hci_req_sync_unlock(hdev);
565  	return ret;
566  }
567  
568  int hci_dev_reset(__u16 dev)
569  {
570  	struct hci_dev *hdev;
571  	int err;
572  
573  	hdev = hci_dev_get(dev);
574  	if (!hdev)
575  		return -ENODEV;
576  
577  	if (!test_bit(HCI_UP, &hdev->flags)) {
578  		err = -ENETDOWN;
579  		goto done;
580  	}
581  
582  	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
583  		err = -EBUSY;
584  		goto done;
585  	}
586  
587  	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
588  		err = -EOPNOTSUPP;
589  		goto done;
590  	}
591  
592  	err = hci_dev_do_reset(hdev);
593  
594  done:
595  	hci_dev_put(hdev);
596  	return err;
597  }
598  
599  int hci_dev_reset_stat(__u16 dev)
600  {
601  	struct hci_dev *hdev;
602  	int ret = 0;
603  
604  	hdev = hci_dev_get(dev);
605  	if (!hdev)
606  		return -ENODEV;
607  
608  	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
609  		ret = -EBUSY;
610  		goto done;
611  	}
612  
613  	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
614  		ret = -EOPNOTSUPP;
615  		goto done;
616  	}
617  
618  	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
619  
620  done:
621  	hci_dev_put(hdev);
622  	return ret;
623  }
624  
625  static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
626  {
627  	bool conn_changed, discov_changed;
628  
629  	BT_DBG("%s scan 0x%02x", hdev->name, scan);
630  
631  	if ((scan & SCAN_PAGE))
632  		conn_changed = !hci_dev_test_and_set_flag(hdev,
633  							  HCI_CONNECTABLE);
634  	else
635  		conn_changed = hci_dev_test_and_clear_flag(hdev,
636  							   HCI_CONNECTABLE);
637  
638  	if ((scan & SCAN_INQUIRY)) {
639  		discov_changed = !hci_dev_test_and_set_flag(hdev,
640  							    HCI_DISCOVERABLE);
641  	} else {
642  		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
643  		discov_changed = hci_dev_test_and_clear_flag(hdev,
644  							     HCI_DISCOVERABLE);
645  	}
646  
647  	if (!hci_dev_test_flag(hdev, HCI_MGMT))
648  		return;
649  
650  	if (conn_changed || discov_changed) {
651  		/* In case this was disabled through mgmt */
652  		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
653  
654  		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
655  			hci_update_adv_data(hdev, hdev->cur_adv_instance);
656  
657  		mgmt_new_settings(hdev);
658  	}
659  }
660  
661  int hci_dev_cmd(unsigned int cmd, void __user *arg)
662  {
663  	struct hci_dev *hdev;
664  	struct hci_dev_req dr;
665  	__le16 policy;
666  	int err = 0;
667  
668  	if (copy_from_user(&dr, arg, sizeof(dr)))
669  		return -EFAULT;
670  
671  	hdev = hci_dev_get(dr.dev_id);
672  	if (!hdev)
673  		return -ENODEV;
674  
675  	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
676  		err = -EBUSY;
677  		goto done;
678  	}
679  
680  	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
681  		err = -EOPNOTSUPP;
682  		goto done;
683  	}
684  
685  	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
686  		err = -EOPNOTSUPP;
687  		goto done;
688  	}
689  
690  	switch (cmd) {
691  	case HCISETAUTH:
692  		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
693  					  1, &dr.dev_opt, HCI_CMD_TIMEOUT);
694  		break;
695  
696  	case HCISETENCRYPT:
697  		if (!lmp_encrypt_capable(hdev)) {
698  			err = -EOPNOTSUPP;
699  			break;
700  		}
701  
702  		if (!test_bit(HCI_AUTH, &hdev->flags)) {
703  			/* Auth must be enabled first */
704  			err = hci_cmd_sync_status(hdev,
705  						  HCI_OP_WRITE_AUTH_ENABLE,
706  						  1, &dr.dev_opt,
707  						  HCI_CMD_TIMEOUT);
708  			if (err)
709  				break;
710  		}
711  
712  		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
713  					  1, &dr.dev_opt, HCI_CMD_TIMEOUT);
714  		break;
715  
716  	case HCISETSCAN:
717  		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
718  					  1, &dr.dev_opt, HCI_CMD_TIMEOUT);
719  
720  		/* Ensure that the connectable and discoverable states
721  		 * get correctly modified as this was a non-mgmt change.
722  		 */
723  		if (!err)
724  			hci_update_passive_scan_state(hdev, dr.dev_opt);
725  		break;
726  
727  	case HCISETLINKPOL:
728  		policy = cpu_to_le16(dr.dev_opt);
729  
730  		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
731  					  2, &policy, HCI_CMD_TIMEOUT);
732  		break;
733  
734  	case HCISETLINKMODE:
735  		hdev->link_mode = ((__u16) dr.dev_opt) &
736  					(HCI_LM_MASTER | HCI_LM_ACCEPT);
737  		break;
738  
739  	case HCISETPTYPE:
740  		if (hdev->pkt_type == (__u16) dr.dev_opt)
741  			break;
742  
743  		hdev->pkt_type = (__u16) dr.dev_opt;
744  		mgmt_phy_configuration_changed(hdev, NULL);
745  		break;
746  
747  	case HCISETACLMTU:
748  		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
749  		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
750  		break;
751  
752  	case HCISETSCOMTU:
753  		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
754  		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
755  		break;
756  
757  	default:
758  		err = -EINVAL;
759  		break;
760  	}
761  
762  done:
763  	hci_dev_put(hdev);
764  	return err;
765  }
766  
767  int hci_get_dev_list(void __user *arg)
768  {
769  	struct hci_dev *hdev;
770  	struct hci_dev_list_req *dl;
771  	struct hci_dev_req *dr;
772  	int n = 0, err;
773  	__u16 dev_num;
774  
775  	if (get_user(dev_num, (__u16 __user *) arg))
776  		return -EFAULT;
777  
778  	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
779  		return -EINVAL;
780  
781  	dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL);
782  	if (!dl)
783  		return -ENOMEM;
784  
785  	dl->dev_num = dev_num;
786  	dr = dl->dev_req;
787  
788  	read_lock(&hci_dev_list_lock);
789  	list_for_each_entry(hdev, &hci_dev_list, list) {
790  		unsigned long flags = hdev->flags;
791  
792  		/* When the auto-off is configured it means the transport
793  		 * is running, but in that case still indicate that the
794  		 * device is actually down.
795  		 */
796  		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
797  			flags &= ~BIT(HCI_UP);
798  
799  		dr[n].dev_id  = hdev->id;
800  		dr[n].dev_opt = flags;
801  
802  		if (++n >= dev_num)
803  			break;
804  	}
805  	read_unlock(&hci_dev_list_lock);
806  
807  	dl->dev_num = n;
808  	err = copy_to_user(arg, dl, struct_size(dl, dev_req, n));
809  	kfree(dl);
810  
811  	return err ? -EFAULT : 0;
812  }
813  
814  int hci_get_dev_info(void __user *arg)
815  {
816  	struct hci_dev *hdev;
817  	struct hci_dev_info di;
818  	unsigned long flags;
819  	int err = 0;
820  
821  	if (copy_from_user(&di, arg, sizeof(di)))
822  		return -EFAULT;
823  
824  	hdev = hci_dev_get(di.dev_id);
825  	if (!hdev)
826  		return -ENODEV;
827  
828  	/* When the auto-off is configured it means the transport
829  	 * is running, but in that case still indicate that the
830  	 * device is actually down.
831  	 */
832  	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
833  		flags = hdev->flags & ~BIT(HCI_UP);
834  	else
835  		flags = hdev->flags;
836  
837  	strscpy(di.name, hdev->name, sizeof(di.name));
838  	di.bdaddr   = hdev->bdaddr;
839  	di.type     = (hdev->bus & 0x0f);
840  	di.flags    = flags;
841  	di.pkt_type = hdev->pkt_type;
842  	if (lmp_bredr_capable(hdev)) {
843  		di.acl_mtu  = hdev->acl_mtu;
844  		di.acl_pkts = hdev->acl_pkts;
845  		di.sco_mtu  = hdev->sco_mtu;
846  		di.sco_pkts = hdev->sco_pkts;
847  	} else {
848  		di.acl_mtu  = hdev->le_mtu;
849  		di.acl_pkts = hdev->le_pkts;
850  		di.sco_mtu  = 0;
851  		di.sco_pkts = 0;
852  	}
853  	di.link_policy = hdev->link_policy;
854  	di.link_mode   = hdev->link_mode;
855  
856  	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
857  	memcpy(&di.features, &hdev->features, sizeof(di.features));
858  
859  	if (copy_to_user(arg, &di, sizeof(di)))
860  		err = -EFAULT;
861  
862  	hci_dev_put(hdev);
863  
864  	return err;
865  }
866  
867  /* ---- Interface to HCI drivers ---- */
868  
869  static int hci_dev_do_poweroff(struct hci_dev *hdev)
870  {
871  	int err;
872  
873  	BT_DBG("%s %p", hdev->name, hdev);
874  
875  	hci_req_sync_lock(hdev);
876  
877  	err = hci_set_powered_sync(hdev, false);
878  
879  	hci_req_sync_unlock(hdev);
880  
881  	return err;
882  }
883  
884  static int hci_rfkill_set_block(void *data, bool blocked)
885  {
886  	struct hci_dev *hdev = data;
887  	int err;
888  
889  	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
890  
891  	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
892  		return -EBUSY;
893  
894  	if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
895  		return 0;
896  
897  	if (blocked) {
898  		hci_dev_set_flag(hdev, HCI_RFKILLED);
899  
900  		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
901  		    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
902  			err = hci_dev_do_poweroff(hdev);
903  			if (err) {
904  				bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
905  					   err);
906  
907  				/* Make sure the device is still closed even if
908  				 * anything during power off sequence (eg.
909  				 * disconnecting devices) failed.
910  				 */
911  				hci_dev_do_close(hdev);
912  			}
913  		}
914  	} else {
915  		hci_dev_clear_flag(hdev, HCI_RFKILLED);
916  	}
917  
918  	return 0;
919  }
920  
921  static const struct rfkill_ops hci_rfkill_ops = {
922  	.set_block = hci_rfkill_set_block,
923  };
924  
925  static void hci_power_on(struct work_struct *work)
926  {
927  	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
928  	int err;
929  
930  	BT_DBG("%s", hdev->name);
931  
932  	if (test_bit(HCI_UP, &hdev->flags) &&
933  	    hci_dev_test_flag(hdev, HCI_MGMT) &&
934  	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
935  		cancel_delayed_work(&hdev->power_off);
936  		err = hci_powered_update_sync(hdev);
937  		mgmt_power_on(hdev, err);
938  		return;
939  	}
940  
941  	err = hci_dev_do_open(hdev);
942  	if (err < 0) {
943  		hci_dev_lock(hdev);
944  		mgmt_set_powered_failed(hdev, err);
945  		hci_dev_unlock(hdev);
946  		return;
947  	}
948  
949  	/* During the HCI setup phase, a few error conditions are
950  	 * ignored and they need to be checked now. If they are still
951  	 * valid, it is important to turn the device back off.
952  	 */
953  	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
954  	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
955  	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
956  	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
957  		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
958  		hci_dev_do_close(hdev);
959  	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
960  		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
961  				   HCI_AUTO_OFF_TIMEOUT);
962  	}
963  
964  	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
965  		/* For unconfigured devices, set the HCI_RAW flag
966  		 * so that userspace can easily identify them.
967  		 */
968  		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
969  			set_bit(HCI_RAW, &hdev->flags);
970  
971  		/* For fully configured devices, this will send
972  		 * the Index Added event. For unconfigured devices,
973  		 * it will send the Unconfigured Index Added event.
974  		 *
975  		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
976  		 * and no event will be sent.
977  		 */
978  		mgmt_index_added(hdev);
979  	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
980  		/* When the controller is now configured, then it
981  		 * is important to clear the HCI_RAW flag.
982  		 */
983  		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
984  			clear_bit(HCI_RAW, &hdev->flags);
985  
986  		/* Powering on the controller with HCI_CONFIG set only
987  		 * happens with the transition from unconfigured to
988  		 * configured. This will send the Index Added event.
989  		 */
990  		mgmt_index_added(hdev);
991  	}
992  }
993  
994  static void hci_power_off(struct work_struct *work)
995  {
996  	struct hci_dev *hdev = container_of(work, struct hci_dev,
997  					    power_off.work);
998  
999  	BT_DBG("%s", hdev->name);
1000  
1001  	hci_dev_do_close(hdev);
1002  }
1003  
1004  static void hci_error_reset(struct work_struct *work)
1005  {
1006  	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1007  
1008  	hci_dev_hold(hdev);
1009  	BT_DBG("%s", hdev->name);
1010  
1011  	if (hdev->hw_error)
1012  		hdev->hw_error(hdev, hdev->hw_error_code);
1013  	else
1014  		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1015  
1016  	if (!hci_dev_do_close(hdev))
1017  		hci_dev_do_open(hdev);
1018  
1019  	hci_dev_put(hdev);
1020  }
1021  
1022  void hci_uuids_clear(struct hci_dev *hdev)
1023  {
1024  	struct bt_uuid *uuid, *tmp;
1025  
1026  	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1027  		list_del(&uuid->list);
1028  		kfree(uuid);
1029  	}
1030  }
1031  
1032  void hci_link_keys_clear(struct hci_dev *hdev)
1033  {
1034  	struct link_key *key, *tmp;
1035  
1036  	list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
1037  		list_del_rcu(&key->list);
1038  		kfree_rcu(key, rcu);
1039  	}
1040  }
1041  
1042  void hci_smp_ltks_clear(struct hci_dev *hdev)
1043  {
1044  	struct smp_ltk *k, *tmp;
1045  
1046  	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1047  		list_del_rcu(&k->list);
1048  		kfree_rcu(k, rcu);
1049  	}
1050  }
1051  
1052  void hci_smp_irks_clear(struct hci_dev *hdev)
1053  {
1054  	struct smp_irk *k, *tmp;
1055  
1056  	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1057  		list_del_rcu(&k->list);
1058  		kfree_rcu(k, rcu);
1059  	}
1060  }
1061  
1062  void hci_blocked_keys_clear(struct hci_dev *hdev)
1063  {
1064  	struct blocked_key *b, *tmp;
1065  
1066  	list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
1067  		list_del_rcu(&b->list);
1068  		kfree_rcu(b, rcu);
1069  	}
1070  }
1071  
1072  bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1073  {
1074  	bool blocked = false;
1075  	struct blocked_key *b;
1076  
1077  	rcu_read_lock();
1078  	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1079  		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1080  			blocked = true;
1081  			break;
1082  		}
1083  	}
1084  
1085  	rcu_read_unlock();
1086  	return blocked;
1087  }
1088  
1089  struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1090  {
1091  	struct link_key *k;
1092  
1093  	rcu_read_lock();
1094  	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1095  		if (bacmp(bdaddr, &k->bdaddr) == 0) {
1096  			rcu_read_unlock();
1097  
1098  			if (hci_is_blocked_key(hdev,
1099  					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
1100  					       k->val)) {
1101  				bt_dev_warn_ratelimited(hdev,
1102  							"Link key blocked for %pMR",
1103  							&k->bdaddr);
1104  				return NULL;
1105  			}
1106  
1107  			return k;
1108  		}
1109  	}
1110  	rcu_read_unlock();
1111  
1112  	return NULL;
1113  }
1114  
1115  static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1116  			       u8 key_type, u8 old_key_type)
1117  {
1118  	/* Legacy key */
1119  	if (key_type < 0x03)
1120  		return true;
1121  
1122  	/* Debug keys are insecure so don't store them persistently */
1123  	if (key_type == HCI_LK_DEBUG_COMBINATION)
1124  		return false;
1125  
1126  	/* Changed combination key and there's no previous one */
1127  	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1128  		return false;
1129  
1130  	/* Security mode 3 case */
1131  	if (!conn)
1132  		return true;
1133  
1134  	/* BR/EDR key derived using SC from an LE link */
1135  	if (conn->type == LE_LINK)
1136  		return true;
1137  
1138  	/* Neither local nor remote side had no-bonding as requirement */
1139  	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1140  		return true;
1141  
1142  	/* Local side had dedicated bonding as requirement */
1143  	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1144  		return true;
1145  
1146  	/* Remote side had dedicated bonding as requirement */
1147  	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1148  		return true;
1149  
1150  	/* If none of the above criteria match, then don't store the key
1151  	 * persistently */
1152  	return false;
1153  }
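
/* Illustrative outcomes of the policy above: a debug combination key is
 * never stored; an SSP combination key negotiated while both sides asked
 * for no-bonding (auth requirements 0x00/0x01) is not stored either; the
 * key is kept when both sides required some form of bonding or when
 * either side asked for dedicated bonding (0x02/0x03).
 */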
1154  
1155  static u8 ltk_role(u8 type)
1156  {
1157  	if (type == SMP_LTK)
1158  		return HCI_ROLE_MASTER;
1159  
1160  	return HCI_ROLE_SLAVE;
1161  }
1162  
1163  struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1164  			     u8 addr_type, u8 role)
1165  {
1166  	struct smp_ltk *k;
1167  
1168  	rcu_read_lock();
1169  	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1170  		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1171  			continue;
1172  
1173  		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1174  			rcu_read_unlock();
1175  
1176  			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1177  					       k->val)) {
1178  				bt_dev_warn_ratelimited(hdev,
1179  							"LTK blocked for %pMR",
1180  							&k->bdaddr);
1181  				return NULL;
1182  			}
1183  
1184  			return k;
1185  		}
1186  	}
1187  	rcu_read_unlock();
1188  
1189  	return NULL;
1190  }
1191  
1192  struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1193  {
1194  	struct smp_irk *irk_to_return = NULL;
1195  	struct smp_irk *irk;
1196  
1197  	rcu_read_lock();
1198  	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1199  		if (!bacmp(&irk->rpa, rpa)) {
1200  			irk_to_return = irk;
1201  			goto done;
1202  		}
1203  	}
1204  
1205  	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1206  		if (smp_irk_matches(hdev, irk->val, rpa)) {
1207  			bacpy(&irk->rpa, rpa);
1208  			irk_to_return = irk;
1209  			goto done;
1210  		}
1211  	}
1212  
1213  done:
1214  	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1215  						irk_to_return->val)) {
1216  		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1217  					&irk_to_return->bdaddr);
1218  		irk_to_return = NULL;
1219  	}
1220  
1221  	rcu_read_unlock();
1222  
1223  	return irk_to_return;
1224  }
1225  
1226  struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1227  				     u8 addr_type)
1228  {
1229  	struct smp_irk *irk_to_return = NULL;
1230  	struct smp_irk *irk;
1231  
1232  	/* Identity Address must be public or static random */
1233  	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1234  		return NULL;
1235  
1236  	rcu_read_lock();
1237  	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1238  		if (addr_type == irk->addr_type &&
1239  		    bacmp(bdaddr, &irk->bdaddr) == 0) {
1240  			irk_to_return = irk;
1241  			goto done;
1242  		}
1243  	}
1244  
1245  done:
1246  
1247  	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1248  						irk_to_return->val)) {
1249  		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1250  					&irk_to_return->bdaddr);
1251  		irk_to_return = NULL;
1252  	}
1253  
1254  	rcu_read_unlock();
1255  
1256  	return irk_to_return;
1257  }
1258  
1259  struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1260  				  bdaddr_t *bdaddr, u8 *val, u8 type,
1261  				  u8 pin_len, bool *persistent)
1262  {
1263  	struct link_key *key, *old_key;
1264  	u8 old_key_type;
1265  
1266  	old_key = hci_find_link_key(hdev, bdaddr);
1267  	if (old_key) {
1268  		old_key_type = old_key->type;
1269  		key = old_key;
1270  	} else {
1271  		old_key_type = conn ? conn->key_type : 0xff;
1272  		key = kzalloc(sizeof(*key), GFP_KERNEL);
1273  		if (!key)
1274  			return NULL;
1275  		list_add_rcu(&key->list, &hdev->link_keys);
1276  	}
1277  
1278  	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1279  
1280  	/* Some buggy controller combinations generate a changed
1281  	 * combination key for legacy pairing even when there's no
1282  	 * previous key */
1283  	if (type == HCI_LK_CHANGED_COMBINATION &&
1284  	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1285  		type = HCI_LK_COMBINATION;
1286  		if (conn)
1287  			conn->key_type = type;
1288  	}
1289  
1290  	bacpy(&key->bdaddr, bdaddr);
1291  	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1292  	key->pin_len = pin_len;
1293  
1294  	if (type == HCI_LK_CHANGED_COMBINATION)
1295  		key->type = old_key_type;
1296  	else
1297  		key->type = type;
1298  
1299  	if (persistent)
1300  		*persistent = hci_persistent_key(hdev, conn, type,
1301  						 old_key_type);
1302  
1303  	return key;
1304  }
1305  
1306  struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1307  			    u8 addr_type, u8 type, u8 authenticated,
1308  			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1309  {
1310  	struct smp_ltk *key, *old_key;
1311  	u8 role = ltk_role(type);
1312  
1313  	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1314  	if (old_key)
1315  		key = old_key;
1316  	else {
1317  		key = kzalloc(sizeof(*key), GFP_KERNEL);
1318  		if (!key)
1319  			return NULL;
1320  		list_add_rcu(&key->list, &hdev->long_term_keys);
1321  	}
1322  
1323  	bacpy(&key->bdaddr, bdaddr);
1324  	key->bdaddr_type = addr_type;
1325  	memcpy(key->val, tk, sizeof(key->val));
1326  	key->authenticated = authenticated;
1327  	key->ediv = ediv;
1328  	key->rand = rand;
1329  	key->enc_size = enc_size;
1330  	key->type = type;
1331  
1332  	return key;
1333  }
1334  
1335  struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1336  			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
1337  {
1338  	struct smp_irk *irk;
1339  
1340  	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1341  	if (!irk) {
1342  		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1343  		if (!irk)
1344  			return NULL;
1345  
1346  		bacpy(&irk->bdaddr, bdaddr);
1347  		irk->addr_type = addr_type;
1348  
1349  		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1350  	}
1351  
1352  	memcpy(irk->val, val, 16);
1353  	bacpy(&irk->rpa, rpa);
1354  
1355  	return irk;
1356  }
1357  
1358  int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1359  {
1360  	struct link_key *key;
1361  
1362  	key = hci_find_link_key(hdev, bdaddr);
1363  	if (!key)
1364  		return -ENOENT;
1365  
1366  	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1367  
1368  	list_del_rcu(&key->list);
1369  	kfree_rcu(key, rcu);
1370  
1371  	return 0;
1372  }
1373  
1374  int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1375  {
1376  	struct smp_ltk *k, *tmp;
1377  	int removed = 0;
1378  
1379  	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1380  		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1381  			continue;
1382  
1383  		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1384  
1385  		list_del_rcu(&k->list);
1386  		kfree_rcu(k, rcu);
1387  		removed++;
1388  	}
1389  
1390  	return removed ? 0 : -ENOENT;
1391  }
1392  
1393  void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1394  {
1395  	struct smp_irk *k, *tmp;
1396  
1397  	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1398  		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1399  			continue;
1400  
1401  		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1402  
1403  		list_del_rcu(&k->list);
1404  		kfree_rcu(k, rcu);
1405  	}
1406  }
1407  
1408  bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1409  {
1410  	struct smp_ltk *k;
1411  	struct smp_irk *irk;
1412  	u8 addr_type;
1413  
1414  	if (type == BDADDR_BREDR) {
1415  		if (hci_find_link_key(hdev, bdaddr))
1416  			return true;
1417  		return false;
1418  	}
1419  
1420  	/* Convert to HCI addr type which struct smp_ltk uses */
1421  	if (type == BDADDR_LE_PUBLIC)
1422  		addr_type = ADDR_LE_DEV_PUBLIC;
1423  	else
1424  		addr_type = ADDR_LE_DEV_RANDOM;
1425  
1426  	irk = hci_get_irk(hdev, bdaddr, addr_type);
1427  	if (irk) {
1428  		bdaddr = &irk->bdaddr;
1429  		addr_type = irk->addr_type;
1430  	}
1431  
1432  	rcu_read_lock();
1433  	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1434  		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1435  			rcu_read_unlock();
1436  			return true;
1437  		}
1438  	}
1439  	rcu_read_unlock();
1440  
1441  	return false;
1442  }
1443  
1444  /* HCI command timer function */
1445  static void hci_cmd_timeout(struct work_struct *work)
1446  {
1447  	struct hci_dev *hdev = container_of(work, struct hci_dev,
1448  					    cmd_timer.work);
1449  
1450  	if (hdev->req_skb) {
1451  		u16 opcode = hci_skb_opcode(hdev->req_skb);
1452  
1453  		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1454  
1455  		hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
1456  	} else {
1457  		bt_dev_err(hdev, "command tx timeout");
1458  	}
1459  
1460  	if (hdev->cmd_timeout)
1461  		hdev->cmd_timeout(hdev);
1462  
1463  	atomic_set(&hdev->cmd_cnt, 1);
1464  	queue_work(hdev->workqueue, &hdev->cmd_work);
1465  }
1466  
1467  /* HCI ncmd timer function */
1468  static void hci_ncmd_timeout(struct work_struct *work)
1469  {
1470  	struct hci_dev *hdev = container_of(work, struct hci_dev,
1471  					    ncmd_timer.work);
1472  
1473  	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1474  
1475  	/* During HCI_INIT phase no events can be injected if the ncmd timer
1476  	 * triggers since the procedure has its own timeout handling.
1477  	 */
1478  	if (test_bit(HCI_INIT, &hdev->flags))
1479  		return;
1480  
1481  	/* This is an irrecoverable state, inject hardware error event */
1482  	hci_reset_dev(hdev);
1483  }
1484  
1485  struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1486  					  bdaddr_t *bdaddr, u8 bdaddr_type)
1487  {
1488  	struct oob_data *data;
1489  
1490  	list_for_each_entry(data, &hdev->remote_oob_data, list) {
1491  		if (bacmp(bdaddr, &data->bdaddr) != 0)
1492  			continue;
1493  		if (data->bdaddr_type != bdaddr_type)
1494  			continue;
1495  		return data;
1496  	}
1497  
1498  	return NULL;
1499  }
1500  
1501  int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1502  			       u8 bdaddr_type)
1503  {
1504  	struct oob_data *data;
1505  
1506  	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1507  	if (!data)
1508  		return -ENOENT;
1509  
1510  	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1511  
1512  	list_del(&data->list);
1513  	kfree(data);
1514  
1515  	return 0;
1516  }
1517  
1518  void hci_remote_oob_data_clear(struct hci_dev *hdev)
1519  {
1520  	struct oob_data *data, *n;
1521  
1522  	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1523  		list_del(&data->list);
1524  		kfree(data);
1525  	}
1526  }
1527  
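/* The 'present' value maintained below encodes which OOB blocks are
 * valid: 0x00 = none, 0x01 = P-192 hash/rand only, 0x02 = P-256 only,
 * 0x03 = both P-192 and P-256.
 */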
1528  int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1529  			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
1530  			    u8 *hash256, u8 *rand256)
1531  {
1532  	struct oob_data *data;
1533  
1534  	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1535  	if (!data) {
1536  		data = kmalloc(sizeof(*data), GFP_KERNEL);
1537  		if (!data)
1538  			return -ENOMEM;
1539  
1540  		bacpy(&data->bdaddr, bdaddr);
1541  		data->bdaddr_type = bdaddr_type;
1542  		list_add(&data->list, &hdev->remote_oob_data);
1543  	}
1544  
1545  	if (hash192 && rand192) {
1546  		memcpy(data->hash192, hash192, sizeof(data->hash192));
1547  		memcpy(data->rand192, rand192, sizeof(data->rand192));
1548  		if (hash256 && rand256)
1549  			data->present = 0x03;
1550  	} else {
1551  		memset(data->hash192, 0, sizeof(data->hash192));
1552  		memset(data->rand192, 0, sizeof(data->rand192));
1553  		if (hash256 && rand256)
1554  			data->present = 0x02;
1555  		else
1556  			data->present = 0x00;
1557  	}
1558  
1559  	if (hash256 && rand256) {
1560  		memcpy(data->hash256, hash256, sizeof(data->hash256));
1561  		memcpy(data->rand256, rand256, sizeof(data->rand256));
1562  	} else {
1563  		memset(data->hash256, 0, sizeof(data->hash256));
1564  		memset(data->rand256, 0, sizeof(data->rand256));
1565  		if (hash192 && rand192)
1566  			data->present = 0x01;
1567  	}
1568  
1569  	BT_DBG("%s for %pMR", hdev->name, bdaddr);
1570  
1571  	return 0;
1572  }
1573  
1574  /* This function requires the caller holds hdev->lock */
1575  struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1576  {
1577  	struct adv_info *adv_instance;
1578  
1579  	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1580  		if (adv_instance->instance == instance)
1581  			return adv_instance;
1582  	}
1583  
1584  	return NULL;
1585  }
1586  
1587  /* This function requires the caller holds hdev->lock */
1588  struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1589  {
1590  	struct adv_info *cur_instance;
1591  
1592  	cur_instance = hci_find_adv_instance(hdev, instance);
1593  	if (!cur_instance)
1594  		return NULL;
1595  
1596  	if (cur_instance == list_last_entry(&hdev->adv_instances,
1597  					    struct adv_info, list))
1598  		return list_first_entry(&hdev->adv_instances,
1599  						 struct adv_info, list);
1600  	else
1601  		return list_next_entry(cur_instance, list);
1602  }
1603  
1604  /* This function requires the caller holds hdev->lock */
1605  int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1606  {
1607  	struct adv_info *adv_instance;
1608  
1609  	adv_instance = hci_find_adv_instance(hdev, instance);
1610  	if (!adv_instance)
1611  		return -ENOENT;
1612  
1613  	BT_DBG("%s removing %d", hdev->name, instance);
1614  
1615  	if (hdev->cur_adv_instance == instance) {
1616  		if (hdev->adv_instance_timeout) {
1617  			cancel_delayed_work(&hdev->adv_instance_expire);
1618  			hdev->adv_instance_timeout = 0;
1619  		}
1620  		hdev->cur_adv_instance = 0x00;
1621  	}
1622  
1623  	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1624  
1625  	list_del(&adv_instance->list);
1626  	kfree(adv_instance);
1627  
1628  	hdev->adv_instance_cnt--;
1629  
1630  	return 0;
1631  }
1632  
1633  void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1634  {
1635  	struct adv_info *adv_instance, *n;
1636  
1637  	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1638  		adv_instance->rpa_expired = rpa_expired;
1639  }
1640  
1641  /* This function requires the caller holds hdev->lock */
1642  void hci_adv_instances_clear(struct hci_dev *hdev)
1643  {
1644  	struct adv_info *adv_instance, *n;
1645  
1646  	if (hdev->adv_instance_timeout) {
1647  		disable_delayed_work(&hdev->adv_instance_expire);
1648  		hdev->adv_instance_timeout = 0;
1649  	}
1650  
1651  	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1652  		disable_delayed_work_sync(&adv_instance->rpa_expired_cb);
1653  		list_del(&adv_instance->list);
1654  		kfree(adv_instance);
1655  	}
1656  
1657  	hdev->adv_instance_cnt = 0;
1658  	hdev->cur_adv_instance = 0x00;
1659  }
1660  
1661  static void adv_instance_rpa_expired(struct work_struct *work)
1662  {
1663  	struct adv_info *adv_instance = container_of(work, struct adv_info,
1664  						     rpa_expired_cb.work);
1665  
1666  	BT_DBG("");
1667  
1668  	adv_instance->rpa_expired = true;
1669  }
1670  
1671  /* This function requires the caller holds hdev->lock */
1672  struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1673  				      u32 flags, u16 adv_data_len, u8 *adv_data,
1674  				      u16 scan_rsp_len, u8 *scan_rsp_data,
1675  				      u16 timeout, u16 duration, s8 tx_power,
1676  				      u32 min_interval, u32 max_interval,
1677  				      u8 mesh_handle)
1678  {
1679  	struct adv_info *adv;
1680  
1681  	adv = hci_find_adv_instance(hdev, instance);
1682  	if (adv) {
1683  		memset(adv->adv_data, 0, sizeof(adv->adv_data));
1684  		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1685  		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1686  	} else {
1687  		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1688  		    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1689  			return ERR_PTR(-EOVERFLOW);
1690  
1691  		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1692  		if (!adv)
1693  			return ERR_PTR(-ENOMEM);
1694  
1695  		adv->pending = true;
1696  		adv->instance = instance;
1697  
1698  		/* If the controller supports only one set and the instance is
1699  		 * set to 1, then there is no option other than using handle 0x00.
1700  		 */
1701  		if (hdev->le_num_of_adv_sets == 1 && instance == 1)
1702  			adv->handle = 0x00;
1703  		else
1704  			adv->handle = instance;
1705  
1706  		list_add(&adv->list, &hdev->adv_instances);
1707  		hdev->adv_instance_cnt++;
1708  	}
1709  
1710  	adv->flags = flags;
1711  	adv->min_interval = min_interval;
1712  	adv->max_interval = max_interval;
1713  	adv->tx_power = tx_power;
1714  	/* Defining a mesh_handle changes the timing units to ms,
1715  	 * rather than seconds, and ties the instance to the requested
1716  	 * mesh_tx queue.
1717  	 */
1718  	adv->mesh = mesh_handle;
1719  
1720  	hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1721  				  scan_rsp_len, scan_rsp_data);
1722  
1723  	adv->timeout = timeout;
1724  	adv->remaining_time = timeout;
1725  
1726  	if (duration == 0)
1727  		adv->duration = hdev->def_multi_adv_rotation_duration;
1728  	else
1729  		adv->duration = duration;
1730  
1731  	INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1732  
1733  	BT_DBG("%s for instance %d", hdev->name, instance);
1734  
1735  	return adv;
1736  }
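
/* Minimal usage sketch (illustrative values; caller must hold hdev->lock):
 *
 *	u8 ad[] = { 0x02, 0x01, 0x06 };		(sample AD: flags only)
 *	struct adv_info *adv;
 *
 *	adv = hci_add_adv_instance(hdev, 0x01, 0, sizeof(ad), ad, 0, NULL,
 *				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
 *				   0x0800, 0x0800, 0);
 *	if (IS_ERR(adv))
 *		return PTR_ERR(adv);
 *
 * 0x0800 is 1.28 s in 0.625 ms advertising interval units.
 */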
1737  
1738  /* This function requires the caller holds hdev->lock */
1739  struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1740  				      u32 flags, u8 data_len, u8 *data,
1741  				      u32 min_interval, u32 max_interval)
1742  {
1743  	struct adv_info *adv;
1744  
1745  	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1746  				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1747  				   min_interval, max_interval, 0);
1748  	if (IS_ERR(adv))
1749  		return adv;
1750  
1751  	adv->periodic = true;
1752  	adv->per_adv_data_len = data_len;
1753  
1754  	if (data)
1755  		memcpy(adv->per_adv_data, data, data_len);
1756  
1757  	return adv;
1758  }
1759  
1760  /* This function requires the caller holds hdev->lock */
1761  int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1762  			      u16 adv_data_len, u8 *adv_data,
1763  			      u16 scan_rsp_len, u8 *scan_rsp_data)
1764  {
1765  	struct adv_info *adv;
1766  
1767  	adv = hci_find_adv_instance(hdev, instance);
1768  
1769  	/* If advertisement doesn't exist, we can't modify its data */
1770  	if (!adv)
1771  		return -ENOENT;
1772  
1773  	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1774  		memset(adv->adv_data, 0, sizeof(adv->adv_data));
1775  		memcpy(adv->adv_data, adv_data, adv_data_len);
1776  		adv->adv_data_len = adv_data_len;
1777  		adv->adv_data_changed = true;
1778  	}
1779  
1780  	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1781  		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1782  		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1783  		adv->scan_rsp_len = scan_rsp_len;
1784  		adv->scan_rsp_changed = true;
1785  	}
1786  
1787  	/* Mark as changed if there are flags which would affect it */
1788  	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1789  	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1790  		adv->scan_rsp_changed = true;
1791  
1792  	return 0;
1793  }
1794  
1795  /* This function requires the caller holds hdev->lock */
1796  u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1797  {
1798  	u32 flags;
1799  	struct adv_info *adv;
1800  
1801  	if (instance == 0x00) {
1802  		/* Instance 0 always manages the "Tx Power" and "Flags"
1803  		 * fields
1804  		 */
1805  		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1806  
1807  		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1808  		 * corresponds to the "connectable" instance flag.
1809  		 */
1810  		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1811  			flags |= MGMT_ADV_FLAG_CONNECTABLE;
1812  
1813  		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1814  			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1815  		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1816  			flags |= MGMT_ADV_FLAG_DISCOV;
1817  
1818  		return flags;
1819  	}
1820  
1821  	adv = hci_find_adv_instance(hdev, instance);
1822  
1823  	/* Return 0 when we got an invalid instance identifier. */
1824  	if (!adv)
1825  		return 0;
1826  
1827  	return adv->flags;
1828  }
1829  
1830  bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1831  {
1832  	struct adv_info *adv;
1833  
1834  	/* Instance 0x00 always sets the local name */
1835  	if (instance == 0x00)
1836  		return true;
1837  
1838  	adv = hci_find_adv_instance(hdev, instance);
1839  	if (!adv)
1840  		return false;
1841  
1842  	if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1843  	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1844  		return true;
1845  
1846  	return adv->scan_rsp_len ? true : false;
1847  }
1848  
1849  /* This function requires the caller holds hdev->lock */
1850  void hci_adv_monitors_clear(struct hci_dev *hdev)
1851  {
1852  	struct adv_monitor *monitor;
1853  	int handle;
1854  
1855  	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1856  		hci_free_adv_monitor(hdev, monitor);
1857  
1858  	idr_destroy(&hdev->adv_monitors_idr);
1859  }
1860  
1861  /* Frees the monitor structure and does the related bookkeeping.
1862   * This function requires the caller holds hdev->lock.
1863   */
1864  void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1865  {
1866  	struct adv_pattern *pattern;
1867  	struct adv_pattern *tmp;
1868  
1869  	if (!monitor)
1870  		return;
1871  
1872  	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1873  		list_del(&pattern->list);
1874  		kfree(pattern);
1875  	}
1876  
1877  	if (monitor->handle)
1878  		idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1879  
1880  	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1881  		hdev->adv_monitors_cnt--;
1882  		mgmt_adv_monitor_removed(hdev, monitor->handle);
1883  	}
1884  
1885  	kfree(monitor);
1886  }
1887  
1888  /* Assigns a handle to the monitor and, if offloading is supported and power is on,
1889   * also attempts to forward the request to the controller.
1890   * This function requires the caller holds hci_req_sync_lock.
1891   */
1892  int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1893  {
1894  	int min, max, handle;
1895  	int status = 0;
1896  
1897  	if (!monitor)
1898  		return -EINVAL;
1899  
1900  	hci_dev_lock(hdev);
1901  
1902  	min = HCI_MIN_ADV_MONITOR_HANDLE;
1903  	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1904  	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1905  			   GFP_KERNEL);
1906  
1907  	hci_dev_unlock(hdev);
1908  
1909  	if (handle < 0)
1910  		return handle;
1911  
1912  	monitor->handle = handle;
1913  
1914  	if (!hdev_is_powered(hdev))
1915  		return status;
1916  
1917  	switch (hci_get_adv_monitor_offload_ext(hdev)) {
1918  	case HCI_ADV_MONITOR_EXT_NONE:
1919  		bt_dev_dbg(hdev, "add monitor %d status %d",
1920  			   monitor->handle, status);
1921  		/* Message was not forwarded to controller - not an error */
1922  		break;
1923  
1924  	case HCI_ADV_MONITOR_EXT_MSFT:
1925  		status = msft_add_monitor_pattern(hdev, monitor);
1926  		bt_dev_dbg(hdev, "add monitor %d msft status %d",
1927  			   handle, status);
1928  		break;
1929  	}
1930  
1931  	return status;
1932  }
1933  
1934  /* Attempts to remove the monitor from the controller and frees it. If the
1935   * controller has no corresponding handle, the monitor is freed anyway.
1936   * This function requires the caller holds hci_req_sync_lock.
1937   */
1938  static int hci_remove_adv_monitor(struct hci_dev *hdev,
1939  				  struct adv_monitor *monitor)
1940  {
1941  	int status = 0;
1942  	int handle;
1943  
1944  	switch (hci_get_adv_monitor_offload_ext(hdev)) {
1945  	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1946  		bt_dev_dbg(hdev, "remove monitor %d status %d",
1947  			   monitor->handle, status);
1948  		goto free_monitor;
1949  
1950  	case HCI_ADV_MONITOR_EXT_MSFT:
1951  		handle = monitor->handle;
1952  		status = msft_remove_monitor(hdev, monitor);
1953  		bt_dev_dbg(hdev, "remove monitor %d msft status %d",
1954  			   handle, status);
1955  		break;
1956  	}
1957  
1958  	/* If no matching handle is registered, just free the monitor */
1959  	if (status == -ENOENT)
1960  		goto free_monitor;
1961  
1962  	return status;
1963  
1964  free_monitor:
1965  	if (status == -ENOENT)
1966  		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1967  			    monitor->handle);
1968  	hci_free_adv_monitor(hdev, monitor);
1969  
1970  	return status;
1971  }
1972  
1973  /* This function requires the caller holds hci_req_sync_lock */
1974  int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
1975  {
1976  	struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
1977  
1978  	if (!monitor)
1979  		return -EINVAL;
1980  
1981  	return hci_remove_adv_monitor(hdev, monitor);
1982  }
1983  
1984  /* This function requires the caller holds hci_req_sync_lock */
1985  int hci_remove_all_adv_monitor(struct hci_dev *hdev)
1986  {
1987  	struct adv_monitor *monitor;
1988  	int idr_next_id = 0;
1989  	int status = 0;
1990  
1991  	while (1) {
1992  		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
1993  		if (!monitor)
1994  			break;
1995  
1996  		status = hci_remove_adv_monitor(hdev, monitor);
1997  		if (status)
1998  			return status;
1999  
2000  		idr_next_id++;
2001  	}
2002  
2003  	return status;
2004  }
2005  
2006  /* This function requires the caller holds hdev->lock */
2007  bool hci_is_adv_monitoring(struct hci_dev *hdev)
2008  {
2009  	return !idr_is_empty(&hdev->adv_monitors_idr);
2010  }
2011  
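/* Reports which advertisement monitor offloading mechanism the controller
 * supports: the Microsoft vendor extension when available, otherwise none
 * (in which case monitors are not forwarded to the controller).
 */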
2012  int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2013  {
2014  	if (msft_monitor_supported(hdev))
2015  		return HCI_ADV_MONITOR_EXT_MSFT;
2016  
2017  	return HCI_ADV_MONITOR_EXT_NONE;
2018  }
2019  
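/* Generic bdaddr list helpers. The lookup variants below return the entry
 * matching both the address and the address type, or NULL if there is none.
 */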
2020  struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2021  					 bdaddr_t *bdaddr, u8 type)
2022  {
2023  	struct bdaddr_list *b;
2024  
2025  	list_for_each_entry(b, bdaddr_list, list) {
2026  		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2027  			return b;
2028  	}
2029  
2030  	return NULL;
2031  }
2032  
2033  struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2034  				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2035  				u8 type)
2036  {
2037  	struct bdaddr_list_with_irk *b;
2038  
2039  	list_for_each_entry(b, bdaddr_list, list) {
2040  		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2041  			return b;
2042  	}
2043  
2044  	return NULL;
2045  }
2046  
2047  struct bdaddr_list_with_flags *
2048  hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2049  				  bdaddr_t *bdaddr, u8 type)
2050  {
2051  	struct bdaddr_list_with_flags *b;
2052  
2053  	list_for_each_entry(b, bdaddr_list, list) {
2054  		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2055  			return b;
2056  	}
2057  
2058  	return NULL;
2059  }
2060  
2061  void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2062  {
2063  	struct bdaddr_list *b, *n;
2064  
2065  	list_for_each_entry_safe(b, n, bdaddr_list, list) {
2066  		list_del(&b->list);
2067  		kfree(b);
2068  	}
2069  }
2070  
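/* Add an address of the given type to the list. BDADDR_ANY is rejected with
 * -EBADF and an already existing entry with -EEXIST.
 */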
2071  int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2072  {
2073  	struct bdaddr_list *entry;
2074  
2075  	if (!bacmp(bdaddr, BDADDR_ANY))
2076  		return -EBADF;
2077  
2078  	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2079  		return -EEXIST;
2080  
2081  	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2082  	if (!entry)
2083  		return -ENOMEM;
2084  
2085  	bacpy(&entry->bdaddr, bdaddr);
2086  	entry->bdaddr_type = type;
2087  
2088  	list_add(&entry->list, list);
2089  
2090  	return 0;
2091  }
2092  
2093  int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2094  					u8 type, u8 *peer_irk, u8 *local_irk)
2095  {
2096  	struct bdaddr_list_with_irk *entry;
2097  
2098  	if (!bacmp(bdaddr, BDADDR_ANY))
2099  		return -EBADF;
2100  
2101  	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2102  		return -EEXIST;
2103  
2104  	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2105  	if (!entry)
2106  		return -ENOMEM;
2107  
2108  	bacpy(&entry->bdaddr, bdaddr);
2109  	entry->bdaddr_type = type;
2110  
2111  	if (peer_irk)
2112  		memcpy(entry->peer_irk, peer_irk, 16);
2113  
2114  	if (local_irk)
2115  		memcpy(entry->local_irk, local_irk, 16);
2116  
2117  	list_add(&entry->list, list);
2118  
2119  	return 0;
2120  }
2121  
2122  int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2123  				   u8 type, u32 flags)
2124  {
2125  	struct bdaddr_list_with_flags *entry;
2126  
2127  	if (!bacmp(bdaddr, BDADDR_ANY))
2128  		return -EBADF;
2129  
2130  	if (hci_bdaddr_list_lookup(list, bdaddr, type))
2131  		return -EEXIST;
2132  
2133  	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2134  	if (!entry)
2135  		return -ENOMEM;
2136  
2137  	bacpy(&entry->bdaddr, bdaddr);
2138  	entry->bdaddr_type = type;
2139  	entry->flags = flags;
2140  
2141  	list_add(&entry->list, list);
2142  
2143  	return 0;
2144  }
2145  
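/* Remove a single entry from the list. Passing BDADDR_ANY clears the whole
 * list instead.
 */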
2146  int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2147  {
2148  	struct bdaddr_list *entry;
2149  
2150  	if (!bacmp(bdaddr, BDADDR_ANY)) {
2151  		hci_bdaddr_list_clear(list);
2152  		return 0;
2153  	}
2154  
2155  	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2156  	if (!entry)
2157  		return -ENOENT;
2158  
2159  	list_del(&entry->list);
2160  	kfree(entry);
2161  
2162  	return 0;
2163  }
2164  
2165  int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2166  							u8 type)
2167  {
2168  	struct bdaddr_list_with_irk *entry;
2169  
2170  	if (!bacmp(bdaddr, BDADDR_ANY)) {
2171  		hci_bdaddr_list_clear(list);
2172  		return 0;
2173  	}
2174  
2175  	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2176  	if (!entry)
2177  		return -ENOENT;
2178  
2179  	list_del(&entry->list);
2180  	kfree(entry);
2181  
2182  	return 0;
2183  }
2184  
2185  int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2186  				   u8 type)
2187  {
2188  	struct bdaddr_list_with_flags *entry;
2189  
2190  	if (!bacmp(bdaddr, BDADDR_ANY)) {
2191  		hci_bdaddr_list_clear(list);
2192  		return 0;
2193  	}
2194  
2195  	entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2196  	if (!entry)
2197  		return -ENOENT;
2198  
2199  	list_del(&entry->list);
2200  	kfree(entry);
2201  
2202  	return 0;
2203  }
2204  
2205  /* This function requires the caller holds hdev->lock */
2206  struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2207  					       bdaddr_t *addr, u8 addr_type)
2208  {
2209  	struct hci_conn_params *params;
2210  
2211  	list_for_each_entry(params, &hdev->le_conn_params, list) {
2212  		if (bacmp(&params->addr, addr) == 0 &&
2213  		    params->addr_type == addr_type) {
2214  			return params;
2215  		}
2216  	}
2217  
2218  	return NULL;
2219  }
2220  
2221  /* This function requires the caller holds hdev->lock or rcu_read_lock */
2222  struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2223  						  bdaddr_t *addr, u8 addr_type)
2224  {
2225  	struct hci_conn_params *param;
2226  
2227  	rcu_read_lock();
2228  
2229  	list_for_each_entry_rcu(param, list, action) {
2230  		if (bacmp(&param->addr, addr) == 0 &&
2231  		    param->addr_type == addr_type) {
2232  			rcu_read_unlock();
2233  			return param;
2234  		}
2235  	}
2236  
2237  	rcu_read_unlock();
2238  
2239  	return NULL;
2240  }
2241  
2242  /* This function requires the caller holds hdev->lock */
2243  void hci_pend_le_list_del_init(struct hci_conn_params *param)
2244  {
2245  	if (list_empty(&param->action))
2246  		return;
2247  
2248  	list_del_rcu(&param->action);
2249  	synchronize_rcu();
2250  	INIT_LIST_HEAD(&param->action);
2251  }
2252  
2253  /* This function requires the caller holds hdev->lock */
2254  void hci_pend_le_list_add(struct hci_conn_params *param,
2255  			  struct list_head *list)
2256  {
2257  	list_add_rcu(&param->action, list);
2258  }
2259  
2260  /* This function requires the caller holds hdev->lock */
2261  struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2262  					    bdaddr_t *addr, u8 addr_type)
2263  {
2264  	struct hci_conn_params *params;
2265  
2266  	params = hci_conn_params_lookup(hdev, addr, addr_type);
2267  	if (params)
2268  		return params;
2269  
2270  	params = kzalloc(sizeof(*params), GFP_KERNEL);
2271  	if (!params) {
2272  		bt_dev_err(hdev, "out of memory");
2273  		return NULL;
2274  	}
2275  
2276  	bacpy(&params->addr, addr);
2277  	params->addr_type = addr_type;
2278  
2279  	list_add(&params->list, &hdev->le_conn_params);
2280  	INIT_LIST_HEAD(&params->action);
2281  
2282  	params->conn_min_interval = hdev->le_conn_min_interval;
2283  	params->conn_max_interval = hdev->le_conn_max_interval;
2284  	params->conn_latency = hdev->le_conn_latency;
2285  	params->supervision_timeout = hdev->le_supv_timeout;
2286  	params->auto_connect = HCI_AUTO_CONN_DISABLED;
2287  
2288  	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2289  
2290  	return params;
2291  }
2292  
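/* Free a connection parameter entry: unlink any pending action, drop the
 * connection reference if one is held, and release the memory. As it
 * unlinks entries from hdev lists, callers are expected to hold hdev->lock,
 * just as for hci_pend_le_list_del_init().
 */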
2293  void hci_conn_params_free(struct hci_conn_params *params)
2294  {
2295  	hci_pend_le_list_del_init(params);
2296  
2297  	if (params->conn) {
2298  		hci_conn_drop(params->conn);
2299  		hci_conn_put(params->conn);
2300  	}
2301  
2302  	list_del(&params->list);
2303  	kfree(params);
2304  }
2305  
2306  /* This function requires the caller holds hdev->lock */
2307  void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2308  {
2309  	struct hci_conn_params *params;
2310  
2311  	params = hci_conn_params_lookup(hdev, addr, addr_type);
2312  	if (!params)
2313  		return;
2314  
2315  	hci_conn_params_free(params);
2316  
2317  	hci_update_passive_scan(hdev);
2318  
2319  	BT_DBG("addr %pMR (type %u)", addr, addr_type);
2320  }
2321  
2322  /* This function requires the caller holds hdev->lock */
2323  void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2324  {
2325  	struct hci_conn_params *params, *tmp;
2326  
2327  	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2328  		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2329  			continue;
2330  
2331  		/* If trying to establish a one-time connection to a disabled
2332  		 * device, keep the params but mark them as explicit-connect only.
2333  		 */
2334  		if (params->explicit_connect) {
2335  			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2336  			continue;
2337  		}
2338  
2339  		hci_conn_params_free(params);
2340  	}
2341  
2342  	BT_DBG("All LE disabled connection parameters were removed");
2343  }
2344  
2345  /* This function requires the caller holds hdev->lock */
2346  static void hci_conn_params_clear_all(struct hci_dev *hdev)
2347  {
2348  	struct hci_conn_params *params, *tmp;
2349  
2350  	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2351  		hci_conn_params_free(params);
2352  
2353  	BT_DBG("All LE connection parameters were removed");
2354  }
2355  
2356  /* Copy the Identity Address of the controller.
2357   *
2358   * If the controller has a public BD_ADDR, then by default use that one.
2359   * If this is an LE-only controller without a public address, default to
2360   * the static random address.
2361   *
2362   * For debugging purposes it is possible to force controllers with a
2363   * public address to use the static random address instead.
2364   *
2365   * In case BR/EDR has been disabled on a dual-mode controller and
2366   * userspace has configured a static address, then that address
2367   * becomes the identity address instead of the public BR/EDR address.
2368   */
2369  void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2370  			       u8 *bdaddr_type)
2371  {
2372  	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2373  	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2374  	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2375  	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
2376  		bacpy(bdaddr, &hdev->static_addr);
2377  		*bdaddr_type = ADDR_LE_DEV_RANDOM;
2378  	} else {
2379  		bacpy(bdaddr, &hdev->bdaddr);
2380  		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
2381  	}
2382  }
2383  
2384  static void hci_clear_wake_reason(struct hci_dev *hdev)
2385  {
2386  	hci_dev_lock(hdev);
2387  
2388  	hdev->wake_reason = 0;
2389  	bacpy(&hdev->wake_addr, BDADDR_ANY);
2390  	hdev->wake_addr_type = 0;
2391  
2392  	hci_dev_unlock(hdev);
2393  }
2394  
2395  static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2396  				void *data)
2397  {
2398  	struct hci_dev *hdev =
2399  		container_of(nb, struct hci_dev, suspend_notifier);
2400  	int ret = 0;
2401  
2402  	/* Userspace has full control of this device. Do nothing. */
2403  	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2404  		return NOTIFY_DONE;
2405  
2406  	/* To avoid a potential race with hci_unregister_dev. */
2407  	hci_dev_hold(hdev);
2408  
2409  	switch (action) {
2410  	case PM_HIBERNATION_PREPARE:
2411  	case PM_SUSPEND_PREPARE:
2412  		ret = hci_suspend_dev(hdev);
2413  		break;
2414  	case PM_POST_HIBERNATION:
2415  	case PM_POST_SUSPEND:
2416  		ret = hci_resume_dev(hdev);
2417  		break;
2418  	}
2419  
2420  	if (ret)
2421  		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2422  			   action, ret);
2423  
2424  	hci_dev_put(hdev);
2425  	return NOTIFY_DONE;
2426  }
2427  
2428  /* Alloc HCI device */
2429  struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2430  {
2431  	struct hci_dev *hdev;
2432  	unsigned int alloc_size;
2433  
2434  	alloc_size = sizeof(*hdev);
2435  	if (sizeof_priv) {
2436  		/* Fixme: May need ALIGN-ment? */
2437  		alloc_size += sizeof_priv;
2438  	}
2439  
2440  	hdev = kzalloc(alloc_size, GFP_KERNEL);
2441  	if (!hdev)
2442  		return NULL;
2443  
2444  	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2445  	hdev->esco_type = (ESCO_HV1);
2446  	hdev->link_mode = (HCI_LM_ACCEPT);
2447  	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
2448  	hdev->io_capability = 0x03;	/* No Input No Output */
2449  	hdev->manufacturer = 0xffff;	/* Default to internal use */
2450  	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2451  	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2452  	hdev->adv_instance_cnt = 0;
2453  	hdev->cur_adv_instance = 0x00;
2454  	hdev->adv_instance_timeout = 0;
2455  
2456  	hdev->advmon_allowlist_duration = 300;
2457  	hdev->advmon_no_filter_duration = 500;
2458  	hdev->enable_advmon_interleave_scan = 0x00;	/* Default to disable */
2459  
2460  	hdev->sniff_max_interval = 800;
2461  	hdev->sniff_min_interval = 80;
2462  
2463  	hdev->le_adv_channel_map = 0x07;
2464  	hdev->le_adv_min_interval = 0x0800;
2465  	hdev->le_adv_max_interval = 0x0800;
2466  	hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST;
2467  	hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST;
2468  	hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1;
2469  	hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1;
2470  	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2471  	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2472  	hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST;
2473  	hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST;
2474  	hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN;
2475  	hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN;
2476  	hdev->le_conn_min_interval = 0x0018;
2477  	hdev->le_conn_max_interval = 0x0028;
2478  	hdev->le_conn_latency = 0x0000;
2479  	hdev->le_supv_timeout = 0x002a;
2480  	hdev->le_def_tx_len = 0x001b;
2481  	hdev->le_def_tx_time = 0x0148;
2482  	hdev->le_max_tx_len = 0x001b;
2483  	hdev->le_max_tx_time = 0x0148;
2484  	hdev->le_max_rx_len = 0x001b;
2485  	hdev->le_max_rx_time = 0x0148;
2486  	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2487  	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2488  	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2489  	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2490  	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2491  	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2492  	hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT;
2493  	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2494  	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2495  
2496  	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2497  	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2498  	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2499  	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2500  	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2501  	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2502  
2503  	/* default 1.28 sec page scan */
2504  	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2505  	hdev->def_page_scan_int = 0x0800;
2506  	hdev->def_page_scan_window = 0x0012;
2507  
2508  	mutex_init(&hdev->lock);
2509  	mutex_init(&hdev->req_lock);
2510  
2511  	ida_init(&hdev->unset_handle_ida);
2512  
2513  	INIT_LIST_HEAD(&hdev->mesh_pending);
2514  	INIT_LIST_HEAD(&hdev->mgmt_pending);
2515  	INIT_LIST_HEAD(&hdev->reject_list);
2516  	INIT_LIST_HEAD(&hdev->accept_list);
2517  	INIT_LIST_HEAD(&hdev->uuids);
2518  	INIT_LIST_HEAD(&hdev->link_keys);
2519  	INIT_LIST_HEAD(&hdev->long_term_keys);
2520  	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2521  	INIT_LIST_HEAD(&hdev->remote_oob_data);
2522  	INIT_LIST_HEAD(&hdev->le_accept_list);
2523  	INIT_LIST_HEAD(&hdev->le_resolv_list);
2524  	INIT_LIST_HEAD(&hdev->le_conn_params);
2525  	INIT_LIST_HEAD(&hdev->pend_le_conns);
2526  	INIT_LIST_HEAD(&hdev->pend_le_reports);
2527  	INIT_LIST_HEAD(&hdev->conn_hash.list);
2528  	INIT_LIST_HEAD(&hdev->adv_instances);
2529  	INIT_LIST_HEAD(&hdev->blocked_keys);
2530  	INIT_LIST_HEAD(&hdev->monitored_devices);
2531  
2532  	INIT_LIST_HEAD(&hdev->local_codecs);
2533  	INIT_WORK(&hdev->rx_work, hci_rx_work);
2534  	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2535  	INIT_WORK(&hdev->tx_work, hci_tx_work);
2536  	INIT_WORK(&hdev->power_on, hci_power_on);
2537  	INIT_WORK(&hdev->error_reset, hci_error_reset);
2538  
2539  	hci_cmd_sync_init(hdev);
2540  
2541  	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2542  
2543  	skb_queue_head_init(&hdev->rx_q);
2544  	skb_queue_head_init(&hdev->cmd_q);
2545  	skb_queue_head_init(&hdev->raw_q);
2546  
2547  	init_waitqueue_head(&hdev->req_wait_q);
2548  
2549  	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2550  	INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2551  
2552  	hci_devcd_setup(hdev);
2553  
2554  	hci_init_sysfs(hdev);
2555  	discovery_init(hdev);
2556  
2557  	return hdev;
2558  }
2559  EXPORT_SYMBOL(hci_alloc_dev_priv);
2560  
2561  /* Free HCI device */
2562  void hci_free_dev(struct hci_dev *hdev)
2563  {
2564  	/* will free via device release */
2565  	put_device(&hdev->dev);
2566  }
2567  EXPORT_SYMBOL(hci_free_dev);
2568  
2569  /* Register HCI device */
2570  int hci_register_dev(struct hci_dev *hdev)
2571  {
2572  	int id, error;
2573  
2574  	if (!hdev->open || !hdev->close || !hdev->send)
2575  		return -EINVAL;
2576  
2577  	id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2578  	if (id < 0)
2579  		return id;
2580  
2581  	error = dev_set_name(&hdev->dev, "hci%u", id);
2582  	if (error)
2583  		return error;
2584  
2585  	hdev->name = dev_name(&hdev->dev);
2586  	hdev->id = id;
2587  
2588  	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2589  
2590  	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2591  	if (!hdev->workqueue) {
2592  		error = -ENOMEM;
2593  		goto err;
2594  	}
2595  
2596  	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2597  						      hdev->name);
2598  	if (!hdev->req_workqueue) {
2599  		destroy_workqueue(hdev->workqueue);
2600  		error = -ENOMEM;
2601  		goto err;
2602  	}
2603  
2604  	if (!IS_ERR_OR_NULL(bt_debugfs))
2605  		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2606  
2607  	error = device_add(&hdev->dev);
2608  	if (error < 0)
2609  		goto err_wqueue;
2610  
2611  	hci_leds_init(hdev);
2612  
2613  	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2614  				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2615  				    hdev);
2616  	if (hdev->rfkill) {
2617  		if (rfkill_register(hdev->rfkill) < 0) {
2618  			rfkill_destroy(hdev->rfkill);
2619  			hdev->rfkill = NULL;
2620  		}
2621  	}
2622  
2623  	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2624  		hci_dev_set_flag(hdev, HCI_RFKILLED);
2625  
2626  	hci_dev_set_flag(hdev, HCI_SETUP);
2627  	hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2628  
2629  	/* Assume BR/EDR support until proven otherwise (such as
2630  	 * through reading supported features during init).
2631  	 */
2632  	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2633  
2634  	write_lock(&hci_dev_list_lock);
2635  	list_add(&hdev->list, &hci_dev_list);
2636  	write_unlock(&hci_dev_list_lock);
2637  
2638  	/* Devices that are marked for raw-only usage are unconfigured
2639  	 * and should not be included in normal operation.
2640  	 */
2641  	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2642  		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2643  
2644  	/* Mark the Remote Wakeup connection flag as supported if the driver has
2645  	 * a wakeup callback.
2646  	 */
2647  	if (hdev->wakeup)
2648  		hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2649  
2650  	hci_sock_dev_event(hdev, HCI_DEV_REG);
2651  	hci_dev_hold(hdev);
2652  
2653  	error = hci_register_suspend_notifier(hdev);
2654  	if (error)
2655  		BT_WARN("register suspend notifier failed error:%d\n", error);
2656  
2657  	queue_work(hdev->req_workqueue, &hdev->power_on);
2658  
2659  	idr_init(&hdev->adv_monitors_idr);
2660  	msft_register(hdev);
2661  
2662  	return id;
2663  
2664  err_wqueue:
2665  	debugfs_remove_recursive(hdev->debugfs);
2666  	destroy_workqueue(hdev->workqueue);
2667  	destroy_workqueue(hdev->req_workqueue);
2668  err:
2669  	ida_free(&hci_index_ida, hdev->id);
2670  
2671  	return error;
2672  }
2673  EXPORT_SYMBOL(hci_register_dev);
2674  
2675  /* Unregister HCI device */
2676  void hci_unregister_dev(struct hci_dev *hdev)
2677  {
2678  	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2679  
2680  	mutex_lock(&hdev->unregister_lock);
2681  	hci_dev_set_flag(hdev, HCI_UNREGISTER);
2682  	mutex_unlock(&hdev->unregister_lock);
2683  
2684  	write_lock(&hci_dev_list_lock);
2685  	list_del(&hdev->list);
2686  	write_unlock(&hci_dev_list_lock);
2687  
2688  	disable_work_sync(&hdev->rx_work);
2689  	disable_work_sync(&hdev->cmd_work);
2690  	disable_work_sync(&hdev->tx_work);
2691  	disable_work_sync(&hdev->power_on);
2692  	disable_work_sync(&hdev->error_reset);
2693  
2694  	hci_cmd_sync_clear(hdev);
2695  
2696  	hci_unregister_suspend_notifier(hdev);
2697  
2698  	hci_dev_do_close(hdev);
2699  
2700  	if (!test_bit(HCI_INIT, &hdev->flags) &&
2701  	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
2702  	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2703  		hci_dev_lock(hdev);
2704  		mgmt_index_removed(hdev);
2705  		hci_dev_unlock(hdev);
2706  	}
2707  
2708  	/* mgmt_index_removed should take care of emptying the
2709  	 * pending list */
2710  	BUG_ON(!list_empty(&hdev->mgmt_pending));
2711  
2712  	hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2713  
2714  	if (hdev->rfkill) {
2715  		rfkill_unregister(hdev->rfkill);
2716  		rfkill_destroy(hdev->rfkill);
2717  	}
2718  
2719  	device_del(&hdev->dev);
2720  	/* Actual cleanup is deferred until hci_release_dev(). */
2721  	hci_dev_put(hdev);
2722  }
2723  EXPORT_SYMBOL(hci_unregister_dev);
2724  
2725  /* Release HCI device */
2726  void hci_release_dev(struct hci_dev *hdev)
2727  {
2728  	debugfs_remove_recursive(hdev->debugfs);
2729  	kfree_const(hdev->hw_info);
2730  	kfree_const(hdev->fw_info);
2731  
2732  	destroy_workqueue(hdev->workqueue);
2733  	destroy_workqueue(hdev->req_workqueue);
2734  
2735  	hci_dev_lock(hdev);
2736  	hci_bdaddr_list_clear(&hdev->reject_list);
2737  	hci_bdaddr_list_clear(&hdev->accept_list);
2738  	hci_uuids_clear(hdev);
2739  	hci_link_keys_clear(hdev);
2740  	hci_smp_ltks_clear(hdev);
2741  	hci_smp_irks_clear(hdev);
2742  	hci_remote_oob_data_clear(hdev);
2743  	hci_adv_instances_clear(hdev);
2744  	hci_adv_monitors_clear(hdev);
2745  	hci_bdaddr_list_clear(&hdev->le_accept_list);
2746  	hci_bdaddr_list_clear(&hdev->le_resolv_list);
2747  	hci_conn_params_clear_all(hdev);
2748  	hci_discovery_filter_clear(hdev);
2749  	hci_blocked_keys_clear(hdev);
2750  	hci_codec_list_clear(&hdev->local_codecs);
2751  	msft_release(hdev);
2752  	hci_dev_unlock(hdev);
2753  
2754  	ida_destroy(&hdev->unset_handle_ida);
2755  	ida_free(&hci_index_ida, hdev->id);
2756  	kfree_skb(hdev->sent_cmd);
2757  	kfree_skb(hdev->req_skb);
2758  	kfree_skb(hdev->recv_event);
2759  	kfree(hdev);
2760  }
2761  EXPORT_SYMBOL(hci_release_dev);
2762  
2763  int hci_register_suspend_notifier(struct hci_dev *hdev)
2764  {
2765  	int ret = 0;
2766  
2767  	if (!hdev->suspend_notifier.notifier_call &&
2768  	    !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2769  		hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2770  		ret = register_pm_notifier(&hdev->suspend_notifier);
2771  	}
2772  
2773  	return ret;
2774  }
2775  
2776  int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2777  {
2778  	int ret = 0;
2779  
2780  	if (hdev->suspend_notifier.notifier_call) {
2781  		ret = unregister_pm_notifier(&hdev->suspend_notifier);
2782  		if (!ret)
2783  			hdev->suspend_notifier.notifier_call = NULL;
2784  	}
2785  
2786  	return ret;
2787  }
2788  
2789  /* Cancel ongoing command synchronously:
2790   *
2791   * - Cancel command timer
2792   * - Reset command counter
2793   * - Cancel command request
2794   */
2795  static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2796  {
2797  	bt_dev_dbg(hdev, "err 0x%2.2x", err);
2798  
2799  	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
2800  		disable_delayed_work_sync(&hdev->cmd_timer);
2801  		disable_delayed_work_sync(&hdev->ncmd_timer);
2802  	} else  {
2803  		cancel_delayed_work_sync(&hdev->cmd_timer);
2804  		cancel_delayed_work_sync(&hdev->ncmd_timer);
2805  	}
2806  
2807  	atomic_set(&hdev->cmd_cnt, 1);
2808  
2809  	hci_cmd_sync_cancel_sync(hdev, err);
2810  }
2811  
2812  /* Suspend HCI device */
2813  int hci_suspend_dev(struct hci_dev *hdev)
2814  {
2815  	int ret;
2816  
2817  	bt_dev_dbg(hdev, "");
2818  
2819  	/* Suspend should only act when the device is powered. */
2820  	if (!hdev_is_powered(hdev) ||
2821  	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2822  		return 0;
2823  
2824  	/* If powering down don't attempt to suspend */
2825  	if (mgmt_powering_down(hdev))
2826  		return 0;
2827  
2828  	/* Cancel potentially blocking sync operation before suspend */
2829  	hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2830  
2831  	hci_req_sync_lock(hdev);
2832  	ret = hci_suspend_sync(hdev);
2833  	hci_req_sync_unlock(hdev);
2834  
2835  	hci_clear_wake_reason(hdev);
2836  	mgmt_suspending(hdev, hdev->suspend_state);
2837  
2838  	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2839  	return ret;
2840  }
2841  EXPORT_SYMBOL(hci_suspend_dev);
2842  
2843  /* Resume HCI device */
2844  int hci_resume_dev(struct hci_dev *hdev)
2845  {
2846  	int ret;
2847  
2848  	bt_dev_dbg(hdev, "");
2849  
2850  	/* Resume should only act when the device is powered. */
2851  	if (!hdev_is_powered(hdev) ||
2852  	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
2853  		return 0;
2854  
2855  	/* If powering down don't attempt to resume */
2856  	if (mgmt_powering_down(hdev))
2857  		return 0;
2858  
2859  	hci_req_sync_lock(hdev);
2860  	ret = hci_resume_sync(hdev);
2861  	hci_req_sync_unlock(hdev);
2862  
2863  	mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2864  		      hdev->wake_addr_type);
2865  
2866  	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2867  	return ret;
2868  }
2869  EXPORT_SYMBOL(hci_resume_dev);
2870  
2871  /* Reset HCI device */
2872  int hci_reset_dev(struct hci_dev *hdev)
2873  {
2874  	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2875  	struct sk_buff *skb;
2876  
2877  	skb = bt_skb_alloc(3, GFP_ATOMIC);
2878  	if (!skb)
2879  		return -ENOMEM;
2880  
2881  	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2882  	skb_put_data(skb, hw_err, 3);
2883  
2884  	bt_dev_err(hdev, "Injecting HCI hardware error event");
2885  
2886  	/* Send Hardware Error to upper stack */
2887  	return hci_recv_frame(hdev, skb);
2888  }
2889  EXPORT_SYMBOL(hci_reset_dev);
2890  
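/* Give the driver a chance to reclassify the packet type; if no
 * classify_pkt_type callback is provided, keep the type already set on the
 * skb.
 */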
2891  static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
2892  {
2893  	if (hdev->classify_pkt_type)
2894  		return hdev->classify_pkt_type(hdev, skb);
2895  
2896  	return hci_skb_pkt_type(skb);
2897  }
2898  
2899  /* Receive frame from HCI drivers */
2900  int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2901  {
2902  	u8 dev_pkt_type;
2903  
2904  	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2905  		      && !test_bit(HCI_INIT, &hdev->flags))) {
2906  		kfree_skb(skb);
2907  		return -ENXIO;
2908  	}
2909  
2910  	/* Check if the driver agrees with the packet type classification */
2911  	dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb);
2912  	if (hci_skb_pkt_type(skb) != dev_pkt_type) {
2913  		hci_skb_pkt_type(skb) = dev_pkt_type;
2914  	}
2915  
2916  	switch (hci_skb_pkt_type(skb)) {
2917  	case HCI_EVENT_PKT:
2918  		break;
2919  	case HCI_ACLDATA_PKT:
2920  		/* Detect if ISO packet has been sent as ACL */
2921  		if (hci_conn_num(hdev, ISO_LINK)) {
2922  			__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2923  			__u8 type;
2924  
2925  			type = hci_conn_lookup_type(hdev, hci_handle(handle));
2926  			if (type == ISO_LINK)
2927  				hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2928  		}
2929  		break;
2930  	case HCI_SCODATA_PKT:
2931  		break;
2932  	case HCI_ISODATA_PKT:
2933  		break;
2934  	default:
2935  		kfree_skb(skb);
2936  		return -EINVAL;
2937  	}
2938  
2939  	/* Incoming skb */
2940  	bt_cb(skb)->incoming = 1;
2941  
2942  	/* Time stamp */
2943  	__net_timestamp(skb);
2944  
2945  	skb_queue_tail(&hdev->rx_q, skb);
2946  	queue_work(hdev->workqueue, &hdev->rx_work);
2947  
2948  	return 0;
2949  }
2950  EXPORT_SYMBOL(hci_recv_frame);
2951  
2952  /* Receive diagnostic message from HCI drivers */
2953  int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2954  {
2955  	/* Mark as diagnostic packet */
2956  	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2957  
2958  	/* Time stamp */
2959  	__net_timestamp(skb);
2960  
2961  	skb_queue_tail(&hdev->rx_q, skb);
2962  	queue_work(hdev->workqueue, &hdev->rx_work);
2963  
2964  	return 0;
2965  }
2966  EXPORT_SYMBOL(hci_recv_diag);
2967  
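/* Store a printf-formatted hardware description string for the controller,
 * replacing any previously set value.
 */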
2968  void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2969  {
2970  	va_list vargs;
2971  
2972  	va_start(vargs, fmt);
2973  	kfree_const(hdev->hw_info);
2974  	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2975  	va_end(vargs);
2976  }
2977  EXPORT_SYMBOL(hci_set_hw_info);
2978  
2979  void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2980  {
2981  	va_list vargs;
2982  
2983  	va_start(vargs, fmt);
2984  	kfree_const(hdev->fw_info);
2985  	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2986  	va_end(vargs);
2987  }
2988  EXPORT_SYMBOL(hci_set_fw_info);
2989  
2990  /* ---- Interface to upper protocols ---- */
2991  
2992  int hci_register_cb(struct hci_cb *cb)
2993  {
2994  	BT_DBG("%p name %s", cb, cb->name);
2995  
2996  	mutex_lock(&hci_cb_list_lock);
2997  	list_add_tail(&cb->list, &hci_cb_list);
2998  	mutex_unlock(&hci_cb_list_lock);
2999  
3000  	return 0;
3001  }
3002  EXPORT_SYMBOL(hci_register_cb);
3003  
3004  int hci_unregister_cb(struct hci_cb *cb)
3005  {
3006  	BT_DBG("%p name %s", cb, cb->name);
3007  
3008  	mutex_lock(&hci_cb_list_lock);
3009  	list_del(&cb->list);
3010  	mutex_unlock(&hci_cb_list_lock);
3011  
3012  	return 0;
3013  }
3014  EXPORT_SYMBOL(hci_unregister_cb);
3015  
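/* Hand a single frame to the driver: timestamp it, mirror it to the monitor
 * (and to raw sockets when in promiscuous mode) and then call hdev->send().
 */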
3016  static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3017  {
3018  	int err;
3019  
3020  	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3021  	       skb->len);
3022  
3023  	/* Time stamp */
3024  	__net_timestamp(skb);
3025  
3026  	/* Send copy to monitor */
3027  	hci_send_to_monitor(hdev, skb);
3028  
3029  	if (atomic_read(&hdev->promisc)) {
3030  		/* Send copy to the sockets */
3031  		hci_send_to_sock(hdev, skb);
3032  	}
3033  
3034  	/* Get rid of skb owner, prior to sending to the driver. */
3035  	skb_orphan(skb);
3036  
3037  	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3038  		kfree_skb(skb);
3039  		return -EINVAL;
3040  	}
3041  
3042  	err = hdev->send(hdev, skb);
3043  	if (err < 0) {
3044  		bt_dev_err(hdev, "sending frame failed (%d)", err);
3045  		kfree_skb(skb);
3046  		return err;
3047  	}
3048  
3049  	return 0;
3050  }
3051  
3052  /* Send HCI command */
3053  int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3054  		 const void *param)
3055  {
3056  	struct sk_buff *skb;
3057  
3058  	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3059  
3060  	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
3061  	if (!skb) {
3062  		bt_dev_err(hdev, "no memory for command");
3063  		return -ENOMEM;
3064  	}
3065  
3066  	/* Stand-alone HCI commands must be flagged as
3067  	 * single-command requests.
3068  	 */
3069  	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3070  
3071  	skb_queue_tail(&hdev->cmd_q, skb);
3072  	queue_work(hdev->workqueue, &hdev->cmd_work);
3073  
3074  	return 0;
3075  }
3076  
3077  int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3078  		   const void *param)
3079  {
3080  	struct sk_buff *skb;
3081  
3082  	if (hci_opcode_ogf(opcode) != 0x3f) {
3083  		/* A controller receiving a command shall respond with either
3084  		 * a Command Status Event or a Command Complete Event.
3085  		 * Therefore, all standard HCI commands must be sent via the
3086  		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3087  		 * Some vendors do not comply with this rule for vendor-specific
3088  		 * commands and do not return any event. We want to support
3089  		 * unresponded commands for such cases only.
3090  		 */
3091  		bt_dev_err(hdev, "unresponded command not supported");
3092  		return -EINVAL;
3093  	}
3094  
3095  	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
3096  	if (!skb) {
3097  		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3098  			   opcode);
3099  		return -ENOMEM;
3100  	}
3101  
3102  	hci_send_frame(hdev, skb);
3103  
3104  	return 0;
3105  }
3106  EXPORT_SYMBOL(__hci_cmd_send);
3107  
3108  /* Get data from the previously sent command */
3109  static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3110  {
3111  	struct hci_command_hdr *hdr;
3112  
3113  	if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3114  		return NULL;
3115  
3116  	hdr = (void *)skb->data;
3117  
3118  	if (hdr->opcode != cpu_to_le16(opcode))
3119  		return NULL;
3120  
3121  	return skb->data + HCI_COMMAND_HDR_SIZE;
3122  }
3123  
3124  /* Get data from the previously sent command */
3125  void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3126  {
3127  	void *data;
3128  
3129  	/* Check if opcode matches last sent command */
3130  	data = hci_cmd_data(hdev->sent_cmd, opcode);
3131  	if (!data)
3132  		/* Check if opcode matches last request */
3133  		data = hci_cmd_data(hdev->req_skb, opcode);
3134  
3135  	return data;
3136  }
3137  
3138  /* Get data from last received event */
3139  void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3140  {
3141  	struct hci_event_hdr *hdr;
3142  	int offset;
3143  
3144  	if (!hdev->recv_event)
3145  		return NULL;
3146  
3147  	hdr = (void *)hdev->recv_event->data;
3148  	offset = sizeof(*hdr);
3149  
3150  	if (hdr->evt != event) {
3151  		/* For an LE meta event, check whether the subevent matches */
3152  		if (hdr->evt == HCI_EV_LE_META) {
3153  			struct hci_ev_le_meta *ev;
3154  
3155  			ev = (void *)hdev->recv_event->data + offset;
3156  			offset += sizeof(*ev);
3157  			if (ev->subevent == event)
3158  				goto found;
3159  		}
3160  		return NULL;
3161  	}
3162  
3163  found:
3164  	bt_dev_dbg(hdev, "event 0x%2.2x", event);
3165  
3166  	return hdev->recv_event->data + offset;
3167  }
3168  
3169  /* Send ACL data */
3170  static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3171  {
3172  	struct hci_acl_hdr *hdr;
3173  	int len = skb->len;
3174  
3175  	skb_push(skb, HCI_ACL_HDR_SIZE);
3176  	skb_reset_transport_header(skb);
3177  	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3178  	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3179  	hdr->dlen   = cpu_to_le16(len);
3180  }
3181  
3182  static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3183  			  struct sk_buff *skb, __u16 flags)
3184  {
3185  	struct hci_conn *conn = chan->conn;
3186  	struct hci_dev *hdev = conn->hdev;
3187  	struct sk_buff *list;
3188  
3189  	skb->len = skb_headlen(skb);
3190  	skb->data_len = 0;
3191  
3192  	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3193  
3194  	hci_add_acl_hdr(skb, conn->handle, flags);
3195  
3196  	list = skb_shinfo(skb)->frag_list;
3197  	if (!list) {
3198  		/* Non fragmented */
3199  		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3200  
3201  		skb_queue_tail(queue, skb);
3202  	} else {
3203  		/* Fragmented */
3204  		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3205  
3206  		skb_shinfo(skb)->frag_list = NULL;
3207  
3208  		/* Queue all fragments atomically. We need to use spin_lock_bh
3209  		 * here because of 6LoWPAN links, as there this function is
3210  		 * called from softirq and using normal spin lock could cause
3211  		 * deadlocks.
3212  		 */
3213  		spin_lock_bh(&queue->lock);
3214  
3215  		__skb_queue_tail(queue, skb);
3216  
3217  		flags &= ~ACL_START;
3218  		flags |= ACL_CONT;
3219  		do {
3220  			skb = list; list = list->next;
3221  
3222  			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3223  			hci_add_acl_hdr(skb, conn->handle, flags);
3224  
3225  			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3226  
3227  			__skb_queue_tail(queue, skb);
3228  		} while (list);
3229  
3230  		spin_unlock_bh(&queue->lock);
3231  	}
3232  }
3233  
3234  void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3235  {
3236  	struct hci_dev *hdev = chan->conn->hdev;
3237  
3238  	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3239  
3240  	hci_queue_acl(chan, &chan->data_q, skb, flags);
3241  
3242  	queue_work(hdev->workqueue, &hdev->tx_work);
3243  }
3244  
3245  /* Send SCO data */
3246  void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3247  {
3248  	struct hci_dev *hdev = conn->hdev;
3249  	struct hci_sco_hdr hdr;
3250  
3251  	BT_DBG("%s len %d", hdev->name, skb->len);
3252  
3253  	hdr.handle = cpu_to_le16(conn->handle);
3254  	hdr.dlen   = skb->len;
3255  
3256  	skb_push(skb, HCI_SCO_HDR_SIZE);
3257  	skb_reset_transport_header(skb);
3258  	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3259  
3260  	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3261  
3262  	skb_queue_tail(&conn->data_q, skb);
3263  	queue_work(hdev->workqueue, &hdev->tx_work);
3264  }
3265  
3266  /* Send ISO data */
3267  static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3268  {
3269  	struct hci_iso_hdr *hdr;
3270  	int len = skb->len;
3271  
3272  	skb_push(skb, HCI_ISO_HDR_SIZE);
3273  	skb_reset_transport_header(skb);
3274  	hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3275  	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3276  	hdr->dlen   = cpu_to_le16(len);
3277  }
3278  
3279  static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3280  			  struct sk_buff *skb)
3281  {
3282  	struct hci_dev *hdev = conn->hdev;
3283  	struct sk_buff *list;
3284  	__u16 flags;
3285  
3286  	skb->len = skb_headlen(skb);
3287  	skb->data_len = 0;
3288  
3289  	hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3290  
3291  	list = skb_shinfo(skb)->frag_list;
3292  
3293  	flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3294  	hci_add_iso_hdr(skb, conn->handle, flags);
3295  
3296  	if (!list) {
3297  		/* Non fragmented */
3298  		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3299  
3300  		skb_queue_tail(queue, skb);
3301  	} else {
3302  		/* Fragmented */
3303  		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3304  
3305  		skb_shinfo(skb)->frag_list = NULL;
3306  
3307  		__skb_queue_tail(queue, skb);
3308  
3309  		do {
3310  			skb = list; list = list->next;
3311  
3312  			hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3313  			flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3314  						   0x00);
3315  			hci_add_iso_hdr(skb, conn->handle, flags);
3316  
3317  			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3318  
3319  			__skb_queue_tail(queue, skb);
3320  		} while (list);
3321  	}
3322  }
3323  
3324  void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3325  {
3326  	struct hci_dev *hdev = conn->hdev;
3327  
3328  	BT_DBG("%s len %d", hdev->name, skb->len);
3329  
3330  	hci_queue_iso(conn, &conn->data_q, skb);
3331  
3332  	queue_work(hdev->workqueue, &hdev->tx_work);
3333  }
3334  
3335  /* ---- HCI TX task (outgoing data) ---- */
3336  
3337  /* HCI Connection scheduler */
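/* Compute how many packets the given connection may send in this round: the
 * controller's free buffer count for the link type divided by the number of
 * ready connections, but always at least one.
 */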
3338  static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3339  {
3340  	struct hci_dev *hdev;
3341  	int cnt, q;
3342  
3343  	if (!conn) {
3344  		*quote = 0;
3345  		return;
3346  	}
3347  
3348  	hdev = conn->hdev;
3349  
3350  	switch (conn->type) {
3351  	case ACL_LINK:
3352  		cnt = hdev->acl_cnt;
3353  		break;
3354  	case SCO_LINK:
3355  	case ESCO_LINK:
3356  		cnt = hdev->sco_cnt;
3357  		break;
3358  	case LE_LINK:
3359  		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3360  		break;
3361  	case ISO_LINK:
3362  		cnt = hdev->iso_mtu ? hdev->iso_cnt :
3363  			hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3364  		break;
3365  	default:
3366  		cnt = 0;
3367  		bt_dev_err(hdev, "unknown link type %d", conn->type);
3368  	}
3369  
3370  	q = cnt / num;
3371  	*quote = q ? q : 1;
3372  }
3373  
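/* Round-robin among connections of the given type: pick the connection with
 * queued data that currently has the fewest packets in flight.
 */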
3374  static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3375  				     int *quote)
3376  {
3377  	struct hci_conn_hash *h = &hdev->conn_hash;
3378  	struct hci_conn *conn = NULL, *c;
3379  	unsigned int num = 0, min = ~0;
3380  
3381  	/* We don't have to lock device here. Connections are always
3382  	 * added and removed with TX task disabled. */
3383  
3384  	rcu_read_lock();
3385  
3386  	list_for_each_entry_rcu(c, &h->list, list) {
3387  		if (c->type != type || skb_queue_empty(&c->data_q))
3388  			continue;
3389  
3390  		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3391  			continue;
3392  
3393  		num++;
3394  
3395  		if (c->sent < min) {
3396  			min  = c->sent;
3397  			conn = c;
3398  		}
3399  
3400  		if (hci_conn_num(hdev, type) == num)
3401  			break;
3402  	}
3403  
3404  	rcu_read_unlock();
3405  
3406  	hci_quote_sent(conn, num, quote);
3407  
3408  	BT_DBG("conn %p quote %d", conn, *quote);
3409  	return conn;
3410  }
3411  
3412  static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3413  {
3414  	struct hci_conn_hash *h = &hdev->conn_hash;
3415  	struct hci_conn *c;
3416  
3417  	bt_dev_err(hdev, "link tx timeout");
3418  
3419  	rcu_read_lock();
3420  
3421  	/* Kill stalled connections */
3422  	list_for_each_entry_rcu(c, &h->list, list) {
3423  		if (c->type == type && c->sent) {
3424  			bt_dev_err(hdev, "killing stalled connection %pMR",
3425  				   &c->dst);
3426  			/* hci_disconnect might sleep, so, we have to release
3427  			 * the RCU read lock before calling it.
3428  			 */
3429  			rcu_read_unlock();
3430  			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3431  			rcu_read_lock();
3432  		}
3433  	}
3434  
3435  	rcu_read_unlock();
3436  }
3437  
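/* Select the channel to service next: only the highest priority queued skbs
 * are considered, and among those the connection with the fewest packets in
 * flight wins.
 */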
3438  static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3439  				      int *quote)
3440  {
3441  	struct hci_conn_hash *h = &hdev->conn_hash;
3442  	struct hci_chan *chan = NULL;
3443  	unsigned int num = 0, min = ~0, cur_prio = 0;
3444  	struct hci_conn *conn;
3445  	int conn_num = 0;
3446  
3447  	BT_DBG("%s", hdev->name);
3448  
3449  	rcu_read_lock();
3450  
3451  	list_for_each_entry_rcu(conn, &h->list, list) {
3452  		struct hci_chan *tmp;
3453  
3454  		if (conn->type != type)
3455  			continue;
3456  
3457  		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3458  			continue;
3459  
3460  		conn_num++;
3461  
3462  		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3463  			struct sk_buff *skb;
3464  
3465  			if (skb_queue_empty(&tmp->data_q))
3466  				continue;
3467  
3468  			skb = skb_peek(&tmp->data_q);
3469  			if (skb->priority < cur_prio)
3470  				continue;
3471  
3472  			if (skb->priority > cur_prio) {
3473  				num = 0;
3474  				min = ~0;
3475  				cur_prio = skb->priority;
3476  			}
3477  
3478  			num++;
3479  
3480  			if (conn->sent < min) {
3481  				min  = conn->sent;
3482  				chan = tmp;
3483  			}
3484  		}
3485  
3486  		if (hci_conn_num(hdev, type) == conn_num)
3487  			break;
3488  	}
3489  
3490  	rcu_read_unlock();
3491  
3492  	if (!chan)
3493  		return NULL;
3494  
3495  	hci_quote_sent(chan->conn, num, quote);
3496  
3497  	BT_DBG("chan %p quote %d", chan, *quote);
3498  	return chan;
3499  }
3500  
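/* Avoid starvation: channels that sent nothing in the last round get the
 * priority of their queued skbs bumped to HCI_PRIO_MAX - 1 so they are
 * picked up in the next scheduling pass.
 */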
3501  static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3502  {
3503  	struct hci_conn_hash *h = &hdev->conn_hash;
3504  	struct hci_conn *conn;
3505  	int num = 0;
3506  
3507  	BT_DBG("%s", hdev->name);
3508  
3509  	rcu_read_lock();
3510  
3511  	list_for_each_entry_rcu(conn, &h->list, list) {
3512  		struct hci_chan *chan;
3513  
3514  		if (conn->type != type)
3515  			continue;
3516  
3517  		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3518  			continue;
3519  
3520  		num++;
3521  
3522  		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3523  			struct sk_buff *skb;
3524  
3525  			if (chan->sent) {
3526  				chan->sent = 0;
3527  				continue;
3528  			}
3529  
3530  			if (skb_queue_empty(&chan->data_q))
3531  				continue;
3532  
3533  			skb = skb_peek(&chan->data_q);
3534  			if (skb->priority >= HCI_PRIO_MAX - 1)
3535  				continue;
3536  
3537  			skb->priority = HCI_PRIO_MAX - 1;
3538  
3539  			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3540  			       skb->priority);
3541  		}
3542  
3543  		if (hci_conn_num(hdev, type) == num)
3544  			break;
3545  	}
3546  
3547  	rcu_read_unlock();
3548  
3549  }
3550  
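/* Detect stalled links: if the controller reports no free buffers and
 * nothing has been transmitted for HCI_ACL_TX_TIMEOUT, treat it as a link
 * TX timeout and kill the stalled connections.
 */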
3551  static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3552  {
3553  	unsigned long last_tx;
3554  
3555  	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3556  		return;
3557  
3558  	switch (type) {
3559  	case LE_LINK:
3560  		last_tx = hdev->le_last_tx;
3561  		break;
3562  	default:
3563  		last_tx = hdev->acl_last_tx;
3564  		break;
3565  	}
3566  
3567  	/* tx timeout must be longer than maximum link supervision timeout
3568  	 * (40.9 seconds)
3569  	 */
3570  	if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3571  		hci_link_tx_to(hdev, type);
3572  }
3573  
3574  /* Schedule SCO */
3575  static void hci_sched_sco(struct hci_dev *hdev)
3576  {
3577  	struct hci_conn *conn;
3578  	struct sk_buff *skb;
3579  	int quote;
3580  
3581  	BT_DBG("%s", hdev->name);
3582  
3583  	if (!hci_conn_num(hdev, SCO_LINK))
3584  		return;
3585  
3586  	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3587  		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3588  			BT_DBG("skb %p len %d", skb, skb->len);
3589  			hci_send_frame(hdev, skb);
3590  
3591  			conn->sent++;
3592  			if (conn->sent == ~0)
3593  				conn->sent = 0;
3594  		}
3595  	}
3596  }
3597  
3598  static void hci_sched_esco(struct hci_dev *hdev)
3599  {
3600  	struct hci_conn *conn;
3601  	struct sk_buff *skb;
3602  	int quote;
3603  
3604  	BT_DBG("%s", hdev->name);
3605  
3606  	if (!hci_conn_num(hdev, ESCO_LINK))
3607  		return;
3608  
3609  	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3610  						     &quote))) {
3611  		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3612  			BT_DBG("skb %p len %d", skb, skb->len);
3613  			hci_send_frame(hdev, skb);
3614  
3615  			conn->sent++;
3616  			if (conn->sent == ~0)
3617  				conn->sent = 0;
3618  		}
3619  	}
3620  }
3621  
3622  static void hci_sched_acl_pkt(struct hci_dev *hdev)
3623  {
3624  	unsigned int cnt = hdev->acl_cnt;
3625  	struct hci_chan *chan;
3626  	struct sk_buff *skb;
3627  	int quote;
3628  
3629  	__check_timeout(hdev, cnt, ACL_LINK);
3630  
3631  	while (hdev->acl_cnt &&
3632  	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3633  		u32 priority = (skb_peek(&chan->data_q))->priority;
3634  		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3635  			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3636  			       skb->len, skb->priority);
3637  
3638  			/* Stop if priority has changed */
3639  			if (skb->priority < priority)
3640  				break;
3641  
3642  			skb = skb_dequeue(&chan->data_q);
3643  
3644  			hci_conn_enter_active_mode(chan->conn,
3645  						   bt_cb(skb)->force_active);
3646  
3647  			hci_send_frame(hdev, skb);
3648  			hdev->acl_last_tx = jiffies;
3649  
3650  			hdev->acl_cnt--;
3651  			chan->sent++;
3652  			chan->conn->sent++;
3653  
3654  			/* Send pending SCO and eSCO packets right away */
3655  			hci_sched_sco(hdev);
3656  			hci_sched_esco(hdev);
3657  		}
3658  	}
3659  
3660  	if (cnt != hdev->acl_cnt)
3661  		hci_prio_recalculate(hdev, ACL_LINK);
3662  }
3663  
3664  static void hci_sched_acl(struct hci_dev *hdev)
3665  {
3666  	BT_DBG("%s", hdev->name);
3667  
3668  	/* Nothing to do if there are no ACL links on this controller */
3669  	if (!hci_conn_num(hdev, ACL_LINK))
3670  		return;
3671  
3672  	hci_sched_acl_pkt(hdev);
3673  }
3674  
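/* Schedule LE data: same per-channel, priority-ordered draining as ACL, but
 * the buffer budget comes from the dedicated LE count (le_cnt) when the
 * controller reports separate LE buffers and from the ACL count otherwise.
 */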
3675  static void hci_sched_le(struct hci_dev *hdev)
3676  {
3677  	struct hci_chan *chan;
3678  	struct sk_buff *skb;
3679  	int quote, *cnt, tmp;
3680  
3681  	BT_DBG("%s", hdev->name);
3682  
3683  	if (!hci_conn_num(hdev, LE_LINK))
3684  		return;
3685  
3686  	cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3687  
3688  	__check_timeout(hdev, *cnt, LE_LINK);
3689  
3690  	tmp = *cnt;
3691  	while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3692  		u32 priority = (skb_peek(&chan->data_q))->priority;
3693  		while (quote-- && (skb = skb_peek(&chan->data_q))) {
3694  			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3695  			       skb->len, skb->priority);
3696  
3697  			/* Stop if priority has changed */
3698  			if (skb->priority < priority)
3699  				break;
3700  
3701  			skb = skb_dequeue(&chan->data_q);
3702  
3703  			hci_send_frame(hdev, skb);
3704  			hdev->le_last_tx = jiffies;
3705  
3706  			(*cnt)--;
3707  			chan->sent++;
3708  			chan->conn->sent++;
3709  
3710  			/* Send pending SCO and eSCO packets right away */
3711  			hci_sched_sco(hdev);
3712  			hci_sched_esco(hdev);
3713  		}
3714  	}
3715  
3716  	if (*cnt != tmp)
3717  		hci_prio_recalculate(hdev, LE_LINK);
3718  }
3719  
3720  /* Schedule CIS */
3721  static void hci_sched_iso(struct hci_dev *hdev)
3722  {
3723  	struct hci_conn *conn;
3724  	struct sk_buff *skb;
3725  	int quote, *cnt;
3726  
3727  	BT_DBG("%s", hdev->name);
3728  
3729  	if (!hci_conn_num(hdev, ISO_LINK))
3730  		return;
3731  
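	/* ISO traffic uses dedicated ISO buffers when the controller reports
	 * them, otherwise it falls back to the LE and finally the ACL buffer
	 * pool.
	 */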
3732  	cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3733  		hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3734  	while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3735  		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3736  			BT_DBG("skb %p len %d", skb, skb->len);
3737  			hci_send_frame(hdev, skb);
3738  
3739  			conn->sent++;
3740  			if (conn->sent == ~0)
3741  				conn->sent = 0;
3742  			(*cnt)--;
3743  		}
3744  	}
3745  }
3746  
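/* TX work handler: unless the device is in HCI_USER_CHANNEL mode, service
 * every link type in priority order (SCO, eSCO, ISO, ACL, LE) and then flush
 * any queued raw packets.
 */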
3747  static void hci_tx_work(struct work_struct *work)
3748  {
3749  	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3750  	struct sk_buff *skb;
3751  
3752  	BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3753  	       hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3754  
3755  	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3756  		/* Schedule queues and send stuff to HCI driver */
3757  		hci_sched_sco(hdev);
3758  		hci_sched_esco(hdev);
3759  		hci_sched_iso(hdev);
3760  		hci_sched_acl(hdev);
3761  		hci_sched_le(hdev);
3762  	}
3763  
3764  	/* Send any queued raw (unknown type) packets */
3765  	while ((skb = skb_dequeue(&hdev->raw_q)))
3766  		hci_send_frame(hdev, skb);
3767  }
3768  
3769  /* ----- HCI RX task (incoming data processing) ----- */
3770  
3771  /* ACL data packet */
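/* Strip the ACL header, split the connection handle from the PB/BC flags and
 * hand the payload to L2CAP for the matching connection; packets for unknown
 * handles are dropped.
 */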
3772  static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3773  {
3774  	struct hci_acl_hdr *hdr = (void *) skb->data;
3775  	struct hci_conn *conn;
3776  	__u16 handle, flags;
3777  
3778  	skb_pull(skb, HCI_ACL_HDR_SIZE);
3779  
3780  	handle = __le16_to_cpu(hdr->handle);
3781  	flags  = hci_flags(handle);
3782  	handle = hci_handle(handle);
3783  
3784  	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3785  	       handle, flags);
3786  
3787  	hdev->stat.acl_rx++;
3788  
3789  	hci_dev_lock(hdev);
3790  	conn = hci_conn_hash_lookup_handle(hdev, handle);
3791  	hci_dev_unlock(hdev);
3792  
3793  	if (conn) {
3794  		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3795  
3796  		/* Send to upper protocol */
3797  		l2cap_recv_acldata(conn, skb, flags);
3798  		return;
3799  	} else {
3800  		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3801  			   handle);
3802  	}
3803  
3804  	kfree_skb(skb);
3805  }
3806  
3807  /* SCO data packet */
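/* Strip the SCO header, look up the connection for the handle and pass the
 * payload (with its packet status flag) to the SCO layer; packets for
 * unknown handles are dropped.
 */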
3808  static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3809  {
3810  	struct hci_sco_hdr *hdr = (void *) skb->data;
3811  	struct hci_conn *conn;
3812  	__u16 handle, flags;
3813  
3814  	skb_pull(skb, HCI_SCO_HDR_SIZE);
3815  
3816  	handle = __le16_to_cpu(hdr->handle);
3817  	flags  = hci_flags(handle);
3818  	handle = hci_handle(handle);
3819  
3820  	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3821  	       handle, flags);
3822  
3823  	hdev->stat.sco_rx++;
3824  
3825  	hci_dev_lock(hdev);
3826  	conn = hci_conn_hash_lookup_handle(hdev, handle);
3827  	hci_dev_unlock(hdev);
3828  
3829  	if (conn) {
3830  		/* Send to upper protocol */
3831  		hci_skb_pkt_status(skb) = flags & 0x03;
3832  		sco_recv_scodata(conn, skb);
3833  		return;
3834  	} else {
3835  		bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3836  				       handle);
3837  	}
3838  
3839  	kfree_skb(skb);
3840  }
3841  
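/* ISO data packet: strip the ISO header, look up the connection for the
 * handle and pass the payload to the ISO layer; malformed packets and
 * packets for unknown handles are dropped.
 */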
3842  static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3843  {
3844  	struct hci_iso_hdr *hdr;
3845  	struct hci_conn *conn;
3846  	__u16 handle, flags;
3847  
3848  	hdr = skb_pull_data(skb, sizeof(*hdr));
3849  	if (!hdr) {
3850  		bt_dev_err(hdev, "ISO packet too small");
3851  		goto drop;
3852  	}
3853  
3854  	handle = __le16_to_cpu(hdr->handle);
3855  	flags  = hci_flags(handle);
3856  	handle = hci_handle(handle);
3857  
3858  	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3859  		   handle, flags);
3860  
3861  	hci_dev_lock(hdev);
3862  	conn = hci_conn_hash_lookup_handle(hdev, handle);
3863  	hci_dev_unlock(hdev);
3864  
3865  	if (!conn) {
3866  		bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3867  			   handle);
3868  		goto drop;
3869  	}
3870  
3871  	/* Send to upper protocol */
3872  	iso_recv(conn, skb, flags);
3873  	return;
3874  
3875  drop:
3876  	kfree_skb(skb);
3877  }
3878  
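/* A request is considered complete when the command queue is empty or when
 * the packet at the head of the queue starts a new request.
 */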
3879  static bool hci_req_is_complete(struct hci_dev *hdev)
3880  {
3881  	struct sk_buff *skb;
3882  
3883  	skb = skb_peek(&hdev->cmd_q);
3884  	if (!skb)
3885  		return true;
3886  
3887  	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3888  }
3889  
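/* Re-queue a copy of the last sent command (unless it was HCI_Reset) and kick
 * the command work so it gets transmitted again.
 */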
3890  static void hci_resend_last(struct hci_dev *hdev)
3891  {
3892  	struct hci_command_hdr *sent;
3893  	struct sk_buff *skb;
3894  	u16 opcode;
3895  
3896  	if (!hdev->sent_cmd)
3897  		return;
3898  
3899  	sent = (void *) hdev->sent_cmd->data;
3900  	opcode = __le16_to_cpu(sent->opcode);
3901  	if (opcode == HCI_OP_RESET)
3902  		return;
3903  
3904  	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3905  	if (!skb)
3906  		return;
3907  
3908  	skb_queue_head(&hdev->cmd_q, skb);
3909  	queue_work(hdev->workqueue, &hdev->cmd_work);
3910  }
3911  
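/* Called for command status/complete events: work out whether the event
 * finishes the current request and, if so, return the request's completion
 * callback. Any commands still queued for a finished or failed request are
 * removed from the command queue.
 */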
3912  void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3913  			  hci_req_complete_t *req_complete,
3914  			  hci_req_complete_skb_t *req_complete_skb)
3915  {
3916  	struct sk_buff *skb;
3917  	unsigned long flags;
3918  
3919  	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3920  
3921  	/* If the completed command doesn't match the last one that was
3922  	 * sent, we need to do special handling of it.
3923  	 */
3924  	if (!hci_sent_cmd_data(hdev, opcode)) {
3925  		/* Some CSR based controllers generate a spontaneous
3926  		 * reset complete event during init and any pending
3927  		 * command will never be completed. In such a case we
3928  		 * need to resend whatever was the last sent
3929  		 * command.
3930  		 */
3931  		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3932  			hci_resend_last(hdev);
3933  
3934  		return;
3935  	}
3936  
3937  	/* If we reach this point this event matches the last command sent */
3938  	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3939  
3940  	/* If the command succeeded and there are still more commands in
3941  	 * this request, the request is not yet complete.
3942  	 */
3943  	if (!status && !hci_req_is_complete(hdev))
3944  		return;
3945  
3946  	skb = hdev->req_skb;
3947  
3948  	/* If this was the last command in a request, the complete
3949  	 * callback is found in hdev->req_skb instead of the
3950  	 * command queue (hdev->cmd_q).
3951  	 */
3952  	if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
3953  		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3954  		return;
3955  	}
3956  
3957  	if (skb && bt_cb(skb)->hci.req_complete) {
3958  		*req_complete = bt_cb(skb)->hci.req_complete;
3959  		return;
3960  	}
3961  
3962  	/* Remove all pending commands belonging to this request */
3963  	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3964  	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3965  		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3966  			__skb_queue_head(&hdev->cmd_q, skb);
3967  			break;
3968  		}
3969  
3970  		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3971  			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3972  		else
3973  			*req_complete = bt_cb(skb)->hci.req_complete;
3974  		dev_kfree_skb_irq(skb);
3975  	}
3976  	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3977  }
3978  
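/* RX work handler: drain the receive queue, mirror each packet to the monitor
 * channel (and to raw sockets when in promiscuous mode) and dispatch it to
 * the event, ACL, SCO or ISO handler based on its packet type.
 */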
3979  static void hci_rx_work(struct work_struct *work)
3980  {
3981  	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3982  	struct sk_buff *skb;
3983  
3984  	BT_DBG("%s", hdev->name);
3985  
3986  	/* The kcov_remote functions are used to collect packet parsing
3987  	 * coverage information from this background thread and to associate
3988  	 * that coverage with the syscall's thread which originally injected
3989  	 * the packet. This helps with fuzzing the kernel.
3990  	 */
3991  	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
3992  		kcov_remote_start_common(skb_get_kcov_handle(skb));
3993  
3994  		/* Send copy to monitor */
3995  		hci_send_to_monitor(hdev, skb);
3996  
3997  		if (atomic_read(&hdev->promisc)) {
3998  			/* Send copy to the sockets */
3999  			hci_send_to_sock(hdev, skb);
4000  		}
4001  
4002  		/* If the device has been opened in HCI_USER_CHANNEL,
4003  		 * userspace has exclusive access to the device.
4004  		 * While the device is in HCI_INIT, we still need to pass
4005  		 * data packets to the driver in order
4006  		 * to complete its setup().
4007  		 */
4008  		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4009  		    !test_bit(HCI_INIT, &hdev->flags)) {
4010  			kfree_skb(skb);
4011  			continue;
4012  		}
4013  
4014  		if (test_bit(HCI_INIT, &hdev->flags)) {
4015  			/* Don't process data packets in this state. */
4016  			switch (hci_skb_pkt_type(skb)) {
4017  			case HCI_ACLDATA_PKT:
4018  			case HCI_SCODATA_PKT:
4019  			case HCI_ISODATA_PKT:
4020  				kfree_skb(skb);
4021  				continue;
4022  			}
4023  		}
4024  
4025  		/* Process frame */
4026  		switch (hci_skb_pkt_type(skb)) {
4027  		case HCI_EVENT_PKT:
4028  			BT_DBG("%s Event packet", hdev->name);
4029  			hci_event_packet(hdev, skb);
4030  			break;
4031  
4032  		case HCI_ACLDATA_PKT:
4033  			BT_DBG("%s ACL data packet", hdev->name);
4034  			hci_acldata_packet(hdev, skb);
4035  			break;
4036  
4037  		case HCI_SCODATA_PKT:
4038  			BT_DBG("%s SCO data packet", hdev->name);
4039  			hci_scodata_packet(hdev, skb);
4040  			break;
4041  
4042  		case HCI_ISODATA_PKT:
4043  			BT_DBG("%s ISO data packet", hdev->name);
4044  			hci_isodata_packet(hdev, skb);
4045  			break;
4046  
4047  		default:
4048  			kfree_skb(skb);
4049  			break;
4050  		}
4051  	}
4052  }
4053  
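/* Send a single queued command: keep a clone in hdev->sent_cmd for possible
 * retransmission, stash another clone in hdev->req_skb when a synchronous
 * request is waiting on it, and consume one command credit.
 */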
4054  static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
4055  {
4056  	int err;
4057  
4058  	bt_dev_dbg(hdev, "skb %p", skb);
4059  
4060  	kfree_skb(hdev->sent_cmd);
4061  
4062  	hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4063  	if (!hdev->sent_cmd) {
4064  		skb_queue_head(&hdev->cmd_q, skb);
4065  		queue_work(hdev->workqueue, &hdev->cmd_work);
4066  		return;
4067  	}
4068  
4069  	err = hci_send_frame(hdev, skb);
4070  	if (err < 0) {
4071  		hci_cmd_sync_cancel_sync(hdev, -err);
4072  		return;
4073  	}
4074  
4075  	if (hdev->req_status == HCI_REQ_PEND &&
4076  	    !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
4077  		kfree_skb(hdev->req_skb);
4078  		hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4079  	}
4080  
4081  	atomic_dec(&hdev->cmd_cnt);
4082  }
4083  
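/* Command work handler: send one queued command when the controller has a
 * free command slot (cmd_cnt) and re-arm the command timeout, unless a reset
 * or a command-queue drain is in progress.
 */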
4084  static void hci_cmd_work(struct work_struct *work)
4085  {
4086  	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4087  	struct sk_buff *skb;
4088  
4089  	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4090  	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4091  
4092  	/* Send queued commands */
4093  	if (atomic_read(&hdev->cmd_cnt)) {
4094  		skb = skb_dequeue(&hdev->cmd_q);
4095  		if (!skb)
4096  			return;
4097  
4098  		hci_send_cmd_sync(hdev, skb);
4099  
4100  		rcu_read_lock();
4101  		if (test_bit(HCI_RESET, &hdev->flags) ||
4102  		    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4103  			cancel_delayed_work(&hdev->cmd_timer);
4104  		else
4105  			queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4106  					   HCI_CMD_TIMEOUT);
4107  		rcu_read_unlock();
4108  	}
4109  }
4110