/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/kcov.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
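
/* Illustrative sketch (not part of the original file): hci_dev_get()
 * returns the device with its reference count held, so every successful
 * call must be balanced by hci_dev_put() when the caller is done:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		BT_DBG("found %s", hdev->name);
 *		hci_dev_put(hdev);
 *	}
 */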

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_passive_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	bt_dev_dbg(hdev, "state %u -> %u", old_state, state);
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Restrict maximum inquiry length to 60 seconds */
	if (ir.length > 60) {
		err = -EINVAL;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	if (do_inquiry) {
		hci_req_sync_lock(hdev);
		err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp);
		hci_req_sync_unlock(hdev);

		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
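
/* Illustrative userspace sketch (assumes BlueZ's <bluetooth/bluetooth.h>
 * and <bluetooth/hci.h> headers): hci_inquiry() above backs the HCIINQUIRY
 * ioctl, which expects a struct hci_inquiry_req immediately followed by
 * room for the returned inquiry_info entries:
 *
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} req = {
 *		.ir = {
 *			.dev_id  = 0,
 *			.flags   = IREQ_CACHE_FLUSH,
 *			.lap     = { 0x33, 0x8b, 0x9e },   // GIAC
 *			.length  = 8,                      // 8 * 1.28 s
 *			.num_rsp = 255,
 *		},
 *	};
 *
 *	if (sk >= 0 && ioctl(sk, HCIINQUIRY, &req) == 0)
 *		printf("%u responses\n", req.ir.num_rsp);
 */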

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	ret = hci_dev_open_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_do_close(struct hci_dev *hdev)
{
	int err;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_dev_close_sync(hdev);

	hci_req_sync_unlock(hdev);

	return err;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	cancel_work_sync(&hdev->power_on);
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Cancel these to avoid queueing non-chained pending work */
	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
	/* Wait for
	 *
	 *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
	 *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
	 *
	 * inside RCU section to see the flag or complete scheduling.
	 */
	synchronize_rcu();
	/* Explicitly cancel works in case scheduled after setting the flag. */
	cancel_delayed_work(&hdev->cmd_timer);
	cancel_delayed_work(&hdev->ncmd_timer);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;
	hdev->iso_cnt = 0;

	ret = hci_reset_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	__le16 policy;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
					  1, &dr.dev_opt, HCI_CMD_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_cmd_sync_status(hdev,
						  HCI_OP_WRITE_AUTH_ENABLE,
						  1, &dr.dev_opt,
						  HCI_CMD_TIMEOUT);
			if (err)
				break;
		}

		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
					  1, &dr.dev_opt, HCI_CMD_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
					  1, &dr.dev_opt, HCI_CMD_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_passive_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		policy = cpu_to_le16(dr.dev_opt);

		err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
					  2, &policy, HCI_CMD_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
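
/* Illustrative userspace sketch (assumes BlueZ's <bluetooth/hci.h>): the
 * HCISET* ioctls dispatched above all take a struct hci_dev_req; enabling
 * page and inquiry scan through the legacy interface could look like:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	ioctl(sk, HCISETSCAN, (unsigned long) &dr);
 */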

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dl->dev_num = dev_num;
	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		dr[n].dev_id  = hdev->id;
		dr[n].dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	err = copy_to_user(arg, dl, struct_size(dl, dev_req, n));
	kfree(dl);

	return err ? -EFAULT : 0;
}
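
/* Illustrative userspace sketch (assumes BlueZ's <bluetooth/hci.h>, where
 * HCI_MAX_DEV bounds the request): the HCIGETDEVLIST ioctl served above
 * reads dev_num first and overwrites it with the number of entries filled:
 *
 *	struct hci_dev_list_req *dl;
 *	int i;
 *
 *	dl = calloc(1, sizeof(*dl) +
 *		    HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	if (dl) {
 *		dl->dev_num = HCI_MAX_DEV;
 *		if (ioctl(sk, HCIGETDEVLIST, dl) == 0)
 *			for (i = 0; i < dl->dev_num; i++)
 *				printf("hci%u\n", dl->dev_req[i].dev_id);
 *	}
 */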

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strscpy(di.name, hdev->name, sizeof(di.name));
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_dev_do_poweroff(struct hci_dev *hdev)
{
	int err;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_set_powered_sync(hdev, false);

	hci_req_sync_unlock(hdev);

	return err;
}

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;
	int err;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
		return 0;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);

		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
			err = hci_dev_do_poweroff(hdev);
			if (err) {
				bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
					   err);

				/* Make sure the device is still closed even if
				 * anything during power off sequence (eg.
				 * disconnecting devices) failed.
				 */
				hci_dev_do_close(hdev);
			}
		}
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
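
/* Note (added for orientation, not in the original file): these ops are
 * wired up when the controller is registered, roughly as
 *
 *	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 *				    RFKILL_TYPE_BLUETOOTH,
 *				    &hci_rfkill_ops, hdev);
 *
 * so soft-blocking the adapter from the rfkill subsystem lands in
 * hci_rfkill_set_block() above.
 */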

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		err = hci_powered_update_sync(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	hci_dev_hold(hdev);
	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (!hci_dev_do_close(hdev))
		hci_dev_do_open(hdev);

	hci_dev_put(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key, *tmp;

	list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b, *tmp;

	list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
	bool blocked = false;
	struct blocked_key *b;

	rcu_read_lock();
	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
			blocked = true;
			break;
		}
	}

	rcu_read_unlock();
	return blocked;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
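
/* Worked example (illustrative, not from the original source): with the
 * HCI authentication requirements 0x00/0x01 meaning no bonding, 0x02/0x03
 * dedicated bonding and 0x04/0x05 general bonding, a pairing where both
 * sides asked for general bonding (conn->auth_type == 0x04 and
 * conn->remote_auth == 0x04) passes the "> 0x01" check above and the
 * resulting combination key is stored persistently, while a
 * HCI_LK_DEBUG_COMBINATION key is always discarded.
 */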

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->req_skb) {
		u16 opcode = hci_skb_opcode(hdev->req_skb);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);

		hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	if (hdev->cmd_timeout)
		hdev->cmd_timeout(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

/* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    ncmd_timer.work);

	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

	/* During HCI_INIT phase no events can be injected if the ncmd timer
	 * triggers since the procedure has its own timeout handling.
	 */
	if (test_bit(HCI_INIT, &hdev->flags))
		return;

	/* This is an irrecoverable state, inject hardware error event */
	hci_reset_dev(hdev);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
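
/* Summary (added for clarity, not in the original file): after the calls
 * above, data->present encodes which OOB values were supplied:
 *
 *	0x00 - neither P-192 nor P-256 data
 *	0x01 - P-192 data (hash192/rand192) only
 *	0x02 - P-256 data (hash256/rand256) only
 *	0x03 - both P-192 and P-256 data
 */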

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing %dMR", hdev->name, instance);

	if (hdev->cur_adv_instance == instance) {
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}
		hdev->cur_adv_instance = 0x00;
	}

	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
	struct adv_info *adv_instance, *n;

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
		adv_instance->rpa_expired = rpa_expired;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}

static void adv_instance_rpa_expired(struct work_struct *work)
{
	struct adv_info *adv_instance = container_of(work, struct adv_info,
						     rpa_expired_cb.work);

	BT_DBG("");

	adv_instance->rpa_expired = true;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u16 adv_data_len, u8 *adv_data,
				      u16 scan_rsp_len, u8 *scan_rsp_data,
				      u16 timeout, u16 duration, s8 tx_power,
				      u32 min_interval, u32 max_interval,
				      u8 mesh_handle)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);
	if (adv) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
	} else {
		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
		    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
			return ERR_PTR(-EOVERFLOW);

		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
		if (!adv)
			return ERR_PTR(-ENOMEM);

		adv->pending = true;
		adv->instance = instance;

		/* If the controller supports only one set and the instance
		 * is set to 1 then there is no option other than using
		 * handle 0x00.
		 */
		if (hdev->le_num_of_adv_sets == 1 && instance == 1)
			adv->handle = 0x00;
		else
			adv->handle = instance;

		list_add(&adv->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv->flags = flags;
	adv->min_interval = min_interval;
	adv->max_interval = max_interval;
	adv->tx_power = tx_power;
	/* Defining a mesh_handle changes the timing units to ms,
	 * rather than seconds, and ties the instance to the requested
	 * mesh_tx queue.
	 */
	adv->mesh = mesh_handle;

	hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
				  scan_rsp_len, scan_rsp_data);

	adv->timeout = timeout;
	adv->remaining_time = timeout;

	if (duration == 0)
		adv->duration = hdev->def_multi_adv_rotation_duration;
	else
		adv->duration = duration;

	INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);

	BT_DBG("%s for %dMR", hdev->name, instance);

	return adv;
}
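
/* Illustrative sketch (hypothetical values, not from the original source):
 * registering instance 0x01 with a minimal Flags AD element and the
 * controller's default advertising intervals, while holding hdev->lock:
 *
 *	u8 ad[] = { 0x02, 0x01, 0x06 };	// Flags: LE General, no BR/EDR
 *	struct adv_info *adv;
 *
 *	adv = hci_add_adv_instance(hdev, 0x01, 0, sizeof(ad), ad, 0, NULL,
 *				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
 *				   hdev->le_adv_min_interval,
 *				   hdev->le_adv_max_interval, 0);
 *	if (IS_ERR(adv))
 *		return PTR_ERR(adv);
 */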

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u8 data_len, u8 *data,
				      u32 min_interval, u32 max_interval)
{
	struct adv_info *adv;

	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1747
				   min_interval, max_interval, 0);
1748 1749 1750 1751 1752 1753 1754 1755 1756 1757
	if (IS_ERR(adv))
		return adv;

	adv->periodic = true;
	adv->per_adv_data_len = data_len;

	if (data)
		memcpy(adv->per_adv_data, data, data_len);

	return adv;
1758 1759
}

1760 1761 1762 1763 1764
/* This function requires the caller holds hdev->lock */
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
			      u16 adv_data_len, u8 *adv_data,
			      u16 scan_rsp_len, u8 *scan_rsp_data)
{
1765
	struct adv_info *adv;
1766

1767
	adv = hci_find_adv_instance(hdev, instance);
1768 1769

	/* If advertisement doesn't exist, we can't modify its data */
1770
	if (!adv)
1771 1772
		return -ENOENT;

1773 1774 1775 1776 1777
	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memcpy(adv->adv_data, adv_data, adv_data_len);
		adv->adv_data_len = adv_data_len;
		adv->adv_data_changed = true;
1778 1779
	}

1780 1781 1782 1783 1784
	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
		adv->scan_rsp_len = scan_rsp_len;
		adv->scan_rsp_changed = true;
1785 1786
	}

1787 1788 1789 1790 1791
	/* Mark as changed if there are flags which would affect it */
	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		adv->scan_rsp_changed = true;

1792 1793 1794
	return 0;
}

1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848
/* This function requires the caller holds hdev->lock */
u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv)
		return 0;

	return adv->flags;
}

bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv;

	/* Instance 0x00 always set local name */
	if (instance == 0x00)
		return true;

	adv = hci_find_adv_instance(hdev, instance);
	if (!adv)
		return false;

	if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		return true;

	return adv->scan_rsp_len ? true : false;
}

1849 1850 1851
/* This function requires the caller holds hdev->lock */
void hci_adv_monitors_clear(struct hci_dev *hdev)
{
1852 1853 1854 1855
	struct adv_monitor *monitor;
	int handle;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1856
		hci_free_adv_monitor(hdev, monitor);
1857

1858 1859 1860
	idr_destroy(&hdev->adv_monitors_idr);
}

1861 1862 1863 1864
/* Frees the monitor structure and do some bookkeepings.
 * This function requires the caller holds hdev->lock.
 */
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1865 1866 1867 1868 1869 1870 1871
{
	struct adv_pattern *pattern;
	struct adv_pattern *tmp;

	if (!monitor)
		return;

1872 1873
	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
		list_del(&pattern->list);
1874
		kfree(pattern);
1875 1876 1877 1878 1879 1880 1881 1882 1883
	}

	if (monitor->handle)
		idr_remove(&hdev->adv_monitors_idr, monitor->handle);

	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
		hdev->adv_monitors_cnt--;
		mgmt_adv_monitor_removed(hdev, monitor->handle);
	}
1884 1885 1886 1887

	kfree(monitor);
}

/* Assigns handle to a monitor, and if offloading is supported and power is on,
 * also attempts to forward the request to the controller.
 * This function requires the caller holds hci_req_sync_lock.
 */
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	int min, max, handle;
	int status = 0;

	if (!monitor)
		return -EINVAL;

	hci_dev_lock(hdev);

	min = HCI_MIN_ADV_MONITOR_HANDLE;
	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
			   GFP_KERNEL);

	hci_dev_unlock(hdev);

	if (handle < 0)
		return handle;

	monitor->handle = handle;

	if (!hdev_is_powered(hdev))
		return status;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE:
		bt_dev_dbg(hdev, "add monitor %d status %d",
			   monitor->handle, status);
		/* Message was not forwarded to controller - not an error */
		break;

	case HCI_ADV_MONITOR_EXT_MSFT:
		status = msft_add_monitor_pattern(hdev, monitor);
		bt_dev_dbg(hdev, "add monitor %d msft status %d",
			   handle, status);
		break;
	}

	return status;
}

/* Attempts to tell the controller and free the monitor. If somehow the
 * controller doesn't have a corresponding handle, remove anyway.
 * This function requires the caller holds hci_req_sync_lock.
 */
static int hci_remove_adv_monitor(struct hci_dev *hdev,
				  struct adv_monitor *monitor)
{
	int status = 0;
	int handle;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
		bt_dev_dbg(hdev, "remove monitor %d status %d",
			   monitor->handle, status);
		goto free_monitor;

	case HCI_ADV_MONITOR_EXT_MSFT:
		handle = monitor->handle;
		status = msft_remove_monitor(hdev, monitor);
		bt_dev_dbg(hdev, "remove monitor %d msft status %d",
			   handle, status);
		break;
	}

	/* In case no matching handle registered, just free the monitor */
	if (status == -ENOENT)
		goto free_monitor;

	return status;

free_monitor:
	if (status == -ENOENT)
		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
			    monitor->handle);
	hci_free_adv_monitor(hdev, monitor);

	return status;
}

/* This function requires the caller holds hci_req_sync_lock */
int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
{
	struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);

	if (!monitor)
		return -EINVAL;

	return hci_remove_adv_monitor(hdev, monitor);
}

/* This function requires the caller holds hci_req_sync_lock */
int hci_remove_all_adv_monitor(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int idr_next_id = 0;
	int status = 0;

	while (1) {
		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
		if (!monitor)
			break;

		status = hci_remove_adv_monitor(hdev, monitor);
		if (status)
			return status;

		idr_next_id++;
	}

	return status;
}

/* This function requires the caller holds hdev->lock */
bool hci_is_adv_monitoring(struct hci_dev *hdev)
{
	return !idr_is_empty(&hdev->adv_monitors_idr);
}

int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
{
	if (msft_monitor_supported(hdev))
		return HCI_ADV_MONITOR_EXT_MSFT;

	return HCI_ADV_MONITOR_EXT_NONE;
}

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
				u8 type)
{
	struct bdaddr_list_with_irk *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

struct bdaddr_list_with_flags *
hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
				  bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list_with_flags *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct bdaddr_list *b, *n;

	list_for_each_entry_safe(b, n, bdaddr_list, list) {
		list_del(&b->list);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}
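
/* Illustrative sketch (not called anywhere in this file): typical use of
 * the bdaddr list helpers while holding the appropriate lock. -EEXIST from
 * hci_bdaddr_list_add() means the entry was already present, which callers
 * often do not treat as fatal.
 *
 *	hci_dev_lock(hdev);
 *	err = hci_bdaddr_list_add(&hdev->accept_list, &bdaddr, BDADDR_BREDR);
 *	if (err && err != -EEXIST)
 *		bt_dev_err(hdev, "failed to add device (%d)", err);
 *	hci_dev_unlock(hdev);
 */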

int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
					u8 type, u8 *peer_irk, u8 *local_irk)
{
	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	if (peer_irk)
		memcpy(entry->peer_irk, peer_irk, 16);

	if (local_irk)
		memcpy(entry->local_irk, local_irk, 16);

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
				   u8 type, u32 flags)
{
	struct bdaddr_list_with_flags *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;
	entry->flags = flags;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
							u8 type)
{
	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
				   u8 type)
{
	struct bdaddr_list_with_flags *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock or rcu_read_lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	rcu_read_lock();

	list_for_each_entry_rcu(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type) {
			rcu_read_unlock();
			return param;
		}
	}

	rcu_read_unlock();

	return NULL;
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_list_del_init(struct hci_conn_params *param)
{
	if (list_empty(&param->action))
		return;

	list_del_rcu(&param->action);
	synchronize_rcu();
	INIT_LIST_HEAD(&param->action);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_list_add(struct hci_conn_params *param,
			  struct list_head *list)
{
	list_add_rcu(&param->action, list);
}
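
/* Illustrative sketch of the locking contract above: writers hold
 * hdev->lock and use the helpers, while the lookup side may rely on RCU
 * alone (hci_pend_le_action_lookup() takes rcu_read_lock() internally).
 *
 *	hci_dev_lock(hdev);
 *	hci_pend_le_list_del_init(params);
 *	hci_pend_le_list_add(params, &hdev->pend_le_reports);
 *	hci_dev_unlock(hdev);
 */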

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		bt_dev_err(hdev, "out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
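
/* Illustrative sketch: how a caller typically obtains parameters for a
 * peer and switches it to autoconnect. hci_conn_params_add() is
 * idempotent, so an existing entry is simply returned.
 *
 *	hci_dev_lock(hdev);
 *	params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (params)
 *		params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 *	hci_dev_unlock(hdev);
 */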

void hci_conn_params_free(struct hci_conn_params *params)
{
	hci_pend_le_list_del_init(params);

	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_passive_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish one time connection to disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		hci_conn_params_free(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
static void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	BT_DBG("All LE connection parameters were removed");
}

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
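
/* Illustrative sketch: the selection logic above in use. For an LE-only
 * controller with no public BD_ADDR, this yields the static random
 * address and ADDR_LE_DEV_RANDOM.
 *
 *	bdaddr_t id_addr;
 *	u8 id_addr_type;
 *
 *	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
 */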

static void hci_clear_wake_reason(struct hci_dev *hdev)
{
	hci_dev_lock(hdev);

	hdev->wake_reason = 0;
	bacpy(&hdev->wake_addr, BDADDR_ANY);
	hdev->wake_addr_type = 0;

	hci_dev_unlock(hdev);
}

static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
				void *data)
{
	struct hci_dev *hdev =
		container_of(nb, struct hci_dev, suspend_notifier);
	int ret = 0;

	/* Userspace has full control of this device. Do nothing. */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return NOTIFY_DONE;

	/* To avoid a potential race with hci_unregister_dev. */
	hci_dev_hold(hdev);

	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = hci_suspend_dev(hdev);
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		ret = hci_resume_dev(hdev);
		break;
	}

	if (ret)
		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
			   action, ret);

	hci_dev_put(hdev);
	return NOTIFY_DONE;
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
{
	struct hci_dev *hdev;
	unsigned int alloc_size;

	alloc_size = sizeof(*hdev);
	if (sizeof_priv) {
		/* Fixme: May need ALIGN-ment? */
		alloc_size += sizeof_priv;
	}

	hdev = kzalloc(alloc_size, GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->advmon_allowlist_duration = 300;
	hdev->advmon_no_filter_duration = 500;
	hdev->enable_advmon_interleave_scan = 0x00;	/* Default to disable */

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST;
	hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST;
	hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1;
	hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1;
	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
	hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST;
	hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST;
	hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN;
	hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;
	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
	hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT;
	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;

	/* default 1.28 sec page scan */
	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
	hdev->def_page_scan_int = 0x0800;
	hdev->def_page_scan_window = 0x0012;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	ida_init(&hdev->unset_handle_ida);

	INIT_LIST_HEAD(&hdev->mesh_pending);
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->reject_list);
	INIT_LIST_HEAD(&hdev->accept_list);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_accept_list);
	INIT_LIST_HEAD(&hdev->le_resolv_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);
	INIT_LIST_HEAD(&hdev->blocked_keys);
	INIT_LIST_HEAD(&hdev->monitored_devices);

	INIT_LIST_HEAD(&hdev->local_codecs);
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	hci_cmd_sync_init(hdev);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
	INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);

	hci_devcd_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev_priv);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
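
/* Illustrative sketch (a hypothetical driver, not part of this file):
 * allocating an hdev with driver private data appended and tracking the
 * driver context via drvdata. my_dev_priv and my_ctx are assumed
 * driver-side names.
 *
 *	struct my_dev_priv { int irq; };
 *	struct hci_dev *hdev;
 *
 *	hdev = hci_alloc_dev_priv(sizeof(struct my_dev_priv));
 *	if (!hdev)
 *		return -ENOMEM;
 *	hci_set_drvdata(hdev, my_ctx);
 *	...
 *	hci_free_dev(hdev);	// on any error before registration
 */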

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
	if (id < 0)
		return id;

	error = dev_set_name(&hdev->dev, "hci%u", id);
	if (error)
		return error;

	hdev->name = dev_name(&hdev->dev);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
						      hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	/* Assume BR/EDR support until proven otherwise (such as
	 * through reading supported features during init).
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	/* Mark Remote Wakeup connection flag as supported if driver has wakeup
	 * callback.
	 */
	if (hdev->wakeup)
		hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	error = hci_register_suspend_notifier(hdev);
	if (error)
		BT_WARN("register suspend notifier failed error:%d\n", error);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	idr_init(&hdev->adv_monitors_idr);
	msft_register(hdev);

	return id;

err_wqueue:
	debugfs_remove_recursive(hdev->debugfs);
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_free(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
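
/* Illustrative sketch of the registration contract checked above: a
 * hypothetical driver must provide at least open, close and send before
 * calling hci_register_dev(). my_open/my_close/my_send are assumed
 * driver callbacks; hci_register_dev() returns the assigned index on
 * success.
 *
 *	hdev->bus = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0) {
 *		hci_free_dev(hdev);
 *		return err;
 *	}
 *	// teardown later: hci_unregister_dev(hdev); hci_free_dev(hdev);
 */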

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	mutex_lock(&hdev->unregister_lock);
	hci_dev_set_flag(hdev, HCI_UNREGISTER);
	mutex_unlock(&hdev->unregister_lock);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	cancel_work_sync(&hdev->rx_work);
	cancel_work_sync(&hdev->cmd_work);
	cancel_work_sync(&hdev->tx_work);
	cancel_work_sync(&hdev->power_on);
	cancel_work_sync(&hdev->error_reset);

	hci_cmd_sync_clear(hdev);

	hci_unregister_suspend_notifier(hdev);

	hci_dev_do_close(hdev);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);
	/* Actual cleanup is deferred until hci_release_dev(). */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Release HCI device */
void hci_release_dev(struct hci_dev *hdev)
{
	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->reject_list);
	hci_bdaddr_list_clear(&hdev->accept_list);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_adv_monitors_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_blocked_keys_clear(hdev);
	hci_codec_list_clear(&hdev->local_codecs);
	msft_release(hdev);
	hci_dev_unlock(hdev);

	ida_destroy(&hdev->unset_handle_ida);
	ida_free(&hci_index_ida, hdev->id);
	kfree_skb(hdev->sent_cmd);
	kfree_skb(hdev->req_skb);
	kfree_skb(hdev->recv_event);
	kfree(hdev);
}
EXPORT_SYMBOL(hci_release_dev);

int hci_register_suspend_notifier(struct hci_dev *hdev)
{
	int ret = 0;

	if (!hdev->suspend_notifier.notifier_call &&
	    !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
		hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
		ret = register_pm_notifier(&hdev->suspend_notifier);
	}

	return ret;
}

int hci_unregister_suspend_notifier(struct hci_dev *hdev)
{
	int ret = 0;

	if (hdev->suspend_notifier.notifier_call) {
		ret = unregister_pm_notifier(&hdev->suspend_notifier);
		if (!ret)
			hdev->suspend_notifier.notifier_call = NULL;
	}

	return ret;
}

/* Cancel ongoing command synchronously:
 *
 * - Cancel command timer
 * - Reset command counter
 * - Cancel command request
 */
static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	cancel_delayed_work_sync(&hdev->cmd_timer);
	cancel_delayed_work_sync(&hdev->ncmd_timer);
	atomic_set(&hdev->cmd_cnt, 1);

	hci_cmd_sync_cancel_sync(hdev, err);
}

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Suspend should only act on when powered. */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down don't attempt to suspend */
	if (mgmt_powering_down(hdev))
		return 0;

	/* Cancel potentially blocking sync operation before suspend */
	hci_cancel_cmd_sync(hdev, EHOSTDOWN);

	hci_req_sync_lock(hdev);
	ret = hci_suspend_sync(hdev);
	hci_req_sync_unlock(hdev);

	hci_clear_wake_reason(hdev);
	mgmt_suspending(hdev, hdev->suspend_state);

	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return ret;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Resume should only act on when powered. */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down don't attempt to resume */
	if (mgmt_powering_down(hdev))
		return 0;

	hci_req_sync_lock(hdev);
	ret = hci_resume_sync(hdev);
	hci_req_sync_unlock(hdev);

	mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
		      hdev->wake_addr_type);

	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return ret;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	skb_put_data(skb, hw_err, 3);

	bt_dev_err(hdev, "Injecting HCI hardware error event");
	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);
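
/* Illustrative sketch: a hypothetical driver that detected a firmware
 * hang can ask the core to simulate a Hardware Error event, which in
 * turn triggers a full reset of the device. my_firmware_hung() is an
 * assumed driver-side check.
 *
 *	if (my_firmware_hung(data))
 *		hci_reset_dev(hdev);
 */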

static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (hdev->classify_pkt_type)
		return hdev->classify_pkt_type(hdev, skb);

	return hci_skb_pkt_type(skb);
}

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 dev_pkt_type;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Check if the driver agrees with the packet type classification */
	dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb);
	if (hci_skb_pkt_type(skb) != dev_pkt_type)
		hci_skb_pkt_type(skb) = dev_pkt_type;

	switch (hci_skb_pkt_type(skb)) {
	case HCI_EVENT_PKT:
		break;
	case HCI_ACLDATA_PKT:
		/* Detect if ISO packet has been sent as ACL */
		if (hci_conn_num(hdev, ISO_LINK)) {
			__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
			__u8 type;

			type = hci_conn_lookup_type(hdev, hci_handle(handle));
			if (type == ISO_LINK)
				hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
		}
		break;
	case HCI_SCODATA_PKT:
		break;
	case HCI_ISODATA_PKT:
		break;
	default:
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
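
/* Illustrative sketch (hypothetical driver RX path): the driver tags the
 * skb with the transport-level packet type before handing it to the core.
 * hci_recv_frame() takes ownership of the skb and frees it on error.
 *
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	err = hci_recv_frame(hdev, skb);
 */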

/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);

void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);

void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);
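
/* Illustrative usage of the printf-style setters above, e.g. from a
 * hypothetical driver's setup routine; hw_rev/fw_major/fw_minor stand in
 * for values the driver read from hardware.
 *
 *	hci_set_hw_info(hdev, "rev %u", hw_rev);
 *	hci_set_fw_info(hdev, "version %u.%u", fw_major, fw_minor);
 */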

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
		return err;
	}

	return 0;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
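
/* Illustrative sketch: queueing a parameterless HCI Reset. The command is
 * sent asynchronously from hci_cmd_work; callers that need the completion
 * use the hci_cmd_sync helpers instead.
 *
 *	err = hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */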

int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Some vendors do not comply with this rule for vendor-specific
		 * commands and do not return any event. We want to support
		 * unresponded commands for such cases only.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	hci_send_frame(hdev, skb);

	return 0;
}
EXPORT_SYMBOL(__hci_cmd_send);
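
/* Illustrative sketch: the unresponded path is restricted to the vendor
 * OGF (0x3f). The OCF and payload below are made up for illustration.
 *
 *	u8 param[2] = { 0x01, 0x02 };
 *
 *	err = __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
 *			     sizeof(param), param);
 */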

/* Get data from the previously sent command */
static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
		return NULL;

	hdr = (void *)skb->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	return skb->data + HCI_COMMAND_HDR_SIZE;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	void *data;

	/* Check if opcode matches last sent command */
	data = hci_cmd_data(hdev->sent_cmd, opcode);
	if (!data)
		/* Check if opcode matches last request */
		data = hci_cmd_data(hdev->req_skb, opcode);

	return data;
}

/* Get data from last received event */
void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
{
	struct hci_event_hdr *hdr;
	int offset;

	if (!hdev->recv_event)
		return NULL;

	hdr = (void *)hdev->recv_event->data;
	offset = sizeof(*hdr);

	if (hdr->evt != event) {
		/* In case of LE metaevent check the subevent match */
		if (hdr->evt == HCI_EV_LE_META) {
			struct hci_ev_le_meta *ev;

			ev = (void *)hdev->recv_event->data + offset;
			offset += sizeof(*ev);
			if (ev->subevent == event)
				goto found;
		}
		return NULL;
	}

found:
	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	return hdev->recv_event->data + offset;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send ISO data */
static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
{
	struct hci_iso_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ISO_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;
	__u16 flags;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;

	list = skb_shinfo(skb)->frag_list;

	flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
	hci_add_iso_hdr(skb, conn->handle, flags);

	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		__skb_queue_tail(queue, skb);

		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
			flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
						   0x00);
			hci_add_iso_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);
	}
}

void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hci_queue_iso(conn, &conn->data_q, skb);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
{
	struct hci_dev *hdev;
	int cnt, q;

	if (!conn) {
		*quote = 0;
		return;
	}

	hdev = conn->hdev;

	switch (conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	case ISO_LINK:
		cnt = hdev->iso_mtu ? hdev->iso_cnt :
			hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;
}
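
/* Worked example of the fair-share math above: with cnt == 8 free
 * controller buffers and num == 3 active connections, each connection is
 * allowed q = 8 / 3 = 2 packets per scheduling round; q is clamped to at
 * least 1 so a connection is never starved completely.
 */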

static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	hci_quote_sent(conn, num, quote);

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	bt_dev_err(hdev, "link tx timeout");

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			bt_dev_err(hdev, "killing stalled connection %pMR",
				   &c->dst);
			/* hci_disconnect might sleep, so, we have to release
			 * the RCU read lock before calling it.
			 */
			rcu_read_unlock();
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
			rcu_read_lock();
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	hci_quote_sent(chan->conn, num, quote);

	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
{
	unsigned long last_tx;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return;

	switch (type) {
	case LE_LINK:
		last_tx = hdev->le_last_tx;
		break;
	default:
		last_tx = hdev->acl_last_tx;
		break;
	}

	/* tx timeout must be longer than maximum link supervision timeout
	 * (40.9 seconds)
	 */
	if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
		hci_link_tx_to(hdev, type);
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt, ACL_LINK);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	hci_sched_acl_pkt(hdev);
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, *cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;

	__check_timeout(hdev, *cnt, LE_LINK);

	tmp = *cnt;
	while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			(*cnt)--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (*cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

/* Schedule CIS */
static void hci_sched_iso(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, *cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ISO_LINK))
		return;

	cnt = hdev->iso_pkts ? &hdev->iso_cnt :
		hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
	while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
			(*cnt)--;
		}
	}
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_iso(hdev);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

3778
	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3779
	       handle, flags);
Linus Torvalds's avatar
Linus Torvalds committed
3780 3781 3782 3783 3784

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
3785 3786
	if (conn && hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_device_connected(hdev, conn, NULL, 0);
Linus Torvalds's avatar
Linus Torvalds committed
3787
	hci_dev_unlock(hdev);
3788

Linus Torvalds's avatar
Linus Torvalds committed
3789
	if (conn) {
3790
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3791

Linus Torvalds's avatar
Linus Torvalds committed
3792
		/* Send to upper protocol */
3793 3794
		l2cap_recv_acldata(conn, skb, flags);
		return;
Linus Torvalds's avatar
Linus Torvalds committed
3795
	} else {
3796 3797
		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
			   handle);
Linus Torvalds's avatar
Linus Torvalds committed
3798 3799 3800 3801 3802 3803
	}

	kfree_skb(skb);
}

/* SCO data packet */
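/* For synchronous data the two low flag bits carry the
 * Packet_Status_Flag; it is stashed in the skb via
 * hci_skb_pkt_status() so the SCO socket layer can flag possibly
 * corrupted frames.
 */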
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		hci_skb_pkt_status(skb) = flags & 0x03;
		sco_recv_scodata(conn, skb);
		return;
	} else {
		bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
				       handle);
	}

	kfree_skb(skb);
}

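/* ISO data packet. The header is pulled with skb_pull_data(), which
 * fails cleanly on a truncated packet instead of reading past the end
 * of the skb.
 */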
static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_iso_hdr *hdr;
	struct hci_conn *conn;
	__u16 handle, flags;

	hdr = skb_pull_data(skb, sizeof(*hdr));
	if (!hdr) {
		bt_dev_err(hdev, "ISO packet too small");
		goto drop;
	}

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
		   handle, flags);

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (!conn) {
		bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
			   handle);
		goto drop;
	}

	/* Send to upper protocol */
	iso_recv(conn, skb, flags);
	return;

drop:
	kfree_skb(skb);
}

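/* A request is complete when the command queue is empty or when the
 * next queued command is marked HCI_REQ_START, i.e. it opens a new
 * request.
 */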
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}

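/* Requeue a clone of the last sent command and kick the command work
 * item. HCI_OP_RESET is deliberately never resent; see the CSR
 * workaround in hci_req_cmd_complete() for the only caller.
 */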
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

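/* Match a completed command against the request machinery and return
 * the completion callback, if any, through req_complete or
 * req_complete_skb so the caller can invoke it.
 */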
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point, this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	skb = hdev->req_skb;

	/* If this was the last command in a request, the complete
	 * callback is found in hdev->req_skb instead of the command
	 * queue (hdev->cmd_q).
	 */
	if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		return;
	}

	if (skb && bt_cb(skb)->hci.req_complete) {
		*req_complete = bt_cb(skb)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		dev_kfree_skb_irq(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

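/* RX work item: drains hdev->rx_q and feeds every packet to the
 * monitor, to any promiscuous sockets and finally to the matching
 * protocol handler for its packet type.
 */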
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	/* The kcov_remote functions are used to collect packet parsing
	 * coverage from this background thread and to associate that
	 * coverage with the syscall thread which originally injected
	 * the packet. This helps with fuzzing the kernel.
	 */
	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
		kcov_remote_start_common(skb_get_kcov_handle(skb));

		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * userspace has exclusive access to the device. While
		 * the device is still in HCI_INIT, however, the data
		 * packets must reach the driver so it can complete its
		 * setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		case HCI_ISODATA_PKT:
			BT_DBG("%s ISO data packet", hdev->name);
			hci_isodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

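/* Send a single command to the driver, keeping a clone in
 * hdev->sent_cmd so the matching command complete event can be paired
 * with it later. If cloning fails the command is requeued and the
 * command work item is rescheduled.
 */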
static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	bt_dev_dbg(hdev, "skb %p", skb);

	kfree_skb(hdev->sent_cmd);

	hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
	if (!hdev->sent_cmd) {
		skb_queue_head(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
		return;
	}

	err = hci_send_frame(hdev, skb);
	if (err < 0) {
		hci_cmd_sync_cancel_sync(hdev, -err);
		return;
	}

	if (hdev->req_status == HCI_REQ_PEND &&
	    !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
		kfree_skb(hdev->req_skb);
		hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	}

	atomic_dec(&hdev->cmd_cnt);
}

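/* Command work item: if the controller has a command credit (cmd_cnt),
 * send the next queued command and arm the command timeout, unless a
 * reset or a command-queue drain is in progress.
 */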
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		hci_send_cmd_sync(hdev, skb);

		rcu_read_lock();
		if (test_bit(HCI_RESET, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
			cancel_delayed_work(&hdev->cmd_timer);
		else
			queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
					   HCI_CMD_TIMEOUT);
		rcu_read_unlock();
	}
}