Commit 0ae20159 authored by David S. Miller's avatar David S. Miller

Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Johan Hedberg says:

====================
pull request: bluetooth-next 2021-02-11

Here's the main bluetooth-next pull request for 5.12:

 - Add support for advertising monitor offloading using Microsoft
   vendor extensions
 - Add firmware download support for MediaTek MT7921U USB devices
 - Suspend-related fixes for Qualcomm devices
 - Add support for Intel GarfieldPeak controller
 - Various other smaller fixes & cleanups

Please let me know if there are any issues pulling. Thanks.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 1d131151 55c0bd77
...@@ -38,7 +38,7 @@ Following example uses irq pin number 3 of gpio0 for out of band wake-on-bt: ...@@ -38,7 +38,7 @@ Following example uses irq pin number 3 of gpio0 for out of band wake-on-bt:
compatible = "usb1286,204e"; compatible = "usb1286,204e";
reg = <1>; reg = <1>;
interrupt-parent = <&gpio0>; interrupt-parent = <&gpio0>;
interrupt-name = "wakeup"; interrupt-names = "wakeup";
interrupts = <3 IRQ_TYPE_LEVEL_LOW>; interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
}; };
}; };
...@@ -437,38 +437,31 @@ int btintel_read_version_tlv(struct hci_dev *hdev, struct intel_version_tlv *ver ...@@ -437,38 +437,31 @@ int btintel_read_version_tlv(struct hci_dev *hdev, struct intel_version_tlv *ver
tlv = (struct intel_tlv *)skb->data; tlv = (struct intel_tlv *)skb->data;
switch (tlv->type) { switch (tlv->type) {
case INTEL_TLV_CNVI_TOP: case INTEL_TLV_CNVI_TOP:
version->cnvi_top = version->cnvi_top = get_unaligned_le32(tlv->val);
__le32_to_cpu(get_unaligned_le32(tlv->val));
break; break;
case INTEL_TLV_CNVR_TOP: case INTEL_TLV_CNVR_TOP:
version->cnvr_top = version->cnvr_top = get_unaligned_le32(tlv->val);
__le32_to_cpu(get_unaligned_le32(tlv->val));
break; break;
case INTEL_TLV_CNVI_BT: case INTEL_TLV_CNVI_BT:
version->cnvi_bt = version->cnvi_bt = get_unaligned_le32(tlv->val);
__le32_to_cpu(get_unaligned_le32(tlv->val));
break; break;
case INTEL_TLV_CNVR_BT: case INTEL_TLV_CNVR_BT:
version->cnvr_bt = version->cnvr_bt = get_unaligned_le32(tlv->val);
__le32_to_cpu(get_unaligned_le32(tlv->val));
break; break;
case INTEL_TLV_DEV_REV_ID: case INTEL_TLV_DEV_REV_ID:
version->dev_rev_id = version->dev_rev_id = get_unaligned_le16(tlv->val);
__le16_to_cpu(get_unaligned_le16(tlv->val));
break; break;
case INTEL_TLV_IMAGE_TYPE: case INTEL_TLV_IMAGE_TYPE:
version->img_type = tlv->val[0]; version->img_type = tlv->val[0];
break; break;
case INTEL_TLV_TIME_STAMP: case INTEL_TLV_TIME_STAMP:
version->timestamp = version->timestamp = get_unaligned_le16(tlv->val);
__le16_to_cpu(get_unaligned_le16(tlv->val));
break; break;
case INTEL_TLV_BUILD_TYPE: case INTEL_TLV_BUILD_TYPE:
version->build_type = tlv->val[0]; version->build_type = tlv->val[0];
break; break;
case INTEL_TLV_BUILD_NUM: case INTEL_TLV_BUILD_NUM:
version->build_num = version->build_num = get_unaligned_le32(tlv->val);
__le32_to_cpu(get_unaligned_le32(tlv->val));
break; break;
case INTEL_TLV_SECURE_BOOT: case INTEL_TLV_SECURE_BOOT:
version->secure_boot = tlv->val[0]; version->secure_boot = tlv->val[0];
......
...@@ -94,6 +94,53 @@ int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver, ...@@ -94,6 +94,53 @@ int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver,
} }
EXPORT_SYMBOL_GPL(qca_read_soc_version); EXPORT_SYMBOL_GPL(qca_read_soc_version);
/* Query the QCA WCN39xx controller for its firmware build label using the
 * EDL "get build info" vendor command, and publish it via hci_set_fw_info()
 * so it shows up in the HCI debugfs firmware-info node.
 *
 * Returns 0 on success (including when the label is too long to report),
 * or a negative errno if the command fails or the response is malformed.
 */
static int qca_read_fw_build_info(struct hci_dev *hdev)
{
	struct sk_buff *skb;
	struct edl_event_hdr *edl;
	char cmd, build_label[QCA_FW_BUILD_VER_LEN];
	int build_lbl_len, err = 0;

	bt_dev_dbg(hdev, "QCA read fw build info");

	cmd = EDL_GET_BUILD_INFO_CMD;
	skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
				&cmd, 0, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		bt_dev_err(hdev, "Reading QCA fw build info failed (%d)",
			   err);
		return err;
	}

	edl = (struct edl_event_hdr *)(skb->data);
	if (!edl) {
		bt_dev_err(hdev, "QCA read fw build info with no header");
		err = -EILSEQ;
		goto out;
	}

	if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
	    edl->rtype != EDL_GET_BUILD_INFO_CMD) {
		bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp,
			   edl->rtype);
		err = -EIO;
		goto out;
	}

	/* First payload byte is the length of the build label that follows.
	 * Only copy and publish the label when it fits in our buffer;
	 * previously hci_set_fw_info() was called unconditionally, leaking
	 * uninitialized stack contents whenever the label was too long.
	 * NOTE(review): build_lbl_len is not validated against skb->len here
	 * — presumably the controller response is well-formed; confirm.
	 */
	build_lbl_len = edl->data[0];
	if (build_lbl_len <= QCA_FW_BUILD_VER_LEN - 1) {
		memcpy(build_label, edl->data + 1, build_lbl_len);
		build_label[build_lbl_len] = '\0';
		hci_set_fw_info(hdev, "%s", build_label);
	} else {
		bt_dev_warn(hdev, "QCA fw build label too long (%d)",
			    build_lbl_len);
	}

out:
	kfree_skb(skb);
	return err;
}
static int qca_send_reset(struct hci_dev *hdev) static int qca_send_reset(struct hci_dev *hdev)
{ {
struct sk_buff *skb; struct sk_buff *skb;
...@@ -517,6 +564,19 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, ...@@ -517,6 +564,19 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
return err; return err;
} }
/* WCN399x supports the Microsoft vendor extension with 0xFD70 as the
* VsMsftOpCode.
*/
switch (soc_type) {
case QCA_WCN3990:
case QCA_WCN3991:
case QCA_WCN3998:
hci_set_msft_opcode(hdev, 0xFD70);
break;
default:
break;
}
/* Perform HCI reset */ /* Perform HCI reset */
err = qca_send_reset(hdev); err = qca_send_reset(hdev);
if (err < 0) { if (err < 0) {
...@@ -524,6 +584,13 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, ...@@ -524,6 +584,13 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
return err; return err;
} }
if (soc_type == QCA_WCN3991) {
/* get fw build info */
err = qca_read_fw_build_info(hdev);
if (err < 0)
return err;
}
bt_dev_info(hdev, "QCA setup on UART is completed"); bt_dev_info(hdev, "QCA setup on UART is completed");
return 0; return 0;
......
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#define EDL_PATCH_CMD_LEN (1) #define EDL_PATCH_CMD_LEN (1)
#define EDL_PATCH_VER_REQ_CMD (0x19) #define EDL_PATCH_VER_REQ_CMD (0x19)
#define EDL_PATCH_TLV_REQ_CMD (0x1E) #define EDL_PATCH_TLV_REQ_CMD (0x1E)
#define EDL_GET_BUILD_INFO_CMD (0x20)
#define EDL_NVM_ACCESS_SET_REQ_CMD (0x01) #define EDL_NVM_ACCESS_SET_REQ_CMD (0x01)
#define MAX_SIZE_PER_TLV_SEGMENT (243) #define MAX_SIZE_PER_TLV_SEGMENT (243)
#define QCA_PRE_SHUTDOWN_CMD (0xFC08) #define QCA_PRE_SHUTDOWN_CMD (0xFC08)
......
...@@ -142,12 +142,16 @@ static int btqcomsmd_probe(struct platform_device *pdev) ...@@ -142,12 +142,16 @@ static int btqcomsmd_probe(struct platform_device *pdev)
btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD", btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD",
btqcomsmd_cmd_callback, btq); btqcomsmd_cmd_callback, btq);
if (IS_ERR(btq->cmd_channel)) if (IS_ERR(btq->cmd_channel)) {
return PTR_ERR(btq->cmd_channel); ret = PTR_ERR(btq->cmd_channel);
goto destroy_acl_channel;
}
hdev = hci_alloc_dev(); hdev = hci_alloc_dev();
if (!hdev) if (!hdev) {
return -ENOMEM; ret = -ENOMEM;
goto destroy_cmd_channel;
}
hci_set_drvdata(hdev, btq); hci_set_drvdata(hdev, btq);
btq->hdev = hdev; btq->hdev = hdev;
...@@ -161,14 +165,21 @@ static int btqcomsmd_probe(struct platform_device *pdev) ...@@ -161,14 +165,21 @@ static int btqcomsmd_probe(struct platform_device *pdev)
hdev->set_bdaddr = qca_set_bdaddr_rome; hdev->set_bdaddr = qca_set_bdaddr_rome;
ret = hci_register_dev(hdev); ret = hci_register_dev(hdev);
if (ret < 0) { if (ret < 0)
hci_free_dev(hdev); goto hci_free_dev;
return ret;
}
platform_set_drvdata(pdev, btq); platform_set_drvdata(pdev, btq);
return 0; return 0;
hci_free_dev:
hci_free_dev(hdev);
destroy_cmd_channel:
rpmsg_destroy_ept(btq->cmd_channel);
destroy_acl_channel:
rpmsg_destroy_ept(btq->acl_channel);
return ret;
} }
static int btqcomsmd_remove(struct platform_device *pdev) static int btqcomsmd_remove(struct platform_device *pdev)
......
...@@ -38,6 +38,19 @@ ...@@ -38,6 +38,19 @@
.hci_ver = (hciv), \ .hci_ver = (hciv), \
.hci_bus = (bus) .hci_bus = (bus)
/* Realtek controller chip identifiers, as reported in the "project id"
 * field of the downloaded firmware image (see rtlbt_parse_firmware()).
 * The numeric gaps are intentional: the explicit assignments pin each
 * enumerator to the id value the vendor firmware actually uses, so the
 * values between them are simply unassigned by Realtek.
 */
enum btrtl_chip_id {
CHIP_ID_8723A,
CHIP_ID_8723B,
CHIP_ID_8821A,
CHIP_ID_8761A,
CHIP_ID_8822B = 8,
CHIP_ID_8723D,
CHIP_ID_8821C,
CHIP_ID_8822C = 13,
CHIP_ID_8761B,
CHIP_ID_8852A = 18,
};
struct id_table { struct id_table {
__u16 match_flags; __u16 match_flags;
__u16 lmp_subver; __u16 lmp_subver;
...@@ -58,6 +71,7 @@ struct btrtl_device_info { ...@@ -58,6 +71,7 @@ struct btrtl_device_info {
u8 *cfg_data; u8 *cfg_data;
int cfg_len; int cfg_len;
bool drop_fw; bool drop_fw;
int project_id;
}; };
static const struct id_table ic_id_table[] = { static const struct id_table ic_id_table[] = {
...@@ -307,9 +321,11 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, ...@@ -307,9 +321,11 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
/* Find project_id in table */ /* Find project_id in table */
for (i = 0; i < ARRAY_SIZE(project_id_to_lmp_subver); i++) { for (i = 0; i < ARRAY_SIZE(project_id_to_lmp_subver); i++) {
if (project_id == project_id_to_lmp_subver[i].id) if (project_id == project_id_to_lmp_subver[i].id) {
btrtl_dev->project_id = project_id;
break; break;
} }
}
if (i >= ARRAY_SIZE(project_id_to_lmp_subver)) { if (i >= ARRAY_SIZE(project_id_to_lmp_subver)) {
rtl_dev_err(hdev, "unknown project id %d", project_id); rtl_dev_err(hdev, "unknown project id %d", project_id);
...@@ -658,6 +674,12 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, ...@@ -658,6 +674,12 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
} }
} }
/* RTL8822CE supports the Microsoft vendor extension and uses 0xFCF0
* for VsMsftOpCode.
*/
if (lmp_subver == RTL_ROM_LMP_8822B)
hci_set_msft_opcode(hdev, 0xFCF0);
return btrtl_dev; return btrtl_dev;
err_free: err_free:
...@@ -708,13 +730,28 @@ int btrtl_setup_realtek(struct hci_dev *hdev) ...@@ -708,13 +730,28 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
ret = btrtl_download_firmware(hdev, btrtl_dev); ret = btrtl_download_firmware(hdev, btrtl_dev);
btrtl_free(btrtl_dev);
/* Enable controller to do both LE scan and BR/EDR inquiry /* Enable controller to do both LE scan and BR/EDR inquiry
* simultaneously. * simultaneously.
*/ */
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
/* Enable central-peripheral role (able to create new connections with
* an existing connection in slave role).
*/
/* Enable WBS supported for the specific Realtek devices. */
switch (btrtl_dev->project_id) {
case CHIP_ID_8822C:
case CHIP_ID_8852A:
set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
break;
default:
rtl_dev_dbg(hdev, "Central-peripheral role not enabled.");
rtl_dev_dbg(hdev, "WBS supported not enabled.");
break;
}
btrtl_free(btrtl_dev);
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(btrtl_setup_realtek); EXPORT_SYMBOL_GPL(btrtl_setup_realtek);
......
This diff is collapsed.
...@@ -654,6 +654,7 @@ static const struct h4_recv_pkt bcm_recv_pkts[] = { ...@@ -654,6 +654,7 @@ static const struct h4_recv_pkt bcm_recv_pkts[] = {
{ H4_RECV_ACL, .recv = hci_recv_frame }, { H4_RECV_ACL, .recv = hci_recv_frame },
{ H4_RECV_SCO, .recv = hci_recv_frame }, { H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = hci_recv_frame }, { H4_RECV_EVENT, .recv = hci_recv_frame },
{ H4_RECV_ISO, .recv = hci_recv_frame },
{ BCM_RECV_LM_DIAG, .recv = hci_recv_diag }, { BCM_RECV_LM_DIAG, .recv = hci_recv_diag },
{ BCM_RECV_NULL, .recv = hci_recv_diag }, { BCM_RECV_NULL, .recv = hci_recv_diag },
{ BCM_RECV_TYPE49, .recv = hci_recv_diag }, { BCM_RECV_TYPE49, .recv = hci_recv_diag },
......
...@@ -906,6 +906,11 @@ static int h5_btrtl_setup(struct h5 *h5) ...@@ -906,6 +906,11 @@ static int h5_btrtl_setup(struct h5 *h5)
/* Give the device some time before the hci-core sends it a reset */ /* Give the device some time before the hci-core sends it a reset */
usleep_range(10000, 20000); usleep_range(10000, 20000);
/* Enable controller to do both LE scan and BR/EDR inquiry
* simultaneously.
*/
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &h5->hu->hdev->quirks);
out_free: out_free:
btrtl_free(btrtl_dev); btrtl_free(btrtl_dev);
...@@ -1022,6 +1027,8 @@ static const struct of_device_id rtl_bluetooth_of_match[] = { ...@@ -1022,6 +1027,8 @@ static const struct of_device_id rtl_bluetooth_of_match[] = {
.data = (const void *)&rtl_vnd }, .data = (const void *)&rtl_vnd },
{ .compatible = "realtek,rtl8723bs-bt", { .compatible = "realtek,rtl8723bs-bt",
.data = (const void *)&rtl_vnd }, .data = (const void *)&rtl_vnd },
{ .compatible = "realtek,rtl8723ds-bt",
.data = (const void *)&rtl_vnd },
#endif #endif
{ }, { },
}; };
......
...@@ -127,10 +127,9 @@ int hci_uart_tx_wakeup(struct hci_uart *hu) ...@@ -127,10 +127,9 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
goto no_schedule; goto no_schedule;
if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state); set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state))
goto no_schedule; goto no_schedule;
}
BT_DBG(""); BT_DBG("");
...@@ -174,10 +173,10 @@ static void hci_uart_write_work(struct work_struct *work) ...@@ -174,10 +173,10 @@ static void hci_uart_write_work(struct work_struct *work)
kfree_skb(skb); kfree_skb(skb);
} }
clear_bit(HCI_UART_SENDING, &hu->tx_state);
if (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state)) if (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state))
goto restart; goto restart;
clear_bit(HCI_UART_SENDING, &hu->tx_state);
wake_up_bit(&hu->tx_state, HCI_UART_SENDING); wake_up_bit(&hu->tx_state, HCI_UART_SENDING);
} }
......
...@@ -50,7 +50,8 @@ ...@@ -50,7 +50,8 @@
#define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000 #define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000
#define CMD_TRANS_TIMEOUT_MS 100 #define CMD_TRANS_TIMEOUT_MS 100
#define MEMDUMP_TIMEOUT_MS 8000 #define MEMDUMP_TIMEOUT_MS 8000
#define IBS_DISABLE_SSR_TIMEOUT_MS (MEMDUMP_TIMEOUT_MS + 1000) #define IBS_DISABLE_SSR_TIMEOUT_MS \
(MEMDUMP_TIMEOUT_MS + FW_DOWNLOAD_TIMEOUT_MS)
#define FW_DOWNLOAD_TIMEOUT_MS 3000 #define FW_DOWNLOAD_TIMEOUT_MS 3000
/* susclk rate */ /* susclk rate */
...@@ -76,7 +77,8 @@ enum qca_flags { ...@@ -76,7 +77,8 @@ enum qca_flags {
QCA_MEMDUMP_COLLECTION, QCA_MEMDUMP_COLLECTION,
QCA_HW_ERROR_EVENT, QCA_HW_ERROR_EVENT,
QCA_SSR_TRIGGERED, QCA_SSR_TRIGGERED,
QCA_BT_OFF QCA_BT_OFF,
QCA_ROM_FW
}; };
enum qca_capabilities { enum qca_capabilities {
...@@ -1024,7 +1026,9 @@ static void qca_controller_memdump(struct work_struct *work) ...@@ -1024,7 +1026,9 @@ static void qca_controller_memdump(struct work_struct *work)
dump_size = __le32_to_cpu(dump->dump_size); dump_size = __le32_to_cpu(dump->dump_size);
if (!(dump_size)) { if (!(dump_size)) {
bt_dev_err(hu->hdev, "Rx invalid memdump size"); bt_dev_err(hu->hdev, "Rx invalid memdump size");
kfree(qca_memdump);
kfree_skb(skb); kfree_skb(skb);
qca->qca_memdump = NULL;
mutex_unlock(&qca->hci_memdump_lock); mutex_unlock(&qca->hci_memdump_lock);
return; return;
} }
...@@ -1661,6 +1665,7 @@ static int qca_setup(struct hci_uart *hu) ...@@ -1661,6 +1665,7 @@ static int qca_setup(struct hci_uart *hu)
if (ret) if (ret)
return ret; return ret;
clear_bit(QCA_ROM_FW, &qca->flags);
/* Patch downloading has to be done without IBS mode */ /* Patch downloading has to be done without IBS mode */
set_bit(QCA_IBS_DISABLED, &qca->flags); set_bit(QCA_IBS_DISABLED, &qca->flags);
...@@ -1718,12 +1723,14 @@ static int qca_setup(struct hci_uart *hu) ...@@ -1718,12 +1723,14 @@ static int qca_setup(struct hci_uart *hu)
hu->hdev->cmd_timeout = qca_cmd_timeout; hu->hdev->cmd_timeout = qca_cmd_timeout;
} else if (ret == -ENOENT) { } else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */ /* No patch/nvm-config found, run with original fw/config */
set_bit(QCA_ROM_FW, &qca->flags);
ret = 0; ret = 0;
} else if (ret == -EAGAIN) { } else if (ret == -EAGAIN) {
/* /*
* Userspace firmware loader will return -EAGAIN in case no * Userspace firmware loader will return -EAGAIN in case no
* patch/nvm-config is found, so run with original fw/config. * patch/nvm-config is found, so run with original fw/config.
*/ */
set_bit(QCA_ROM_FW, &qca->flags);
ret = 0; ret = 0;
} }
...@@ -2100,17 +2107,29 @@ static int __maybe_unused qca_suspend(struct device *dev) ...@@ -2100,17 +2107,29 @@ static int __maybe_unused qca_suspend(struct device *dev)
set_bit(QCA_SUSPENDING, &qca->flags); set_bit(QCA_SUSPENDING, &qca->flags);
if (test_bit(QCA_BT_OFF, &qca->flags)) /* if BT SoC is running with default firmware then it does not
* support in-band sleep
*/
if (test_bit(QCA_ROM_FW, &qca->flags))
return 0; return 0;
if (test_bit(QCA_IBS_DISABLED, &qca->flags)) { /* During SSR after memory dump collection, controller will be
* powered off and then powered on.If controller is powered off
* during SSR then we should wait until SSR is completed.
*/
if (test_bit(QCA_BT_OFF, &qca->flags) &&
!test_bit(QCA_SSR_TRIGGERED, &qca->flags))
return 0;
if (test_bit(QCA_IBS_DISABLED, &qca->flags) ||
test_bit(QCA_SSR_TRIGGERED, &qca->flags)) {
wait_timeout = test_bit(QCA_SSR_TRIGGERED, &qca->flags) ? wait_timeout = test_bit(QCA_SSR_TRIGGERED, &qca->flags) ?
IBS_DISABLE_SSR_TIMEOUT_MS : IBS_DISABLE_SSR_TIMEOUT_MS :
FW_DOWNLOAD_TIMEOUT_MS; FW_DOWNLOAD_TIMEOUT_MS;
/* QCA_IBS_DISABLED flag is set to true, During FW download /* QCA_IBS_DISABLED flag is set to true, During FW download
* and during memory dump collection. It is reset to false, * and during memory dump collection. It is reset to false,
* After FW download complete and after memory dump collections. * After FW download complete.
*/ */
wait_on_bit_timeout(&qca->flags, QCA_IBS_DISABLED, wait_on_bit_timeout(&qca->flags, QCA_IBS_DISABLED,
TASK_UNINTERRUPTIBLE, msecs_to_jiffies(wait_timeout)); TASK_UNINTERRUPTIBLE, msecs_to_jiffies(wait_timeout));
...@@ -2122,10 +2141,6 @@ static int __maybe_unused qca_suspend(struct device *dev) ...@@ -2122,10 +2141,6 @@ static int __maybe_unused qca_suspend(struct device *dev)
} }
} }
/* After memory dump collection, Controller is powered off.*/
if (test_bit(QCA_BT_OFF, &qca->flags))
return 0;
cancel_work_sync(&qca->ws_awake_device); cancel_work_sync(&qca->ws_awake_device);
cancel_work_sync(&qca->ws_awake_rx); cancel_work_sync(&qca->ws_awake_rx);
......
...@@ -83,9 +83,9 @@ static void hci_uart_write_work(struct work_struct *work) ...@@ -83,9 +83,9 @@ static void hci_uart_write_work(struct work_struct *work)
hci_uart_tx_complete(hu, hci_skb_pkt_type(skb)); hci_uart_tx_complete(hu, hci_skb_pkt_type(skb));
kfree_skb(skb); kfree_skb(skb);
} }
} while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
clear_bit(HCI_UART_SENDING, &hu->tx_state); clear_bit(HCI_UART_SENDING, &hu->tx_state);
} while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
} }
/* ------- Interface to HCI layer ------ */ /* ------- Interface to HCI layer ------ */
......
...@@ -238,6 +238,14 @@ enum { ...@@ -238,6 +238,14 @@ enum {
* during the hdev->setup vendor callback. * during the hdev->setup vendor callback.
*/ */
HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, HCI_QUIRK_BROKEN_ERR_DATA_REPORTING,
/*
* When this quirk is set, then the hci_suspend_notifier is not
* registered. This is intended for devices which drop completely
* from the bus on system-suspend and which will show up as a new
* HCI after resume.
*/
HCI_QUIRK_NO_SUSPEND_NOTIFIER,
}; };
/* HCI device flags */ /* HCI device flags */
......
...@@ -105,6 +105,8 @@ enum suspend_tasks { ...@@ -105,6 +105,8 @@ enum suspend_tasks {
SUSPEND_POWERING_DOWN, SUSPEND_POWERING_DOWN,
SUSPEND_PREPARE_NOTIFIER, SUSPEND_PREPARE_NOTIFIER,
SUSPEND_SET_ADV_FILTER,
__SUSPEND_NUM_TASKS __SUSPEND_NUM_TASKS
}; };
...@@ -250,15 +252,31 @@ struct adv_pattern { ...@@ -250,15 +252,31 @@ struct adv_pattern {
__u8 value[HCI_MAX_AD_LENGTH]; __u8 value[HCI_MAX_AD_LENGTH];
}; };
/* RSSI-based filtering parameters attached to an advertisement monitor
 * (struct adv_monitor).  Thresholds are signed RSSI values (presumably in
 * dBm, per the MGMT Add Adv Patterns Monitor RSSI command — confirm
 * against the mgmt-api specification); the timeouts and sampling period
 * control how long a device must stay above/below a threshold before the
 * monitor reports it (units per the same spec — TODO confirm).
 */
struct adv_rssi_thresholds {
__s8 low_threshold;
__s8 high_threshold;
__u16 low_threshold_timeout;
__u16 high_threshold_timeout;
__u8 sampling_period;
};
struct adv_monitor { struct adv_monitor {
struct list_head patterns; struct list_head patterns;
bool active; struct adv_rssi_thresholds rssi;
__u16 handle; __u16 handle;
enum {
ADV_MONITOR_STATE_NOT_REGISTERED,
ADV_MONITOR_STATE_REGISTERED,
ADV_MONITOR_STATE_OFFLOADED
} state;
}; };
#define HCI_MIN_ADV_MONITOR_HANDLE 1 #define HCI_MIN_ADV_MONITOR_HANDLE 1
#define HCI_MAX_ADV_MONITOR_NUM_HANDLES 32 #define HCI_MAX_ADV_MONITOR_NUM_HANDLES 32
#define HCI_MAX_ADV_MONITOR_NUM_PATTERNS 16 #define HCI_MAX_ADV_MONITOR_NUM_PATTERNS 16
#define HCI_ADV_MONITOR_EXT_NONE 1
#define HCI_ADV_MONITOR_EXT_MSFT 2
#define HCI_MAX_SHORT_NAME_LENGTH 10 #define HCI_MAX_SHORT_NAME_LENGTH 10
...@@ -1316,10 +1334,15 @@ int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance); ...@@ -1316,10 +1334,15 @@ int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired);
void hci_adv_monitors_clear(struct hci_dev *hdev); void hci_adv_monitors_clear(struct hci_dev *hdev);
void hci_free_adv_monitor(struct adv_monitor *monitor); void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor);
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status);
int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle); int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status);
bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
int *err);
bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err);
bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err);
bool hci_is_adv_monitoring(struct hci_dev *hdev); bool hci_is_adv_monitoring(struct hci_dev *hdev);
int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev);
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
...@@ -1342,6 +1365,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn); ...@@ -1342,6 +1365,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_le_capable(dev) ((dev)->features[0][4] & LMP_LE) #define lmp_le_capable(dev) ((dev)->features[0][4] & LMP_LE)
#define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR) #define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR)
#define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC) #define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC)
#define lmp_esco_2m_capable(dev) ((dev)->features[0][5] & LMP_EDR_ESCO_2M)
#define lmp_ext_inq_capable(dev) ((dev)->features[0][6] & LMP_EXT_INQ) #define lmp_ext_inq_capable(dev) ((dev)->features[0][6] & LMP_EXT_INQ)
#define lmp_le_br_capable(dev) (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR)) #define lmp_le_br_capable(dev) (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR))
#define lmp_ssp_capable(dev) ((dev)->features[0][6] & LMP_SIMPLE_PAIR) #define lmp_ssp_capable(dev) ((dev)->features[0][6] & LMP_SIMPLE_PAIR)
...@@ -1794,7 +1818,10 @@ void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, ...@@ -1794,7 +1818,10 @@ void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
u8 instance); u8 instance);
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev, void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
u8 instance); u8 instance);
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle);
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip); int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip);
int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status);
int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status);
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency, u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
u16 to_multiplier); u16 to_multiplier);
......
...@@ -207,6 +207,7 @@ struct l2cap_hdr { ...@@ -207,6 +207,7 @@ struct l2cap_hdr {
__le16 len; __le16 len;
__le16 cid; __le16 cid;
} __packed; } __packed;
#define L2CAP_LEN_SIZE 2
#define L2CAP_HDR_SIZE 4 #define L2CAP_HDR_SIZE 4
#define L2CAP_ENH_HDR_SIZE 6 #define L2CAP_ENH_HDR_SIZE 6
#define L2CAP_EXT_HDR_SIZE 8 #define L2CAP_EXT_HDR_SIZE 8
......
...@@ -821,6 +821,22 @@ struct mgmt_rp_add_ext_adv_data { ...@@ -821,6 +821,22 @@ struct mgmt_rp_add_ext_adv_data {
__u8 instance; __u8 instance;
} __packed; } __packed;
/* Wire-format RSSI threshold block carried in the MGMT
 * Add Adv Patterns Monitor RSSI command.  Multi-byte fields are
 * little-endian (__le16) and the struct is __packed, so the layout is
 * exactly the on-the-wire byte order — do not reorder fields.
 * Note the field order differs from the in-kernel
 * struct adv_rssi_thresholds (high before low here).
 */
struct mgmt_adv_rssi_thresholds {
__s8 high_threshold;
__le16 high_threshold_timeout;
__s8 low_threshold;
__le16 low_threshold_timeout;
__u8 sampling_period;
} __packed;

#define MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI 0x0056
/* Command parameters: the fixed RSSI block, a pattern count, then a
 * flexible array of that many advertisement patterns.
 */
struct mgmt_cp_add_adv_patterns_monitor_rssi {
struct mgmt_adv_rssi_thresholds rssi;
__u8 pattern_count;
struct mgmt_adv_pattern patterns[];
} __packed;
/* Minimum (fixed-part) size of the command above: 7-byte RSSI block plus
 * the 1-byte pattern count.
 */
#define MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE 8
#define MGMT_EV_CMD_COMPLETE 0x0001 #define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete { struct mgmt_ev_cmd_complete {
__le16 opcode; __le16 opcode;
......
...@@ -381,9 +381,9 @@ static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb, ...@@ -381,9 +381,9 @@ static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
hdev = hci_dev_get(req->id); hdev = hci_dev_get(req->id);
if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) { if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) {
struct a2mp_amp_assoc_rsp rsp; struct a2mp_amp_assoc_rsp rsp;
rsp.id = req->id;
memset(&rsp, 0, sizeof(rsp)); memset(&rsp, 0, sizeof(rsp));
rsp.id = req->id;
if (tmp) { if (tmp) {
rsp.status = A2MP_STATUS_COLLISION_OCCURED; rsp.status = A2MP_STATUS_COLLISION_OCCURED;
...@@ -512,6 +512,7 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, ...@@ -512,6 +512,7 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL); assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL);
if (!assoc) { if (!assoc) {
amp_ctrl_put(ctrl); amp_ctrl_put(ctrl);
hci_dev_put(hdev);
return -ENOMEM; return -ENOMEM;
} }
......
...@@ -508,7 +508,7 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) ...@@ -508,7 +508,7 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
if (amount < 0) if (amount < 0)
amount = 0; amount = 0;
err = put_user(amount, (int __user *) arg); err = put_user(amount, (int __user *)arg);
break; break;
case TIOCINQ: case TIOCINQ:
...@@ -519,7 +519,7 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) ...@@ -519,7 +519,7 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
skb = skb_peek(&sk->sk_receive_queue); skb = skb_peek(&sk->sk_receive_queue);
amount = skb ? skb->len : 0; amount = skb ? skb->len : 0;
release_sock(sk); release_sock(sk);
err = put_user(amount, (int __user *) arg); err = put_user(amount, (int __user *)arg);
break; break;
default: default:
...@@ -637,7 +637,7 @@ static int bt_seq_show(struct seq_file *seq, void *v) ...@@ -637,7 +637,7 @@ static int bt_seq_show(struct seq_file *seq, void *v)
struct bt_sock_list *l = PDE_DATA(file_inode(seq->file)); struct bt_sock_list *l = PDE_DATA(file_inode(seq->file));
if (v == SEQ_START_TOKEN) { if (v == SEQ_START_TOKEN) {
seq_puts(seq ,"sk RefCnt Rmem Wmem User Inode Parent"); seq_puts(seq, "sk RefCnt Rmem Wmem User Inode Parent");
if (l->custom_seq_show) { if (l->custom_seq_show) {
seq_putc(seq, ' '); seq_putc(seq, ' ');
...@@ -657,7 +657,7 @@ static int bt_seq_show(struct seq_file *seq, void *v) ...@@ -657,7 +657,7 @@ static int bt_seq_show(struct seq_file *seq, void *v)
sk_wmem_alloc_get(sk), sk_wmem_alloc_get(sk),
from_kuid(seq_user_ns(seq), sock_i_uid(sk)), from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
sock_i_ino(sk), sock_i_ino(sk),
bt->parent? sock_i_ino(bt->parent): 0LU); bt->parent ? sock_i_ino(bt->parent) : 0LU);
if (l->custom_seq_show) { if (l->custom_seq_show) {
seq_putc(seq, ' '); seq_putc(seq, ' ');
...@@ -678,7 +678,7 @@ static const struct seq_operations bt_seq_ops = { ...@@ -678,7 +678,7 @@ static const struct seq_operations bt_seq_ops = {
int bt_procfs_init(struct net *net, const char *name, int bt_procfs_init(struct net *net, const char *name,
struct bt_sock_list *sk_list, struct bt_sock_list *sk_list,
int (* seq_show)(struct seq_file *, void *)) int (*seq_show)(struct seq_file *, void *))
{ {
sk_list->custom_seq_show = seq_show; sk_list->custom_seq_show = seq_show;
...@@ -694,7 +694,7 @@ void bt_procfs_cleanup(struct net *net, const char *name) ...@@ -694,7 +694,7 @@ void bt_procfs_cleanup(struct net *net, const char *name)
#else #else
int bt_procfs_init(struct net *net, const char *name, int bt_procfs_init(struct net *net, const char *name,
struct bt_sock_list *sk_list, struct bt_sock_list *sk_list,
int (* seq_show)(struct seq_file *, void *)) int (*seq_show)(struct seq_file *, void *))
{ {
return 0; return 0;
} }
......
...@@ -297,6 +297,9 @@ void amp_read_loc_assoc_final_data(struct hci_dev *hdev, ...@@ -297,6 +297,9 @@ void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
struct hci_request req; struct hci_request req;
int err; int err;
if (!mgr)
return;
cp.phy_handle = hcon->handle; cp.phy_handle = hcon->handle;
cp.len_so_far = cpu_to_le16(0); cp.len_so_far = cpu_to_le16(0);
cp.max_len = cpu_to_le16(hdev->amp_assoc_size); cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
......
...@@ -203,6 +203,23 @@ static void hci_acl_create_connection(struct hci_conn *conn) ...@@ -203,6 +203,23 @@ static void hci_acl_create_connection(struct hci_conn *conn)
BT_DBG("hcon %p", conn); BT_DBG("hcon %p", conn);
/* Many controllers disallow HCI Create Connection while it is doing
* HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
* Connection. This may cause the MGMT discovering state to become false
* without user space's request but it is okay since the MGMT Discovery
* APIs do not promise that discovery should be done forever. Instead,
* the user space monitors the status of MGMT discovering and it may
* request for discovery again when this flag becomes false.
*/
if (test_bit(HCI_INQUIRY, &hdev->flags)) {
/* Put this connection to "pending" state so that it will be
* executed after the inquiry cancel command complete event.
*/
conn->state = BT_CONNECT2;
hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
return;
}
conn->state = BT_CONNECT; conn->state = BT_CONNECT;
conn->out = true; conn->out = true;
conn->role = HCI_ROLE_MASTER; conn->role = HCI_ROLE_MASTER;
...@@ -276,6 +293,20 @@ static void hci_add_sco(struct hci_conn *conn, __u16 handle) ...@@ -276,6 +293,20 @@ static void hci_add_sco(struct hci_conn *conn, __u16 handle)
hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp); hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
} }
static bool find_next_esco_param(struct hci_conn *conn,
const struct sco_param *esco_param, int size)
{
for (; conn->attempt <= size; conn->attempt++) {
if (lmp_esco_2m_capable(conn->link) ||
(esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
break;
BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
conn, conn->attempt);
}
return conn->attempt <= size;
}
bool hci_setup_sync(struct hci_conn *conn, __u16 handle) bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
{ {
struct hci_dev *hdev = conn->hdev; struct hci_dev *hdev = conn->hdev;
...@@ -297,13 +328,15 @@ bool hci_setup_sync(struct hci_conn *conn, __u16 handle) ...@@ -297,13 +328,15 @@ bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
switch (conn->setting & SCO_AIRMODE_MASK) { switch (conn->setting & SCO_AIRMODE_MASK) {
case SCO_AIRMODE_TRANSP: case SCO_AIRMODE_TRANSP:
if (conn->attempt > ARRAY_SIZE(esco_param_msbc)) if (!find_next_esco_param(conn, esco_param_msbc,
ARRAY_SIZE(esco_param_msbc)))
return false; return false;
param = &esco_param_msbc[conn->attempt - 1]; param = &esco_param_msbc[conn->attempt - 1];
break; break;
case SCO_AIRMODE_CVSD: case SCO_AIRMODE_CVSD:
if (lmp_esco_capable(conn->link)) { if (lmp_esco_capable(conn->link)) {
if (conn->attempt > ARRAY_SIZE(esco_param_cvsd)) if (!find_next_esco_param(conn, esco_param_cvsd,
ARRAY_SIZE(esco_param_cvsd)))
return false; return false;
param = &esco_param_cvsd[conn->attempt - 1]; param = &esco_param_cvsd[conn->attempt - 1];
} else { } else {
......
...@@ -1362,8 +1362,10 @@ int hci_inquiry(void __user *arg) ...@@ -1362,8 +1362,10 @@ int hci_inquiry(void __user *arg)
* cleared). If it is interrupted by a signal, return -EINTR. * cleared). If it is interrupted by a signal, return -EINTR.
*/ */
if (wait_on_bit(&hdev->flags, HCI_INQUIRY, if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
TASK_INTERRUPTIBLE)) TASK_INTERRUPTIBLE)) {
return -EINTR; err = -EINTR;
goto done;
}
} }
/* for unlimited number of responses we will use buffer with /* for unlimited number of responses we will use buffer with
...@@ -3051,12 +3053,15 @@ void hci_adv_monitors_clear(struct hci_dev *hdev) ...@@ -3051,12 +3053,15 @@ void hci_adv_monitors_clear(struct hci_dev *hdev)
int handle; int handle;
idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
hci_free_adv_monitor(monitor); hci_free_adv_monitor(hdev, monitor);
idr_destroy(&hdev->adv_monitors_idr); idr_destroy(&hdev->adv_monitors_idr);
} }
void hci_free_adv_monitor(struct adv_monitor *monitor) /* Frees the monitor structure and do some bookkeepings.
* This function requires the caller holds hdev->lock.
*/
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{ {
struct adv_pattern *pattern; struct adv_pattern *pattern;
struct adv_pattern *tmp; struct adv_pattern *tmp;
...@@ -3064,68 +3069,167 @@ void hci_free_adv_monitor(struct adv_monitor *monitor) ...@@ -3064,68 +3069,167 @@ void hci_free_adv_monitor(struct adv_monitor *monitor)
if (!monitor) if (!monitor)
return; return;
list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
list_del(&pattern->list);
kfree(pattern); kfree(pattern);
}
if (monitor->handle)
idr_remove(&hdev->adv_monitors_idr, monitor->handle);
if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
hdev->adv_monitors_cnt--;
mgmt_adv_monitor_removed(hdev, monitor->handle);
}
kfree(monitor); kfree(monitor);
} }
/* This function requires the caller holds hdev->lock */ int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) {
return mgmt_add_adv_patterns_monitor_complete(hdev, status);
}
int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
{
return mgmt_remove_adv_monitor_complete(hdev, status);
}
/* Assigns handle to a monitor, and if offloading is supported and power is on,
* also attempts to forward the request to the controller.
* Returns true if request is forwarded (result is pending), false otherwise.
* This function requires the caller holds hdev->lock.
*/
bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
int *err)
{ {
int min, max, handle; int min, max, handle;
if (!monitor) *err = 0;
return -EINVAL;
if (!monitor) {
*err = -EINVAL;
return false;
}
min = HCI_MIN_ADV_MONITOR_HANDLE; min = HCI_MIN_ADV_MONITOR_HANDLE;
max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES; max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max, handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
GFP_KERNEL); GFP_KERNEL);
if (handle < 0) if (handle < 0) {
return handle; *err = handle;
return false;
}
hdev->adv_monitors_cnt++;
monitor->handle = handle; monitor->handle = handle;
if (!hdev_is_powered(hdev))
return false;
switch (hci_get_adv_monitor_offload_ext(hdev)) {
case HCI_ADV_MONITOR_EXT_NONE:
hci_update_background_scan(hdev); hci_update_background_scan(hdev);
bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
/* Message was not forwarded to controller - not an error */
return false;
case HCI_ADV_MONITOR_EXT_MSFT:
*err = msft_add_monitor_pattern(hdev, monitor);
bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name,
*err);
break;
}
return 0; return (*err == 0);
} }
static int free_adv_monitor(int id, void *ptr, void *data) /* Attempts to tell the controller and free the monitor. If somehow the
* controller doesn't have a corresponding handle, remove anyway.
* Returns true if request is forwarded (result is pending), false otherwise.
* This function requires the caller holds hdev->lock.
*/
static bool hci_remove_adv_monitor(struct hci_dev *hdev,
struct adv_monitor *monitor,
u16 handle, int *err)
{ {
struct hci_dev *hdev = data; *err = 0;
struct adv_monitor *monitor = ptr;
idr_remove(&hdev->adv_monitors_idr, monitor->handle); switch (hci_get_adv_monitor_offload_ext(hdev)) {
hci_free_adv_monitor(monitor); case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
hdev->adv_monitors_cnt--; goto free_monitor;
case HCI_ADV_MONITOR_EXT_MSFT:
*err = msft_remove_monitor(hdev, monitor, handle);
break;
}
return 0; /* In case no matching handle registered, just free the monitor */
if (*err == -ENOENT)
goto free_monitor;
return (*err == 0);
free_monitor:
if (*err == -ENOENT)
bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
monitor->handle);
hci_free_adv_monitor(hdev, monitor);
*err = 0;
return false;
} }
/* This function requires the caller holds hdev->lock */ /* Returns true if request is forwarded (result is pending), false otherwise.
int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle) * This function requires the caller holds hdev->lock.
*/
bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
{
struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
bool pending;
if (!monitor) {
*err = -EINVAL;
return false;
}
pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
if (!*err && !pending)
hci_update_background_scan(hdev);
bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
hdev->name, handle, *err, pending ? "" : "not ");
return pending;
}
/* Returns true if request is forwarded (result is pending), false otherwise.
* This function requires the caller holds hdev->lock.
*/
bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
{ {
struct adv_monitor *monitor; struct adv_monitor *monitor;
int idr_next_id = 0;
bool pending = false;
bool update = false;
*err = 0;
if (handle) { while (!*err && !pending) {
monitor = idr_find(&hdev->adv_monitors_idr, handle); monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
if (!monitor) if (!monitor)
return -ENOENT; break;
idr_remove(&hdev->adv_monitors_idr, monitor->handle); pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
hci_free_adv_monitor(monitor);
hdev->adv_monitors_cnt--; if (!*err && !pending)
} else { update = true;
/* Remove all monitors if handle is 0. */
idr_for_each(&hdev->adv_monitors_idr, &free_adv_monitor, hdev);
} }
if (update)
hci_update_background_scan(hdev); hci_update_background_scan(hdev);
return 0; bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
hdev->name, *err, pending ? "" : "not ");
return pending;
} }
/* This function requires the caller holds hdev->lock */ /* This function requires the caller holds hdev->lock */
...@@ -3134,6 +3238,14 @@ bool hci_is_adv_monitoring(struct hci_dev *hdev) ...@@ -3134,6 +3238,14 @@ bool hci_is_adv_monitoring(struct hci_dev *hdev)
return !idr_is_empty(&hdev->adv_monitors_idr); return !idr_is_empty(&hdev->adv_monitors_idr);
} }
int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
{
if (msft_monitor_supported(hdev))
return HCI_ADV_MONITOR_EXT_MSFT;
return HCI_ADV_MONITOR_EXT_NONE;
}
struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
bdaddr_t *bdaddr, u8 type) bdaddr_t *bdaddr, u8 type)
{ {
...@@ -3566,7 +3678,8 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action, ...@@ -3566,7 +3678,8 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
} }
/* Suspend notifier should only act on events when powered. */ /* Suspend notifier should only act on events when powered. */
if (!hdev_is_powered(hdev)) if (!hdev_is_powered(hdev) ||
hci_dev_test_flag(hdev, HCI_UNREGISTER))
goto done; goto done;
if (action == PM_SUSPEND_PREPARE) { if (action == PM_SUSPEND_PREPARE) {
...@@ -3827,10 +3940,12 @@ int hci_register_dev(struct hci_dev *hdev) ...@@ -3827,10 +3940,12 @@ int hci_register_dev(struct hci_dev *hdev)
hci_sock_dev_event(hdev, HCI_DEV_REG); hci_sock_dev_event(hdev, HCI_DEV_REG);
hci_dev_hold(hdev); hci_dev_hold(hdev);
if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
hdev->suspend_notifier.notifier_call = hci_suspend_notifier; hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
error = register_pm_notifier(&hdev->suspend_notifier); error = register_pm_notifier(&hdev->suspend_notifier);
if (error) if (error)
goto err_wqueue; goto err_wqueue;
}
queue_work(hdev->req_workqueue, &hdev->power_on); queue_work(hdev->req_workqueue, &hdev->power_on);
...@@ -3865,9 +3980,11 @@ void hci_unregister_dev(struct hci_dev *hdev) ...@@ -3865,9 +3980,11 @@ void hci_unregister_dev(struct hci_dev *hdev)
cancel_work_sync(&hdev->power_on); cancel_work_sync(&hdev->power_on);
if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
hci_suspend_clear_tasks(hdev); hci_suspend_clear_tasks(hdev);
unregister_pm_notifier(&hdev->suspend_notifier); unregister_pm_notifier(&hdev->suspend_notifier);
cancel_work_sync(&hdev->suspend_prepare); cancel_work_sync(&hdev->suspend_prepare);
}
hci_dev_do_close(hdev); hci_dev_do_close(hdev);
......
...@@ -237,7 +237,7 @@ static int conn_info_min_age_get(void *data, u64 *val) ...@@ -237,7 +237,7 @@ static int conn_info_min_age_get(void *data, u64 *val)
return 0; return 0;
} }
DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get, DEFINE_DEBUGFS_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
conn_info_min_age_set, "%llu\n"); conn_info_min_age_set, "%llu\n");
static int conn_info_max_age_set(void *data, u64 val) static int conn_info_max_age_set(void *data, u64 val)
...@@ -265,7 +265,7 @@ static int conn_info_max_age_get(void *data, u64 *val) ...@@ -265,7 +265,7 @@ static int conn_info_max_age_get(void *data, u64 *val)
return 0; return 0;
} }
DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get, DEFINE_DEBUGFS_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
conn_info_max_age_set, "%llu\n"); conn_info_max_age_set, "%llu\n");
static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf, static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
...@@ -419,7 +419,7 @@ static int voice_setting_get(void *data, u64 *val) ...@@ -419,7 +419,7 @@ static int voice_setting_get(void *data, u64 *val)
return 0; return 0;
} }
DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get, DEFINE_DEBUGFS_ATTRIBUTE(voice_setting_fops, voice_setting_get,
NULL, "0x%4.4llx\n"); NULL, "0x%4.4llx\n");
static ssize_t ssp_debug_mode_read(struct file *file, char __user *user_buf, static ssize_t ssp_debug_mode_read(struct file *file, char __user *user_buf,
...@@ -476,7 +476,7 @@ static int min_encrypt_key_size_get(void *data, u64 *val) ...@@ -476,7 +476,7 @@ static int min_encrypt_key_size_get(void *data, u64 *val)
return 0; return 0;
} }
DEFINE_SIMPLE_ATTRIBUTE(min_encrypt_key_size_fops, DEFINE_DEBUGFS_ATTRIBUTE(min_encrypt_key_size_fops,
min_encrypt_key_size_get, min_encrypt_key_size_get,
min_encrypt_key_size_set, "%llu\n"); min_encrypt_key_size_set, "%llu\n");
...@@ -491,7 +491,7 @@ static int auto_accept_delay_get(void *data, u64 *val) ...@@ -491,7 +491,7 @@ static int auto_accept_delay_get(void *data, u64 *val)
return 0; return 0;
} }
DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, DEFINE_DEBUGFS_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
auto_accept_delay_set, "%llu\n"); auto_accept_delay_set, "%llu\n");
static ssize_t force_bredr_smp_read(struct file *file, static ssize_t force_bredr_smp_read(struct file *file,
...@@ -558,7 +558,7 @@ static int idle_timeout_get(void *data, u64 *val) ...@@ -558,7 +558,7 @@ static int idle_timeout_get(void *data, u64 *val)
return 0; return 0;
} }
DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get, DEFINE_DEBUGFS_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
idle_timeout_set, "%llu\n"); idle_timeout_set, "%llu\n");
static int sniff_min_interval_set(void *data, u64 val) static int sniff_min_interval_set(void *data, u64 val)
...@@ -586,7 +586,7 @@ static int sniff_min_interval_get(void *data, u64 *val) ...@@ -586,7 +586,7 @@ static int sniff_min_interval_get(void *data, u64 *val)
return 0; return 0;
} }
DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get, DEFINE_DEBUGFS_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
sniff_min_interval_set, "%llu\n"); sniff_min_interval_set, "%llu\n");
static int sniff_max_interval_set(void *data, u64 val) static int sniff_max_interval_set(void *data, u64 val)
...@@ -614,7 +614,7 @@ static int sniff_max_interval_get(void *data, u64 *val) ...@@ -614,7 +614,7 @@ static int sniff_max_interval_get(void *data, u64 *val)
return 0; return 0;
} }
DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get, DEFINE_DEBUGFS_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
sniff_max_interval_set, "%llu\n"); sniff_max_interval_set, "%llu\n");
void hci_debugfs_create_bredr(struct hci_dev *hdev) void hci_debugfs_create_bredr(struct hci_dev *hdev)
...@@ -706,7 +706,7 @@ static int rpa_timeout_get(void *data, u64 *val) ...@@ -706,7 +706,7 @@ static int rpa_timeout_get(void *data, u64 *val)
return 0; return 0;
} }
DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get, DEFINE_DEBUGFS_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
rpa_timeout_set, "%llu\n"); rpa_timeout_set, "%llu\n");
static int random_address_show(struct seq_file *f, void *p) static int random_address_show(struct seq_file *f, void *p)
...@@ -869,7 +869,7 @@ static int conn_min_interval_get(void *data, u64 *val) ...@@ -869,7 +869,7 @@ static int conn_min_interval_get(void *data, u64 *val)
return 0; return 0;
} }
DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get, DEFINE_DEBUGFS_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
conn_min_interval_set, "%llu\n"); conn_min_interval_set, "%llu\n");
static int conn_max_interval_set(void *data, u64 val) static int conn_max_interval_set(void *data, u64 val)
...@@ -897,7 +897,7 @@ static int conn_max_interval_get(void *data, u64 *val) ...@@ -897,7 +897,7 @@ static int conn_max_interval_get(void *data, u64 *val)
return 0; return 0;
} }
DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get, DEFINE_DEBUGFS_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
conn_max_interval_set, "%llu\n"); conn_max_interval_set, "%llu\n");
static int conn_latency_set(void *data, u64 val) static int conn_latency_set(void *data, u64 val)
...@@ -925,7 +925,7 @@ static int conn_latency_get(void *data, u64 *val) ...@@ -925,7 +925,7 @@ static int conn_latency_get(void *data, u64 *val)
return 0; return 0;
} }
DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get, DEFINE_DEBUGFS_ATTRIBUTE(conn_latency_fops, conn_latency_get,
conn_latency_set, "%llu\n"); conn_latency_set, "%llu\n");
static int supervision_timeout_set(void *data, u64 val) static int supervision_timeout_set(void *data, u64 val)
...@@ -953,7 +953,7 @@ static int supervision_timeout_get(void *data, u64 *val) ...@@ -953,7 +953,7 @@ static int supervision_timeout_get(void *data, u64 *val)
return 0; return 0;
} }
DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get, DEFINE_DEBUGFS_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
supervision_timeout_set, "%llu\n"); supervision_timeout_set, "%llu\n");
static int adv_channel_map_set(void *data, u64 val) static int adv_channel_map_set(void *data, u64 val)
...@@ -981,7 +981,7 @@ static int adv_channel_map_get(void *data, u64 *val) ...@@ -981,7 +981,7 @@ static int adv_channel_map_get(void *data, u64 *val)
return 0; return 0;
} }
DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get, DEFINE_DEBUGFS_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
adv_channel_map_set, "%llu\n"); adv_channel_map_set, "%llu\n");
static int adv_min_interval_set(void *data, u64 val) static int adv_min_interval_set(void *data, u64 val)
...@@ -1009,7 +1009,7 @@ static int adv_min_interval_get(void *data, u64 *val) ...@@ -1009,7 +1009,7 @@ static int adv_min_interval_get(void *data, u64 *val)
return 0; return 0;
} }
DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get, DEFINE_DEBUGFS_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
adv_min_interval_set, "%llu\n"); adv_min_interval_set, "%llu\n");
static int adv_max_interval_set(void *data, u64 val) static int adv_max_interval_set(void *data, u64 val)
...@@ -1037,7 +1037,7 @@ static int adv_max_interval_get(void *data, u64 *val) ...@@ -1037,7 +1037,7 @@ static int adv_max_interval_get(void *data, u64 *val)
return 0; return 0;
} }
DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get, DEFINE_DEBUGFS_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
adv_max_interval_set, "%llu\n"); adv_max_interval_set, "%llu\n");
static int min_key_size_set(void *data, u64 val) static int min_key_size_set(void *data, u64 val)
...@@ -1065,7 +1065,7 @@ static int min_key_size_get(void *data, u64 *val) ...@@ -1065,7 +1065,7 @@ static int min_key_size_get(void *data, u64 *val)
return 0; return 0;
} }
DEFINE_SIMPLE_ATTRIBUTE(min_key_size_fops, min_key_size_get, DEFINE_DEBUGFS_ATTRIBUTE(min_key_size_fops, min_key_size_get,
min_key_size_set, "%llu\n"); min_key_size_set, "%llu\n");
static int max_key_size_set(void *data, u64 val) static int max_key_size_set(void *data, u64 val)
...@@ -1093,7 +1093,7 @@ static int max_key_size_get(void *data, u64 *val) ...@@ -1093,7 +1093,7 @@ static int max_key_size_get(void *data, u64 *val)
return 0; return 0;
} }
DEFINE_SIMPLE_ATTRIBUTE(max_key_size_fops, max_key_size_get, DEFINE_DEBUGFS_ATTRIBUTE(max_key_size_fops, max_key_size_get,
max_key_size_set, "%llu\n"); max_key_size_set, "%llu\n");
static int auth_payload_timeout_set(void *data, u64 val) static int auth_payload_timeout_set(void *data, u64 val)
...@@ -1121,7 +1121,7 @@ static int auth_payload_timeout_get(void *data, u64 *val) ...@@ -1121,7 +1121,7 @@ static int auth_payload_timeout_get(void *data, u64 *val)
return 0; return 0;
} }
DEFINE_SIMPLE_ATTRIBUTE(auth_payload_timeout_fops, DEFINE_DEBUGFS_ATTRIBUTE(auth_payload_timeout_fops,
auth_payload_timeout_get, auth_payload_timeout_get,
auth_payload_timeout_set, "%llu\n"); auth_payload_timeout_set, "%llu\n");
......
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
#include "smp.h" #include "smp.h"
#include "hci_request.h" #include "hci_request.h"
#include "msft.h"
#define HCI_REQ_DONE 0 #define HCI_REQ_DONE 0
#define HCI_REQ_PEND 1 #define HCI_REQ_PEND 1
...@@ -404,13 +405,18 @@ static void cancel_interleave_scan(struct hci_dev *hdev) ...@@ -404,13 +405,18 @@ static void cancel_interleave_scan(struct hci_dev *hdev)
*/ */
static bool __hci_update_interleaved_scan(struct hci_dev *hdev) static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
{ {
/* If there is at least one ADV monitors and one pending LE connection /* Do interleaved scan only if all of the following are true:
* or one device to be scanned for, we should alternate between * - There is at least one ADV monitor
* allowlist scan and one without any filters to save power. * - At least one pending LE connection or one device to be scanned for
* - Monitor offloading is not supported
* If so, we should alternate between allowlist scan and one without
* any filters to save power.
*/ */
bool use_interleaving = hci_is_adv_monitoring(hdev) && bool use_interleaving = hci_is_adv_monitoring(hdev) &&
!(list_empty(&hdev->pend_le_conns) && !(list_empty(&hdev->pend_le_conns) &&
list_empty(&hdev->pend_le_reports)); list_empty(&hdev->pend_le_reports)) &&
hci_get_adv_monitor_offload_ext(hdev) ==
HCI_ADV_MONITOR_EXT_NONE;
bool is_interleaving = is_interleave_scanning(hdev); bool is_interleaving = is_interleave_scanning(hdev);
if (use_interleaving && !is_interleaving) { if (use_interleaving && !is_interleaving) {
...@@ -899,14 +905,11 @@ static u8 update_white_list(struct hci_request *req) ...@@ -899,14 +905,11 @@ static u8 update_white_list(struct hci_request *req)
/* Use the allowlist unless the following conditions are all true: /* Use the allowlist unless the following conditions are all true:
* - We are not currently suspending * - We are not currently suspending
* - There are 1 or more ADV monitors registered * - There are 1 or more ADV monitors registered and it's not offloaded
* - Interleaved scanning is not currently using the allowlist * - Interleaved scanning is not currently using the allowlist
*
* Once the controller offloading of advertisement monitor is in place,
* the above condition should include the support of MSFT extension
* support.
*/ */
if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended && if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST) hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
return 0x00; return 0x00;
...@@ -1087,6 +1090,8 @@ void hci_req_add_le_passive_scan(struct hci_request *req) ...@@ -1087,6 +1090,8 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
if (hdev->suspended) { if (hdev->suspended) {
window = hdev->le_scan_window_suspend; window = hdev->le_scan_window_suspend;
interval = hdev->le_scan_int_suspend; interval = hdev->le_scan_int_suspend;
set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
} else if (hci_is_le_conn_scanning(hdev)) { } else if (hci_is_le_conn_scanning(hdev)) {
window = hdev->le_scan_window_connect; window = hdev->le_scan_window_connect;
interval = hdev->le_scan_int_connect; interval = hdev->le_scan_int_connect;
...@@ -1170,19 +1175,6 @@ static void hci_req_set_event_filter(struct hci_request *req) ...@@ -1170,19 +1175,6 @@ static void hci_req_set_event_filter(struct hci_request *req)
hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
} }
static void hci_req_config_le_suspend_scan(struct hci_request *req)
{
/* Before changing params disable scan if enabled */
if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
hci_req_add_le_scan_disable(req, false);
/* Configure params and enable scanning */
hci_req_add_le_passive_scan(req);
/* Block suspend notifier on response */
set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
}
static void cancel_adv_timeout(struct hci_dev *hdev) static void cancel_adv_timeout(struct hci_dev *hdev)
{ {
if (hdev->adv_instance_timeout) { if (hdev->adv_instance_timeout) {
...@@ -1245,12 +1237,37 @@ static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode) ...@@ -1245,12 +1237,37 @@ static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{ {
bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode, bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
status); status);
if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) || if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) { test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
wake_up(&hdev->suspend_wait_q);
}
if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
wake_up(&hdev->suspend_wait_q); wake_up(&hdev->suspend_wait_q);
} }
} }
static void hci_req_add_set_adv_filter_enable(struct hci_request *req,
bool enable)
{
struct hci_dev *hdev = req->hdev;
switch (hci_get_adv_monitor_offload_ext(hdev)) {
case HCI_ADV_MONITOR_EXT_MSFT:
msft_req_add_set_filter_enable(req, enable);
break;
default:
return;
}
/* No need to block when enabling since it's on resume path */
if (hdev->suspended && !enable)
set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
}
/* Call with hci_dev_lock */ /* Call with hci_dev_lock */
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next) void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
{ {
...@@ -1308,6 +1325,9 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next) ...@@ -1308,6 +1325,9 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
hci_req_add_le_scan_disable(&req, false); hci_req_add_le_scan_disable(&req, false);
} }
/* Disable advertisement filters */
hci_req_add_set_adv_filter_enable(&req, false);
/* Mark task needing completion */ /* Mark task needing completion */
set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
...@@ -1336,7 +1356,7 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next) ...@@ -1336,7 +1356,7 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
/* Enable event filter for paired devices */ /* Enable event filter for paired devices */
hci_req_set_event_filter(&req); hci_req_set_event_filter(&req);
/* Enable passive scan at lower duty cycle */ /* Enable passive scan at lower duty cycle */
hci_req_config_le_suspend_scan(&req); __hci_update_background_scan(&req);
/* Pause scan changes again. */ /* Pause scan changes again. */
hdev->scanning_paused = true; hdev->scanning_paused = true;
hci_req_run(&req, suspend_req_complete); hci_req_run(&req, suspend_req_complete);
...@@ -1346,7 +1366,9 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next) ...@@ -1346,7 +1366,9 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
hci_req_clear_event_filter(&req); hci_req_clear_event_filter(&req);
/* Reset passive/background scanning to normal */ /* Reset passive/background scanning to normal */
hci_req_config_le_suspend_scan(&req); __hci_update_background_scan(&req);
/* Enable all of the advertisement filters */
hci_req_add_set_adv_filter_enable(&req, true);
/* Unpause directed advertising */ /* Unpause directed advertising */
hdev->advertising_paused = false; hdev->advertising_paused = false;
......
...@@ -4519,6 +4519,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, ...@@ -4519,6 +4519,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
} }
goto done; goto done;
case L2CAP_CONF_UNKNOWN:
case L2CAP_CONF_UNACCEPT: case L2CAP_CONF_UNACCEPT:
if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) { if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
char req[64]; char req[64];
...@@ -8276,10 +8277,73 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) ...@@ -8276,10 +8277,73 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
mutex_unlock(&conn->chan_lock); mutex_unlock(&conn->chan_lock);
} }
/* Append fragment into frame respecting the maximum len of rx_skb */
static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
u16 len)
{
if (!conn->rx_skb) {
/* Allocate skb for the complete frame (with header) */
conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
if (!conn->rx_skb)
return -ENOMEM;
/* Init rx_len */
conn->rx_len = len;
}
/* Copy as much as the rx_skb can hold */
len = min_t(u16, len, skb->len);
skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
skb_pull(skb, len);
conn->rx_len -= len;
return len;
}
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct sk_buff *rx_skb;
int len;
/* Append just enough to complete the header */
len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);
/* If header could not be read just continue */
if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
return len;
rx_skb = conn->rx_skb;
len = get_unaligned_le16(rx_skb->data);
/* Check if rx_skb has enough space to received all fragments */
if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
/* Update expected len */
conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
return L2CAP_LEN_SIZE;
}
/* Reset conn->rx_skb since it will need to be reallocated in order to
* fit all fragments.
*/
conn->rx_skb = NULL;
/* Reallocates rx_skb using the exact expected length */
len = l2cap_recv_frag(conn, rx_skb,
len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
kfree_skb(rx_skb);
return len;
}
static void l2cap_recv_reset(struct l2cap_conn *conn)
{
kfree_skb(conn->rx_skb);
conn->rx_skb = NULL;
conn->rx_len = 0;
}
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{ {
struct l2cap_conn *conn = hcon->l2cap_data; struct l2cap_conn *conn = hcon->l2cap_data;
struct l2cap_hdr *hdr;
int len; int len;
/* For AMP controller do not create l2cap conn */ /* For AMP controller do not create l2cap conn */
...@@ -8298,23 +8362,23 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) ...@@ -8298,23 +8362,23 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
case ACL_START: case ACL_START:
case ACL_START_NO_FLUSH: case ACL_START_NO_FLUSH:
case ACL_COMPLETE: case ACL_COMPLETE:
if (conn->rx_len) { if (conn->rx_skb) {
BT_ERR("Unexpected start frame (len %d)", skb->len); BT_ERR("Unexpected start frame (len %d)", skb->len);
kfree_skb(conn->rx_skb); l2cap_recv_reset(conn);
conn->rx_skb = NULL;
conn->rx_len = 0;
l2cap_conn_unreliable(conn, ECOMM); l2cap_conn_unreliable(conn, ECOMM);
} }
/* Start fragment always begin with Basic L2CAP header */ /* Start fragment may not contain the L2CAP length so just
if (skb->len < L2CAP_HDR_SIZE) { * copy the initial byte when that happens and use conn->mtu as
BT_ERR("Frame is too short (len %d)", skb->len); * expected length.
l2cap_conn_unreliable(conn, ECOMM); */
if (skb->len < L2CAP_LEN_SIZE) {
if (l2cap_recv_frag(conn, skb, conn->mtu) < 0)
goto drop; goto drop;
return;
} }
hdr = (struct l2cap_hdr *) skb->data; len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;
len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
if (len == skb->len) { if (len == skb->len) {
/* Complete frame received */ /* Complete frame received */
...@@ -8331,38 +8395,43 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) ...@@ -8331,38 +8395,43 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
goto drop; goto drop;
} }
/* Allocate skb for the complete frame (with header) */ /* Append fragment into frame (with header) */
conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL); if (l2cap_recv_frag(conn, skb, len) < 0)
if (!conn->rx_skb)
goto drop; goto drop;
skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
skb->len);
conn->rx_len = len - skb->len;
break; break;
case ACL_CONT: case ACL_CONT:
BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len); BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
if (!conn->rx_len) { if (!conn->rx_skb) {
BT_ERR("Unexpected continuation frame (len %d)", skb->len); BT_ERR("Unexpected continuation frame (len %d)", skb->len);
l2cap_conn_unreliable(conn, ECOMM); l2cap_conn_unreliable(conn, ECOMM);
goto drop; goto drop;
} }
/* Complete the L2CAP length if it has not been read */
if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
if (l2cap_recv_len(conn, skb) < 0) {
l2cap_conn_unreliable(conn, ECOMM);
goto drop;
}
/* Header still could not be read just continue */
if (conn->rx_skb->len < L2CAP_LEN_SIZE)
return;
}
if (skb->len > conn->rx_len) { if (skb->len > conn->rx_len) {
BT_ERR("Fragment is too long (len %d, expected %d)", BT_ERR("Fragment is too long (len %d, expected %d)",
skb->len, conn->rx_len); skb->len, conn->rx_len);
kfree_skb(conn->rx_skb); l2cap_recv_reset(conn);
conn->rx_skb = NULL;
conn->rx_len = 0;
l2cap_conn_unreliable(conn, ECOMM); l2cap_conn_unreliable(conn, ECOMM);
goto drop; goto drop;
} }
skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), /* Append fragment into frame (with header) */
skb->len); l2cap_recv_frag(conn, skb, skb->len);
conn->rx_len -= skb->len;
if (!conn->rx_len) { if (!conn->rx_len) {
/* Complete frame received. l2cap_recv_frame /* Complete frame received. l2cap_recv_frame
......
This diff is collapsed.
This diff is collapsed.
...@@ -12,16 +12,46 @@ ...@@ -12,16 +12,46 @@
#if IS_ENABLED(CONFIG_BT_MSFTEXT) #if IS_ENABLED(CONFIG_BT_MSFTEXT)
bool msft_monitor_supported(struct hci_dev *hdev);
void msft_do_open(struct hci_dev *hdev); void msft_do_open(struct hci_dev *hdev);
void msft_do_close(struct hci_dev *hdev); void msft_do_close(struct hci_dev *hdev);
void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb); void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb);
__u64 msft_get_features(struct hci_dev *hdev); __u64 msft_get_features(struct hci_dev *hdev);
int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor);
int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
u16 handle);
void msft_req_add_set_filter_enable(struct hci_request *req, bool enable);
int msft_set_filter_enable(struct hci_dev *hdev, bool enable);
#else #else
static inline bool msft_monitor_supported(struct hci_dev *hdev)
{
return false;
}
static inline void msft_do_open(struct hci_dev *hdev) {} static inline void msft_do_open(struct hci_dev *hdev) {}
static inline void msft_do_close(struct hci_dev *hdev) {} static inline void msft_do_close(struct hci_dev *hdev) {}
static inline void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb) {} static inline void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb) {}
static inline __u64 msft_get_features(struct hci_dev *hdev) { return 0; } static inline __u64 msft_get_features(struct hci_dev *hdev) { return 0; }
static inline int msft_add_monitor_pattern(struct hci_dev *hdev,
struct adv_monitor *monitor)
{
return -EOPNOTSUPP;
}
static inline int msft_remove_monitor(struct hci_dev *hdev,
struct adv_monitor *monitor,
u16 handle)
{
return -EOPNOTSUPP;
}
static inline void msft_req_add_set_filter_enable(struct hci_request *req,
bool enable) {}
static inline int msft_set_filter_enable(struct hci_dev *hdev, bool enable)
{
return -EOPNOTSUPP;
}
#endif #endif
...@@ -25,7 +25,6 @@ ...@@ -25,7 +25,6 @@
#include <linux/crypto.h> #include <linux/crypto.h>
#include <crypto/aes.h> #include <crypto/aes.h>
#include <crypto/algapi.h> #include <crypto/algapi.h>
#include <crypto/b128ops.h>
#include <crypto/hash.h> #include <crypto/hash.h>
#include <crypto/kpp.h> #include <crypto/kpp.h>
...@@ -425,7 +424,7 @@ static int smp_c1(const u8 k[16], ...@@ -425,7 +424,7 @@ static int smp_c1(const u8 k[16],
SMP_DBG("p1 %16phN", p1); SMP_DBG("p1 %16phN", p1);
/* res = r XOR p1 */ /* res = r XOR p1 */
u128_xor((u128 *) res, (u128 *) r, (u128 *) p1); crypto_xor_cpy(res, r, p1, sizeof(p1));
/* res = e(k, res) */ /* res = e(k, res) */
err = smp_e(k, res); err = smp_e(k, res);
...@@ -442,7 +441,7 @@ static int smp_c1(const u8 k[16], ...@@ -442,7 +441,7 @@ static int smp_c1(const u8 k[16],
SMP_DBG("p2 %16phN", p2); SMP_DBG("p2 %16phN", p2);
/* res = res XOR p2 */ /* res = res XOR p2 */
u128_xor((u128 *) res, (u128 *) res, (u128 *) p2); crypto_xor(res, p2, sizeof(p2));
/* res = e(k, res) */ /* res = e(k, res) */
err = smp_e(k, res); err = smp_e(k, res);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment