Commit a72c9512 authored by David S. Miller

Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Johan Hedberg says:

====================
pull request: bluetooth-next 2015-10-22

Here's probably the last bluetooth-next pull request for 4.4. Among
several other changes it contains the rest of the fixes & cleanups from
the Bluetooth UnplugFest (that didn't need to be hurried to 4.3).

 - Refactoring & cleanups to 6lowpan code
 - New USB ids for two Atheros controllers and BCM43142A0 from Broadcom
 - Fix (quirk) for broken Broadcom BCM2045 controllers
 - Support for latest Apple controllers
 - Improvements to the vendor diagnostic message support

Please let me know if there are any issues pulling. Thanks.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 379a80a1 13972adc
@@ -183,7 +183,7 @@ config BT_HCIBCM203X
 config BT_HCIBPA10X
     tristate "HCI BPA10x USB driver"
-    depends on USB
+    depends on USB && BT_HCIUART
     select BT_HCIUART_H4
     help
       Bluetooth HCI BPA10x USB driver.
......
@@ -93,6 +93,7 @@ static const struct usb_device_id ath3k_table[] = {
    { USB_DEVICE(0x04CA, 0x300f) },
    { USB_DEVICE(0x04CA, 0x3010) },
    { USB_DEVICE(0x0930, 0x0219) },
+   { USB_DEVICE(0x0930, 0x021c) },
    { USB_DEVICE(0x0930, 0x0220) },
    { USB_DEVICE(0x0930, 0x0227) },
    { USB_DEVICE(0x0b05, 0x17d0) },
@@ -104,6 +105,7 @@ static const struct usb_device_id ath3k_table[] = {
    { USB_DEVICE(0x0CF3, 0x311F) },
    { USB_DEVICE(0x0cf3, 0x3121) },
    { USB_DEVICE(0x0CF3, 0x817a) },
+   { USB_DEVICE(0x0CF3, 0x817b) },
    { USB_DEVICE(0x0cf3, 0xe003) },
    { USB_DEVICE(0x0CF3, 0xE004) },
    { USB_DEVICE(0x0CF3, 0xE005) },
@@ -153,6 +155,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
    { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+   { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
@@ -164,6 +167,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
    { USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
+   { USB_DEVICE(0x0CF3, 0x817b), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },
......
@@ -323,7 +323,7 @@ int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len)
    }

    BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
-       hw_name ? : "BCM", (subver & 0x7000) >> 13,
+       hw_name ? : "BCM", (subver & 0xe000) >> 13,
        (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);

    return 0;
@@ -353,7 +353,7 @@ int btbcm_finalize(struct hci_dev *hdev)
    kfree_skb(skb);

    BT_INFO("%s: BCM (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
-       (subver & 0x7000) >> 13, (subver & 0x1f00) >> 8,
+       (subver & 0xe000) >> 13, (subver & 0x1f00) >> 8,
        (subver & 0x00ff), rev & 0x0fff);

    btbcm_check_bdaddr(hdev);
@@ -461,7 +461,7 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
    }

    BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
-       hw_name ? : "BCM", (subver & 0x7000) >> 13,
+       hw_name ? : "BCM", (subver & 0xe000) >> 13,
        (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);

    err = request_firmware(&fw, fw_name, &hdev->dev);
@@ -490,7 +490,7 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
    kfree_skb(skb);

    BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
-       hw_name ? : "BCM", (subver & 0x7000) >> 13,
+       hw_name ? : "BCM", (subver & 0xe000) >> 13,
        (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);

    /* Read Local Name */
@@ -527,6 +527,15 @@ int btbcm_setup_apple(struct hci_dev *hdev)
        kfree_skb(skb);
    }

+   /* Read USB Product Info */
+   skb = btbcm_read_usb_product(hdev);
+   if (!IS_ERR(skb)) {
+       BT_INFO("%s: BCM: product %4.4x:%4.4x", hdev->name,
+           get_unaligned_le16(skb->data + 1),
+           get_unaligned_le16(skb->data + 3));
+       kfree_skb(skb);
+   }
+
    /* Read Local Name */
    skb = btbcm_read_local_name(hdev);
    if (!IS_ERR(skb)) {
......
@@ -91,6 +91,75 @@ int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
}
EXPORT_SYMBOL_GPL(btintel_set_bdaddr);
int btintel_set_diag(struct hci_dev *hdev, bool enable)
{
struct sk_buff *skb;
u8 param[3];
int err;
if (enable) {
param[0] = 0x03;
param[1] = 0x03;
param[2] = 0x03;
} else {
param[0] = 0x00;
param[1] = 0x00;
param[2] = 0x00;
}
skb = __hci_cmd_sync(hdev, 0xfc43, 3, param, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
if (err == -ENODATA)
goto done;
BT_ERR("%s: Changing Intel diagnostic mode failed (%d)",
hdev->name, err);
return err;
}
kfree_skb(skb);
done:
btintel_set_event_mask(hdev, enable);
return 0;
}
EXPORT_SYMBOL_GPL(btintel_set_diag);
int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable)
{
struct sk_buff *skb;
u8 param[2];
int err;
param[0] = 0x01;
param[1] = 0x00;
skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
BT_ERR("%s: Entering Intel manufacturer mode failed (%d)",
hdev->name, err);
return PTR_ERR(skb);
}
kfree_skb(skb);
err = btintel_set_diag(hdev, enable);
param[0] = 0x00;
param[1] = 0x00;
skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
BT_ERR("%s: Leaving Intel manufacturer mode failed (%d)",
hdev->name, err);
return PTR_ERR(skb);
}
kfree_skb(skb);
return err;
}
EXPORT_SYMBOL_GPL(btintel_set_diag_mfg);
void btintel_hw_error(struct hci_dev *hdev, u8 code)
{
    struct sk_buff *skb;
@@ -216,6 +285,64 @@ int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name)
}
EXPORT_SYMBOL_GPL(btintel_load_ddc_config);
int btintel_set_event_mask(struct hci_dev *hdev, bool debug)
{
u8 mask[8] = { 0x87, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
struct sk_buff *skb;
int err;
if (debug)
mask[1] |= 0x62;
skb = __hci_cmd_sync(hdev, 0xfc52, 8, mask, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
BT_ERR("%s: Setting Intel event mask failed (%d)",
hdev->name, err);
return err;
}
kfree_skb(skb);
return 0;
}
EXPORT_SYMBOL_GPL(btintel_set_event_mask);
int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug)
{
struct sk_buff *skb;
u8 param[2];
int err;
param[0] = 0x01;
param[1] = 0x00;
skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
BT_ERR("%s: Entering Intel manufacturer mode failed (%d)",
hdev->name, err);
return PTR_ERR(skb);
}
kfree_skb(skb);
err = btintel_set_event_mask(hdev, debug);
param[0] = 0x00;
param[1] = 0x00;
skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
BT_ERR("%s: Leaving Intel manufacturer mode failed (%d)",
hdev->name, err);
return PTR_ERR(skb);
}
kfree_skb(skb);
return err;
}
EXPORT_SYMBOL_GPL(btintel_set_event_mask_mfg);
/* ------- REGMAP IBT SUPPORT ------- */

#define IBT_REG_MODE_8BIT 0x00
......
@@ -73,12 +73,16 @@ struct intel_secure_send_result {
int btintel_check_bdaddr(struct hci_dev *hdev);
int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int btintel_set_diag(struct hci_dev *hdev, bool enable);
int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable);
void btintel_hw_error(struct hci_dev *hdev, u8 code);

void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver);
int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
            const void *param);
int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name);
int btintel_set_event_mask(struct hci_dev *hdev, bool debug);
int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug);

struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
                   u16 opcode_write);
@@ -95,6 +99,16 @@ static inline int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
    return -EOPNOTSUPP;
}

static inline int btintel_set_diag(struct hci_dev *hdev, bool enable)
{
    return -EOPNOTSUPP;
}

static inline int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable)
{
    return -EOPNOTSUPP;
}

static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
{
}
@@ -116,6 +130,16 @@ static inline int btintel_load_ddc_config(struct hci_dev *hdev,
    return -EOPNOTSUPP;
}

static inline int btintel_set_event_mask(struct hci_dev *hdev, bool debug)
{
    return -EOPNOTSUPP;
}

static inline int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug)
{
    return -EOPNOTSUPP;
}

static inline struct regmap *btintel_regmap_init(struct hci_dev *hdev,
                         u16 opcode_read,
                         u16 opcode_write)
......
This diff is collapsed.
@@ -243,6 +243,7 @@ static struct sk_buff *ath_dequeue(struct hci_uart *hu)
static const struct hci_uart_proto athp = {
    .id = HCI_UART_ATH3K,
    .name = "ATH3K",
+   .manufacturer = 69,
    .open = ath_open,
    .close = ath_close,
    .flush = ath_flush,
......
@@ -259,8 +259,8 @@ static int bcm_set_diag(struct hci_dev *hdev, bool enable)
        return -ENETDOWN;

    skb = bt_skb_alloc(3, GFP_KERNEL);
-   if (IS_ERR(skb))
-       return PTR_ERR(skb);
+   if (!skb)
+       return -ENOMEM;

    *skb_put(skb, 1) = BCM_LM_DIAG_PKT;
    *skb_put(skb, 1) = 0xf0;
@@ -799,6 +799,7 @@ static int bcm_remove(struct platform_device *pdev)
static const struct hci_uart_proto bcm_proto = {
    .id = HCI_UART_BCM,
    .name = "BCM",
+   .manufacturer = 15,
    .init_speed = 115200,
    .oper_speed = 4000000,
    .open = bcm_open,
......
@@ -557,6 +557,7 @@ static int intel_setup(struct hci_uart *hu)
    bt_dev_dbg(hdev, "start intel_setup");

+   hu->hdev->set_diag = btintel_set_diag;
    hu->hdev->set_bdaddr = btintel_set_bdaddr;

    calltime = ktime_get();
@@ -1147,6 +1148,7 @@ static struct sk_buff *intel_dequeue(struct hci_uart *hu)
static const struct hci_uart_proto intel_proto = {
    .id = HCI_UART_INTEL,
    .name = "Intel",
+   .manufacturer = 2,
    .init_speed = 115200,
    .oper_speed = 3000000,
    .open = intel_open,
......
@@ -587,6 +587,13 @@ static int hci_uart_register_dev(struct hci_uart *hu)
    hdev->bus = HCI_UART;
    hci_set_drvdata(hdev, hu);
/* Only when vendor specific setup callback is provided, consider
* the manufacturer information valid. This avoids filling in the
* value for Ericsson when nothing is specified.
*/
if (hu->proto->setup)
hdev->manufacturer = hu->proto->manufacturer;
    hdev->open = hci_uart_open;
    hdev->close = hci_uart_close;
    hdev->flush = hci_uart_flush;
......
@@ -947,6 +947,7 @@ static int qca_setup(struct hci_uart *hu)
static struct hci_uart_proto qca_proto = {
    .id = HCI_UART_QCA,
    .name = "QCA",
+   .manufacturer = 29,
    .init_speed = 115200,
    .oper_speed = 3000000,
    .open = qca_open,
......
@@ -59,6 +59,7 @@ struct hci_uart;
struct hci_uart_proto {
    unsigned int id;
    const char *name;
+   unsigned int manufacturer;
    unsigned int init_speed;
    unsigned int oper_speed;
    int (*open)(struct hci_uart *hu);
......
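For orientation, here is a minimal sketch (not part of this commit; the "foo" driver, its callbacks and its protocol id are hypothetical) of how a line-discipline protocol driver would fill in the new manufacturer field; hci_uart_register_dev() above only copies it into hdev->manufacturer when a setup callback is provided:

    static const struct hci_uart_proto foo_proto = {
        .id           = HCI_UART_FOO,   /* hypothetical protocol id */
        .name         = "FOO",
        .manufacturer = 1234,           /* Bluetooth SIG company identifier */
        .init_speed   = 115200,
        .open         = foo_open,
        .close        = foo_close,
        .flush        = foo_flush,
        .setup        = foo_setup,      /* presence makes the manufacturer value valid */
    };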
@@ -56,85 +56,23 @@
#include <net/ipv6.h>
#include <net/net_namespace.h>
#define UIP_802154_SHORTADDR_LEN 2 /* compressed ipv6 address length */
#define UIP_IPH_LEN 40 /* ipv6 fixed header size */
#define UIP_PROTO_UDP 17 /* ipv6 next header value for UDP */
#define UIP_FRAGH_LEN 8 /* ipv6 fragment header size */
#define EUI64_ADDR_LEN 8

#define LOWPAN_NHC_MAX_ID_LEN 1
/* Maximum next header compression length which we currently support inclusive
* possible inline data.
*/
#define LOWPAN_NHC_MAX_HDR_LEN (sizeof(struct udphdr))
/* Max IPHC Header len without IPv6 hdr specific inline data.
 * Useful for getting the "extra" bytes we need at worst case compression.
 *
 * LOWPAN_IPHC + CID + LOWPAN_NHC_MAX_ID_LEN
 */
#define LOWPAN_IPHC_MAX_HEADER_LEN (2 + 1 + LOWPAN_NHC_MAX_ID_LEN)
+/* Maximum worst case IPHC header buffer size */
+#define LOWPAN_IPHC_MAX_HC_BUF_LEN (sizeof(struct ipv6hdr) + \
+                    LOWPAN_IPHC_MAX_HEADER_LEN + \
+                    LOWPAN_NHC_MAX_HDR_LEN)

-/*
- * ipv6 address based on mac
- * second bit-flip (Universe/Local) is done according RFC2464
- */
#define is_addr_mac_addr_based(a, m) \
((((a)->s6_addr[8]) == (((m)[0]) ^ 0x02)) && \
(((a)->s6_addr[9]) == (m)[1]) && \
(((a)->s6_addr[10]) == (m)[2]) && \
(((a)->s6_addr[11]) == (m)[3]) && \
(((a)->s6_addr[12]) == (m)[4]) && \
(((a)->s6_addr[13]) == (m)[5]) && \
(((a)->s6_addr[14]) == (m)[6]) && \
(((a)->s6_addr[15]) == (m)[7]))
/*
* check whether we can compress the IID to 16 bits,
* it's possible for unicast adresses with first 49 bits are zero only.
*/
#define lowpan_is_iid_16_bit_compressable(a) \
((((a)->s6_addr16[4]) == 0) && \
(((a)->s6_addr[10]) == 0) && \
(((a)->s6_addr[11]) == 0xff) && \
(((a)->s6_addr[12]) == 0xfe) && \
(((a)->s6_addr[13]) == 0))
/* check whether the 112-bit gid of the multicast address is mappable to: */
/* 48 bits, FFXX::00XX:XXXX:XXXX */
#define lowpan_is_mcast_addr_compressable48(a) \
((((a)->s6_addr16[1]) == 0) && \
(((a)->s6_addr16[2]) == 0) && \
(((a)->s6_addr16[3]) == 0) && \
(((a)->s6_addr16[4]) == 0) && \
(((a)->s6_addr[10]) == 0))
/* 32 bits, FFXX::00XX:XXXX */
#define lowpan_is_mcast_addr_compressable32(a) \
((((a)->s6_addr16[1]) == 0) && \
(((a)->s6_addr16[2]) == 0) && \
(((a)->s6_addr16[3]) == 0) && \
(((a)->s6_addr16[4]) == 0) && \
(((a)->s6_addr16[5]) == 0) && \
(((a)->s6_addr[12]) == 0))
/* 8 bits, FF02::00XX */
#define lowpan_is_mcast_addr_compressable8(a) \
((((a)->s6_addr[1]) == 2) && \
(((a)->s6_addr16[1]) == 0) && \
(((a)->s6_addr16[2]) == 0) && \
(((a)->s6_addr16[3]) == 0) && \
(((a)->s6_addr16[4]) == 0) && \
(((a)->s6_addr16[5]) == 0) && \
(((a)->s6_addr16[6]) == 0) && \
(((a)->s6_addr[14]) == 0))
#define lowpan_is_addr_broadcast(a) \
((((a)[0]) == 0xFF) && \
(((a)[1]) == 0xFF) && \
(((a)[2]) == 0xFF) && \
(((a)[3]) == 0xFF) && \
(((a)[4]) == 0xFF) && \
(((a)[5]) == 0xFF) && \
(((a)[6]) == 0xFF) && \
(((a)[7]) == 0xFF))
#define LOWPAN_DISPATCH_IPV6 0x41 /* 01000001 = 65 */
#define LOWPAN_DISPATCH_IPHC 0x60 /* 011xxxxx = ... */
@@ -150,69 +88,6 @@ static inline bool lowpan_is_iphc(u8 dispatch)
    return (dispatch & LOWPAN_DISPATCH_IPHC_MASK) == LOWPAN_DISPATCH_IPHC;
}
#define LOWPAN_FRAG_TIMEOUT (HZ * 60) /* time-out 60 sec */
#define LOWPAN_FRAG1_HEAD_SIZE 0x4
#define LOWPAN_FRAGN_HEAD_SIZE 0x5
/*
* Values of fields within the IPHC encoding first byte
* (C stands for compressed and I for inline)
*/
#define LOWPAN_IPHC_TF 0x18
#define LOWPAN_IPHC_FL_C 0x10
#define LOWPAN_IPHC_TC_C 0x08
#define LOWPAN_IPHC_NH_C 0x04
#define LOWPAN_IPHC_TTL_1 0x01
#define LOWPAN_IPHC_TTL_64 0x02
#define LOWPAN_IPHC_TTL_255 0x03
#define LOWPAN_IPHC_TTL_I 0x00
/* Values of fields within the IPHC encoding second byte */
#define LOWPAN_IPHC_CID 0x80
#define LOWPAN_IPHC_ADDR_00 0x00
#define LOWPAN_IPHC_ADDR_01 0x01
#define LOWPAN_IPHC_ADDR_02 0x02
#define LOWPAN_IPHC_ADDR_03 0x03
#define LOWPAN_IPHC_SAC 0x40
#define LOWPAN_IPHC_SAM 0x30
#define LOWPAN_IPHC_SAM_BIT 4
#define LOWPAN_IPHC_M 0x08
#define LOWPAN_IPHC_DAC 0x04
#define LOWPAN_IPHC_DAM_00 0x00
#define LOWPAN_IPHC_DAM_01 0x01
#define LOWPAN_IPHC_DAM_10 0x02
#define LOWPAN_IPHC_DAM_11 0x03
#define LOWPAN_IPHC_DAM_BIT 0
/*
* LOWPAN_UDP encoding (works together with IPHC)
*/
#define LOWPAN_NHC_UDP_MASK 0xF8
#define LOWPAN_NHC_UDP_ID 0xF0
#define LOWPAN_NHC_UDP_CHECKSUMC 0x04
#define LOWPAN_NHC_UDP_CHECKSUMI 0x00
#define LOWPAN_NHC_UDP_4BIT_PORT 0xF0B0
#define LOWPAN_NHC_UDP_4BIT_MASK 0xFFF0
#define LOWPAN_NHC_UDP_8BIT_PORT 0xF000
#define LOWPAN_NHC_UDP_8BIT_MASK 0xFF00
/* values for port compression, _with checksum_ ie bit 5 set to 0 */
#define LOWPAN_NHC_UDP_CS_P_00 0xF0 /* all inline */
#define LOWPAN_NHC_UDP_CS_P_01 0xF1 /* source 16bit inline,
dest = 0xF0 + 8 bit inline */
#define LOWPAN_NHC_UDP_CS_P_10 0xF2 /* source = 0xF0 + 8bit inline,
dest = 16 bit inline */
#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
#define LOWPAN_NHC_UDP_CS_C 0x04 /* checksum elided */
#define LOWPAN_PRIV_SIZE(llpriv_size) \
    (sizeof(struct lowpan_priv) + llpriv_size)
@@ -250,7 +125,7 @@ struct lowpan_802154_cb *lowpan_802154_cb(const struct sk_buff *skb)
#ifdef DEBUG
/* print data in line */
static inline void raw_dump_inline(const char *caller, char *msg,
-                  unsigned char *buf, int len)
+                  const unsigned char *buf, int len)
{
    if (msg)
        pr_debug("%s():%s: ", caller, msg);
@@ -265,7 +140,7 @@ static inline void raw_dump_inline(const char *caller, char *msg,
 * ...
 */
static inline void raw_dump_table(const char *caller, char *msg,
-                 unsigned char *buf, int len)
+                 const unsigned char *buf, int len)
{
    if (msg)
        pr_debug("%s():%s:\n", caller, msg);
@@ -274,24 +149,25 @@ static inline void raw_dump_table(const char *caller, char *msg,
}
#else
static inline void raw_dump_table(const char *caller, char *msg,
-                 unsigned char *buf, int len) { }
+                 const unsigned char *buf, int len) { }
static inline void raw_dump_inline(const char *caller, char *msg,
-                  unsigned char *buf, int len) { }
+                  const unsigned char *buf, int len) { }
#endif
-static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val)
-{
-   if (unlikely(!pskb_may_pull(skb, 1)))
-       return -EINVAL;
-
-   *val = skb->data[0];
-   skb_pull(skb, 1);
-
-   return 0;
-}
-
-static inline bool lowpan_fetch_skb(struct sk_buff *skb,
-                   void *data, const unsigned int len)
+/**
+ * lowpan_fetch_skb - getting inline data from 6LoWPAN header
+ *
+ * This function will pull data from sk buffer and put it into data to
+ * remove the 6LoWPAN inline data. This function returns true if the
+ * sk buffer is too small to pull the amount of data which is specified
+ * by len.
+ *
+ * @skb: the buffer where the inline data should be pulled from.
+ * @data: destination buffer for the inline data.
+ * @len: amount of data which should be pulled in bytes.
+ */
+static inline bool lowpan_fetch_skb(struct sk_buff *skb, void *data,
+                   unsigned int len)
{
    if (unlikely(!pskb_may_pull(skb, len)))
        return true;
@@ -311,14 +187,42 @@ static inline void lowpan_push_hc_data(u8 **hc_ptr, const void *data,
void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype);
-int
-lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
-            const u8 *saddr, const u8 saddr_type,
-            const u8 saddr_len, const u8 *daddr,
-            const u8 daddr_type, const u8 daddr_len,
-            u8 iphc0, u8 iphc1);
-int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
-              unsigned short type, const void *_daddr,
-              const void *_saddr, unsigned int len);
+/**
+ * lowpan_header_decompress - replace 6LoWPAN header with IPv6 header
+ *
+ * This function replaces the IPHC 6LoWPAN header which should be pointed at
+ * skb->data and skb_network_header, with the IPv6 header.
+ * It would be nice that the caller have the necessary headroom of IPv6 header
+ * and greatest Transport layer header, this would reduce the overhead for
+ * reallocate headroom.
+ *
+ * @skb: the buffer which should be manipulate.
+ * @dev: the lowpan net device pointer.
+ * @daddr: destination lladdr of mac header which is used for compression
+ *  methods.
+ * @saddr: source lladdr of mac header which is used for compression
+ *  methods.
+ */
+int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
+                const void *daddr, const void *saddr);
+
+/**
+ * lowpan_header_compress - replace IPv6 header with 6LoWPAN header
+ *
+ * This function replaces the IPv6 header which should be pointed at
+ * skb->data and skb_network_header, with the IPHC 6LoWPAN header.
+ * The caller need to be sure that the sk buffer is not shared and at have
+ * at least a headroom which is smaller or equal LOWPAN_IPHC_MAX_HEADER_LEN,
+ * which is the IPHC "more bytes than IPv6 header" at worst case.
+ *
+ * @skb: the buffer which should be manipulate.
+ * @dev: the lowpan net device pointer.
+ * @daddr: destination lladdr of mac header which is used for compression
+ *  methods.
+ * @saddr: source lladdr of mac header which is used for compression
+ *  methods.
+ */
+int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
+              const void *daddr, const void *saddr);

#endif /* __6LOWPAN_H__ */
@@ -46,6 +46,7 @@
#define HCI_DEV_RESUME 6
#define HCI_DEV_OPEN 7
#define HCI_DEV_CLOSE 8
+#define HCI_DEV_SETUP 9

/* HCI notify events */
#define HCI_NOTIFY_CONN_ADD 1
@@ -170,6 +171,15 @@ enum {
     * during the hdev->setup vendor callback.
     */
    HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
/* When this quirk is set, the enabling of diagnostic mode is
* not persistent over HCI Reset. Every time the controller
* is brought up it needs to be reprogrammed.
*
* This quirk can be set before hci_register_dev is called or
* during the hdev->setup vendor callback.
*/
HCI_QUIRK_NON_PERSISTENT_DIAG,
};

/* HCI device flags */
......
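As a rough illustration of how a vendor driver would opt into the new quirk (a sketch only; foo_setup() is hypothetical, but set_bit() on hdev->quirks and the hdev->set_diag hook are the mechanisms used in this series):

    static int foo_setup(struct hci_dev *hdev)
    {
        /* Diagnostic mode on this controller does not survive HCI Reset,
         * so ask the core to reprogram it after every init sequence.
         */
        set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
        return 0;
    }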
@@ -398,6 +398,7 @@ struct hci_dev {
    int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
    void (*notify)(struct hci_dev *hdev, unsigned int evt);
    void (*hw_error)(struct hci_dev *hdev, u8 code);
+   int (*post_init)(struct hci_dev *hdev);
    int (*set_diag)(struct hci_dev *hdev, bool enable);
    int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
};
@@ -470,6 +471,7 @@ struct hci_conn {
    struct delayed_work auto_accept_work;
    struct delayed_work idle_work;
    struct delayed_work le_conn_timeout;
+   struct work_struct le_scan_cleanup;

    struct device dev;
    struct dentry *debugfs;
@@ -792,6 +794,30 @@ static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
    return NULL;
}
static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev,
bdaddr_t *ba,
__u8 ba_type)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *c;
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
if (c->type != LE_LINK)
continue;
if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) {
rcu_read_unlock();
return c;
}
}
rcu_read_unlock();
return NULL;
}
static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
                              __u8 type, __u16 state)
{
@@ -1016,9 +1042,6 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev);
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
                          bdaddr_t *addr,
                          u8 addr_type);
struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
bdaddr_t *addr,
u8 addr_type);
void hci_uuids_clear(struct hci_dev *hdev);
@@ -1458,7 +1481,7 @@ void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
bool mgmt_powering_down(struct hci_dev *hdev);
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
-void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk);
+void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent);
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
           bool persistent);
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
......
@@ -276,6 +276,16 @@ static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src)
    __put_unaligned_memmove64(swab64p(le64_src), be64_dst);
}
/**
* ieee802154_le16_to_be16 - copies and convert le16 to be16
* @be16_dst: be16 destination pointer
* @le16_src: le16 source pointer
*/
static inline void ieee802154_le16_to_be16(void *be16_dst, const void *le16_src)
{
__put_unaligned_memmove16(swab16p(le16_src), be16_dst);
}
/**
 * ieee802154_alloc_hw - Allocate a new hardware device
 *
......
This diff is collapsed.
@@ -95,23 +95,20 @@ static struct lowpan_nhc *lowpan_nhc_by_nhcid(const struct sk_buff *skb)
}

int lowpan_nhc_check_compression(struct sk_buff *skb,
-                const struct ipv6hdr *hdr, u8 **hc_ptr,
-                u8 *iphc0)
+                const struct ipv6hdr *hdr, u8 **hc_ptr)
{
    struct lowpan_nhc *nhc;
+   int ret = 0;

    spin_lock_bh(&lowpan_nhc_lock);

    nhc = lowpan_nexthdr_nhcs[hdr->nexthdr];
-   if (nhc && nhc->compress)
-       *iphc0 |= LOWPAN_IPHC_NH_C;
-   else
-       lowpan_push_hc_data(hc_ptr, &hdr->nexthdr,
-                   sizeof(hdr->nexthdr));
+   if (!(nhc && nhc->compress))
+       ret = -ENOENT;

    spin_unlock_bh(&lowpan_nhc_lock);

-   return 0;
+   return ret;
}

int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr,
@@ -157,7 +154,8 @@ int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr,
    return ret;
}

-int lowpan_nhc_do_uncompression(struct sk_buff *skb, struct net_device *dev,
+int lowpan_nhc_do_uncompression(struct sk_buff *skb,
+               const struct net_device *dev,
                struct ipv6hdr *hdr)
{
    struct lowpan_nhc *nhc;
......
@@ -86,19 +86,16 @@ struct lowpan_nhc *lowpan_nhc_by_nexthdr(u8 nexthdr);

/**
 * lowpan_nhc_check_compression - checks if we support compression format. If
- * we support the nhc by nexthdr field, the 6LoWPAN iphc NHC bit will be
- * set. If we don't support nexthdr will be added as inline data to the
- * 6LoWPAN header.
+ * we support the nhc by nexthdr field, the function will return 0. If we
+ * don't support the nhc by nexthdr this function will return -ENOENT.
 *
 * @skb: skb of 6LoWPAN header to read nhc and replace header.
 * @hdr: ipv6hdr to check the nexthdr value
 * @hc_ptr: pointer for 6LoWPAN header which should increment at the end of
 *      replaced header.
- * @iphc0: iphc0 pointer to set the 6LoWPAN NHC bit
 */
int lowpan_nhc_check_compression(struct sk_buff *skb,
-                const struct ipv6hdr *hdr, u8 **hc_ptr,
-                u8 *iphc0);
+                const struct ipv6hdr *hdr, u8 **hc_ptr);

/**
 * lowpan_nhc_do_compression - calling compress callback for nhc
@@ -119,7 +116,8 @@ int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr,
 * @dev: netdevice for print logging information.
 * @hdr: ipv6hdr for setting nexthdr value.
 */
-int lowpan_nhc_do_uncompression(struct sk_buff *skb, struct net_device *dev,
+int lowpan_nhc_do_uncompression(struct sk_buff *skb,
+               const struct net_device *dev,
                struct ipv6hdr *hdr);

/**
......
@@ -17,7 +17,27 @@

#include "nhc.h"

-#define LOWPAN_NHC_UDP_IDLEN 1
+#define LOWPAN_NHC_UDP_MASK 0xF8
#define LOWPAN_NHC_UDP_ID 0xF0
#define LOWPAN_NHC_UDP_IDLEN 1
#define LOWPAN_NHC_UDP_4BIT_PORT 0xF0B0
#define LOWPAN_NHC_UDP_4BIT_MASK 0xFFF0
#define LOWPAN_NHC_UDP_8BIT_PORT 0xF000
#define LOWPAN_NHC_UDP_8BIT_MASK 0xFF00
/* values for port compression, _with checksum_ ie bit 5 set to 0 */
/* all inline */
#define LOWPAN_NHC_UDP_CS_P_00 0xF0
/* source 16bit inline, dest = 0xF0 + 8 bit inline */
#define LOWPAN_NHC_UDP_CS_P_01 0xF1
/* source = 0xF0 + 8bit inline, dest = 16 bit inline */
#define LOWPAN_NHC_UDP_CS_P_10 0xF2
/* source & dest = 0xF0B + 4bit inline */
#define LOWPAN_NHC_UDP_CS_P_11 0xF3
/* checksum elided */
#define LOWPAN_NHC_UDP_CS_C 0x04
static int udp_uncompress(struct sk_buff *skb, size_t needed)
{
......
@@ -21,8 +21,6 @@
#include <net/ip6_route.h>
#include <net/addrconf.h>

-#include <net/af_ieee802154.h> /* to get the address type */
-
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
@@ -272,7 +270,6 @@ static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
               struct l2cap_chan *chan)
{
    const u8 *saddr, *daddr;
-   u8 iphc0, iphc1;
    struct lowpan_dev *dev;
    struct lowpan_peer *peer;
@@ -287,22 +284,7 @@ static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
    saddr = peer->eui64_addr;
    daddr = dev->netdev->dev_addr;
-   /* at least two bytes will be used for the encoding */
-   if (skb->len < 2)
-       return -EINVAL;
-
-   if (lowpan_fetch_skb_u8(skb, &iphc0))
-       return -EINVAL;
-
-   if (lowpan_fetch_skb_u8(skb, &iphc1))
-       return -EINVAL;
-
-   return lowpan_header_decompress(skb, netdev,
-                   saddr, IEEE802154_ADDR_LONG,
-                   EUI64_ADDR_LEN, daddr,
-                   IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
-                   iphc0, iphc1);
+   return lowpan_header_decompress(skb, netdev, daddr, saddr);
}

static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
@@ -314,15 +296,17 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
    if (!netif_running(dev))
        goto drop;

-   if (dev->type != ARPHRD_6LOWPAN)
+   if (dev->type != ARPHRD_6LOWPAN || !skb->len)
        goto drop;

+   skb_reset_network_header(skb);
+
    skb = skb_share_check(skb, GFP_ATOMIC);
    if (!skb)
        goto drop;

    /* check that it's our buffer */
-   if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
+   if (lowpan_is_ipv6(*skb_network_header(skb))) {
        /* Copy the packet so that the IPv6 header is
         * properly aligned.
         */
@@ -334,7 +318,6 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
        local_skb->protocol = htons(ETH_P_IPV6);
        local_skb->pkt_type = PACKET_HOST;
-       skb_reset_network_header(local_skb);
        skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));

        if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
@@ -347,38 +330,34 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
        consume_skb(local_skb);
        consume_skb(skb);
-   } else {
-       switch (skb->data[0] & 0xe0) {
-       case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
+   } else if (lowpan_is_iphc(*skb_network_header(skb))) {
        local_skb = skb_clone(skb, GFP_ATOMIC);
        if (!local_skb)
            goto drop;

        ret = iphc_decompress(local_skb, dev, chan);
        if (ret < 0) {
            kfree_skb(local_skb);
            goto drop;
        }

        local_skb->protocol = htons(ETH_P_IPV6);
        local_skb->pkt_type = PACKET_HOST;
        local_skb->dev = dev;

        if (give_skb_to_upper(local_skb, dev)
            != NET_RX_SUCCESS) {
            kfree_skb(local_skb);
            goto drop;
        }

        dev->stats.rx_bytes += skb->len;
        dev->stats.rx_packets++;

        consume_skb(local_skb);
        consume_skb(skb);
-       break;
-   default:
-       break;
-   }
+   } else {
+       goto drop;
    }

    return NET_RX_SUCCESS;
@@ -492,8 +471,7 @@ static int setup_header(struct sk_buff *skb, struct net_device *netdev,
        status = 1;
    }

-   lowpan_header_compress(skb, netdev, ETH_P_IPV6, daddr,
-              dev->netdev->dev_addr, skb->len);
+   lowpan_header_compress(skb, netdev, daddr, dev->netdev->dev_addr);

    err = dev_hard_header(skb, netdev, ETH_P_IPV6, NULL, NULL, 0);
    if (err < 0)
@@ -1135,7 +1113,7 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
        return -ENOENT;

    hci_dev_lock(hdev);
-   hcon = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
+   hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type);
    hci_dev_unlock(hdev);

    if (!hcon)
......
@@ -33,7 +33,7 @@

#include "selftest.h"

-#define VERSION "2.20"
+#define VERSION "2.21"

/* Bluetooth sockets */
#define BT_MAX_PROTO 8
......
@@ -59,15 +59,11 @@ static const struct sco_param esco_param_msbc[] = {
    { EDR_ESCO_MASK | ESCO_EV3, 0x0008, 0x02 }, /* T1 */
};

-static void hci_le_create_connection_cancel(struct hci_conn *conn)
-{
-   hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
-}
-
/* This function requires the caller holds hdev->lock */
static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
{
    struct hci_conn_params *params;
+   struct hci_dev *hdev = conn->hdev;
    struct smp_irk *irk;
    bdaddr_t *bdaddr;
    u8 bdaddr_type;
@@ -76,14 +72,15 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
    bdaddr_type = conn->dst_type;

    /* Check if we need to convert to identity address */
-   irk = hci_get_irk(conn->hdev, bdaddr, bdaddr_type);
+   irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
    if (irk) {
        bdaddr = &irk->bdaddr;
        bdaddr_type = irk->addr_type;
    }

-   params = hci_explicit_connect_lookup(conn->hdev, bdaddr, bdaddr_type);
-   if (!params)
+   params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
+                      bdaddr_type);
+   if (!params || !params->explicit_connect)
        return;

    /* The connection attempt was doing scan for new RPA, and is
@@ -97,21 +94,21 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
    switch (params->auto_connect) {
    case HCI_AUTO_CONN_EXPLICIT:
-       hci_conn_params_del(conn->hdev, bdaddr, bdaddr_type);
+       hci_conn_params_del(hdev, bdaddr, bdaddr_type);
        /* return instead of break to avoid duplicate scan update */
        return;
    case HCI_AUTO_CONN_DIRECT:
    case HCI_AUTO_CONN_ALWAYS:
-       list_add(&params->action, &conn->hdev->pend_le_conns);
+       list_add(&params->action, &hdev->pend_le_conns);
        break;
    case HCI_AUTO_CONN_REPORT:
-       list_add(&params->action, &conn->hdev->pend_le_reports);
+       list_add(&params->action, &hdev->pend_le_reports);
        break;
    default:
        break;
    }

-   hci_update_background_scan(conn->hdev);
+   hci_update_background_scan(hdev);
}

static void hci_conn_cleanup(struct hci_conn *conn)
@@ -137,18 +134,51 @@ static void hci_conn_cleanup(struct hci_conn *conn)
    hci_conn_put(conn);
}
-/* This function requires the caller holds hdev->lock */
-static void hci_connect_le_scan_remove(struct hci_conn *conn)
-{
-   hci_connect_le_scan_cleanup(conn);
-
-   /* We can't call hci_conn_del here since that would deadlock
-    * with trying to call cancel_delayed_work_sync(&conn->disc_work).
-    * Instead, call just hci_conn_cleanup() which contains the bare
-    * minimum cleanup operations needed for a connection in this
-    * state.
-    */
-   hci_conn_cleanup(conn);
-}
+static void le_scan_cleanup(struct work_struct *work)
+{
+   struct hci_conn *conn = container_of(work, struct hci_conn,
+                        le_scan_cleanup);
+   struct hci_dev *hdev = conn->hdev;
+   struct hci_conn *c = NULL;
+
+   BT_DBG("%s hcon %p", hdev->name, conn);
+
+   hci_dev_lock(hdev);
+
+   /* Check that the hci_conn is still around */
+   rcu_read_lock();
+   list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
+       if (c == conn)
+           break;
+   }
+   rcu_read_unlock();
+
+   if (c == conn) {
+       hci_connect_le_scan_cleanup(conn);
+       hci_conn_cleanup(conn);
+   }
+
+   hci_dev_unlock(hdev);
+   hci_dev_put(hdev);
+   hci_conn_put(conn);
+}
+
+static void hci_connect_le_scan_remove(struct hci_conn *conn)
+{
+   BT_DBG("%s hcon %p", conn->hdev->name, conn);
+
+   /* We can't call hci_conn_del/hci_conn_cleanup here since that
+    * could deadlock with another hci_conn_del() call that's holding
+    * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
+    * Instead, grab temporary extra references to the hci_dev and
+    * hci_conn and perform the necessary cleanup in a separate work
+    * callback.
+    */
+
+   hci_dev_hold(conn->hdev);
+   hci_conn_get(conn);
+   schedule_work(&conn->le_scan_cleanup);
+}

static void hci_acl_create_connection(struct hci_conn *conn)
@@ -194,33 +224,8 @@ static void hci_acl_create_connection(struct hci_conn *conn)

    hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
static void hci_acl_create_connection_cancel(struct hci_conn *conn)
{
struct hci_cp_create_conn_cancel cp;
BT_DBG("hcon %p", conn);
if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
return;
bacpy(&cp.bdaddr, &conn->dst);
hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}
static void hci_reject_sco(struct hci_conn *conn)
{
struct hci_cp_reject_sync_conn_req cp;
cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
bacpy(&cp.bdaddr, &conn->dst);
hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
}
int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
-   struct hci_cp_disconnect cp;
-
    BT_DBG("hcon %p", conn);

    /* When we are master of an established connection and it enters
@@ -228,7 +233,8 @@ int hci_disconnect(struct hci_conn *conn, __u8 reason)
     * current clock offset. Processing of the result is done
     * within the event handling and hci_clock_offset_evt function.
     */
-   if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER) {
+   if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
+       (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_read_clock_offset clkoff_cp;
@@ -237,25 +243,7 @@ int hci_disconnect(struct hci_conn *conn, __u8 reason)
                 &clkoff_cp);
    }

-   conn->state = BT_DISCONN;
-
-   cp.handle = cpu_to_le16(conn->handle);
-   cp.reason = reason;
-   return hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
-}
-
-static void hci_amp_disconn(struct hci_conn *conn)
-{
-   struct hci_cp_disconn_phy_link cp;
-
-   BT_DBG("hcon %p", conn);
-
-   conn->state = BT_DISCONN;
-
-   cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
-   cp.reason = hci_proto_disconn_ind(conn);
-   hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
-            sizeof(cp), &cp);
+   return hci_abort_conn(conn, reason);
}

static void hci_add_sco(struct hci_conn *conn, __u16 handle)
@@ -421,35 +409,14 @@ static void hci_conn_timeout(struct work_struct *work)
    if (refcnt > 0)
        return;

-   switch (conn->state) {
-   case BT_CONNECT:
-   case BT_CONNECT2:
-       if (conn->out) {
-           if (conn->type == ACL_LINK)
-               hci_acl_create_connection_cancel(conn);
-           else if (conn->type == LE_LINK) {
-               if (test_bit(HCI_CONN_SCANNING, &conn->flags))
-                   hci_connect_le_scan_remove(conn);
-               else
-                   hci_le_create_connection_cancel(conn);
-           }
-       } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
-           hci_reject_sco(conn);
-       }
-       break;
-   case BT_CONFIG:
-   case BT_CONNECTED:
-       if (conn->type == AMP_LINK) {
-           hci_amp_disconn(conn);
-       } else {
-           __u8 reason = hci_proto_disconn_ind(conn);
-
-           hci_disconnect(conn, reason);
-       }
-       break;
-   default:
-       conn->state = BT_CLOSED;
-       break;
-   }
+   /* LE connections in scanning state need special handling */
+   if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
+       test_bit(HCI_CONN_SCANNING, &conn->flags)) {
+       hci_connect_le_scan_remove(conn);
+       return;
+   }
+
+   hci_abort_conn(conn, hci_proto_disconn_ind(conn));
}
/* Enter sniff mode */
@@ -517,7 +484,7 @@ static void le_conn_timeout(struct work_struct *work)
        return;
    }

-   hci_le_create_connection_cancel(conn);
+   hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
@@ -580,6 +547,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
    INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
    INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
    INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
+   INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);

    atomic_set(&conn->refcnt, 0);
@@ -835,7 +803,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
     * attempt, we simply update pending_sec_level and auth_type fields
     * and return the object found.
     */
-   conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+   conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
    conn_unfinished = NULL;
    if (conn) {
        if (conn->state == BT_CONNECT &&
@@ -985,13 +953,10 @@ static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
    struct hci_conn *conn;

-   conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
+   conn = hci_conn_hash_lookup_le(hdev, addr, type);
    if (!conn)
        return false;

-   if (conn->dst_type != type)
-       return false;
-
    if (conn->state != BT_CONNECTED)
        return false;
@@ -1064,7 +1029,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
     * attempt, we simply update pending_sec_level and auth_type fields
     * and return the object found.
     */
-   conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+   conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
    if (conn) {
        if (conn->pending_sec_level < sec_level)
            conn->pending_sec_level = sec_level;
......
@@ -162,6 +162,16 @@ static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
    if (strtobool(buf, &enable))
        return -EINVAL;
/* When the diagnostic flags are not persistent and the transport
* is not active, then there is no need for the vendor callback.
*
* Instead just store the desired value. If needed the setting
* will be programmed when the controller gets powered on.
*/
if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
!test_bit(HCI_RUNNING, &hdev->flags))
goto done;
    hci_req_lock(hdev);
    err = hdev->set_diag(hdev, enable);
    hci_req_unlock(hdev);
@@ -169,6 +179,7 @@ static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
    if (err < 0)
        return err;

+done:
    if (enable)
        hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
    else
@@ -1450,6 +1461,8 @@ static int hci_dev_do_open(struct hci_dev *hdev)
    set_bit(HCI_INIT, &hdev->flags);

    if (hci_dev_test_flag(hdev, HCI_SETUP)) {
+       hci_sock_dev_event(hdev, HCI_DEV_SETUP);
+
        if (hdev->setup)
            ret = hdev->setup(hdev);
@@ -1490,10 +1503,21 @@ static int hci_dev_do_open(struct hci_dev *hdev)
    if (!ret) {
        if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
-           !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
+           !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
            ret = __hci_init(hdev);
+           if (!ret && hdev->post_init)
+               ret = hdev->post_init(hdev);
+       }
    }
/* If the HCI Reset command is clearing all diagnostic settings,
* then they need to be reprogrammed after the init procedure
* completed.
*/
if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
ret = hdev->set_diag(hdev, true);
    clear_bit(HCI_INIT, &hdev->flags);

    if (!ret) {
@@ -2916,23 +2940,6 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
    return NULL;
}
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
bdaddr_t *addr,
u8 addr_type)
{
struct hci_conn_params *param;
list_for_each_entry(param, &hdev->pend_le_conns, action) {
if (bacmp(&param->addr, addr) == 0 &&
param->addr_type == addr_type &&
param->explicit_connect)
return param;
}
return NULL;
}
/* This function requires the caller holds hdev->lock */ /* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev, struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
bdaddr_t *addr, u8 addr_type) bdaddr_t *addr, u8 addr_type)
...@@ -3555,14 +3562,15 @@ EXPORT_SYMBOL(hci_recv_frame); ...@@ -3555,14 +3562,15 @@ EXPORT_SYMBOL(hci_recv_frame);
/* Receive diagnostic message from HCI drivers */ /* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb) int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{ {
/* Mark as diagnostic packet */
bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
/* Time stamp */ /* Time stamp */
__net_timestamp(skb); __net_timestamp(skb);
/* Mark as diagnostic packet and send to monitor */ skb_queue_tail(&hdev->rx_q, skb);
bt_cb(skb)->pkt_type = HCI_DIAG_PKT; queue_work(hdev->workqueue, &hdev->rx_work);
hci_send_to_monitor(hdev, skb);
kfree_skb(skb);
return 0; return 0;
} }
EXPORT_SYMBOL(hci_recv_diag); EXPORT_SYMBOL(hci_recv_diag);
......
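hci_recv_diag() now tags the skb as HCI_DIAG_PKT and queues it on the rx queue for the rx work item, instead of sending it straight to the monitor socket and freeing it, so diagnostic frames follow the same deferred processing path as other HCI traffic. A small producer-style sketch of the tag-then-enqueue idea (plain C with invented types, not the kernel's skb and workqueue API):

    #include <stdio.h>

    enum pkt_type { PKT_EVENT = 0x04, PKT_DIAG = 0xf0 };

    struct pkt {
        enum pkt_type type;
        const char *payload;
    };

    #define RXQ_MAX 8

    static struct pkt rx_q[RXQ_MAX];
    static int rx_head, rx_tail;

    /* Tag the packet and queue it for later processing, rather than handling
     * (and freeing) it inline - the shape of the new hci_recv_diag().
     */
    static int recv_diag(const char *payload)
    {
        if ((rx_tail + 1) % RXQ_MAX == rx_head)
            return -1; /* queue full */

        rx_q[rx_tail].type = PKT_DIAG;
        rx_q[rx_tail].payload = payload;
        rx_tail = (rx_tail + 1) % RXQ_MAX;
        /* in the kernel this is where the rx work would be scheduled */
        return 0;
    }

    static void rx_work(void)
    {
        while (rx_head != rx_tail) {
            struct pkt *p = &rx_q[rx_head];

            printf("processing type 0x%02x: %s\n", p->type, p->payload);
            rx_head = (rx_head + 1) % RXQ_MAX;
        }
    }

    int main(void)
    {
        recv_diag("vendor trace #1");
        recv_diag("vendor trace #2");
        rx_work();
        return 0;
    }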
...@@ -1915,7 +1915,8 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) ...@@ -1915,7 +1915,8 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
hci_dev_lock(hdev); hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr); conn = hci_conn_hash_lookup_le(hdev, &cp->peer_addr,
cp->peer_addr_type);
if (!conn) if (!conn)
goto unlock; goto unlock;
......
...@@ -564,3 +564,96 @@ void hci_update_background_scan(struct hci_dev *hdev) ...@@ -564,3 +564,96 @@ void hci_update_background_scan(struct hci_dev *hdev)
if (err && err != -ENODATA) if (err && err != -ENODATA)
BT_ERR("Failed to run HCI request: err %d", err); BT_ERR("Failed to run HCI request: err %d", err);
} }
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
u8 reason)
{
switch (conn->state) {
case BT_CONNECTED:
case BT_CONFIG:
if (conn->type == AMP_LINK) {
struct hci_cp_disconn_phy_link cp;
cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
cp.reason = reason;
hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
&cp);
} else {
struct hci_cp_disconnect dc;
dc.handle = cpu_to_le16(conn->handle);
dc.reason = reason;
hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
}
conn->state = BT_DISCONN;
break;
case BT_CONNECT:
if (conn->type == LE_LINK) {
if (test_bit(HCI_CONN_SCANNING, &conn->flags))
break;
hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
0, NULL);
} else if (conn->type == ACL_LINK) {
if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
break;
hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
6, &conn->dst);
}
break;
case BT_CONNECT2:
if (conn->type == ACL_LINK) {
struct hci_cp_reject_conn_req rej;
bacpy(&rej.bdaddr, &conn->dst);
rej.reason = reason;
hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
sizeof(rej), &rej);
} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
struct hci_cp_reject_sync_conn_req rej;
bacpy(&rej.bdaddr, &conn->dst);
/* SCO rejection has its own limited set of
* allowed error values (0x0D-0x0F) which isn't
* compatible with most values passed to this
* function. To be safe hard-code one of the
* values that's suitable for SCO.
*/
rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;
hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
sizeof(rej), &rej);
}
break;
default:
conn->state = BT_CLOSED;
break;
}
}
static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
if (status)
BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}
int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
struct hci_request req;
int err;
hci_req_init(&req, conn->hdev);
__hci_abort_conn(&req, conn, reason);
err = hci_req_run(&req, abort_conn_complete);
if (err && err != -ENODATA) {
BT_ERR("Failed to run HCI request: err %d", err);
return err;
}
return 0;
}
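__hci_abort_conn() centralises the "how do I tear this connection down" decision: Disconnect (or Disconnect Physical Link for AMP) for established links, the Create Connection Cancel variants for outgoing attempts, and a reject for incoming ones, with SCO rejects pinned to one of the few reason codes the spec allows. A compact state-machine sketch of that dispatch (standard C with simplified types; it mirrors the branching, not the exact kernel calls):

    #include <stdio.h>

    enum conn_state { ST_CONNECTED, ST_CONFIG, ST_CONNECT, ST_CONNECT2, ST_OTHER };
    enum link_type  { LINK_ACL, LINK_LE, LINK_SCO, LINK_AMP };

    /* Pick the HCI operation used to abort a connection, keyed on its state
     * and link type - the same branching as __hci_abort_conn().
     */
    static const char *abort_op(enum conn_state state, enum link_type type)
    {
        switch (state) {
        case ST_CONNECTED:
        case ST_CONFIG:
            return type == LINK_AMP ? "Disconnect Physical Link"
                                    : "Disconnect";
        case ST_CONNECT:
            if (type == LINK_LE)
                return "LE Create Connection Cancel";
            if (type == LINK_ACL)
                return "Create Connection Cancel";
            return "nothing to do";
        case ST_CONNECT2:
            if (type == LINK_ACL)
                return "Reject Connection Request";
            if (type == LINK_SCO)
                /* SCO rejects only allow reasons 0x0D-0x0F */
                return "Reject Synchronous Connection Request";
            return "nothing to do";
        default:
            return "mark closed, no HCI command";
        }
    }

    int main(void)
    {
        printf("%s\n", abort_op(ST_CONNECTED, LINK_ACL));
        printf("%s\n", abort_op(ST_CONNECT, LINK_LE));
        printf("%s\n", abort_op(ST_CONNECT2, LINK_SCO));
        return 0;
    }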
...@@ -55,3 +55,7 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy, ...@@ -55,3 +55,7 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
void hci_update_background_scan(struct hci_dev *hdev); void hci_update_background_scan(struct hci_dev *hdev);
void __hci_update_background_scan(struct hci_request *req); void __hci_update_background_scan(struct hci_request *req);
int hci_abort_conn(struct hci_conn *conn, u8 reason);
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
u8 reason);
...@@ -120,10 +120,7 @@ static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb) ...@@ -120,10 +120,7 @@ static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
/* Apply filter */ /* Apply filter */
flt = &hci_pi(sk)->filter; flt = &hci_pi(sk)->filter;
if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
flt_type = 0;
else
flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
if (!test_bit(flt_type, &flt->type_mask)) if (!test_bit(flt_type, &flt->type_mask))
return true; return true;
...@@ -173,6 +170,11 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) ...@@ -173,6 +170,11 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
continue; continue;
if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) { if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
continue;
if (is_filtered_packet(sk, skb)) if (is_filtered_packet(sk, skb))
continue; continue;
} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
...@@ -333,6 +335,12 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event) ...@@ -333,6 +335,12 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
opcode = cpu_to_le16(HCI_MON_DEL_INDEX); opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
break; break;
case HCI_DEV_SETUP:
if (hdev->manufacturer == 0xffff)
return NULL;
/* fall through */
case HCI_DEV_UP: case HCI_DEV_UP:
skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC); skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
if (!skb) if (!skb)
...@@ -401,15 +409,17 @@ static void send_monitor_replay(struct sock *sk) ...@@ -401,15 +409,17 @@ static void send_monitor_replay(struct sock *sk)
if (sock_queue_rcv_skb(sk, skb)) if (sock_queue_rcv_skb(sk, skb))
kfree_skb(skb); kfree_skb(skb);
if (!test_bit(HCI_UP, &hdev->flags)) if (test_bit(HCI_UP, &hdev->flags))
continue; skb = create_monitor_event(hdev, HCI_DEV_UP);
else if (hci_dev_test_flag(hdev, HCI_SETUP))
skb = create_monitor_event(hdev, HCI_DEV_UP); skb = create_monitor_event(hdev, HCI_DEV_SETUP);
if (!skb) else
continue; skb = NULL;
if (sock_queue_rcv_skb(sk, skb)) if (skb) {
kfree_skb(skb); if (sock_queue_rcv_skb(sk, skb))
kfree_skb(skb);
}
} }
read_unlock(&hci_dev_list_lock); read_unlock(&hci_dev_list_lock);
...@@ -1250,6 +1260,12 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, ...@@ -1250,6 +1260,12 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
goto drop; goto drop;
} }
if (bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
err = -EINVAL;
goto drop;
}
skb_queue_tail(&hdev->raw_q, skb); skb_queue_tail(&hdev->raw_q, skb);
queue_work(hdev->workqueue, &hdev->tx_work); queue_work(hdev->workqueue, &hdev->tx_work);
} }
......
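With vendor and diagnostic packets now flowing through the rx path, the raw HCI socket code explicitly whitelists the four classic packet types before the per-socket filter is consulted, and the filter index is simply the packet type masked to the filter bits. A self-contained sketch of that two-stage check (the packet type constants echo the Bluetooth values; everything else is invented for the example):

    #include <stdbool.h>
    #include <stdio.h>

    #define HCI_COMMAND_PKT 0x01
    #define HCI_ACLDATA_PKT 0x02
    #define HCI_SCODATA_PKT 0x03
    #define HCI_EVENT_PKT   0x04
    #define HCI_DIAG_PKT    0xf0

    #define HCI_FLT_TYPE_BITS 31

    /* Stage 1: raw sockets only ever see the four classic packet types. */
    static bool raw_socket_accepts(unsigned char pkt_type)
    {
        return pkt_type == HCI_COMMAND_PKT || pkt_type == HCI_EVENT_PKT ||
               pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_SCODATA_PKT;
    }

    /* Stage 2: the per-socket filter is a bitmask indexed by the masked type. */
    static bool filter_passes(unsigned long type_mask, unsigned char pkt_type)
    {
        unsigned int flt_type = pkt_type & HCI_FLT_TYPE_BITS;

        return (type_mask >> flt_type) & 1;
    }

    int main(void)
    {
        unsigned long mask = (1UL << HCI_EVENT_PKT) | (1UL << HCI_ACLDATA_PKT);

        printf("event: %d\n", raw_socket_accepts(HCI_EVENT_PKT) &&
                              filter_passes(mask, HCI_EVENT_PKT));
        printf("diag:  %d\n", raw_socket_accepts(HCI_DIAG_PKT)); /* dropped */
        return 0;
    }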
...@@ -401,6 +401,20 @@ static void hidp_idle_timeout(unsigned long arg) ...@@ -401,6 +401,20 @@ static void hidp_idle_timeout(unsigned long arg)
{ {
struct hidp_session *session = (struct hidp_session *) arg; struct hidp_session *session = (struct hidp_session *) arg;
/* The HIDP user-space API only contains calls to add and remove
* devices. There is no way to forward events of any kind. Therefore,
* we have to forcefully disconnect a device on idle-timeouts. This is
* unfortunate and weird API design, but it is spec-compliant and
* required for backwards-compatibility. Hence, on idle-timeout, we
* signal driver-detach events, so poll() will be woken up with an
* error-condition on both sockets.
*/
session->intr_sock->sk->sk_err = EUNATCH;
session->ctrl_sock->sk->sk_err = EUNATCH;
wake_up_interruptible(sk_sleep(session->intr_sock->sk));
wake_up_interruptible(sk_sleep(session->ctrl_sock->sk));
hidp_session_terminate(session); hidp_session_terminate(session);
} }
......
...@@ -1111,53 +1111,76 @@ static int l2cap_sock_shutdown(struct socket *sock, int how) ...@@ -1111,53 +1111,76 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
if (!sk) if (!sk)
return 0; return 0;
lock_sock(sk);
if (sk->sk_shutdown)
goto shutdown_already;
BT_DBG("Handling sock shutdown");
/* prevent sk structure from being freed whilst unlocked */ /* prevent sk structure from being freed whilst unlocked */
sock_hold(sk); sock_hold(sk);
chan = l2cap_pi(sk)->chan; chan = l2cap_pi(sk)->chan;
/* prevent chan structure from being freed whilst unlocked */ /* prevent chan structure from being freed whilst unlocked */
l2cap_chan_hold(chan); l2cap_chan_hold(chan);
conn = chan->conn;
BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
if (chan->mode == L2CAP_MODE_ERTM &&
chan->unacked_frames > 0 &&
chan->state == BT_CONNECTED) {
err = __l2cap_wait_ack(sk, chan);
/* After waiting for ACKs, check whether shutdown
* has already been actioned to close the L2CAP
* link such as by l2cap_disconnection_req().
*/
if (sk->sk_shutdown)
goto has_shutdown;
}
sk->sk_shutdown = SHUTDOWN_MASK;
release_sock(sk);
l2cap_chan_lock(chan);
conn = chan->conn;
if (conn)
/* prevent conn structure from being freed */
l2cap_conn_get(conn);
l2cap_chan_unlock(chan);
if (conn) if (conn)
/* mutex lock must be taken before l2cap_chan_lock() */
mutex_lock(&conn->chan_lock); mutex_lock(&conn->chan_lock);
l2cap_chan_lock(chan); l2cap_chan_lock(chan);
lock_sock(sk); l2cap_chan_close(chan, 0);
l2cap_chan_unlock(chan);
if (!sk->sk_shutdown) { if (conn) {
if (chan->mode == L2CAP_MODE_ERTM && mutex_unlock(&conn->chan_lock);
chan->unacked_frames > 0 && l2cap_conn_put(conn);
chan->state == BT_CONNECTED) }
err = __l2cap_wait_ack(sk, chan);
sk->sk_shutdown = SHUTDOWN_MASK; lock_sock(sk);
release_sock(sk); if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
l2cap_chan_close(chan, 0); !(current->flags & PF_EXITING))
lock_sock(sk); err = bt_sock_wait_state(sk, BT_CLOSED,
sk->sk_lingertime);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && has_shutdown:
!(current->flags & PF_EXITING)) l2cap_chan_put(chan);
err = bt_sock_wait_state(sk, BT_CLOSED, sock_put(sk);
sk->sk_lingertime);
}
shutdown_already:
if (!err && sk->sk_err) if (!err && sk->sk_err)
err = -sk->sk_err; err = -sk->sk_err;
release_sock(sk); release_sock(sk);
l2cap_chan_unlock(chan);
if (conn)
mutex_unlock(&conn->chan_lock);
l2cap_chan_put(chan);
sock_put(sk);
BT_DBG("err: %d", err); BT_DBG("Sock shutdown complete err: %d", err);
return err; return err;
} }
......
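The reworked l2cap_sock_shutdown() checks sk_shutdown early, waits for outstanding ERTM ACKs before marking the socket shut down, and only then closes the channel while taking conn->chan_lock ahead of the channel lock, keeping sk, chan and conn pinned with references across the unlocked windows. A stripped-down pthread sketch of that "pin, take outer lock before inner, release in reverse" pattern (illustrative only; names do not correspond to kernel symbols):

    #include <pthread.h>
    #include <stdio.h>

    struct obj {
        pthread_mutex_t lock;
        int refs;
    };

    static struct obj conn = { PTHREAD_MUTEX_INITIALIZER, 1 };
    static struct obj chan = { PTHREAD_MUTEX_INITIALIZER, 1 };

    static void get(struct obj *o) { __sync_fetch_and_add(&o->refs, 1); }
    static void put(struct obj *o) { __sync_fetch_and_sub(&o->refs, 1); }

    /* Shut down a channel: pin both objects first, then always take the
     * outer (conn) lock before the inner (chan) lock, and release them in
     * reverse order - the ordering the new shutdown path enforces.
     */
    static void shutdown_chan(void)
    {
        get(&chan);
        get(&conn);

        pthread_mutex_lock(&conn.lock);   /* outer lock first */
        pthread_mutex_lock(&chan.lock);
        printf("closing channel (conn refs %d, chan refs %d)\n",
               conn.refs, chan.refs);
        pthread_mutex_unlock(&chan.lock);
        pthread_mutex_unlock(&conn.lock); /* release in reverse order */

        put(&conn);
        put(&chan);
    }

    int main(void)
    {
        shutdown_chan();
        return 0;
    }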
...@@ -268,6 +268,14 @@ static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len, ...@@ -268,6 +268,14 @@ static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
HCI_SOCK_TRUSTED, skip_sk); HCI_SOCK_TRUSTED, skip_sk);
} }
static u8 le_addr_type(u8 mgmt_addr_type)
{
if (mgmt_addr_type == BDADDR_LE_PUBLIC)
return ADDR_LE_DEV_PUBLIC;
else
return ADDR_LE_DEV_RANDOM;
}
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data, static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len) u16 data_len)
{ {
...@@ -1631,35 +1639,8 @@ static int clean_up_hci_state(struct hci_dev *hdev) ...@@ -1631,35 +1639,8 @@ static int clean_up_hci_state(struct hci_dev *hdev)
discov_stopped = hci_stop_discovery(&req); discov_stopped = hci_stop_discovery(&req);
list_for_each_entry(conn, &hdev->conn_hash.list, list) { list_for_each_entry(conn, &hdev->conn_hash.list, list) {
struct hci_cp_disconnect dc; /* 0x15 == Terminated due to Power Off */
struct hci_cp_reject_conn_req rej; __hci_abort_conn(&req, conn, 0x15);
switch (conn->state) {
case BT_CONNECTED:
case BT_CONFIG:
dc.handle = cpu_to_le16(conn->handle);
dc.reason = 0x15; /* Terminated due to Power Off */
hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
break;
case BT_CONNECT:
if (conn->type == LE_LINK)
hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
0, NULL);
else if (conn->type == ACL_LINK)
hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
6, &conn->dst);
break;
case BT_CONNECT2:
bacpy(&rej.bdaddr, &conn->dst);
rej.reason = 0x15; /* Terminated due to Power Off */
if (conn->type == ACL_LINK)
hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
sizeof(rej), &rej);
else if (conn->type == SCO_LINK)
hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
sizeof(rej), &rej);
break;
}
} }
err = hci_req_run(&req, clean_up_hci_complete); err = hci_req_run(&req, clean_up_hci_complete);
...@@ -3044,9 +3025,10 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, ...@@ -3044,9 +3025,10 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
{ {
struct mgmt_cp_unpair_device *cp = data; struct mgmt_cp_unpair_device *cp = data;
struct mgmt_rp_unpair_device rp; struct mgmt_rp_unpair_device rp;
struct hci_cp_disconnect dc; struct hci_conn_params *params;
struct mgmt_pending_cmd *cmd; struct mgmt_pending_cmd *cmd;
struct hci_conn *conn; struct hci_conn *conn;
u8 addr_type;
int err; int err;
memset(&rp, 0, sizeof(rp)); memset(&rp, 0, sizeof(rp));
...@@ -3087,36 +3069,23 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, ...@@ -3087,36 +3069,23 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
conn = NULL; conn = NULL;
err = hci_remove_link_key(hdev, &cp->addr.bdaddr); err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
} else { if (err < 0) {
u8 addr_type; err = mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_UNPAIR_DEVICE,
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, MGMT_STATUS_NOT_PAIRED, &rp,
&cp->addr.bdaddr); sizeof(rp));
if (conn) { goto unlock;
/* Defer clearing up the connection parameters
* until closing to give a chance of keeping
* them if a repairing happens.
*/
set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* If disconnection is not requested, then
* clear the connection variable so that the
* link is not terminated.
*/
if (!cp->disconnect)
conn = NULL;
} }
if (cp->addr.type == BDADDR_LE_PUBLIC) goto done;
addr_type = ADDR_LE_DEV_PUBLIC; }
else
addr_type = ADDR_LE_DEV_RANDOM;
hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type); /* LE address type */
addr_type = le_addr_type(cp->addr.type);
err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type); hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
}
err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
if (err < 0) { if (err < 0) {
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
MGMT_STATUS_NOT_PAIRED, &rp, MGMT_STATUS_NOT_PAIRED, &rp,
...@@ -3124,6 +3093,36 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, ...@@ -3124,6 +3093,36 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock; goto unlock;
} }
conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
if (!conn) {
hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
goto done;
}
/* Abort any ongoing SMP pairing */
smp_cancel_pairing(conn);
/* Defer clearing up the connection parameters until closing to
* give a chance of keeping them if a repairing happens.
*/
set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Disable auto-connection parameters if present */
params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
if (params) {
if (params->explicit_connect)
params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
else
params->auto_connect = HCI_AUTO_CONN_DISABLED;
}
/* If disconnection is not requested, then clear the connection
* variable so that the link is not terminated.
*/
if (!cp->disconnect)
conn = NULL;
done:
/* If the connection variable is set, then termination of the /* If the connection variable is set, then termination of the
* link is requested. * link is requested.
*/ */
...@@ -3143,9 +3142,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, ...@@ -3143,9 +3142,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
cmd->cmd_complete = addr_cmd_complete; cmd->cmd_complete = addr_cmd_complete;
dc.handle = cpu_to_le16(conn->handle); err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
dc.reason = 0x13; /* Remote User Terminated Connection */
err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
if (err < 0) if (err < 0)
mgmt_pending_remove(cmd); mgmt_pending_remove(cmd);
...@@ -3193,7 +3190,8 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, ...@@ -3193,7 +3190,8 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
&cp->addr.bdaddr); &cp->addr.bdaddr);
else else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
le_addr_type(cp->addr.type));
if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) { if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
...@@ -3544,16 +3542,9 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, ...@@ -3544,16 +3542,9 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level, conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
auth_type); auth_type);
} else { } else {
u8 addr_type; u8 addr_type = le_addr_type(cp->addr.type);
struct hci_conn_params *p; struct hci_conn_params *p;
/* Convert from L2CAP channel address type to HCI address type
*/
if (cp->addr.type == BDADDR_LE_PUBLIC)
addr_type = ADDR_LE_DEV_PUBLIC;
else
addr_type = ADDR_LE_DEV_RANDOM;
/* When pairing a new device, it is expected to remember /* When pairing a new device, it is expected to remember
* this device for future connections. Adding the connection * this device for future connections. Adding the connection
* parameter information ahead of time allows tracking * parameter information ahead of time allows tracking
...@@ -3697,7 +3688,8 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev, ...@@ -3697,7 +3688,8 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
if (addr->type == BDADDR_BREDR) if (addr->type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr); conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
else else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr); conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
le_addr_type(addr->type));
if (!conn) { if (!conn) {
err = mgmt_cmd_complete(sk, hdev->id, mgmt_op, err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
...@@ -5600,14 +5592,9 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data, ...@@ -5600,14 +5592,9 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
for (i = 0; i < irk_count; i++) { for (i = 0; i < irk_count; i++) {
struct mgmt_irk_info *irk = &cp->irks[i]; struct mgmt_irk_info *irk = &cp->irks[i];
u8 addr_type;
if (irk->addr.type == BDADDR_LE_PUBLIC)
addr_type = ADDR_LE_DEV_PUBLIC;
else
addr_type = ADDR_LE_DEV_RANDOM;
hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val, hci_add_irk(hdev, &irk->addr.bdaddr,
le_addr_type(irk->addr.type), irk->val,
BDADDR_ANY); BDADDR_ANY);
} }
...@@ -5687,12 +5674,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, ...@@ -5687,12 +5674,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
for (i = 0; i < key_count; i++) { for (i = 0; i < key_count; i++) {
struct mgmt_ltk_info *key = &cp->keys[i]; struct mgmt_ltk_info *key = &cp->keys[i];
u8 type, addr_type, authenticated; u8 type, authenticated;
if (key->addr.type == BDADDR_LE_PUBLIC)
addr_type = ADDR_LE_DEV_PUBLIC;
else
addr_type = ADDR_LE_DEV_RANDOM;
switch (key->type) { switch (key->type) {
case MGMT_LTK_UNAUTHENTICATED: case MGMT_LTK_UNAUTHENTICATED:
...@@ -5718,9 +5700,9 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, ...@@ -5718,9 +5700,9 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
continue; continue;
} }
hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type, hci_add_ltk(hdev, &key->addr.bdaddr,
authenticated, key->val, key->enc_size, key->ediv, le_addr_type(key->addr.type), type, authenticated,
key->rand); key->val, key->enc_size, key->ediv, key->rand);
} }
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0, err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
...@@ -6232,10 +6214,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, ...@@ -6232,10 +6214,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
goto added; goto added;
} }
if (cp->addr.type == BDADDR_LE_PUBLIC) addr_type = le_addr_type(cp->addr.type);
addr_type = ADDR_LE_DEV_PUBLIC;
else
addr_type = ADDR_LE_DEV_RANDOM;
if (cp->action == 0x02) if (cp->action == 0x02)
auto_conn = HCI_AUTO_CONN_ALWAYS; auto_conn = HCI_AUTO_CONN_ALWAYS;
...@@ -6364,10 +6343,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev, ...@@ -6364,10 +6343,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
goto complete; goto complete;
} }
if (cp->addr.type == BDADDR_LE_PUBLIC) addr_type = le_addr_type(cp->addr.type);
addr_type = ADDR_LE_DEV_PUBLIC;
else
addr_type = ADDR_LE_DEV_RANDOM;
/* Kernel internally uses conn_params with resolvable private /* Kernel internally uses conn_params with resolvable private
* address, but Remove Device allows only identity addresses. * address, but Remove Device allows only identity addresses.
...@@ -7873,27 +7849,13 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent) ...@@ -7873,27 +7849,13 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL); mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
} }
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk) void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{ {
struct mgmt_ev_new_irk ev; struct mgmt_ev_new_irk ev;
memset(&ev, 0, sizeof(ev)); memset(&ev, 0, sizeof(ev));
/* For identity resolving keys from devices that are already ev.store_hint = persistent;
* using a public address or static random address, do not
* ask for storing this key. The identity resolving key really
* is only mandatory for devices using resolvable random
* addresses.
*
* Storing all identity resolving keys has the downside that
* they will be also loaded on next boot of they system. More
* identity resolving keys, means more time during scanning is
* needed to actually resolve these addresses.
*/
if (bacmp(&irk->rpa, BDADDR_ANY))
ev.store_hint = 0x01;
else
ev.store_hint = 0x00;
bacpy(&ev.rpa, &irk->rpa); bacpy(&ev.rpa, &irk->rpa);
bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr); bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
......
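For LE pairs, unpair_device() now removes the IRK and LTK, cancels any in-flight SMP pairing, defers deleting the connection parameters until the link closes, and winds down auto-connection (explicit-connect entries fall back to HCI_AUTO_CONN_EXPLICIT, everything else is disabled) before optionally aborting the link; if no connection exists, the parameters are deleted straight away. A plain-C sketch of that decision ladder (the structs and enum values are illustrative stand-ins, not the mgmt structures):

    #include <stdbool.h>
    #include <stdio.h>

    enum auto_conn { AUTO_DISABLED, AUTO_EXPLICIT, AUTO_ALWAYS };

    struct conn_params {
        bool explicit_connect;
        enum auto_conn auto_connect;
    };

    struct le_peer {
        bool connected;
        bool pairing_in_progress;
        struct conn_params *params; /* may be NULL */
    };

    /* Unpair an LE peer: keys go first, then pairing is cancelled and
     * auto-connection is wound down; the link itself is only terminated
     * when the caller asked for a disconnect.
     */
    static void unpair_le(struct le_peer *peer, bool disconnect)
    {
        printf("removing IRK and LTK\n");

        if (!peer->connected) {
            printf("deleting connection parameters\n");
            return;
        }

        if (peer->pairing_in_progress)
            printf("cancelling SMP pairing\n");

        if (peer->params)
            peer->params->auto_connect = peer->params->explicit_connect ?
                                         AUTO_EXPLICIT : AUTO_DISABLED;

        if (disconnect)
            printf("aborting connection (remote user terminated)\n");
    }

    int main(void)
    {
        struct conn_params p = { .explicit_connect = true,
                                 .auto_connect = AUTO_ALWAYS };
        struct le_peer peer = { .connected = true,
                                .pairing_in_progress = true,
                                .params = &p };

        unpair_le(&peer, true);
        printf("auto_connect now %d\n", p.auto_connect);
        return 0;
    }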
...@@ -811,7 +811,6 @@ static void smp_failure(struct l2cap_conn *conn, u8 reason) ...@@ -811,7 +811,6 @@ static void smp_failure(struct l2cap_conn *conn, u8 reason)
smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
&reason); &reason);
clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags);
mgmt_auth_failed(hcon, HCI_ERROR_AUTH_FAILURE); mgmt_auth_failed(hcon, HCI_ERROR_AUTH_FAILURE);
if (chan->data) if (chan->data)
...@@ -1046,8 +1045,24 @@ static void smp_notify_keys(struct l2cap_conn *conn) ...@@ -1046,8 +1045,24 @@ static void smp_notify_keys(struct l2cap_conn *conn)
struct smp_cmd_pairing *rsp = (void *) &smp->prsp[1]; struct smp_cmd_pairing *rsp = (void *) &smp->prsp[1];
bool persistent; bool persistent;
if (hcon->type == ACL_LINK) {
if (hcon->key_type == HCI_LK_DEBUG_COMBINATION)
persistent = false;
else
persistent = !test_bit(HCI_CONN_FLUSH_KEY,
&hcon->flags);
} else {
/* The LTKs, IRKs and CSRKs should be persistent only if
* both sides had the bonding bit set in their
* authentication requests.
*/
persistent = !!((req->auth_req & rsp->auth_req) &
SMP_AUTH_BONDING);
}
if (smp->remote_irk) { if (smp->remote_irk) {
mgmt_new_irk(hdev, smp->remote_irk); mgmt_new_irk(hdev, smp->remote_irk, persistent);
/* Now that user space can be considered to know the /* Now that user space can be considered to know the
* identity address track the connection based on it * identity address track the connection based on it
* from now on (assuming this is an LE link). * from now on (assuming this is an LE link).
...@@ -1075,21 +1090,6 @@ static void smp_notify_keys(struct l2cap_conn *conn) ...@@ -1075,21 +1090,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
} }
} }
if (hcon->type == ACL_LINK) {
if (hcon->key_type == HCI_LK_DEBUG_COMBINATION)
persistent = false;
else
persistent = !test_bit(HCI_CONN_FLUSH_KEY,
&hcon->flags);
} else {
/* The LTKs and CSRKs should be persistent only if both sides
* had the bonding bit set in their authentication requests.
*/
persistent = !!((req->auth_req & rsp->auth_req) &
SMP_AUTH_BONDING);
}
if (smp->csrk) { if (smp->csrk) {
smp->csrk->bdaddr_type = hcon->dst_type; smp->csrk->bdaddr_type = hcon->dst_type;
bacpy(&smp->csrk->bdaddr, &hcon->dst); bacpy(&smp->csrk->bdaddr, &hcon->dst);
...@@ -2380,6 +2380,32 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) ...@@ -2380,6 +2380,32 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
return ret; return ret;
} }
void smp_cancel_pairing(struct hci_conn *hcon)
{
struct l2cap_conn *conn = hcon->l2cap_data;
struct l2cap_chan *chan;
struct smp_chan *smp;
if (!conn)
return;
chan = conn->smp;
if (!chan)
return;
l2cap_chan_lock(chan);
smp = chan->data;
if (smp) {
if (test_bit(SMP_FLAG_COMPLETE, &smp->flags))
smp_failure(conn, 0);
else
smp_failure(conn, SMP_UNSPECIFIED);
}
l2cap_chan_unlock(chan);
}
static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb) static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
{ {
struct smp_cmd_encrypt_info *rp = (void *) skb->data; struct smp_cmd_encrypt_info *rp = (void *) skb->data;
......
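The persistence decision in smp_notify_keys() is now computed before any key is reported, so the IRK's store_hint reflects it too: on BR/EDR a debug combination key is never stored and otherwise persistence follows the flush-key flag, while on LE the keys persist only if both sides set the bonding bit. A small sketch of that rule (flag names shortened, values invented for the example):

    #include <stdbool.h>
    #include <stdio.h>

    #define AUTH_BONDING 0x01

    /* Decide whether SMP-distributed keys (LTK, IRK, CSRK) should be stored
     * persistently - the rule smp_notify_keys() now applies up front.
     */
    static bool keys_persistent(bool bredr_link, bool debug_key, bool flush_key,
                                unsigned char local_auth,
                                unsigned char remote_auth)
    {
        if (bredr_link) {
            if (debug_key)
                return false;      /* never keep debug keys */
            return !flush_key;     /* honour the flush-key flag */
        }

        /* LE: both initiator and responder must have requested bonding. */
        return (local_auth & remote_auth & AUTH_BONDING) != 0;
    }

    int main(void)
    {
        printf("LE, both bonding:  %d\n",
               keys_persistent(false, false, false, 0x01, 0x01));
        printf("LE, one side only: %d\n",
               keys_persistent(false, false, false, 0x01, 0x00));
        printf("BR/EDR, debug key: %d\n",
               keys_persistent(true, true, false, 0, 0));
        return 0;
    }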
...@@ -180,6 +180,7 @@ enum smp_key_pref { ...@@ -180,6 +180,7 @@ enum smp_key_pref {
}; };
/* SMP Commands */ /* SMP Commands */
void smp_cancel_pairing(struct hci_conn *hcon);
bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level, bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
enum smp_key_pref key_pref); enum smp_key_pref key_pref);
int smp_conn_security(struct hci_conn *hcon, __u8 sec_level); int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
......
...@@ -90,36 +90,12 @@ static lowpan_rx_result lowpan_rx_h_frag(struct sk_buff *skb) ...@@ -90,36 +90,12 @@ static lowpan_rx_result lowpan_rx_h_frag(struct sk_buff *skb)
int lowpan_iphc_decompress(struct sk_buff *skb) int lowpan_iphc_decompress(struct sk_buff *skb)
{ {
struct ieee802154_addr_sa sa, da;
struct ieee802154_hdr hdr; struct ieee802154_hdr hdr;
u8 iphc0, iphc1;
void *sap, *dap;
if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
return -EINVAL; return -EINVAL;
raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len); return lowpan_header_decompress(skb, skb->dev, &hdr.dest, &hdr.source);
if (lowpan_fetch_skb_u8(skb, &iphc0) ||
lowpan_fetch_skb_u8(skb, &iphc1))
return -EINVAL;
ieee802154_addr_to_sa(&sa, &hdr.source);
ieee802154_addr_to_sa(&da, &hdr.dest);
if (sa.addr_type == IEEE802154_ADDR_SHORT)
sap = &sa.short_addr;
else
sap = &sa.hwaddr;
if (da.addr_type == IEEE802154_ADDR_SHORT)
dap = &da.short_addr;
else
dap = &da.hwaddr;
return lowpan_header_decompress(skb, skb->dev, sap, sa.addr_type,
IEEE802154_ADDR_LEN, dap, da.addr_type,
IEEE802154_ADDR_LEN, iphc0, iphc1);
} }
static lowpan_rx_result lowpan_rx_h_iphc(struct sk_buff *skb) static lowpan_rx_result lowpan_rx_h_iphc(struct sk_buff *skb)
...@@ -308,16 +284,16 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *wdev, ...@@ -308,16 +284,16 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *wdev,
if (wdev->type != ARPHRD_IEEE802154 || if (wdev->type != ARPHRD_IEEE802154 ||
skb->pkt_type == PACKET_OTHERHOST || skb->pkt_type == PACKET_OTHERHOST ||
!lowpan_rx_h_check(skb)) !lowpan_rx_h_check(skb))
return NET_RX_DROP; goto drop;
ldev = wdev->ieee802154_ptr->lowpan_dev; ldev = wdev->ieee802154_ptr->lowpan_dev;
if (!ldev || !netif_running(ldev)) if (!ldev || !netif_running(ldev))
return NET_RX_DROP; goto drop;
/* Replacing skb->dev and followed rx handlers will manipulate skb. */ /* Replacing skb->dev and followed rx handlers will manipulate skb. */
skb = skb_share_check(skb, GFP_ATOMIC); skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb) if (!skb)
return NET_RX_DROP; goto out;
skb->dev = ldev; skb->dev = ldev;
/* When receive frag1 it's likely that we manipulate the buffer. /* When receive frag1 it's likely that we manipulate the buffer.
...@@ -328,10 +304,15 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *wdev, ...@@ -328,10 +304,15 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *wdev,
lowpan_is_iphc(*skb_network_header(skb))) { lowpan_is_iphc(*skb_network_header(skb))) {
skb = skb_unshare(skb, GFP_ATOMIC); skb = skb_unshare(skb, GFP_ATOMIC);
if (!skb) if (!skb)
return NET_RX_DROP; goto out;
} }
return lowpan_invoke_rx_handlers(skb); return lowpan_invoke_rx_handlers(skb);
drop:
kfree_skb(skb);
out:
return NET_RX_DROP;
} }
static struct packet_type lowpan_packet_type = { static struct packet_type lowpan_packet_type = {
......
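lowpan_rcv() now funnels its failure paths through two labels: "drop" frees the skb it still owns, while "out" is used after skb_share_check() or skb_unshare() returned NULL (the buffer has already been consumed), and both return NET_RX_DROP. The same "free only what you still own" shape in a self-contained C function (plain malloc/free stand in for the skb handling):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define RX_OK   0
    #define RX_DROP 1

    /* Returns a private copy of buf, or NULL on allocation failure - the
     * stand-in for skb_share_check()/skb_unshare(), which consume the
     * buffer either way.
     */
    static char *make_private_copy(char *buf)
    {
        char *copy = strdup(buf);

        free(buf); /* original is consumed regardless of the outcome */
        return copy;
    }

    static int rcv(char *buf, int malformed)
    {
        if (malformed)
            goto drop;           /* we still own buf: free it */

        buf = make_private_copy(buf);
        if (!buf)
            goto out;            /* nothing left to free */

        printf("delivering: %s\n", buf);
        free(buf);
        return RX_OK;

    drop:
        free(buf);
    out:
        return RX_DROP;
    }

    int main(void)
    {
        rcv(strdup("good frame"), 0);
        rcv(strdup("bad frame"), 1);
        return 0;
    }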
...@@ -14,6 +14,9 @@ ...@@ -14,6 +14,9 @@
#include "6lowpan_i.h" #include "6lowpan_i.h"
#define LOWPAN_FRAG1_HEAD_SIZE 0x4
#define LOWPAN_FRAGN_HEAD_SIZE 0x5
/* don't save pan id, it's intra pan */ /* don't save pan id, it's intra pan */
struct lowpan_addr { struct lowpan_addr {
u8 mode; u8 mode;
...@@ -218,7 +221,7 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *ldev, ...@@ -218,7 +221,7 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
saddr = &info.saddr.u.extended_addr; saddr = &info.saddr.u.extended_addr;
*dgram_size = skb->len; *dgram_size = skb->len;
lowpan_header_compress(skb, ldev, ETH_P_IPV6, daddr, saddr, skb->len); lowpan_header_compress(skb, ldev, daddr, saddr);
/* dgram_offset = (saved bytes after compression) + lowpan header len */ /* dgram_offset = (saved bytes after compression) + lowpan header len */
*dgram_offset = (*dgram_size - skb->len) + skb_network_header_len(skb); *dgram_offset = (*dgram_size - skb->len) + skb_network_header_len(skb);
...@@ -235,7 +238,7 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *ldev, ...@@ -235,7 +238,7 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
/* if the destination address is the broadcast address, use the /* if the destination address is the broadcast address, use the
* corresponding short address * corresponding short address
*/ */
if (lowpan_is_addr_broadcast((const u8 *)daddr)) { if (!memcmp(daddr, ldev->broadcast, EUI64_ADDR_LEN)) {
da.mode = IEEE802154_ADDR_SHORT; da.mode = IEEE802154_ADDR_SHORT;
da.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST); da.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
cb->ackreq = false; cb->ackreq = false;
......
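Instead of the removed lowpan_is_addr_broadcast() helper, the transmit path now compares the destination against the net device's own broadcast address over the EUI-64 length before falling back to the 16-bit 802.15.4 broadcast short address. A minimal sketch of that check (the constants are the usual 802.15.4 values; the all-ones broadcast address is an assumption made for the example):

    #include <stdio.h>
    #include <string.h>

    #define EUI64_ADDR_LEN            8
    #define IEEE802154_ADDR_BROADCAST 0xffff

    int main(void)
    {
        /* assume the lowpan device reports an all-ones EUI-64 broadcast */
        unsigned char dev_broadcast[EUI64_ADDR_LEN];
        unsigned char daddr[EUI64_ADDR_LEN];

        memset(dev_broadcast, 0xff, sizeof(dev_broadcast));
        memset(daddr, 0xff, sizeof(daddr));

        if (!memcmp(daddr, dev_broadcast, EUI64_ADDR_LEN))
            /* use the short broadcast address and skip the ack request */
            printf("broadcast: short addr 0x%04x, ackreq off\n",
                   IEEE802154_ADDR_BROADCAST);
        else
            printf("unicast destination\n");
        return 0;
    }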
...@@ -55,7 +55,7 @@ void mac802154_llsec_destroy(struct mac802154_llsec *sec) ...@@ -55,7 +55,7 @@ void mac802154_llsec_destroy(struct mac802154_llsec *sec)
msl = container_of(sl, struct mac802154_llsec_seclevel, level); msl = container_of(sl, struct mac802154_llsec_seclevel, level);
list_del(&sl->list); list_del(&sl->list);
kfree(msl); kzfree(msl);
} }
list_for_each_entry_safe(dev, dn, &sec->table.devices, list) { list_for_each_entry_safe(dev, dn, &sec->table.devices, list) {
...@@ -72,7 +72,7 @@ void mac802154_llsec_destroy(struct mac802154_llsec *sec) ...@@ -72,7 +72,7 @@ void mac802154_llsec_destroy(struct mac802154_llsec *sec)
mkey = container_of(key->key, struct mac802154_llsec_key, key); mkey = container_of(key->key, struct mac802154_llsec_key, key);
list_del(&key->list); list_del(&key->list);
llsec_key_put(mkey); llsec_key_put(mkey);
kfree(key); kzfree(key);
} }
} }
...@@ -161,7 +161,7 @@ llsec_key_alloc(const struct ieee802154_llsec_key *template) ...@@ -161,7 +161,7 @@ llsec_key_alloc(const struct ieee802154_llsec_key *template)
if (key->tfm[i]) if (key->tfm[i])
crypto_free_aead(key->tfm[i]); crypto_free_aead(key->tfm[i]);
kfree(key); kzfree(key);
return NULL; return NULL;
} }
...@@ -176,7 +176,7 @@ static void llsec_key_release(struct kref *ref) ...@@ -176,7 +176,7 @@ static void llsec_key_release(struct kref *ref)
crypto_free_aead(key->tfm[i]); crypto_free_aead(key->tfm[i]);
crypto_free_blkcipher(key->tfm0); crypto_free_blkcipher(key->tfm0);
kfree(key); kzfree(key);
} }
static struct mac802154_llsec_key* static struct mac802154_llsec_key*
...@@ -267,7 +267,7 @@ int mac802154_llsec_key_add(struct mac802154_llsec *sec, ...@@ -267,7 +267,7 @@ int mac802154_llsec_key_add(struct mac802154_llsec *sec,
return 0; return 0;
fail: fail:
kfree(new); kzfree(new);
return -ENOMEM; return -ENOMEM;
} }
...@@ -347,10 +347,10 @@ static void llsec_dev_free(struct mac802154_llsec_device *dev) ...@@ -347,10 +347,10 @@ static void llsec_dev_free(struct mac802154_llsec_device *dev)
devkey); devkey);
list_del(&pos->list); list_del(&pos->list);
kfree(devkey); kzfree(devkey);
} }
kfree(dev); kzfree(dev);
} }
int mac802154_llsec_dev_add(struct mac802154_llsec *sec, int mac802154_llsec_dev_add(struct mac802154_llsec *sec,
...@@ -681,7 +681,7 @@ llsec_do_encrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec, ...@@ -681,7 +681,7 @@ llsec_do_encrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
rc = crypto_aead_encrypt(req); rc = crypto_aead_encrypt(req);
kfree(req); kzfree(req);
return rc; return rc;
} }
...@@ -881,7 +881,7 @@ llsec_do_decrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec, ...@@ -881,7 +881,7 @@ llsec_do_decrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,
rc = crypto_aead_decrypt(req); rc = crypto_aead_decrypt(req);
kfree(req); kzfree(req);
skb_trim(skb, skb->len - authlen); skb_trim(skb, skb->len - authlen);
return rc; return rc;
...@@ -921,7 +921,7 @@ llsec_update_devkey_record(struct mac802154_llsec_device *dev, ...@@ -921,7 +921,7 @@ llsec_update_devkey_record(struct mac802154_llsec_device *dev,
if (!devkey) if (!devkey)
list_add_rcu(&next->devkey.list, &dev->dev.keys); list_add_rcu(&next->devkey.list, &dev->dev.keys);
else else
kfree(next); kzfree(next);
spin_unlock_bh(&dev->lock); spin_unlock_bh(&dev->lock);
} }
......
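The llsec changes swap kfree() for kzfree() wherever key material, AEAD requests or device key records are released, so freed heap memory never keeps stale copies of secrets. In userspace the equivalent idea is to wipe before freeing; a tiny illustrative helper follows (note that a plain memset on soon-to-be-freed memory can be optimised away, which is why hardened primitives such as the kernel's kzfree or C11 Annex K's memset_s exist):

    #include <stdlib.h>
    #include <string.h>

    /* Zero a buffer before handing it back to the allocator - a userspace
     * analogue of what switching kfree() to kzfree() buys the llsec code.
     * The volatile function pointer discourages the compiler from dropping
     * the "dead" memset.
     */
    static void *(*volatile wipe)(void *, int, size_t) = memset;

    static void secure_free(void *p, size_t len)
    {
        if (!p)
            return;
        wipe(p, 0, len);
        free(p);
    }

    int main(void)
    {
        size_t key_len = 16;
        unsigned char *key = malloc(key_len);

        if (!key)
            return 1;
        memcpy(key, "0123456789abcdef", key_len);
        /* use the key ... then make sure no plaintext copy lingers */
        secure_free(key, key_len);
        return 0;
    }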