Commit e9df2323 authored by Maksim Krasnyanskiy's avatar Maksim Krasnyanskiy

[PATCH] Bluetooth subsystem sync up

This updates the 2.5.x Bluetooth subsystem and removes the EXPERIMENTAL
status of Bluetooth support.

         BlueZ Core:
                 New generic HCI connection manager.
                 Complete role switch and link policy support.
                 Security mode 1 and 3 support.
                 L2CAP service level security support.
                 HCI filter support.
                 HCI frame time-stamps.
                 SCO (voice links) support.
                 Improved HCI device unregistration (device destructors).
                 Support for L2CAP signalling frame fragmentation.
                 Improved L2CAP timeout handling.
                 New HCI ioctls for changing ACL and SCO MTU.
                 Killed HCI_MAX_DEV limit.
                 Security fixes.

         HCI USB driver:
                 Performance improvements.
                 Firmware loading support.
                 Stability fixes. URB and disconnect handling rewrite.

         HCI UART driver:
                 Support for multiple UART protocols.

         HCI PCMCIA driver:
                 Support for Nokia Bluetooth PC Cards.
                 Support for Anycom Bluetooth PC/CF Cards.
parent 0501bce1
......@@ -372,9 +372,7 @@ endmenu
source drivers/usb/Config.in
source drivers/input/Config.in
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
source net/bluetooth/Config.in
fi
source net/bluetooth/Config.in
mainmenu_option next_comment
comment 'Kernel hacking'
......
......@@ -655,9 +655,7 @@ source drivers/misc/Config.in
source drivers/usb/Config.in
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
source net/bluetooth/Config.in
fi
source net/bluetooth/Config.in
mainmenu_option next_comment
comment 'Kernel hacking'
......
......@@ -392,9 +392,7 @@ endmenu
source drivers/usb/Config.in
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
source net/bluetooth/Config.in
fi
source net/bluetooth/Config.in
mainmenu_option next_comment
comment 'Kernel hacking'
......
......@@ -226,9 +226,7 @@ source drivers/usb/Config.in
source lib/Config.in
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
source net/bluetooth/Config.in
fi
source net/bluetooth/Config.in
fi # !HP_SIM
......
......@@ -589,9 +589,7 @@ fi
source drivers/usb/Config.in
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
source net/bluetooth/Config.in
fi
source net/bluetooth/Config.in
source lib/Config.in
......
......@@ -267,9 +267,7 @@ endmenu
source drivers/usb/Config.in
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
source net/bluetooth/Config.in
fi
source net/bluetooth/Config.in
mainmenu_option next_comment
comment 'Watchdog'
......
......@@ -199,9 +199,7 @@ endmenu
source drivers/usb/Config.in
if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
source net/bluetooth/Config.in
fi
source net/bluetooth/Config.in
mainmenu_option next_comment
comment 'Kernel hacking'
......
HCI UART driver
CONFIG_BLUEZ_HCIUART
Bluetooth HCI UART driver.
This driver is required if you want to use Bluetooth devices with
serial port interface.
serial port interface. You will also need this driver if you have
UART based Bluetooth PCMCIA and CF devices like Xircom Credit Card
adapter and BrainBoxes Bluetooth PC Card.
Say Y here to compile support for Bluetooth UART devices into the
kernel or say M to compile it as module (hci_uart.o).
HCI UART (H4) protocol support
CONFIG_BLUEZ_HCIUART_H4
UART (H4) is a serial protocol for communication between the Bluetooth
device and the host. This protocol is required for most UART based
Bluetooth devices (including PCMCIA and CF).
Say Y here to compile support for HCI UART (H4) protocol.
HCI USB driver
CONFIG_BLUEZ_HCIUSB
Bluetooth HCI USB driver.
This driver is required if you want to use Bluetooth devices with
......@@ -14,6 +26,24 @@ CONFIG_BLUEZ_HCIUSB
Say Y here to compile support for Bluetooth USB devices into the
kernel or say M to compile it as module (hci_usb.o).
HCI USB firmware download support
CONFIG_BLUEZ_USB_FW_LOAD
Firmware download support for Bluetooth USB devices.
This support is required for devices like Broadcom BCM2033.
The HCI USB driver uses an external firmware downloader program provided
in the BlueFW package.
For more information, see <http://bluez.sf.net/>.
HCI USB zero packet support
CONFIG_BLUEZ_USB_ZERO_PACKET
Support for USB zero packets.
This option is provided only as a workaround for buggy Bluetooth USB
devices. Do _not_ enable it unless you know for sure that your device
requires zero packets.
Most people should say N here.
HCI VHCI Virtual HCI device driver
CONFIG_BLUEZ_HCIVHCI
Bluetooth Virtual HCI device driver.
This driver is required if you want to use HCI Emulation software.
......@@ -21,3 +51,24 @@ CONFIG_BLUEZ_HCIVHCI
Say Y here to compile support for virtual HCI devices into the
kernel or say M to compile it as module (hci_vhci.o).
HCI DTL1 (PC Card) device driver
CONFIG_BLUEZ_HCIDTL1
Bluetooth HCI DTL1 (PC Card) driver.
This driver provides support for Bluetooth PCMCIA devices with
the Nokia DTL1 interface:
Nokia Bluetooth Card
Socket Bluetooth CF Card
Say Y here to compile support for HCI DTL1 devices into the
kernel or say M to compile it as module (dtl1_cs.o).
HCI BlueCard (PC Card) device driver
CONFIG_BLUEZ_HCIBLUECARD
Bluetooth HCI BlueCard (PC Card) driver.
This driver provides support for Bluetooth PCMCIA devices with
the Anycom BlueCard interface:
Anycom Bluetooth PC Card
Anycom Bluetooth CF Card
Say Y here to compile support for HCI BlueCard devices into the
kernel or say M to compile it as module (bluecard_cs.o).
......@@ -2,7 +2,20 @@ mainmenu_option next_comment
comment 'Bluetooth device drivers'
dep_tristate 'HCI USB driver' CONFIG_BLUEZ_HCIUSB $CONFIG_BLUEZ $CONFIG_USB
if [ "$CONFIG_BLUEZ_HCIUSB" != "n" ]; then
bool ' Firmware download support' CONFIG_BLUEZ_USB_FW_LOAD
bool ' USB zero packet support' CONFIG_BLUEZ_USB_ZERO_PACKET
fi
dep_tristate 'HCI UART driver' CONFIG_BLUEZ_HCIUART $CONFIG_BLUEZ
dep_tristate 'HCI VHCI virtual HCI device driver' CONFIG_BLUEZ_HCIVHCI $CONFIG_BLUEZ
if [ "$CONFIG_BLUEZ_HCIUART" != "n" ]; then
bool ' UART (H4) protocol support' CONFIG_BLUEZ_HCIUART_H4
fi
dep_tristate 'HCI DTL1 (PC Card) driver' CONFIG_BLUEZ_HCIDTL1 $CONFIG_PCMCIA $CONFIG_BLUEZ
dep_tristate 'HCI BlueCard (PC Card) driver' CONFIG_BLUEZ_HCIBLUECARD $CONFIG_PCMCIA $CONFIG_BLUEZ
dep_tristate 'HCI VHCI (Virtual HCI device) driver' CONFIG_BLUEZ_HCIVHCI $CONFIG_BLUEZ
endmenu
......@@ -4,8 +4,19 @@
O_TARGET := bluetooth.o
list-multi := hci_uart.o
obj-$(CONFIG_BLUEZ_HCIUSB) += hci_usb.o
obj-$(CONFIG_BLUEZ_HCIUART) += hci_uart.o
obj-$(CONFIG_BLUEZ_HCIVHCI) += hci_vhci.o
obj-$(CONFIG_BLUEZ_HCIUART) += hci_uart.o
uart-y := hci_ldisc.o
uart-$(CONFIG_BLUEZ_HCIUART_H4) += hci_h4.o
obj-$(CONFIG_BLUEZ_HCIDTL1) += dtl1_cs.o
obj-$(CONFIG_BLUEZ_HCIBLUECARD) += bluecard_cs.o
include $(TOPDIR)/Rules.make
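# hci_uart.o is a multi-part object: the objects collected in uart-y above
# (hci_ldisc.o plus the selected protocol objects) are partially linked
# into a single module with "ld -r".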
hci_uart.o: $(uart-y)
$(LD) -r -o $@ $(uart-y)
/*
*
* Bluetooth driver for the Anycom BlueCard (LSE039/LSE041)
*
* Copyright (C) 2001-2002 Marcel Holtmann <marcel@holtmann.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The initial developer of the original code is David A. Hinds
* <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
* are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
*
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <asm/io.h>
#include <pcmcia/version.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
#include <pcmcia/ds.h>
#include <pcmcia/cisreg.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
/* ======================== Module parameters ======================== */
/* Bit map of interrupts to choose from */
static u_int irq_mask = 0x86bc;
static int irq_list[4] = { -1 };
MODULE_PARM(irq_mask, "i");
MODULE_PARM(irq_list, "1-4i");
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("BlueZ driver for the Anycom BlueCard (LSE039/LSE041)");
MODULE_LICENSE("GPL");
/* ======================== Local structures ======================== */
typedef struct bluecard_info_t {
dev_link_t link;
dev_node_t node;
struct hci_dev hdev;
spinlock_t lock; /* For serializing operations */
struct timer_list timer; /* For LED control */
struct sk_buff_head txq;
unsigned long tx_state;
unsigned long rx_state;
unsigned long rx_count;
struct sk_buff *rx_skb;
unsigned char ctrl_reg;
unsigned long hw_state; /* Status of the hardware and LED control */
} bluecard_info_t;
void bluecard_config(dev_link_t * link);
void bluecard_release(u_long arg);
int bluecard_event(event_t event, int priority, event_callback_args_t * args);
static dev_info_t dev_info = "bluecard_cs";
dev_link_t *bluecard_attach(void);
void bluecard_detach(dev_link_t *);
dev_link_t *dev_list = NULL;
/* Default baud rate: 57600, 115200, 230400 or 460800 */
#define DEFAULT_BAUD_RATE 230400
/* Hardware states */
#define CARD_READY 1
#define CARD_HAS_PCCARD_ID 4
#define CARD_HAS_POWER_LED 5
#define CARD_HAS_ACTIVITY_LED 6
/* Transmit states */
#define XMIT_SENDING 1
#define XMIT_WAKEUP 2
#define XMIT_BUFFER_NUMBER 5 /* unset = buffer one, set = buffer two */
#define XMIT_BUF_ONE_READY 6
#define XMIT_BUF_TWO_READY 7
#define XMIT_SENDING_READY 8
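/*
 * The card provides two transmit buffers (window offsets 0x00 and 0x10).
 * bluecard_write_wakeup() ping-pongs between them: XMIT_BUFFER_NUMBER selects
 * the buffer to fill next, while XMIT_BUF_ONE_READY/XMIT_BUF_TWO_READY track
 * which buffers the card has drained (reported via the interrupt register).
 */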
/* Receiver states */
#define RECV_WAIT_PACKET_TYPE 0
#define RECV_WAIT_EVENT_HEADER 1
#define RECV_WAIT_ACL_HEADER 2
#define RECV_WAIT_SCO_HEADER 3
#define RECV_WAIT_DATA 4
/* Special packet types */
#define PKT_BAUD_RATE_57600 0x80
#define PKT_BAUD_RATE_115200 0x81
#define PKT_BAUD_RATE_230400 0x82
#define PKT_BAUD_RATE_460800 0x83
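/*
 * These pseudo packet types never reach the HCI layer: bluecard_hci_set_baud_rate()
 * queues a vendor command tagged with one of them, and bluecard_write_wakeup()
 * uses the tag to disable RTS, change the baud rate on the card and enable
 * RTS again once the command has been sent.
 */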
/* These are the register offsets */
#define REG_COMMAND 0x20
#define REG_INTERRUPT 0x21
#define REG_CONTROL 0x22
#define REG_RX_CONTROL 0x24
#define REG_CARD_RESET 0x30
#define REG_LED_CTRL 0x30
/* REG_COMMAND */
#define REG_COMMAND_TX_BUF_ONE 0x01
#define REG_COMMAND_TX_BUF_TWO 0x02
#define REG_COMMAND_RX_BUF_ONE 0x04
#define REG_COMMAND_RX_BUF_TWO 0x08
#define REG_COMMAND_RX_WIN_ONE 0x00
#define REG_COMMAND_RX_WIN_TWO 0x10
/* REG_CONTROL */
#define REG_CONTROL_BAUD_RATE_57600 0x00
#define REG_CONTROL_BAUD_RATE_115200 0x01
#define REG_CONTROL_BAUD_RATE_230400 0x02
#define REG_CONTROL_BAUD_RATE_460800 0x03
#define REG_CONTROL_RTS 0x04
#define REG_CONTROL_BT_ON 0x08
#define REG_CONTROL_BT_RESET 0x10
#define REG_CONTROL_BT_RES_PU 0x20
#define REG_CONTROL_INTERRUPT 0x40
#define REG_CONTROL_CARD_RESET 0x80
/* REG_RX_CONTROL */
#define RTS_LEVEL_SHIFT_BITS 0x02
/* ======================== LED handling routines ======================== */
void bluecard_activity_led_timeout(u_long arg)
{
bluecard_info_t *info = (bluecard_info_t *) arg;
unsigned int iobase = info->link.io.BasePort1;
if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) {
/* Disable activity LED */
outb(0x08 | 0x20, iobase + 0x30);
} else {
/* Disable power LED */
outb(0x00, iobase + 0x30);
}
}
static void bluecard_enable_activity_led(bluecard_info_t * info)
{
unsigned int iobase = info->link.io.BasePort1;
if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) {
/* Enable activity LED */
outb(0x10 | 0x40, iobase + 0x30);
/* Stop the LED after HZ/4 */
mod_timer(&(info->timer), jiffies + HZ / 4);
} else {
/* Enable power LED */
outb(0x08 | 0x20, iobase + 0x30);
/* Stop the LED after HZ/2 */
mod_timer(&(info->timer), jiffies + HZ / 2);
}
}
/* ======================== Interrupt handling ======================== */
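/* Copy up to 15 bytes of a frame into a transmit window: the first port of
 * the window receives the byte count, the following ports the payload. */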
static int bluecard_write(unsigned int iobase, unsigned int offset,
__u8 * buf, int len)
{
int i, actual;
actual = (len > 15) ? 15 : len;
outb_p(actual, iobase + offset);
for (i = 0; i < actual; i++)
outb_p(buf[i], iobase + offset + i + 1);
return actual;
}
static void bluecard_write_wakeup(bluecard_info_t * info)
{
if (!info) {
printk(KERN_WARNING "bluecard_cs: Call of write_wakeup for unknown device.\n");
return;
}
if (!test_bit(XMIT_SENDING_READY, &(info->tx_state)))
return;
if (test_and_set_bit(XMIT_SENDING, &(info->tx_state))) {
set_bit(XMIT_WAKEUP, &(info->tx_state));
return;
}
do {
register unsigned int iobase = info->link.io.BasePort1;
register unsigned int offset;
register unsigned char command;
register unsigned long ready_bit;
register struct sk_buff *skb;
register int len;
clear_bit(XMIT_WAKEUP, &(info->tx_state));
if (!(info->link.state & DEV_PRESENT))
return;
if (test_bit(XMIT_BUFFER_NUMBER, &(info->tx_state))) {
if (!test_bit
(XMIT_BUF_TWO_READY, &(info->tx_state)))
break;
offset = 0x10;
command = REG_COMMAND_TX_BUF_TWO;
ready_bit = XMIT_BUF_TWO_READY;
} else {
if (!test_bit
(XMIT_BUF_ONE_READY, &(info->tx_state)))
break;
offset = 0x00;
command = REG_COMMAND_TX_BUF_ONE;
ready_bit = XMIT_BUF_ONE_READY;
}
if (!(skb = skb_dequeue(&(info->txq))))
break;
if (skb->pkt_type & 0x80) {
/* Disable RTS */
info->ctrl_reg |= REG_CONTROL_RTS;
outb(info->ctrl_reg, iobase + REG_CONTROL);
}
/* Activate LED */
bluecard_enable_activity_led(info);
/* Send frame */
len = bluecard_write(iobase, offset, skb->data, skb->len);
/* Tell the FPGA to send the data */
outb_p(command, iobase + REG_COMMAND);
/* Mark the buffer as dirty */
clear_bit(ready_bit, &(info->tx_state));
if (skb->pkt_type & 0x80) {
wait_queue_head_t wait;
unsigned char baud_reg;
switch (skb->pkt_type) {
case PKT_BAUD_RATE_460800:
baud_reg = REG_CONTROL_BAUD_RATE_460800;
break;
case PKT_BAUD_RATE_230400:
baud_reg = REG_CONTROL_BAUD_RATE_230400;
break;
case PKT_BAUD_RATE_115200:
baud_reg = REG_CONTROL_BAUD_RATE_115200;
break;
case PKT_BAUD_RATE_57600:
/* Fall through... */
default:
baud_reg = REG_CONTROL_BAUD_RATE_57600;
break;
}
/* Wait until the command reaches the baseband */
init_waitqueue_head(&wait);
interruptible_sleep_on_timeout(&wait, HZ / 10);
/* Set baud on baseband */
info->ctrl_reg &= ~0x03;
info->ctrl_reg |= baud_reg;
outb(info->ctrl_reg, iobase + REG_CONTROL);
/* Enable RTS */
info->ctrl_reg &= ~REG_CONTROL_RTS;
outb(info->ctrl_reg, iobase + REG_CONTROL);
/* Wait before the next HCI packet can be sent */
interruptible_sleep_on_timeout(&wait, HZ);
}
if (len == skb->len) {
kfree_skb(skb);
} else {
skb_pull(skb, len);
skb_queue_head(&(info->txq), skb);
}
info->hdev.stat.byte_tx += len;
/* Change buffer */
change_bit(XMIT_BUFFER_NUMBER, &(info->tx_state));
} while (test_bit(XMIT_WAKEUP, &(info->tx_state)));
clear_bit(XMIT_SENDING, &(info->tx_state));
}
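/* Read one received frame from the card: the length byte sits at the window
 * base, the first 15 data bytes follow in receive window one, and anything
 * beyond that is read through receive window two. */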
static int bluecard_read(unsigned int iobase, unsigned int offset,
__u8 * buf, int size)
{
int i, n, len;
outb(REG_COMMAND_RX_WIN_ONE, iobase + REG_COMMAND);
len = inb(iobase + offset);
n = 0;
i = 1;
while (n < len) {
if (i == 16) {
outb(REG_COMMAND_RX_WIN_TWO, iobase + REG_COMMAND);
i = 0;
}
buf[n] = inb(iobase + offset + i);
n++;
i++;
}
return len;
}
static void bluecard_receive(bluecard_info_t * info, unsigned int offset)
{
unsigned int iobase;
unsigned char buf[31];
int i, len;
if (!info) {
printk(KERN_WARNING "bluecard_cs: Call of receive for unknown device.\n");
return;
}
iobase = info->link.io.BasePort1;
if (test_bit(XMIT_SENDING_READY, &(info->tx_state)))
bluecard_enable_activity_led(info);
len = bluecard_read(iobase, offset, buf, sizeof(buf));
for (i = 0; i < len; i++) {
/* Allocate packet */
if (info->rx_skb == NULL) {
info->rx_state = RECV_WAIT_PACKET_TYPE;
info->rx_count = 0;
if (!(info->rx_skb = bluez_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
printk(KERN_WARNING "bluecard_cs: Can't allocate mem for new packet.\n");
return;
}
}
if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
info->rx_skb->dev = (void *) &(info->hdev);
info->rx_skb->pkt_type = buf[i];
switch (info->rx_skb->pkt_type) {
case 0x00:
/* init packet */
if (offset != 0x00) {
set_bit(XMIT_BUF_ONE_READY, &(info->tx_state));
set_bit(XMIT_BUF_TWO_READY, &(info->tx_state));
set_bit(XMIT_SENDING_READY, &(info->tx_state));
bluecard_write_wakeup(info);
}
kfree_skb(info->rx_skb);
info->rx_skb = NULL;
break;
case HCI_EVENT_PKT:
info->rx_state = RECV_WAIT_EVENT_HEADER;
info->rx_count = HCI_EVENT_HDR_SIZE;
break;
case HCI_ACLDATA_PKT:
info->rx_state = RECV_WAIT_ACL_HEADER;
info->rx_count = HCI_ACL_HDR_SIZE;
break;
case HCI_SCODATA_PKT:
info->rx_state = RECV_WAIT_SCO_HEADER;
info->rx_count = HCI_SCO_HDR_SIZE;
break;
default:
/* unknown packet */
printk(KERN_WARNING "bluecard_cs: Unknown HCI packet with type 0x%02x received.\n", info->rx_skb->pkt_type);
info->hdev.stat.err_rx++;
kfree_skb(info->rx_skb);
info->rx_skb = NULL;
break;
}
} else {
*skb_put(info->rx_skb, 1) = buf[i];
info->rx_count--;
if (info->rx_count == 0) {
int dlen;
hci_event_hdr *eh;
hci_acl_hdr *ah;
hci_sco_hdr *sh;
switch (info->rx_state) {
case RECV_WAIT_EVENT_HEADER:
eh = (hci_event_hdr *) (info->rx_skb->data);
info->rx_state = RECV_WAIT_DATA;
info->rx_count = eh->plen;
break;
case RECV_WAIT_ACL_HEADER:
ah = (hci_acl_hdr *) (info->rx_skb->data);
dlen = __le16_to_cpu(ah->dlen);
info->rx_state = RECV_WAIT_DATA;
info->rx_count = dlen;
break;
case RECV_WAIT_SCO_HEADER:
sh = (hci_sco_hdr *) (info->rx_skb->data);
info->rx_state = RECV_WAIT_DATA;
info->rx_count = sh->dlen;
break;
case RECV_WAIT_DATA:
hci_recv_frame(info->rx_skb);
info->rx_skb = NULL;
break;
}
}
}
}
info->hdev.stat.byte_rx += len;
}
void bluecard_interrupt(int irq, void *dev_inst, struct pt_regs *regs)
{
bluecard_info_t *info = dev_inst;
unsigned int iobase;
unsigned char reg;
if (!info) {
printk(KERN_WARNING "bluecard_cs: Call of irq %d for unknown device.\n", irq);
return;
}
if (!test_bit(CARD_READY, &(info->hw_state)))
return;
iobase = info->link.io.BasePort1;
spin_lock(&(info->lock));
/* Disable interrupt */
info->ctrl_reg &= ~REG_CONTROL_INTERRUPT;
outb(info->ctrl_reg, iobase + REG_CONTROL);
reg = inb(iobase + REG_INTERRUPT);
if ((reg != 0x00) && (reg != 0xff)) {
if (reg & 0x04) {
bluecard_receive(info, 0x00);
outb(0x04, iobase + REG_INTERRUPT);
outb(REG_COMMAND_RX_BUF_ONE, iobase + REG_COMMAND);
}
if (reg & 0x08) {
bluecard_receive(info, 0x10);
outb(0x08, iobase + REG_INTERRUPT);
outb(REG_COMMAND_RX_BUF_TWO, iobase + REG_COMMAND);
}
if (reg & 0x01) {
set_bit(XMIT_BUF_ONE_READY, &(info->tx_state));
outb(0x01, iobase + REG_INTERRUPT);
bluecard_write_wakeup(info);
}
if (reg & 0x02) {
set_bit(XMIT_BUF_TWO_READY, &(info->tx_state));
outb(0x02, iobase + REG_INTERRUPT);
bluecard_write_wakeup(info);
}
}
/* Enable interrupt */
info->ctrl_reg |= REG_CONTROL_INTERRUPT;
outb(info->ctrl_reg, iobase + REG_CONTROL);
spin_unlock(&(info->lock));
}
/* ======================== Device specific HCI commands ======================== */
static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud)
{
bluecard_info_t *info = (bluecard_info_t *) (hdev->driver_data);
struct sk_buff *skb;
int i;
/* Ericsson baud rate command */
unsigned char cmd[] = { HCI_COMMAND_PKT, 0x09, 0xfc, 0x01, 0x03 };
if (!(skb = bluez_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
printk(KERN_WARNING "bluecard_cs: Can't allocate mem for new packet.\n");
return -1;
}
switch (baud) {
case 460800:
cmd[4] = 0x00;
skb->pkt_type = PKT_BAUD_RATE_460800;
break;
case 230400:
cmd[4] = 0x01;
skb->pkt_type = PKT_BAUD_RATE_230400;
break;
case 115200:
cmd[4] = 0x02;
skb->pkt_type = PKT_BAUD_RATE_115200;
break;
case 57600:
/* Fall through... */
default:
cmd[4] = 0x03;
skb->pkt_type = PKT_BAUD_RATE_57600;
break;
}
for (i = 0; i < sizeof(cmd); i++)
*skb_put(skb, 1) = cmd[i];
skb_queue_tail(&(info->txq), skb);
bluecard_write_wakeup(info);
return 0;
}
/* ======================== HCI interface ======================== */
static int bluecard_hci_flush(struct hci_dev *hdev)
{
bluecard_info_t *info = (bluecard_info_t *) (hdev->driver_data);
/* Drop TX queue */
skb_queue_purge(&(info->txq));
return 0;
}
static int bluecard_hci_open(struct hci_dev *hdev)
{
bluecard_info_t *info = (bluecard_info_t *) (hdev->driver_data);
unsigned int iobase = info->link.io.BasePort1;
bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE);
if (test_and_set_bit(HCI_RUNNING, &(hdev->flags)))
return 0;
/* Enable LED */
outb(0x08 | 0x20, iobase + 0x30);
return 0;
}
static int bluecard_hci_close(struct hci_dev *hdev)
{
bluecard_info_t *info = (bluecard_info_t *) (hdev->driver_data);
unsigned int iobase = info->link.io.BasePort1;
if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
return 0;
bluecard_hci_flush(hdev);
/* Disable LED */
outb(0x00, iobase + 0x30);
return 0;
}
static int bluecard_hci_send_frame(struct sk_buff *skb)
{
bluecard_info_t *info;
struct hci_dev *hdev = (struct hci_dev *) (skb->dev);
if (!hdev) {
printk(KERN_WARNING "bluecard_cs: Frame for unknown HCI device (hdev=NULL).");
return -ENODEV;
}
info = (bluecard_info_t *) (hdev->driver_data);
switch (skb->pkt_type) {
case HCI_COMMAND_PKT:
hdev->stat.cmd_tx++;
break;
case HCI_ACLDATA_PKT:
hdev->stat.acl_tx++;
break;
case HCI_SCODATA_PKT:
hdev->stat.sco_tx++;
break;
};
/* Prepend skb with frame type */
memcpy(skb_push(skb, 1), &(skb->pkt_type), 1);
skb_queue_tail(&(info->txq), skb);
bluecard_write_wakeup(info);
return 0;
}
static void bluecard_hci_destruct(struct hci_dev *hdev)
{
}
static int bluecard_hci_ioctl(struct hci_dev *hdev, unsigned int cmd,
unsigned long arg)
{
return -ENOIOCTLCMD;
}
/* ======================== Card services HCI interaction ======================== */
int bluecard_open(bluecard_info_t * info)
{
unsigned int iobase = info->link.io.BasePort1;
struct hci_dev *hdev;
unsigned char id;
spin_lock_init(&(info->lock));
init_timer(&(info->timer));
info->timer.function = &bluecard_activity_led_timeout;
info->timer.data = (u_long) info;
skb_queue_head_init(&(info->txq));
info->rx_state = RECV_WAIT_PACKET_TYPE;
info->rx_count = 0;
info->rx_skb = NULL;
id = inb(iobase + 0x30);
if ((id & 0x0f) == 0x02)
set_bit(CARD_HAS_PCCARD_ID, &(info->hw_state));
if (id & 0x10)
set_bit(CARD_HAS_POWER_LED, &(info->hw_state));
if (id & 0x20)
set_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state));
/* Reset card */
info->ctrl_reg = REG_CONTROL_BT_RESET | REG_CONTROL_CARD_RESET;
outb(info->ctrl_reg, iobase + REG_CONTROL);
/* Turn FPGA off */
outb(0x80, iobase + 0x30);
/* Wait some time */
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ / 100);
/* Turn FPGA on */
outb(0x00, iobase + 0x30);
/* Activate card */
info->ctrl_reg = REG_CONTROL_BT_ON | REG_CONTROL_BT_RES_PU;
outb(info->ctrl_reg, iobase + REG_CONTROL);
/* Enable interrupt */
outb(0xff, iobase + REG_INTERRUPT);
info->ctrl_reg |= REG_CONTROL_INTERRUPT;
outb(info->ctrl_reg, iobase + REG_CONTROL);
/* Start the RX buffers */
outb(REG_COMMAND_RX_BUF_ONE, iobase + REG_COMMAND);
outb(REG_COMMAND_RX_BUF_TWO, iobase + REG_COMMAND);
/* Signal that the hardware is ready */
set_bit(CARD_READY, &(info->hw_state));
/* Drop TX queue */
skb_queue_purge(&(info->txq));
/* Control the point at which RTS is enabled */
outb((0x0f << RTS_LEVEL_SHIFT_BITS) | 1, iobase + REG_RX_CONTROL);
/* Timeout before it is safe to send the first HCI packet */
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout((HZ * 5) / 4); // or set it to 3/2
/* Initialize and register HCI device */
hdev = &(info->hdev);
hdev->type = HCI_PCCARD;
hdev->driver_data = info;
hdev->open = bluecard_hci_open;
hdev->close = bluecard_hci_close;
hdev->flush = bluecard_hci_flush;
hdev->send = bluecard_hci_send_frame;
hdev->destruct = bluecard_hci_destruct;
hdev->ioctl = bluecard_hci_ioctl;
if (hci_register_dev(hdev) < 0) {
printk(KERN_WARNING "bluecard_cs: Can't register HCI device %s.\n", hdev->name);
return -ENODEV;
}
return 0;
}
int bluecard_close(bluecard_info_t * info)
{
unsigned int iobase = info->link.io.BasePort1;
struct hci_dev *hdev = &(info->hdev);
bluecard_hci_close(hdev);
clear_bit(CARD_READY, &(info->hw_state));
/* Reset card */
info->ctrl_reg = REG_CONTROL_BT_RESET | REG_CONTROL_CARD_RESET;
outb(info->ctrl_reg, iobase + REG_CONTROL);
/* Turn FPGA off */
outb(0x80, iobase + 0x30);
if (hci_unregister_dev(hdev) < 0)
printk(KERN_WARNING "bluecard_cs: Can't unregister HCI device %s.\n", hdev->name);
return 0;
}
/* ======================== Card services ======================== */
static void cs_error(client_handle_t handle, int func, int ret)
{
error_info_t err = { func, ret };
CardServices(ReportError, handle, &err);
}
dev_link_t *bluecard_attach(void)
{
bluecard_info_t *info;
client_reg_t client_reg;
dev_link_t *link;
int i, ret;
/* Create new info device */
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return NULL;
memset(info, 0, sizeof(*info));
link = &info->link;
link->priv = info;
link->release.function = &bluecard_release;
link->release.data = (u_long) link;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts1 = 8;
link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
link->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_LEVEL_ID;
if (irq_list[0] == -1)
link->irq.IRQInfo2 = irq_mask;
else
for (i = 0; i < 4; i++)
link->irq.IRQInfo2 |= 1 << irq_list[i];
link->irq.Handler = bluecard_interrupt;
link->irq.Instance = info;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
/* Register with Card Services */
link->next = dev_list;
dev_list = link;
client_reg.dev_info = &dev_info;
client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
client_reg.EventMask =
CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
client_reg.event_handler = &bluecard_event;
client_reg.Version = 0x0210;
client_reg.event_callback_args.client_data = link;
ret = CardServices(RegisterClient, &link->handle, &client_reg);
if (ret != CS_SUCCESS) {
cs_error(link->handle, RegisterClient, ret);
bluecard_detach(link);
return NULL;
}
return link;
}
void bluecard_detach(dev_link_t * link)
{
bluecard_info_t *info = link->priv;
dev_link_t **linkp;
int ret;
/* Locate device structure */
for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
if (*linkp == link)
break;
if (*linkp == NULL)
return;
del_timer(&link->release);
if (link->state & DEV_CONFIG)
bluecard_release((u_long) link);
if (link->handle) {
ret = CardServices(DeregisterClient, link->handle);
if (ret != CS_SUCCESS)
cs_error(link->handle, DeregisterClient, ret);
}
/* Unlink device structure, free bits */
*linkp = link->next;
kfree(info);
}
static int get_tuple(int fn, client_handle_t handle, tuple_t * tuple,
cisparse_t * parse)
{
int i;
i = CardServices(fn, handle, tuple);
if (i != CS_SUCCESS)
return CS_NO_MORE_ITEMS;
i = CardServices(GetTupleData, handle, tuple);
if (i != CS_SUCCESS)
return i;
return CardServices(ParseTuple, handle, tuple, parse);
}
#define first_tuple(a, b, c) get_tuple(GetFirstTuple, a, b, c)
#define next_tuple(a, b, c) get_tuple(GetNextTuple, a, b, c)
void bluecard_config(dev_link_t * link)
{
client_handle_t handle = link->handle;
bluecard_info_t *info = link->priv;
tuple_t tuple;
u_short buf[256];
cisparse_t parse;
config_info_t config;
int i, n, last_ret, last_fn;
tuple.TupleData = (cisdata_t *) buf;
tuple.TupleOffset = 0;
tuple.TupleDataMax = 255;
tuple.Attributes = 0;
/* Get configuration register information */
tuple.DesiredTuple = CISTPL_CONFIG;
last_ret = first_tuple(handle, &tuple, &parse);
if (last_ret != CS_SUCCESS) {
last_fn = ParseTuple;
goto cs_failed;
}
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
/* Configure card */
link->state |= DEV_CONFIG;
i = CardServices(GetConfigurationInfo, handle, &config);
link->conf.Vcc = config.Vcc;
link->conf.ConfigIndex = 0x20;
link->io.NumPorts1 = 64;
link->io.IOAddrLines = 6;
for (n = 0; n < 0x400; n += 0x40) {
link->io.BasePort1 = n ^ 0x300;
i = CardServices(RequestIO, link->handle, &link->io);
if (i == CS_SUCCESS)
break;
}
if (i != CS_SUCCESS) {
cs_error(link->handle, RequestIO, i);
goto failed;
}
i = CardServices(RequestIRQ, link->handle, &link->irq);
if (i != CS_SUCCESS) {
cs_error(link->handle, RequestIRQ, i);
link->irq.AssignedIRQ = 0;
}
i = CardServices(RequestConfiguration, link->handle, &link->conf);
if (i != CS_SUCCESS) {
cs_error(link->handle, RequestConfiguration, i);
goto failed;
}
MOD_INC_USE_COUNT;
if (bluecard_open(info) != 0)
goto failed;
link->dev = &info->node;
link->state &= ~DEV_CONFIG_PENDING;
return;
cs_failed:
cs_error(link->handle, last_fn, last_ret);
failed:
bluecard_release((u_long) link);
}
void bluecard_release(u_long arg)
{
dev_link_t *link = (dev_link_t *) arg;
bluecard_info_t *info = link->priv;
if (link->state & DEV_PRESENT)
bluecard_close(info);
MOD_DEC_USE_COUNT;
link->dev = NULL;
CardServices(ReleaseConfiguration, link->handle);
CardServices(ReleaseIO, link->handle, &link->io);
CardServices(ReleaseIRQ, link->handle, &link->irq);
link->state &= ~DEV_CONFIG;
}
int bluecard_event(event_t event, int priority,
event_callback_args_t * args)
{
dev_link_t *link = args->client_data;
bluecard_info_t *info = link->priv;
switch (event) {
case CS_EVENT_CARD_REMOVAL:
link->state &= ~DEV_PRESENT;
if (link->state & DEV_CONFIG) {
bluecard_close(info);
mod_timer(&link->release, jiffies + HZ / 20);
}
break;
case CS_EVENT_CARD_INSERTION:
link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
bluecard_config(link);
break;
case CS_EVENT_PM_SUSPEND:
link->state |= DEV_SUSPEND;
/* Fall through... */
case CS_EVENT_RESET_PHYSICAL:
if (link->state & DEV_CONFIG)
CardServices(ReleaseConfiguration, link->handle);
break;
case CS_EVENT_PM_RESUME:
link->state &= ~DEV_SUSPEND;
/* Fall through... */
case CS_EVENT_CARD_RESET:
if (DEV_OK(link))
CardServices(RequestConfiguration, link->handle,
&link->conf);
break;
}
return 0;
}
/* ======================== Module initialization ======================== */
int __init init_bluecard_cs(void)
{
servinfo_t serv;
int err;
CardServices(GetCardServicesInfo, &serv);
if (serv.Revision != CS_RELEASE_CODE) {
printk(KERN_NOTICE "bluecard_cs: Card Services release does not match!\n");
return -1;
}
err = register_pccard_driver(&dev_info, &bluecard_attach, &bluecard_detach);
return err;
}
void __exit exit_bluecard_cs(void)
{
unregister_pccard_driver(&dev_info);
while (dev_list != NULL)
bluecard_detach(dev_list);
}
module_init(init_bluecard_cs);
module_exit(exit_bluecard_cs);
EXPORT_NO_SYMBOLS;
/*
*
* A driver for Nokia Connectivity Card DTL-1 devices
*
* Copyright (C) 2001-2002 Marcel Holtmann <marcel@holtmann.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* The initial developer of the original code is David A. Hinds
* <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
* are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
*
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/serial.h>
#include <linux/serial_reg.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <pcmcia/version.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ciscode.h>
#include <pcmcia/ds.h>
#include <pcmcia/cisreg.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
/* ======================== Module parameters ======================== */
/* Bit map of interrupts to choose from */
static u_int irq_mask = 0xffff;
static int irq_list[4] = { -1 };
MODULE_PARM(irq_mask, "i");
MODULE_PARM(irq_list, "1-4i");
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("BlueZ driver for Nokia Connectivity Card DTL-1");
MODULE_LICENSE("GPL");
/* ======================== Local structures ======================== */
typedef struct dtl1_info_t {
dev_link_t link;
dev_node_t node;
struct hci_dev hdev;
spinlock_t lock; /* For serializing operations */
unsigned long flowmask; /* HCI flow mask */
int ri_latch;
struct sk_buff_head txq;
unsigned long tx_state;
unsigned long rx_state;
unsigned long rx_count;
struct sk_buff *rx_skb;
} dtl1_info_t;
void dtl1_config(dev_link_t *link);
void dtl1_release(u_long arg);
int dtl1_event(event_t event, int priority, event_callback_args_t *args);
static dev_info_t dev_info = "dtl1_cs";
dev_link_t *dtl1_attach(void);
void dtl1_detach(dev_link_t *);
dev_link_t *dev_list = NULL;
/* Transmit states */
#define XMIT_SENDING 1
#define XMIT_WAKEUP 2
#define XMIT_WAITING 8
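/*
 * XMIT_WAITING implements the card's flow control: after a complete frame has
 * been written the driver waits until the card toggles the RI modem line (see
 * dtl1_interrupt) or sends a Nokia control frame (see dtl1_control) before
 * the next frame is transmitted.
 */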
/* Receiver States */
#define RECV_WAIT_NSH 0
#define RECV_WAIT_DATA 1
typedef struct {
u8 type;
u8 zero;
u16 len;
} __attribute__ ((packed)) nsh_t; /* Nokia Specific Header */
#define NSHL 4 /* Nokia Specific Header Length */
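/*
 * Every frame exchanged with the card is wrapped in a Nokia Specific Header
 * (packet type, a reserved zero byte and a 16 bit payload length) and padded
 * to an even number of bytes.  dtl1_hci_send_frame() adds the header and pad
 * byte, dtl1_receive() strips them before handing the frame to the HCI core.
 */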
/* ======================== Interrupt handling ======================== */
static int dtl1_write(unsigned int iobase, int fifo_size, __u8 *buf, int len) {
int actual = 0;
/* Tx FIFO should be empty */
if (!(inb(iobase + UART_LSR) & UART_LSR_THRE))
return 0;
/* Fill FIFO with current frame */
while ((fifo_size-- > 0) && (actual < len)) {
/* Transmit next byte */
outb(buf[actual], iobase + UART_TX);
actual++;
}
return actual;
}
static void dtl1_write_wakeup(dtl1_info_t *info) {
if (!info) {
printk(KERN_WARNING "dtl1_cs: Call of write_wakeup for unknown device.\n");
return;
}
if (test_bit(XMIT_WAITING, &(info->tx_state))) {
set_bit(XMIT_WAKEUP, &(info->tx_state));
return;
}
if (test_and_set_bit(XMIT_SENDING, &(info->tx_state))) {
set_bit(XMIT_WAKEUP, &(info->tx_state));
return;
}
do {
register unsigned int iobase = info->link.io.BasePort1;
register struct sk_buff *skb;
register int len;
clear_bit(XMIT_WAKEUP, &(info->tx_state));
if (!(info->link.state & DEV_PRESENT))
return;
if (!(skb = skb_dequeue(&(info->txq))))
break;
/* Send frame */
len = dtl1_write(iobase, 32, skb->data, skb->len);
if (len == skb->len) {
set_bit(XMIT_WAITING, &(info->tx_state));
kfree_skb(skb);
}
else {
skb_pull(skb, len);
skb_queue_head(&(info->txq), skb);
}
info->hdev.stat.byte_tx += len;
} while (test_bit(XMIT_WAKEUP, &(info->tx_state)));
clear_bit(XMIT_SENDING, &(info->tx_state));
}
static void dtl1_control(dtl1_info_t *info, struct sk_buff *skb) {
u8 flowmask = *(u8 *)skb->data;
int i;
printk(KERN_INFO "dtl1_cs: Nokia control data = ");
for (i = 0; i < skb->len; i++) {
printk("%02x ", skb->data[i]);
}
printk("\n");
/* transition to active state */
if (((info->flowmask & 0x07) == 0) && ((flowmask & 0x07) != 0)) {
clear_bit(XMIT_WAITING, &(info->tx_state));
dtl1_write_wakeup(info);
}
info->flowmask = flowmask;
kfree_skb(skb);
}
static void dtl1_receive(dtl1_info_t *info) {
unsigned int iobase;
nsh_t *nsh;
int boguscount = 0;
if (!info) {
printk(KERN_WARNING "dtl1_cs: Call of receive for unknown device.\n");
return;
}
iobase = info->link.io.BasePort1;
do {
info->hdev.stat.byte_rx++;
/* Allocate packet */
if (info->rx_skb == NULL)
if (!(info->rx_skb = bluez_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
printk(KERN_WARNING "dtl1_cs: Can't allocate mem for new packet.\n");
info->rx_state = RECV_WAIT_NSH;
info->rx_count = NSHL;
return;
}
*skb_put(info->rx_skb, 1) = inb(iobase + UART_RX);
nsh = (nsh_t *)info->rx_skb->data;
info->rx_count--;
if (info->rx_count == 0) {
switch (info->rx_state) {
case RECV_WAIT_NSH:
info->rx_state = RECV_WAIT_DATA;
info->rx_count = nsh->len + (nsh->len & 0x0001);
break;
case RECV_WAIT_DATA:
info->rx_skb->pkt_type = nsh->type;
/* remove PAD byte if it exists */
if (nsh->len & 0x0001) {
info->rx_skb->tail--;
info->rx_skb->len--;
}
/* remove NSH */
skb_pull(info->rx_skb, NSHL);
switch (info->rx_skb->pkt_type) {
case 0x80:
/* control data for the Nokia Card */
dtl1_control(info, info->rx_skb);
break;
case 0x82:
case 0x83:
case 0x84:
/* send frame to the HCI layer */
info->rx_skb->dev = (void *)&(info->hdev);
info->rx_skb->pkt_type &= 0x0f;
hci_recv_frame(info->rx_skb);
break;
default:
/* unknown packet */
printk(KERN_WARNING "dtl1_cs: Unknown HCI packet with type 0x%02x received.\n", info->rx_skb->pkt_type);
kfree_skb(info->rx_skb);
break;
}
info->rx_state = RECV_WAIT_NSH;
info->rx_count = NSHL;
info->rx_skb = NULL;
break;
}
}
/* Make sure we don't stay here too long */
if (boguscount++ > 32)
break;
} while (inb(iobase + UART_LSR) & UART_LSR_DR);
}
void dtl1_interrupt(int irq, void *dev_inst, struct pt_regs *regs) {
dtl1_info_t *info = dev_inst;
unsigned int iobase;
unsigned char msr;
int boguscount = 0;
int iir, lsr;
if (!info) {
printk(KERN_WARNING "dtl1_cs: Call of irq %d for unknown device.\n", irq);
return;
}
iobase = info->link.io.BasePort1;
spin_lock(&(info->lock));
iir = inb(iobase + UART_IIR) & UART_IIR_ID;
while (iir) {
/* Clear interrupt */
lsr = inb(iobase + UART_LSR);
switch (iir) {
case UART_IIR_RLSI:
printk(KERN_NOTICE "dtl1_cs: RLSI\n");
break;
case UART_IIR_RDI:
/* Receive interrupt */
dtl1_receive(info);
break;
case UART_IIR_THRI:
if (lsr & UART_LSR_THRE) {
/* Transmitter ready for data */
dtl1_write_wakeup(info);
}
break;
default:
printk(KERN_NOTICE "dtl1_cs: Unhandled IIR=%#x\n", iir);
break;
}
/* Make sure we don't stay here too long */
if (boguscount++ > 100)
break;
iir = inb(iobase + UART_IIR) & UART_IIR_ID;
}
msr = inb(iobase + UART_MSR);
if (info->ri_latch ^ (msr & UART_MSR_RI)) {
info->ri_latch = msr & UART_MSR_RI;
clear_bit(XMIT_WAITING, &(info->tx_state));
dtl1_write_wakeup(info);
}
spin_unlock(&(info->lock));
}
/* ======================== HCI interface ======================== */
static int dtl1_hci_open(struct hci_dev *hdev) {
set_bit(HCI_RUNNING, &(hdev->flags));
return 0;
}
static int dtl1_hci_flush(struct hci_dev *hdev) {
dtl1_info_t *info = (dtl1_info_t *)(hdev->driver_data);
/* Drop TX queue */
skb_queue_purge(&(info->txq));
return 0;
}
static int dtl1_hci_close(struct hci_dev *hdev) {
if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
return 0;
dtl1_hci_flush(hdev);
return 0;
}
static int dtl1_hci_send_frame(struct sk_buff *skb) {
dtl1_info_t *info;
struct hci_dev* hdev = (struct hci_dev *)(skb->dev);
struct sk_buff *s;
nsh_t nsh;
if (!hdev) {
printk(KERN_WARNING "dtl1_cs: Frame for unknown HCI device (hdev=NULL).");
return -ENODEV;
}
info = (dtl1_info_t *)(hdev->driver_data);
switch (skb->pkt_type) {
case HCI_COMMAND_PKT:
hdev->stat.cmd_tx++;
nsh.type = 0x81;
break;
case HCI_ACLDATA_PKT:
hdev->stat.acl_tx++;
nsh.type = 0x82;
break;
case HCI_SCODATA_PKT:
hdev->stat.sco_tx++;
nsh.type = 0x83;
break;
};
nsh.zero = 0;
nsh.len = skb->len;
s = bluez_skb_alloc(NSHL + skb->len + 1, GFP_ATOMIC);
skb_reserve(s, NSHL);
memcpy(skb_put(s, skb->len), skb->data, skb->len);
if (skb->len & 0x0001)
*skb_put(s, 1) = 0; /* PAD */
/* Prepend skb with Nokia frame header and queue */
memcpy(skb_push(s, NSHL), &nsh, NSHL);
skb_queue_tail(&(info->txq), s);
dtl1_write_wakeup(info);
kfree_skb(skb);
return 0;
}
static void dtl1_hci_destruct(struct hci_dev *hdev) {
}
static int dtl1_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg) {
return -ENOIOCTLCMD;
}
/* ======================== Card services HCI interaction ======================== */
int dtl1_open(dtl1_info_t *info) {
unsigned long flags;
unsigned int iobase = info->link.io.BasePort1;
struct hci_dev *hdev;
spin_lock_init(&(info->lock));
skb_queue_head_init(&(info->txq));
info->rx_state = RECV_WAIT_NSH;
info->rx_count = NSHL;
info->rx_skb = NULL;
set_bit(XMIT_WAITING, &(info->tx_state));
spin_lock_irqsave(&(info->lock), flags);
/* Reset UART */
outb(0, iobase + UART_MCR);
/* Turn off interrupts */
outb(0, iobase + UART_IER);
/* Initialize UART */
outb(UART_LCR_WLEN8, iobase + UART_LCR); /* Reset DLAB */
outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase + UART_MCR);
info->ri_latch = inb(info->link.io.BasePort1 + UART_MSR) & UART_MSR_RI;
/* Turn on interrupts */
outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER);
spin_unlock_irqrestore(&(info->lock), flags);
/* Timeout before it is safe to send the first HCI packet */
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ * 2);
/* Initialize and register HCI device */
hdev = &(info->hdev);
hdev->type = HCI_PCCARD;
hdev->driver_data = info;
hdev->open = dtl1_hci_open;
hdev->close = dtl1_hci_close;
hdev->flush = dtl1_hci_flush;
hdev->send = dtl1_hci_send_frame;
hdev->destruct = dtl1_hci_destruct;
hdev->ioctl = dtl1_hci_ioctl;
if (hci_register_dev(hdev) < 0) {
printk(KERN_WARNING "dtl1_cs: Can't register HCI device %s.\n", hdev->name);
return -ENODEV;
}
return 0;
}
int dtl1_close(dtl1_info_t *info) {
unsigned long flags;
unsigned int iobase = info->link.io.BasePort1;
struct hci_dev *hdev = &(info->hdev);
dtl1_hci_close(hdev);
spin_lock_irqsave(&(info->lock), flags);
/* Reset UART */
outb(0, iobase + UART_MCR);
/* Turn off interrupts */
outb(0, iobase + UART_IER);
spin_unlock_irqrestore(&(info->lock), flags);
if (hci_unregister_dev(hdev) < 0)
printk(KERN_WARNING "dtl1_cs: Can't unregister HCI device %s.\n", hdev->name);
return 0;
}
/* ======================== Card services ======================== */
static void cs_error(client_handle_t handle, int func, int ret) {
error_info_t err = { func, ret };
CardServices(ReportError, handle, &err);
}
dev_link_t *dtl1_attach(void) {
dtl1_info_t *info;
client_reg_t client_reg;
dev_link_t *link;
int i, ret;
/* Create new info device */
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return NULL;
memset(info, 0, sizeof(*info));
link = &info->link;
link->priv = info;
link->release.function = &dtl1_release;
link->release.data = (u_long)link;
link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts1 = 8;
link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
link->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_LEVEL_ID;
if (irq_list[0] == -1)
link->irq.IRQInfo2 = irq_mask;
else
for (i = 0; i < 4; i++)
link->irq.IRQInfo2 |= 1 << irq_list[i];
link->irq.Handler = dtl1_interrupt;
link->irq.Instance = info;
link->conf.Attributes = CONF_ENABLE_IRQ;
link->conf.Vcc = 50;
link->conf.IntType = INT_MEMORY_AND_IO;
/* Register with Card Services */
link->next = dev_list;
dev_list = link;
client_reg.dev_info = &dev_info;
client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
client_reg.EventMask = CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
CS_EVENT_RESET_PHYSICAL | CS_EVENT_CARD_RESET |
CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
client_reg.event_handler = &dtl1_event;
client_reg.Version = 0x0210;
client_reg.event_callback_args.client_data = link;
ret = CardServices(RegisterClient, &link->handle, &client_reg);
if (ret != CS_SUCCESS) {
cs_error(link->handle, RegisterClient, ret);
dtl1_detach(link);
return NULL;
}
return link;
}
void dtl1_detach(dev_link_t *link) {
dtl1_info_t *info = link->priv;
dev_link_t **linkp;
int ret;
/* Locate device structure */
for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
if (*linkp == link)
break;
if (*linkp == NULL)
return;
del_timer(&link->release);
if (link->state & DEV_CONFIG)
dtl1_release((u_long)link);
if (link->handle) {
ret = CardServices(DeregisterClient, link->handle);
if (ret != CS_SUCCESS)
cs_error(link->handle, DeregisterClient, ret);
}
/* Unlink device structure, free bits */
*linkp = link->next;
kfree(info);
}
static int get_tuple(int fn, client_handle_t handle, tuple_t *tuple,
cisparse_t *parse) {
int i;
i = CardServices(fn, handle, tuple);
if (i != CS_SUCCESS)
return CS_NO_MORE_ITEMS;
i = CardServices(GetTupleData, handle, tuple);
if (i != CS_SUCCESS)
return i;
return CardServices(ParseTuple, handle, tuple, parse);
}
#define first_tuple(a, b, c) get_tuple(GetFirstTuple, a, b, c)
#define next_tuple(a, b, c) get_tuple(GetNextTuple, a, b, c)
void dtl1_config(dev_link_t *link) {
client_handle_t handle = link->handle;
dtl1_info_t *info = link->priv;
tuple_t tuple;
u_short buf[256];
cisparse_t parse;
cistpl_cftable_entry_t *cf = &parse.cftable_entry;
config_info_t config;
int i, last_ret, last_fn;
tuple.TupleData = (cisdata_t *)buf;
tuple.TupleOffset = 0;
tuple.TupleDataMax = 255;
tuple.Attributes = 0;
/* Get configuration register information */
tuple.DesiredTuple = CISTPL_CONFIG;
last_ret = first_tuple(handle, &tuple, &parse);
if (last_ret != CS_SUCCESS) {
last_fn = ParseTuple;
goto cs_failed;
}
link->conf.ConfigBase = parse.config.base;
link->conf.Present = parse.config.rmask[0];
/* Configure card */
link->state |= DEV_CONFIG;
i = CardServices(GetConfigurationInfo, handle, &config);
link->conf.Vcc = config.Vcc;
tuple.TupleData = (cisdata_t *)buf;
tuple.TupleOffset = 0; tuple.TupleDataMax = 255;
tuple.Attributes = 0;
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
/* Look for a generic full-sized window */
link->io.NumPorts1 = 8;
i = first_tuple(handle, &tuple, &parse);
while (i != CS_NO_MORE_ITEMS) {
if ((i == CS_SUCCESS) && (cf->io.nwin == 1) && (cf->io.win[0].len > 8)) {
link->conf.ConfigIndex = cf->index;
link->io.BasePort1 = cf->io.win[0].base;
link->io.NumPorts1 = cf->io.win[0].len; /*yo*/
link->io.IOAddrLines = cf->io.flags & CISTPL_IO_LINES_MASK;
i = CardServices(RequestIO, link->handle, &link->io);
if (i == CS_SUCCESS)
break;
}
i = next_tuple(handle, &tuple, &parse);
}
if (i != CS_SUCCESS) {
cs_error(link->handle, RequestIO, i);
goto failed;
}
i = CardServices(RequestIRQ, link->handle, &link->irq);
if (i != CS_SUCCESS) {
cs_error(link->handle, RequestIRQ, i);
link->irq.AssignedIRQ = 0;
}
i = CardServices(RequestConfiguration, link->handle, &link->conf);
if (i != CS_SUCCESS) {
cs_error(link->handle, RequestConfiguration, i);
goto failed;
}
MOD_INC_USE_COUNT;
if (dtl1_open(info) != 0)
goto failed;
link->dev = &info->node;
link->state &= ~DEV_CONFIG_PENDING;
return;
cs_failed:
cs_error(link->handle, last_fn, last_ret);
failed:
dtl1_release((u_long)link);
}
void dtl1_release(u_long arg) {
dev_link_t *link = (dev_link_t *)arg;
dtl1_info_t *info = link->priv;
if (link->state & DEV_PRESENT)
dtl1_close(info);
MOD_DEC_USE_COUNT;
link->dev = NULL;
CardServices(ReleaseConfiguration, link->handle);
CardServices(ReleaseIO, link->handle, &link->io);
CardServices(ReleaseIRQ, link->handle, &link->irq);
link->state &= ~DEV_CONFIG;
}
int dtl1_event(event_t event, int priority, event_callback_args_t *args) {
dev_link_t *link = args->client_data;
dtl1_info_t *info = link->priv;
switch (event) {
case CS_EVENT_CARD_REMOVAL:
link->state &= ~DEV_PRESENT;
if (link->state & DEV_CONFIG) {
dtl1_close(info);
mod_timer(&link->release, jiffies + HZ/20);
}
break;
case CS_EVENT_CARD_INSERTION:
link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
dtl1_config(link);
break;
case CS_EVENT_PM_SUSPEND:
link->state |= DEV_SUSPEND;
/* Fall through... */
case CS_EVENT_RESET_PHYSICAL:
if (link->state & DEV_CONFIG)
CardServices(ReleaseConfiguration, link->handle);
break;
case CS_EVENT_PM_RESUME:
link->state &= ~DEV_SUSPEND;
/* Fall through... */
case CS_EVENT_CARD_RESET:
if (DEV_OK(link))
CardServices(RequestConfiguration, link->handle, &link->conf);
break;
}
return 0;
}
/* ======================== Module initialization ======================== */
int __init init_dtl1_cs(void) {
servinfo_t serv;
int err;
CardServices(GetCardServicesInfo, &serv);
if (serv.Revision != CS_RELEASE_CODE) {
printk(KERN_NOTICE "dtl1_cs: Card Services release does not match!\n");
return -1;
}
err = register_pccard_driver(&dev_info, &dtl1_attach, &dtl1_detach);
return err;
}
void __exit exit_dtl1_cs(void) {
unregister_pccard_driver(&dev_info);
while (dev_list != NULL)
dtl1_detach(dev_list);
}
module_init(init_dtl1_cs);
module_exit(exit_dtl1_cs);
EXPORT_NO_SYMBOLS;
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/*
* BlueZ HCI UART(H4) protocol.
*
* $Id: hci_h4.c,v 1.2 2002/04/17 17:37:20 maxk Exp $
*/
#define VERSION "1.1"
#include <linux/config.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/signal.h>
#include <linux/ioctl.h>
#include <linux/skbuff.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include "hci_uart.h"
#include "hci_h4.h"
#ifndef HCI_UART_DEBUG
#undef BT_DBG
#define BT_DBG( A... )
#undef BT_DMP
#define BT_DMP( A... )
#endif
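/*
 * H4 is the simplest UART transport: every HCI packet is sent as a single
 * packet type indicator byte followed by the usual HCI header and payload.
 * h4_preq() prepends the indicator on transmit; h4_recv() runs a small state
 * machine (packet type -> header -> data) to reassemble frames from the
 * incoming byte stream.
 */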
/* Initialize protocol */
static int h4_open(struct n_hci *n_hci)
{
struct h4_struct *h4;
BT_DBG("n_hci %p", n_hci);
h4 = kmalloc(sizeof(*h4), GFP_ATOMIC);
if (!h4)
return -ENOMEM;
memset(h4, 0, sizeof(*h4));
n_hci->priv = h4;
return 0;
}
/* Flush protocol data */
static int h4_flush(struct n_hci *n_hci)
{
BT_DBG("n_hci %p", n_hci);
return 0;
}
/* Close protocol */
static int h4_close(struct n_hci *n_hci)
{
struct h4_struct *h4 = n_hci->priv;
n_hci->priv = NULL;
BT_DBG("n_hci %p", n_hci);
if (h4->rx_skb)
kfree_skb(h4->rx_skb);
kfree(h4);
return 0;
}
/* Send data */
static int h4_send(struct n_hci *n_hci, void *data, int len)
{
struct tty_struct *tty = n_hci->tty;
BT_DBG("n_hci %p len %d", n_hci, len);
/* Send frame to TTY driver */
tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
return tty->driver.write(tty, 0, data, len);
}
/* Init frame before queueing (padding, crc, etc) */
static struct sk_buff* h4_preq(struct n_hci *n_hci, struct sk_buff *skb)
{
BT_DBG("n_hci %p skb %p", n_hci, skb);
/* Prepend skb with frame type */
memcpy(skb_push(skb, 1), &skb->pkt_type, 1);
return skb;
}
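/* Decide what to do once a header has been received completely: deliver
 * zero length frames right away, drop frames whose payload would not fit
 * into the skb, otherwise wait in H4_W4_DATA for len more bytes. */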
static inline int h4_check_data_len(struct h4_struct *h4, int len)
{
register int room = skb_tailroom(h4->rx_skb);
BT_DBG("len %d room %d", len, room);
if (!len) {
BT_DMP(h4->rx_skb->data, h4->rx_skb->len);
hci_recv_frame(h4->rx_skb);
} else if (len > room) {
BT_ERR("Data length is to large");
kfree_skb(h4->rx_skb);
} else {
h4->rx_state = H4_W4_DATA;
h4->rx_count = len;
return len;
}
h4->rx_state = H4_W4_PACKET_TYPE;
h4->rx_skb = NULL;
h4->rx_count = 0;
return 0;
}
/* Recv data */
static int h4_recv(struct n_hci *n_hci, void *data, int count)
{
struct h4_struct *h4 = n_hci->priv;
register char *ptr;
hci_event_hdr *eh;
hci_acl_hdr *ah;
hci_sco_hdr *sh;
register int len, type, dlen;
BT_DBG("n_hci %p count %d rx_state %ld rx_count %ld", n_hci, count, h4->rx_state, h4->rx_count);
ptr = data;
while (count) {
if (h4->rx_count) {
len = MIN(h4->rx_count, count);
memcpy(skb_put(h4->rx_skb, len), ptr, len);
h4->rx_count -= len; count -= len; ptr += len;
if (h4->rx_count)
continue;
switch (h4->rx_state) {
case H4_W4_DATA:
BT_DBG("Complete data");
BT_DMP(h4->rx_skb->data, h4->rx_skb->len);
hci_recv_frame(h4->rx_skb);
h4->rx_state = H4_W4_PACKET_TYPE;
h4->rx_skb = NULL;
continue;
case H4_W4_EVENT_HDR:
eh = (hci_event_hdr *) h4->rx_skb->data;
BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen);
h4_check_data_len(h4, eh->plen);
continue;
case H4_W4_ACL_HDR:
ah = (hci_acl_hdr *) h4->rx_skb->data;
dlen = __le16_to_cpu(ah->dlen);
BT_DBG("ACL header: dlen %d", dlen);
h4_check_data_len(h4, dlen);
continue;
case H4_W4_SCO_HDR:
sh = (hci_sco_hdr *) h4->rx_skb->data;
BT_DBG("SCO header: dlen %d", sh->dlen);
h4_check_data_len(h4, sh->dlen);
continue;
};
}
/* H4_W4_PACKET_TYPE */
switch (*ptr) {
case HCI_EVENT_PKT:
BT_DBG("Event packet");
h4->rx_state = H4_W4_EVENT_HDR;
h4->rx_count = HCI_EVENT_HDR_SIZE;
type = HCI_EVENT_PKT;
break;
case HCI_ACLDATA_PKT:
BT_DBG("ACL packet");
h4->rx_state = H4_W4_ACL_HDR;
h4->rx_count = HCI_ACL_HDR_SIZE;
type = HCI_ACLDATA_PKT;
break;
case HCI_SCODATA_PKT:
BT_DBG("SCO packet");
h4->rx_state = H4_W4_SCO_HDR;
h4->rx_count = HCI_SCO_HDR_SIZE;
type = HCI_SCODATA_PKT;
break;
default:
BT_ERR("Unknown HCI packet type %2.2x", (__u8)*ptr);
n_hci->hdev.stat.err_rx++;
ptr++; count--;
continue;
};
ptr++; count--;
/* Allocate packet */
h4->rx_skb = bluez_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
if (!h4->rx_skb) {
BT_ERR("Can't allocate mem for new packet");
h4->rx_state = H4_W4_PACKET_TYPE;
h4->rx_count = 0;
return 0;
}
h4->rx_skb->dev = (void *) &n_hci->hdev;
h4->rx_skb->pkt_type = type;
}
return count;
}
static struct hci_uart_proto h4p = {
id: HCI_UART_H4,
open: h4_open,
close: h4_close,
send: h4_send,
recv: h4_recv,
preq: h4_preq,
flush: h4_flush,
};
int h4_init(void)
{
return hci_uart_register_proto(&h4p);
}
int h4_deinit(void)
{
return hci_uart_unregister_proto(&h4p);
}
......@@ -23,40 +23,21 @@
*/
/*
* $Id: hci_uart.h,v 1.2 2001/06/02 01:40:08 maxk Exp $
* $Id: hci_h4.h,v 1.1.1.1 2002/03/08 21:03:15 maxk Exp $
*/
#ifndef N_HCI
#define N_HCI 15
#endif
#ifdef __KERNEL__
#define tty2n_hci(tty) ((struct n_hci *)((tty)->disc_data))
#define n_hci2tty(n_hci) ((n_hci)->tty)
struct n_hci {
struct tty_struct *tty;
struct hci_dev hdev;
struct sk_buff_head txq;
unsigned long tx_state;
spinlock_t rx_lock;
struct h4_struct {
unsigned long rx_state;
unsigned long rx_count;
struct sk_buff *rx_skb;
};
/* Transmit states */
#define TRANS_SENDING 1
#define TRANS_WAKEUP 2
/* Receiver States */
#define WAIT_PACKET_TYPE 0
#define WAIT_EVENT_HDR 1
#define WAIT_ACL_HDR 2
#define WAIT_SCO_HDR 3
#define WAIT_DATA 4
/* H4 receiver States */
#define H4_W4_PACKET_TYPE 0
#define H4_W4_EVENT_HDR 1
#define H4_W4_ACL_HDR 2
#define H4_W4_SCO_HDR 3
#define H4_W4_DATA 4
#endif /* __KERNEL__ */
......@@ -25,9 +25,9 @@
/*
* BlueZ HCI UART driver.
*
* $Id: hci_uart.c,v 1.5 2001/07/05 18:42:44 maxk Exp $
* $Id: hci_ldisc.c,v 1.2 2002/04/17 17:37:20 maxk Exp $
*/
#define VERSION "1.0"
#define VERSION "2.0"
#include <linux/config.h>
#include <linux/module.h>
......@@ -52,37 +52,68 @@
#include <linux/skbuff.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/bluez.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_uart.h>
#include "hci_uart.h"
#ifndef HCI_UART_DEBUG
#undef DBG
#define DBG( A... )
#undef DMP
#define DMP( A... )
#undef BT_DBG
#define BT_DBG( A... )
#undef BT_DMP
#define BT_DMP( A... )
#endif
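/* UART transport protocols (currently only H4) register themselves in this
 * table, indexed by protocol id; the line discipline looks an entry up with
 * n_hci_get_proto(). */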
static struct hci_uart_proto *hup[HCI_UART_MAX_PROTO];
int hci_uart_register_proto(struct hci_uart_proto *p)
{
if (p->id >= HCI_UART_MAX_PROTO)
return -EINVAL;
if (hup[p->id])
return -EEXIST;
hup[p->id] = p;
return 0;
}
int hci_uart_unregister_proto(struct hci_uart_proto *p)
{
if (p->id >= HCI_UART_MAX_PROTO)
return -EINVAL;
if (!hup[p->id])
return -EINVAL;
hup[p->id] = NULL;
return 0;
}
static struct hci_uart_proto *n_hci_get_proto(unsigned int id)
{
if (id >= HCI_UART_MAX_PROTO)
return NULL;
return hup[id];
}
/* ------- Interface to HCI layer ------ */
/* Initialize device */
int n_hci_open(struct hci_dev *hdev)
static int n_hci_open(struct hci_dev *hdev)
{
DBG("%s %p", hdev->name, hdev);
BT_DBG("%s %p", hdev->name, hdev);
/* Nothing to do for UART driver */
hdev->flags |= HCI_RUNNING;
set_bit(HCI_RUNNING, &hdev->flags);
return 0;
}
/* Reset device */
int n_hci_flush(struct hci_dev *hdev)
static int n_hci_flush(struct hci_dev *hdev)
{
struct n_hci *n_hci = (struct n_hci *) hdev->driver_data;
struct tty_struct *tty = n_hci->tty;
DBG("hdev %p tty %p", hdev, tty);
BT_DBG("hdev %p tty %p", hdev, tty);
/* Drop TX queue */
skb_queue_purge(&n_hci->txq);
......@@ -94,168 +125,158 @@ int n_hci_flush(struct hci_dev *hdev)
if (tty->driver.flush_buffer)
tty->driver.flush_buffer(tty);
if (n_hci->proto->flush)
n_hci->proto->flush(n_hci);
return 0;
}
/* Close device */
int n_hci_close(struct hci_dev *hdev)
static int n_hci_close(struct hci_dev *hdev)
{
DBG("hdev %p", hdev);
BT_DBG("hdev %p", hdev);
hdev->flags &= ~HCI_RUNNING;
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
n_hci_flush(hdev);
return 0;
}
int n_hci_tx_wakeup(struct n_hci *n_hci)
static int n_hci_tx_wakeup(struct n_hci *n_hci)
{
register struct tty_struct *tty = n_hci->tty;
if (test_and_set_bit(TRANS_SENDING, &n_hci->tx_state)) {
set_bit(TRANS_WAKEUP, &n_hci->tx_state);
struct hci_dev *hdev = &n_hci->hdev;
if (test_and_set_bit(N_HCI_SENDING, &n_hci->tx_state)) {
set_bit(N_HCI_TX_WAKEUP, &n_hci->tx_state);
return 0;
}
DBG("");
BT_DBG("");
do {
register struct sk_buff *skb;
register int len;
clear_bit(TRANS_WAKEUP, &n_hci->tx_state);
clear_bit(N_HCI_TX_WAKEUP, &n_hci->tx_state);
if (!(skb = skb_dequeue(&n_hci->txq)))
break;
DMP(skb->data, skb->len);
len = n_hci->proto->send(n_hci, skb->data, skb->len);
n_hci->hdev.stat.byte_tx += len;
/* Send frame to TTY driver */
tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
len = tty->driver.write(tty, 0, skb->data, skb->len);
if (len == skb->len) {
/* Complete frame was sent */
n_hci->hdev.stat.byte_tx += len;
switch (skb->pkt_type) {
case HCI_COMMAND_PKT:
hdev->stat.cmd_tx++;
break;
DBG("sent %d", len);
case HCI_ACLDATA_PKT:
hdev->stat.acl_tx++;
break;
case HCI_SCODATA_PKT:
hdev->stat.sco_tx++;
break;
};
if (len == skb->len) {
/* Full frame was sent */
kfree_skb(skb);
} else {
/* Subtract sent part and requeue */
skb_pull(skb, len);
skb_queue_head(&n_hci->txq, skb);
}
} while (test_bit(TRANS_WAKEUP, &n_hci->tx_state));
clear_bit(TRANS_SENDING, &n_hci->tx_state);
} while (test_bit(N_HCI_TX_WAKEUP, &n_hci->tx_state));
clear_bit(N_HCI_SENDING, &n_hci->tx_state);
return 0;
}
/* Send frames from HCI layer */
int n_hci_send_frame(struct sk_buff *skb)
static int n_hci_send_frame(struct sk_buff *skb)
{
struct hci_dev* hdev = (struct hci_dev *) skb->dev;
struct tty_struct *tty;
struct n_hci *n_hci;
if (!hdev) {
ERR("Frame for uknown device (hdev=NULL)");
BT_ERR("Frame for uknown device (hdev=NULL)");
return -ENODEV;
}
if (!(hdev->flags & HCI_RUNNING))
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
n_hci = (struct n_hci *) hdev->driver_data;
tty = n_hci2tty(n_hci);
tty = n_hci->tty;
DBG("%s: type %d len %d", hdev->name, skb->pkt_type, skb->len);
BT_DBG("%s: type %d len %d", hdev->name, skb->pkt_type, skb->len);
switch (skb->pkt_type) {
case HCI_COMMAND_PKT:
hdev->stat.cmd_tx++;
break;
if (n_hci->proto->preq) {
skb = n_hci->proto->preq(n_hci, skb);
if (!skb)
return 0;
}
skb_queue_tail(&n_hci->txq, skb);
n_hci_tx_wakeup(n_hci);
return 0;
}
case HCI_ACLDATA_PKT:
hdev->stat.acl_tx++;
break;
static void n_hci_destruct(struct hci_dev *hdev)
{
struct n_hci *n_hci;
case HCI_SCODATA_PKT:
hdev->stat.cmd_tx++;
break;
};
if (!hdev) return;
/* Prepend skb with frame type and queue */
memcpy(skb_push(skb, 1), &skb->pkt_type, 1);
skb_queue_tail(&n_hci->txq, skb);
BT_DBG("%s", hdev->name);
n_hci_tx_wakeup(n_hci);
n_hci = (struct n_hci *) hdev->driver_data;
kfree(n_hci);
return 0;
MOD_DEC_USE_COUNT;
}
/* ------ LDISC part ------ */
/* n_hci_tty_open
*
* Called when line discipline changed to N_HCI.
*
* Arguments:
*
* Arguments:
* tty pointer to tty info structure
* Return Value:
* 0 if success, otherwise error code
*/
static int n_hci_tty_open(struct tty_struct *tty)
{
struct n_hci *n_hci = tty2n_hci(tty);
struct hci_dev *hdev;
struct n_hci *n_hci = (void *)tty->disc_data;
DBG("tty %p", tty);
BT_DBG("tty %p", tty);
if (n_hci)
return -EEXIST;
if (!(n_hci = kmalloc(sizeof(struct n_hci), GFP_KERNEL))) {
ERR("Can't allocate controll structure");
BT_ERR("Can't allocate controll structure");
return -ENFILE;
}
memset(n_hci, 0, sizeof(struct n_hci));
/* Initialize and register HCI device */
hdev = &n_hci->hdev;
hdev->type = HCI_UART;
hdev->driver_data = n_hci;
hdev->open = n_hci_open;
hdev->close = n_hci_close;
hdev->flush = n_hci_flush;
hdev->send = n_hci_send_frame;
if (hci_register_dev(hdev) < 0) {
ERR("Can't register HCI device %s", hdev->name);
kfree(n_hci);
return -ENODEV;
}
tty->disc_data = n_hci;
n_hci->tty = tty;
spin_lock_init(&n_hci->rx_lock);
n_hci->rx_state = WAIT_PACKET_TYPE;
skb_queue_head_init(&n_hci->txq);
MOD_INC_USE_COUNT;
/* Flush any pending characters in the driver and discipline. */
/* Flush any pending characters in the driver and line discipline */
if (tty->ldisc.flush_buffer)
tty->ldisc.flush_buffer(tty);
if (tty->driver.flush_buffer)
tty->driver.flush_buffer(tty);
MOD_INC_USE_COUNT;
return 0;
}
......@@ -266,22 +287,22 @@ static int n_hci_tty_open(struct tty_struct *tty)
*/
static void n_hci_tty_close(struct tty_struct *tty)
{
struct n_hci *n_hci = tty2n_hci(tty);
struct hci_dev *hdev = &n_hci->hdev;
struct n_hci *n_hci = (void *)tty->disc_data;
DBG("tty %p hdev %p", tty, hdev);
BT_DBG("tty %p", tty);
if (n_hci != NULL) {
/* Detach from the tty */
tty->disc_data = NULL;
if (n_hci) {
struct hci_dev *hdev = &n_hci->hdev;
n_hci_close(hdev);
if (hci_unregister_dev(hdev) < 0) {
ERR("Can't unregister HCI device %s",hdev->name);
if (test_and_clear_bit(N_HCI_PROTO_SET, &n_hci->flags)) {
n_hci->proto->close(n_hci);
hci_unregister_dev(hdev);
}
hdev->driver_data = NULL;
tty->disc_data = NULL;
kfree(n_hci);
MOD_DEC_USE_COUNT;
}
}
......@@ -296,9 +317,9 @@ static void n_hci_tty_close(struct tty_struct *tty)
*/
static void n_hci_tty_wakeup( struct tty_struct *tty )
{
struct n_hci *n_hci = tty2n_hci(tty);
struct n_hci *n_hci = (void *)tty->disc_data;
DBG("");
BT_DBG("");
if (!n_hci)
return;
......@@ -325,135 +346,6 @@ static int n_hci_tty_room (struct tty_struct *tty)
return 65536;
}
static inline int n_hci_check_data_len(struct n_hci *n_hci, int len)
{
register int room = skb_tailroom(n_hci->rx_skb);
DBG("len %d room %d", len, room);
if (!len) {
DMP(n_hci->rx_skb->data, n_hci->rx_skb->len);
hci_recv_frame(n_hci->rx_skb);
} else if (len > room) {
ERR("Data length is to large");
kfree_skb(n_hci->rx_skb);
n_hci->hdev.stat.err_rx++;
} else {
n_hci->rx_state = WAIT_DATA;
n_hci->rx_count = len;
return len;
}
n_hci->rx_state = WAIT_PACKET_TYPE;
n_hci->rx_skb = NULL;
n_hci->rx_count = 0;
return 0;
}
static inline void n_hci_rx(struct n_hci *n_hci, const __u8 * data, char *flags, int count)
{
register const char *ptr;
hci_event_hdr *eh;
hci_acl_hdr *ah;
hci_sco_hdr *sh;
register int len, type, dlen;
DBG("count %d state %ld rx_count %ld", count, n_hci->rx_state, n_hci->rx_count);
n_hci->hdev.stat.byte_rx += count;
ptr = data;
while (count) {
if (n_hci->rx_count) {
len = MIN(n_hci->rx_count, count);
memcpy(skb_put(n_hci->rx_skb, len), ptr, len);
n_hci->rx_count -= len; count -= len; ptr += len;
if (n_hci->rx_count)
continue;
switch (n_hci->rx_state) {
case WAIT_DATA:
DBG("Complete data");
DMP(n_hci->rx_skb->data, n_hci->rx_skb->len);
hci_recv_frame(n_hci->rx_skb);
n_hci->rx_state = WAIT_PACKET_TYPE;
n_hci->rx_skb = NULL;
continue;
case WAIT_EVENT_HDR:
eh = (hci_event_hdr *) n_hci->rx_skb->data;
DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen);
n_hci_check_data_len(n_hci, eh->plen);
continue;
case WAIT_ACL_HDR:
ah = (hci_acl_hdr *) n_hci->rx_skb->data;
dlen = __le16_to_cpu(ah->dlen);
DBG("ACL header: dlen %d", dlen);
n_hci_check_data_len(n_hci, dlen);
continue;
case WAIT_SCO_HDR:
sh = (hci_sco_hdr *) n_hci->rx_skb->data;
DBG("SCO header: dlen %d", sh->dlen);
n_hci_check_data_len(n_hci, sh->dlen);
continue;
};
}
/* WAIT_PACKET_TYPE */
switch (*ptr) {
case HCI_EVENT_PKT:
DBG("Event packet");
n_hci->rx_state = WAIT_EVENT_HDR;
n_hci->rx_count = HCI_EVENT_HDR_SIZE;
type = HCI_EVENT_PKT;
break;
case HCI_ACLDATA_PKT:
DBG("ACL packet");
n_hci->rx_state = WAIT_ACL_HDR;
n_hci->rx_count = HCI_ACL_HDR_SIZE;
type = HCI_ACLDATA_PKT;
break;
case HCI_SCODATA_PKT:
DBG("SCO packet");
n_hci->rx_state = WAIT_SCO_HDR;
n_hci->rx_count = HCI_SCO_HDR_SIZE;
type = HCI_SCODATA_PKT;
break;
default:
ERR("Unknown HCI packet type %2.2x", (__u8)*ptr);
n_hci->hdev.stat.err_rx++;
ptr++; count--;
continue;
};
ptr++; count--;
/* Allocate packet */
if (!(n_hci->rx_skb = bluez_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC))) {
ERR("Can't allocate mem for new packet");
n_hci->rx_state = WAIT_PACKET_TYPE;
n_hci->rx_count = 0;
return;
}
n_hci->rx_skb->dev = (void *) &n_hci->hdev;
n_hci->rx_skb->pkt_type = type;
}
}
/* n_hci_tty_receive()
*
* Called by tty low level driver when receive data is
......@@ -468,19 +360,72 @@ static inline void n_hci_rx(struct n_hci *n_hci, const __u8 * data, char *flags,
*/
static void n_hci_tty_receive(struct tty_struct *tty, const __u8 * data, char *flags, int count)
{
struct n_hci *n_hci = tty2n_hci(tty);
struct n_hci *n_hci = (void *)tty->disc_data;
if (!n_hci || tty != n_hci->tty)
return;
if (!test_bit(N_HCI_PROTO_SET, &n_hci->flags))
return;
spin_lock(&n_hci->rx_lock);
n_hci_rx(n_hci, data, flags, count);
n_hci->proto->recv(n_hci, (void *) data, count);
n_hci->hdev.stat.byte_rx += count;
spin_unlock(&n_hci->rx_lock);
if (test_and_clear_bit(TTY_THROTTLED,&tty->flags) && tty->driver.unthrottle)
tty->driver.unthrottle(tty);
}
static int n_hci_register_dev(struct n_hci *n_hci)
{
struct hci_dev *hdev;
BT_DBG("");
/* Initialize and register HCI device */
hdev = &n_hci->hdev;
hdev->type = HCI_UART;
hdev->driver_data = n_hci;
hdev->open = n_hci_open;
hdev->close = n_hci_close;
hdev->flush = n_hci_flush;
hdev->send = n_hci_send_frame;
hdev->destruct = n_hci_destruct;
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device %s", hdev->name);
return -ENODEV;
}
MOD_INC_USE_COUNT;
return 0;
}
static int n_hci_set_proto(struct n_hci *n_hci, int id)
{
struct hci_uart_proto *p;
int err;
p = n_hci_get_proto(id);
if (!p)
return -EPROTONOSUPPORT;
err = p->open(n_hci);
if (err)
return err;
n_hci->proto = p;
err = n_hci_register_dev(n_hci);
if (err) {
p->close(n_hci);
return err;
}
return 0;
}
/* n_hci_tty_ioctl()
*
* Process IOCTL system call for the tty device.
......@@ -494,25 +439,41 @@ static void n_hci_tty_receive(struct tty_struct *tty, const __u8 * data, char *f
*
* Return Value: Command dependent
*/
static int n_hci_tty_ioctl (struct tty_struct *tty, struct file * file,
static int n_hci_tty_ioctl(struct tty_struct *tty, struct file * file,
unsigned int cmd, unsigned long arg)
{
struct n_hci *n_hci = tty2n_hci(tty);
int error = 0;
struct n_hci *n_hci = (void *)tty->disc_data;
int err = 0;
DBG("");
BT_DBG("");
/* Verify the status of the device */
if (!n_hci)
return -EBADF;
switch (cmd) {
default:
error = n_tty_ioctl(tty, file, cmd, arg);
break;
case HCIUARTSETPROTO:
if (!test_and_set_bit(N_HCI_PROTO_SET, &n_hci->flags)) {
err = n_hci_set_proto(n_hci, arg);
if (err) {
clear_bit(N_HCI_PROTO_SET, &n_hci->flags);
return err;
}
tty->low_latency = 1;
} else
return -EBUSY;
case HCIUARTGETPROTO:
if (test_bit(N_HCI_PROTO_SET, &n_hci->flags))
return n_hci->proto->id;
return -EUNATCH;
default:
err = n_tty_ioctl(tty, file, cmd, arg);
break;
};
return error;
return err;
}
/*
......@@ -531,14 +492,19 @@ static unsigned int n_hci_tty_poll(struct tty_struct *tty, struct file *filp, po
return 0;
}
#ifdef CONFIG_BLUEZ_HCIUART_H4
int h4_init(void);
int h4_deinit(void);
#endif
int __init n_hci_init(void)
{
static struct tty_ldisc n_hci_ldisc;
int err;
INF("BlueZ HCI UART driver ver %s Copyright (C) 2000,2001 Qualcomm Inc",
BT_INFO("BlueZ HCI UART driver ver %s Copyright (C) 2000,2001 Qualcomm Inc",
VERSION);
INF("Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>");
BT_INFO("Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>");
/* Register the tty discipline */
......@@ -556,10 +522,14 @@ int __init n_hci_init(void)
n_hci_ldisc.write_wakeup= n_hci_tty_wakeup;
if ((err = tty_register_ldisc(N_HCI, &n_hci_ldisc))) {
ERR("Can't register HCI line discipline (%d)", err);
BT_ERR("Can't register HCI line discipline (%d)", err);
return err;
}
#ifdef CONFIG_BLUEZ_HCIUART_H4
h4_init();
#endif
return 0;
}
......@@ -567,9 +537,13 @@ void n_hci_cleanup(void)
{
int err;
#ifdef CONFIG_BLUEZ_HCIUART_H4
h4_deinit();
#endif
/* Release tty registration of line discipline */
if ((err = tty_register_ldisc(N_HCI, NULL)))
ERR("Can't unregister HCI line discipline (%d)", err);
BT_ERR("Can't unregister HCI line discipline (%d)", err);
}
module_init(n_hci_init);
......
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/*
* $Id: hci_uart.h,v 1.1.1.1 2002/03/08 21:03:15 maxk Exp $
*/
#ifndef N_HCI
#define N_HCI 15
#endif
/* Ioctls */
#define HCIUARTSETPROTO _IOW('U', 200, int)
#define HCIUARTGETPROTO _IOR('U', 201, int)
/* UART protocols */
#define HCI_UART_MAX_PROTO 3
#define HCI_UART_H4 0
#define HCI_UART_BCSP 1
#define HCI_UART_NCSP 2
#ifdef __KERNEL__
struct n_hci;
struct hci_uart_proto {
unsigned int id;
int (*open)(struct n_hci *n_hci);
int (*recv)(struct n_hci *n_hci, void *data, int len);
int (*send)(struct n_hci *n_hci, void *data, int len);
int (*close)(struct n_hci *n_hci);
int (*flush)(struct n_hci *n_hci);
struct sk_buff* (*preq)(struct n_hci *n_hci, struct sk_buff *skb);
};
struct n_hci {
struct tty_struct *tty;
struct hci_dev hdev;
unsigned long flags;
struct hci_uart_proto *proto;
void *priv;
struct sk_buff_head txq;
unsigned long tx_state;
spinlock_t rx_lock;
};
/* N_HCI flag bits */
#define N_HCI_PROTO_SET 0x00
/* TX states */
#define N_HCI_SENDING 1
#define N_HCI_TX_WAKEUP 2
int hci_uart_register_proto(struct hci_uart_proto *p);
int hci_uart_unregister_proto(struct hci_uart_proto *p);
#endif /* __KERNEL__ */
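
For context, the HCIUARTSETPROTO/HCIUARTGETPROTO ioctls above are issued by a user-space attach utility once the serial port has been switched to the N_HCI line discipline; only then does the ldisc register an hciX device. A rough user-space sketch, under the assumption that the port is already configured for the right speed (attach_h4 and the device path are hypothetical):

/* Hypothetical user-space sketch of the attach sequence (what a tool in the
 * spirit of hciattach does): switch the tty to N_HCI and select H4 framing. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>	/* _IOW */

#define N_HCI			15			/* mirrors the define above */
#define HCIUARTSETPROTO		_IOW('U', 200, int)
#define HCI_UART_H4		0

int attach_h4(const char *dev)				/* e.g. "/dev/ttyS1", assumed */
{
	int ldisc = N_HCI, fd;

	fd = open(dev, O_RDWR | O_NOCTTY);
	if (fd < 0)
		return -1;
	if (ioctl(fd, TIOCSETD, &ldisc) < 0)		/* attach the line discipline */
		return -1;
	if (ioctl(fd, HCIUARTSETPROTO, HCI_UART_H4) < 0)
		return -1;				/* ldisc registers hciX here  */
	return fd;					/* closing fd detaches it     */
}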
......@@ -28,191 +28,351 @@
* Copyright (c) 2000 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (c) 2000 Mark Douglas Corner <mcorner@umich.edu>
*
* $Id: hci_usb.c,v 1.5 2001/07/05 18:42:44 maxk Exp $
* $Id: hci_usb.c,v 1.6 2002/04/17 17:37:20 maxk Exp $
*/
#define VERSION "1.0"
#define VERSION "2.0"
#include <linux/config.h>
#include <linux/module.h>
#define __KERNEL_SYSCALLS__
#include <linux/version.h>
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/signal.h>
#include <linux/ioctl.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/usb.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/bluez.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_usb.h>
#include "hci_usb.h"
#define HCI_MAX_PENDING (HCI_MAX_BULK_RX + HCI_MAX_BULK_TX + 1)
#ifndef HCI_USB_DEBUG
#undef DBG
#define DBG( A... )
#undef DMP
#define DMP( A... )
#undef BT_DBG
#define BT_DBG( A... )
#undef BT_DMP
#define BT_DMP( A... )
#endif
#ifndef CONFIG_BLUEZ_USB_ZERO_PACKET
#undef USB_ZERO_PACKET
#define USB_ZERO_PACKET 0
#endif
static struct usb_driver hci_usb_driver;
static struct usb_device_id usb_bluetooth_ids [] = {
/* Generic Bluetooth USB device */
{ USB_DEVICE_INFO(HCI_DEV_CLASS, HCI_DEV_SUBCLASS, HCI_DEV_PROTOCOL) },
/* Ericsson with non-standard id */
{ USB_DEVICE(0x0bdb, 0x1002) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, usb_bluetooth_ids);
static int hci_usb_ctrl_msg(struct hci_usb *husb, struct sk_buff *skb);
static int hci_usb_write_msg(struct hci_usb *husb, struct sk_buff *skb);
static void hci_usb_interrupt(struct urb *urb);
static void hci_usb_rx_complete(struct urb *urb);
static void hci_usb_tx_complete(struct urb *urb);
static void hci_usb_unlink_urbs(struct hci_usb *husb)
static struct urb *hci_usb_get_completed(struct hci_usb *husb)
{
usb_unlink_urb(husb->read_urb);
usb_unlink_urb(husb->intr_urb);
usb_unlink_urb(husb->ctrl_urb);
usb_unlink_urb(husb->write_urb);
struct sk_buff *skb;
struct urb *urb = NULL;
skb = skb_dequeue(&husb->completed_q);
if (skb) {
urb = ((struct hci_usb_scb *) skb->cb)->urb;
kfree_skb(skb);
}
BT_DBG("%s urb %p", husb->hdev.name, urb);
return urb;
}
static void hci_usb_free_bufs(struct hci_usb *husb)
static int hci_usb_enable_intr(struct hci_usb *husb)
{
if (husb->read_urb) {
if (husb->read_urb->transfer_buffer)
kfree(husb->read_urb->transfer_buffer);
usb_free_urb(husb->read_urb);
struct urb *urb;
int pipe, size;
void *buf;
BT_DBG("%s", husb->hdev.name);
if (!(urb = usb_alloc_urb(0, GFP_KERNEL)))
return -ENOMEM;
if (!(buf = kmalloc(HCI_MAX_EVENT_SIZE, GFP_KERNEL))) {
usb_free_urb(urb);
return -ENOMEM;
}
if (husb->intr_urb) {
if (husb->intr_urb->transfer_buffer)
kfree(husb->intr_urb->transfer_buffer);
usb_free_urb(husb->intr_urb);
husb->intr_urb = urb;
pipe = usb_rcvintpipe(husb->udev, husb->intr_ep);
size = usb_maxpacket(husb->udev, pipe, usb_pipeout(pipe));
FILL_INT_URB(urb, husb->udev, pipe, buf, size,
hci_usb_interrupt, husb, husb->intr_interval);
return usb_submit_urb(urb, GFP_KERNEL);
}
static int hci_usb_disable_intr(struct hci_usb *husb)
{
struct urb *urb = husb->intr_urb;
struct sk_buff *skb;
BT_DBG("%s", husb->hdev.name);
usb_unlink_urb(urb); usb_free_urb(urb);
husb->intr_urb = NULL;
skb = husb->intr_skb;
if (skb) {
husb->intr_skb = NULL;
kfree_skb(skb);
}
if (husb->ctrl_urb)
usb_free_urb(husb->ctrl_urb);
return 0;
}
if (husb->write_urb)
usb_free_urb(husb->write_urb);
static int hci_usb_rx_submit(struct hci_usb *husb, struct urb *urb)
{
struct hci_usb_scb *scb;
struct sk_buff *skb;
int pipe, size, err;
if (husb->intr_skb)
kfree_skb(husb->intr_skb);
if (!urb && !(urb = usb_alloc_urb(0, GFP_ATOMIC)))
return -ENOMEM;
size = HCI_MAX_FRAME_SIZE;
if (!(skb = bluez_skb_alloc(size, GFP_ATOMIC))) {
usb_free_urb(urb);
return -ENOMEM;
}
BT_DBG("%s urb %p", husb->hdev.name, urb);
skb->dev = (void *) &husb->hdev;
skb->pkt_type = HCI_ACLDATA_PKT;
scb = (struct hci_usb_scb *) skb->cb;
scb->urb = urb;
pipe = usb_rcvbulkpipe(husb->udev, husb->bulk_in_ep);
FILL_BULK_URB(urb, husb->udev, pipe, skb->data, size, hci_usb_rx_complete, skb);
urb->transfer_flags = USB_QUEUE_BULK;
skb_queue_tail(&husb->pending_q, skb);
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err) {
BT_ERR("%s bulk rx submit failed urb %p err %d",
husb->hdev.name, urb, err);
skb_unlink(skb);
usb_free_urb(urb);
}
return err;
}
/* ------- Interface to HCI layer ------ */
/* Initialize device */
int hci_usb_open(struct hci_dev *hdev)
static int hci_usb_open(struct hci_dev *hdev)
{
struct hci_usb *husb = (struct hci_usb *) hdev->driver_data;
int status;
DBG("%s", hdev->name);
int i, err;
long flags;
husb->read_urb->dev = husb->udev;
if ((status = usb_submit_urb(husb->read_urb, GFP_KERNEL)))
DBG("read submit failed. %d", status);
BT_DBG("%s", hdev->name);
husb->intr_urb->dev = husb->udev;
if ((status = usb_submit_urb(husb->intr_urb, GFP_KERNEL)))
DBG("interrupt submit failed. %d", status);
if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
return 0;
hdev->flags |= HCI_RUNNING;
write_lock_irqsave(&husb->completion_lock, flags);
return 0;
err = hci_usb_enable_intr(husb);
if (!err) {
for (i = 0; i < HCI_MAX_BULK_TX; i++)
hci_usb_rx_submit(husb, NULL);
} else
clear_bit(HCI_RUNNING, &hdev->flags);
write_unlock_irqrestore(&husb->completion_lock, flags);
return err;
}
/* Reset device */
int hci_usb_flush(struct hci_dev *hdev)
static int hci_usb_flush(struct hci_dev *hdev)
{
struct hci_usb *husb = (struct hci_usb *) hdev->driver_data;
DBG("%s", hdev->name);
/* Drop TX queues */
skb_queue_purge(&husb->tx_ctrl_q);
skb_queue_purge(&husb->tx_write_q);
BT_DBG("%s", hdev->name);
skb_queue_purge(&husb->cmd_q);
skb_queue_purge(&husb->acl_q);
return 0;
}
static inline void hci_usb_unlink_urbs(struct hci_usb *husb)
{
struct sk_buff *skb;
struct urb *urb;
BT_DBG("%s", husb->hdev.name);
while ((skb = skb_dequeue(&husb->pending_q))) {
urb = ((struct hci_usb_scb *) skb->cb)->urb;
usb_unlink_urb(urb);
kfree_skb(skb);
}
while ((urb = hci_usb_get_completed(husb)))
usb_free_urb(urb);
}
/* Close device */
int hci_usb_close(struct hci_dev *hdev)
static int hci_usb_close(struct hci_dev *hdev)
{
struct hci_usb *husb = (struct hci_usb *) hdev->driver_data;
long flags;
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
DBG("%s", hdev->name);
BT_DBG("%s", hdev->name);
hdev->flags &= ~HCI_RUNNING;
write_lock_irqsave(&husb->completion_lock, flags);
hci_usb_disable_intr(husb);
hci_usb_unlink_urbs(husb);
hci_usb_flush(hdev);
write_unlock_irqrestore(&husb->completion_lock, flags);
return 0;
}
void hci_usb_ctrl_wakeup(struct hci_usb *husb)
static inline int hci_usb_send_ctrl(struct hci_usb *husb, struct sk_buff *skb)
{
struct sk_buff *skb;
struct hci_usb_scb *scb = (void *) skb->cb;
struct urb *urb = hci_usb_get_completed(husb);
struct usb_ctrlrequest *cr;
int pipe, err;
if (test_and_set_bit(HCI_TX_CTRL, &husb->tx_state))
return;
if (!urb && !(urb = usb_alloc_urb(0, GFP_ATOMIC)))
return -ENOMEM;
DBG("%s", husb->hdev.name);
if (!(cr = kmalloc(sizeof(*cr), GFP_ATOMIC))) {
usb_free_urb(urb);
return -ENOMEM;
}
pipe = usb_sndctrlpipe(husb->udev, 0);
if (!(skb = skb_dequeue(&husb->tx_ctrl_q)))
goto done;
cr->bRequestType = HCI_CTRL_REQ;
cr->bRequest = 0;
cr->wIndex = 0;
cr->wValue = 0;
cr->wLength = __cpu_to_le16(skb->len);
if (hci_usb_ctrl_msg(husb, skb)){
kfree_skb(skb);
goto done;
}
FILL_CONTROL_URB(urb, husb->udev, pipe, (void *) cr,
skb->data, skb->len, hci_usb_tx_complete, skb);
DMP(skb->data, skb->len);
BT_DBG("%s urb %p len %d", husb->hdev.name, urb, skb->len);
husb->hdev.stat.byte_tx += skb->len;
return;
scb->urb = urb;
done:
clear_bit(HCI_TX_CTRL, &husb->tx_state);
return;
skb_queue_tail(&husb->pending_q, skb);
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err) {
BT_ERR("%s ctrl tx submit failed urb %p err %d",
husb->hdev.name, urb, err);
skb_unlink(skb);
usb_free_urb(urb); kfree(cr);
}
return err;
}
void hci_usb_write_wakeup(struct hci_usb *husb)
static inline int hci_usb_send_bulk(struct hci_usb *husb, struct sk_buff *skb)
{
struct sk_buff *skb;
struct hci_usb_scb *scb = (void *) skb->cb;
struct urb *urb = hci_usb_get_completed(husb);
int pipe, err;
if (test_and_set_bit(HCI_TX_WRITE, &husb->tx_state))
return;
if (!urb && !(urb = usb_alloc_urb(0, GFP_ATOMIC)))
return -ENOMEM;
DBG("%s", husb->hdev.name);
pipe = usb_sndbulkpipe(husb->udev, husb->bulk_out_ep);
FILL_BULK_URB(urb, husb->udev, pipe, skb->data, skb->len,
hci_usb_tx_complete, skb);
urb->transfer_flags = USB_QUEUE_BULK | USB_ZERO_PACKET;
if (!(skb = skb_dequeue(&husb->tx_write_q)))
goto done;
BT_DBG("%s urb %p len %d", husb->hdev.name, urb, skb->len);
if (hci_usb_write_msg(husb, skb)) {
skb_queue_head(&husb->tx_write_q, skb);
goto done;
scb->urb = urb;
skb_queue_tail(&husb->pending_q, skb);
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err) {
BT_ERR("%s bulk tx submit failed urb %p err %d",
husb->hdev.name, urb, err);
skb_unlink(skb);
usb_free_urb(urb);
}
return err;
}
static void hci_usb_tx_process(struct hci_usb *husb)
{
struct sk_buff *skb;
DMP(skb->data, skb->len);
BT_DBG("%s", husb->hdev.name);
husb->hdev.stat.byte_tx += skb->len;
return;
do {
clear_bit(HCI_USB_TX_WAKEUP, &husb->state);
/* Process ACL queue */
while (skb_queue_len(&husb->pending_q) < HCI_MAX_PENDING &&
(skb = skb_dequeue(&husb->acl_q))) {
if (hci_usb_send_bulk(husb, skb) < 0) {
skb_queue_head(&husb->acl_q, skb);
break;
}
}
done:
clear_bit(HCI_TX_WRITE, &husb->tx_state);
return;
/* Process command queue */
if (!test_bit(HCI_USB_CTRL_TX, &husb->state) &&
(skb = skb_dequeue(&husb->cmd_q)) != NULL) {
set_bit(HCI_USB_CTRL_TX, &husb->state);
if (hci_usb_send_ctrl(husb, skb) < 0) {
skb_queue_head(&husb->cmd_q, skb);
clear_bit(HCI_USB_CTRL_TX, &husb->state);
}
}
} while(test_bit(HCI_USB_TX_WAKEUP, &husb->state));
}
static inline void hci_usb_tx_wakeup(struct hci_usb *husb)
{
/* Serialize TX queue processing to avoid data reordering */
if (!test_and_set_bit(HCI_USB_TX_PROCESS, &husb->state)) {
hci_usb_tx_process(husb);
clear_bit(HCI_USB_TX_PROCESS, &husb->state);
} else
set_bit(HCI_USB_TX_WAKEUP, &husb->state);
}
/* Send frames from HCI layer */
......@@ -222,376 +382,401 @@ int hci_usb_send_frame(struct sk_buff *skb)
struct hci_usb *husb;
if (!hdev) {
ERR("frame for uknown device (hdev=NULL)");
BT_ERR("frame for uknown device (hdev=NULL)");
return -ENODEV;
}
if (!(hdev->flags & HCI_RUNNING))
return 0;
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
husb = (struct hci_usb *) hdev->driver_data;
DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);
BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);
read_lock(&husb->completion_lock);
switch (skb->pkt_type) {
case HCI_COMMAND_PKT:
skb_queue_tail(&husb->tx_ctrl_q, skb);
hci_usb_ctrl_wakeup(husb);
hdev->stat.cmd_tx++;
return 0;
case HCI_ACLDATA_PKT:
skb_queue_tail(&husb->tx_write_q, skb);
hci_usb_write_wakeup(husb);
hdev->stat.acl_tx++;
return 0;
case HCI_SCODATA_PKT:
return -EOPNOTSUPP;
};
case HCI_COMMAND_PKT:
skb_queue_tail(&husb->cmd_q, skb);
hdev->stat.cmd_tx++;
break;
case HCI_ACLDATA_PKT:
skb_queue_tail(&husb->acl_q, skb);
hdev->stat.acl_tx++;
break;
case HCI_SCODATA_PKT:
default:
kfree_skb(skb);
break;
}
hci_usb_tx_wakeup(husb);
read_unlock(&husb->completion_lock);
return 0;
}
/* ---------- USB ------------- */
static void hci_usb_ctrl(struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct hci_dev *hdev;
struct hci_usb *husb;
if (!skb)
return;
hdev = (struct hci_dev *) skb->dev;
husb = (struct hci_usb *) hdev->driver_data;
DBG("%s", hdev->name);
if (urb->status)
DBG("%s ctrl status: %d", hdev->name, urb->status);
clear_bit(HCI_TX_CTRL, &husb->tx_state);
kfree_skb(skb);
/* Wake up device */
hci_usb_ctrl_wakeup(husb);
}
static void hci_usb_bulk_write(struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct hci_dev *hdev;
struct hci_usb *husb;
if (!skb)
return;
hdev = (struct hci_dev *) skb->dev;
husb = (struct hci_usb *) hdev->driver_data;
DBG("%s", hdev->name);
if (urb->status)
DBG("%s bulk write status: %d", hdev->name, urb->status);
clear_bit(HCI_TX_WRITE, &husb->tx_state);
kfree_skb(skb);
/* Wake up device */
hci_usb_write_wakeup(husb);
return;
}
static void hci_usb_intr(struct urb *urb)
static void hci_usb_interrupt(struct urb *urb)
{
struct hci_usb *husb = (struct hci_usb *) urb->context;
unsigned char *data = urb->transfer_buffer;
register int count = urb->actual_length;
register struct sk_buff *skb = husb->intr_skb;
struct hci_usb *husb = (void *) urb->context;
struct hci_usb_scb *scb;
struct sk_buff *skb;
hci_event_hdr *eh;
register int len;
__u8 *data = urb->transfer_buffer;
int count = urb->actual_length;
int len = HCI_EVENT_HDR_SIZE;
if (!husb)
return;
BT_DBG("%s urb %p count %d", husb->hdev.name, urb, count);
DBG("%s count %d", husb->hdev.name, count);
if (!test_bit(HCI_RUNNING, &husb->hdev.flags))
return;
if (urb->status || !count) {
DBG("%s intr status %d, count %d", husb->hdev.name, urb->status, count);
BT_DBG("%s intr status %d, count %d",
husb->hdev.name, urb->status, count);
return;
}
/* Do we really have to handle continuations here ? */
if (!skb) {
/* New frame */
if (count < HCI_EVENT_HDR_SIZE) {
DBG("%s bad frame len %d", husb->hdev.name, count);
return;
}
read_lock(&husb->completion_lock);
husb->hdev.stat.byte_rx += count;
eh = (hci_event_hdr *) data;
if (!(skb = husb->intr_skb)) {
/* Start of the frame */
if (count < HCI_EVENT_HDR_SIZE)
goto bad_len;
eh = (hci_event_hdr *) data;
len = eh->plen + HCI_EVENT_HDR_SIZE;
if (count > len) {
DBG("%s corrupted frame, len %d", husb->hdev.name, count);
return;
}
if (count > len)
goto bad_len;
/* Allocate skb */
if (!(skb = bluez_skb_alloc(len, GFP_ATOMIC))) {
ERR("Can't allocate mem for new packet");
return;
skb = bluez_skb_alloc(len, GFP_ATOMIC);
if (!skb) {
BT_ERR("%s no memory for event packet", husb->hdev.name);
goto done;
}
scb = (void *) skb->cb;
skb->dev = (void *) &husb->hdev;
skb->pkt_type = HCI_EVENT_PKT;
husb->intr_skb = skb;
husb->intr_count = len;
scb->intr_len = len;
} else {
/* Continuation */
if (count > husb->intr_count) {
ERR("%s bad frame len %d (expected %d)", husb->hdev.name, count, husb->intr_count);
kfree_skb(skb);
scb = (void *) skb->cb;
len = scb->intr_len;
if (count > len) {
husb->intr_skb = NULL;
husb->intr_count = 0;
return;
kfree_skb(skb);
goto bad_len;
}
}
memcpy(skb_put(skb, count), data, count);
husb->intr_count -= count;
scb->intr_len -= count;
DMP(data, count);
if (!scb->intr_len) {
/* Complete frame */
husb->intr_skb = NULL;
hci_recv_frame(skb);
}
if (!husb->intr_count) {
/* Got complete frame */
done:
read_unlock(&husb->completion_lock);
return;
husb->hdev.stat.byte_rx += skb->len;
hci_recv_frame(skb);
bad_len:
BT_ERR("%s bad frame len %d expected %d", husb->hdev.name, count, len);
husb->hdev.stat.err_rx++;
read_unlock(&husb->completion_lock);
}
husb->intr_skb = NULL;
static void hci_usb_tx_complete(struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
struct hci_usb *husb = (struct hci_usb *) hdev->driver_data;
BT_DBG("%s urb %p status %d flags %x", husb->hdev.name, urb,
urb->status, urb->transfer_flags);
if (urb->pipe == usb_sndctrlpipe(husb->udev, 0)) {
kfree(urb->setup_packet);
clear_bit(HCI_USB_CTRL_TX, &husb->state);
}
if (!test_bit(HCI_RUNNING, &hdev->flags))
return;
read_lock(&husb->completion_lock);
if (!urb->status)
husb->hdev.stat.byte_tx += skb->len;
else
husb->hdev.stat.err_tx++;
skb_unlink(skb);
skb_queue_tail(&husb->completed_q, skb);
hci_usb_tx_wakeup(husb);
read_unlock(&husb->completion_lock);
return;
}
static void hci_usb_bulk_read(struct urb *urb)
static void hci_usb_rx_complete(struct urb *urb)
{
struct hci_usb *husb = (struct hci_usb *) urb->context;
unsigned char *data = urb->transfer_buffer;
int count = urb->actual_length, status;
struct sk_buff *skb;
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
struct hci_usb *husb = (struct hci_usb *) hdev->driver_data;
int status, count = urb->actual_length;
hci_acl_hdr *ah;
register __u16 dlen;
int dlen, size;
if (!husb)
BT_DBG("%s urb %p status %d count %d flags %x", husb->hdev.name, urb,
urb->status, count, urb->transfer_flags);
if (!test_bit(HCI_RUNNING, &hdev->flags))
return;
DBG("%s status %d, count %d, flags %x", husb->hdev.name, urb->status, count, urb->transfer_flags);
read_lock(&husb->completion_lock);
if (urb->status) {
/* Do not re-submit URB on critical errors */
switch (urb->status) {
case -ENOENT:
return;
default:
goto resubmit;
};
}
if (!count)
if (urb->status || !count)
goto resubmit;
DMP(data, count);
husb->hdev.stat.byte_rx += count;
ah = (hci_acl_hdr *) data;
dlen = le16_to_cpu(ah->dlen);
ah = (hci_acl_hdr *) skb->data;
dlen = __le16_to_cpu(ah->dlen);
size = HCI_ACL_HDR_SIZE + dlen;
/* Verify frame len and completeness */
if ((count - HCI_ACL_HDR_SIZE) != dlen) {
ERR("%s corrupted ACL packet: count %d, plen %d", husb->hdev.name, count, dlen);
if (count != size) {
BT_ERR("%s corrupted ACL packet: count %d, dlen %d",
husb->hdev.name, count, dlen);
bluez_dump("hci_usb", skb->data, count);
husb->hdev.stat.err_rx++;
goto resubmit;
}
/* Allocate packet */
if (!(skb = bluez_skb_alloc(count, GFP_ATOMIC))) {
ERR("Can't allocate mem for new packet");
goto resubmit;
}
memcpy(skb_put(skb, count), data, count);
skb->dev = (void *) &husb->hdev;
skb->pkt_type = HCI_ACLDATA_PKT;
husb->hdev.stat.byte_rx += skb->len;
skb_unlink(skb);
skb_put(skb, count);
hci_recv_frame(skb);
resubmit:
husb->read_urb->dev = husb->udev;
if ((status = usb_submit_urb(husb->read_urb, GFP_KERNEL)))
DBG("%s read URB submit failed %d", husb->hdev.name, status);
hci_usb_rx_submit(husb, urb);
DBG("%s read URB re-submited", husb->hdev.name);
read_unlock(&husb->completion_lock);
return;
resubmit:
urb->dev = husb->udev;
status = usb_submit_urb(urb, GFP_ATOMIC);
BT_DBG("%s URB resubmit status %d", husb->hdev.name, status);
read_unlock(&husb->completion_lock);
}
static int hci_usb_ctrl_msg(struct hci_usb *husb, struct sk_buff *skb)
static void hci_usb_destruct(struct hci_dev *hdev)
{
struct urb *urb = husb->ctrl_urb;
struct usb_ctrlrequest *dr = &husb->dev_req;
int pipe, status;
struct hci_usb *husb;
DBG("%s len %d", husb->hdev.name, skb->len);
if (!hdev) return;
pipe = usb_sndctrlpipe(husb->udev, 0);
BT_DBG("%s", hdev->name);
husb = (struct hci_usb *) hdev->driver_data;
kfree(husb);
dr->bRequestType = HCI_CTRL_REQ;
dr->bRequest = 0;
dr->wIndex = 0;
dr->wValue = 0;
dr->wLength = cpu_to_le16(skb->len);
MOD_DEC_USE_COUNT;
}
FILL_CONTROL_URB(urb, husb->udev, pipe, (void*)dr, skb->data, skb->len,
hci_usb_ctrl, skb);
#ifdef CONFIG_BLUEZ_USB_FW_LOAD
if ((status = usb_submit_urb(urb, GFP_KERNEL))) {
DBG("%s control URB submit failed %d", husb->hdev.name, status);
return status;
}
/* Support for user mode Bluetooth USB firmware loader */
return 0;
}
#define FW_LOADER "/sbin/bluefw"
static int errno;
static int hci_usb_write_msg(struct hci_usb *husb, struct sk_buff *skb)
static int hci_usb_fw_exec(void *dev)
{
struct urb *urb = husb->write_urb;
int pipe, status;
char *envp[] = { "HOME=/", "TERM=linux",
"PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
char *argv[] = { FW_LOADER, dev, NULL };
int err;
DBG("%s len %d", husb->hdev.name, skb->len);
err = exec_usermodehelper(FW_LOADER, argv, envp);
if (err)
BT_ERR("failed to exec %s %s", FW_LOADER, (char *)dev);
return err;
}
pipe = usb_sndbulkpipe(husb->udev, husb->bulk_out_ep_addr);
static int hci_usb_fw_load(struct usb_device *udev)
{
sigset_t tmpsig;
char dev[16];
pid_t pid;
int result;
/* Check if root fs is mounted */
if (!current->fs->root) {
BT_ERR("root fs not mounted");
return -EPERM;
}
FILL_BULK_URB(urb, husb->udev, pipe, skb->data, skb->len,
hci_usb_bulk_write, skb);
urb->transfer_flags |= USB_QUEUE_BULK;
sprintf(dev, "%3.3d/%3.3d", udev->bus->busnum, udev->devnum);
if ((status = usb_submit_urb(urb, GFP_KERNEL))) {
DBG("%s write URB submit failed %d", husb->hdev.name, status);
return status;
pid = kernel_thread(hci_usb_fw_exec, (void *)dev, 0);
if (pid < 0) {
BT_ERR("fork failed, errno %d\n", -pid);
return pid;
}
/* Block signals, everything but SIGKILL/SIGSTOP */
spin_lock_irq(&current->sigmask_lock);
tmpsig = current->blocked;
siginitsetinv(&current->blocked, sigmask(SIGKILL) | sigmask(SIGSTOP));
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
result = waitpid(pid, NULL, __WCLONE);
/* Allow signals again */
spin_lock_irq(&current->sigmask_lock);
current->blocked = tmpsig;
recalc_sigpending();
spin_unlock_irq(&current->sigmask_lock);
if (result != pid) {
BT_ERR("waitpid failed pid %d errno %d\n", pid, -result);
return -result;
}
return 0;
}
#endif /* CONFIG_BLUEZ_USB_FW_LOAD */
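
As the code above shows, the kernel side only forks a helper and waits for it; the firmware transfer itself happens in user space. Below is a stub that does nothing but illustrate the calling convention; what the real /sbin/bluefw actually downloads, and how, is outside this patch:

/* Hypothetical user-space stub, showing only the calling convention: the
 * kernel execs FW_LOADER with a single "bus/dev" argument (e.g. "003/004")
 * and a minimal PATH/HOME/TERM environment. */
#include <stdio.h>

int main(int argc, char *argv[])
{
	if (argc != 2) {
		fprintf(stderr, "usage: bluefw <bus>/<dev>\n");
		return 1;
	}
	fprintf(stderr, "bluefw: firmware load requested for usb device %s\n", argv[1]);
	/* ... open the device via usbfs and download the firmware here ... */
	return 0;
}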
static void * hci_usb_probe(struct usb_device *udev, unsigned int ifnum, const struct usb_device_id *id)
{
struct usb_endpoint_descriptor *bulk_out_ep, *intr_in_ep, *bulk_in_ep;
struct usb_endpoint_descriptor *bulk_out_ep[HCI_MAX_IFACE_NUM];
struct usb_endpoint_descriptor *isoc_out_ep[HCI_MAX_IFACE_NUM];
struct usb_endpoint_descriptor *bulk_in_ep[HCI_MAX_IFACE_NUM];
struct usb_endpoint_descriptor *isoc_in_ep[HCI_MAX_IFACE_NUM];
struct usb_endpoint_descriptor *intr_in_ep[HCI_MAX_IFACE_NUM];
struct usb_interface_descriptor *uif;
struct usb_endpoint_descriptor *ep;
struct usb_interface *iface, *isoc_iface;
struct hci_usb *husb;
struct hci_dev *hdev;
int i, size, pipe;
__u8 * buf;
int i, a, e, size, ifn, isoc_ifnum, isoc_alts;
DBG("udev %p ifnum %d", udev, ifnum);
BT_DBG("udev %p ifnum %d", udev, ifnum);
/* Check device signature */
if ((udev->descriptor.bDeviceClass != HCI_DEV_CLASS) ||
(udev->descriptor.bDeviceSubClass != HCI_DEV_SUBCLASS)||
(udev->descriptor.bDeviceProtocol != HCI_DEV_PROTOCOL) )
/* Check number of endpoints */
if (udev->actconfig->interface[ifnum].altsetting[0].bNumEndpoints < 3)
return NULL;
MOD_INC_USE_COUNT;
uif = &udev->actconfig->interface[ifnum].altsetting[0];
if (uif->bNumEndpoints != 3) {
DBG("Wrong number of endpoints %d", uif->bNumEndpoints);
MOD_DEC_USE_COUNT;
return NULL;
}
#ifdef CONFIG_BLUEZ_USB_FW_LOAD
hci_usb_fw_load(udev);
#endif
bulk_out_ep = intr_in_ep = bulk_in_ep = NULL;
memset(bulk_out_ep, 0, sizeof(bulk_out_ep));
memset(isoc_out_ep, 0, sizeof(isoc_out_ep));
memset(bulk_in_ep, 0, sizeof(bulk_in_ep));
memset(isoc_in_ep, 0, sizeof(isoc_in_ep));
memset(intr_in_ep, 0, sizeof(intr_in_ep));
size = 0;
isoc_iface = NULL;
isoc_alts = isoc_ifnum = 0;
/* Find endpoints that we need */
for ( i = 0; i < uif->bNumEndpoints; ++i) {
ep = &uif->endpoint[i];
switch (ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_BULK:
if (ep->bEndpointAddress & USB_DIR_IN)
bulk_in_ep = ep;
else
bulk_out_ep = ep;
break;
case USB_ENDPOINT_XFER_INT:
intr_in_ep = ep;
break;
};
ifn = MIN(udev->actconfig->bNumInterfaces, HCI_MAX_IFACE_NUM);
for (i = 0; i < ifn; i++) {
iface = &udev->actconfig->interface[i];
for (a = 0; a < iface->num_altsetting; a++) {
uif = &iface->altsetting[a];
for (e = 0; e < uif->bNumEndpoints; e++) {
ep = &uif->endpoint[e];
switch (ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_INT:
if (ep->bEndpointAddress & USB_DIR_IN)
intr_in_ep[i] = ep;
break;
case USB_ENDPOINT_XFER_BULK:
if (ep->bEndpointAddress & USB_DIR_IN)
bulk_in_ep[i] = ep;
else
bulk_out_ep[i] = ep;
break;
case USB_ENDPOINT_XFER_ISOC:
if (ep->wMaxPacketSize < size)
break;
size = ep->wMaxPacketSize;
isoc_iface = iface;
isoc_alts = a;
isoc_ifnum = i;
if (ep->bEndpointAddress & USB_DIR_IN)
isoc_in_ep[i] = ep;
else
isoc_out_ep[i] = ep;
break;
}
}
}
}
if (!bulk_in_ep || !bulk_out_ep || !intr_in_ep) {
DBG("Endpoints not found: %p %p %p", bulk_in_ep, bulk_out_ep, intr_in_ep);
MOD_DEC_USE_COUNT;
return NULL;
if (!bulk_in_ep[0] || !bulk_out_ep[0] || !intr_in_ep[0]) {
BT_DBG("Bulk endpoints not found");
goto done;
}
if (!isoc_in_ep[1] || !isoc_out_ep[1]) {
BT_DBG("Isoc endpoints not found");
isoc_iface = NULL;
}
if (!(husb = kmalloc(sizeof(struct hci_usb), GFP_KERNEL))) {
ERR("Can't allocate: control structure");
MOD_DEC_USE_COUNT;
return NULL;
BT_ERR("Can't allocate: control structure");
goto done;
}
memset(husb, 0, sizeof(struct hci_usb));
husb->udev = udev;
husb->bulk_out_ep_addr = bulk_out_ep->bEndpointAddress;
if (!(husb->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL))) {
ERR("Can't allocate: control URB");
goto probe_error;
}
if (!(husb->write_urb = usb_alloc_urb(0, GFP_KERNEL))) {
ERR("Can't allocate: write URB");
goto probe_error;
}
if (!(husb->read_urb = usb_alloc_urb(0, GFP_KERNEL))) {
ERR("Can't allocate: read URB");
goto probe_error;
}
ep = bulk_in_ep;
pipe = usb_rcvbulkpipe(udev, ep->bEndpointAddress);
size = HCI_MAX_FRAME_SIZE;
if (!(buf = kmalloc(size, GFP_KERNEL))) {
ERR("Can't allocate: read buffer");
goto probe_error;
}
FILL_BULK_URB(husb->read_urb, udev, pipe, buf, size, hci_usb_bulk_read, husb);
husb->read_urb->transfer_flags |= USB_QUEUE_BULK;
husb->bulk_out_ep = bulk_out_ep[0]->bEndpointAddress;
husb->bulk_in_ep = bulk_in_ep[0]->bEndpointAddress;
ep = intr_in_ep;
pipe = usb_rcvintpipe(udev, ep->bEndpointAddress);
size = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
husb->intr_ep = intr_in_ep[0]->bEndpointAddress;
husb->intr_interval = intr_in_ep[0]->bInterval;
if (!(husb->intr_urb = usb_alloc_urb(0, GFP_KERNEL))) {
ERR("Can't allocate: interrupt URB");
goto probe_error;
}
if (isoc_iface) {
if (usb_set_interface(udev, isoc_ifnum, isoc_alts)) {
BT_ERR("Can't set isoc interface settings");
isoc_iface = NULL;
}
usb_driver_claim_interface(&hci_usb_driver, isoc_iface, husb);
husb->isoc_iface = isoc_iface;
if (!(buf = kmalloc(size, GFP_KERNEL))) {
ERR("Can't allocate: interrupt buffer");
goto probe_error;
husb->isoc_in_ep = isoc_in_ep[1]->bEndpointAddress;
husb->isoc_out_ep = isoc_out_ep[1]->bEndpointAddress;
}
FILL_INT_URB(husb->intr_urb, udev, pipe, buf, size, hci_usb_intr, husb, ep->bInterval);
skb_queue_head_init(&husb->tx_ctrl_q);
skb_queue_head_init(&husb->tx_write_q);
husb->completion_lock = RW_LOCK_UNLOCKED;
skb_queue_head_init(&husb->acl_q);
skb_queue_head_init(&husb->cmd_q);
skb_queue_head_init(&husb->pending_q);
skb_queue_head_init(&husb->completed_q);
/* Initialize and register HCI device */
hdev = &husb->hdev;
......@@ -602,18 +787,20 @@ static void * hci_usb_probe(struct usb_device *udev, unsigned int ifnum, const s
hdev->open = hci_usb_open;
hdev->close = hci_usb_close;
hdev->flush = hci_usb_flush;
hdev->send = hci_usb_send_frame;
hdev->send = hci_usb_send_frame;
hdev->destruct = hci_usb_destruct;
if (hci_register_dev(hdev) < 0) {
ERR("Can't register HCI device %s", hdev->name);
BT_ERR("Can't register HCI device");
goto probe_error;
}
return husb;
probe_error:
hci_usb_free_bufs(husb);
kfree(husb);
done:
MOD_DEC_USE_COUNT;
return NULL;
}
......@@ -626,22 +813,18 @@ static void hci_usb_disconnect(struct usb_device *udev, void *ptr)
if (!husb)
return;
DBG("%s", hdev->name);
BT_DBG("%s", hdev->name);
hci_usb_close(hdev);
if (hci_unregister_dev(hdev) < 0) {
ERR("Can't unregister HCI device %s", hdev->name);
}
if (husb->isoc_iface)
usb_driver_release_interface(&hci_usb_driver, husb->isoc_iface);
hci_usb_free_bufs(husb);
kfree(husb);
MOD_DEC_USE_COUNT;
if (hci_unregister_dev(hdev) < 0)
BT_ERR("Can't unregister HCI device %s", hdev->name);
}
static struct usb_driver hci_usb_driver =
{
static struct usb_driver hci_usb_driver = {
name: "hci_usb",
probe: hci_usb_probe,
disconnect: hci_usb_disconnect,
......@@ -652,12 +835,12 @@ int hci_usb_init(void)
{
int err;
INF("BlueZ HCI USB driver ver %s Copyright (C) 2000,2001 Qualcomm Inc",
BT_INFO("BlueZ HCI USB driver ver %s Copyright (C) 2000,2001 Qualcomm Inc",
VERSION);
INF("Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>");
BT_INFO("Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>");
if ((err = usb_register(&hci_usb_driver)) < 0)
ERR("Failed to register HCI USB driver");
BT_ERR("Failed to register HCI USB driver");
return err;
}
......
......@@ -23,7 +23,7 @@
*/
/*
* $Id: hci_usb.h,v 1.3 2001/06/02 01:40:08 maxk Exp $
* $Id: hci_usb.h,v 1.2 2002/03/18 19:10:04 maxk Exp $
*/
#ifdef __KERNEL__
......@@ -35,34 +35,45 @@
#define HCI_CTRL_REQ 0x20
struct hci_usb {
struct usb_device *udev;
struct usb_ctrlrequest dev_req;
struct urb *ctrl_urb;
struct urb *intr_urb;
struct urb *read_urb;
struct urb *write_urb;
__u8 *read_buf;
__u8 *intr_buf;
struct sk_buff *intr_skb;
int intr_count;
#define HCI_MAX_IFACE_NUM 3
__u8 bulk_out_ep_addr;
__u8 bulk_in_ep_addr;
__u8 intr_in_ep_addr;
__u8 intr_in_interval;
#define HCI_MAX_BULK_TX 4
#define HCI_MAX_BULK_RX 1
struct hci_usb {
struct hci_dev hdev;
unsigned long tx_state;
struct sk_buff_head tx_ctrl_q;
struct sk_buff_head tx_write_q;
unsigned long state;
struct usb_device *udev;
struct usb_interface *isoc_iface;
__u8 bulk_out_ep;
__u8 bulk_in_ep;
__u8 isoc_out_ep;
__u8 isoc_in_ep;
__u8 intr_ep;
__u8 intr_interval;
struct urb * intr_urb;
struct sk_buff * intr_skb;
rwlock_t completion_lock;
struct sk_buff_head cmd_q; // TX Commands
struct sk_buff_head acl_q; // TX ACLs
struct sk_buff_head pending_q; // Pending requests
struct sk_buff_head completed_q; // Completed requests
};
struct hci_usb_scb {
struct urb *urb;
int intr_len;
};
/* Transmit states */
#define HCI_TX_CTRL 1
#define HCI_TX_WRITE 2
/* States */
#define HCI_USB_TX_PROCESS 1
#define HCI_USB_TX_WAKEUP 2
#define HCI_USB_CTRL_TX 3
#endif /* __KERNEL__ */
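
The hci_usb_scb layout above is the glue between the skb queues and the USB layer: every buffer on pending_q/completed_q carries its own URB pointer in skb->cb. A tiny sketch of the pattern the driver open-codes (scb_set_urb/scb_get_urb are hypothetical helpers, not part of this patch):

/* Sketch of the control-block pattern used by hci_usb_rx_submit() and
 * hci_usb_get_completed(): stash the URB in skb->cb when queuing, recover
 * it on completion - no extra lookup structure needed. */
static inline void scb_set_urb(struct sk_buff *skb, struct urb *urb)
{
	((struct hci_usb_scb *) skb->cb)->urb = urb;
}

static inline struct urb *scb_get_urb(struct sk_buff *skb)
{
	return ((struct hci_usb_scb *) skb->cb)->urb;
}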
......@@ -25,9 +25,9 @@
/*
* BlueZ HCI virtual device driver.
*
* $Id: hci_vhci.c,v 1.3 2001/08/03 04:19:50 maxk Exp $
* $Id: hci_vhci.c,v 1.3 2002/04/17 17:37:20 maxk Exp $
*/
#define VERSION "1.0"
#define VERSION "1.1"
#include <linux/config.h>
#include <linux/module.h>
......@@ -49,43 +49,56 @@
#include <asm/uaccess.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/bluez.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_vhci.h>
#include "hci_vhci.h"
/* HCI device part */
int hci_vhci_open(struct hci_dev *hdev)
static int hci_vhci_open(struct hci_dev *hdev)
{
hdev->flags |= HCI_RUNNING;
set_bit(HCI_RUNNING, &hdev->flags);
return 0;
}
int hci_vhci_flush(struct hci_dev *hdev)
static int hci_vhci_flush(struct hci_dev *hdev)
{
struct hci_vhci_struct *hci_vhci = (struct hci_vhci_struct *) hdev->driver_data;
skb_queue_purge(&hci_vhci->readq);
return 0;
}
int hci_vhci_close(struct hci_dev *hdev)
static int hci_vhci_close(struct hci_dev *hdev)
{
hdev->flags &= ~HCI_RUNNING;
if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
return 0;
hci_vhci_flush(hdev);
return 0;
}
int hci_vhci_send_frame(struct sk_buff *skb)
static void hci_vhci_destruct(struct hci_dev *hdev)
{
struct hci_vhci_struct *vhci;
if (!hdev) return;
vhci = (struct hci_vhci_struct *) hdev->driver_data;
kfree(vhci);
MOD_DEC_USE_COUNT;
}
static int hci_vhci_send_frame(struct sk_buff *skb)
{
struct hci_dev* hdev = (struct hci_dev *) skb->dev;
struct hci_vhci_struct *hci_vhci;
if (!hdev) {
ERR("Frame for uknown device (hdev=NULL)");
BT_ERR("Frame for uknown device (hdev=NULL)");
return -ENODEV;
}
if (!(hdev->flags & HCI_RUNNING))
if (!test_bit(HCI_RUNNING, &hdev->flags))
return -EBUSY;
hci_vhci = (struct hci_vhci_struct *) hdev->driver_data;
......@@ -188,7 +201,7 @@ static ssize_t hci_vhci_chr_read(struct file * file, char * buf, size_t count, l
add_wait_queue(&hci_vhci->read_wait, &wait);
while (count) {
current->state = TASK_INTERRUPTIBLE;
set_current_state(TASK_INTERRUPTIBLE);
/* Read frames from device queue */
if (!(skb = skb_dequeue(&hci_vhci->readq))) {
......@@ -214,13 +227,17 @@ static ssize_t hci_vhci_chr_read(struct file * file, char * buf, size_t count, l
kfree_skb(skb);
break;
}
current->state = TASK_RUNNING;
set_current_state(TASK_RUNNING);
remove_wait_queue(&hci_vhci->read_wait, &wait);
return ret;
}
static loff_t hci_vhci_chr_lseek(struct file * file, loff_t offset, int origin)
{
return -ESPIPE;
}
static int hci_vhci_chr_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
return -EINVAL;
......@@ -265,11 +282,13 @@ static int hci_vhci_chr_open(struct inode *inode, struct file * file)
hdev->close = hci_vhci_close;
hdev->flush = hci_vhci_flush;
hdev->send = hci_vhci_send_frame;
hdev->destruct = hci_vhci_destruct;
if (hci_register_dev(hdev) < 0) {
kfree(hci_vhci);
return -EBUSY;
}
MOD_INC_USE_COUNT;
file->private_data = hci_vhci;
return 0;
......@@ -280,18 +299,16 @@ static int hci_vhci_chr_close(struct inode *inode, struct file *file)
struct hci_vhci_struct *hci_vhci = (struct hci_vhci_struct *) file->private_data;
if (hci_unregister_dev(&hci_vhci->hdev) < 0) {
ERR("Can't unregister HCI device %s", hci_vhci->hdev.name);
BT_ERR("Can't unregister HCI device %s", hci_vhci->hdev.name);
}
kfree(hci_vhci);
file->private_data = NULL;
return 0;
}
static struct file_operations hci_vhci_fops = {
owner: THIS_MODULE,
llseek: no_llseek,
llseek: hci_vhci_chr_lseek,
read: hci_vhci_chr_read,
write: hci_vhci_chr_write,
poll: hci_vhci_chr_poll,
......@@ -310,12 +327,12 @@ static struct miscdevice hci_vhci_miscdev=
int __init hci_vhci_init(void)
{
INF("BlueZ VHCI driver ver %s Copyright (C) 2000,2001 Qualcomm Inc",
BT_INFO("BlueZ VHCI driver ver %s Copyright (C) 2000,2001 Qualcomm Inc",
VERSION);
INF("Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>");
BT_INFO("Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>");
if (misc_register(&hci_vhci_miscdev)) {
ERR("Can't register misc device %d\n", VHCI_MINOR);
BT_ERR("Can't register misc device %d\n", VHCI_MINOR);
return -EIO;
}
......@@ -332,4 +349,4 @@ module_exit(hci_vhci_cleanup);
MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>");
MODULE_DESCRIPTION("BlueZ VHCI driver ver " VERSION);
MODULE_LICENSE("GPL");
MODULE_LICENSE("GPL");
......@@ -23,7 +23,7 @@
*/
/*
* $Id: hci_vhci.h,v 1.2 2001/08/01 01:02:20 maxk Exp $
* $Id: hci_vhci.h,v 1.1.1.1 2002/03/08 21:03:15 maxk Exp $
*/
#ifndef __HCI_VHCI_H
......
......@@ -23,7 +23,7 @@
*/
/*
* $Id: bluetooth.h,v 1.6 2001/08/03 04:19:49 maxk Exp $
* $Id: bluetooth.h,v 1.8 2002/04/17 17:37:20 maxk Exp $
*/
#ifndef __BLUETOOTH_H
......@@ -31,17 +31,61 @@
#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/poll.h>
#include <net/sock.h>
#ifndef AF_BLUETOOTH
#define AF_BLUETOOTH 31
#define PF_BLUETOOTH AF_BLUETOOTH
#endif
/* Reserved for core and driver use */
#define BLUEZ_SKB_RESERVE 8
#ifndef MIN
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#endif
#define BTPROTO_L2CAP 0
#define BTPROTO_HCI 1
#define BTPROTO_SCO 2
#define BTPROTO_RFCOMM 3
#define SOL_HCI 0
#define SOL_L2CAP 6
#define SOL_SCO 17
#define SOL_RFCOMM 18
/* Debugging */
#ifdef CONFIG_BLUEZ_DEBUG
#define HCI_CORE_DEBUG 1
#define HCI_SOCK_DEBUG 1
#define HCI_UART_DEBUG 1
#define HCI_USB_DEBUG 1
//#define HCI_DATA_DUMP 1
#define L2CAP_DEBUG 1
#define SCO_DEBUG 1
#define AF_BLUETOOTH_DEBUG 1
#endif /* CONFIG_BLUEZ_DEBUG */
extern void bluez_dump(char *pref, __u8 *buf, int count);
#if __GNUC__ <= 2 && __GNUC_MINOR__ < 95
#define __func__ __FUNCTION__
#endif
#define BT_INFO(fmt, arg...) printk(KERN_INFO fmt "\n" , ## arg)
#define BT_DBG(fmt, arg...) printk(KERN_INFO "%s: " fmt "\n" , __func__ , ## arg)
#define BT_ERR(fmt, arg...) printk(KERN_ERR "%s: " fmt "\n" , __func__ , ## arg)
#ifdef HCI_DATA_DUMP
#define BT_DMP(buf, len) bluez_dump(__func__, buf, len)
#else
#define BT_DMP(D...)
#endif
/* Connection and socket states */
enum {
......@@ -50,6 +94,7 @@ enum {
BT_BOUND,
BT_LISTEN,
BT_CONNECT,
BT_CONNECT2,
BT_CONFIG,
BT_DISCONN,
BT_CLOSED
......@@ -66,7 +111,8 @@ typedef struct {
__u8 b[6];
} __attribute__((packed)) bdaddr_t;
#define BDADDR_ANY ((bdaddr_t *)"\000\000\000\000\000")
#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0}})
#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff}})
/* Copy, swap, convert BD Address */
static inline int bacmp(bdaddr_t *ba1, bdaddr_t *ba2)
......@@ -82,6 +128,89 @@ void baswap(bdaddr_t *dst, bdaddr_t *src);
char *batostr(bdaddr_t *ba);
bdaddr_t *strtoba(char *str);
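
A short hedged example of how these helpers fit together when logging an address (my_print_addr is illustrative only; baswap() is commonly applied before batostr() so the printed byte order matches the usual AA:BB:CC:DD:EE:FF notation):

/* Illustrative only: compare against the wildcard address and print a
 * human readable form. */
static void my_print_addr(char *pref, bdaddr_t *ba)
{
	bdaddr_t tmp;

	if (!bacmp(ba, BDADDR_ANY)) {
		printk(KERN_INFO "%s: any\n", pref);
		return;
	}
	baswap(&tmp, ba);
	printk(KERN_INFO "%s: %s\n", pref, batostr(&tmp));
}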
/* Common socket structures and functions */
#define bluez_sk(__sk) ((struct bluez_sock *) __sk)
struct bluez_sock {
struct sock sk;
bdaddr_t src;
bdaddr_t dst;
struct list_head accept_q;
struct sock *parent;
};
struct bluez_sock_list {
struct sock *head;
rwlock_t lock;
};
int bluez_sock_register(int proto, struct net_proto_family *ops);
int bluez_sock_unregister(int proto);
struct sock *bluez_sock_alloc(struct socket *sock, int proto, int pi_size, int prio);
void bluez_sock_link(struct bluez_sock_list *l, struct sock *s);
void bluez_sock_unlink(struct bluez_sock_list *l, struct sock *s);
int bluez_sock_recvmsg(struct socket *sock, struct msghdr *msg, int len, int flags, struct scm_cookie *scm);
uint bluez_sock_poll(struct file * file, struct socket *sock, poll_table *wait);
int bluez_sock_w4_connect(struct sock *sk, int flags);
void bluez_accept_enqueue(struct sock *parent, struct sock *sk);
struct sock *bluez_accept_dequeue(struct sock *parent, struct socket *newsock);
/* Skb helpers */
struct bluez_skb_cb {
int incomming;
};
#define bluez_cb(skb) ((struct bluez_skb_cb *)(skb->cb))
static inline struct sk_buff *bluez_skb_alloc(unsigned int len, int how)
{
struct sk_buff *skb;
if ((skb = alloc_skb(len + BLUEZ_SKB_RESERVE, how))) {
skb_reserve(skb, BLUEZ_SKB_RESERVE);
bluez_cb(skb)->incomming = 0;
}
return skb;
}
static inline struct sk_buff *bluez_skb_send_alloc(struct sock *sk, unsigned long len,
int nb, int *err)
{
struct sk_buff *skb;
if ((skb = sock_alloc_send_skb(sk, len + BLUEZ_SKB_RESERVE, nb, err))) {
skb_reserve(skb, BLUEZ_SKB_RESERVE);
bluez_cb(skb)->incomming = 0;
}
return skb;
}
static inline int skb_frags_no(struct sk_buff *skb)
{
register struct sk_buff *frag = skb_shinfo(skb)->frag_list;
register int n = 1;
for (; frag; frag=frag->next, n++);
return n;
}
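
The 8 bytes of headroom reserved by these helpers exist so that transport drivers can prepend their own framing without reallocating. A minimal sketch of that use, much like the H4 preq hook elsewhere in this patch (frame_for_uart is a hypothetical name):

/* Illustrative only: BLUEZ_SKB_RESERVE guarantees room to push a small
 * transport header, here the one-byte H4 packet indicator. */
static struct sk_buff *frame_for_uart(struct sk_buff *skb)
{
	*(__u8 *) skb_push(skb, 1) = skb->pkt_type;
	return skb;
}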
int hci_core_init(void);
int hci_core_cleanup(void);
int hci_sock_init(void);
int hci_sock_cleanup(void);
int bterr(__u16 code);
#ifndef MODULE_LICENSE
#define MODULE_LICENSE(x)
#endif
#ifndef list_for_each_safe
#define list_for_each_safe(pos, n, head) \
for (pos = (head)->next, n = pos->next; pos != (head); \
pos = n, n = pos->next)
#endif
#endif /* __BLUETOOTH_H */
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/*
* $Id: bluez.h,v 1.4 2001/08/03 04:19:49 maxk Exp $
*/
#ifndef __IF_BLUEZ_H
#define __IF_BLUEZ_H
#include <net/sock.h>
#define BLUEZ_MAX_PROTO 2
/* Reserved for core and driver use */
#define BLUEZ_SKB_RESERVE 8
#ifndef MIN
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#endif
/* Debugging */
#ifdef BLUEZ_DEBUG
#define HCI_CORE_DEBUG 1
#define HCI_SOCK_DEBUG 1
#define HCI_UART_DEBUG 1
#define HCI_USB_DEBUG 1
//#define HCI_DATA_DUMP 1
#define L2CAP_DEBUG 1
#endif /* BLUEZ_DEBUG */
extern void bluez_dump(char *pref, __u8 *buf, int count);
#define INF(fmt, arg...) printk(KERN_INFO fmt "\n" , ## arg)
#define DBG(fmt, arg...) printk(KERN_INFO __FUNCTION__ ": " fmt "\n" , ## arg)
#define ERR(fmt, arg...) printk(KERN_ERR __FUNCTION__ ": " fmt "\n" , ## arg)
#ifdef HCI_DATA_DUMP
#define DMP(buf, len) bluez_dump(__FUNCTION__, buf, len)
#else
#define DMP(D...)
#endif
/* ----- Sockets ------ */
struct bluez_sock_list {
struct sock *head;
rwlock_t lock;
};
extern int bluez_sock_register(int proto, struct net_proto_family *ops);
extern int bluez_sock_unregister(int proto);
extern void bluez_sock_link(struct bluez_sock_list *l, struct sock *s);
extern void bluez_sock_unlink(struct bluez_sock_list *l, struct sock *s);
/* ----- SKB helpers ----- */
struct bluez_skb_cb {
int incomming;
};
#define bluez_cb(skb) ((struct bluez_skb_cb *)(skb->cb))
static inline struct sk_buff *bluez_skb_alloc(unsigned int len, int how)
{
struct sk_buff *skb;
if ((skb = alloc_skb(len + BLUEZ_SKB_RESERVE, how))) {
skb_reserve(skb, BLUEZ_SKB_RESERVE);
bluez_cb(skb)->incomming = 0;
}
return skb;
}
static inline struct sk_buff *bluez_skb_send_alloc(struct sock *sk, unsigned long len,
int nb, int *err)
{
struct sk_buff *skb;
if ((skb = sock_alloc_send_skb(sk, len + BLUEZ_SKB_RESERVE, nb, err))) {
skb_reserve(skb, BLUEZ_SKB_RESERVE);
bluez_cb(skb)->incomming = 0;
}
return skb;
}
static inline int skb_frags_no(struct sk_buff *skb)
{
register struct sk_buff *frag = skb_shinfo(skb)->frag_list;
register int n = 1;
for (; frag; frag=frag->next, n++);
return n;
}
extern int hci_core_init(void);
extern int hci_core_cleanup(void);
extern int hci_sock_init(void);
extern int hci_sock_cleanup(void);
#endif /* __IF_BLUEZ_H */
......@@ -23,16 +23,16 @@
*/
/*
* $Id: hci.h,v 1.15 2001/08/05 06:02:15 maxk Exp $
* $Id: hci.h,v 1.4 2002/04/18 22:26:15 maxk Exp $
*/
#ifndef __HCI_H
#define __HCI_H
#include <asm/byteorder.h>
#define HCI_MAX_DEV 8
#define HCI_MAX_FRAME_SIZE 2048
#define HCI_MAX_ACL_SIZE 1024
#define HCI_MAX_SCO_SIZE 255
#define HCI_MAX_EVENT_SIZE 260
#define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4)
/* HCI dev events */
#define HCI_DEV_REG 1
......@@ -41,41 +41,54 @@
#define HCI_DEV_DOWN 4
/* HCI device types */
#define HCI_UART 0
#define HCI_VHCI 0
#define HCI_USB 1
#define HCI_VHCI 2
/* HCI device modes */
#define HCI_NORMAL 0x0001
#define HCI_RAW 0x0002
#define HCI_MODE_MASK (HCI_NORMAL | HCI_RAW)
#define HCI_SOCK 0x1000
/* HCI device states */
#define HCI_INIT 0x0010
#define HCI_UP 0x0020
#define HCI_RUNNING 0x0040
#define HCI_PCCARD 2
#define HCI_UART 3
#define HCI_RS232 4
/* HCI device flags */
#define HCI_PSCAN 0x0100
#define HCI_ISCAN 0x0200
#define HCI_AUTH 0x0400
enum {
HCI_UP,
HCI_INIT,
HCI_RUNNING,
HCI_PSCAN,
HCI_ISCAN,
HCI_AUTH,
HCI_ENCRYPT,
HCI_INQUIRY,
HCI_RAW
};
/* HCI Ioctl defines */
/* HCI ioctl defines */
#define HCIDEVUP _IOW('H', 201, int)
#define HCIDEVDOWN _IOW('H', 202, int)
#define HCIDEVRESET _IOW('H', 203, int)
#define HCIRESETSTAT _IOW('H', 204, int)
#define HCIGETINFO _IOR('H', 205, int)
#define HCIGETDEVLIST _IOR('H', 206, int)
#define HCISETRAW _IOW('H', 207, int)
#define HCISETSCAN _IOW('H', 208, int)
#define HCISETAUTH _IOW('H', 209, int)
#define HCIINQUIRY _IOR('H', 210, int)
#define HCISETPTYPE _IOW('H', 211, int)
#define HCIDEVRESTAT _IOW('H', 204, int)
#define HCIGETDEVLIST _IOR('H', 210, int)
#define HCIGETDEVINFO _IOR('H', 211, int)
#define HCIGETCONNLIST _IOR('H', 212, int)
#define HCIGETCONNINFO _IOR('H', 213, int)
#ifndef __NO_HCI_DEFS
#define HCISETRAW _IOW('H', 220, int)
#define HCISETSCAN _IOW('H', 221, int)
#define HCISETAUTH _IOW('H', 222, int)
#define HCISETENCRYPT _IOW('H', 223, int)
#define HCISETPTYPE _IOW('H', 224, int)
#define HCISETLINKPOL _IOW('H', 225, int)
#define HCISETLINKMODE _IOW('H', 226, int)
#define HCISETACLMTU _IOW('H', 227, int)
#define HCISETSCOMTU _IOW('H', 228, int)
#define HCIINQUIRY _IOR('H', 240, int)
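For orientation, a minimal user-space sketch of the device-control ioctls above, e.g. bringing up hci0 with HCIDEVUP. The header path and the BTPROTO_HCI value are assumptions not shown in this hunk; error handling is trimmed.

	#include <stdio.h>
	#include <sys/socket.h>
	#include <sys/ioctl.h>
	#include <net/bluetooth/hci.h>	/* assumption: this header visible to user space */

	#ifndef BTPROTO_HCI
	#define BTPROTO_HCI 1		/* assumption: HCI protocol number from bluetooth.h */
	#endif

	int main(void)
	{
		/* Device control goes through a raw HCI socket. */
		int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
		if (ctl < 0) {
			perror("socket");
			return 1;
		}

		/* HCIDEVUP/HCIDEVDOWN take the device id by value. */
		if (ioctl(ctl, HCIDEVUP, 0) < 0)	/* bring up hci0 */
			perror("HCIDEVUP");
		return 0;
	}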
/* HCI timeouts */
#define HCI_CONN_TIMEOUT (HZ * 40)
#define HCI_DISCONN_TIMEOUT (HZ * 2)
#define HCI_CONN_IDLE_TIMEOUT (HZ * 60)
/* HCI Packet types */
#define HCI_COMMAND_PKT 0x01
......@@ -92,6 +105,13 @@
#define HCI_DH3 0x0800
#define HCI_DH5 0x8000
#define HCI_HV1 0x0020
#define HCI_HV2 0x0040
#define HCI_HV3 0x0080
#define SCO_PTYPE_MASK (HCI_HV1 | HCI_HV2 | HCI_HV3)
#define ACL_PTYPE_MASK (~SCO_PTYPE_MASK)
/* ACL flags */
#define ACL_CONT 0x0001
#define ACL_START 0x0002
......@@ -125,6 +145,19 @@
#define LMP_PSCHEME 0x02
#define LMP_PCONTROL 0x04
/* Link policies */
#define HCI_LP_RSWITCH 0x0001
#define HCI_LP_HOLD 0x0002
#define HCI_LP_SNIFF 0x0004
#define HCI_LP_PARK 0x0008
/* Link mode */
#define HCI_LM_ACCEPT 0x8000
#define HCI_LM_MASTER 0x0001
#define HCI_LM_AUTH 0x0002
#define HCI_LM_ENCRYPT 0x0004
#define HCI_LM_TRUSTED 0x0008
/* ----- HCI Commands ----- */
/* OGF & OCF values */
......@@ -137,9 +170,10 @@ typedef struct {
__u8 hci_ver;
__u16 hci_rev;
__u8 lmp_ver;
__u16 man_name;
__u16 lmp_sub;
__u16 manufacturer;
__u16 lmp_subver;
} __attribute__ ((packed)) read_local_version_rp;
#define READ_LOCAL_VERSION_RP_SIZE 9
#define OCF_READ_LOCAL_FEATURES 0x0003
typedef struct {
......@@ -165,18 +199,24 @@ typedef struct {
/* Host Controller and Baseband */
#define OGF_HOST_CTL 0x03
#define OCF_RESET 0x0003
#define OCF_READ_AUTH_ENABLE 0x001F
#define OCF_WRITE_AUTH_ENABLE 0x0020
#define AUTH_DISABLED 0x00
#define AUTH_ENABLED 0x01
#define AUTH_DISABLED 0x00
#define AUTH_ENABLED 0x01
#define OCF_READ_ENCRYPT_MODE 0x0021
#define OCF_WRITE_ENCRYPT_MODE 0x0022
#define ENCRYPT_DISABLED 0x00
#define ENCRYPT_P2P 0x01
#define ENCRYPT_BOTH 0x02
#define OCF_WRITE_CA_TIMEOUT 0x0016
#define OCF_WRITE_PG_TIMEOUT 0x0018
#define OCF_WRITE_SCAN_ENABLE 0x001A
#define SCANS_DISABLED 0x00
#define IS_ENA_PS_DIS 0x01
#define IS_DIS_PS_ENA 0x02
#define IS_ENA_PS_ENA 0x03
#define SCAN_DISABLED 0x00
#define SCAN_INQUIRY 0x01
#define SCAN_PAGE 0x02
#define OCF_SET_EVENT_FLT 0x0005
typedef struct {
......@@ -226,9 +266,18 @@ typedef struct {
} __attribute__ ((packed)) write_class_of_dev_cp;
#define WRITE_CLASS_OF_DEV_CP_SIZE 3
#define OCF_HOST_BUFFER_SIZE 0x0033
typedef struct {
__u16 acl_mtu;
__u8 sco_mtu;
__u16 acl_max_pkt;
__u16 sco_max_pkt;
} __attribute__ ((packed)) host_buffer_size_cp;
#define HOST_BUFFER_SIZE_CP_SIZE 7
/* Link Control */
#define OGF_LINK_CTL 0x01
#define OCF_CREATE_CONN 0x0005
#define OCF_CREATE_CONN 0x0005
typedef struct {
bdaddr_t bdaddr;
__u16 pkt_type;
......@@ -246,6 +295,13 @@ typedef struct {
} __attribute__ ((packed)) accept_conn_req_cp;
#define ACCEPT_CONN_REQ_CP_SIZE 7
#define OCF_REJECT_CONN_REQ 0x000a
typedef struct {
bdaddr_t bdaddr;
__u8 reason;
} __attribute__ ((packed)) reject_conn_req_cp;
#define REJECT_CONN_REQ_CP_SIZE 7
#define OCF_DISCONNECT 0x0006
typedef struct {
__u16 handle;
......@@ -253,17 +309,134 @@ typedef struct {
} __attribute__ ((packed)) disconnect_cp;
#define DISCONNECT_CP_SIZE 3
#define OCF_ADD_SCO 0x0007
typedef struct {
__u16 handle;
__u16 pkt_type;
} __attribute__ ((packed)) add_sco_cp;
#define ADD_SCO_CP_SIZE 4
#define OCF_INQUIRY 0x0001
typedef struct {
__u8 lap[3];
__u8 lenght;
__u8 length;
__u8 num_rsp;
} __attribute__ ((packed)) inquiry_cp;
#define INQUIRY_CP_SIZE 5
#define OGF_LINK_POLICY 0x02 /* Link Policy */
typedef struct {
__u8 status;
bdaddr_t bdaddr;
} __attribute__ ((packed)) status_bdaddr_rp;
#define STATUS_BDADDR_RP_SIZE 7
#define OCF_LINK_KEY_REPLY 0x000B
#define OCF_LINK_KEY_NEG_REPLY 0x000C
typedef struct {
bdaddr_t bdaddr;
__u8 link_key[16];
} __attribute__ ((packed)) link_key_reply_cp;
#define LINK_KEY_REPLY_CP_SIZE 22
#define OCF_PIN_CODE_REPLY 0x000D
#define OCF_PIN_CODE_NEG_REPLY 0x000E
typedef struct {
bdaddr_t bdaddr;
__u8 pin_len;
__u8 pin_code[16];
} __attribute__ ((packed)) pin_code_reply_cp;
#define PIN_CODE_REPLY_CP_SIZE 23
#define OCF_CHANGE_CONN_PTYPE 0x000F
typedef struct {
__u16 handle;
__u16 pkt_type;
} __attribute__ ((packed)) change_conn_ptype_cp;
#define CHANGE_CONN_PTYPE_CP_SIZE 4
#define OCF_AUTH_REQUESTED 0x0011
typedef struct {
__u16 handle;
} __attribute__ ((packed)) auth_requested_cp;
#define AUTH_REQUESTED_CP_SIZE 2
#define OCF_SET_CONN_ENCRYPT 0x0013
typedef struct {
__u16 handle;
__u8 encrypt;
} __attribute__ ((packed)) set_conn_encrypt_cp;
#define SET_CONN_ENCRYPT_CP_SIZE 3
#define OCF_REMOTE_NAME_REQ 0x0019
typedef struct {
bdaddr_t bdaddr;
__u8 pscan_rep_mode;
__u8 pscan_mode;
__u16 clock_offset;
} __attribute__ ((packed)) remote_name_req_cp;
#define REMOTE_NAME_REQ_CP_SIZE 10
#define OCF_READ_REMOTE_FEATURES 0x001B
typedef struct {
__u16 handle;
} __attribute__ ((packed)) read_remote_features_cp;
#define READ_REMOTE_FEATURES_CP_SIZE 2
#define OCF_READ_REMOTE_VERSION 0x001D
typedef struct {
__u16 handle;
} __attribute__ ((packed)) read_remote_version_cp;
#define READ_REMOTE_VERSION_CP_SIZE 2
/* --------- HCI Events --------- */
/* Link Policy */
#define OGF_LINK_POLICY 0x02
#define OCF_ROLE_DISCOVERY 0x0009
typedef struct {
__u16 handle;
} __attribute__ ((packed)) role_discovery_cp;
#define ROLE_DISCOVERY_CP_SIZE 2
typedef struct {
__u8 status;
__u16 handle;
__u8 role;
} __attribute__ ((packed)) role_discovery_rp;
#define ROLE_DISCOVERY_RP_SIZE 4
#define OCF_READ_LINK_POLICY 0x000C
typedef struct {
__u16 handle;
} __attribute__ ((packed)) read_link_policy_cp;
#define READ_LINK_POLICY_CP_SIZE 2
typedef struct {
__u8 status;
__u16 handle;
__u16 policy;
} __attribute__ ((packed)) read_link_policy_rp;
#define READ_LINK_POLICY_RP_SIZE 5
#define OCF_SWITCH_ROLE 0x000B
typedef struct {
bdaddr_t bdaddr;
__u8 role;
} __attribute__ ((packed)) switch_role_cp;
#define SWITCH_ROLE_CP_SIZE 7
#define OCF_WRITE_LINK_POLICY 0x000D
typedef struct {
__u16 handle;
__u16 policy;
} __attribute__ ((packed)) write_link_policy_cp;
#define WRITE_LINK_POLICY_CP_SIZE 4
typedef struct {
__u8 status;
__u16 handle;
} __attribute__ ((packed)) write_link_policy_rp;
#define WRITE_LINK_POLICY_RP_SIZE 3
/* Status params */
#define OGF_STATUS_PARAM 0x05
/* ---- HCI Events ---- */
#define EVT_INQUIRY_COMPLETE 0x01
#define EVT_INQUIRY_RESULT 0x02
......@@ -272,7 +445,7 @@ typedef struct {
__u8 pscan_rep_mode;
__u8 pscan_period_mode;
__u8 pscan_mode;
__u8 class[3];
__u8 dev_class[3];
__u16 clock_offset;
} __attribute__ ((packed)) inquiry_info;
#define INQUIRY_INFO_SIZE 14
......@@ -303,6 +476,44 @@ typedef struct {
} __attribute__ ((packed)) evt_disconn_complete;
#define EVT_DISCONN_COMPLETE_SIZE 4
#define EVT_AUTH_COMPLETE 0x06
typedef struct {
__u8 status;
__u16 handle;
} __attribute__ ((packed)) evt_auth_complete;
#define EVT_AUTH_COMPLETE_SIZE 3
#define EVT_REMOTE_NAME_REQ_COMPLETE 0x07
typedef struct {
__u8 status;
bdaddr_t bdaddr;
__u8 name[248];
} __attribute__ ((packed)) evt_remote_name_req_complete;
#define EVT_REMOTE_NAME_REQ_COMPLETE_SIZE 255
#define EVT_ENCRYPT_CHANGE 0x08
typedef struct {
__u8 status;
__u16 handle;
__u8 encrypt;
} __attribute__ ((packed)) evt_encrypt_change;
#define EVT_ENCRYPT_CHANGE_SIZE 5
#define EVT_QOS_SETUP_COMPLETE 0x0D
typedef struct {
__u8 service_type;
__u32 token_rate;
__u32 peak_bandwidth;
__u32 latency;
__u32 delay_variation;
} __attribute__ ((packed)) hci_qos;
typedef struct {
__u8 status;
__u16 handle;
hci_qos qos;
} __attribute__ ((packed)) evt_qos_setup_complete;
#define EVT_QOS_SETUP_COMPLETE_SIZE 20
#define EVT_CMD_COMPLETE 0x0e
typedef struct {
__u8 ncmd;
......@@ -321,16 +532,78 @@ typedef struct {
#define EVT_NUM_COMP_PKTS 0x13
typedef struct {
__u8 num_hndl;
/* variable lenght part */
/* variable length part */
} __attribute__ ((packed)) evt_num_comp_pkts;
#define EVT_NUM_COMP_PKTS_SIZE 1
#define EVT_HCI_DEV_EVENT 0xfd
#define EVT_ROLE_CHANGE 0x12
typedef struct {
__u8 status;
bdaddr_t bdaddr;
__u8 role;
} __attribute__ ((packed)) evt_role_change;
#define EVT_ROLE_CHANGE_SIZE 8
#define EVT_PIN_CODE_REQ 0x16
typedef struct {
bdaddr_t bdaddr;
} __attribute__ ((packed)) evt_pin_code_req;
#define EVT_PIN_CODE_REQ_SIZE 6
#define EVT_LINK_KEY_REQ 0x17
typedef struct {
bdaddr_t bdaddr;
} __attribute__ ((packed)) evt_link_key_req;
#define EVT_LINK_KEY_REQ_SIZE 6
#define EVT_LINK_KEY_NOTIFY 0x18
typedef struct {
bdaddr_t bdaddr;
__u8 link_key[16];
__u8 key_type;
} __attribute__ ((packed)) evt_link_key_notify;
#define EVT_LINK_KEY_NOTIFY_SIZE 23
#define EVT_READ_REMOTE_FEATURES_COMPLETE 0x0B
typedef struct {
__u8 status;
__u16 handle;
__u8 features[8];
} __attribute__ ((packed)) evt_read_remote_features_complete;
#define EVT_READ_REMOTE_FEATURES_COMPLETE_SIZE 11
#define EVT_READ_REMOTE_VERSION_COMPLETE 0x0C
typedef struct {
__u8 status;
__u16 handle;
__u8 lmp_ver;
__u16 manufacturer;
__u16 lmp_subver;
} __attribute__ ((packed)) evt_read_remote_version_complete;
#define EVT_READ_REMOTE_VERSION_COMPLETE_SIZE 8
/* Internal events generated by BlueZ stack */
#define EVT_STACK_INTERNAL 0xfd
typedef struct {
__u16 type;
__u8 data[0];
} __attribute__ ((packed)) evt_stack_internal;
#define EVT_STACK_INTERNAL_SIZE 2
#define EVT_SI_DEVICE 0x01
typedef struct {
__u16 event;
__u16 dev_id;
} __attribute__ ((packed)) evt_si_device;
#define EVT_SI_DEVICE_SIZE 4
#define EVT_SI_SECURITY 0x02
typedef struct {
__u16 event;
__u16 param;
} __attribute__ ((packed)) evt_hci_dev_event;
#define EVT_HCI_DEV_EVENT_SIZE 4
__u16 proto;
__u16 subproto;
__u8 incomming;
} __attribute__ ((packed)) evt_si_security;
/* -------- HCI Packet structures -------- */
#define HCI_TYPE_LEN 1
......@@ -369,14 +642,14 @@ typedef struct {
#define acl_handle(h) (h & 0x0fff)
#define acl_flags(h) (h >> 12)
#endif /* _NO_HCI_DEFS */
/* HCI Socket options */
#define HCI_DATA_DIR 0x0001
#define HCI_FILTER 0x0002
#define HCI_DATA_DIR 1
#define HCI_FILTER 2
#define HCI_TIME_STAMP 3
/* HCI CMSG flags */
#define HCI_CMSG_DIR 0x0001
#define HCI_CMSG_TSTAMP 0x0002
struct sockaddr_hci {
sa_family_t hci_family;
......@@ -387,27 +660,29 @@ struct sockaddr_hci {
struct hci_filter {
__u32 type_mask;
__u32 event_mask[2];
__u16 opcode;
};
struct hci_dev_req {
__u16 dev_id;
__u32 dev_opt;
};
struct hci_dev_list_req {
__u16 dev_num;
struct hci_dev_req dev_req[0]; /* hci_dev_req structures */
};
struct hci_inquiry_req {
__u16 dev_id;
__u16 flags;
__u8 lap[3];
__u8 length;
__u8 num_rsp;
};
#define IREQ_CACHE_FLUSH 0x0001
#define HCI_FLT_TYPE_BITS 31
#define HCI_FLT_EVENT_BITS 63
#define HCI_FLT_OGF_BITS 63
#define HCI_FLT_OCF_BITS 127
#if BITS_PER_LONG == 64
static inline void hci_set_bit(int nr, void *addr)
{
*((__u32 *) addr + (nr >> 5)) |= ((__u32) 1 << (nr & 31));
}
static inline int hci_test_bit(int nr, void *addr)
{
return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
#else
#define hci_set_bit set_bit
#define hci_test_bit test_bit
#endif
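A hedged sketch of how the filter is meant to be used from user space: set the packet-type and event bits by hand (the hci_set_bit() helper above does the same thing on the kernel side) and install the filter with setsockopt(). SOL_HCI, BTPROTO_HCI and HCI_EVENT_PKT are assumptions taken from elsewhere in the stack, not from this hunk.

	#include <string.h>
	#include <sys/socket.h>
	#include <net/bluetooth/hci.h>	/* assumption: header visible to user space */

	#define SOL_HCI       0		/* assumption */
	#define BTPROTO_HCI   1		/* assumption */
	#define HCI_EVENT_PKT 0x04	/* assumption: packet type define cut from the hunk above */

	static int open_filtered_hci_socket(void)
	{
		struct hci_filter flt;
		int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
		if (sk < 0)
			return -1;

		memset(&flt, 0, sizeof(flt));
		flt.type_mask     |= 1 << HCI_EVENT_PKT;	/* pass event packets only */
		flt.event_mask[0] |= 1 << EVT_CMD_COMPLETE;	/* pass command complete events only */

		if (setsockopt(sk, SOL_HCI, HCI_FILTER, &flt, sizeof(flt)) < 0)
			return -1;
		return sk;
	}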
/* Ioctl requests structures */
struct hci_dev_stats {
__u32 err_rx;
__u32 err_tx;
......@@ -433,11 +708,13 @@ struct hci_dev_info {
__u8 features[8];
__u32 pkt_type;
__u32 link_policy;
__u32 link_mode;
__u16 acl_mtu;
__u16 acl_max;
__u16 acl_pkts;
__u16 sco_mtu;
__u16 sco_max;
__u16 sco_pkts;
struct hci_dev_stats stat;
};
......@@ -445,6 +722,20 @@ struct hci_dev_info {
struct hci_conn_info {
__u16 handle;
bdaddr_t bdaddr;
__u8 type;
__u8 out;
__u16 state;
__u32 link_mode;
};
struct hci_dev_req {
__u16 dev_id;
__u32 dev_opt;
};
struct hci_dev_list_req {
__u16 dev_num;
struct hci_dev_req dev_req[0]; /* hci_dev_req structures */
};
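A hedged user-space sketch of HCIGETDEVLIST: the request is a hci_dev_list_req header followed by dev_num hci_dev_req slots, and the handler is assumed to rewrite dev_num with the number of registered devices (the handler itself is not part of this hunk). BTPROTO_HCI is likewise an assumption.

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/socket.h>
	#include <sys/ioctl.h>
	#include <net/bluetooth/hci.h>	/* assumption: header visible to user space */

	#define BTPROTO_HCI 1		/* assumption */

	int main(void)
	{
		struct hci_dev_list_req *dl;
		int i, ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

		dl = calloc(1, sizeof(*dl) + 16 * sizeof(struct hci_dev_req));
		if (ctl < 0 || !dl)
			return 1;

		dl->dev_num = 16;	/* room for up to 16 devices */
		if (ioctl(ctl, HCIGETDEVLIST, dl) == 0)
			for (i = 0; i < dl->dev_num; i++)	/* assumed: kernel rewrites dev_num */
				printf("hci%d registered\n", dl->dev_req[i].dev_id);

		free(dl);
		return 0;
	}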
struct hci_conn_list_req {
......@@ -453,4 +744,26 @@ struct hci_conn_list_req {
struct hci_conn_info conn_info[0];
};
struct hci_conn_info_req {
bdaddr_t bdaddr;
__u8 type;
struct hci_conn_info conn_info[0];
};
struct hci_inquiry_req {
__u16 dev_id;
__u16 flags;
__u8 lap[3];
__u8 length;
__u8 num_rsp;
};
#define IREQ_CACHE_FLUSH 0x0001
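A hedged sketch of running an inquiry through HCIINQUIRY. The assumption here, based on how the rest of the patch consumes the request, is that the buffer is a hci_inquiry_req immediately followed by room for num_rsp inquiry_info entries and that num_rsp is rewritten with the number of responses; BTPROTO_HCI is also an assumption.

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/socket.h>
	#include <sys/ioctl.h>
	#include <net/bluetooth/hci.h>	/* assumption: header visible to user space */

	#define BTPROTO_HCI 1		/* assumption */

	int main(void)
	{
		int num_rsp = 8;
		void *buf = calloc(1, sizeof(struct hci_inquiry_req) +
				      num_rsp * sizeof(inquiry_info));
		struct hci_inquiry_req *ir = buf;
		int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

		if (ctl < 0 || !buf)
			return 1;

		ir->dev_id  = 0;			/* inquire on hci0 */
		ir->flags   = IREQ_CACHE_FLUSH;		/* drop stale cache entries first */
		ir->lap[0]  = 0x33;			/* general inquiry access code 0x9e8b33, */
		ir->lap[1]  = 0x8b;			/* stored low byte first */
		ir->lap[2]  = 0x9e;
		ir->length  = 8;			/* inquiry length in 1.28s units */
		ir->num_rsp = num_rsp;

		if (ioctl(ctl, HCIINQUIRY, buf) == 0)
			printf("%d responses\n", ir->num_rsp);	/* assumed: rewritten by kernel */

		free(buf);
		return 0;
	}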
struct hci_remotename_req {
__u16 dev_id;
__u16 flags;
bdaddr_t bdaddr;
__u8 name[248];
};
#endif /* __HCI_H */
......@@ -23,7 +23,7 @@
*/
/*
* $Id: hci_core.h,v 1.11 2001/08/05 06:02:15 maxk Exp $
* $Id: hci_core.h,v 1.3 2002/04/17 18:55:21 maxk Exp $
*/
#ifndef __HCI_CORE_H
......@@ -32,14 +32,12 @@
#include <net/bluetooth/hci.h>
/* HCI upper protocols */
#define HCI_MAX_PROTO 1
#define HCI_PROTO_L2CAP 0
#define HCI_PROTO_SCO 1
#define HCI_INIT_TIMEOUT (HZ * 10)
/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX (HZ*5) // 5 seconds
#define INQUIRY_ENTRY_AGE_MAX (HZ*60) // 60 seconds
/* HCI Core structures */
struct inquiry_entry {
struct inquiry_entry *next;
......@@ -53,111 +51,180 @@ struct inquiry_cache {
struct inquiry_entry *list;
};
static inline void inquiry_cache_init(struct inquiry_cache *cache)
{
spin_lock_init(&cache->lock);
cache->list = NULL;
}
struct conn_hash {
struct list_head list;
spinlock_t lock;
unsigned int num;
};
static inline void inquiry_cache_lock(struct inquiry_cache *cache)
{
spin_lock(&cache->lock);
}
struct hci_dev {
struct list_head list;
spinlock_t lock;
atomic_t refcnt;
static inline void inquiry_cache_unlock(struct inquiry_cache *cache)
{
spin_unlock(&cache->lock);
}
char name[8];
unsigned long flags;
__u16 id;
__u8 type;
bdaddr_t bdaddr;
__u8 features[8];
static inline void inquiry_cache_lock_bh(struct inquiry_cache *cache)
{
spin_lock_bh(&cache->lock);
}
__u16 pkt_type;
__u16 link_policy;
__u16 link_mode;
atomic_t cmd_cnt;
unsigned int acl_cnt;
unsigned int sco_cnt;
static inline void inquiry_cache_unlock_bh(struct inquiry_cache *cache)
{
spin_unlock_bh(&cache->lock);
}
unsigned int acl_mtu;
unsigned int sco_mtu;
unsigned int acl_pkts;
unsigned int sco_pkts;
static inline long inquiry_cache_age(struct inquiry_cache *cache)
{
return jiffies - cache->timestamp;
}
unsigned long cmd_last_tx;
unsigned long acl_last_tx;
unsigned long sco_last_tx;
struct tasklet_struct cmd_task;
struct tasklet_struct rx_task;
struct tasklet_struct tx_task;
static inline long inquiry_entry_age(struct inquiry_entry *e)
{
return jiffies - e->timestamp;
}
extern void inquiry_cache_flush(struct inquiry_cache *cache);
struct sk_buff_head rx_q;
struct sk_buff_head raw_q;
struct sk_buff_head cmd_q;
struct hci_dev;
struct sk_buff *sent_cmd;
struct semaphore req_lock;
wait_queue_head_t req_wait_q;
__u32 req_status;
__u32 req_result;
struct inquiry_cache inq_cache;
struct conn_hash conn_hash;
struct hci_dev_stats stat;
void *driver_data;
void *core_data;
atomic_t promisc;
int (*open)(struct hci_dev *hdev);
int (*close)(struct hci_dev *hdev);
int (*flush)(struct hci_dev *hdev);
int (*send)(struct sk_buff *skb);
void (*destruct)(struct hci_dev *hdev);
int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};
/* ----- HCI Connections ----- */
struct hci_conn {
struct list_head list;
atomic_t refcnt;
spinlock_t lock;
bdaddr_t dst;
__u16 handle;
__u16 state;
__u8 type;
unsigned int sent;
__u8 out;
__u32 link_mode;
unsigned long pend;
unsigned int sent;
struct sk_buff_head data_q;
struct timer_list timer;
struct hci_dev *hdev;
void *l2cap_data;
void *sco_data;
void *priv;
struct sk_buff_head data_q;
struct hci_conn *link;
};
struct conn_hash {
struct list_head list;
spinlock_t lock;
unsigned int num;
};
extern struct hci_proto *hci_proto[];
extern struct list_head hdev_list;
extern spinlock_t hdev_list_lock;
/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX (HZ*30) // 30 seconds
#define INQUIRY_ENTRY_AGE_MAX (HZ*60) // 60 seconds
#define inquiry_cache_lock(c) spin_lock(&c->lock)
#define inquiry_cache_unlock(c) spin_unlock(&c->lock)
#define inquiry_cache_lock_bh(c) spin_lock_bh(&c->lock)
#define inquiry_cache_unlock_bh(c) spin_unlock_bh(&c->lock)
static inline void conn_hash_init(struct conn_hash *h)
static inline void inquiry_cache_init(struct hci_dev *hdev)
{
INIT_LIST_HEAD(&h->list);
spin_lock_init(&h->lock);
h->num = 0;
struct inquiry_cache *c = &hdev->inq_cache;
spin_lock_init(&c->lock);
c->list = NULL;
}
static inline void conn_hash_lock(struct conn_hash *h)
static inline long inquiry_cache_age(struct hci_dev *hdev)
{
spin_lock(&h->lock);
struct inquiry_cache *c = &hdev->inq_cache;
return jiffies - c->timestamp;
}
static inline void conn_hash_unlock(struct conn_hash *h)
static inline long inquiry_entry_age(struct inquiry_entry *e)
{
spin_unlock(&h->lock);
return jiffies - e->timestamp;
}
static inline void __conn_hash_add(struct conn_hash *h, __u16 handle, struct hci_conn *c)
struct inquiry_entry *inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
void inquiry_cache_update(struct hci_dev *hdev, inquiry_info *info);
void inquiry_cache_flush(struct hci_dev *hdev);
int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf);
/* ----- HCI Connections ----- */
enum {
HCI_CONN_AUTH_PEND,
HCI_CONN_ENCRYPT_PEND
};
#define hci_conn_lock(c) spin_lock(&c->lock)
#define hci_conn_unlock(c) spin_unlock(&c->lock)
#define hci_conn_lock_bh(c) spin_lock_bh(&c->lock)
#define hci_conn_unlock_bh(c) spin_unlock_bh(&c->lock)
#define conn_hash_lock(d) spin_lock(&d->conn_hash->lock)
#define conn_hash_unlock(d) spin_unlock(&d->conn_hash->lock)
#define conn_hash_lock_bh(d) spin_lock_bh(&d->conn_hash->lock)
#define conn_hash_unlock_bh(d) spin_unlock_bh(&d->conn_hash->lock)
static inline void conn_hash_init(struct hci_dev *hdev)
{
list_add(&c->list, &h->list);
h->num++;
struct conn_hash *h = &hdev->conn_hash;
INIT_LIST_HEAD(&h->list);
spin_lock_init(&h->lock);
h->num = 0;
}
static inline void conn_hash_add(struct conn_hash *h, __u16 handle, struct hci_conn *c)
static inline void conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
conn_hash_lock(h);
__conn_hash_add(h, handle, c);
conn_hash_unlock(h);
struct conn_hash *h = &hdev->conn_hash;
list_add(&c->list, &h->list);
h->num++;
}
static inline void __conn_hash_del(struct conn_hash *h, struct hci_conn *c)
static inline void conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
struct conn_hash *h = &hdev->conn_hash;
list_del(&c->list);
h->num--;
}
static inline void conn_hash_del(struct conn_hash *h, struct hci_conn *c)
{
conn_hash_lock(h);
__conn_hash_del(h, c);
conn_hash_unlock(h);
}
static inline struct hci_conn *__conn_hash_lookup(struct conn_hash *h, __u16 handle)
static inline struct hci_conn *conn_hash_lookup_handle(struct hci_dev *hdev,
__u16 handle)
{
register struct conn_hash *h = &hdev->conn_hash;
register struct list_head *p;
register struct hci_conn *c;
......@@ -169,101 +236,89 @@ static inline struct hci_conn *__conn_hash_lookup(struct conn_hash *h, __u16 ha
return NULL;
}
static inline struct hci_conn *conn_hash_lookup(struct conn_hash *h, __u16 handle)
static inline struct hci_conn *conn_hash_lookup_ba(struct hci_dev *hdev,
__u8 type, bdaddr_t *ba)
{
struct hci_conn *conn;
register struct conn_hash *h = &hdev->conn_hash;
register struct list_head *p;
register struct hci_conn *c;
conn_hash_lock(h);
conn = __conn_hash_lookup(h, handle);
conn_hash_unlock(h);
return conn;
list_for_each(p, &h->list) {
c = list_entry(p, struct hci_conn, list);
if (c->type == type && !bacmp(&c->dst, ba))
return c;
}
return NULL;
}
/* ----- HCI Devices ----- */
struct hci_dev {
atomic_t refcnt;
char name[8];
__u32 flags;
__u16 id;
__u8 type;
bdaddr_t bdaddr;
__u8 features[8];
void hci_acl_connect(struct hci_conn *conn);
void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
void hci_add_sco(struct hci_conn *conn, __u16 handle);
__u16 pkt_type;
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
atomic_t cmd_cnt;
unsigned int acl_cnt;
unsigned int sco_cnt;
unsigned int acl_mtu;
unsigned int sco_mtu;
unsigned int acl_max;
unsigned int sco_max;
void *driver_data;
void *l2cap_data;
void *priv;
struct tasklet_struct cmd_task;
struct tasklet_struct rx_task;
struct tasklet_struct tx_task;
struct sk_buff_head rx_q;
struct sk_buff_head raw_q;
struct sk_buff_head cmd_q;
struct sk_buff *sent_cmd;
struct semaphore req_lock;
wait_queue_head_t req_wait_q;
__u32 req_status;
__u32 req_result;
struct inquiry_cache inq_cache;
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src);
int hci_conn_auth(struct hci_conn *conn);
int hci_conn_encrypt(struct hci_conn *conn);
struct conn_hash conn_hash;
struct hci_dev_stats stat;
static inline void hci_conn_set_timer(struct hci_conn *conn, long timeout)
{
mod_timer(&conn->timer, jiffies + timeout);
}
int (*open)(struct hci_dev *hdev);
int (*close)(struct hci_dev *hdev);
int (*flush)(struct hci_dev *hdev);
int (*send)(struct sk_buff *skb);
};
static inline void hci_conn_del_timer(struct hci_conn *conn)
{
del_timer(&conn->timer);
}
static inline void hci_dev_hold(struct hci_dev *hdev)
static inline void hci_conn_hold(struct hci_conn *conn)
{
atomic_inc(&hdev->refcnt);
atomic_inc(&conn->refcnt);
hci_conn_del_timer(conn);
}
static inline void hci_dev_put(struct hci_dev *hdev)
static inline void hci_conn_put(struct hci_conn *conn)
{
atomic_dec(&hdev->refcnt);
if (atomic_dec_and_test(&conn->refcnt) && conn->out)
hci_conn_set_timer(conn, HCI_DISCONN_TIMEOUT);
}
extern struct hci_dev *hci_dev_get(int index);
extern int hci_register_dev(struct hci_dev *hdev);
extern int hci_unregister_dev(struct hci_dev *hdev);
extern int hci_dev_open(__u16 dev);
extern int hci_dev_close(__u16 dev);
extern int hci_dev_reset(__u16 dev);
extern int hci_dev_reset_stat(__u16 dev);
extern int hci_dev_info(unsigned long arg);
extern int hci_dev_list(unsigned long arg);
extern int hci_dev_setscan(unsigned long arg);
extern int hci_dev_setauth(unsigned long arg);
extern int hci_dev_setptype(unsigned long arg);
extern int hci_conn_list(unsigned long arg);
extern int hci_inquiry(unsigned long arg);
extern __u32 hci_dev_setmode(struct hci_dev *hdev, __u32 mode);
extern __u32 hci_dev_getmode(struct hci_dev *hdev);
extern int hci_recv_frame(struct sk_buff *skb);
/* ----- HCI Devices ----- */
static inline void hci_dev_put(struct hci_dev *d)
{
if (atomic_dec_and_test(&d->refcnt))
d->destruct(d);
}
#define hci_dev_hold(d) atomic_inc(&d->refcnt)
#define hci_dev_lock(d) spin_lock(&d->lock)
#define hci_dev_unlock(d) spin_unlock(&d->lock)
#define hci_dev_lock_bh(d) spin_lock_bh(&d->lock)
#define hci_dev_unlock_bh(d) spin_unlock_bh(&d->lock)
struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
int hci_register_dev(struct hci_dev *hdev);
int hci_unregister_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, unsigned long arg);
int hci_get_dev_list(unsigned long arg);
int hci_get_dev_info(unsigned long arg);
int hci_get_conn_list(unsigned long arg);
int hci_get_conn_info(struct hci_dev *hdev, unsigned long arg);
int hci_inquiry(unsigned long arg);
int hci_recv_frame(struct sk_buff *skb);
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
/* ----- LMP capabilities ----- */
#define lmp_rswitch_capable(dev) (dev->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev) (dev->features[0] & LMP_ENCRYPT)
/* ----- HCI tasks ----- */
static inline void hci_sched_cmd(struct hci_dev *hdev)
......@@ -289,38 +344,127 @@ struct hci_proto {
void *priv;
int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr);
int (*connect_cfm) (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 status, struct hci_conn *conn);
int (*connect_ind) (struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
int (*connect_cfm) (struct hci_conn *conn, __u8 status);
int (*disconn_ind) (struct hci_conn *conn, __u8 reason);
int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb , __u16 flags);
int (*recv_acldata) (struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
int (*recv_scodata) (struct hci_conn *conn, struct sk_buff *skb);
int (*auth_cfm) (struct hci_conn *conn, __u8 status);
int (*encrypt_cfm) (struct hci_conn *conn, __u8 status);
};
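For orientation, a hedged kernel-side sketch of how an upper layer is expected to plug into the core with hci_register_proto(). Only the callbacks above come from this hunk; the name/id fields and the module boilerplate follow the old layout and are assumptions here.

	#include <linux/module.h>
	#include <linux/init.h>
	#include <net/bluetooth/bluetooth.h>
	#include <net/bluetooth/hci_core.h>

	static int my_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
	{
		/* Return an accept mask for incoming connections of this link type. */
		return (type == ACL_LINK) ? HCI_LM_ACCEPT : 0;
	}

	static struct hci_proto my_proto = {
		name:		"myproto",		/* assumption: name/id fields as in the old layout */
		id:		HCI_PROTO_L2CAP,
		connect_ind:	my_connect_ind,
		/* connect_cfm, disconn_ind, recv_acldata, auth_cfm, ... as needed */
	};

	static int __init my_proto_init(void)
	{
		return hci_register_proto(&my_proto);
	}

	static void __exit my_proto_exit(void)
	{
		hci_unregister_proto(&my_proto);
	}

	module_init(my_proto_init);
	module_exit(my_proto_exit);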
extern int hci_register_proto(struct hci_proto *hproto);
extern int hci_unregister_proto(struct hci_proto *hproto);
extern int hci_register_notifier(struct notifier_block *nb);
extern int hci_unregister_notifier(struct notifier_block *nb);
extern int hci_connect(struct hci_dev * hdev, bdaddr_t * bdaddr);
extern int hci_disconnect(struct hci_conn *conn, __u8 reason);
extern int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void * param);
extern int hci_send_raw(struct sk_buff *skb);
extern int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
extern int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
register struct hci_proto *hp;
int mask = 0;
hp = hci_proto[HCI_PROTO_L2CAP];
if (hp && hp->connect_ind)
mask |= hp->connect_ind(hdev, bdaddr, type);
hp = hci_proto[HCI_PROTO_SCO];
if (hp && hp->connect_ind)
mask |= hp->connect_ind(hdev, bdaddr, type);
return mask;
}
static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
register struct hci_proto *hp;
hp = hci_proto[HCI_PROTO_L2CAP];
if (hp && hp->connect_cfm)
hp->connect_cfm(conn, status);
hp = hci_proto[HCI_PROTO_SCO];
if (hp && hp->connect_cfm)
hp->connect_cfm(conn, status);
}
static inline void hci_proto_disconn_ind(struct hci_conn *conn, __u8 reason)
{
register struct hci_proto *hp;
hp = hci_proto[HCI_PROTO_L2CAP];
if (hp && hp->disconn_ind)
hp->disconn_ind(conn, reason);
hp = hci_proto[HCI_PROTO_SCO];
if (hp && hp->disconn_ind)
hp->disconn_ind(conn, reason);
}
static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
register struct hci_proto *hp;
hp = hci_proto[HCI_PROTO_L2CAP];
if (hp && hp->auth_cfm)
hp->auth_cfm(conn, status);
hp = hci_proto[HCI_PROTO_SCO];
if (hp && hp->auth_cfm)
hp->auth_cfm(conn, status);
}
static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status)
{
register struct hci_proto *hp;
hp = hci_proto[HCI_PROTO_L2CAP];
if (hp && hp->encrypt_cfm)
hp->encrypt_cfm(conn, status);
hp = hci_proto[HCI_PROTO_SCO];
if (hp && hp->encrypt_cfm)
hp->encrypt_cfm(conn, status);
}
int hci_register_proto(struct hci_proto *hproto);
int hci_unregister_proto(struct hci_proto *hproto);
int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);
int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param);
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
int hci_send_raw(struct sk_buff *skb);
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf);
void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
/* ----- HCI Sockets ----- */
extern void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) &sk->protinfo)
#define hci_pi(sk) ((struct hci_pinfo *) sk->protinfo)
struct hci_pinfo {
struct hci_dev *hdev;
struct hci_filter filter;
__u32 cmsg_mask;
};
/* HCI security filter */
#define HCI_SFLT_MAX_OGF 4
struct hci_sec_filter {
__u32 type_mask;
__u32 event_mask[2];
__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};
/* ----- HCI requests ----- */
#define HCI_REQ_DONE 0
#define HCI_REQ_PEND 1
#define HCI_REQ_CANCELED 2
#define hci_req_lock(d) down(&d->req_lock)
#define hci_req_unlock(d) up(&d->req_lock)
void hci_req_complete(struct hci_dev *hdev, int result);
void hci_req_cancel(struct hci_dev *hdev, int err);
#endif /* __HCI_CORE_H */
......@@ -23,22 +23,17 @@
*/
/*
* $Id: l2cap.h,v 1.5 2001/06/14 21:28:26 maxk Exp $
* $Id: l2cap.h,v 1.1.1.1 2002/03/08 21:03:15 maxk Exp $
*/
#ifndef __L2CAP_H
#define __L2CAP_H
#include <asm/types.h>
#include <asm/byteorder.h>
/* L2CAP defaults */
#define L2CAP_DEFAULT_MTU 672
#define L2CAP_DEFAULT_FLUSH_TO 0xFFFF
#define L2CAP_CONN_TIMEOUT (HZ * 40)
#define L2CAP_DISCONN_TIMEOUT (HZ * 2)
#define L2CAP_CONN_IDLE_TIMEOUT (HZ * 60)
/* L2CAP socket address */
struct sockaddr_l2 {
......@@ -47,17 +42,12 @@ struct sockaddr_l2 {
bdaddr_t l2_bdaddr;
};
/* set/get sockopt defines */
#define L2CAP_OPTIONS 0x01
/* Socket options */
#define L2CAP_OPTIONS 0x01
struct l2cap_options {
__u16 omtu;
__u16 imtu;
__u16 flush_to;
__u32 token_rate;
__u32 bucket_size;
__u32 pick_band;
__u32 latency;
__u32 delay_var;
};
#define L2CAP_CONNINFO 0x02
......@@ -65,6 +55,26 @@ struct l2cap_conninfo {
__u16 hci_handle;
};
#define L2CAP_LM 0x03
#define L2CAP_LM_MASTER 0x0001
#define L2CAP_LM_AUTH 0x0002
#define L2CAP_LM_ENCRYPT 0x0004
#define L2CAP_LM_TRUSTED 0x0008
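A hedged user-space sketch of the new L2CAP_LM option: requesting authentication and encryption on an L2CAP socket before connecting. The SOL_L2CAP and BTPROTO_L2CAP values are assumptions not defined in this hunk.

	#include <sys/socket.h>
	#include <net/bluetooth/l2cap.h>	/* assumption: header visible to user space */

	#define SOL_L2CAP     6			/* assumption */
	#define BTPROTO_L2CAP 0			/* assumption */

	static int l2cap_secure_socket(void)
	{
		int lm = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
		int sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);

		if (sk >= 0)
			setsockopt(sk, SOL_L2CAP, L2CAP_LM, &lm, sizeof(lm));
		return sk;
	}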
#define L2CAP_QOS 0x04
struct l2cap_qos {
__u16 service_type;
__u32 token_rate;
__u32 token_bucket_size;
__u32 peak_bandwidth;
__u32 latency;
__u32 delay_variation;
};
#define L2CAP_SERV_NO_TRAFFIC 0x00
#define L2CAP_SERV_BEST_EFFORT 0x01
#define L2CAP_SERV_GUARANTEED 0x02
/* L2CAP command codes */
#define L2CAP_COMMAND_REJ 0x01
#define L2CAP_CONN_REQ 0x02
......@@ -79,7 +89,6 @@ struct l2cap_conninfo {
#define L2CAP_INFO_RSP 0x0b
/* L2CAP structures */
typedef struct {
__u16 len;
__u16 cid;
......@@ -112,11 +121,17 @@ typedef struct {
} __attribute__ ((packed)) l2cap_conn_rsp;
#define L2CAP_CONN_RSP_SIZE 8
#define L2CAP_CONN_SUCCESS 0x0000
#define L2CAP_CONN_PEND 0x0001
#define L2CAP_CONN_BAD_PSM 0x0002
#define L2CAP_CONN_SEC_BLOCK 0x0003
#define L2CAP_CONN_NO_MEM 0x0004
/* connect result */
#define L2CAP_CR_SUCCESS 0x0000
#define L2CAP_CR_PEND 0x0001
#define L2CAP_CR_BAD_PSM 0x0002
#define L2CAP_CR_SEC_BLOCK 0x0003
#define L2CAP_CR_NO_MEM 0x0004
/* connect status */
#define L2CAP_CS_NO_INFO 0x0000
#define L2CAP_CS_AUTHEN_PEND 0x0001
#define L2CAP_CS_AUTHOR_PEND 0x0002
typedef struct {
__u16 dcid;
......@@ -147,6 +162,8 @@ typedef struct {
#define L2CAP_CONF_FLUSH_TO 0x02
#define L2CAP_CONF_QOS 0x03
#define L2CAP_CONF_MAX_SIZE 22
typedef struct {
__u16 dcid;
__u16 scid;
......@@ -159,4 +176,70 @@ typedef struct {
} __attribute__ ((packed)) l2cap_disconn_rsp;
#define L2CAP_DISCONN_RSP_SIZE 4
typedef struct {
__u16 type;
__u8 data[0];
} __attribute__ ((packed)) l2cap_info_req;
#define L2CAP_INFO_REQ_SIZE 2
typedef struct {
__u16 type;
__u16 result;
__u8 data[0];
} __attribute__ ((packed)) l2cap_info_rsp;
#define L2CAP_INFO_RSP_SIZE 4
/* ----- L2CAP connections ----- */
struct l2cap_chan_list {
struct sock *head;
rwlock_t lock;
long num;
};
struct l2cap_conn {
struct hci_conn *hcon;
bdaddr_t *dst;
bdaddr_t *src;
unsigned int mtu;
spinlock_t lock;
struct sk_buff *rx_skb;
__u32 rx_len;
__u8 rx_ident;
__u8 tx_ident;
struct l2cap_chan_list chan_list;
};
/* ----- L2CAP channel and socket info ----- */
#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk->protinfo)
struct l2cap_pinfo {
__u16 psm;
__u16 dcid;
__u16 scid;
__u16 imtu;
__u16 omtu;
__u16 flush_to;
__u32 link_mode;
__u8 conf_state;
__u16 conf_mtu;
__u8 ident;
struct l2cap_conn *conn;
struct sock *next_c;
struct sock *prev_c;
};
#define CONF_REQ_SENT 0x01
#define CONF_INPUT_DONE 0x02
#define CONF_OUTPUT_DONE 0x04
#endif /* __L2CAP_H */
......@@ -23,122 +23,59 @@
*/
/*
* $Id: l2cap_core.h,v 1.6 2001/08/03 04:19:49 maxk Exp $
* $Id: sco.h,v 1.1.1.1 2002/03/08 21:03:15 maxk Exp $
*/
#ifndef __L2CAP_CORE_H
#define __L2CAP_CORE_H
#ifndef __SCO_H
#define __SCO_H
#ifdef __KERNEL__
/* SCO defaults */
#define SCO_DEFAULT_MTU 500
#define SCO_DEFAULT_FLUSH_TO 0xFFFF
/* ----- L2CAP interface ----- */
struct l2cap_iff {
struct list_head list;
struct hci_dev *hdev;
bdaddr_t *bdaddr;
__u16 mtu;
spinlock_t lock;
struct list_head conn_list;
};
#define SCO_CONN_TIMEOUT (HZ * 40)
#define SCO_DISCONN_TIMEOUT (HZ * 2)
#define SCO_CONN_IDLE_TIMEOUT (HZ * 60)
static inline void l2cap_iff_lock(struct l2cap_iff *iff)
{
spin_lock(&iff->lock);
}
static inline void l2cap_iff_unlock(struct l2cap_iff *iff)
{
spin_unlock(&iff->lock);
}
/* ----- L2CAP connections ----- */
struct l2cap_chan_list {
struct sock *head;
rwlock_t lock;
long num;
/* SCO socket address */
struct sockaddr_sco {
sa_family_t sco_family;
bdaddr_t sco_bdaddr;
};
struct l2cap_conn {
struct l2cap_iff *iff;
struct list_head list;
/* set/get sockopt defines */
#define SCO_OPTIONS 0x01
struct sco_options {
__u16 mtu;
};
struct hci_conn *hconn;
#define SCO_CONNINFO 0x02
struct sco_conninfo {
__u16 hci_handle;
};
__u16 state;
__u8 out;
bdaddr_t src;
bdaddr_t dst;
/* ---- SCO connections ---- */
struct sco_conn {
struct hci_conn *hcon;
bdaddr_t *dst;
bdaddr_t *src;
spinlock_t lock;
atomic_t refcnt;
struct sk_buff *rx_skb;
__u32 rx_len;
__u8 rx_ident;
__u8 tx_ident;
struct l2cap_chan_list chan_list;
struct sock *sk;
struct timer_list timer;
unsigned int mtu;
};
static inline void __l2cap_conn_link(struct l2cap_iff *iff, struct l2cap_conn *c)
{
list_add(&c->list, &iff->conn_list);
}
#define sco_conn_lock(c) spin_lock(&c->lock);
#define sco_conn_unlock(c) spin_unlock(&c->lock);
static inline void __l2cap_conn_unlink(struct l2cap_iff *iff, struct l2cap_conn *c)
{
list_del(&c->list);
}
/* ----- SCO socket info ----- */
#define sco_pi(sk) ((struct sco_pinfo *) sk->protinfo)
/* ----- L2CAP channel and socket info ----- */
#define l2cap_pi(sk) ((struct l2cap_pinfo *) &sk->protinfo)
struct l2cap_accept_q {
struct sock *head;
struct sock *tail;
};
struct l2cap_pinfo {
bdaddr_t src;
bdaddr_t dst;
__u16 psm;
__u16 dcid;
__u16 scid;
struct sco_pinfo {
__u32 flags;
__u16 imtu;
__u16 omtu;
__u16 flush_to;
__u8 conf_state;
__u16 conf_mtu;
__u8 ident;
struct l2cap_conn *conn;
struct sock *next_c;
struct sock *prev_c;
struct sock *parent;
struct sock *next_q;
struct sock *prev_q;
struct l2cap_accept_q accept_q;
struct sco_conn *conn;
};
#define CONF_REQ_SENT 0x01
#define CONF_INPUT_DONE 0x02
#define CONF_OUTPUT_DONE 0x04
extern struct bluez_sock_list l2cap_sk_list;
extern struct list_head l2cap_iff_list;
extern rwlock_t l2cap_rt_lock;
extern void l2cap_register_proc(void);
extern void l2cap_unregister_proc(void);
#endif /* __KERNEL__ */
#endif /* __L2CAP_CORE_H */
#endif /* __SCO_H */
Bluetooth subsystem support
CONFIG_BLUEZ
Bluetooth is a low-cost, low-power, short-range wireless technology.
It was designed as a replacement for cables and other short-range
......@@ -6,11 +7,12 @@ CONFIG_BLUEZ
Bluetooth can be found at <http://www.bluetooth.com/>.
The Linux Bluetooth subsystem consists of several layers:
HCI Core (device and connection manager, scheduler)
BlueZ Core (HCI device and connection manager, scheduler)
HCI Device drivers (interface to the hardware)
L2CAP Module (L2CAP protocol)
SCO Module (SCO links)
Say Y here to enable Linux Bluetooth support and to build HCI Core
Say Y here to enable Linux Bluetooth support and to build BlueZ Core
layer.
To use the Linux Bluetooth subsystem, you will need several user-space
......@@ -18,8 +20,9 @@ CONFIG_BLUEZ
Bluetooth kernel modules are provided in the BlueZ package.
For more information, see <http://bluez.sourceforge.net/>.
If you want to compile HCI Core as module (hci.o) say M here.
If you want to compile BlueZ Core as module (bluez.o) say M here.
L2CAP protocol support
CONFIG_BLUEZ_L2CAP
L2CAP (Logical Link Control and Adaptation Protocol) provides
connection-oriented and connectionless data transport. L2CAP
......@@ -28,3 +31,11 @@ CONFIG_BLUEZ_L2CAP
Say Y here to compile L2CAP support into the kernel or say M to
compile it as module (l2cap.o).
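To make the help text concrete, a hedged sketch of an L2CAP client using the sockaddr_l2 address from this patch. BTPROTO_L2CAP, the l2_psm field (cut from the header hunk earlier) and bacpy() availability in user space are assumptions, and PSM byte-order handling is omitted.

	#include <string.h>
	#include <sys/socket.h>
	#include <net/bluetooth/bluetooth.h>
	#include <net/bluetooth/l2cap.h>	/* assumption: headers visible to user space */

	#define BTPROTO_L2CAP 0			/* assumption */

	int l2cap_connect_psm(const bdaddr_t *dst, unsigned short psm)
	{
		struct sockaddr_l2 addr;
		int sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
		if (sk < 0)
			return -1;

		memset(&addr, 0, sizeof(addr));
		addr.l2_family = AF_BLUETOOTH;
		addr.l2_psm    = psm;		/* assumption: l2_psm field; byte order omitted */
		bacpy(&addr.l2_bdaddr, (bdaddr_t *) dst);

		if (connect(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0)
			return -1;
		return sk;			/* frames now flow with send()/recv() */
	}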
SCO links support
CONFIG_BLUEZ_SCO
SCO links provide voice transport over Bluetooth. SCO support is
required for voice applications like Headset and Audio.
Say Y here to compile SCO support into the kernel or say M to
compile it as module (sco.o).
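Similarly, a hedged sketch of a SCO (voice) connection using the sockaddr_sco address from this patch, reading the negotiated MTU back through SCO_OPTIONS. The BTPROTO_SCO and SOL_SCO values are assumptions.

	#include <string.h>
	#include <sys/socket.h>
	#include <net/bluetooth/bluetooth.h>
	#include <net/bluetooth/sco.h>		/* assumption: headers visible to user space */

	#define BTPROTO_SCO 2			/* assumption */
	#define SOL_SCO     17			/* assumption */

	int sco_connect(const bdaddr_t *dst)
	{
		struct sockaddr_sco addr;
		struct sco_options opts;
		socklen_t len = sizeof(opts);
		int sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_SCO);
		if (sk < 0)
			return -1;

		memset(&addr, 0, sizeof(addr));
		addr.sco_family = AF_BLUETOOTH;
		bacpy(&addr.sco_bdaddr, (bdaddr_t *) dst);

		if (connect(sk, (struct sockaddr *) &addr, sizeof(addr)) < 0)
			return -1;

		/* Voice frames must honour the SCO MTU reported by the device. */
		getsockopt(sk, SOL_SCO, SCO_OPTIONS, &opts, &len);
		return sk;
	}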
......@@ -9,6 +9,7 @@ if [ "$CONFIG_NET" != "n" ]; then
if [ "$CONFIG_BLUEZ" != "n" ]; then
dep_tristate 'L2CAP protocol support' CONFIG_BLUEZ_L2CAP $CONFIG_BLUEZ
dep_tristate 'SCO links support' CONFIG_BLUEZ_SCO $CONFIG_BLUEZ
source drivers/bluetooth/Config.in
fi
endmenu
......
#
# Makefile for the Bluetooth subsystem
#
O_TARGET := bluetooth.o
list-multi := bluez.o
export-objs := syms.o
hci-objs := af_bluetooth.o hci_core.o hci_sock.o lib.o syms.o
l2cap-objs := l2cap_core.o l2cap_proc.o
bluez-objs := af_bluetooth.o hci_core.o hci_conn.o hci_event.o hci_sock.o lib.o syms.o
obj-$(CONFIG_BLUEZ) += hci.o
obj-$(CONFIG_BLUEZ) += bluez.o
obj-$(CONFIG_BLUEZ_L2CAP) += l2cap.o
obj-$(CONFIG_BLUEZ_SCO) += sco.o
include $(TOPDIR)/Rules.make
bluez.o: $(bluez-objs)
$(LD) -r -o $@ $(bluez-objs)
......@@ -25,14 +25,15 @@
/*
* BlueZ Bluetooth address family and sockets.
*
* $Id: af_bluetooth.c,v 1.4 2001/07/05 18:42:44 maxk Exp $
* $Id: af_bluetooth.c,v 1.3 2002/04/17 17:37:15 maxk Exp $
*/
#define VERSION "1.1"
#define VERSION "2.0"
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
......@@ -40,6 +41,7 @@
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <net/sock.h>
......@@ -48,32 +50,39 @@
#endif
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/bluez.h>
#ifndef AF_BLUETOOTH_DEBUG
#undef BT_DBG
#define BT_DBG( A... )
#endif
/* Bluetooth sockets */
static struct net_proto_family *bluez_sock[BLUEZ_MAX_PROTO];
#define BLUEZ_MAX_PROTO 4
static struct net_proto_family *bluez_proto[BLUEZ_MAX_PROTO];
static kmem_cache_t *bluez_sock_cache;
int bluez_sock_register(int proto, struct net_proto_family *ops)
{
if (proto > BLUEZ_MAX_PROTO)
if (proto >= BLUEZ_MAX_PROTO)
return -EINVAL;
if (bluez_sock[proto])
if (bluez_proto[proto])
return -EEXIST;
bluez_sock[proto] = ops;
bluez_proto[proto] = ops;
return 0;
}
int bluez_sock_unregister(int proto)
{
if (proto > BLUEZ_MAX_PROTO)
if (proto >= BLUEZ_MAX_PROTO)
return -EINVAL;
if (!bluez_sock[proto])
if (!bluez_proto[proto])
return -ENOENT;
bluez_sock[proto] = NULL;
bluez_proto[proto] = NULL;
return 0;
}
......@@ -83,27 +92,54 @@ static int bluez_sock_create(struct socket *sock, int proto)
return -EINVAL;
#if defined(CONFIG_KMOD)
if (!bluez_sock[proto]) {
if (!bluez_proto[proto]) {
char module_name[30];
sprintf(module_name, "bt-proto-%d", proto);
request_module(module_name);
}
#endif
if (!bluez_sock[proto])
if (!bluez_proto[proto])
return -ENOENT;
return bluez_sock[proto]->create(sock, proto);
return bluez_proto[proto]->create(sock, proto);
}
struct sock *bluez_sock_alloc(struct socket *sock, int proto, int pi_size, int prio)
{
struct sock *sk;
void *pi;
sk = sk_alloc(PF_BLUETOOTH, prio, sizeof(struct bluez_sock), bluez_sock_cache);
if (!sk)
return NULL;
if (pi_size) {
pi = kmalloc(pi_size, prio);
if (!pi) {
sk_free(sk);
return NULL;
}
memset(pi, 0, pi_size);
sk->protinfo = pi;
}
sock_init_data(sock, sk);
INIT_LIST_HEAD(&bluez_sk(sk)->accept_q);
sk->zapped = 0;
sk->protocol = proto;
sk->state = BT_OPEN;
return sk;
}
void bluez_sock_link(struct bluez_sock_list *l, struct sock *sk)
{
write_lock(&l->lock);
sk->next = l->head;
l->head = sk;
sock_hold(sk);
write_unlock(&l->lock);
}
......@@ -122,39 +158,197 @@ void bluez_sock_unlink(struct bluez_sock_list *l, struct sock *sk)
write_unlock(&l->lock);
}
struct net_proto_family bluez_sock_family_ops = {
family: PF_BLUETOOTH,
create: bluez_sock_create,
void bluez_accept_enqueue(struct sock *parent, struct sock *sk)
{
BT_DBG("parent %p, sk %p", parent, sk);
sock_hold(sk);
list_add_tail(&bluez_sk(sk)->accept_q, &bluez_sk(parent)->accept_q);
bluez_sk(sk)->parent = parent;
parent->ack_backlog++;
}
static void bluez_accept_unlink(struct sock *sk)
{
BT_DBG("sk %p state %d", sk, sk->state);
list_del_init(&bluez_sk(sk)->accept_q);
bluez_sk(sk)->parent->ack_backlog--;
bluez_sk(sk)->parent = NULL;
sock_put(sk);
}
struct sock *bluez_accept_dequeue(struct sock *parent, struct socket *newsock)
{
struct list_head *p, *n;
struct sock *sk;
BT_DBG("parent %p", parent);
list_for_each_safe(p, n, &bluez_sk(parent)->accept_q) {
sk = (struct sock *) list_entry(p, struct bluez_sock, accept_q);
lock_sock(sk);
if (sk->state == BT_CLOSED) {
release_sock(sk);
bluez_accept_unlink(sk);
continue;
}
if (sk->state == BT_CONNECTED || !newsock) {
bluez_accept_unlink(sk);
if (newsock)
sock_graft(sk, newsock);
release_sock(sk);
return sk;
}
release_sock(sk);
}
return NULL;
}
int bluez_sock_recvmsg(struct socket *sock, struct msghdr *msg, int len, int flags, struct scm_cookie *scm)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
struct sk_buff *skb;
int copied, err;
BT_DBG("sock %p sk %p len %d", sock, sk, len);
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
if (!(skb = skb_recv_datagram(sk, flags, noblock, &err))) {
if (sk->shutdown & RCV_SHUTDOWN)
return 0;
return err;
}
msg->msg_namelen = 0;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
skb->h.raw = skb->data;
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
skb_free_datagram(sk, skb);
return err ? : copied;
}
unsigned int bluez_sock_poll(struct file * file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
unsigned int mask;
BT_DBG("sock %p, sk %p", sock, sk);
poll_wait(file, sk->sleep, wait);
mask = 0;
if (sk->err || !skb_queue_empty(&sk->error_queue))
mask |= POLLERR;
if (sk->shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
if (!skb_queue_empty(&sk->receive_queue) ||
!list_empty(&bluez_sk(sk)->accept_q) ||
(sk->shutdown & RCV_SHUTDOWN))
mask |= POLLIN | POLLRDNORM;
if (sk->state == BT_CLOSED)
mask |= POLLHUP;
if (sock_writeable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
return mask;
}
int bluez_sock_w4_connect(struct sock *sk, int flags)
{
DECLARE_WAITQUEUE(wait, current);
long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
int err = 0;
BT_DBG("sk %p", sk);
add_wait_queue(sk->sleep, &wait);
while (sk->state != BT_CONNECTED) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
err = -EAGAIN;
break;
}
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
err = 0;
if (sk->state == BT_CONNECTED)
break;
if (sk->err) {
err = sock_error(sk);
break;
}
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
break;
}
}
set_current_state(TASK_RUNNING);
remove_wait_queue(sk->sleep, &wait);
return err;
}
struct net_proto_family bluez_sock_family_ops =
{
PF_BLUETOOTH, bluez_sock_create
};
int bluez_init(void)
static int __init bluez_init(void)
{
INF("BlueZ HCI Core ver %s Copyright (C) 2000,2001 Qualcomm Inc",
BT_INFO("BlueZ Core ver %s Copyright (C) 2000,2001 Qualcomm Inc",
VERSION);
INF("Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>");
BT_INFO("Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>");
proc_mkdir("bluetooth", NULL);
/* Init socket cache */
bluez_sock_cache = kmem_cache_create("bluez_sock",
sizeof(struct bluez_sock), 0,
SLAB_HWCACHE_ALIGN, 0, 0);
if (!bluez_sock_cache) {
BT_ERR("BlueZ socket cache creation failed");
return -ENOMEM;
}
sock_register(&bluez_sock_family_ops);
/* Init HCI Core */
hci_core_init();
/* Init sockets */
hci_sock_init();
return 0;
}
void bluez_cleanup(void)
static void __exit bluez_cleanup(void)
{
/* Release socket */
hci_sock_cleanup();
/* Release core */
hci_core_cleanup();
sock_unregister(PF_BLUETOOTH);
kmem_cache_destroy(bluez_sock_cache);
remove_proc_entry("bluetooth", NULL);
}
......@@ -164,5 +358,6 @@ module_init(bluez_init);
module_exit(bluez_cleanup);
MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>");
MODULE_DESCRIPTION("BlueZ HCI Core ver " VERSION);
MODULE_DESCRIPTION("BlueZ Core ver " VERSION);
MODULE_LICENSE("GPL");
#endif
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/*
* HCI Connection handling.
*
* $Id: hci_conn.c,v 1.2 2002/04/17 17:37:16 maxk Exp $
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#ifndef HCI_CORE_DEBUG
#undef BT_DBG
#define BT_DBG( A... )
#endif
void hci_acl_connect(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
struct inquiry_entry *ie;
create_conn_cp cp;
BT_DBG("%p", conn);
conn->state = BT_CONNECT;
conn->out = 1;
conn->link_mode = HCI_LM_MASTER;
memset(&cp, 0, sizeof(cp));
bacpy(&cp.bdaddr, &conn->dst);
if ((ie = inquiry_cache_lookup(hdev, &conn->dst)) &&
inquiry_entry_age(ie) > INQUIRY_ENTRY_AGE_MAX) {
cp.pscan_rep_mode = ie->info.pscan_rep_mode;
cp.pscan_mode = ie->info.pscan_mode;
cp.clock_offset = ie->info.clock_offset | __cpu_to_le16(0x8000);
}
cp.pkt_type = __cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK);
if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
cp.role_switch = 0x01;
else
cp.role_switch = 0x00;
hci_send_cmd(hdev, OGF_LINK_CTL, OCF_CREATE_CONN,
CREATE_CONN_CP_SIZE, &cp);
}
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
disconnect_cp cp;
BT_DBG("%p", conn);
conn->state = BT_DISCONN;
cp.handle = __cpu_to_le16(conn->handle);
cp.reason = reason;
hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_DISCONNECT,
DISCONNECT_CP_SIZE, &cp);
}
void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
struct hci_dev *hdev = conn->hdev;
add_sco_cp cp;
BT_DBG("%p", conn);
conn->state = BT_CONNECT;
conn->out = 1;
cp.pkt_type = __cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK);
cp.handle = __cpu_to_le16(handle);
hci_send_cmd(hdev, OGF_LINK_CTL, OCF_ADD_SCO, ADD_SCO_CP_SIZE, &cp);
}
static void hci_conn_timeout(unsigned long arg)
{
struct hci_conn *conn = (void *)arg;
struct hci_dev *hdev = conn->hdev;
BT_DBG("conn %p state %d", conn, conn->state);
if (atomic_read(&conn->refcnt))
return;
hci_dev_lock(hdev);
if (conn->state == BT_CONNECTED)
hci_acl_disconn(conn, 0x13);
else
conn->state = BT_CLOSED;
hci_dev_unlock(hdev);
return;
}
static void hci_conn_init_timer(struct hci_conn *conn)
{
init_timer(&conn->timer);
conn->timer.function = hci_conn_timeout;
conn->timer.data = (unsigned long)conn;
}
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
struct hci_conn *conn;
BT_DBG("%s dst %s", hdev->name, batostr(dst));
if (!(conn = kmalloc(sizeof(struct hci_conn), GFP_ATOMIC)))
return NULL;
memset(conn, 0, sizeof(struct hci_conn));
bacpy(&conn->dst, dst);
conn->type = type;
conn->hdev = hdev;
conn->state = BT_OPEN;
skb_queue_head_init(&conn->data_q);
hci_conn_init_timer(conn);
atomic_set(&conn->refcnt, 0);
hci_dev_hold(hdev);
tasklet_disable(&hdev->tx_task);
conn_hash_add(hdev, conn);
tasklet_enable(&hdev->tx_task);
return conn;
}
int hci_conn_del(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);
hci_conn_del_timer(conn);
if (conn->type == SCO_LINK) {
struct hci_conn *acl = conn->link;
if (acl) {
acl->link = NULL;
hci_conn_put(acl);
}
/* Unacked frames */
hdev->sco_cnt += conn->sent;
} else {
struct hci_conn *sco = conn->link;
if (sco)
sco->link = NULL;
/* Unacked frames */
hdev->acl_cnt += conn->sent;
}
tasklet_disable(&hdev->tx_task);
conn_hash_del(hdev, conn);
tasklet_enable(&hdev->tx_task);
skb_queue_purge(&conn->data_q);
hci_dev_put(hdev);
kfree(conn);
return 0;
}
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
int use_src = bacmp(src, BDADDR_ANY);
struct hci_dev *hdev = NULL;
struct list_head *p;
BT_DBG("%s -> %s", batostr(src), batostr(dst));
spin_lock_bh(&hdev_list_lock);
list_for_each(p, &hdev_list) {
struct hci_dev *d;
d = list_entry(p, struct hci_dev, list);
if (!test_bit(HCI_UP, &d->flags))
continue;
/* Simple routing:
* No source address - find interface with bdaddr != dst
* Source address - find interface with bdaddr == src
*/
if (use_src) {
if (!bacmp(&d->bdaddr, src)) {
hdev = d; break;
}
} else {
if (bacmp(&d->bdaddr, dst)) {
hdev = d; break;
}
}
}
if (hdev)
hci_dev_hold(hdev);
spin_unlock_bh(&hdev_list_lock);
return hdev;
}
/* Create SCO or ACL connection.
* Device _must_ be locked */
struct hci_conn * hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
struct hci_conn *acl;
BT_DBG("%s dst %s", hdev->name, batostr(dst));
if (!(acl = conn_hash_lookup_ba(hdev, ACL_LINK, dst))) {
if (!(acl = hci_conn_add(hdev, ACL_LINK, dst)))
return NULL;
}
hci_conn_hold(acl);
if (acl->state == BT_OPEN || acl->state == BT_CLOSED)
hci_acl_connect(acl);
if (type == SCO_LINK) {
struct hci_conn *sco;
if (!(sco = conn_hash_lookup_ba(hdev, SCO_LINK, dst))) {
if (!(sco = hci_conn_add(hdev, SCO_LINK, dst))) {
hci_conn_put(acl);
return NULL;
}
}
acl->link = sco;
sco->link = acl;
hci_conn_hold(sco);
if (acl->state == BT_CONNECTED &&
(sco->state == BT_OPEN || sco->state == BT_CLOSED))
hci_add_sco(sco, acl->handle);
return sco;
} else {
return acl;
}
}
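A hedged sketch of how an upper layer (the caller name is illustrative) is expected to combine hci_get_route() and hci_connect(), following the locking comment above and the reference counting done by hci_conn_hold() and hci_dev_put().

	#include <net/bluetooth/bluetooth.h>
	#include <net/bluetooth/hci_core.h>

	static struct hci_conn *my_acl_connect(bdaddr_t *src, bdaddr_t *dst)
	{
		struct hci_dev *hdev;
		struct hci_conn *hcon;

		/* hci_get_route() returns the device with a reference held. */
		hdev = hci_get_route(dst, src);
		if (!hdev)
			return NULL;

		/* hci_connect() must run with the device locked. */
		hci_dev_lock_bh(hdev);
		hcon = hci_connect(hdev, ACL_LINK, dst);
		hci_dev_unlock_bh(hdev);

		hci_dev_put(hdev);
		return hcon;	/* carries its own reference via hci_conn_hold() */
	}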
/* Authenticate remote device */
int hci_conn_auth(struct hci_conn *conn)
{
BT_DBG("conn %p", conn);
if (conn->link_mode & HCI_LM_AUTH)
return 1;
if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
auth_requested_cp ar;
ar.handle = __cpu_to_le16(conn->handle);
hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_AUTH_REQUESTED,
AUTH_REQUESTED_CP_SIZE, &ar);
}
return 0;
}
/* Enable encryption */
int hci_conn_encrypt(struct hci_conn *conn)
{
BT_DBG("conn %p", conn);
if (conn->link_mode & HCI_LM_ENCRYPT)
return 1;
if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
return 0;
if (hci_conn_auth(conn)) {
set_conn_encrypt_cp ce;
ce.handle = __cpu_to_le16(conn->handle);
ce.encrypt = 1;
hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_SET_CONN_ENCRYPT,
SET_CONN_ENCRYPT_CP_SIZE, &ce);
}
return 0;
}
/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
struct conn_hash *h = &hdev->conn_hash;
struct list_head *p;
BT_DBG("hdev %s", hdev->name);
p = h->list.next;
while (p != &h->list) {
struct hci_conn *c;
c = list_entry(p, struct hci_conn, list);
p = p->next;
c->state = BT_CLOSED;
hci_proto_disconn_ind(c, 0x16);
hci_conn_del(c);
}
}
int hci_get_conn_list(unsigned long arg)
{
struct hci_conn_list_req req, *cl;
struct hci_conn_info *ci;
struct hci_dev *hdev;
struct list_head *p;
int n = 0, size;
if (copy_from_user(&req, (void *) arg, sizeof(req)))
return -EFAULT;
if (!(hdev = hci_dev_get(req.dev_id)))
return -ENODEV;
size = req.conn_num * sizeof(struct hci_conn_info) + sizeof(req);
if (verify_area(VERIFY_WRITE, (void *)arg, size))
return -EFAULT;
if (!(cl = (void *) kmalloc(size, GFP_KERNEL)))
return -ENOMEM;
ci = cl->conn_info;
hci_dev_lock_bh(hdev);
list_for_each(p, &hdev->conn_hash.list) {
register struct hci_conn *c;
c = list_entry(p, struct hci_conn, list);
bacpy(&(ci + n)->bdaddr, &c->dst);
(ci + n)->handle = c->handle;
(ci + n)->type = c->type;
(ci + n)->out = c->out;
(ci + n)->state = c->state;
(ci + n)->link_mode = c->link_mode;
n++;
}
hci_dev_unlock_bh(hdev);
cl->dev_id = hdev->id;
cl->conn_num = n;
size = n * sizeof(struct hci_conn_info) + sizeof(req);
hci_dev_put(hdev);
copy_to_user((void *) arg, cl, size);
kfree(cl);
return 0;
}
int hci_get_conn_info(struct hci_dev *hdev, unsigned long arg)
{
struct hci_conn_info_req req;
struct hci_conn_info ci;
struct hci_conn *conn;
char *ptr = (void *) arg + sizeof(req);
if (copy_from_user(&req, (void *) arg, sizeof(req)))
return -EFAULT;
if (verify_area(VERIFY_WRITE, ptr, sizeof(ci)))
return -EFAULT;
hci_dev_lock_bh(hdev);
conn = conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
if (conn) {
bacpy(&ci.bdaddr, &conn->dst);
ci.handle = conn->handle;
ci.type = conn->type;
ci.out = conn->out;
ci.state = conn->state;
ci.link_mode = conn->link_mode;
}
hci_dev_unlock_bh(hdev);
if (!conn)
return -ENOENT;
copy_to_user(ptr, &ci, sizeof(ci));
return 0;
}
......@@ -25,7 +25,7 @@
/*
* BlueZ HCI Core.
*
* $Id: hci_core.c,v 1.22 2001/08/03 04:19:50 maxk Exp $
* $Id: hci_core.c,v 1.6 2002/04/17 17:37:16 maxk Exp $
*/
#include <linux/config.h>
......@@ -50,12 +50,11 @@
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/bluez.h>
#include <net/bluetooth/hci_core.h>
#ifndef HCI_CORE_DEBUG
#undef DBG
#define DBG( A... )
#undef BT_DBG
#define BT_DBG( A... )
#endif
static void hci_cmd_task(unsigned long arg);
......@@ -63,279 +62,43 @@ static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);
static rwlock_t hci_task_lock = RW_LOCK_UNLOCKED;
rwlock_t hci_task_lock = RW_LOCK_UNLOCKED;
/* HCI device list */
struct hci_dev *hdev_list[HCI_MAX_DEV];
LIST_HEAD(hdev_list);
spinlock_t hdev_list_lock;
#define GET_HDEV(a) (hdev_list[a])
/* HCI protocol list */
struct hci_proto *hproto_list[HCI_MAX_PROTO];
#define GET_HPROTO(a) (hproto_list[a])
/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];
/* HCI notifiers list */
struct notifier_block *hci_dev_notifier;
static struct notifier_block *hci_notifier;
/* HCI device notifications */
int hci_register_notifier(struct notifier_block *nb)
{
int err, i;
struct hci_dev *hdev;
if ((err = notifier_chain_register(&hci_dev_notifier, nb)))
return err;
/* Notify about already registered devices */
spin_lock(&hdev_list_lock);
for (i = 0; i < HCI_MAX_DEV; i++) {
if (!(hdev = GET_HDEV(i)))
continue;
if (hdev->flags & HCI_UP)
(*nb->notifier_call)(nb, HCI_DEV_UP, hdev);
}
spin_unlock(&hdev_list_lock);
return 0;
}
int hci_unregister_notifier(struct notifier_block *nb)
{
return notifier_chain_unregister(&hci_dev_notifier, nb);
}
static inline void hci_notify(struct hci_dev *hdev, int event)
{
notifier_call_chain(&hci_dev_notifier, event, hdev);
}
/* Get HCI device by index (device is locked on return)*/
struct hci_dev *hci_dev_get(int index)
{
struct hci_dev *hdev;
DBG("%d", index);
if (index < 0 || index >= HCI_MAX_DEV)
return NULL;
spin_lock(&hdev_list_lock);
if ((hdev = GET_HDEV(index)))
hci_dev_hold(hdev);
spin_unlock(&hdev_list_lock);
return hdev;
}
/* Flush inquiry cache */
void inquiry_cache_flush(struct inquiry_cache *cache)
{
struct inquiry_entry *next = cache->list, *e;
DBG("cache %p", cache);
cache->list = NULL;
while ((e = next)) {
next = e->next;
kfree(e);
}
}
/* Lookup by bdaddr.
* Cache must be locked. */
static struct inquiry_entry * __inquiry_cache_lookup(struct inquiry_cache *cache, bdaddr_t *bdaddr)
{
struct inquiry_entry *e;
DBG("cache %p, %s", cache, batostr(bdaddr));
for (e = cache->list; e; e = e->next)
if (!bacmp(&e->info.bdaddr, bdaddr))
break;
return e;
}
static void inquiry_cache_update(struct inquiry_cache *cache, inquiry_info *info)
{
struct inquiry_entry *e;
DBG("cache %p, %s", cache, batostr(&info->bdaddr));
inquiry_cache_lock(cache);
if (!(e = __inquiry_cache_lookup(cache, &info->bdaddr))) {
/* Entry not in the cache. Add new one. */
if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
goto unlock;
memset(e, 0, sizeof(struct inquiry_entry));
e->next = cache->list;
cache->list = e;
}
memcpy(&e->info, info, sizeof(inquiry_info));
e->timestamp = jiffies;
cache->timestamp = jiffies;
unlock:
inquiry_cache_unlock(cache);
}
static int inquiry_cache_dump(struct inquiry_cache *cache, int num, __u8 *buf)
{
inquiry_info *info = (inquiry_info *) buf;
struct inquiry_entry *e;
int copied = 0;
inquiry_cache_lock(cache);
for (e = cache->list; e && copied < num; e = e->next, copied++)
memcpy(info++, &e->info, sizeof(inquiry_info));
inquiry_cache_unlock(cache);
DBG("cache %p, copied %d", cache, copied);
return copied;
}
/* --------- BaseBand connections --------- */
static struct hci_conn *hci_conn_add(struct hci_dev *hdev, __u16 handle, __u8 type, bdaddr_t *dst)
{
struct hci_conn *conn;
DBG("%s handle %d dst %s", hdev->name, handle, batostr(dst));
if ( conn_hash_lookup(&hdev->conn_hash, handle)) {
ERR("%s handle 0x%x already exists", hdev->name, handle);
return NULL;
}
if (!(conn = kmalloc(sizeof(struct hci_conn), GFP_ATOMIC)))
return NULL;
memset(conn, 0, sizeof(struct hci_conn));
bacpy(&conn->dst, dst);
conn->handle = handle;
conn->type = type;
conn->hdev = hdev;
skb_queue_head_init(&conn->data_q);
hci_dev_hold(hdev);
conn_hash_add(&hdev->conn_hash, handle, conn);
return conn;
}
/* ---- HCI notifications ---- */
static int hci_conn_del(struct hci_dev *hdev, struct hci_conn *conn)
{
DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);
conn_hash_del(&hdev->conn_hash, conn);
hci_dev_put(hdev);
/* Unacked frames */
hdev->acl_cnt += conn->sent;
skb_queue_purge(&conn->data_q);
kfree(conn);
return 0;
}
/* Drop all connections on the device */
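/* Each ACL link is reported to L2CAP via disconn_ind() with reason 0x16
 * (connection terminated by local host) before it is deleted; SCO links
 * are dropped silently. */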
static void hci_conn_hash_flush(struct hci_dev *hdev)
int hci_register_notifier(struct notifier_block *nb)
{
struct conn_hash *h = &hdev->conn_hash;
struct hci_proto *hp;
struct list_head *p;
DBG("hdev %s", hdev->name);
p = h->list.next;
while (p != &h->list) {
struct hci_conn *c;
c = list_entry(p, struct hci_conn, list);
p = p->next;
if (c->type == ACL_LINK) {
/* ACL link notify L2CAP layer */
if ((hp = GET_HPROTO(HCI_PROTO_L2CAP)) && hp->disconn_ind)
hp->disconn_ind(c, 0x16);
} else {
/* SCO link (no notification) */
}
hci_conn_del(hdev, c);
}
return notifier_chain_register(&hci_notifier, nb);
}
int hci_connect(struct hci_dev *hdev, bdaddr_t *bdaddr)
int hci_unregister_notifier(struct notifier_block *nb)
{
struct inquiry_cache *cache = &hdev->inq_cache;
struct inquiry_entry *e;
create_conn_cp cc;
__u16 clock_offset;
DBG("%s bdaddr %s", hdev->name, batostr(bdaddr));
if (!(hdev->flags & HCI_UP))
return -ENODEV;
inquiry_cache_lock_bh(cache);
if (!(e = __inquiry_cache_lookup(cache, bdaddr)) || inquiry_entry_age(e) > INQUIRY_ENTRY_AGE_MAX) {
cc.pscan_rep_mode = 0;
cc.pscan_mode = 0;
clock_offset = 0;
} else {
cc.pscan_rep_mode = e->info.pscan_rep_mode;
cc.pscan_mode = e->info.pscan_mode;
clock_offset = __le16_to_cpu(e->info.clock_offset) & 0x8000;
}
inquiry_cache_unlock_bh(cache);
bacpy(&cc.bdaddr, bdaddr);
cc.pkt_type = __cpu_to_le16(hdev->pkt_type);
cc.clock_offset = __cpu_to_le16(clock_offset);
if (lmp_rswitch_capable(hdev))
cc.role_switch = 0x01;
else
cc.role_switch = 0x00;
hci_send_cmd(hdev, OGF_LINK_CTL, OCF_CREATE_CONN, CREATE_CONN_CP_SIZE, &cc);
return 0;
return notifier_chain_unregister(&hci_notifier, nb);
}
int hci_disconnect(struct hci_conn *conn, __u8 reason)
void hci_notify(struct hci_dev *hdev, int event)
{
disconnect_cp dc;
DBG("conn %p handle %d", conn, conn->handle);
dc.handle = __cpu_to_le16(conn->handle);
dc.reason = reason;
hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_DISCONNECT, DISCONNECT_CP_SIZE, &dc);
return 0;
notifier_call_chain(&hci_notifier, event, hdev);
}
/* --------- HCI request handling ------------ */
static inline void hci_req_lock(struct hci_dev *hdev)
{
down(&hdev->req_lock);
}
static inline void hci_req_unlock(struct hci_dev *hdev)
{
up(&hdev->req_lock);
}
/* ---- HCI requests ---- */
static inline void hci_req_complete(struct hci_dev *hdev, int result)
void hci_req_complete(struct hci_dev *hdev, int result)
{
DBG("%s result 0x%2.2x", hdev->name, result);
BT_DBG("%s result 0x%2.2x", hdev->name, result);
if (hdev->req_status == HCI_REQ_PEND) {
hdev->req_result = result;
......@@ -344,9 +107,9 @@ static inline void hci_req_complete(struct hci_dev *hdev, int result)
}
}
static inline void hci_req_cancel(struct hci_dev *hdev, int err)
void hci_req_cancel(struct hci_dev *hdev, int err)
{
DBG("%s err 0x%2.2x", hdev->name, err);
BT_DBG("%s err 0x%2.2x", hdev->name, err);
if (hdev->req_status == HCI_REQ_PEND) {
hdev->req_result = err;
......@@ -356,23 +119,22 @@ static inline void hci_req_cancel(struct hci_dev *hdev, int err)
}
/* Execute request and wait for completion. */
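/* Request machinery: __hci_request() marks the device request as pending,
 * calls req() to queue the HCI command(s), and sleeps on req_wait_q for up
 * to 'timeout' jiffies. hci_req_complete() (called from the event handlers)
 * or hci_req_cancel() stores the result and wakes the waiter. */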
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
unsigned long opt, __u32 timeout)
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), unsigned long opt, __u32 timeout)
{
DECLARE_WAITQUEUE(wait, current);
int err = 0;
DBG("%s start", hdev->name);
BT_DBG("%s start", hdev->name);
hdev->req_status = HCI_REQ_PEND;
add_wait_queue(&hdev->req_wait_q, &wait);
current->state = TASK_INTERRUPTIBLE;
set_current_state(TASK_INTERRUPTIBLE);
req(hdev, opt);
schedule_timeout(timeout);
current->state = TASK_RUNNING;
set_current_state(TASK_RUNNING);
remove_wait_queue(&hdev->req_wait_q, &wait);
if (signal_pending(current))
......@@ -394,7 +156,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
hdev->req_status = hdev->req_result = 0;
DBG("%s end: err %d", hdev->name, err);
BT_DBG("%s end: err %d", hdev->name, err);
return err;
}
......@@ -412,10 +174,9 @@ static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *
return ret;
}
/* --------- HCI requests ---------- */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
DBG("%s %ld", hdev->name, opt);
BT_DBG("%s %ld", hdev->name, opt);
/* Reset device */
hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
......@@ -423,10 +184,10 @@ static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
set_event_flt_cp ec;
set_event_flt_cp ef;
__u16 param;
DBG("%s %ld", hdev->name, opt);
BT_DBG("%s %ld", hdev->name, opt);
/* Mandatory initialization */
......@@ -436,14 +197,27 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
/* Read Buffer Size (ACL mtu, max pkt, etc.) */
hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);
#if 0
/* Host buffer size */
{
host_buffer_size_cp bs;
bs.acl_mtu = __cpu_to_le16(HCI_MAX_ACL_SIZE);
bs.sco_mtu = HCI_MAX_SCO_SIZE;
bs.acl_max_pkt = __cpu_to_le16(0xffff);
bs.sco_max_pkt = __cpu_to_le16(0xffff);
hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE,
HOST_BUFFER_SIZE_CP_SIZE, &bs);
}
#endif
/* Read BD Address */
hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);
/* Optional initialization */
/* Clear Event Filters */
ec.flt_type = FLT_CLEAR_ALL;
hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, 1, &ec);
ef.flt_type = FLT_CLEAR_ALL;
hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, 1, &ef);
/* Page timeout ~20 secs */
param = __cpu_to_le16(0x8000);
......@@ -458,7 +232,7 @@ static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
__u8 scan = opt;
DBG("%s %x", hdev->name, scan);
BT_DBG("%s %x", hdev->name, scan);
/* Inquiry and Page scans */
hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
......@@ -468,116 +242,269 @@ static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
__u8 auth = opt;
DBG("%s %x", hdev->name, auth);
BT_DBG("%s %x", hdev->name, auth);
/* Authentication */
hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
inquiry_cp ic;
__u8 encrypt = opt;
DBG("%s", hdev->name);
BT_DBG("%s %x", hdev->name, encrypt);
/* Start Inquiry */
memcpy(&ic.lap, &ir->lap, 3);
ic.lenght = ir->length;
ic.num_rsp = ir->num_rsp;
hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, INQUIRY_CP_SIZE, &ic);
/* Authentication */
hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
/* HCI ioctl helpers */
int hci_dev_open(__u16 dev)
/* Get HCI device by index.
* Device is locked on return. */
struct hci_dev *hci_dev_get(int index)
{
struct hci_dev *hdev;
int ret = 0;
if (!(hdev = hci_dev_get(dev)))
return -ENODEV;
struct list_head *p;
DBG("%s %p", hdev->name, hdev);
BT_DBG("%d", index);
hci_req_lock(hdev);
if (index < 0)
return NULL;
if (hdev->flags & HCI_UP) {
ret = -EALREADY;
goto done;
spin_lock(&hdev_list_lock);
list_for_each(p, &hdev_list) {
hdev = list_entry(p, struct hci_dev, list);
if (hdev->id == index) {
hci_dev_hold(hdev);
goto done;
}
}
hdev = NULL;
done:
spin_unlock(&hdev_list_lock);
return hdev;
}
if (hdev->open(hdev)) {
ret = -EIO;
goto done;
}
/* ---- Inquiry support ---- */
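/* The inquiry cache keeps recent inquiry responses (bdaddr, page scan mode,
 * clock offset, ...) so a later connection setup can reuse them instead of
 * paging blindly; entries are aged via the jiffies timestamps below. */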
void inquiry_cache_flush(struct hci_dev *hdev)
{
struct inquiry_cache *cache = &hdev->inq_cache;
struct inquiry_entry *next = cache->list, *e;
if (hdev->flags & HCI_NORMAL) {
atomic_set(&hdev->cmd_cnt, 1);
hdev->flags |= HCI_INIT;
BT_DBG("cache %p", cache);
//__hci_request(hdev, hci_reset_req, 0, HZ);
ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
hdev->flags &= ~HCI_INIT;
cache->list = NULL;
while ((e = next)) {
next = e->next;
kfree(e);
}
}
if (!ret) {
hdev->flags |= HCI_UP;
hci_notify(hdev, HCI_DEV_UP);
} else {
/* Init failed, cleanup */
tasklet_kill(&hdev->rx_task);
tasklet_kill(&hdev->tx_task);
tasklet_kill(&hdev->cmd_task);
struct inquiry_entry *inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
struct inquiry_cache *cache = &hdev->inq_cache;
struct inquiry_entry *e;
skb_queue_purge(&hdev->cmd_q);
skb_queue_purge(&hdev->rx_q);
BT_DBG("cache %p, %s", cache, batostr(bdaddr));
if (hdev->flush)
hdev->flush(hdev);
for (e = cache->list; e; e = e->next)
if (!bacmp(&e->info.bdaddr, bdaddr))
break;
return e;
}
if (hdev->sent_cmd) {
kfree_skb(hdev->sent_cmd);
hdev->sent_cmd = NULL;
}
void inquiry_cache_update(struct hci_dev *hdev, inquiry_info *info)
{
struct inquiry_cache *cache = &hdev->inq_cache;
struct inquiry_entry *e;
hdev->close(hdev);
}
BT_DBG("cache %p, %s", cache, batostr(&info->bdaddr));
done:
hci_req_unlock(hdev);
hci_dev_put(hdev);
if (!(e = inquiry_cache_lookup(hdev, &info->bdaddr))) {
/* Entry not in the cache. Add new one. */
if (!(e = kmalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
return;
memset(e, 0, sizeof(struct inquiry_entry));
e->next = cache->list;
cache->list = e;
}
return ret;
memcpy(&e->info, info, sizeof(inquiry_info));
e->timestamp = jiffies;
cache->timestamp = jiffies;
}
int hci_dev_close(__u16 dev)
int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
struct hci_dev *hdev;
struct inquiry_cache *cache = &hdev->inq_cache;
inquiry_info *info = (inquiry_info *) buf;
struct inquiry_entry *e;
int copied = 0;
if (!(hdev = hci_dev_get(dev)))
return -ENODEV;
for (e = cache->list; e && copied < num; e = e->next, copied++)
memcpy(info++, &e->info, sizeof(inquiry_info));
DBG("%s %p", hdev->name, hdev);
BT_DBG("cache %p, copied %d", cache, copied);
return copied;
}
hci_req_cancel(hdev, ENODEV);
hci_req_lock(hdev);
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
inquiry_cp ic;
if (!(hdev->flags & HCI_UP))
goto done;
BT_DBG("%s", hdev->name);
/* Kill RX and TX tasks */
tasklet_kill(&hdev->rx_task);
tasklet_kill(&hdev->tx_task);
if (test_bit(HCI_INQUIRY, &hdev->flags))
return;
inquiry_cache_flush(&hdev->inq_cache);
/* Start Inquiry */
memcpy(&ic.lap, &ir->lap, 3);
ic.length = ir->length;
ic.num_rsp = ir->num_rsp;
hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, INQUIRY_CP_SIZE, &ic);
}
hci_conn_hash_flush(hdev);
int hci_inquiry(unsigned long arg)
{
struct hci_inquiry_req ir;
struct hci_dev *hdev;
int err = 0, do_inquiry = 0;
long timeo;
__u8 *buf, *ptr;
/* Clear flags */
hdev->flags &= HCI_SOCK;
hdev->flags |= HCI_NORMAL;
ptr = (void *) arg;
if (copy_from_user(&ir, ptr, sizeof(ir)))
return -EFAULT;
if (!(hdev = hci_dev_get(ir.dev_id)))
return -ENODEV;
hci_dev_lock_bh(hdev);
if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
ir.flags & IREQ_CACHE_FLUSH) {
inquiry_cache_flush(hdev);
do_inquiry = 1;
}
hci_dev_unlock_bh(hdev);
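/* ir.length is the inquiry length requested by user space; waiting
 * ir.length * 2 seconds leaves ample margin for the controller to report
 * inquiry completion. */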
timeo = ir.length * 2 * HZ;
if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
goto done;
/* cache_dump can't sleep. Therefore we allocate temp buffer and then
* copy it to the user space.
*/
if (!(buf = kmalloc(sizeof(inquiry_info) * ir.num_rsp, GFP_KERNEL))) {
err = -ENOMEM;
goto done;
}
hci_dev_lock_bh(hdev);
ir.num_rsp = inquiry_cache_dump(hdev, ir.num_rsp, buf);
hci_dev_unlock_bh(hdev);
BT_DBG("num_rsp %d", ir.num_rsp);
if (!verify_area(VERIFY_WRITE, ptr, sizeof(ir) +
(sizeof(inquiry_info) * ir.num_rsp))) {
copy_to_user(ptr, &ir, sizeof(ir));
ptr += sizeof(ir);
copy_to_user(ptr, buf, sizeof(inquiry_info) * ir.num_rsp);
} else
err = -EFAULT;
kfree(buf);
done:
hci_dev_put(hdev);
return err;
}
/* ---- HCI ioctl helpers ---- */
int hci_dev_open(__u16 dev)
{
struct hci_dev *hdev;
int ret = 0;
if (!(hdev = hci_dev_get(dev)))
return -ENODEV;
BT_DBG("%s %p", hdev->name, hdev);
hci_req_lock(hdev);
if (test_bit(HCI_UP, &hdev->flags)) {
ret = -EALREADY;
goto done;
}
if (hdev->open(hdev)) {
ret = -EIO;
goto done;
}
if (!test_bit(HCI_RAW, &hdev->flags)) {
atomic_set(&hdev->cmd_cnt, 1);
set_bit(HCI_INIT, &hdev->flags);
//__hci_request(hdev, hci_reset_req, 0, HZ);
ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
clear_bit(HCI_INIT, &hdev->flags);
}
if (!ret) {
set_bit(HCI_UP, &hdev->flags);
hci_notify(hdev, HCI_DEV_UP);
} else {
/* Init failed, cleanup */
tasklet_kill(&hdev->rx_task);
tasklet_kill(&hdev->tx_task);
tasklet_kill(&hdev->cmd_task);
skb_queue_purge(&hdev->cmd_q);
skb_queue_purge(&hdev->rx_q);
if (hdev->flush)
hdev->flush(hdev);
if (hdev->sent_cmd) {
kfree_skb(hdev->sent_cmd);
hdev->sent_cmd = NULL;
}
hdev->close(hdev);
hdev->flags = 0;
}
done:
hci_req_unlock(hdev);
hci_dev_put(hdev);
return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
BT_DBG("%s %p", hdev->name, hdev);
hci_req_cancel(hdev, ENODEV);
hci_req_lock(hdev);
if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
hci_req_unlock(hdev);
return 0;
}
/* Kill RX and TX tasks */
tasklet_kill(&hdev->rx_task);
tasklet_kill(&hdev->tx_task);
hci_dev_lock_bh(hdev);
inquiry_cache_flush(hdev);
hci_conn_hash_flush(hdev);
hci_dev_unlock_bh(hdev);
hci_notify(hdev, HCI_DEV_DOWN);
if (hdev->flush)
......@@ -586,9 +513,9 @@ int hci_dev_close(__u16 dev)
/* Reset device */
skb_queue_purge(&hdev->cmd_q);
atomic_set(&hdev->cmd_cnt, 1);
hdev->flags |= HCI_INIT;
__hci_request(hdev, hci_reset_req, 0, HZ);
hdev->flags &= ~HCI_INIT;
set_bit(HCI_INIT, &hdev->flags);
__hci_request(hdev, hci_reset_req, 0, HZ/4);
clear_bit(HCI_INIT, &hdev->flags);
/* Kill cmd task */
tasklet_kill(&hdev->cmd_task);
......@@ -605,17 +532,28 @@ int hci_dev_close(__u16 dev)
}
/* After this point our queues are empty
* and no tasks are scheduled.
*/
* and no tasks are scheduled. */
hdev->close(hdev);
done:
hci_req_unlock(hdev);
hci_dev_put(hdev);
/* Clear flags */
hdev->flags = 0;
hci_req_unlock(hdev);
return 0;
}
int hci_dev_close(__u16 dev)
{
struct hci_dev *hdev;
int err;
if (!(hdev = hci_dev_get(dev)))
return -ENODEV;
err = hci_dev_do_close(hdev);
hci_dev_put(hdev);
return err;
}
int hci_dev_reset(__u16 dev)
{
struct hci_dev *hdev;
......@@ -627,16 +565,17 @@ int hci_dev_reset(__u16 dev)
hci_req_lock(hdev);
tasklet_disable(&hdev->tx_task);
if (!(hdev->flags & HCI_UP))
if (!test_bit(HCI_UP, &hdev->flags))
goto done;
/* Drop queues */
skb_queue_purge(&hdev->rx_q);
skb_queue_purge(&hdev->cmd_q);
inquiry_cache_flush(&hdev->inq_cache);
hci_dev_lock_bh(hdev);
inquiry_cache_flush(hdev);
hci_conn_hash_flush(hdev);
hci_dev_unlock_bh(hdev);
if (hdev->flush)
hdev->flush(hdev);
......@@ -650,7 +589,6 @@ int hci_dev_reset(__u16 dev)
tasklet_enable(&hdev->tx_task);
hci_req_unlock(hdev);
hci_dev_put(hdev);
return ret;
}
......@@ -669,30 +607,11 @@ int hci_dev_reset_stat(__u16 dev)
return ret;
}
int hci_dev_setauth(unsigned long arg)
{
struct hci_dev *hdev;
struct hci_dev_req dr;
int ret = 0;
if (copy_from_user(&dr, (void *) arg, sizeof(dr)))
return -EFAULT;
if (!(hdev = hci_dev_get(dr.dev_id)))
return -ENODEV;
ret = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
hci_dev_put(hdev);
return ret;
}
int hci_dev_setscan(unsigned long arg)
int hci_dev_cmd(unsigned int cmd, unsigned long arg)
{
struct hci_dev *hdev;
struct hci_dev_req dr;
int ret = 0;
int err = 0;
if (copy_from_user(&dr, (void *) arg, sizeof(dr)))
return -EFAULT;
......@@ -700,45 +619,75 @@ int hci_dev_setscan(unsigned long arg)
if (!(hdev = hci_dev_get(dr.dev_id)))
return -ENODEV;
ret = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
hci_dev_put(hdev);
switch (cmd) {
case HCISETAUTH:
err = hci_request(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT);
break;
return ret;
}
case HCISETENCRYPT:
if (!lmp_encrypt_capable(hdev)) {
err = -EOPNOTSUPP;
break;
}
int hci_dev_setptype(unsigned long arg)
{
struct hci_dev *hdev;
struct hci_dev_req dr;
int ret = 0;
if (!test_bit(HCI_AUTH, &hdev->flags)) {
/* Auth must be enabled first */
err = hci_request(hdev, hci_auth_req,
dr.dev_opt, HCI_INIT_TIMEOUT);
if (err)
break;
}
err = hci_request(hdev, hci_encrypt_req,
dr.dev_opt, HCI_INIT_TIMEOUT);
break;
case HCISETSCAN:
err = hci_request(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
break;
case HCISETPTYPE:
hdev->pkt_type = (__u16) dr.dev_opt;
break;
case HCISETLINKPOL:
hdev->link_policy = (__u16) dr.dev_opt;
break;
if (copy_from_user(&dr, (void *) arg, sizeof(dr)))
return -EFAULT;
case HCISETLINKMODE:
hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
break;
if (!(hdev = hci_dev_get(dr.dev_id)))
return -ENODEV;
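/* For HCISETACLMTU/HCISETSCOMTU, dev_opt carries two 16-bit values packed
 * back to back: the first __u16 is the packet (buffer) count, the second
 * is the MTU. */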
case HCISETACLMTU:
hdev->acl_mtu = *((__u16 *)&dr.dev_opt + 1);
hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
break;
hdev->pkt_type = (__u16) dr.dev_opt;
case HCISETSCOMTU:
hdev->sco_mtu = *((__u16 *)&dr.dev_opt + 1);
hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
break;
default:
err = -EINVAL;
break;
}
hci_dev_put(hdev);
return ret;
return err;
}
int hci_dev_list(unsigned long arg)
int hci_get_dev_list(unsigned long arg)
{
struct hci_dev_list_req *dl;
struct hci_dev_req *dr;
struct hci_dev *hdev;
int i, n, size;
struct list_head *p;
int n = 0, size;
__u16 dev_num;
if (get_user(dev_num, (__u16 *) arg))
return -EFAULT;
/* Avoid long loop, overflow */
if (dev_num > 2048)
if (!dev_num)
return -EINVAL;
size = dev_num * sizeof(struct hci_dev_req) + sizeof(__u16);
......@@ -751,12 +700,13 @@ int hci_dev_list(unsigned long arg)
dr = dl->dev_req;
spin_lock_bh(&hdev_list_lock);
for (i = 0, n = 0; i < HCI_MAX_DEV && n < dev_num; i++) {
if ((hdev = hdev_list[i])) {
(dr + n)->dev_id = hdev->id;
(dr + n)->dev_opt = hdev->flags;
n++;
}
list_for_each(p, &hdev_list) {
struct hci_dev *hdev;
hdev = list_entry(p, struct hci_dev, list);
(dr + n)->dev_id = hdev->id;
(dr + n)->dev_opt = hdev->flags;
if (++n >= dev_num)
break;
}
spin_unlock_bh(&hdev_list_lock);
......@@ -764,11 +714,12 @@ int hci_dev_list(unsigned long arg)
size = n * sizeof(struct hci_dev_req) + sizeof(__u16);
copy_to_user((void *) arg, dl, size);
kfree(dl);
return 0;
}
int hci_dev_info(unsigned long arg)
int hci_get_dev_info(unsigned long arg)
{
struct hci_dev *hdev;
struct hci_dev_info di;
......@@ -786,9 +737,11 @@ int hci_dev_info(unsigned long arg)
di.flags = hdev->flags;
di.pkt_type = hdev->pkt_type;
di.acl_mtu = hdev->acl_mtu;
di.acl_max = hdev->acl_max;
di.acl_pkts = hdev->acl_pkts;
di.sco_mtu = hdev->sco_mtu;
di.sco_max = hdev->sco_max;
di.sco_pkts = hdev->sco_pkts;
di.link_policy = hdev->link_policy;
di.link_mode = hdev->link_mode;
memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
memcpy(&di.features, &hdev->features, sizeof(di.features));
......@@ -801,258 +754,149 @@ int hci_dev_info(unsigned long arg)
return err;
}
__u32 hci_dev_setmode(struct hci_dev *hdev, __u32 mode)
{
__u32 omode = hdev->flags & HCI_MODE_MASK;
hdev->flags &= ~HCI_MODE_MASK;
hdev->flags |= (mode & HCI_MODE_MASK);
return omode;
}
/* ---- Interface to HCI drivers ---- */
__u32 hci_dev_getmode(struct hci_dev *hdev)
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
return hdev->flags & HCI_MODE_MASK;
}
struct list_head *head = &hdev_list, *p;
int id = 0;
int hci_conn_list(unsigned long arg)
{
struct hci_conn_list_req req, *cl;
struct hci_conn_info *ci;
struct hci_dev *hdev;
struct list_head *p;
int n = 0, size;
BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
if (copy_from_user(&req, (void *) arg, sizeof(req)))
return -EFAULT;
if (!hdev->open || !hdev->close || !hdev->destruct)
return -EINVAL;
if (!(hdev = hci_dev_get(req.dev_id)))
return -ENODEV;
spin_lock_bh(&hdev_list_lock);
/* Set a limit to avoid overlong loops, and also numeric overflow - AC */
if (req.conn_num > 2048)
return -EINVAL;
/* Find first available device id */
list_for_each(p, &hdev_list) {
if (list_entry(p, struct hci_dev, list)->id != id)
break;
head = p; id++;
}
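/* hdev_list is kept sorted by id, so the loop above stops at the first gap
 * in the numbering; list_add() after 'head' inserts the new device there
 * and keeps the list ordered for the next registration. */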
size = req.conn_num * sizeof(struct hci_conn_info) + sizeof(req);
sprintf(hdev->name, "hci%d", id);
hdev->id = id;
list_add(&hdev->list, head);
if (!(cl = kmalloc(size, GFP_KERNEL)))
return -ENOMEM;
ci = cl->conn_info;
atomic_set(&hdev->refcnt, 1);
spin_lock_init(&hdev->lock);
hdev->flags = 0;
hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
hdev->link_mode = (HCI_LM_ACCEPT);
local_bh_disable();
conn_hash_lock(&hdev->conn_hash);
list_for_each(p, &hdev->conn_hash.list) {
register struct hci_conn *c;
c = list_entry(p, struct hci_conn, list);
tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
(ci + n)->handle = c->handle;
bacpy(&(ci + n)->bdaddr, &c->dst);
n++;
}
conn_hash_unlock(&hdev->conn_hash);
local_bh_enable();
skb_queue_head_init(&hdev->rx_q);
skb_queue_head_init(&hdev->cmd_q);
skb_queue_head_init(&hdev->raw_q);
cl->dev_id = hdev->id;
cl->conn_num = n;
size = n * sizeof(struct hci_conn_info) + sizeof(req);
init_waitqueue_head(&hdev->req_wait_q);
init_MUTEX(&hdev->req_lock);
hci_dev_put(hdev);
inquiry_cache_init(hdev);
if(copy_to_user((void *) arg, cl, size))
return -EFAULT;
return 0;
conn_hash_init(hdev);
memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
atomic_set(&hdev->promisc, 0);
hci_notify(hdev, HCI_DEV_REG);
MOD_INC_USE_COUNT;
spin_unlock_bh(&hdev_list_lock);
return id;
}
int hci_inquiry(unsigned long arg)
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
struct inquiry_cache *cache;
struct hci_inquiry_req ir;
struct hci_dev *hdev;
int err = 0, do_inquiry = 0;
long timeo;
__u8 *buf, *ptr;
BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
ptr = (void *) arg;
if (copy_from_user(&ir, ptr, sizeof(ir)))
return -EFAULT;
spin_lock_bh(&hdev_list_lock);
list_del(&hdev->list);
spin_unlock_bh(&hdev_list_lock);
if (!(hdev = hci_dev_get(ir.dev_id)))
return -ENODEV;
hci_dev_do_close(hdev);
cache = &hdev->inq_cache;
hci_notify(hdev, HCI_DEV_UNREG);
hci_dev_put(hdev);
inquiry_cache_lock(cache);
if (inquiry_cache_age(cache) > INQUIRY_CACHE_AGE_MAX || ir.flags & IREQ_CACHE_FLUSH) {
inquiry_cache_flush(cache);
do_inquiry = 1;
}
inquiry_cache_unlock(cache);
MOD_DEC_USE_COUNT;
return 0;
}
/* Limit inquiry time, also avoid overflows */
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
if(ir.length > 2048 || ir.num_rsp > 2048)
{
err = -EINVAL;
goto done;
if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
!test_bit(HCI_INIT, &hdev->flags)) ) {
kfree_skb(skb);
return -1;
}
timeo = ir.length * 2 * HZ;
if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
goto done;
BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);
/* cache_dump can't sleep. Therefore we allocate temp buffer and then
* copy it to the user space.
*/
if (!(buf = kmalloc(sizeof(inquiry_info) * ir.num_rsp, GFP_KERNEL))) {
err = -ENOMEM;
goto done;
}
ir.num_rsp = inquiry_cache_dump(cache, ir.num_rsp, buf);
DBG("num_rsp %d", ir.num_rsp);
if (!verify_area(VERIFY_WRITE, ptr, sizeof(ir) + (sizeof(inquiry_info) * ir.num_rsp))) {
copy_to_user(ptr, &ir, sizeof(ir));
ptr += sizeof(ir);
copy_to_user(ptr, buf, sizeof(inquiry_info) * ir.num_rsp);
} else
err = -EFAULT;
kfree(buf);
done:
hci_dev_put(hdev);
return err;
}
/* Interface to HCI drivers */
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
int i;
DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
/* Find free slot */
spin_lock_bh(&hdev_list_lock);
for (i = 0; i < HCI_MAX_DEV; i++) {
if (!hdev_list[i]) {
hdev_list[i] = hdev;
sprintf(hdev->name, "hci%d", i);
atomic_set(&hdev->refcnt, 0);
hdev->id = i;
hdev->flags = HCI_NORMAL;
hdev->pkt_type = (HCI_DM1 | HCI_DH1);
tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
skb_queue_head_init(&hdev->rx_q);
skb_queue_head_init(&hdev->cmd_q);
skb_queue_head_init(&hdev->raw_q);
init_waitqueue_head(&hdev->req_wait_q);
init_MUTEX(&hdev->req_lock);
inquiry_cache_init(&hdev->inq_cache);
conn_hash_init(&hdev->conn_hash);
memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
hci_notify(hdev, HCI_DEV_REG);
MOD_INC_USE_COUNT;
break;
}
}
spin_unlock_bh(&hdev_list_lock);
return (i == HCI_MAX_DEV) ? -1 : i;
}
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
int i;
DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
if (hdev->flags & HCI_UP)
hci_dev_close(hdev->id);
/* Find device slot */
spin_lock(&hdev_list_lock);
for (i = 0; i < HCI_MAX_DEV; i++) {
if (hdev_list[i] == hdev) {
hdev_list[i] = NULL;
MOD_DEC_USE_COUNT;
break;
}
}
spin_unlock(&hdev_list_lock);
hci_notify(hdev, HCI_DEV_UNREG);
/* Sleep while device is in use */
while (atomic_read(&hdev->refcnt)) {
int sleep_cnt = 100;
/* Incoming skb */
bluez_cb(skb)->incomming = 1;
DBG("%s sleeping on lock %d", hdev->name, atomic_read(&hdev->refcnt));
sleep_on_timeout(&hdev->req_wait_q, HZ*10);
if (!(--sleep_cnt))
break;
}
/* Time stamp */
do_gettimeofday(&skb->stamp);
/* Queue frame for rx task */
skb_queue_tail(&hdev->rx_q, skb);
hci_sched_rx(hdev);
return 0;
}
/* Interface to upper protocols */
/* ---- Interface to upper protocols ---- */
/* Register/Unregister protocols.
* hci_task_lock is used to ensure that no tasks are running.
*/
int hci_register_proto(struct hci_proto *hproto)
* hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
int err = 0;
DBG("%p name %s", hproto, hproto->name);
BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
if (hproto->id >= HCI_MAX_PROTO)
if (hp->id >= HCI_MAX_PROTO)
return -EINVAL;
write_lock_bh(&hci_task_lock);
if (!hproto_list[hproto->id])
hproto_list[hproto->id] = hproto;
if (!hci_proto[hp->id])
hci_proto[hp->id] = hp;
else
err = -1;
err = -EEXIST;
write_unlock_bh(&hci_task_lock);
return err;
}
int hci_unregister_proto(struct hci_proto *hproto)
int hci_unregister_proto(struct hci_proto *hp)
{
int err = 0;
DBG("%p name %s", hproto, hproto->name);
BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
if (hproto->id > HCI_MAX_PROTO)
if (hp->id >= HCI_MAX_PROTO)
return -EINVAL;
write_lock_bh(&hci_task_lock);
if (hproto_list[hproto->id])
hproto_list[hproto->id] = NULL;
if (hci_proto[hp->id])
hci_proto[hp->id] = NULL;
else
err = -ENOENT;
......@@ -1070,10 +914,14 @@ static int hci_send_frame(struct sk_buff *skb)
return -ENODEV;
}
DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);
BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);
if (atomic_read(&hdev->promisc)) {
/* Time stamp */
do_gettimeofday(&skb->stamp);
if (hdev->flags & HCI_SOCK)
hci_send_to_sock(hdev, skb);
}
/* Get rid of skb owner, prior to sending to the driver. */
skb_orphan(skb);
......@@ -1081,94 +929,6 @@ static int hci_send_frame(struct sk_buff *skb)
return hdev->send(skb);
}
/* Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
struct conn_hash *h = &hdev->conn_hash;
struct hci_conn *conn = NULL;
int num = 0, min = 0xffff;
struct list_head *p;
conn_hash_lock(h);
list_for_each(p, &h->list) {
register struct hci_conn *c;
c = list_entry(p, struct hci_conn, list);
if (c->type != type || skb_queue_empty(&c->data_q))
continue;
num++;
if (c->sent < min) {
min = c->sent;
conn = c;
}
}
conn_hash_unlock(h);
if (conn) {
int q = hdev->acl_cnt / num;
*quote = q ? q : 1;
} else
*quote = 0;
DBG("conn %p quote %d", conn, *quote);
return conn;
}
static inline void hci_sched_acl(struct hci_dev *hdev)
{
struct hci_conn *conn;
struct sk_buff *skb;
int quote;
DBG("%s", hdev->name);
while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
while (quote && (skb = skb_dequeue(&conn->data_q))) {
DBG("skb %p len %d", skb, skb->len);
hci_send_frame(skb);
conn->sent++;
hdev->acl_cnt--;
quote--;
}
}
}
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
/* FIXME: For now we queue SCO packets to the raw queue
while (hdev->sco_cnt && (skb = skb_dequeue(&conn->data_q))) {
hci_send_frame(skb);
conn->sco_sent++;
hdev->sco_cnt--;
}
*/
}
/* Get data from the previously sent command */
static void * hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
{
hci_command_hdr *hc;
if (!hdev->sent_cmd)
return NULL;
hc = (void *) hdev->sent_cmd->data;
if (hc->opcode != __cpu_to_le16(cmd_opcode_pack(ogf, ocf)))
return NULL;
DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);
return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send raw HCI frame */
int hci_send_raw(struct sk_buff *skb)
{
......@@ -1179,9 +939,9 @@ int hci_send_raw(struct sk_buff *skb)
return -ENODEV;
}
DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);
BT_DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);
if (hdev->flags & HCI_NORMAL) {
if (!test_bit(HCI_RAW, &hdev->flags)) {
/* Queue frame according to its type */
switch (skb->pkt_type) {
case HCI_COMMAND_PKT:
......@@ -1210,10 +970,10 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *p
hci_command_hdr *hc;
struct sk_buff *skb;
DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);
BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);
if (!(skb = bluez_skb_alloc(len, GFP_ATOMIC))) {
ERR("%s Can't allocate memory for HCI command", hdev->name);
BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
return -ENOMEM;
}
......@@ -1224,7 +984,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *p
if (plen)
memcpy(skb_put(skb, plen), param, plen);
DBG("skb len %d", skb->len);
BT_DBG("skb len %d", skb->len);
skb->pkt_type = HCI_COMMAND_PKT;
skb->dev = (void *) hdev;
......@@ -1234,10 +994,28 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *p
return 0;
}
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
{
hci_command_hdr *hc;
if (!hdev->sent_cmd)
return NULL;
hc = (void *) hdev->sent_cmd->data;
if (hc->opcode != __cpu_to_le16(cmd_opcode_pack(ogf, ocf)))
return NULL;
BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);
return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
int len = skb->len;
int len = skb->len;
hci_acl_hdr *ah;
ah = (hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
......@@ -1252,7 +1030,7 @@ int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
struct hci_dev *hdev = conn->hdev;
struct sk_buff *list;
DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
skb->dev = (void *) hdev;
skb->pkt_type = HCI_ACLDATA_PKT;
......@@ -1260,12 +1038,12 @@ int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
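/* Fragmented ACL data arrives as an skb with a frag_list: the head buffer
 * keeps the caller's flags (typically ACL_START), while each continuation
 * fragment below gets its own ACL header with ACL_CONT set. */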
if (!(list = skb_shinfo(skb)->frag_list)) {
/* Non fragmented */
DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
skb_queue_tail(&conn->data_q, skb);
} else {
/* Fragmented */
DBG("%s frag %p len %d", hdev->name, skb, skb->len);
BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
skb_shinfo(skb)->frag_list = NULL;
......@@ -1280,7 +1058,7 @@ int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
skb->pkt_type = HCI_ACLDATA_PKT;
hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
DBG("%s frag %p len %d", hdev->name, skb, skb->len);
BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
__skb_queue_tail(&conn->data_q, skb);
} while (list);
......@@ -1298,7 +1076,7 @@ int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
struct hci_dev *hdev = conn->hdev;
hci_sco_hdr hs;
DBG("%s len %d", hdev->name, skb->len);
BT_DBG("%s len %d", hdev->name, skb->len);
if (skb->len > hdev->sco_mtu) {
kfree_skb(skb);
......@@ -1315,544 +1093,120 @@ int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
skb->pkt_type = HCI_SCODATA_PKT;
skb_queue_tail(&conn->data_q, skb);
hci_sched_tx(hdev);
return 0;
}
/* Handle HCI Event packets */
/* Command Complete OGF LINK_CTL */
static void hci_cc_link_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb)
{
DBG("%s ocf 0x%x", hdev->name, ocf);
switch (ocf) {
default:
DBG("%s Command complete: ogf LINK_CTL ocf %x", hdev->name, ocf);
break;
};
}
/* Command Complete OGF LINK_POLICY */
static void hci_cc_link_policy(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb)
{
DBG("%s ocf 0x%x", hdev->name, ocf);
switch (ocf) {
default:
DBG("%s: Command complete: ogf LINK_POLICY ocf %x", hdev->name, ocf);
break;
};
}
/* Command Complete OGF HOST_CTL */
static void hci_cc_host_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb)
{
__u8 status, param;
void *sent;
DBG("%s ocf 0x%x", hdev->name, ocf);
switch (ocf) {
case OCF_RESET:
status = *((__u8 *) skb->data);
hci_req_complete(hdev, status);
break;
case OCF_SET_EVENT_FLT:
status = *((__u8 *) skb->data);
if (status) {
DBG("%s SET_EVENT_FLT failed %d", hdev->name, status);
} else {
DBG("%s SET_EVENT_FLT succeseful", hdev->name);
}
break;
case OCF_WRITE_AUTH_ENABLE:
if (!(sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE)))
break;
status = *((__u8 *) skb->data);
param = *((__u8 *) sent);
/* ---- HCI TX task (outgoing data) ---- */
if (!status) {
if (param == AUTH_ENABLED)
hdev->flags |= HCI_AUTH;
else
hdev->flags &= ~HCI_AUTH;
}
hci_req_complete(hdev, status);
break;
case OCF_WRITE_CA_TIMEOUT:
status = *((__u8 *) skb->data);
if (status) {
DBG("%s OCF_WRITE_CA_TIMEOUT failed %d", hdev->name, status);
} else {
DBG("%s OCF_WRITE_CA_TIMEOUT succeseful", hdev->name);
}
break;
case OCF_WRITE_PG_TIMEOUT:
status = *((__u8 *) skb->data);
if (status) {
DBG("%s OCF_WRITE_PG_TIMEOUT failed %d", hdev->name, status);
} else {
DBG("%s: OCF_WRITE_PG_TIMEOUT succeseful", hdev->name);
}
break;
case OCF_WRITE_SCAN_ENABLE:
if (!(sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE)))
break;
status = *((__u8 *) skb->data);
param = *((__u8 *) sent);
DBG("param 0x%x", param);
if (!status) {
switch (param) {
case IS_ENA_PS_ENA:
hdev->flags |= HCI_PSCAN | HCI_ISCAN;
break;
case IS_ENA_PS_DIS:
hdev->flags &= ~HCI_PSCAN;
hdev->flags |= HCI_ISCAN;
break;
case IS_DIS_PS_ENA:
hdev->flags &= ~HCI_ISCAN;
hdev->flags |= HCI_PSCAN;
break;
default:
hdev->flags &= ~(HCI_ISCAN | HCI_PSCAN);
break;
};
}
hci_req_complete(hdev, status);
break;
default:
DBG("%s Command complete: ogf HOST_CTL ocf %x", hdev->name, ocf);
break;
};
}
/* Command Complete OGF INFO_PARAM */
static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb)
{
read_local_features_rp *lf;
read_buffer_size_rp *bs;
read_bd_addr_rp *ba;
DBG("%s ocf 0x%x", hdev->name, ocf);
switch (ocf) {
case OCF_READ_LOCAL_FEATURES:
lf = (read_local_features_rp *) skb->data;
if (lf->status) {
DBG("%s READ_LOCAL_FEATURES failed %d", hdev->name, lf->status);
break;
}
memcpy(hdev->features, lf->features, sizeof(hdev->features));
/* Adjust default settings according to features
* supported by device. */
if (hdev->features[0] & LMP_3SLOT)
hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
if (hdev->features[0] & LMP_5SLOT)
hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
DBG("%s: features 0x%x 0x%x 0x%x", hdev->name, lf->features[0], lf->features[1], lf->features[2]);
break;
case OCF_READ_BUFFER_SIZE:
bs = (read_buffer_size_rp *) skb->data;
if (bs->status) {
DBG("%s READ_BUFFER_SIZE failed %d", hdev->name, bs->status);
break;
}
hdev->acl_mtu = __le16_to_cpu(bs->acl_mtu);
hdev->sco_mtu = bs->sco_mtu;
hdev->acl_max = hdev->acl_cnt = __le16_to_cpu(bs->acl_max_pkt);
hdev->sco_max = hdev->sco_cnt = __le16_to_cpu(bs->sco_max_pkt);
DBG("%s mtu: acl %d, sco %d max_pkt: acl %d, sco %d", hdev->name,
hdev->acl_mtu, hdev->sco_mtu, hdev->acl_max, hdev->sco_max);
break;
case OCF_READ_BD_ADDR:
ba = (read_bd_addr_rp *) skb->data;
if (!ba->status) {
bacpy(&hdev->bdaddr, &ba->bdaddr);
} else {
DBG("%s: READ_BD_ADDR failed %d", hdev->name, ba->status);
}
hci_req_complete(hdev, ba->status);
break;
default:
DBG("%s Command complete: ogf INFO_PARAM ocf %x", hdev->name, ocf);
break;
};
}
/* Command Status OGF LINK_CTL */
static void hci_cs_link_ctl(struct hci_dev *hdev, __u16 ocf, __u8 status)
/* HCI Connection scheduler */
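/* hci_low_sent() implements a simple fairness policy: among connections of
 * the requested type with queued data, pick the one with the fewest unacked
 * packets and give it an equal share of the free ACL slots
 * (quote = acl_cnt / num, at least 1). */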
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
struct hci_proto * hp;
DBG("%s ocf 0x%x", hdev->name, ocf);
switch (ocf) {
case OCF_CREATE_CONN:
if (status) {
create_conn_cp *cc = hci_sent_cmd_data(hdev, OGF_LINK_CTL, OCF_CREATE_CONN);
struct conn_hash *h = &hdev->conn_hash;
struct hci_conn *conn = NULL;
int num = 0, min = 0xffff;
struct list_head *p;
if (!cc)
break;
/* We don't have to lock device here. Connections are always
* added and removed with TX task disabled. */
list_for_each(p, &h->list) {
struct hci_conn *c;
DBG("%s Create connection error: status 0x%x %s", hdev->name,
status, batostr(&cc->bdaddr));
c = list_entry(p, struct hci_conn, list);
/* Notify upper protocols */
if ((hp = GET_HPROTO(HCI_PROTO_L2CAP)) && hp->connect_cfm) {
tasklet_disable(&hdev->tx_task);
hp->connect_cfm(hdev, &cc->bdaddr, status, NULL);
tasklet_enable(&hdev->tx_task);
}
}
break;
if (c->type != type || c->state != BT_CONNECTED
|| skb_queue_empty(&c->data_q))
continue;
num++;
case OCF_INQUIRY:
if (status) {
DBG("%s Inquiry error: status 0x%x", hdev->name, status);
hci_req_complete(hdev, status);
if (c->sent < min || type == SCO_LINK) {
min = c->sent;
conn = c;
}
break;
default:
DBG("%s Command status: ogf LINK_CTL ocf %x", hdev->name, ocf);
break;
};
}
/* Command Status OGF LINK_POLICY */
static void hci_cs_link_policy(struct hci_dev *hdev, __u16 ocf, __u8 status)
{
DBG("%s ocf 0x%x", hdev->name, ocf);
switch (ocf) {
default:
DBG("%s Command status: ogf HOST_POLICY ocf %x", hdev->name, ocf);
break;
};
}
/* Command Status OGF HOST_CTL */
static void hci_cs_host_ctl(struct hci_dev *hdev, __u16 ocf, __u8 status)
{
DBG("%s ocf 0x%x", hdev->name, ocf);
switch (ocf) {
default:
DBG("%s Command status: ogf HOST_CTL ocf %x", hdev->name, ocf);
break;
};
}
}
/* Command Status OGF INFO_PARAM */
static void hci_cs_info_param(struct hci_dev *hdev, __u16 ocf, __u8 status)
{
DBG("%s: hci_cs_info_param: ocf 0x%x", hdev->name, ocf);
if (conn) {
int q = hdev->acl_cnt / num;
*quote = q ? q : 1;
} else
*quote = 0;
switch (ocf) {
default:
DBG("%s Command status: ogf INFO_PARAM ocf %x", hdev->name, ocf);
break;
};
BT_DBG("conn %p quote %d", conn, *quote);
return conn;
}
/* Inquiry Complete */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
static inline void hci_sched_acl(struct hci_dev *hdev)
{
__u8 status = *((__u8 *) skb->data);
DBG("%s status %d", hdev->name, status);
struct hci_conn *conn;
struct sk_buff *skb;
int quote;
hci_req_complete(hdev, status);
}
BT_DBG("%s", hdev->name);
/* Inquiry Result */
static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
inquiry_info *info = (inquiry_info *) (skb->data + 1);
int num_rsp = *((__u8 *) skb->data);
DBG("%s num_rsp %d", hdev->name, num_rsp);
if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > HZ*5) {
BT_ERR("%s ACL tx timeout", hdev->name);
hdev->acl_cnt++;
}
while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
while (quote && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
for (; num_rsp; num_rsp--)
inquiry_cache_update(&hdev->inq_cache, info++);
}
hci_send_frame(skb);
hdev->acl_last_tx = jiffies;
/* Connect Request */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
evt_conn_request *cr = (evt_conn_request *) skb->data;
struct hci_proto *hp;
accept_conn_req_cp ac;
int accept = 0;
DBG("%s Connection request: %s type 0x%x", hdev->name, batostr(&cr->bdaddr), cr->link_type);
/* Notify upper protocols */
if (cr->link_type == ACL_LINK) {
/* ACL link notify L2CAP */
if ((hp = GET_HPROTO(HCI_PROTO_L2CAP)) && hp->connect_ind) {
tasklet_disable(&hdev->tx_task);
accept = hp->connect_ind(hdev, &cr->bdaddr);
tasklet_enable(&hdev->tx_task);
conn->sent++;
hdev->acl_cnt--;
quote--;
}
} else {
/* SCO link (no notification) */
/* FIXME: Should we accept it here or let the requester (app) accept it? */
accept = 1;
}
if (accept) {
/* Connection accepted by upper layer */
bacpy(&ac.bdaddr, &cr->bdaddr);
ac.role = 0x01; /* Remain slave */
hci_send_cmd(hdev, OGF_LINK_CTL, OCF_ACCEPT_CONN_REQ, ACCEPT_CONN_REQ_CP_SIZE, &ac);
} else {
/* Connection rejected by upper layer */
/* FIXME:
* Should we use HCI reject here ?
*/
return;
}
}
/* Connect Complete */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
evt_conn_complete *cc = (evt_conn_complete *) skb->data;
struct hci_conn *conn = NULL;
struct hci_proto *hp;
DBG("%s", hdev->name);
tasklet_disable(&hdev->tx_task);
if (!cc->status)
conn = hci_conn_add(hdev, __le16_to_cpu(cc->handle), cc->link_type, &cc->bdaddr);
/* Notify upper protocols */
if (cc->link_type == ACL_LINK) {
/* ACL link notify L2CAP layer */
if ((hp = GET_HPROTO(HCI_PROTO_L2CAP)) && hp->connect_cfm)
hp->connect_cfm(hdev, &cc->bdaddr, cc->status, conn);
} else {
/* SCO link (no notification) */
}
struct hci_conn *conn;
struct sk_buff *skb;
int quote;
tasklet_enable(&hdev->tx_task);
}
BT_DBG("%s", hdev->name);
/* Disconnect Complete */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
evt_disconn_complete *dc = (evt_disconn_complete *) skb->data;
struct hci_conn *conn = NULL;
struct hci_proto *hp;
__u16 handle = __le16_to_cpu(dc->handle);
DBG("%s", hdev->name);
while ((conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
while (quote && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
if (!dc->status && (conn = conn_hash_lookup(&hdev->conn_hash, handle))) {
tasklet_disable(&hdev->tx_task);
hci_send_frame(skb);
/* Notify upper protocols */
if (conn->type == ACL_LINK) {
/* ACL link notify L2CAP layer */
if ((hp = GET_HPROTO(HCI_PROTO_L2CAP)) && hp->disconn_ind)
hp->disconn_ind(conn, dc->reason);
} else {
/* SCO link (no notification) */
//conn->sent++;
//hdev->sco_cnt--;
quote--;
}
hci_conn_del(hdev, conn);
tasklet_enable(&hdev->tx_task);
}
}
/* Number of completed packets */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
static void hci_tx_task(unsigned long arg)
{
evt_num_comp_pkts *nc = (evt_num_comp_pkts *) skb->data;
__u16 *ptr;
int i;
skb_pull(skb, EVT_NUM_COMP_PKTS_SIZE);
DBG("%s num_hndl %d", hdev->name, nc->num_hndl);
struct hci_dev *hdev = (struct hci_dev *) arg;
struct sk_buff *skb;
if (skb->len < nc->num_hndl * 4) {
DBG("%s bad parameters", hdev->name);
return;
}
read_lock(&hci_task_lock);
tasklet_disable(&hdev->tx_task);
BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
for (i = 0, ptr = (__u16 *) skb->data; i < nc->num_hndl; i++) {
struct hci_conn *conn;
__u16 handle, count;
/* Schedule queues and send stuff to HCI driver */
handle = __le16_to_cpu(get_unaligned(ptr++));
count = __le16_to_cpu(get_unaligned(ptr++));
hci_sched_acl(hdev);
hdev->acl_cnt += count;
hci_sched_sco(hdev);
if ((conn = conn_hash_lookup(&hdev->conn_hash, handle)))
conn->sent -= count;
}
/* Send next queued raw (unknown type) packet */
while ((skb = skb_dequeue(&hdev->raw_q)))
hci_send_frame(skb);
tasklet_enable(&hdev->tx_task);
hci_sched_tx(hdev);
read_unlock(&hci_task_lock);
}
static inline void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
hci_event_hdr *he = (hci_event_hdr *) skb->data;
evt_cmd_status *cs;
evt_cmd_complete *ec;
__u16 opcode, ocf, ogf;
skb_pull(skb, HCI_EVENT_HDR_SIZE);
DBG("%s evt 0x%x", hdev->name, he->evt);
switch (he->evt) {
case EVT_NUM_COMP_PKTS:
hci_num_comp_pkts_evt(hdev, skb);
break;
case EVT_INQUIRY_COMPLETE:
hci_inquiry_complete_evt(hdev, skb);
break;
case EVT_INQUIRY_RESULT:
hci_inquiry_result_evt(hdev, skb);
break;
case EVT_CONN_REQUEST:
hci_conn_request_evt(hdev, skb);
break;
case EVT_CONN_COMPLETE:
hci_conn_complete_evt(hdev, skb);
break;
case EVT_DISCONN_COMPLETE:
hci_disconn_complete_evt(hdev, skb);
break;
case EVT_CMD_STATUS:
cs = (evt_cmd_status *) skb->data;
skb_pull(skb, EVT_CMD_STATUS_SIZE);
opcode = __le16_to_cpu(cs->opcode);
ogf = cmd_opcode_ogf(opcode);
ocf = cmd_opcode_ocf(opcode);
switch (ogf) {
case OGF_INFO_PARAM:
hci_cs_info_param(hdev, ocf, cs->status);
break;
case OGF_HOST_CTL:
hci_cs_host_ctl(hdev, ocf, cs->status);
break;
case OGF_LINK_CTL:
hci_cs_link_ctl(hdev, ocf, cs->status);
break;
case OGF_LINK_POLICY:
hci_cs_link_policy(hdev, ocf, cs->status);
break;
default:
DBG("%s Command Status OGF %x", hdev->name, ogf);
break;
};
if (cs->ncmd) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
hci_sched_cmd(hdev);
}
break;
case EVT_CMD_COMPLETE:
ec = (evt_cmd_complete *) skb->data;
skb_pull(skb, EVT_CMD_COMPLETE_SIZE);
opcode = __le16_to_cpu(ec->opcode);
ogf = cmd_opcode_ogf(opcode);
ocf = cmd_opcode_ocf(opcode);
switch (ogf) {
case OGF_INFO_PARAM:
hci_cc_info_param(hdev, ocf, skb);
break;
case OGF_HOST_CTL:
hci_cc_host_ctl(hdev, ocf, skb);
break;
case OGF_LINK_CTL:
hci_cc_link_ctl(hdev, ocf, skb);
break;
case OGF_LINK_POLICY:
hci_cc_link_policy(hdev, ocf, skb);
break;
default:
DBG("%s Command Completed OGF %x", hdev->name, ogf);
break;
};
if (ec->ncmd) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
hci_sched_cmd(hdev);
}
break;
};
kfree_skb(skb);
hdev->stat.evt_rx++;
}
/* ----- HCI RX task (incoming data processing) ----- */
/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
......@@ -1867,51 +1221,86 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
flags = acl_flags(handle);
handle = acl_handle(handle);
DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
hdev->stat.acl_rx++;
if ((conn = conn_hash_lookup(&hdev->conn_hash, handle))) {
hci_dev_lock(hdev);
conn = conn_hash_lookup_handle(hdev, handle);
hci_dev_unlock(hdev);
if (conn) {
register struct hci_proto *hp;
/* Send to upper protocol */
if ((hp = GET_HPROTO(HCI_PROTO_L2CAP)) && hp->recv_acldata) {
if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
hp->recv_acldata(conn, skb, flags);
goto sent;
return;
}
} else {
ERR("%s ACL packet for unknown connection handle %d", hdev->name, handle);
BT_ERR("%s ACL packet for unknown connection handle %d",
hdev->name, handle);
}
kfree_skb(skb);
sent:
hdev->stat.acl_rx++;
}
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
DBG("%s len %d", hdev->name, skb->len);
hci_sco_hdr *sh = (void *) skb->data;
struct hci_conn *conn;
__u16 handle;
skb_pull(skb, HCI_SCO_HDR_SIZE);
handle = __le16_to_cpu(sh->handle);
BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
kfree_skb(skb);
hdev->stat.sco_rx++;
hci_dev_lock(hdev);
conn = conn_hash_lookup_handle(hdev, handle);
hci_dev_unlock(hdev);
if (conn) {
register struct hci_proto *hp;
/* Send to upper protocol */
if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
hp->recv_scodata(conn, skb);
return;
}
} else {
BT_ERR("%s SCO packet for unknown connection handle %d",
hdev->name, handle);
}
kfree_skb(skb);
}
/* ----- HCI tasks ----- */
void hci_rx_task(unsigned long arg)
{
struct hci_dev *hdev = (struct hci_dev *) arg;
struct sk_buff *skb;
DBG("%s", hdev->name);
BT_DBG("%s", hdev->name);
read_lock(&hci_task_lock);
while ((skb = skb_dequeue(&hdev->rx_q))) {
if (hdev->flags & HCI_SOCK) {
if (atomic_read(&hdev->promisc)) {
/* Send copy to the sockets */
hci_send_to_sock(hdev, skb);
}
if (hdev->flags & HCI_INIT) {
if (test_bit(HCI_RAW, &hdev->flags)) {
kfree_skb(skb);
continue;
}
if (test_bit(HCI_INIT, &hdev->flags)) {
/* Don't process data packets in this state. */
switch (skb->pkt_type) {
case HCI_ACLDATA_PKT:
......@@ -1921,64 +1310,43 @@ void hci_rx_task(unsigned long arg)
};
}
if (hdev->flags & HCI_NORMAL) {
/* Process frame */
switch (skb->pkt_type) {
case HCI_EVENT_PKT:
hci_event_packet(hdev, skb);
break;
/* Process frame */
switch (skb->pkt_type) {
case HCI_EVENT_PKT:
hci_event_packet(hdev, skb);
break;
case HCI_ACLDATA_PKT:
DBG("%s ACL data packet", hdev->name);
hci_acldata_packet(hdev, skb);
break;
case HCI_ACLDATA_PKT:
BT_DBG("%s ACL data packet", hdev->name);
hci_acldata_packet(hdev, skb);
break;
case HCI_SCODATA_PKT:
DBG("%s SCO data packet", hdev->name);
hci_scodata_packet(hdev, skb);
break;
case HCI_SCODATA_PKT:
BT_DBG("%s SCO data packet", hdev->name);
hci_scodata_packet(hdev, skb);
break;
default:
kfree_skb(skb);
break;
};
} else {
default:
kfree_skb(skb);
break;
}
}
read_unlock(&hci_task_lock);
}
static void hci_tx_task(unsigned long arg)
{
struct hci_dev *hdev = (struct hci_dev *) arg;
struct sk_buff *skb;
read_lock(&hci_task_lock);
DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
/* Schedule queues and send stuff to HCI driver */
hci_sched_acl(hdev);
hci_sched_sco(hdev);
/* Send next queued raw (unknown type) packet */
while ((skb = skb_dequeue(&hdev->raw_q)))
hci_send_frame(skb);
read_unlock(&hci_task_lock);
}
static void hci_cmd_task(unsigned long arg)
{
struct hci_dev *hdev = (struct hci_dev *) arg;
struct sk_buff *skb;
DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
BT_ERR("%s command tx timeout", hdev->name);
atomic_set(&hdev->cmd_cnt, 1);
}
/* Send queued commands */
if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
if (hdev->sent_cmd)
......@@ -1987,6 +1355,7 @@ static void hci_cmd_task(unsigned long arg)
if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
atomic_dec(&hdev->cmd_cnt);
hci_send_frame(skb);
hdev->cmd_last_tx = jiffies;
} else {
skb_queue_head(&hdev->cmd_q, skb);
hci_sched_cmd(hdev);
......@@ -1994,27 +1363,7 @@ static void hci_cmd_task(unsigned long arg)
}
}
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
if (!hdev || !(hdev->flags & (HCI_UP | HCI_INIT))) {
kfree_skb(skb);
return -1;
}
DBG("%s type %d len %d", hdev->name, skb->pkt_type, skb->len);
/* Incoming skb */
bluez_cb(skb)->incomming = 1;
/* Queue frame for rx task */
skb_queue_tail(&hdev->rx_q, skb);
hci_sched_rx(hdev);
return 0;
}
/* ---- Initialization ---- */
int hci_core_init(void)
{
......@@ -2028,5 +1377,3 @@ int hci_core_cleanup(void)
{
return 0;
}
MODULE_LICENSE("GPL");
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/*
* HCI Events.
*
* $Id: hci_event.c,v 1.3 2002/04/17 17:37:16 maxk Exp $
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#ifndef HCI_CORE_DEBUG
#undef BT_DBG
#define BT_DBG( A... )
#endif
/* Handle HCI Event packets */
/* Command Complete OGF LINK_CTL */
static void hci_cc_link_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb)
{
BT_DBG("%s ocf 0x%x", hdev->name, ocf);
switch (ocf) {
default:
BT_DBG("%s Command complete: ogf LINK_CTL ocf %x", hdev->name, ocf);
break;
};
}
/* Command Complete OGF LINK_POLICY */
static void hci_cc_link_policy(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb)
{
struct hci_conn *conn;
role_discovery_rp *rd;
BT_DBG("%s ocf 0x%x", hdev->name, ocf);
switch (ocf) {
case OCF_ROLE_DISCOVERY:
rd = (void *) skb->data;
if (rd->status)
break;
hci_dev_lock(hdev);
conn = conn_hash_lookup_handle(hdev, __le16_to_cpu(rd->handle));
if (conn) {
if (rd->role)
conn->link_mode &= ~HCI_LM_MASTER;
else
conn->link_mode |= HCI_LM_MASTER;
}
hci_dev_unlock(hdev);
break;
default:
BT_DBG("%s: Command complete: ogf LINK_POLICY ocf %x",
hdev->name, ocf);
break;
};
}
/* Command Complete OGF HOST_CTL */
static void hci_cc_host_ctl(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb)
{
__u8 status, param;
void *sent;
BT_DBG("%s ocf 0x%x", hdev->name, ocf);
switch (ocf) {
case OCF_RESET:
status = *((__u8 *) skb->data);
hci_req_complete(hdev, status);
break;
case OCF_SET_EVENT_FLT:
status = *((__u8 *) skb->data);
if (status) {
BT_DBG("%s SET_EVENT_FLT failed %d", hdev->name, status);
} else {
BT_DBG("%s SET_EVENT_FLT succeseful", hdev->name);
}
break;
case OCF_WRITE_AUTH_ENABLE:
sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE);
if (!sent)
break;
status = *((__u8 *) skb->data);
param = *((__u8 *) sent);
if (!status) {
if (param == AUTH_ENABLED)
set_bit(HCI_AUTH, &hdev->flags);
else
clear_bit(HCI_AUTH, &hdev->flags);
}
hci_req_complete(hdev, status);
break;
case OCF_WRITE_ENCRYPT_MODE:
sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE);
if (!sent)
break;
status = *((__u8 *) skb->data);
param = *((__u8 *) sent);
if (!status) {
if (param)
set_bit(HCI_ENCRYPT, &hdev->flags);
else
clear_bit(HCI_ENCRYPT, &hdev->flags);
}
hci_req_complete(hdev, status);
break;
case OCF_WRITE_CA_TIMEOUT:
status = *((__u8 *) skb->data);
if (status) {
BT_DBG("%s OCF_WRITE_CA_TIMEOUT failed %d", hdev->name, status);
} else {
BT_DBG("%s OCF_WRITE_CA_TIMEOUT succeseful", hdev->name);
}
break;
case OCF_WRITE_PG_TIMEOUT:
status = *((__u8 *) skb->data);
if (status) {
BT_DBG("%s OCF_WRITE_PG_TIMEOUT failed %d", hdev->name, status);
} else {
BT_DBG("%s: OCF_WRITE_PG_TIMEOUT succeseful", hdev->name);
}
break;
case OCF_WRITE_SCAN_ENABLE:
sent = hci_sent_cmd_data(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE);
if (!sent)
break;
status = *((__u8 *) skb->data);
param = *((__u8 *) sent);
BT_DBG("param 0x%x", param);
if (!status) {
clear_bit(HCI_PSCAN, &hdev->flags);
clear_bit(HCI_ISCAN, &hdev->flags);
if (param & SCAN_INQUIRY)
set_bit(HCI_ISCAN, &hdev->flags);
if (param & SCAN_PAGE)
set_bit(HCI_PSCAN, &hdev->flags);
}
hci_req_complete(hdev, status);
break;
case OCF_HOST_BUFFER_SIZE:
status = *((__u8 *) skb->data);
if (status) {
BT_DBG("%s OCF_BUFFER_SIZE failed %d", hdev->name, status);
hci_req_complete(hdev, status);
}
break;
default:
BT_DBG("%s Command complete: ogf HOST_CTL ocf %x", hdev->name, ocf);
break;
};
}
/* Command Complete OGF INFO_PARAM */
static void hci_cc_info_param(struct hci_dev *hdev, __u16 ocf, struct sk_buff *skb)
{
read_local_features_rp *lf;
read_buffer_size_rp *bs;
read_bd_addr_rp *ba;
BT_DBG("%s ocf 0x%x", hdev->name, ocf);
switch (ocf) {
case OCF_READ_LOCAL_FEATURES:
lf = (read_local_features_rp *) skb->data;
if (lf->status) {
BT_DBG("%s READ_LOCAL_FEATURES failed %d", hdev->name, lf->status);
break;
}
memcpy(hdev->features, lf->features, sizeof(hdev->features));
/* Adjust default settings according to features
* supported by device. */
if (hdev->features[0] & LMP_3SLOT)
hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
if (hdev->features[0] & LMP_5SLOT)
hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
if (hdev->features[1] & LMP_HV2)
hdev->pkt_type |= (HCI_HV2);
if (hdev->features[1] & LMP_HV3)
hdev->pkt_type |= (HCI_HV3);
BT_DBG("%s: features 0x%x 0x%x 0x%x", hdev->name, lf->features[0], lf->features[1], lf->features[2]);
break;
case OCF_READ_BUFFER_SIZE:
bs = (read_buffer_size_rp *) skb->data;
if (bs->status) {
BT_DBG("%s READ_BUFFER_SIZE failed %d", hdev->name, bs->status);
hci_req_complete(hdev, bs->status);
break;
}
hdev->acl_mtu = __le16_to_cpu(bs->acl_mtu);
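/* A reported SCO MTU of 0 is replaced with a 64 byte default. */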
hdev->sco_mtu = bs->sco_mtu ? bs->sco_mtu : 64;
hdev->acl_pkts = hdev->acl_cnt = __le16_to_cpu(bs->acl_max_pkt);
hdev->sco_pkts = hdev->sco_cnt = __le16_to_cpu(bs->sco_max_pkt);
BT_DBG("%s mtu: acl %d, sco %d max_pkt: acl %d, sco %d", hdev->name,
hdev->acl_mtu, hdev->sco_mtu, hdev->acl_pkts, hdev->sco_pkts);
break;
case OCF_READ_BD_ADDR:
ba = (read_bd_addr_rp *) skb->data;
if (!ba->status) {
bacpy(&hdev->bdaddr, &ba->bdaddr);
} else {
BT_DBG("%s: READ_BD_ADDR failed %d", hdev->name, ba->status);
}
hci_req_complete(hdev, ba->status);
break;
default:
BT_DBG("%s Command complete: ogf INFO_PARAM ocf %x", hdev->name, ocf);
break;
};
}
/* Command Status OGF LINK_CTL */
static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
struct hci_conn *conn;
create_conn_cp *cc = hci_sent_cmd_data(hdev, OGF_LINK_CTL, OCF_CREATE_CONN);
if (!cc)
return;
hci_dev_lock(hdev);
conn = conn_hash_lookup_ba(hdev, ACL_LINK, &cc->bdaddr);
BT_DBG("%s status 0x%x bdaddr %s conn %p", hdev->name,
status, batostr(&cc->bdaddr), conn);
if (status) {
if (conn) {
conn->state = BT_CLOSED;
hci_proto_connect_cfm(conn, status);
hci_conn_del(conn);
}
} else {
if (!conn) {
conn = hci_conn_add(hdev, ACL_LINK, &cc->bdaddr);
if (conn) {
conn->out = 1;
conn->link_mode |= HCI_LM_MASTER;
} else
BT_ERR("No memmory for new connection");
}
}
hci_dev_unlock(hdev);
}
static void hci_cs_link_ctl(struct hci_dev *hdev, __u16 ocf, __u8 status)
{
BT_DBG("%s ocf 0x%x", hdev->name, ocf);
switch (ocf) {
case OCF_CREATE_CONN:
hci_cs_create_conn(hdev, status);
break;
case OCF_ADD_SCO:
if (status) {
struct hci_conn *acl, *sco;
add_sco_cp *cp = hci_sent_cmd_data(hdev,
OGF_LINK_CTL, OCF_ADD_SCO);
__u16 handle;
if (!cp)
break;
handle = __le16_to_cpu(cp->handle);
BT_DBG("%s Add SCO error: handle %d status 0x%x", hdev->name, handle, status);
hci_dev_lock(hdev);
acl = conn_hash_lookup_handle(hdev, handle);
if (!acl || !(sco = acl->link)) {
hci_dev_unlock(hdev);
break;
}
sco->state = BT_CLOSED;
hci_proto_connect_cfm(sco, status);
hci_conn_del(sco);
hci_dev_unlock(hdev);
}
break;
case OCF_INQUIRY:
if (status) {
BT_DBG("%s Inquiry error: status 0x%x", hdev->name, status);
hci_req_complete(hdev, status);
} else {
set_bit(HCI_INQUIRY, &hdev->flags);
}
break;
default:
BT_DBG("%s Command status: ogf LINK_CTL ocf %x status %d",
hdev->name, ocf, status);
break;
};
}
/* Command Status OGF LINK_POLICY */
static void hci_cs_link_policy(struct hci_dev *hdev, __u16 ocf, __u8 status)
{
BT_DBG("%s ocf 0x%x", hdev->name, ocf);
switch (ocf) {
default:
BT_DBG("%s Command status: ogf HOST_POLICY ocf %x", hdev->name, ocf);
break;
};
}
/* Command Status OGF HOST_CTL */
static void hci_cs_host_ctl(struct hci_dev *hdev, __u16 ocf, __u8 status)
{
BT_DBG("%s ocf 0x%x", hdev->name, ocf);
switch (ocf) {
default:
BT_DBG("%s Command status: ogf HOST_CTL ocf %x", hdev->name, ocf);
break;
};
}
/* Command Status OGF INFO_PARAM */
static void hci_cs_info_param(struct hci_dev *hdev, __u16 ocf, __u8 status)
{
BT_DBG("%s: hci_cs_info_param: ocf 0x%x", hdev->name, ocf);
switch (ocf) {
default:
BT_DBG("%s Command status: ogf INFO_PARAM ocf %x", hdev->name, ocf);
break;
};
}
/* Inquiry Complete */
static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
BT_DBG("%s status %d", hdev->name, status);
clear_bit(HCI_INQUIRY, &hdev->flags);
hci_req_complete(hdev, status);
}
/* Inquiry Result */
static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
inquiry_info *info = (inquiry_info *) (skb->data + 1);
int num_rsp = *((__u8 *) skb->data);
BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
hci_dev_lock(hdev);
for (; num_rsp; num_rsp--)
inquiry_cache_update(hdev, info++);
hci_dev_unlock(hdev);
}
/* Connect Request */
static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
evt_conn_request *cr = (evt_conn_request *) skb->data;
int mask = hdev->link_mode;
BT_DBG("%s Connection request: %s type 0x%x", hdev->name,
batostr(&cr->bdaddr), cr->link_type);
mask |= hci_proto_connect_ind(hdev, &cr->bdaddr, cr->link_type);
if (mask & HCI_LM_ACCEPT) {
/* Connection accepted */
struct hci_conn *conn;
accept_conn_req_cp ac;
hci_dev_lock(hdev);
conn = conn_hash_lookup_ba(hdev, cr->link_type, &cr->bdaddr);
if (!conn) {
if (!(conn = hci_conn_add(hdev, cr->link_type, &cr->bdaddr))) {
BT_ERR("No memmory for new connection");
hci_dev_unlock(hdev);
return;
}
}
conn->state = BT_CONNECT;
hci_dev_unlock(hdev);
bacpy(&ac.bdaddr, &cr->bdaddr);
if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
ac.role = 0x00; /* Become master */
else
ac.role = 0x01; /* Remain slave */
hci_send_cmd(hdev, OGF_LINK_CTL, OCF_ACCEPT_CONN_REQ,
ACCEPT_CONN_REQ_CP_SIZE, &ac);
} else {
/* Connection rejected */
reject_conn_req_cp rc;
bacpy(&rc.bdaddr, &cr->bdaddr);
rc.reason = 0x0f;
hci_send_cmd(hdev, OGF_LINK_CTL, OCF_REJECT_CONN_REQ,
REJECT_CONN_REQ_CP_SIZE, &rc);
}
}
/* Connect Complete */
static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
evt_conn_complete *cc = (evt_conn_complete *) skb->data;
struct hci_conn *conn = NULL;
BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
conn = conn_hash_lookup_ba(hdev, cc->link_type, &cc->bdaddr);
if (!conn) {
hci_dev_unlock(hdev);
return;
}
if (!cc->status) {
conn->handle = __le16_to_cpu(cc->handle);
conn->state = BT_CONNECTED;
if (test_bit(HCI_AUTH, &hdev->flags))
conn->link_mode |= HCI_LM_AUTH;
if (test_bit(HCI_ENCRYPT, &hdev->flags))
conn->link_mode |= HCI_LM_ENCRYPT;
/* Set link policy */
if (conn->type == ACL_LINK && hdev->link_policy) {
write_link_policy_cp lp;
lp.handle = cc->handle;
lp.policy = __cpu_to_le16(hdev->link_policy);
hci_send_cmd(hdev, OGF_LINK_POLICY, OCF_WRITE_LINK_POLICY,
WRITE_LINK_POLICY_CP_SIZE, &lp);
}
/* Set packet type for incoming connection */
if (!conn->out) {
change_conn_ptype_cp cp;
cp.handle = cc->handle;
cp.pkt_type = (conn->type == ACL_LINK) ?
__cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK):
__cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK);
hci_send_cmd(hdev, OGF_LINK_CTL, OCF_CHANGE_CONN_PTYPE,
CHANGE_CONN_PTYPE_CP_SIZE, &cp);
}
} else
conn->state = BT_CLOSED;
if (conn->type == ACL_LINK) {
struct hci_conn *sco = conn->link;
if (sco) {
if (!cc->status)
hci_add_sco(sco, conn->handle);
else {
hci_proto_connect_cfm(sco, cc->status);
hci_conn_del(sco);
}
}
}
hci_proto_connect_cfm(conn, cc->status);
if (cc->status)
hci_conn_del(conn);
hci_dev_unlock(hdev);
}
/* Disconnect Complete */
static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
evt_disconn_complete *dc = (evt_disconn_complete *) skb->data;
struct hci_conn *conn = NULL;
__u16 handle = __le16_to_cpu(dc->handle);
BT_DBG("%s status %d", hdev->name, dc->status);
if (dc->status)
return;
hci_dev_lock(hdev);
conn = conn_hash_lookup_handle(hdev, handle);
if (conn) {
conn->state = BT_CLOSED;
hci_proto_disconn_ind(conn, dc->reason);
hci_conn_del(conn);
}
hci_dev_unlock(hdev);
}
/* Number of completed packets */
static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
evt_num_comp_pkts *nc = (evt_num_comp_pkts *) skb->data;
__u16 *ptr;
int i;
skb_pull(skb, EVT_NUM_COMP_PKTS_SIZE);
BT_DBG("%s num_hndl %d", hdev->name, nc->num_hndl);
if (skb->len < nc->num_hndl * 4) {
BT_DBG("%s bad parameters", hdev->name);
return;
}
tasklet_disable(&hdev->tx_task);
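/* Each entry in the event is a little-endian connection handle
 * followed by a completed packet count (2 + 2 bytes per handle). */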
for (i = 0, ptr = (__u16 *) skb->data; i < nc->num_hndl; i++) {
struct hci_conn *conn;
__u16 handle, count;
handle = __le16_to_cpu(get_unaligned(ptr++));
count = __le16_to_cpu(get_unaligned(ptr++));
conn = conn_hash_lookup_handle(hdev, handle);
if (conn) {
conn->sent -= count;
if (conn->type == SCO_LINK) {
if ((hdev->sco_cnt += count) > hdev->sco_pkts)
hdev->sco_cnt = hdev->sco_pkts;
} else {
if ((hdev->acl_cnt += count) > hdev->acl_pkts)
hdev->acl_cnt = hdev->acl_pkts;
}
}
}
hci_sched_tx(hdev);
tasklet_enable(&hdev->tx_task);
}
/* Role Change */
static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
evt_role_change *rc = (evt_role_change *) skb->data;
struct hci_conn *conn = NULL;
BT_DBG("%s status %d", hdev->name, rc->status);
if (rc->status)
return;
hci_dev_lock(hdev);
conn = conn_hash_lookup_ba(hdev, ACL_LINK, &rc->bdaddr);
if (conn) {
if (rc->role)
conn->link_mode &= ~HCI_LM_MASTER;
else
conn->link_mode |= HCI_LM_MASTER;
}
hci_dev_unlock(hdev);
}
/* Authentication Complete */
static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
evt_auth_complete *ac = (evt_auth_complete *) skb->data;
struct hci_conn *conn = NULL;
__u16 handle = __le16_to_cpu(ac->handle);
BT_DBG("%s status %d", hdev->name, ac->status);
hci_dev_lock(hdev);
conn = conn_hash_lookup_handle(hdev, handle);
if (conn) {
if (!ac->status)
conn->link_mode |= HCI_LM_AUTH;
clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
hci_proto_auth_cfm(conn, ac->status);
if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
if (!ac->status) {
set_conn_encrypt_cp ce;
ce.handle = __cpu_to_le16(conn->handle);
ce.encrypt = 1;
hci_send_cmd(conn->hdev, OGF_LINK_CTL,
OCF_SET_CONN_ENCRYPT,
SET_CONN_ENCRYPT_CP_SIZE, &ce);
} else {
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
hci_proto_encrypt_cfm(conn, ac->status);
}
}
}
hci_dev_unlock(hdev);
}
/* Encryption Change */
static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
evt_encrypt_change *ec = (evt_encrypt_change *) skb->data;
struct hci_conn *conn = NULL;
__u16 handle = __le16_to_cpu(ec->handle);
BT_DBG("%s status %d", hdev->name, ec->status);
hci_dev_lock(hdev);
conn = conn_hash_lookup_handle(hdev, handle);
if (conn) {
if (!ec->status) {
if (ec->encrypt)
conn->link_mode |= HCI_LM_ENCRYPT;
else
conn->link_mode &= ~HCI_LM_ENCRYPT;
}
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
hci_proto_encrypt_cfm(conn, ec->status);
}
hci_dev_unlock(hdev);
}
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
hci_event_hdr *he = (hci_event_hdr *) skb->data;
evt_cmd_status *cs;
evt_cmd_complete *ec;
__u16 opcode, ocf, ogf;
skb_pull(skb, HCI_EVENT_HDR_SIZE);
BT_DBG("%s evt 0x%x", hdev->name, he->evt);
switch (he->evt) {
case EVT_NUM_COMP_PKTS:
hci_num_comp_pkts_evt(hdev, skb);
break;
case EVT_INQUIRY_COMPLETE:
hci_inquiry_complete_evt(hdev, skb);
break;
case EVT_INQUIRY_RESULT:
hci_inquiry_result_evt(hdev, skb);
break;
case EVT_CONN_REQUEST:
hci_conn_request_evt(hdev, skb);
break;
case EVT_CONN_COMPLETE:
hci_conn_complete_evt(hdev, skb);
break;
case EVT_DISCONN_COMPLETE:
hci_disconn_complete_evt(hdev, skb);
break;
case EVT_ROLE_CHANGE:
hci_role_change_evt(hdev, skb);
break;
case EVT_AUTH_COMPLETE:
hci_auth_complete_evt(hdev, skb);
break;
case EVT_ENCRYPT_CHANGE:
hci_encrypt_change_evt(hdev, skb);
break;
case EVT_CMD_STATUS:
cs = (evt_cmd_status *) skb->data;
skb_pull(skb, EVT_CMD_STATUS_SIZE);
opcode = __le16_to_cpu(cs->opcode);
ogf = cmd_opcode_ogf(opcode);
ocf = cmd_opcode_ocf(opcode);
switch (ogf) {
case OGF_INFO_PARAM:
hci_cs_info_param(hdev, ocf, cs->status);
break;
case OGF_HOST_CTL:
hci_cs_host_ctl(hdev, ocf, cs->status);
break;
case OGF_LINK_CTL:
hci_cs_link_ctl(hdev, ocf, cs->status);
break;
case OGF_LINK_POLICY:
hci_cs_link_policy(hdev, ocf, cs->status);
break;
default:
BT_DBG("%s Command Status OGF %x", hdev->name, ogf);
break;
};
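/* Non-zero ncmd means the controller can accept another command,
 * so restart the command queue if anything is pending. */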
if (cs->ncmd) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
hci_sched_cmd(hdev);
}
break;
case EVT_CMD_COMPLETE:
ec = (evt_cmd_complete *) skb->data;
skb_pull(skb, EVT_CMD_COMPLETE_SIZE);
opcode = __le16_to_cpu(ec->opcode);
ogf = cmd_opcode_ogf(opcode);
ocf = cmd_opcode_ocf(opcode);
switch (ogf) {
case OGF_INFO_PARAM:
hci_cc_info_param(hdev, ocf, skb);
break;
case OGF_HOST_CTL:
hci_cc_host_ctl(hdev, ocf, skb);
break;
case OGF_LINK_CTL:
hci_cc_link_ctl(hdev, ocf, skb);
break;
case OGF_LINK_POLICY:
hci_cc_link_policy(hdev, ocf, skb);
break;
default:
BT_DBG("%s Command Completed OGF %x", hdev->name, ogf);
break;
};
if (ec->ncmd) {
atomic_set(&hdev->cmd_cnt, 1);
if (!skb_queue_empty(&hdev->cmd_q))
hci_sched_cmd(hdev);
}
break;
};
kfree_skb(skb);
hdev->stat.evt_rx++;
}
/* General internal stack event */
void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
hci_event_hdr *eh;
evt_stack_internal *si;
struct sk_buff *skb;
int size;
void *ptr;
size = HCI_EVENT_HDR_SIZE + EVT_STACK_INTERNAL_SIZE + dlen;
skb = bluez_skb_alloc(size, GFP_ATOMIC);
if (!skb)
return;
ptr = skb_put(skb, size);
eh = ptr;
eh->evt = EVT_STACK_INTERNAL;
eh->plen = EVT_STACK_INTERNAL_SIZE + dlen;
ptr += HCI_EVENT_HDR_SIZE;
si = ptr;
si->type = type;
memcpy(si->data, data, dlen);
skb->pkt_type = HCI_EVENT_PKT;
skb->dev = (void *) hdev;
hci_send_to_sock(hdev, skb);
kfree_skb(skb);
}
......@@ -25,7 +25,7 @@
/*
* BlueZ HCI socket layer.
*
* $Id: hci_sock.c,v 1.9 2001/08/05 06:02:16 maxk Exp $
* $Id: hci_sock.c,v 1.4 2002/04/18 22:26:14 maxk Exp $
*/
#include <linux/config.h>
......@@ -51,43 +51,48 @@
#include <asm/uaccess.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/bluez.h>
#include <net/bluetooth/hci_core.h>
#ifndef HCI_SOCK_DEBUG
#undef DBG
#define DBG( A... )
#undef BT_DBG
#define BT_DBG( A... )
#endif
/* HCI socket interface */
/* ----- HCI socket interface ----- */
/* Security filter */
static struct hci_sec_filter hci_sec_filter = {
/* Packet types */
0x10,
/* Events */
{ 0xd9fe, 0x0 },
/* Commands */
{
/* OGF_LINK_CTL */
{ 0x2a000002, 0x0, 0x0, 0x0 },
/* OGF_LINK_POLICY */
{ 0x1200, 0x0, 0x0, 0x0 },
/* OGF_HOST_CTL */
{ 0x80100000, 0xa, 0x0, 0x0 },
/* OGF_INFO_PARAM */
{ 0x22a, 0x0, 0x0, 0x0 }
}
};
static struct bluez_sock_list hci_sk_list = {
lock: RW_LOCK_UNLOCKED
};
static struct sock *hci_sock_lookup(struct hci_dev *hdev)
{
struct sock *sk;
read_lock(&hci_sk_list.lock);
for (sk = hci_sk_list.head; sk; sk = sk->next) {
if (hci_pi(sk)->hdev == hdev)
break;
}
read_unlock(&hci_sk_list.lock);
return sk;
}
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
struct sock * sk;
DBG("hdev %p len %d", hdev, skb->len);
BT_DBG("hdev %p len %d", hdev, skb->len);
read_lock(&hci_sk_list.lock);
for (sk = hci_sk_list.head; sk; sk = sk->next) {
struct hci_filter *flt;
struct hci_filter *flt;
struct sk_buff *nskb;
if (sk->state != BT_BOUND || hci_pi(sk)->hdev != hdev)
......@@ -100,13 +105,19 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
/* Apply filter */
flt = &hci_pi(sk)->filter;
if (!test_bit(skb->pkt_type, &flt->type_mask))
if (!hci_test_bit((skb->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask))
continue;
if (skb->pkt_type == HCI_EVENT_PKT) {
register int evt = (*(__u8 *)skb->data & 63);
register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
if (!hci_test_bit(evt, &flt->event_mask))
continue;
if (!test_bit(evt, &flt->event_mask))
if (flt->opcode && ((evt == EVT_CMD_COMPLETE &&
flt->opcode != *(__u16 *)(skb->data + 3)) ||
(evt == EVT_CMD_STATUS &&
flt->opcode != *(__u16 *)(skb->data + 4))))
continue;
}
......@@ -116,8 +127,9 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
/* Put type byte before the data */
memcpy(skb_push(nskb, 1), &nskb->pkt_type, 1);
skb_queue_tail(&sk->receive_queue, nskb);
sk->data_ready(sk, nskb->len);
if (sock_queue_rcv_skb(sk, nskb))
kfree_skb(nskb);
}
read_unlock(&hci_sk_list.lock);
}
......@@ -127,7 +139,7 @@ static int hci_sock_release(struct socket *sock)
struct sock *sk = sock->sk;
struct hci_dev *hdev = hci_pi(sk)->hdev;
DBG("sock %p sk %p", sock, sk);
BT_DBG("sock %p sk %p", sock, sk);
if (!sk)
return 0;
......@@ -135,9 +147,7 @@ static int hci_sock_release(struct socket *sock)
bluez_sock_unlink(&hci_sk_list, sk);
if (hdev) {
if (!hci_sock_lookup(hdev))
hdev->flags &= ~HCI_SOCK;
atomic_dec(&hdev->promisc);
hci_dev_put(hdev);
}
......@@ -149,24 +159,55 @@ static int hci_sock_release(struct socket *sock)
sock_put(sk);
MOD_DEC_USE_COUNT;
return 0;
}
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
/* Ioctls that require bound socket */
static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
struct hci_dev *hdev = hci_pi(sk)->hdev;
__u32 mode;
DBG("cmd %x arg %lx", cmd, arg);
if (!hdev)
return -EBADFD;
switch (cmd) {
case HCIGETINFO:
return hci_dev_info(arg);
case HCISETRAW:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
if (arg)
set_bit(HCI_RAW, &hdev->flags);
else
clear_bit(HCI_RAW, &hdev->flags);
return 0;
case HCIGETCONNINFO:
return hci_get_conn_info(hdev, arg);
default:
if (hdev->ioctl)
return hdev->ioctl(hdev, cmd, arg);
return -EINVAL;
}
}
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
int err;
BT_DBG("cmd %x arg %lx", cmd, arg);
switch (cmd) {
case HCIGETDEVLIST:
return hci_dev_list(arg);
return hci_get_dev_list(arg);
case HCIGETDEVINFO:
return hci_get_dev_info(arg);
case HCIGETCONNLIST:
return hci_get_conn_list(arg);
case HCIDEVUP:
if (!capable(CAP_NET_ADMIN))
......@@ -183,48 +224,31 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long a
return -EACCES;
return hci_dev_reset(arg);
case HCIRESETSTAT:
case HCIDEVRESTAT:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
return hci_dev_reset_stat(arg);
case HCISETSCAN:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
return hci_dev_setscan(arg);
case HCISETAUTH:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
return hci_dev_setauth(arg);
case HCISETRAW:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
if (!hdev)
return -EBADFD;
if (arg)
mode = HCI_RAW;
else
mode = HCI_NORMAL;
return hci_dev_setmode(hdev, mode);
case HCISETENCRYPT:
case HCISETPTYPE:
case HCISETLINKPOL:
case HCISETLINKMODE:
case HCISETACLMTU:
case HCISETSCOMTU:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
return hci_dev_setptype(arg);
return hci_dev_cmd(cmd, arg);
case HCIINQUIRY:
return hci_inquiry(arg);
case HCIGETCONNLIST:
return hci_conn_list(arg);
default:
return -EINVAL;
lock_sock(sk);
err = hci_sock_bound_ioctl(sk, cmd, arg);
release_sock(sk);
return err;
};
}
......@@ -233,28 +257,35 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
struct sock *sk = sock->sk;
struct hci_dev *hdev = NULL;
int err = 0;
DBG("sock %p sk %p", sock, sk);
BT_DBG("sock %p sk %p", sock, sk);
if (!haddr || haddr->hci_family != AF_BLUETOOTH)
return -EINVAL;
lock_sock(sk);
if (hci_pi(sk)->hdev) {
/* Already bound */
return 0;
err = -EALREADY;
goto done;
}
if (haddr->hci_dev != HCI_DEV_NONE) {
if (!(hdev = hci_dev_get(haddr->hci_dev)))
return -ENODEV;
if (!(hdev = hci_dev_get(haddr->hci_dev))) {
err = -ENODEV;
goto done;
}
hdev->flags |= HCI_SOCK;
atomic_inc(&hdev->promisc);
}
hci_pi(sk)->hdev = hdev;
sk->state = BT_BOUND;
return 0;
done:
release_sock(sk);
return err;
}
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
......@@ -262,73 +293,44 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *add
struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
struct sock *sk = sock->sk;
DBG("sock %p sk %p", sock, sk);
BT_DBG("sock %p sk %p", sock, sk);
lock_sock(sk);
*addr_len = sizeof(*haddr);
haddr->hci_family = AF_BLUETOOTH;
haddr->hci_dev = hci_pi(sk)->hdev->id;
release_sock(sk);
return 0;
}
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, int len,
struct scm_cookie *scm)
{
struct sock *sk = sock->sk;
struct hci_dev *hdev = hci_pi(sk)->hdev;
struct sk_buff *skb;
int err;
DBG("sock %p sk %p", sock, sk);
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
return -EINVAL;
if (!hdev)
return -EBADFD;
if (!(skb = bluez_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err)))
return err;
if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
kfree_skb(skb);
return -EFAULT;
}
skb->dev = (void *) hdev;
skb->pkt_type = *((unsigned char *) skb->data);
skb_pull(skb, 1);
/* Send frame to HCI core */
hci_send_raw(skb);
return len;
}
static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
{
__u32 mask = hci_pi(sk)->cmsg_mask;
if (mask & HCI_CMSG_DIR)
put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(int), &bluez_cb(skb)->incomming);
if (mask & HCI_CMSG_TSTAMP)
put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, sizeof(skb->stamp), &skb->stamp);
}
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, int len,
int flags, struct scm_cookie *scm)
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, int len, int flags, struct scm_cookie *scm)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
struct sk_buff *skb;
int copied, err;
DBG("sock %p sk %p", sock, sk);
BT_DBG("sock %p, sk %p", sock, sk);
if (flags & (MSG_OOB | MSG_PEEK))
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
if (sk->state == BT_CLOSED)
return 0;
if (!(skb = skb_recv_datagram(sk, flags, noblock, &err)))
return err;
......@@ -343,28 +345,95 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, int len,
skb->h.raw = skb->data;
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
if (hci_pi(sk)->cmsg_mask)
hci_sock_cmsg(sk, msg, skb);
hci_sock_cmsg(sk, msg, skb);
skb_free_datagram(sk, skb);
return err ? : copied;
}
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, int len,
struct scm_cookie *scm)
{
struct sock *sk = sock->sk;
struct hci_dev *hdev;
struct sk_buff *skb;
int err;
BT_DBG("sock %p sk %p", sock, sk);
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
return -EINVAL;
if (len < 4)
return -EINVAL;
lock_sock(sk);
if (!(hdev = hci_pi(sk)->hdev)) {
err = -EBADFD;
goto done;
}
if (!(skb = bluez_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err)))
goto done;
if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
err = -EFAULT;
goto drop;
}
skb->pkt_type = *((unsigned char *) skb->data);
skb_pull(skb, 1);
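/* Unprivileged senders may only issue HCI commands allowed by the
 * security filter; any other packet type is dropped. */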
if (!capable(CAP_NET_RAW)) {
err = -EPERM;
if (skb->pkt_type == HCI_COMMAND_PKT) {
__u16 opcode = __le16_to_cpu(*(__u16 *)skb->data);
__u16 ogf = cmd_opcode_ogf(opcode) - 1;
__u16 ocf = cmd_opcode_ocf(opcode) & HCI_FLT_OCF_BITS;
if (ogf > HCI_SFLT_MAX_OGF ||
!hci_test_bit(ocf, &hci_sec_filter.ocf_mask[ogf]))
goto drop;
} else
goto drop;
}
/* Send frame to HCI core */
skb->dev = (void *) hdev;
hci_send_raw(skb);
err = len;
done:
release_sock(sk);
return err;
drop:
kfree_skb(skb);
goto done;
}
int hci_sock_setsockopt(struct socket *sock, int level, int optname, char *optval, int len)
{
struct sock *sk = sock->sk;
struct hci_filter flt;
struct hci_filter flt = { opcode: 0 };
int err = 0, opt = 0;
DBG("sk %p, opt %d", sk, optname);
BT_DBG("sk %p, opt %d", sk, optname);
lock_sock(sk);
switch (optname) {
case HCI_DATA_DIR:
if (get_user(opt, (int *)optval))
return -EFAULT;
if (get_user(opt, (int *)optval)) {
err = -EFAULT;
break;
}
if (opt)
hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
......@@ -372,12 +441,31 @@ int hci_sock_setsockopt(struct socket *sock, int level, int optname, char *optva
hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
break;
case HCI_TIME_STAMP:
if (get_user(opt, (int *)optval)) {
err = -EFAULT;
break;
}
if (opt)
hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
else
hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
break;
case HCI_FILTER:
len = MIN(len, sizeof(struct hci_filter));
if (copy_from_user(&flt, optval, len)) {
err = -EFAULT;
break;
}
if (!capable(CAP_NET_RAW)) {
flt.type_mask &= hci_sec_filter.type_mask;
flt.event_mask[0] &= hci_sec_filter.event_mask[0];
flt.event_mask[1] &= hci_sec_filter.event_mask[1];
}
memcpy(&hci_pi(sk)->filter, &flt, len);
break;
......@@ -409,6 +497,16 @@ int hci_sock_getsockopt(struct socket *sock, int level, int optname, char *optva
return -EFAULT;
break;
case HCI_TIME_STAMP:
if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
opt = 1;
else
opt = 0;
if (put_user(opt, optval))
return -EFAULT;
break;
case HCI_FILTER:
len = MIN(len, sizeof(struct hci_filter));
if (copy_to_user(optval, &hci_pi(sk)->filter, len))
......@@ -446,62 +544,45 @@ static int hci_sock_create(struct socket *sock, int protocol)
{
struct sock *sk;
DBG("sock %p", sock);
BT_DBG("sock %p", sock);
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
sock->ops = &hci_sock_ops;
if (!(sk = sk_alloc(PF_BLUETOOTH, GFP_KERNEL, 1, NULL)))
sk = bluez_sock_alloc(sock, protocol, sizeof(struct hci_pinfo), GFP_KERNEL);
if (!sk)
return -ENOMEM;
sock->state = SS_UNCONNECTED;
sock_init_data(sock, sk);
memset(&sk->protinfo, 0, sizeof(struct hci_pinfo));
sk->destruct = NULL;
sk->protocol = protocol;
sk->state = BT_OPEN;
/* Initialize filter */
hci_pi(sk)->filter.type_mask = (1<<HCI_EVENT_PKT);
hci_pi(sk)->filter.event_mask[0] = ~0L;
hci_pi(sk)->filter.event_mask[1] = ~0L;
sk->state = BT_OPEN;
bluez_sock_link(&hci_sk_list, sk);
MOD_INC_USE_COUNT;
return 0;
}
static int hci_sock_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct hci_dev *hdev = (struct hci_dev *) ptr;
struct sk_buff *skb;
DBG("hdev %s event %ld", hdev->name, event);
evt_si_device sd;
BT_DBG("hdev %s event %ld", hdev->name, event);
/* Send event to sockets */
if ((skb = bluez_skb_alloc(HCI_EVENT_HDR_SIZE + EVT_HCI_DEV_EVENT_SIZE, GFP_ATOMIC))) {
hci_event_hdr eh = { EVT_HCI_DEV_EVENT, EVT_HCI_DEV_EVENT_SIZE };
evt_hci_dev_event he = { event, hdev->id };
skb->pkt_type = HCI_EVENT_PKT;
memcpy(skb_put(skb, HCI_EVENT_HDR_SIZE), &eh, HCI_EVENT_HDR_SIZE);
memcpy(skb_put(skb, EVT_HCI_DEV_EVENT_SIZE), &he, EVT_HCI_DEV_EVENT_SIZE);
hci_send_to_sock(NULL, skb);
kfree_skb(skb);
}
sd.event = event;
sd.dev_id = hdev->id;
hci_si_event(NULL, EVT_SI_DEVICE, EVT_SI_DEVICE_SIZE, &sd);
if (event == HCI_DEV_UNREG) {
struct sock *sk;
/* Detach sockets from device */
read_lock(&hci_sk_list.lock);
for (sk = hci_sk_list.head; sk; sk = sk->next) {
bh_lock_sock(sk);
if (hci_pi(sk)->hdev == hdev) {
hci_pi(sk)->hdev = NULL;
sk->err = EPIPE;
......@@ -510,6 +591,7 @@ static int hci_sock_dev_event(struct notifier_block *this, unsigned long event,
hci_dev_put(hdev);
}
bh_unlock_sock(sk);
}
read_unlock(&hci_sk_list.lock);
}
......@@ -519,7 +601,7 @@ static int hci_sock_dev_event(struct notifier_block *this, unsigned long event,
struct net_proto_family hci_sock_family_ops = {
family: PF_BLUETOOTH,
create: hci_sock_create,
create: hci_sock_create
};
struct notifier_block hci_sock_nblock = {
......@@ -529,21 +611,19 @@ struct notifier_block hci_sock_nblock = {
int hci_sock_init(void)
{
if (bluez_sock_register(BTPROTO_HCI, &hci_sock_family_ops)) {
ERR("Can't register HCI socket");
BT_ERR("Can't register HCI socket");
return -EPROTO;
}
hci_register_notifier(&hci_sock_nblock);
return 0;
}
int hci_sock_cleanup(void)
{
if (bluez_sock_unregister(BTPROTO_HCI))
ERR("Can't unregister HCI socket");
BT_ERR("Can't unregister HCI socket");
hci_unregister_notifier(&hci_sock_nblock);
return 0;
}
......@@ -25,9 +25,9 @@
/*
* BlueZ L2CAP core and sockets.
*
* $Id: l2cap_core.c,v 1.19 2001/08/03 04:19:50 maxk Exp $
* $Id: l2cap.c,v 1.8 2002/04/19 00:01:39 maxk Exp $
*/
#define VERSION "1.1"
#define VERSION "2.0"
#include <linux/config.h>
#include <linux/module.h>
......@@ -53,147 +53,42 @@
#include <asm/uaccess.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/bluez.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/l2cap_core.h>
#ifndef L2CAP_DEBUG
#undef DBG
#define DBG( A... )
#undef BT_DBG
#define BT_DBG( A... )
#endif
struct proto_ops l2cap_sock_ops;
static struct proto_ops l2cap_sock_ops;
struct bluez_sock_list l2cap_sk_list = {
lock: RW_LOCK_UNLOCKED
};
struct list_head l2cap_iff_list = LIST_HEAD_INIT(l2cap_iff_list);
rwlock_t l2cap_rt_lock = RW_LOCK_UNLOCKED;
static int l2cap_conn_del(struct l2cap_conn *conn, int err);
static int l2cap_conn_del(struct hci_conn *conn, int err);
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent);
static void l2cap_chan_del(struct sock *sk, int err);
static int l2cap_chan_send(struct sock *sk, struct msghdr *msg, int len);
static void __l2cap_sock_close(struct sock *sk, int reason);
static void l2cap_sock_close(struct sock *sk);
static void l2cap_sock_kill(struct sock *sk);
static int l2cap_send_req(struct l2cap_conn *conn, __u8 code, __u16 len, void *data);
static int l2cap_send_rsp(struct l2cap_conn *conn, __u8 ident, __u8 code, __u16 len, void *data);
/* -------- L2CAP interfaces & routing --------- */
/* Add/delete L2CAP interface.
* Must be called with locked rt_lock
*/
static void l2cap_iff_add(struct hci_dev *hdev)
{
struct l2cap_iff *iff;
DBG("%s", hdev->name);
DBG("iff_list %p next %p prev %p", &l2cap_iff_list, l2cap_iff_list.next, l2cap_iff_list.prev);
/* Allocate new interface and lock HCI device */
if (!(iff = kmalloc(sizeof(struct l2cap_iff), GFP_KERNEL))) {
ERR("Can't allocate new interface %s", hdev->name);
return;
}
memset(iff, 0, sizeof(struct l2cap_iff));
hci_dev_hold(hdev);
hdev->l2cap_data = iff;
iff->hdev = hdev;
iff->mtu = hdev->acl_mtu - HCI_ACL_HDR_SIZE;
iff->bdaddr = &hdev->bdaddr;
spin_lock_init(&iff->lock);
INIT_LIST_HEAD(&iff->conn_list);
list_add(&iff->list, &l2cap_iff_list);
}
static void l2cap_iff_del(struct hci_dev *hdev)
{
struct l2cap_iff *iff;
if (!(iff = hdev->l2cap_data))
return;
DBG("%s iff %p", hdev->name, iff);
list_del(&iff->list);
l2cap_iff_lock(iff);
/* Drop connections */
while (!list_empty(&iff->conn_list)) {
struct l2cap_conn *c;
c = list_entry(iff->conn_list.next, struct l2cap_conn, list);
l2cap_conn_del(c, ENODEV);
}
l2cap_iff_unlock(iff);
/* Unlock HCI device */
hdev->l2cap_data = NULL;
hci_dev_put(hdev);
kfree(iff);
}
/* Get route. Returns L2CAP interface.
* Must be called with locked rt_lock
*/
static struct l2cap_iff *l2cap_get_route(bdaddr_t *src, bdaddr_t *dst)
{
struct list_head *p;
int use_src;
DBG("%s -> %s", batostr(src), batostr(dst));
use_src = bacmp(src, BDADDR_ANY) ? 0 : 1;
/* Simple routing:
* No source address - find interface with bdaddr != dst
* Source address - find interface with bdaddr == src
*/
list_for_each(p, &l2cap_iff_list) {
struct l2cap_iff *iff;
iff = list_entry(p, struct l2cap_iff, list);
if (use_src && !bacmp(iff->bdaddr, src))
return iff;
else if (bacmp(iff->bdaddr, dst))
return iff;
}
return NULL;
}
/* ----- L2CAP timers ------ */
static void l2cap_sock_timeout(unsigned long arg)
{
struct sock *sk = (struct sock *) arg;
DBG("sock %p state %d", sk, sk->state);
BT_DBG("sock %p state %d", sk, sk->state);
bh_lock_sock(sk);
switch (sk->state) {
case BT_DISCONN:
l2cap_chan_del(sk, ETIMEDOUT);
break;
default:
sk->err = ETIMEDOUT;
sk->state_change(sk);
break;
};
__l2cap_sock_close(sk, ETIMEDOUT);
bh_unlock_sock(sk);
l2cap_sock_kill(sk);
......@@ -202,7 +97,7 @@ static void l2cap_sock_timeout(unsigned long arg)
static void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
DBG("sock %p state %d timeout %ld", sk, sk->state, timeout);
BT_DBG("sk %p state %d timeout %ld", sk, sk->state, timeout);
if (!mod_timer(&sk->timer, jiffies + timeout))
sock_hold(sk);
......@@ -210,7 +105,7 @@ static void l2cap_sock_set_timer(struct sock *sk, long timeout)
static void l2cap_sock_clear_timer(struct sock *sk)
{
DBG("sock %p state %d", sk, sk->state);
BT_DBG("sock %p state %d", sk, sk->state);
if (timer_pending(&sk->timer) && del_timer(&sk->timer))
__sock_put(sk);
......@@ -223,86 +118,46 @@ static void l2cap_sock_init_timer(struct sock *sk)
sk->timer.data = (unsigned long)sk;
}
static void l2cap_conn_timeout(unsigned long arg)
{
struct l2cap_conn *conn = (void *)arg;
DBG("conn %p state %d", conn, conn->state);
if (conn->state == BT_CONNECTED) {
hci_disconnect(conn->hconn, 0x13);
}
return;
}
static void l2cap_conn_set_timer(struct l2cap_conn *conn, long timeout)
{
DBG("conn %p state %d timeout %ld", conn, conn->state, timeout);
mod_timer(&conn->timer, jiffies + timeout);
}
static void l2cap_conn_clear_timer(struct l2cap_conn *conn)
{
DBG("conn %p state %d", conn, conn->state);
del_timer(&conn->timer);
}
static void l2cap_conn_init_timer(struct l2cap_conn *conn)
{
init_timer(&conn->timer);
conn->timer.function = l2cap_conn_timeout;
conn->timer.data = (unsigned long)conn;
}
/* -------- L2CAP connections --------- */
/* Add new connection to the interface.
* Interface must be locked
*/
static struct l2cap_conn *l2cap_conn_add(struct l2cap_iff *iff, bdaddr_t *dst)
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, __u8 status)
{
struct l2cap_conn *conn;
bdaddr_t *src = iff->bdaddr;
if (!(conn = kmalloc(sizeof(struct l2cap_conn), GFP_KERNEL)))
return NULL;
if ((conn = hcon->l2cap_data))
return conn;
memset(conn, 0, sizeof(struct l2cap_conn));
if (status)
return conn;
conn->state = BT_OPEN;
conn->iff = iff;
bacpy(&conn->src, src);
bacpy(&conn->dst, dst);
if (!(conn = kmalloc(sizeof(struct l2cap_conn), GFP_ATOMIC)))
return NULL;
memset(conn, 0, sizeof(struct l2cap_conn));
hcon->l2cap_data = conn;
conn->hcon = hcon;
conn->mtu = hcon->hdev->acl_mtu;
conn->src = &hcon->hdev->bdaddr;
conn->dst = &hcon->dst;
spin_lock_init(&conn->lock);
conn->chan_list.lock = RW_LOCK_UNLOCKED;
l2cap_conn_init_timer(conn);
__l2cap_conn_link(iff, conn);
DBG("%s -> %s, %p", batostr(src), batostr(dst), conn);
BT_DBG("hcon %p conn %p", hcon, conn);
MOD_INC_USE_COUNT;
return conn;
}
/* Delete connection on the interface.
* Interface must be locked
*/
static int l2cap_conn_del(struct l2cap_conn *conn, int err)
static int l2cap_conn_del(struct hci_conn *hcon, int err)
{
struct l2cap_conn *conn;
struct sock *sk;
DBG("conn %p, state %d, err %d", conn, conn->state, err);
l2cap_conn_clear_timer(conn);
__l2cap_conn_unlink(conn->iff, conn);
if (!(conn = hcon->l2cap_data))
return 0;
conn->state = BT_CLOSED;
BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
if (conn->rx_skb)
kfree_skb(conn->rx_skb);
......@@ -310,75 +165,57 @@ static int l2cap_conn_del(struct l2cap_conn *conn, int err)
/* Kill channels */
while ((sk = conn->chan_list.head)) {
bh_lock_sock(sk);
l2cap_sock_clear_timer(sk);
l2cap_chan_del(sk, err);
bh_unlock_sock(sk);
l2cap_sock_kill(sk);
}
hcon->l2cap_data = NULL;
kfree(conn);
MOD_DEC_USE_COUNT;
return 0;
}
static inline struct l2cap_conn *l2cap_get_conn_by_addr(struct l2cap_iff *iff, bdaddr_t *dst)
{
struct list_head *p;
list_for_each(p, &iff->conn_list) {
struct l2cap_conn *c;
c = list_entry(p, struct l2cap_conn, list);
if (!bacmp(&c->dst, dst))
return c;
}
return NULL;
}
int l2cap_connect(struct sock *sk)
{
bdaddr_t *src = &l2cap_pi(sk)->src;
bdaddr_t *dst = &l2cap_pi(sk)->dst;
bdaddr_t *src = &bluez_sk(sk)->src;
bdaddr_t *dst = &bluez_sk(sk)->dst;
struct l2cap_conn *conn;
struct l2cap_iff *iff;
struct hci_conn *hcon;
struct hci_dev *hdev;
int err = 0;
DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
read_lock_bh(&l2cap_rt_lock);
if (!(hdev = hci_get_route(dst, src)))
return -EHOSTUNREACH;
/* Get route to remote BD address */
if (!(iff = l2cap_get_route(src, dst))) {
err = -EHOSTUNREACH;
goto done;
}
hci_dev_lock_bh(hdev);
/* Update source addr of the socket */
bacpy(src, iff->bdaddr);
err = -ENOMEM;
l2cap_iff_lock(iff);
hcon = hci_connect(hdev, ACL_LINK, dst);
if (!hcon)
goto done;
if (!(conn = l2cap_get_conn_by_addr(iff, dst))) {
/* Connection doesn't exist */
if (!(conn = l2cap_conn_add(iff, dst))) {
l2cap_iff_unlock(iff);
err = -ENOMEM;
goto done;
}
conn->out = 1;
conn = l2cap_conn_add(hcon, 0);
if (!conn) {
hci_conn_put(hcon);
goto done;
}
l2cap_iff_unlock(iff);
err = 0;
/* Update source addr of the socket */
bacpy(src, conn->src);
l2cap_chan_add(conn, sk, NULL);
sk->state = BT_CONNECT;
l2cap_sock_set_timer(sk, sk->sndtimeo);
switch (conn->state) {
case BT_CONNECTED:
if (hcon->state == BT_CONNECTED) {
if (sk->type == SOCK_SEQPACKET) {
l2cap_conn_req req;
req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
......@@ -388,92 +225,14 @@ int l2cap_connect(struct sock *sk)
l2cap_sock_clear_timer(sk);
sk->state = BT_CONNECTED;
}
break;
case BT_CONNECT:
break;
default:
/* Create ACL connection */
conn->state = BT_CONNECT;
hci_connect(iff->hdev, dst);
break;
};
}
done:
read_unlock_bh(&l2cap_rt_lock);
hci_dev_unlock_bh(hdev);
hci_dev_put(hdev);
return err;
}
/* ------ Channel queues for listening sockets ------ */
void l2cap_accept_queue(struct sock *parent, struct sock *sk)
{
struct l2cap_accept_q *q = &l2cap_pi(parent)->accept_q;
DBG("parent %p, sk %p", parent, sk);
sock_hold(sk);
l2cap_pi(sk)->parent = parent;
l2cap_pi(sk)->next_q = NULL;
if (!q->head) {
q->head = q->tail = sk;
} else {
struct sock *tail = q->tail;
l2cap_pi(sk)->prev_q = tail;
l2cap_pi(tail)->next_q = sk;
q->tail = sk;
}
parent->ack_backlog++;
}
void l2cap_accept_unlink(struct sock *sk)
{
struct sock *parent = l2cap_pi(sk)->parent;
struct l2cap_accept_q *q = &l2cap_pi(parent)->accept_q;
struct sock *next, *prev;
DBG("sk %p", sk);
next = l2cap_pi(sk)->next_q;
prev = l2cap_pi(sk)->prev_q;
if (sk == q->head)
q->head = next;
if (sk == q->tail)
q->tail = prev;
if (next)
l2cap_pi(next)->prev_q = prev;
if (prev)
l2cap_pi(prev)->next_q = next;
l2cap_pi(sk)->parent = NULL;
parent->ack_backlog--;
__sock_put(sk);
}
/* Get next connected channel in queue. */
struct sock *l2cap_accept_dequeue(struct sock *parent, int state)
{
struct l2cap_accept_q *q = &l2cap_pi(parent)->accept_q;
struct sock *sk;
for (sk = q->head; sk; sk = l2cap_pi(sk)->next_q) {
if (!state || sk->state == state) {
l2cap_accept_unlink(sk);
break;
}
}
DBG("parent %p, sk %p", parent, sk);
return sk;
}
/* -------- Socket interface ---------- */
static struct sock *__l2cap_get_sock_by_addr(struct sockaddr_l2 *addr)
{
......@@ -483,7 +242,7 @@ static struct sock *__l2cap_get_sock_by_addr(struct sockaddr_l2 *addr)
for (sk = l2cap_sk_list.head; sk; sk = sk->next) {
if (l2cap_pi(sk)->psm == psm &&
!bacmp(&l2cap_pi(sk)->src, src))
!bacmp(&bluez_sk(sk)->src, src))
break;
}
......@@ -500,36 +259,34 @@ static struct sock *l2cap_get_sock_listen(bdaddr_t *src, __u16 psm)
read_lock(&l2cap_sk_list.lock);
for (sk = l2cap_sk_list.head; sk; sk = sk->next) {
struct l2cap_pinfo *pi;
if (sk->state != BT_LISTEN)
continue;
pi = l2cap_pi(sk);
if (pi->psm == psm) {
if (l2cap_pi(sk)->psm == psm) {
/* Exact match. */
if (!bacmp(&pi->src, src))
if (!bacmp(&bluez_sk(sk)->src, src))
break;
/* Closest match */
if (!bacmp(&pi->src, BDADDR_ANY))
if (!bacmp(&bluez_sk(sk)->src, BDADDR_ANY))
sk1 = sk;
}
}
read_unlock(&l2cap_sk_list.lock);
return sk ? sk : sk1;
}
static void l2cap_sock_destruct(struct sock *sk)
{
DBG("sk %p", sk);
BT_DBG("sk %p", sk);
skb_queue_purge(&sk->receive_queue);
skb_queue_purge(&sk->write_queue);
if (sk->protinfo)
kfree(sk->protinfo);
MOD_DEC_USE_COUNT;
}
......@@ -537,10 +294,10 @@ static void l2cap_sock_cleanup_listen(struct sock *parent)
{
struct sock *sk;
DBG("parent %p", parent);
BT_DBG("parent %p", parent);
/* Close not yet accepted channels */
while ((sk = l2cap_accept_dequeue(parent, 0)))
while ((sk = bluez_accept_dequeue(parent, NULL)))
l2cap_sock_close(sk);
parent->state = BT_CLOSED;
......@@ -555,7 +312,7 @@ static void l2cap_sock_kill(struct sock *sk)
if (!sk->zapped || sk->socket)
return;
DBG("sk %p state %d", sk, sk->state);
BT_DBG("sk %p state %d", sk, sk->state);
/* Kill poor orphan */
bluez_sock_unlink(&l2cap_sk_list, sk);
......@@ -564,19 +321,10 @@ static void l2cap_sock_kill(struct sock *sk)
}
/* Close socket.
* Must be called on unlocked socket.
*/
static void l2cap_sock_close(struct sock *sk)
static void __l2cap_sock_close(struct sock *sk, int reason)
{
struct l2cap_conn *conn;
l2cap_sock_clear_timer(sk);
lock_sock(sk);
conn = l2cap_pi(sk)->conn;
DBG("sk %p state %d conn %p socket %p", sk, sk->state, conn, sk->socket);
BT_DBG("sk %p state %d socket %p", sk, sk->state, sk->socket);
switch (sk->state) {
case BT_LISTEN:
......@@ -586,30 +334,39 @@ static void l2cap_sock_close(struct sock *sk)
case BT_CONNECTED:
case BT_CONFIG:
if (sk->type == SOCK_SEQPACKET) {
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
l2cap_disconn_req req;
sk->state = BT_DISCONN;
l2cap_sock_set_timer(sk, HZ * 5);
req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
l2cap_send_req(conn, L2CAP_DISCONN_REQ, L2CAP_DISCONN_REQ_SIZE, &req);
l2cap_sock_set_timer(sk, sk->sndtimeo);
} else {
l2cap_chan_del(sk, ECONNRESET);
l2cap_chan_del(sk, reason);
}
break;
case BT_CONNECT:
case BT_CONNECT2:
case BT_DISCONN:
l2cap_chan_del(sk, ECONNRESET);
l2cap_chan_del(sk, reason);
break;
default:
sk->zapped = 1;
break;
};
}
/* Must be called on unlocked socket. */
static void l2cap_sock_close(struct sock *sk)
{
l2cap_sock_clear_timer(sk);
lock_sock(sk);
__l2cap_sock_close(sk, ECONNRESET);
release_sock(sk);
l2cap_sock_kill(sk);
......@@ -619,16 +376,18 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
DBG("sk %p", sk);
BT_DBG("sk %p", sk);
if (parent) {
sk->type = parent->type;
pi->imtu = l2cap_pi(parent)->imtu;
pi->omtu = l2cap_pi(parent)->omtu;
pi->link_mode = l2cap_pi(parent)->link_mode;
} else {
pi->imtu = L2CAP_DEFAULT_MTU;
pi->omtu = 0;
pi->link_mode = 0;
}
/* Default config options */
......@@ -640,17 +399,12 @@ static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, int prio)
{
struct sock *sk;
if (!(sk = sk_alloc(PF_BLUETOOTH, prio, 1, NULL)))
sk = bluez_sock_alloc(sock, proto, sizeof(struct l2cap_pinfo), prio);
if (!sk)
return NULL;
sock_init_data(sock, sk);
sk->zapped = 0;
sk->destruct = l2cap_sock_destruct;
sk->sndtimeo = L2CAP_CONN_TIMEOUT;
sk->protocol = proto;
sk->state = BT_OPEN;
l2cap_sock_init_timer(sk);
......@@ -658,7 +412,6 @@ static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, int prio)
bluez_sock_link(&l2cap_sk_list, sk);
MOD_INC_USE_COUNT;
return sk;
}
......@@ -666,20 +419,19 @@ static int l2cap_sock_create(struct socket *sock, int protocol)
{
struct sock *sk;
DBG("sock %p", sock);
sock->state = SS_UNCONNECTED;
BT_DBG("sock %p", sock);
if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
sock->ops = &l2cap_sock_ops;
sock->state = SS_UNCONNECTED;
sock->ops = &l2cap_sock_ops;
if (!(sk = l2cap_sock_alloc(sock, protocol, GFP_KERNEL)))
sk = l2cap_sock_alloc(sock, protocol, GFP_KERNEL);
if (!sk)
return -ENOMEM;
l2cap_sock_init(sk, NULL);
return 0;
}
......@@ -689,7 +441,7 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_
struct sock *sk = sock->sk;
int err = 0;
DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
......@@ -709,7 +461,7 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_
}
/* Save source address */
bacpy(&l2cap_pi(sk)->src, &la->l2_bdaddr);
bacpy(&bluez_sk(sk)->src, &la->l2_bdaddr);
l2cap_pi(sk)->psm = la->l2_psm;
sk->state = BT_BOUND;
......@@ -718,48 +470,6 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_
done:
release_sock(sk);
return err;
}
static int l2cap_sock_w4_connect(struct sock *sk, int flags)
{
DECLARE_WAITQUEUE(wait, current);
long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
int err = 0;
DBG("sk %p", sk);
add_wait_queue(sk->sleep, &wait);
current->state = TASK_INTERRUPTIBLE;
while (sk->state != BT_CONNECTED) {
if (!timeo) {
err = -EAGAIN;
break;
}
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
err = 0;
if (sk->state == BT_CONNECTED)
break;
if (sk->err) {
err = sock_error(sk);
break;
}
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
break;
}
}
current->state = TASK_RUNNING;
remove_wait_queue(sk->sleep, &wait);
return err;
}
......@@ -771,31 +481,48 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
lock_sock(sk);
DBG("sk %p", sk);
BT_DBG("sk %p", sk);
if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
err = -EINVAL;
goto done;
}
if (sk->state != BT_OPEN && sk->state != BT_BOUND) {
err = -EBADFD;
if (sk->type == SOCK_SEQPACKET && !la->l2_psm) {
err = -EINVAL;
goto done;
}
if (sk->type == SOCK_SEQPACKET && !la->l2_psm) {
err = -EINVAL;
switch(sk->state) {
case BT_CONNECT:
case BT_CONNECT2:
case BT_CONFIG:
/* Already connecting */
goto wait;
case BT_CONNECTED:
/* Already connected */
goto done;
case BT_OPEN:
case BT_BOUND:
/* Can connect */
break;
default:
err = -EBADFD;
goto done;
}
/* Set destination address and psm */
bacpy(&l2cap_pi(sk)->dst, &la->l2_bdaddr);
bacpy(&bluez_sk(sk)->dst, &la->l2_bdaddr);
l2cap_pi(sk)->psm = la->l2_psm;
if ((err = l2cap_connect(sk)))
goto done;
err = l2cap_sock_w4_connect(sk, flags);
wait:
err = bluez_sock_w4_connect(sk, flags);
done:
release_sock(sk);
......@@ -807,7 +534,7 @@ int l2cap_sock_listen(struct socket *sock, int backlog)
struct sock *sk = sock->sk;
int err = 0;
DBG("sk %p backlog %d", sk, backlog);
BT_DBG("sk %p backlog %d", sk, backlog);
lock_sock(sk);
......@@ -833,7 +560,7 @@ int l2cap_sock_listen(struct socket *sock, int backlog)
int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *ch;
struct sock *sk = sock->sk, *nsk;
long timeo;
int err = 0;
......@@ -846,12 +573,12 @@ int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
DBG("sk %p timeo %ld", sk, timeo);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk->sleep, &wait);
current->state = TASK_INTERRUPTIBLE;
while (!(ch = l2cap_accept_dequeue(sk, BT_CONNECTED))) {
while (!(nsk = bluez_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
err = -EAGAIN;
break;
......@@ -871,20 +598,18 @@ int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
break;
}
}
current->state = TASK_RUNNING;
set_current_state(TASK_RUNNING);
remove_wait_queue(sk->sleep, &wait);
if (err)
goto done;
sock_graft(ch, newsock);
newsock->state = SS_CONNECTED;
DBG("new socket %p", ch);
BT_DBG("new socket %p", nsk);
done:
release_sock(sk);
return err;
}
......@@ -893,15 +618,15 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
struct sock *sk = sock->sk;
DBG("sock %p, sk %p", sock, sk);
BT_DBG("sock %p, sk %p", sock, sk);
addr->sa_family = AF_BLUETOOTH;
*len = sizeof(struct sockaddr_l2);
if (peer)
bacpy(&la->l2_bdaddr, &l2cap_pi(sk)->dst);
bacpy(&la->l2_bdaddr, &bluez_sk(sk)->dst);
else
bacpy(&la->l2_bdaddr, &l2cap_pi(sk)->src);
bacpy(&la->l2_bdaddr, &bluez_sk(sk)->src);
la->l2_psm = l2cap_pi(sk)->psm;
......@@ -913,7 +638,7 @@ static int l2cap_sock_sendmsg(struct socket *sock, struct msghdr *msg, int len,
struct sock *sk = sock->sk;
int err = 0;
DBG("sock %p, sk %p", sock, sk);
BT_DBG("sock %p, sk %p", sock, sk);
if (sk->err)
return sock_error(sk);
......@@ -932,58 +657,35 @@ static int l2cap_sock_sendmsg(struct socket *sock, struct msghdr *msg, int len,
return err;
}
static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg, int len, int flags, struct scm_cookie *scm)
{
struct sock *sk = sock->sk;
int noblock = flags & MSG_DONTWAIT;
int copied, err;
struct sk_buff *skb;
DBG("sock %p, sk %p", sock, sk);
if (flags & (MSG_OOB))
return -EOPNOTSUPP;
if (sk->state == BT_CLOSED)
return 0;
if (!(skb = skb_recv_datagram(sk, flags, noblock, &err)))
return err;
msg->msg_namelen = 0;
copied = skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
skb->h.raw = skb->data;
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
skb_free_datagram(sk, skb);
return err ? : copied;
}
int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char *optval, int optlen)
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char *optval, int optlen)
{
struct sock *sk = sock->sk;
struct l2cap_options opts;
int err = 0;
int err = 0, len;
__u32 opt;
DBG("sk %p", sk);
BT_DBG("sk %p", sk);
lock_sock(sk);
switch (optname) {
case L2CAP_OPTIONS:
if (copy_from_user((char *)&opts, optval, optlen)) {
len = MIN(sizeof(opts), optlen);
if (copy_from_user((char *)&opts, optval, len)) {
err = -EFAULT;
break;
}
l2cap_pi(sk)->imtu = opts.imtu;
l2cap_pi(sk)->omtu = opts.omtu;
l2cap_pi(sk)->imtu = opts.imtu;
l2cap_pi(sk)->omtu = opts.omtu;
break;
case L2CAP_LM:
if (get_user(opt, (__u32 *)optval)) {
err = -EFAULT;
break;
}
l2cap_pi(sk)->link_mode = opt;
break;
default:
......@@ -995,7 +697,7 @@ int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char *opt
return err;
}
int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char *optval, int *optlen)
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char *optval, int *optlen)
{
struct sock *sk = sock->sk;
struct l2cap_options opts;
......@@ -1019,13 +721,18 @@ int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char *opt
break;
case L2CAP_LM:
if (put_user(l2cap_pi(sk)->link_mode, (__u32 *)optval))
err = -EFAULT;
break;
case L2CAP_CONNINFO:
if (sk->state != BT_CONNECTED) {
err = -ENOTCONN;
break;
}
cinfo.hci_handle = l2cap_pi(sk)->conn->hconn->handle;
cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
len = MIN(len, sizeof(cinfo));
if (copy_to_user(optval, (char *)&cinfo, len))
......@@ -1036,57 +743,23 @@ int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char *opt
default:
err = -ENOPROTOOPT;
break;
};
release_sock(sk);
return err;
}
static unsigned int l2cap_sock_poll(struct file * file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
struct l2cap_accept_q *aq;
unsigned int mask;
DBG("sock %p, sk %p", sock, sk);
poll_wait(file, sk->sleep, wait);
mask = 0;
if (sk->err || !skb_queue_empty(&sk->error_queue))
mask |= POLLERR;
if (sk->shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
aq = &l2cap_pi(sk)->accept_q;
if (!skb_queue_empty(&sk->receive_queue) || aq->head || (sk->shutdown & RCV_SHUTDOWN))
mask |= POLLIN | POLLRDNORM;
if (sk->state == BT_CLOSED)
mask |= POLLHUP;
if (sock_writeable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
};
return mask;
release_sock(sk);
return err;
}
static int l2cap_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
DBG("sock %p, sk %p", sock, sk);
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
sock_orphan(sk);
l2cap_sock_close(sk);
return 0;
}
......@@ -1103,17 +776,6 @@ static struct sock * __l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, __u16 c
return s;
}
static inline struct sock *l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, __u16 cid)
{
struct sock *s;
read_lock(&l->lock);
s = __l2cap_get_chan_by_dcid(l, cid);
read_unlock(&l->lock);
return s;
}
static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, __u16 cid)
{
struct sock *s;
......@@ -1125,37 +787,16 @@ static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, __u16 ci
return s;
}
/* Find channel with given SCID.
* Returns locked socket */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, __u16 cid)
{
struct sock *s;
read_lock(&l->lock);
s = __l2cap_get_chan_by_scid(l, cid);
if (s) bh_lock_sock(s);
read_unlock(&l->lock);
return s;
}
static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, __u8 ident)
{
struct sock *s;
for (s = l->head; s; s = l2cap_pi(s)->next_c) {
if (l2cap_pi(s)->ident == ident)
break;
}
return s;
}
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, __u8 ident)
{
struct sock *s;
read_lock(&l->lock);
s = __l2cap_get_chan_by_ident(l, ident);
read_unlock(&l->lock);
return s;
}
......@@ -1204,11 +845,8 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
{
struct l2cap_chan_list *l = &conn->chan_list;
DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
l2cap_conn_clear_timer(conn);
atomic_inc(&conn->refcnt);
l2cap_pi(sk)->conn = conn;
if (sk->type == SOCK_SEQPACKET) {
......@@ -1224,13 +862,12 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct so
__l2cap_chan_link(l, sk);
if (parent)
l2cap_accept_queue(parent, sk);
bluez_accept_enqueue(parent, sk);
}
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
struct l2cap_chan_list *l = &conn->chan_list;
write_lock(&l->lock);
__l2cap_chan_add(conn, sk, parent);
write_unlock(&l->lock);
......@@ -1240,44 +877,28 @@ static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, stru
* Must be called on the locked socket. */
static void l2cap_chan_del(struct sock *sk, int err)
{
struct l2cap_conn *conn;
struct sock *parent;
conn = l2cap_pi(sk)->conn;
parent = l2cap_pi(sk)->parent;
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sock *parent = bluez_sk(sk)->parent;
DBG("sk %p, conn %p, err %d", sk, conn, err);
l2cap_sock_clear_timer(sk);
if (parent) {
/* Unlink from parent accept queue */
bh_lock_sock(parent);
l2cap_accept_unlink(sk);
bh_unlock_sock(parent);
}
BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
if (conn) {
long timeout;
/* Unlink from channel list */
l2cap_chan_unlink(&conn->chan_list, sk);
l2cap_pi(sk)->conn = NULL;
if (conn->out)
timeout = L2CAP_DISCONN_TIMEOUT;
else
timeout = L2CAP_CONN_IDLE_TIMEOUT;
if (atomic_dec_and_test(&conn->refcnt) && conn->state == BT_CONNECTED) {
/* Schedule Baseband disconnect */
l2cap_conn_set_timer(conn, timeout);
}
hci_conn_put(conn->hcon);
}
sk->state = BT_CLOSED;
sk->err = err;
sk->state_change(sk);
sk->state = BT_CLOSED;
sk->err = err;
sk->zapped = 1;
if (parent)
parent->data_ready(parent, 0);
else
sk->state_change(sk);
}
static void l2cap_conn_ready(struct l2cap_conn *conn)
......@@ -1285,7 +906,7 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
struct l2cap_chan_list *l = &conn->chan_list;
struct sock *sk;
DBG("conn %p", conn);
BT_DBG("conn %p", conn);
read_lock(&l->lock);
......@@ -1293,16 +914,14 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
bh_lock_sock(sk);
if (sk->type != SOCK_SEQPACKET) {
l2cap_sock_clear_timer(sk);
sk->state = BT_CONNECTED;
sk->state_change(sk);
l2cap_sock_clear_timer(sk);
} else if (sk->state == BT_CONNECT) {
l2cap_conn_req req;
req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
req.psm = l2cap_pi(sk)->psm;
l2cap_send_req(conn, L2CAP_CONN_REQ, L2CAP_CONN_REQ_SIZE, &req);
l2cap_sock_set_timer(sk, sk->sndtimeo);
}
bh_unlock_sock(sk);
......@@ -1313,9 +932,9 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
static void l2cap_chan_ready(struct sock *sk)
{
struct sock *parent = l2cap_pi(sk)->parent;
struct sock *parent = bluez_sk(sk)->parent;
DBG("sk %p, parent %p", sk, parent);
BT_DBG("sk %p, parent %p", sk, parent);
l2cap_pi(sk)->conf_state = 0;
l2cap_sock_clear_timer(sk);
......@@ -1330,7 +949,7 @@ static void l2cap_chan_ready(struct sock *sk)
/* Incoming channel.
* Wake up socket sleeping on accept.
*/
parent->data_ready(parent, 1);
parent->data_ready(parent, 0);
}
}
......@@ -1341,7 +960,7 @@ void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
struct sk_buff *nskb;
struct sock * sk;
DBG("conn %p", conn);
BT_DBG("conn %p", conn);
read_lock(&l->lock);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
......@@ -1355,8 +974,8 @@ void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
continue;
skb_queue_tail(&sk->receive_queue, nskb);
sk->data_ready(sk, nskb->len);
if (sock_queue_rcv_skb(sk, nskb))
kfree_skb(nskb);
}
read_unlock(&l->lock);
}
......@@ -1372,10 +991,10 @@ static int l2cap_chan_send(struct sock *sk, struct msghdr *msg, int len)
if (len > l2cap_pi(sk)->omtu)
return -EINVAL;
DBG("sk %p len %d", sk, len);
BT_DBG("sk %p len %d", sk, len);
/* First fragment (with L2CAP header) */
count = MIN(conn->iff->mtu - L2CAP_HDR_SIZE, len);
count = MIN(conn->mtu - L2CAP_HDR_SIZE, len);
size = L2CAP_HDR_SIZE + count;
if (!(skb = bluez_skb_send_alloc(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)))
return err;
......@@ -1396,7 +1015,7 @@ static int l2cap_chan_send(struct sock *sk, struct msghdr *msg, int len)
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
while (len) {
count = MIN(conn->iff->mtu, len);
count = MIN(conn->mtu, len);
*frag = bluez_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
if (!*frag)
......@@ -1413,7 +1032,7 @@ static int l2cap_chan_send(struct sock *sk, struct msghdr *msg, int len)
frag = &(*frag)->next;
}
if ((err = hci_send_acl(conn->hconn, skb, 0)) < 0)
if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
goto fail;
return sent;
......@@ -1445,64 +1064,98 @@ static inline __u8 l2cap_get_ident(struct l2cap_conn *conn)
return id;
}
static inline struct sk_buff *l2cap_build_cmd(__u8 code, __u8 ident, __u16 len, void *data)
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
__u8 code, __u8 ident, __u16 dlen, void *data)
{
struct sk_buff *skb;
struct sk_buff *skb, **frag;
l2cap_cmd_hdr *cmd;
l2cap_hdr *lh;
int size;
int len, count;
DBG("code 0x%2.2x, ident 0x%2.2x, len %d", code, ident, len);
BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
size = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + len;
if (!(skb = bluez_skb_alloc(size, GFP_ATOMIC)))
len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
count = MIN(conn->mtu, len);
skb = bluez_skb_alloc(count, GFP_ATOMIC);
if (!skb)
return NULL;
lh = (l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->len = __cpu_to_le16(L2CAP_CMD_HDR_SIZE + len);
lh->len = __cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
lh->cid = __cpu_to_le16(0x0001);
cmd = (l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
cmd->code = code;
cmd->ident = ident;
cmd->len = __cpu_to_le16(len);
cmd->len = __cpu_to_le16(dlen);
if (dlen) {
count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
memcpy(skb_put(skb, count), data, count);
data += count;
}
len -= skb->len;
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
while (len) {
count = MIN(conn->mtu, len);
*frag = bluez_skb_alloc(count, GFP_ATOMIC);
if (!*frag)
goto fail;
memcpy(skb_put(*frag, count), data, count);
if (len)
memcpy(skb_put(skb, len), data, len);
len -= count;
data += count;
frag = &(*frag)->next;
}
return skb;
fail:
kfree_skb(skb);
return NULL;
}
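/* Note on the helper above: a signalling PDU larger than the ACL MTU is
 * split across skb_shinfo(skb)->frag_list, one continuation buffer of at
 * most conn->mtu bytes each, so the lower layer can transmit it as ACL
 * start and continuation packets. */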
static int l2cap_send_req(struct l2cap_conn *conn, __u8 code, __u16 len, void *data)
{
struct sk_buff *skb;
__u8 ident;
__u8 ident = l2cap_get_ident(conn);
struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
DBG("code 0x%2.2x", code);
BT_DBG("code 0x%2.2x", code);
ident = l2cap_get_ident(conn);
if (!(skb = l2cap_build_cmd(code, ident, len, data)))
if (!skb)
return -ENOMEM;
return hci_send_acl(conn->hconn, skb, 0);
return hci_send_acl(conn->hcon, skb, 0);
}
static int l2cap_send_rsp(struct l2cap_conn *conn, __u8 ident, __u8 code, __u16 len, void *data)
{
struct sk_buff *skb;
struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
DBG("code 0x%2.2x", code);
BT_DBG("code 0x%2.2x", code);
if (!(skb = l2cap_build_cmd(code, ident, len, data)))
if (!skb)
return -ENOMEM;
return hci_send_acl(conn->hconn, skb, 0);
return hci_send_acl(conn->hcon, skb, 0);
}
static inline int l2cap_get_conf_opt(__u8 **ptr, __u8 *type, __u32 *val)
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
l2cap_conf_opt *opt = (l2cap_conf_opt *) (*ptr);
l2cap_conf_opt *opt = *ptr;
int len;
len = L2CAP_CONF_OPT_SIZE + opt->len;
*ptr += len;
*type = opt->type;
*olen = opt->len;
switch (opt->len) {
case 1:
*val = *((__u8 *) opt->val);
......@@ -1517,28 +1170,24 @@ static inline int l2cap_get_conf_opt(__u8 **ptr, __u8 *type, __u32 *val)
break;
default:
*val = 0L;
*val = (unsigned long) opt->val;
break;
};
DBG("type 0x%2.2x len %d val 0x%8.8x", *type, opt->len, *val);
len = L2CAP_CONF_OPT_SIZE + opt->len;
*ptr += len;
BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
return len;
}
static inline void l2cap_parse_conf_req(struct sock *sk, char *data, int len)
static inline void l2cap_parse_conf_req(struct sock *sk, void *data, int len)
{
__u8 type, hint; __u32 val;
__u8 *ptr = data;
int type, hint, olen;
unsigned long val;
void *ptr = data;
DBG("sk %p len %d", sk, len);
BT_DBG("sk %p len %d", sk, len);
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&ptr, &type, &val);
len -= l2cap_get_conf_opt(&ptr, &type, &olen, &val);
hint = type & 0x80;
type &= 0x7f;
......@@ -1559,20 +1208,21 @@ static inline void l2cap_parse_conf_req(struct sock *sk, char *data, int len)
if (hint)
break;
/* FIXME: Reject unknon option */
/* FIXME: Reject unknown option */
break;
};
}
}
static inline void l2cap_add_conf_opt(__u8 **ptr, __u8 type, __u8 len, __u32 val)
static void l2cap_add_conf_opt(void **ptr, __u8 type, __u8 len, unsigned long val)
{
register l2cap_conf_opt *opt = (l2cap_conf_opt *) (*ptr);
register l2cap_conf_opt *opt = *ptr;
DBG("type 0x%2.2x len %d val 0x%8.8x", type, len, val);
BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
opt->type = type;
opt->len = len;
switch (len) {
case 1:
*((__u8 *) opt->val) = val;
......@@ -1585,18 +1235,22 @@ static inline void l2cap_add_conf_opt(__u8 **ptr, __u8 type, __u8 len, __u32 val
case 4:
*((__u32 *) opt->val) = __cpu_to_le32(val);
break;
default:
memcpy(opt->val, (void *) val, len);
break;
};
*ptr += L2CAP_CONF_OPT_SIZE + len;
}
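/* Minimal usage sketch for the two option helpers above (hypothetical
 * local buffer; constants as used elsewhere in this file):
 *
 *	__u8 buf[16];
 *	void *ptr = buf;
 *	int type, olen;
 *	unsigned long val;
 *
 *	l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, 672);
 *
 *	ptr = buf;
 *	l2cap_get_conf_opt(&ptr, &type, &olen, &val);
 *	// now type == L2CAP_CONF_MTU, olen == 2, val == 672
 */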
static int l2cap_build_conf_req(struct sock *sk, __u8 *data)
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
l2cap_conf_req *req = (l2cap_conf_req *) data;
__u8 *ptr = req->data;
void *ptr = req->data;
DBG("sk %p", sk);
BT_DBG("sk %p", sk);
if (pi->imtu != L2CAP_DEFAULT_MTU)
l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
......@@ -1611,31 +1265,31 @@ static int l2cap_build_conf_req(struct sock *sk, __u8 *data)
return ptr - data;
}
static int l2cap_conf_output(struct sock *sk, __u8 **ptr)
static inline int l2cap_conf_output(struct sock *sk, void **ptr)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
int result = 0;
/* Configure output options and let other side know
/* Configure output options and let the other side know
* which ones we don't like.
*/
if (pi->conf_mtu < pi->omtu) {
l2cap_add_conf_opt(ptr, L2CAP_CONF_MTU, 2, l2cap_pi(sk)->omtu);
l2cap_add_conf_opt(ptr, L2CAP_CONF_MTU, 2, pi->omtu);
result = L2CAP_CONF_UNACCEPT;
} else {
pi->omtu = pi->conf_mtu;
}
DBG("sk %p result %d", sk, result);
BT_DBG("sk %p result %d", sk, result);
return result;
}
static int l2cap_build_conf_rsp(struct sock *sk, __u8 *data, int *result)
static int l2cap_build_conf_rsp(struct sock *sk, void *data, int *result)
{
l2cap_conf_rsp *rsp = (l2cap_conf_rsp *) data;
__u8 *ptr = rsp->data;
void *ptr = rsp->data;
DBG("sk %p complete %d", sk, result ? 1 : 0);
BT_DBG("sk %p complete %d", sk, result ? 1 : 0);
if (result)
*result = l2cap_conf_output(sk, &ptr);
......@@ -1653,62 +1307,78 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, l2cap_cmd_hdr *cmd,
l2cap_conn_req *req = (l2cap_conn_req *) data;
l2cap_conn_rsp rsp;
struct sock *sk, *parent;
int result = 0, status = 0;
__u16 scid = __le16_to_cpu(req->scid);
__u16 dcid = 0, scid = __le16_to_cpu(req->scid);
__u16 psm = req->psm;
DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
/* Check if we have socket listening on psm */
if (!(parent = l2cap_get_sock_listen(&conn->src, psm)))
goto reject;
if (!(parent = l2cap_get_sock_listen(conn->src, psm))) {
result = L2CAP_CR_BAD_PSM;
goto resp;
}
bh_lock_sock(parent);
write_lock(&list->lock);
bh_lock_sock(parent);
result = L2CAP_CR_NO_MEM;
/* Check if we already have channel with that dcid */
if (__l2cap_get_chan_by_dcid(list, scid))
goto unlock;
/* Check for backlog size */
if (parent->ack_backlog > parent->max_ack_backlog)
if (parent->ack_backlog > parent->max_ack_backlog) {
BT_DBG("backlog full %d", parent->ack_backlog);
goto unlock;
}
if (!(sk = l2cap_sock_alloc(NULL, BTPROTO_L2CAP, GFP_ATOMIC)))
goto unlock;
l2cap_sock_init(sk, parent);
bacpy(&l2cap_pi(sk)->src, &conn->src);
bacpy(&l2cap_pi(sk)->dst, &conn->dst);
bacpy(&bluez_sk(sk)->src, conn->src);
bacpy(&bluez_sk(sk)->dst, conn->dst);
l2cap_pi(sk)->psm = psm;
l2cap_pi(sk)->dcid = scid;
hci_conn_hold(conn->hcon);
__l2cap_chan_add(conn, sk, parent);
sk->state = BT_CONFIG;
dcid = l2cap_pi(sk)->scid;
write_unlock(&list->lock);
bh_unlock_sock(parent);
l2cap_sock_set_timer(sk, sk->sndtimeo);
rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
rsp.result = __cpu_to_le16(0);
rsp.status = __cpu_to_le16(0);
l2cap_send_rsp(conn, cmd->ident, L2CAP_CONN_RSP, L2CAP_CONN_RSP_SIZE, &rsp);
/* Service level security */
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHEN_PEND;
sk->state = BT_CONNECT2;
l2cap_pi(sk)->ident = cmd->ident;
if (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) {
if (!hci_conn_encrypt(conn->hcon))
goto unlock;
} else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
if (!hci_conn_auth(conn->hcon))
goto unlock;
}
return 0;
sk->state = BT_CONFIG;
result = status = 0;
unlock:
write_unlock(&list->lock);
bh_unlock_sock(parent);
write_unlock(&list->lock);
reject:
resp:
rsp.scid = __cpu_to_le16(scid);
rsp.dcid = __cpu_to_le16(0);
rsp.status = __cpu_to_le16(0);
rsp.result = __cpu_to_le16(L2CAP_CONN_NO_MEM);
rsp.dcid = __cpu_to_le16(dcid);
rsp.result = __cpu_to_le16(result);
rsp.status = __cpu_to_le16(status);
l2cap_send_rsp(conn, cmd->ident, L2CAP_CONN_RSP, L2CAP_CONN_RSP_SIZE, &rsp);
return 0;
}
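/* Flow of the handler above: when the listening socket asks for
 * authentication or encryption, the response is deferred with result
 * L2CAP_CR_PEND / status L2CAP_CS_AUTHEN_PEND and the channel is parked
 * in BT_CONNECT2; l2cap_auth_cfm()/l2cap_encrypt_cfm() further down send
 * the final L2CAP_CONN_RSP (success, or L2CAP_CR_SEC_BLOCK on failure). */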
......@@ -1717,29 +1387,33 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, l2cap_cmd_hdr *cmd,
l2cap_conn_rsp *rsp = (l2cap_conn_rsp *) data;
__u16 scid, dcid, result, status;
struct sock *sk;
char req[128];
scid = __le16_to_cpu(rsp->scid);
dcid = __le16_to_cpu(rsp->dcid);
result = __le16_to_cpu(rsp->result);
status = __le16_to_cpu(rsp->status);
DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
return -ENOENT;
bh_lock_sock(sk);
if (!result) {
char req[64];
switch (result) {
case L2CAP_CR_SUCCESS:
sk->state = BT_CONFIG;
l2cap_pi(sk)->dcid = dcid;
l2cap_pi(sk)->conf_state |= CONF_REQ_SENT;
l2cap_send_req(conn, L2CAP_CONF_REQ, l2cap_build_conf_req(sk, req), req);
} else {
break;
case L2CAP_CR_PEND:
break;
default:
l2cap_chan_del(sk, ECONNREFUSED);
break;
}
bh_unlock_sock(sk);
......@@ -1757,13 +1431,11 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, l2cap_cmd_hdr *cmd,
dcid = __le16_to_cpu(req->dcid);
flags = __le16_to_cpu(req->flags);
DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
return -ENOENT;
bh_lock_sock(sk);
l2cap_parse_conf_req(sk, req->data, cmd->len - L2CAP_CONF_REQ_SIZE);
if (flags & 0x01) {
......@@ -1791,7 +1463,6 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, l2cap_cmd_hdr *cmd,
unlock:
bh_unlock_sock(sk);
return 0;
}
......@@ -1806,13 +1477,11 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, l2cap_cmd_hdr *cmd,
flags = __le16_to_cpu(rsp->flags);
result = __le16_to_cpu(rsp->result);
DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
return -ENOENT;
bh_lock_sock(sk);
if (result) {
l2cap_disconn_req req;
......@@ -1820,12 +1489,11 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, l2cap_cmd_hdr *cmd,
* Close channel.
*/
sk->state = BT_DISCONN;
l2cap_sock_set_timer(sk, HZ * 5);
req.dcid = __cpu_to_le16(l2cap_pi(sk)->dcid);
req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
l2cap_send_req(conn, L2CAP_DISCONN_REQ, L2CAP_DISCONN_REQ_SIZE, &req);
l2cap_sock_set_timer(sk, sk->sndtimeo);
goto done;
}
......@@ -1842,7 +1510,6 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, l2cap_cmd_hdr *cmd,
done:
bh_unlock_sock(sk);
return err;
}
......@@ -1856,23 +1523,21 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, l2cap_cmd_hdr *c
scid = __le16_to_cpu(req->scid);
dcid = __le16_to_cpu(req->dcid);
DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
return 0;
bh_lock_sock(sk);
rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
l2cap_send_rsp(conn, cmd->ident, L2CAP_DISCONN_RSP, L2CAP_DISCONN_RSP_SIZE, &rsp);
sk->shutdown = SHUTDOWN_MASK;
l2cap_chan_del(sk, ECONNRESET);
bh_unlock_sock(sk);
l2cap_sock_kill(sk);
return 0;
}
......@@ -1885,18 +1550,14 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, l2cap_cmd_hdr *c
scid = __le16_to_cpu(rsp->scid);
dcid = __le16_to_cpu(rsp->dcid);
DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
return -ENOENT;
bh_lock_sock(sk);
l2cap_sock_clear_timer(sk);
return 0;
l2cap_chan_del(sk, ECONNABORTED);
bh_unlock_sock(sk);
l2cap_sock_kill(sk);
return 0;
}
......@@ -1914,10 +1575,10 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk
cmd.len = __le16_to_cpu(cmd.len);
DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd.len, cmd.ident);
BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd.len, cmd.ident);
if (cmd.len > len || !cmd.ident) {
DBG("corrupted command");
BT_DBG("corrupted command");
break;
}
......@@ -1962,14 +1623,14 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk
break;
default:
ERR("Unknown signaling command 0x%2.2x", cmd.code);
BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
err = -EINVAL;
break;
};
if (err) {
l2cap_cmd_rej rej;
DBG("error %d", err);
BT_DBG("error %d", err);
/* FIXME: Map err to a valid reason. */
rej.reason = __cpu_to_le16(0);
......@@ -1987,12 +1648,13 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, __u16 cid, struct
{
struct sock *sk;
if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, cid))) {
DBG("unknown cid 0x%4.4x", cid);
sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
if (!sk) {
BT_DBG("unknown cid 0x%4.4x", cid);
goto drop;
}
DBG("sk %p, len %d", sk, skb->len);
BT_DBG("sk %p, len %d", sk, skb->len);
if (sk->state != BT_CONNECTED)
goto drop;
......@@ -2000,14 +1662,19 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, __u16 cid, struct
if (l2cap_pi(sk)->imtu < skb->len)
goto drop;
skb_queue_tail(&sk->receive_queue, skb);
sk->data_ready(sk, skb->len);
return 0;
/* If the socket recv buffer overflows we drop data here,
 * which is *bad* because L2CAP has to be reliable.
 * But we don't have any other choice: L2CAP doesn't
 * provide a flow control mechanism. */
if (!sock_queue_rcv_skb(sk, skb))
goto done;
drop:
kfree_skb(skb);
done:
if (sk) bh_unlock_sock(sk);
return 0;
}
......@@ -2020,7 +1687,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
cid = __le16_to_cpu(lh->cid);
len = __le16_to_cpu(lh->len);
DBG("len %d, cid 0x%4.4x", len, cid);
BT_DBG("len %d, cid 0x%4.4x", len, cid);
if (cid == 0x0001)
l2cap_sig_channel(conn, skb);
......@@ -2029,142 +1696,181 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
}
/* ------------ L2CAP interface with lower layer (HCI) ------------- */
static int l2cap_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct hci_dev *hdev = (struct hci_dev *) ptr;
DBG("hdev %s, event %ld", hdev->name, event);
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
int exact = 0, lm1 = 0, lm2 = 0;
register struct sock *sk;
write_lock(&l2cap_rt_lock);
if (type != ACL_LINK)
return 0;
switch (event) {
case HCI_DEV_UP:
l2cap_iff_add(hdev);
break;
BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
case HCI_DEV_DOWN:
l2cap_iff_del(hdev);
break;
};
/* Find listening sockets and check their link_mode */
read_lock(&l2cap_sk_list.lock);
for (sk = l2cap_sk_list.head; sk; sk = sk->next) {
if (sk->state != BT_LISTEN)
continue;
write_unlock(&l2cap_rt_lock);
if (!bacmp(&bluez_sk(sk)->src, bdaddr)) {
lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
exact++;
} else if (!bacmp(&bluez_sk(sk)->src, BDADDR_ANY))
lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
}
read_unlock(&l2cap_sk_list.lock);
return NOTIFY_DONE;
return exact ? lm1 : lm2;
}
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
static int l2cap_connect_cfm(struct hci_conn *hcon, __u8 status)
{
struct l2cap_iff *iff;
BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
if (!(iff = hdev->l2cap_data)) {
ERR("unknown interface");
if (hcon->type != ACL_LINK)
return 0;
}
/* Always accept connection */
return 1;
if (!status) {
struct l2cap_conn *conn;
conn = l2cap_conn_add(hcon, status);
if (conn)
l2cap_conn_ready(conn);
} else
l2cap_conn_del(hcon, bterr(status));
return 0;
}
int l2cap_connect_cfm(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 status, struct hci_conn *hconn)
static int l2cap_disconn_ind(struct hci_conn *hcon, __u8 reason)
{
struct l2cap_conn *conn;
struct l2cap_iff *iff;
int err = 0;
BT_DBG("hcon %p reason %d", hcon, reason);
DBG("hdev %s bdaddr %s hconn %p", hdev->name, batostr(bdaddr), hconn);
if (!(iff = hdev->l2cap_data)) {
ERR("unknown interface");
if (hcon->type != ACL_LINK)
return 0;
}
l2cap_iff_lock(iff);
l2cap_conn_del(hcon, bterr(reason));
return 0;
}
conn = l2cap_get_conn_by_addr(iff, bdaddr);
static int l2cap_auth_cfm(struct hci_conn *hcon, __u8 status)
{
struct l2cap_chan_list *l;
struct l2cap_conn *conn;
l2cap_conn_rsp rsp;
struct sock *sk;
int result;
if (!(conn = hcon->l2cap_data))
return 0;
l = &conn->chan_list;
if (conn) {
/* Outgoing connection */
DBG("Outgoing connection: %s -> %s, %p, %2.2x", batostr(iff->bdaddr), batostr(bdaddr), conn, status);
BT_DBG("conn %p", conn);
if (!status && hconn) {
conn->state = BT_CONNECTED;
conn->hconn = hconn;
read_lock(&l->lock);
hconn->l2cap_data = (void *)conn;
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
bh_lock_sock(sk);
/* Establish channels */
l2cap_conn_ready(conn);
} else {
l2cap_conn_del(conn, bterr(status));
if (sk->state != BT_CONNECT2 ||
(l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT)) {
bh_unlock_sock(sk);
continue;
}
} else {
/* Incoming connection */
DBG("Incoming connection: %s -> %s, %2.2x", batostr(iff->bdaddr), batostr(bdaddr), status);
if (status || !hconn)
goto done;
if (!(conn = l2cap_conn_add(iff, bdaddr))) {
err = -ENOMEM;
goto done;
if (!status) {
sk->state = BT_CONFIG;
result = 0;
} else {
sk->state = BT_DISCONN;
l2cap_sock_set_timer(sk, HZ/10);
result = L2CAP_CR_SEC_BLOCK;
}
conn->hconn = hconn;
hconn->l2cap_data = (void *)conn;
rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
rsp.result = __cpu_to_le16(result);
rsp.status = __cpu_to_le16(0);
l2cap_send_rsp(conn, l2cap_pi(sk)->ident, L2CAP_CONN_RSP,
L2CAP_CONN_RSP_SIZE, &rsp);
conn->state = BT_CONNECTED;
bh_unlock_sock(sk);
}
done:
l2cap_iff_unlock(iff);
return err;
read_unlock(&l->lock);
return 0;
}
int l2cap_disconn_ind(struct hci_conn *hconn, __u8 reason)
static int l2cap_encrypt_cfm(struct hci_conn *hcon, __u8 status)
{
struct l2cap_conn *conn = hconn->l2cap_data;
struct l2cap_chan_list *l;
struct l2cap_conn *conn;
l2cap_conn_rsp rsp;
struct sock *sk;
int result;
if (!(conn = hcon->l2cap_data))
return 0;
l = &conn->chan_list;
DBG("hconn %p reason %d", hconn, reason);
BT_DBG("conn %p", conn);
if (!conn) {
ERR("unknown connection");
return 0;
}
conn->hconn = NULL;
read_lock(&l->lock);
for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
bh_lock_sock(sk);
if (sk->state != BT_CONNECT2) {
bh_unlock_sock(sk);
continue;
}
if (!status) {
sk->state = BT_CONFIG;
result = 0;
} else {
sk->state = BT_DISCONN;
l2cap_sock_set_timer(sk, HZ/10);
result = L2CAP_CR_SEC_BLOCK;
}
rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
rsp.result = __cpu_to_le16(result);
rsp.status = __cpu_to_le16(0);
l2cap_send_rsp(conn, l2cap_pi(sk)->ident, L2CAP_CONN_RSP,
L2CAP_CONN_RSP_SIZE, &rsp);
l2cap_iff_lock(conn->iff);
l2cap_conn_del(conn, bterr(reason));
l2cap_iff_unlock(conn->iff);
bh_unlock_sock(sk);
}
read_unlock(&l->lock);
return 0;
}
int l2cap_recv_acldata(struct hci_conn *hconn, struct sk_buff *skb, __u16 flags)
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, __u16 flags)
{
struct l2cap_conn *conn = hconn->l2cap_data;
struct l2cap_conn *conn = hcon->l2cap_data;
if (!conn) {
ERR("unknown connection %p", hconn);
if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
goto drop;
}
DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
if (flags & ACL_START) {
int flen, tlen, size;
l2cap_hdr *lh;
if (conn->rx_len) {
ERR("Unexpected start frame (len %d)", skb->len);
kfree_skb(conn->rx_skb); conn->rx_skb = NULL;
BT_ERR("Unexpected start frame (len %d)", skb->len);
kfree_skb(conn->rx_skb);
conn->rx_skb = NULL;
conn->rx_len = 0;
}
if (skb->len < L2CAP_HDR_SIZE) {
ERR("Frame is too small (len %d)", skb->len);
BT_ERR("Frame is too small (len %d)", skb->len);
goto drop;
}
......@@ -2172,7 +1878,7 @@ int l2cap_recv_acldata(struct hci_conn *hconn, struct sk_buff *skb, __u16 flags)
tlen = __le16_to_cpu(lh->len);
flen = skb->len - L2CAP_HDR_SIZE;
DBG("Start: total len %d, frag len %d", tlen, flen);
BT_DBG("Start: total len %d, frag len %d", tlen, flen);
if (flen == tlen) {
/* Complete frame received */
......@@ -2186,19 +1892,21 @@ int l2cap_recv_acldata(struct hci_conn *hconn, struct sk_buff *skb, __u16 flags)
goto drop;
memcpy(skb_put(conn->rx_skb, skb->len), skb->data, skb->len);
conn->rx_len = tlen - flen;
} else {
DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
if (!conn->rx_len) {
ERR("Unexpected continuation frame (len %d)", skb->len);
BT_ERR("Unexpected continuation frame (len %d)", skb->len);
goto drop;
}
if (skb->len > conn->rx_len) {
ERR("Fragment is too large (len %d)", skb->len);
kfree_skb(conn->rx_skb); conn->rx_skb = NULL;
BT_ERR("Fragment is too large (len %d, expect %d)",
skb->len, conn->rx_len);
kfree_skb(conn->rx_skb);
conn->rx_skb = NULL;
conn->rx_len = 0;
goto drop;
}
......@@ -2217,7 +1925,54 @@ int l2cap_recv_acldata(struct hci_conn *hconn, struct sk_buff *skb, __u16 flags)
return 0;
}
struct proto_ops l2cap_sock_ops = {
/* ----- Proc fs support ------ */
static int l2cap_sock_dump(char *buf, struct bluez_sock_list *list)
{
struct l2cap_pinfo *pi;
struct sock *sk;
char *ptr = buf;
write_lock(&list->lock);
for (sk = list->head; sk; sk = sk->next) {
pi = l2cap_pi(sk);
ptr += sprintf(ptr, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
batostr(&bluez_sk(sk)->src), batostr(&bluez_sk(sk)->dst),
sk->state, pi->psm, pi->scid, pi->dcid, pi->imtu, pi->omtu,
pi->link_mode);
}
write_unlock(&list->lock);
ptr += sprintf(ptr, "\n");
return ptr - buf;
}
static int l2cap_read_proc(char *buf, char **start, off_t offset, int count, int *eof, void *priv)
{
char *ptr = buf;
int len;
BT_DBG("count %d, offset %ld", count, offset);
ptr += l2cap_sock_dump(ptr, &l2cap_sk_list);
len = ptr - buf;
if (len <= count + offset)
*eof = 1;
*start = buf + offset;
len -= offset;
if (len > count)
len = count;
if (len < 0)
len = 0;
return len;
}
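/* read_proc contract, as implemented above: dump everything into buf,
 * set *eof once the full dump fits within offset + count, then hand back
 * only the [offset, offset + count) slice via *start and the return
 * value. */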
static struct proto_ops l2cap_sock_ops = {
family: PF_BLUETOOTH,
release: l2cap_sock_release,
bind: l2cap_sock_bind,
......@@ -2226,8 +1981,8 @@ struct proto_ops l2cap_sock_ops = {
accept: l2cap_sock_accept,
getname: l2cap_sock_getname,
sendmsg: l2cap_sock_sendmsg,
recvmsg: l2cap_sock_recvmsg,
poll: l2cap_sock_poll,
recvmsg: bluez_sock_recvmsg,
poll: bluez_sock_poll,
socketpair: sock_no_socketpair,
ioctl: sock_no_ioctl,
shutdown: sock_no_shutdown,
......@@ -2236,75 +1991,53 @@ struct proto_ops l2cap_sock_ops = {
mmap: sock_no_mmap
};
struct net_proto_family l2cap_sock_family_ops = {
static struct net_proto_family l2cap_sock_family_ops = {
family: PF_BLUETOOTH,
create: l2cap_sock_create,
create: l2cap_sock_create
};
struct hci_proto l2cap_hci_proto = {
static struct hci_proto l2cap_hci_proto = {
name: "L2CAP",
id: HCI_PROTO_L2CAP,
connect_ind: l2cap_connect_ind,
connect_cfm: l2cap_connect_cfm,
disconn_ind: l2cap_disconn_ind,
recv_acldata: l2cap_recv_acldata,
};
struct notifier_block l2cap_nblock = {
notifier_call: l2cap_dev_event
auth_cfm: l2cap_auth_cfm,
encrypt_cfm: l2cap_encrypt_cfm
};
int __init l2cap_init(void)
{
INF("BlueZ L2CAP ver %s Copyright (C) 2000,2001 Qualcomm Inc",
VERSION);
INF("Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>");
int err;
if (bluez_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops)) {
ERR("Can't register L2CAP socket");
return -EPROTO;
if ((err = bluez_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops))) {
BT_ERR("Can't register L2CAP socket");
return err;
}
if (hci_register_proto(&l2cap_hci_proto) < 0) {
ERR("Can't register L2CAP protocol");
return -EPROTO;
if ((err = hci_register_proto(&l2cap_hci_proto))) {
BT_ERR("Can't register L2CAP protocol");
return err;
}
hci_register_notifier(&l2cap_nblock);
l2cap_register_proc();
create_proc_read_entry("bluetooth/l2cap", 0, 0, l2cap_read_proc, NULL);
BT_INFO("BlueZ L2CAP ver %s Copyright (C) 2000,2001 Qualcomm Inc", VERSION);
BT_INFO("Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>");
return 0;
}
void l2cap_cleanup(void)
{
l2cap_unregister_proc();
remove_proc_entry("bluetooth/l2cap", NULL);
/* Unregister socket, protocol and notifier */
/* Unregister socket and protocol */
if (bluez_sock_unregister(BTPROTO_L2CAP))
ERR("Can't unregister L2CAP socket");
if (hci_unregister_proto(&l2cap_hci_proto) < 0)
ERR("Can't unregister L2CAP protocol");
BT_ERR("Can't unregister L2CAP socket");
hci_unregister_notifier(&l2cap_nblock);
/* We _must_ not have any sockets and/or connections
* at this stage.
*/
/* Free interface list and unlock HCI devices */
{
struct list_head *list = &l2cap_iff_list;
while (!list_empty(list)) {
struct l2cap_iff *iff;
iff = list_entry(list->next, struct l2cap_iff, list);
l2cap_iff_del(iff->hdev);
}
}
if (hci_unregister_proto(&l2cap_hci_proto))
BT_ERR("Can't unregister L2CAP protocol");
}
module_init(l2cap_init);
......@@ -2313,4 +2046,3 @@ module_exit(l2cap_cleanup);
MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>");
MODULE_DESCRIPTION("BlueZ L2CAP ver " VERSION);
MODULE_LICENSE("GPL");
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/*
* BlueZ L2CAP proc fs support.
*
* $Id: l2cap_proc.c,v 1.2 2001/06/02 01:40:09 maxk Exp $
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <net/sock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <net/bluetooth/bluez.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap_core.h>
#ifndef L2CAP_DEBUG
#undef DBG
#define DBG( A... )
#endif
/* ----- PROC fs support ----- */
static int l2cap_conn_dump(char *buf, struct l2cap_iff *iff)
{
struct list_head *p;
char *ptr = buf;
list_for_each(p, &iff->conn_list) {
struct l2cap_conn *c;
c = list_entry(p, struct l2cap_conn, list);
ptr += sprintf(ptr, " %p %d %p %p %s %s\n",
c, c->state, c->iff, c->hconn, batostr(&c->src), batostr(&c->dst));
}
return ptr - buf;
}
static int l2cap_iff_dump(char *buf)
{
struct list_head *p;
char *ptr = buf;
ptr += sprintf(ptr, "Interfaces:\n");
write_lock(&l2cap_rt_lock);
list_for_each(p, &l2cap_iff_list) {
struct l2cap_iff *iff;
iff = list_entry(p, struct l2cap_iff, list);
ptr += sprintf(ptr, " %s %p %p\n", iff->hdev->name, iff, iff->hdev);
l2cap_iff_lock(iff);
ptr += l2cap_conn_dump(ptr, iff);
l2cap_iff_unlock(iff);
}
write_unlock(&l2cap_rt_lock);
ptr += sprintf(ptr, "\n");
return ptr - buf;
}
static int l2cap_sock_dump(char *buf, struct bluez_sock_list *list)
{
struct l2cap_pinfo *pi;
struct sock *sk;
char *ptr = buf;
ptr += sprintf(ptr, "Sockets:\n");
write_lock(&list->lock);
for (sk = list->head; sk; sk = sk->next) {
pi = l2cap_pi(sk);
ptr += sprintf(ptr, " %p %d %p %d %s %s 0x%4.4x 0x%4.4x %d %d\n", sk, sk->state, pi->conn, pi->psm,
batostr(&pi->src), batostr(&pi->dst), pi->scid, pi->dcid, pi->imtu, pi->omtu );
}
write_unlock(&list->lock);
ptr += sprintf(ptr, "\n");
return ptr - buf;
}
static int l2cap_read_proc(char *buf, char **start, off_t offset, int count, int *eof, void *priv)
{
char *ptr = buf;
int len;
DBG("count %d, offset %ld", count, offset);
ptr += l2cap_iff_dump(ptr);
ptr += l2cap_sock_dump(ptr, &l2cap_sk_list);
len = ptr - buf;
if (len <= count + offset)
*eof = 1;
*start = buf + offset;
len -= offset;
if (len > count)
len = count;
if (len < 0)
len = 0;
return len;
}
void l2cap_register_proc(void)
{
create_proc_read_entry("bluetooth/l2cap", 0, 0, l2cap_read_proc, NULL);
}
void l2cap_unregister_proc(void)
{
remove_proc_entry("bluetooth/l2cap", NULL);
}
......@@ -25,7 +25,7 @@
/*
* BlueZ kernel library.
*
* $Id: lib.c,v 1.3 2001/06/22 23:14:23 maxk Exp $
* $Id: lib.c,v 1.1 2002/03/08 21:06:59 maxk Exp $
*/
#include <linux/kernel.h>
......
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/*
* BlueZ SCO sockets.
*
* $Id: sco.c,v 1.3 2002/04/17 17:37:16 maxk Exp $
*/
#define VERSION "0.2"
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <net/sock.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/sco.h>
#ifndef SCO_DEBUG
#undef BT_DBG
#define BT_DBG( A... )
#endif
static struct proto_ops sco_sock_ops;
static struct bluez_sock_list sco_sk_list = {
lock: RW_LOCK_UNLOCKED
};
static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent);
static void sco_chan_del(struct sock *sk, int err);
static inline struct sock * sco_chan_get(struct sco_conn *conn);
static int sco_conn_del(struct hci_conn *conn, int err);
static void sco_sock_close(struct sock *sk);
static void sco_sock_kill(struct sock *sk);
/* ----- SCO timers ------ */
static void sco_sock_timeout(unsigned long arg)
{
struct sock *sk = (struct sock *) arg;
BT_DBG("sock %p state %d", sk, sk->state);
bh_lock_sock(sk);
sk->err = ETIMEDOUT;
sk->state_change(sk);
bh_unlock_sock(sk);
sco_sock_kill(sk);
sock_put(sk);
}
static void sco_sock_set_timer(struct sock *sk, long timeout)
{
BT_DBG("sock %p state %d timeout %ld", sk, sk->state, timeout);
if (!mod_timer(&sk->timer, jiffies + timeout))
sock_hold(sk);
}
static void sco_sock_clear_timer(struct sock *sk)
{
BT_DBG("sock %p state %d", sk, sk->state);
if (timer_pending(&sk->timer) && del_timer(&sk->timer))
__sock_put(sk);
}
static void sco_sock_init_timer(struct sock *sk)
{
init_timer(&sk->timer);
sk->timer.function = sco_sock_timeout;
sk->timer.data = (unsigned long)sk;
}
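/* The timer helpers above keep one socket reference per pending timer:
 * sco_sock_set_timer() takes it with sock_hold() when it arms a timer
 * that was not already pending, and sco_sock_clear_timer() (or the
 * timeout handler itself, via sock_put()) drops it again. */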
/* -------- SCO connections --------- */
static struct sco_conn *sco_conn_add(struct hci_conn *hcon, __u8 status)
{
struct hci_dev *hdev = hcon->hdev;
struct sco_conn *conn;
if ((conn = hcon->sco_data))
return conn;
if (status)
return conn;
if (!(conn = kmalloc(sizeof(struct sco_conn), GFP_ATOMIC)))
return NULL;
memset(conn, 0, sizeof(struct sco_conn));
spin_lock_init(&conn->lock);
hcon->sco_data = conn;
conn->hcon = hcon;
conn->src = &hdev->bdaddr;
conn->dst = &hcon->dst;
if (hdev->sco_mtu > 0)
conn->mtu = hdev->sco_mtu;
else
conn->mtu = 60;
BT_DBG("hcon %p conn %p", hcon, conn);
MOD_INC_USE_COUNT;
return conn;
}
static int sco_conn_del(struct hci_conn *hcon, int err)
{
struct sco_conn *conn;
struct sock *sk;
if (!(conn = hcon->sco_data))
return 0;
BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Kill socket */
if ((sk = sco_chan_get(conn))) {
bh_lock_sock(sk);
sco_sock_clear_timer(sk);
sco_chan_del(sk, err);
bh_unlock_sock(sk);
sco_sock_kill(sk);
}
hcon->sco_data = NULL;
kfree(conn);
MOD_DEC_USE_COUNT;
return 0;
}
int sco_connect(struct sock *sk)
{
bdaddr_t *src = &bluez_sk(sk)->src;
bdaddr_t *dst = &bluez_sk(sk)->dst;
struct sco_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
int err = 0;
BT_DBG("%s -> %s", batostr(src), batostr(dst));
if (!(hdev = hci_get_route(dst, src)))
return -EHOSTUNREACH;
hci_dev_lock_bh(hdev);
err = -ENOMEM;
hcon = hci_connect(hdev, SCO_LINK, dst);
if (!hcon)
goto done;
conn = sco_conn_add(hcon, 0);
if (!conn) {
hci_conn_put(hcon);
goto done;
}
/* Update source addr of the socket */
bacpy(src, conn->src);
err = sco_chan_add(conn, sk, NULL);
if (err)
goto done;
if (hcon->state == BT_CONNECTED) {
sco_sock_clear_timer(sk);
sk->state = BT_CONNECTED;
} else {
sk->state = BT_CONNECT;
sco_sock_set_timer(sk, sk->sndtimeo);
}
done:
hci_dev_unlock_bh(hdev);
hci_dev_put(hdev);
return err;
}
static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
{
struct sco_conn *conn = sco_pi(sk)->conn;
struct sk_buff *skb;
int err, count;
/* Check outgoing MTU */
if (len > conn->mtu)
return -EINVAL;
BT_DBG("sk %p len %d", sk, len);
count = MIN(conn->mtu, len);
if (!(skb = bluez_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err)))
return err;
if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
err = -EFAULT;
goto fail;
}
if ((err = hci_send_sco(conn->hcon, skb)) < 0)
goto fail;
return count;
fail:
kfree_skb(skb);
return err;
}
static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
{
struct sock *sk = sco_chan_get(conn);
if (!sk)
goto drop;
BT_DBG("sk %p len %d", sk, skb->len);
if (sk->state != BT_CONNECTED)
goto drop;
if (!sock_queue_rcv_skb(sk, skb))
return;
drop:
kfree_skb(skb);
return;
}
/* -------- Socket interface ---------- */
static struct sock *__sco_get_sock_by_addr(bdaddr_t *ba)
{
struct sock *sk;
for (sk = sco_sk_list.head; sk; sk = sk->next) {
if (!bacmp(&bluez_sk(sk)->src, ba))
break;
}
return sk;
}
/* Find socket listening on source bdaddr.
* Returns closest match.
*/
static struct sock *sco_get_sock_listen(bdaddr_t *src)
{
struct sock *sk, *sk1 = NULL;
read_lock(&sco_sk_list.lock);
for (sk = sco_sk_list.head; sk; sk = sk->next) {
if (sk->state != BT_LISTEN)
continue;
/* Exact match. */
if (!bacmp(&bluez_sk(sk)->src, src))
break;
/* Closest match */
if (!bacmp(&bluez_sk(sk)->src, BDADDR_ANY))
sk1 = sk;
}
read_unlock(&sco_sk_list.lock);
return sk ? sk : sk1;
}
static void sco_sock_destruct(struct sock *sk)
{
BT_DBG("sk %p", sk);
skb_queue_purge(&sk->receive_queue);
skb_queue_purge(&sk->write_queue);
if (sk->protinfo)
kfree(sk->protinfo);
MOD_DEC_USE_COUNT;
}
static void sco_sock_cleanup_listen(struct sock *parent)
{
struct sock *sk;
BT_DBG("parent %p", parent);
/* Close not yet accepted channels */
while ((sk = bluez_accept_dequeue(parent, NULL)))
sco_sock_close(sk);
parent->state = BT_CLOSED;
parent->zapped = 1;
}
/* Kill socket (only if zapped and orphan)
* Must be called on unlocked socket.
*/
static void sco_sock_kill(struct sock *sk)
{
if (!sk->zapped || sk->socket)
return;
BT_DBG("sk %p state %d", sk, sk->state);
/* Kill poor orphan */
bluez_sock_unlink(&sco_sk_list, sk);
sk->dead = 1;
sock_put(sk);
}
/* Close socket.
* Must be called on unlocked socket.
*/
static void sco_sock_close(struct sock *sk)
{
struct sco_conn *conn;
sco_sock_clear_timer(sk);
lock_sock(sk);
conn = sco_pi(sk)->conn;
BT_DBG("sk %p state %d conn %p socket %p", sk, sk->state, conn, sk->socket);
switch (sk->state) {
case BT_LISTEN:
sco_sock_cleanup_listen(sk);
break;
case BT_CONNECTED:
case BT_CONFIG:
case BT_CONNECT:
case BT_DISCONN:
sco_chan_del(sk, ECONNRESET);
break;
default:
sk->zapped = 1;
break;
};
release_sock(sk);
sco_sock_kill(sk);
}
static void sco_sock_init(struct sock *sk, struct sock *parent)
{
BT_DBG("sk %p", sk);
if (parent)
sk->type = parent->type;
}
static struct sock *sco_sock_alloc(struct socket *sock, int proto, int prio)
{
struct sock *sk;
sk = bluez_sock_alloc(sock, proto, sizeof(struct sco_pinfo), prio);
if (!sk)
return NULL;
sk->destruct = sco_sock_destruct;
sk->sndtimeo = SCO_CONN_TIMEOUT;
sk->state = BT_OPEN;
sco_sock_init_timer(sk);
bluez_sock_link(&sco_sk_list, sk);
MOD_INC_USE_COUNT;
return sk;
}
static int sco_sock_create(struct socket *sock, int protocol)
{
struct sock *sk;
BT_DBG("sock %p", sock);
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_SEQPACKET)
return -ESOCKTNOSUPPORT;
sock->ops = &sco_sock_ops;
if (!(sk = sco_sock_alloc(sock, protocol, GFP_KERNEL)))
return -ENOMEM;
sco_sock_init(sk, NULL);
return 0;
}
static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
bdaddr_t *src = &sa->sco_bdaddr;
int err = 0;
BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr));
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
lock_sock(sk);
if (sk->state != BT_OPEN) {
err = -EBADFD;
goto done;
}
write_lock(&sco_sk_list.lock);
if (bacmp(src, BDADDR_ANY) && __sco_get_sock_by_addr(src)) {
err = -EADDRINUSE;
goto unlock;
}
/* Save source address */
bacpy(&bluez_sk(sk)->src, &sa->sco_bdaddr);
sk->state = BT_BOUND;
unlock:
write_unlock(&sco_sk_list.lock);
done:
release_sock(sk);
return err;
}
static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p", sk);
if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_sco))
return -EINVAL;
if (sk->state != BT_OPEN && sk->state != BT_BOUND)
return -EBADFD;
if (sk->type != SOCK_SEQPACKET)
return -EINVAL;
lock_sock(sk);
/* Set destination address */
bacpy(&bluez_sk(sk)->dst, &sa->sco_bdaddr);
if ((err = sco_connect(sk)))
goto done;
err = bluez_sock_w4_connect(sk, flags);
done:
release_sock(sk);
return err;
}
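/* User-space view of the bind/connect path above (sketch, assuming the
 * BlueZ <bluetooth/bluetooth.h> and <bluetooth/sco.h> definitions;
 * error handling omitted):
 *
 *	struct sockaddr_sco addr;
 *	int sk = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_SCO);
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.sco_family = AF_BLUETOOTH;
 *	str2ba("00:11:22:33:44:55", &addr.sco_bdaddr);
 *	connect(sk, (struct sockaddr *) &addr, sizeof(addr));
 */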
int sco_sock_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p backlog %d", sk, backlog);
lock_sock(sk);
if (sk->state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
err = -EBADFD;
goto done;
}
sk->max_ack_backlog = backlog;
sk->ack_backlog = 0;
sk->state = BT_LISTEN;
done:
release_sock(sk);
return err;
}
int sco_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *ch;
long timeo;
int err = 0;
lock_sock(sk);
if (sk->state != BT_LISTEN) {
err = -EBADFD;
goto done;
}
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
BT_DBG("sk %p timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk->sleep, &wait);
while (!(ch = bluez_accept_dequeue(sk, newsock))) {
set_current_state(TASK_INTERRUPTIBLE);
if (!timeo) {
err = -EAGAIN;
break;
}
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
if (sk->state != BT_LISTEN) {
err = -EBADFD;
break;
}
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
break;
}
}
set_current_state(TASK_RUNNING);
remove_wait_queue(sk->sleep, &wait);
if (err)
goto done;
newsock->state = SS_CONNECTED;
BT_DBG("new socket %p", ch);
done:
release_sock(sk);
return err;
}
static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
{
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
addr->sa_family = AF_BLUETOOTH;
*len = sizeof(struct sockaddr_sco);
if (peer)
bacpy(&sa->sco_bdaddr, &bluez_sk(sk)->dst);
else
bacpy(&sa->sco_bdaddr, &bluez_sk(sk)->src);
return 0;
}
static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg, int len, struct scm_cookie *scm)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sock %p, sk %p", sock, sk);
if (sk->err)
return sock_error(sk);
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
lock_sock(sk);
if (sk->state == BT_CONNECTED)
err = sco_send_frame(sk, msg, len);
else
err = -ENOTCONN;
release_sock(sk);
return err;
}
int sco_sock_setsockopt(struct socket *sock, int level, int optname, char *optval, int optlen)
{
struct sock *sk = sock->sk;
int err = 0;
BT_DBG("sk %p", sk);
lock_sock(sk);
switch (optname) {
default:
err = -ENOPROTOOPT;
break;
};
release_sock(sk);
return err;
}
int sco_sock_getsockopt(struct socket *sock, int level, int optname, char *optval, int *optlen)
{
struct sock *sk = sock->sk;
struct sco_options opts;
struct sco_conninfo cinfo;
int len, err = 0;
BT_DBG("sk %p", sk);
if (get_user(len, optlen))
return -EFAULT;
lock_sock(sk);
switch (optname) {
case SCO_OPTIONS:
if (sk->state != BT_CONNECTED) {
err = -ENOTCONN;
break;
}
opts.mtu = sco_pi(sk)->conn->mtu;
BT_INFO("mtu %d", opts.mtu);
len = MIN(len, sizeof(opts));
if (copy_to_user(optval, (char *)&opts, len))
err = -EFAULT;
break;
case SCO_CONNINFO:
if (sk->state != BT_CONNECTED) {
err = -ENOTCONN;
break;
}
cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
len = MIN(len, sizeof(cinfo));
if (copy_to_user(optval, (char *)&cinfo, len))
err = -EFAULT;
break;
default:
err = -ENOPROTOOPT;
break;
};
release_sock(sk);
return err;
}
static int sco_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk)
return 0;
sock_orphan(sk);
sco_sock_close(sk);
return 0;
}
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
{
BT_DBG("conn %p", conn);
sco_pi(sk)->conn = conn;
conn->sk = sk;
if (parent)
bluez_accept_enqueue(parent, sk);
}
static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
{
int err = 0;
sco_conn_lock(conn);
if (conn->sk) {
err = -EBUSY;
} else {
__sco_chan_add(conn, sk, parent);
}
sco_conn_unlock(conn);
return err;
}
static inline struct sock * sco_chan_get(struct sco_conn *conn)
{
struct sock *sk = NULL;
sco_conn_lock(conn);
sk = conn->sk;
sco_conn_unlock(conn);
return sk;
}
/* Delete channel.
* Must be called on the locked socket. */
static void sco_chan_del(struct sock *sk, int err)
{
struct sco_conn *conn;
conn = sco_pi(sk)->conn;
BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
if (conn) {
sco_conn_lock(conn);
conn->sk = NULL;
sco_pi(sk)->conn = NULL;
sco_conn_unlock(conn);
hci_conn_put(conn->hcon);
}
sk->state = BT_CLOSED;
sk->err = err;
sk->state_change(sk);
sk->zapped = 1;
}
static void sco_conn_ready(struct sco_conn *conn)
{
struct sock *parent, *sk;
BT_DBG("conn %p", conn);
sco_conn_lock(conn);
if ((sk = conn->sk)) {
sco_sock_clear_timer(sk);
bh_lock_sock(sk);
sk->state = BT_CONNECTED;
sk->state_change(sk);
bh_unlock_sock(sk);
} else {
parent = sco_get_sock_listen(conn->src);
if (!parent)
goto done;
bh_lock_sock(parent);
sk = sco_sock_alloc(NULL, BTPROTO_SCO, GFP_ATOMIC);
if (!sk) {
bh_unlock_sock(parent);
goto done;
}
sco_sock_init(sk, parent);
bacpy(&bluez_sk(sk)->src, conn->src);
bacpy(&bluez_sk(sk)->dst, conn->dst);
hci_conn_hold(conn->hcon);
__sco_chan_add(conn, sk, parent);
sk->state = BT_CONNECTED;
/* Wake up parent */
parent->data_ready(parent, 1);
bh_unlock_sock(parent);
}
done:
sco_conn_unlock(conn);
}
/* ----- SCO interface with lower layer (HCI) ----- */
int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
/* Always accept connection */
return HCI_LM_ACCEPT;
}
int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
{
BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
if (hcon->type != SCO_LINK)
return 0;
if (!status) {
struct sco_conn *conn;
conn = sco_conn_add(hcon, status);
if (conn)
sco_conn_ready(conn);
} else
sco_conn_del(hcon, bterr(status));
return 0;
}
int sco_disconn_ind(struct hci_conn *hcon, __u8 reason)
{
BT_DBG("hcon %p reason %d", hcon, reason);
if (hcon->type != SCO_LINK)
return 0;
sco_conn_del(hcon, bterr(reason));
return 0;
}
int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
{
struct sco_conn *conn = hcon->sco_data;
if (!conn)
goto drop;
BT_DBG("conn %p len %d", conn, skb->len);
if (skb->len) {
sco_recv_frame(conn, skb);
return 0;
}
drop:
kfree_skb(skb);
return 0;
}
/* ----- Proc fs support ------ */
static int sco_sock_dump(char *buf, struct bluez_sock_list *list)
{
struct sco_pinfo *pi;
struct sock *sk;
char *ptr = buf;
write_lock(&list->lock);
for (sk = list->head; sk; sk = sk->next) {
pi = sco_pi(sk);
ptr += sprintf(ptr, "%s %s %d\n",
batostr(&bluez_sk(sk)->src), batostr(&bluez_sk(sk)->dst),
sk->state);
}
write_unlock(&list->lock);
ptr += sprintf(ptr, "\n");
return ptr - buf;
}
static int sco_read_proc(char *buf, char **start, off_t offset, int count, int *eof, void *priv)
{
char *ptr = buf;
int len;
BT_DBG("count %d, offset %ld", count, offset);
ptr += sco_sock_dump(ptr, &sco_sk_list);
len = ptr - buf;
if (len <= count + offset)
*eof = 1;
*start = buf + offset;
len -= offset;
if (len > count)
len = count;
if (len < 0)
len = 0;
return len;
}
static struct proto_ops sco_sock_ops = {
family: PF_BLUETOOTH,
release: sco_sock_release,
bind: sco_sock_bind,
connect: sco_sock_connect,
listen: sco_sock_listen,
accept: sco_sock_accept,
getname: sco_sock_getname,
sendmsg: sco_sock_sendmsg,
recvmsg: bluez_sock_recvmsg,
poll: bluez_sock_poll,
socketpair: sock_no_socketpair,
ioctl: sock_no_ioctl,
shutdown: sock_no_shutdown,
setsockopt: sco_sock_setsockopt,
getsockopt: sco_sock_getsockopt,
mmap: sock_no_mmap
};
static struct net_proto_family sco_sock_family_ops = {
family: PF_BLUETOOTH,
create: sco_sock_create
};
static struct hci_proto sco_hci_proto = {
name: "SCO",
id: HCI_PROTO_SCO,
connect_ind: sco_connect_ind,
connect_cfm: sco_connect_cfm,
disconn_ind: sco_disconn_ind,
recv_scodata: sco_recv_scodata,
};
int __init sco_init(void)
{
int err;
if ((err = bluez_sock_register(BTPROTO_SCO, &sco_sock_family_ops))) {
BT_ERR("Can't register SCO socket layer");
return err;
}
if ((err = hci_register_proto(&sco_hci_proto))) {
BT_ERR("Can't register SCO protocol");
return err;
}
create_proc_read_entry("bluetooth/sco", 0, 0, sco_read_proc, NULL);
BT_INFO("BlueZ SCO ver %s Copyright (C) 2000,2001 Qualcomm Inc", VERSION);
BT_INFO("Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>");
return 0;
}
void sco_cleanup(void)
{
int err;
remove_proc_entry("bluetooth/sco", NULL);
/* Unregister socket and protocol */
if ((err = bluez_sock_unregister(BTPROTO_SCO)))
BT_ERR("Can't unregister SCO socket layer %d", err);
if ((err = hci_unregister_proto(&sco_hci_proto)))
BT_ERR("Can't unregister SCO protocol %d", err);
}
module_init(sco_init);
module_exit(sco_cleanup);
MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>");
MODULE_DESCRIPTION("BlueZ SCO ver " VERSION);
MODULE_LICENSE("GPL");
......@@ -25,7 +25,7 @@
/*
* BlueZ symbols.
*
* $Id: syms.c,v 1.1 2001/07/12 19:31:24 maxk Exp $
* $Id: syms.c,v 1.1 2002/03/08 21:06:59 maxk Exp $
*/
#include <linux/config.h>
......@@ -39,7 +39,6 @@
#include <linux/socket.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/bluez.h>
#include <net/bluetooth/hci_core.h>
/* HCI Core */
......@@ -47,17 +46,18 @@ EXPORT_SYMBOL(hci_register_dev);
EXPORT_SYMBOL(hci_unregister_dev);
EXPORT_SYMBOL(hci_register_proto);
EXPORT_SYMBOL(hci_unregister_proto);
EXPORT_SYMBOL(hci_register_notifier);
EXPORT_SYMBOL(hci_unregister_notifier);
EXPORT_SYMBOL(hci_get_route);
EXPORT_SYMBOL(hci_connect);
EXPORT_SYMBOL(hci_disconnect);
EXPORT_SYMBOL(hci_dev_get);
EXPORT_SYMBOL(hci_conn_auth);
EXPORT_SYMBOL(hci_conn_encrypt);
EXPORT_SYMBOL(hci_recv_frame);
EXPORT_SYMBOL(hci_send_acl);
EXPORT_SYMBOL(hci_send_sco);
EXPORT_SYMBOL(hci_send_raw);
EXPORT_SYMBOL(hci_si_event);
/* BlueZ lib */
EXPORT_SYMBOL(bluez_dump);
......@@ -68,5 +68,11 @@ EXPORT_SYMBOL(bterr);
/* BlueZ sockets */
EXPORT_SYMBOL(bluez_sock_register);
EXPORT_SYMBOL(bluez_sock_unregister);
EXPORT_SYMBOL(bluez_sock_alloc);
EXPORT_SYMBOL(bluez_sock_link);
EXPORT_SYMBOL(bluez_sock_unlink);
EXPORT_SYMBOL(bluez_sock_recvmsg);
EXPORT_SYMBOL(bluez_sock_poll);
EXPORT_SYMBOL(bluez_accept_enqueue);
EXPORT_SYMBOL(bluez_accept_dequeue);
EXPORT_SYMBOL(bluez_sock_w4_connect);