Commit 18a93310 authored by Linus Torvalds

v2.5.1.10 -> v2.5.1.11

- Davide Libenzi, Ingo Molnar: scheduler updates
- Greg KH: USB update
- Jean Tourrilhes: IrDA and wireless updates
- Jens Axboe: bio/block updates
parent 908920b1
......@@ -98,6 +98,7 @@ APISOURCES := $(TOPDIR)/drivers/media/video/videodev.c \
$(TOPDIR)/drivers/sound/sound_firmware.c \
$(TOPDIR)/drivers/net/wan/syncppp.c \
$(TOPDIR)/drivers/net/wan/z85230.c \
$(TOPDIR)/drivers/usb/hcd.c \
$(TOPDIR)/drivers/usb/usb.c \
$(TOPDIR)/drivers/video/fbmem.c \
$(TOPDIR)/drivers/video/fbcmap.c \
......
......@@ -281,6 +281,14 @@
!Edrivers/usb/usb.c
</sect1>
<sect1><title>Host Controller APIs</title>
<para>These APIs are only for use by host controller drivers,
most of which implement standard register interfaces such as
EHCI, OHCI, or UHCI.
</para>
!Edrivers/usb/hcd.c
</sect1>
</chapter>
<chapter id="uart16x50">
......
VERSION = 2
PATCHLEVEL = 5
SUBLEVEL = 2
EXTRAVERSION =-pre10
EXTRAVERSION =-pre11
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
......
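With the new EXTRAVERSION, KERNELRELEASE above expands to 2.5.2-pre11; the -pre10 line next to it is the old value this patch replaces.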
......@@ -2267,6 +2267,8 @@ static struct pci_device_id eepro100_pci_tbl[] __devinitdata = {
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82559ER,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CAM,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ID1029,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ID1030,
......
......@@ -140,7 +140,7 @@ irport_open(int i, unsigned int iobase, unsigned int irq)
{
struct net_device *dev;
struct irport_cb *self;
int ret;
void *ret;
int err;
IRDA_DEBUG(0, __FUNCTION__ "()\n");
......@@ -169,13 +169,12 @@ irport_open(int i, unsigned int iobase, unsigned int irq)
self->io.fifo_size = 16;
/* Lock the port that we need */
ret = check_region(self->io.sir_base, self->io.sir_ext);
if (ret < 0) {
ret = request_region(self->io.sir_base, self->io.sir_ext, driver_name);
if (!ret) {
IRDA_DEBUG(0, __FUNCTION__ "(), can't get iobase of 0x%03x\n",
self->io.sir_base);
return NULL;
}
request_region(self->io.sir_base, self->io.sir_ext, driver_name);
/* Initialize QoS for this device */
irda_init_max_qos_capabilies(&self->qos);
......
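This hunk, and the nsc-ircc and w83977af hunks below, all make the same fix: the old check_region()-then-request_region() sequence is racy, since another driver can claim the I/O region between the check and the reservation, and the reservation's own failure was never noticed. A minimal before/after sketch using the diff's own names:

	/* before: two steps, racy, and request_region()'s result ignored */
	if (check_region(self->io.sir_base, self->io.sir_ext) < 0)
		return NULL;
	request_region(self->io.sir_base, self->io.sir_ext, driver_name);

	/* after: one atomic call; returns a struct resource pointer,
	 * or NULL on failure -- hence 'ret' changing from int to void * */
	if (!request_region(self->io.sir_base, self->io.sir_ext, driver_name))
		return NULL;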
......@@ -246,7 +246,7 @@ static int nsc_ircc_open(int i, chipio_t *info)
struct net_device *dev;
struct nsc_ircc_cb *self;
struct pm_dev *pmdev;
int ret;
void *ret;
int err;
IRDA_DEBUG(2, __FUNCTION__ "()\n");
......@@ -282,15 +282,14 @@ static int nsc_ircc_open(int i, chipio_t *info)
self->io.fifo_size = 32;
/* Reserve the ioports that we need */
ret = check_region(self->io.fir_base, self->io.fir_ext);
if (ret < 0) {
ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name);
if (!ret) {
WARNING(__FUNCTION__ "(), can't get iobase of 0x%03x\n",
self->io.fir_base);
dev_self[i] = NULL;
kfree(self);
return -ENODEV;
}
request_region(self->io.fir_base, self->io.fir_ext, driver_name);
/* Initialize QoS for this device */
irda_init_max_qos_capabilies(&self->qos);
......
......@@ -160,7 +160,7 @@ int w83977af_open(int i, unsigned int iobase, unsigned int irq,
{
struct net_device *dev;
struct w83977af_ir *self;
int ret;
void *ret;
int err;
IRDA_DEBUG(0, __FUNCTION__ "()\n");
......@@ -190,14 +190,13 @@ int w83977af_open(int i, unsigned int iobase, unsigned int irq,
self->io.fifo_size = 32;
/* Lock the port that we need */
ret = check_region(self->io.fir_base, self->io.fir_ext);
if (ret < 0) {
ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name);
if (!ret) {
IRDA_DEBUG(0, __FUNCTION__ "(), can't get iobase of 0x%03x\n",
self->io.fir_base);
/* w83977af_cleanup( self); */
return -ENODEV;
}
request_region(self->io.fir_base, self->io.fir_ext, driver_name);
/* Initialize QoS for this device */
irda_init_max_qos_capabilies(&self->qos);
......
......@@ -999,7 +999,9 @@ static int ray_event(event_t event, int priority,
/*===========================================================================*/
int ray_dev_init(struct net_device *dev)
{
#ifdef RAY_IMMEDIATE_INIT
int i;
#endif /* RAY_IMMEDIATE_INIT */
ray_dev_t *local = dev->priv;
dev_link_t *link = local->finder;
......@@ -1008,6 +1010,7 @@ int ray_dev_init(struct net_device *dev)
DEBUG(2,"ray_dev_init - device not present\n");
return -1;
}
#ifdef RAY_IMMEDIATE_INIT
/* Download startup parameters */
if ( (i = dl_startup_params(dev)) < 0)
{
......@@ -1015,6 +1018,13 @@ int ray_dev_init(struct net_device *dev)
"returns 0x%x\n",i);
return -1;
}
#else /* RAY_IMMEDIATE_INIT */
/* Postpone the card init so that we can still configure the card,
* for example using the Wireless Extensions. The init will happen
* in ray_open() - Jean II */
DEBUG(1,"ray_dev_init: postponing card init to ray_open() ; Status = %d\n",
local->card_status);
#endif /* RAY_IMMEDIATE_INIT */
/* copy mac and broadcast addresses to linux device */
memcpy(&dev->dev_addr, &local->sparm.b4.a_mac_addr, ADDRLEN);
......@@ -1245,6 +1255,22 @@ static int ray_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
wrq->u.freq.e = 0;
break;
/* Set frequency/channel */
case SIOCSIWFREQ:
/* Reject if card is already initialised */
if(local->card_status != CARD_AWAITING_PARAM)
{
err = -EBUSY;
break;
}
/* Setting by channel number */
if ((wrq->u.freq.m > USA_HOP_MOD) || (wrq->u.freq.e > 0))
err = -EOPNOTSUPP;
else
local->sparm.b5.a_hop_pattern = wrq->u.freq.m;
break;
/* Get current network name (ESSID) */
case SIOCGIWESSID:
if (wrq->u.data.pointer)
......@@ -1262,6 +1288,46 @@ static int ray_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
break;
/* Set desired network name (ESSID) */
case SIOCSIWESSID:
/* Reject if card is already initialised */
if(local->card_status != CARD_AWAITING_PARAM)
{
err = -EBUSY;
break;
}
if (wrq->u.data.pointer)
{
char card_essid[IW_ESSID_MAX_SIZE + 1];
/* Check if we asked for `any' */
if(wrq->u.data.flags == 0)
{
/* Corey : can you do that ? */
err = -EOPNOTSUPP;
}
else
{
/* Check the size of the string */
if(wrq->u.data.length >
IW_ESSID_MAX_SIZE + 1)
{
err = -E2BIG;
break;
}
copy_from_user(card_essid,
wrq->u.data.pointer,
wrq->u.data.length);
card_essid[IW_ESSID_MAX_SIZE] = '\0';
/* Set the ESSID in the card */
memcpy(local->sparm.b5.a_current_ess_id, card_essid,
IW_ESSID_MAX_SIZE);
}
}
break;
/* Get current Access Point (BSSID in our case) */
case SIOCGIWAP:
memcpy(wrq->u.ap_addr.sa_data, local->bss_id, ETH_ALEN);
......@@ -1302,6 +1368,34 @@ static int ray_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
wrq->u.rts.disabled = (wrq->u.rts.value == 32767);
#endif /* WIRELESS_EXT > 8 */
wrq->u.rts.fixed = 1;
break;
/* Set the desired RTS threshold */
case SIOCSIWRTS:
{
int rthr = wrq->u.rts.value;
/* Reject if card is already initialised */
if(local->card_status != CARD_AWAITING_PARAM)
{
err = -EBUSY;
break;
}
/* if(wrq->u.rts.fixed == 0) we should complain */
#if WIRELESS_EXT > 8
if(wrq->u.rts.disabled)
rthr = 32767;
else
#endif /* WIRELESS_EXT > 8 */
if((rthr < 0) || (rthr > 2347)) /* What's the max packet size ??? */
{
err = -EINVAL;
break;
}
local->sparm.b5.a_rts_threshold[0] = (rthr >> 8) & 0xFF;
local->sparm.b5.a_rts_threshold[1] = rthr & 0xFF;
}
break;
/* Get the current fragmentation threshold */
......@@ -1313,6 +1407,35 @@ static int ray_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
#endif /* WIRELESS_EXT > 8 */
wrq->u.frag.fixed = 1;
break;
/* Set the desired fragmentation threshold */
case SIOCSIWFRAG:
{
int fthr = wrq->u.frag.value;
/* Reject if card is already initialised */
if(local->card_status != CARD_AWAITING_PARAM)
{
err = -EBUSY;
break;
}
/* if(wrq->u.frag.fixed == 0) should complain */
#if WIRELESS_EXT > 8
if(wrq->u.frag.disabled)
fthr = 32767;
else
#endif /* WIRELESS_EXT > 8 */
if((fthr < 256) || (fthr > 2347)) /* To check out ! */
{
err = -EINVAL;
break;
}
local->sparm.b5.a_frag_threshold[0] = (fthr >> 8) & 0xFF;
local->sparm.b5.a_frag_threshold[1] = fthr & 0xFF;
}
break;
#endif /* WIRELESS_EXT > 7 */
#if WIRELESS_EXT > 8
......@@ -1323,6 +1446,33 @@ static int ray_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
else
wrq->u.mode = IW_MODE_ADHOC;
break;
/* Set the current mode of operation */
case SIOCSIWMODE:
{
char card_mode = 1;
/* Reject if card is already initialised */
if(local->card_status != CARD_AWAITING_PARAM)
{
err = -EBUSY;
break;
}
switch (wrq->u.mode)
{
case IW_MODE_ADHOC:
card_mode = 0;
// Fall through
case IW_MODE_INFRA:
local->sparm.b5.a_network_type = card_mode;
break;
default:
err = -EINVAL;
}
}
break;
#endif /* WIRELESS_EXT > 8 */
#if WIRELESS_EXT > 7
/* ------------------ IWSPY SUPPORT ------------------ */
......@@ -1549,6 +1699,21 @@ static int ray_open(struct net_device *dev)
if (link->open == 0) local->num_multi = 0;
link->open++;
/* If the card is not started, time to start it ! - Jean II */
if(local->card_status == CARD_AWAITING_PARAM) {
int i;
DEBUG(1,"ray_open: doing init now !\n");
/* Download startup parameters */
if ( (i = dl_startup_params(dev)) < 0)
{
printk(KERN_INFO "ray_dev_init dl_startup_params failed - "
"returns 0x%x\n",i);
return -1;
}
}
if (sniffer) netif_stop_queue(dev);
else netif_start_queue(dev);
......@@ -1572,6 +1737,11 @@ static int ray_dev_close(struct net_device *dev)
if (link->state & DEV_STALE_CONFIG)
mod_timer(&link->release, jiffies + HZ/20);
/* In here, we should stop the hardware (stop the card from being
* active) and set local->card_status to CARD_AWAITING_PARAM, so that
* while the card is closed we can change its configuration.
* Probably also need a COR reset to get sane state - Jean II */
MOD_DEC_USE_COUNT;
return 0;
......
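The new SIOCSIW* cases are reached through the standard Wireless Extensions ioctl path. A hedged userspace sketch of exercising the SIOCSIWFREQ handler added above (the interface name ray0 and the hop pattern value 5 are illustrative, not from the patch):

	#include <string.h>
	#include <sys/socket.h>
	#include <sys/ioctl.h>
	#include <linux/wireless.h>

	int set_hop_pattern(void)
	{
		struct iwreq wrq;
		int fd = socket(AF_INET, SOCK_DGRAM, 0); /* any socket carries wireless ioctls */

		memset(&wrq, 0, sizeof(wrq));
		strncpy(wrq.ifr_name, "ray0", IFNAMSIZ);
		wrq.u.freq.m = 5;	/* hop pattern; the handler rejects m > USA_HOP_MOD */
		wrq.u.freq.e = 0;	/* e == 0 selects "by channel number" */
		return ioctl(fd, SIOCSIWFREQ, &wrq); /* fails with EBUSY once the card is up */
	}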
......@@ -4019,7 +4019,8 @@ static int __init wavelan_config(device * dev)
dev->irq = irq;
request_region(ioaddr, sizeof(ha_t), "wavelan");
if (!request_region(ioaddr, sizeof(ha_t), "wavelan"))
return -EBUSY;
dev->mem_start = 0x0000;
dev->mem_end = 0x0000;
......
......@@ -812,6 +812,7 @@
0074 56k Voice Modem
1033 8014 RCV56ACF 56k Voice Modem
009b Vrc5476
00e0 USB 2.0
1034 Framatome Connectors USA Inc.
1035 Comp. & Comm. Research Lab
1036 Future Domain Corp.
......@@ -5417,6 +5418,7 @@ C 0c Serial bus controller
03 USB Controller
00 UHCI
10 OHCI
20 EHCI
80 Unspecified
fe USB Device
04 Fibre Channel
......
......@@ -300,7 +300,7 @@ static void stop_recording(struct btaudio *bta)
static int btaudio_mixer_open(struct inode *inode, struct file *file)
{
int minor = MINOR(inode->i_rdev);
int minor = minor(inode->i_rdev);
struct btaudio *bta;
for (bta = btaudios; bta != NULL; bta = bta->next)
......@@ -459,7 +459,7 @@ static int btaudio_dsp_open(struct inode *inode, struct file *file,
static int btaudio_dsp_open_digital(struct inode *inode, struct file *file)
{
int minor = MINOR(inode->i_rdev);
int minor = minor(inode->i_rdev);
struct btaudio *bta;
for (bta = btaudios; bta != NULL; bta = bta->next)
......@@ -475,7 +475,7 @@ static int btaudio_dsp_open_digital(struct inode *inode, struct file *file)
static int btaudio_dsp_open_analog(struct inode *inode, struct file *file)
{
int minor = MINOR(inode->i_rdev);
int minor = minor(inode->i_rdev);
struct btaudio *bta;
for (bta = btaudios; bta != NULL; bta = bta->next)
......
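The btaudio hunks follow the 2.5 kdev_t rework, which replaces the uppercase MINOR() macro with a lowercase minor() accessor. A hedged sketch of the relationship; the exact definition in the tree at this point is an assumption, the hunk itself is only the spelling change:

	/* assumed layout: minor number in the low byte of the device number */
	static inline unsigned int minor(kdev_t dev)
	{
		return dev & 0xff;
	}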
......@@ -692,7 +692,7 @@ static int auerchain_control_msg (pauerchain_t acp, struct usb_device *dev, unsi
dr->bRequest = request;
dr->wValue = cpu_to_le16 (value);
dr->wIndex = cpu_to_le16 (index);
dr->wlength = cpu_to_le16 (size);
dr->wLength = cpu_to_le16 (size);
FILL_CONTROL_URB (urb, dev, pipe, (unsigned char*)dr, data, size, /* build urb */
(usb_complete_t)auerchain_blocking_completion,0);
......@@ -891,13 +891,13 @@ static void auerswald_ctrlread_wretcomplete (urb_t * urb)
}
/* fill the control message */
bp->dr->requesttype = AUT_RREQ;
bp->dr->request = AUV_RBLOCK;
bp->dr->length = bp->dr->value; /* temporary stored */
bp->dr->value = cpu_to_le16 (1); /* Retry Flag */
bp->dr->bRequestType = AUT_RREQ;
bp->dr->bRequest = AUV_RBLOCK;
bp->dr->wLength = bp->dr->wValue; /* temporary stored */
bp->dr->wValue = cpu_to_le16 (1); /* Retry Flag */
/* bp->dr->index = channel id; remains */
FILL_CONTROL_URB (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0),
(unsigned char*)bp->dr, bp->bufp, le16_to_cpu (bp->dr->length),
(unsigned char*)bp->dr, bp->bufp, le16_to_cpu (bp->dr->wLength),
(usb_complete_t)auerswald_ctrlread_complete,bp);
/* submit the control msg as next packet */
......@@ -937,11 +937,11 @@ static void auerswald_ctrlread_complete (urb_t * urb)
bp->retries++;
dbg ("Retry count = %d", bp->retries);
/* send a long dummy control-write-message to allow device firmware to react */
bp->dr->requesttype = AUT_WREQ;
bp->dr->request = AUV_DUMMY;
bp->dr->value = bp->dr->length; /* temporary storage */
// bp->dr->index channel ID remains
bp->dr->length = cpu_to_le16 (32); /* >= 8 bytes */
bp->dr->bRequestType = AUT_WREQ;
bp->dr->bRequest = AUV_DUMMY;
bp->dr->wValue = bp->dr->wLength; /* temporary storage */
// bp->dr->wIndex channel ID remains
bp->dr->wLength = cpu_to_le16 (32); /* >= 8 bytes */
FILL_CONTROL_URB (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0),
(unsigned char*)bp->dr, bp->bufp, 32,
(usb_complete_t)auerswald_ctrlread_wretcomplete,bp);
......@@ -1056,11 +1056,11 @@ static void auerswald_int_complete (urb_t * urb)
}
/* fill the control message */
bp->dr->requesttype = AUT_RREQ;
bp->dr->request = AUV_RBLOCK;
bp->dr->value = cpu_to_le16 (0);
bp->dr->index = cpu_to_le16 (channelid | AUH_DIRECT | AUH_UNSPLIT);
bp->dr->length = cpu_to_le16 (bytecount);
bp->dr->bRequestType = AUT_RREQ;
bp->dr->bRequest = AUV_RBLOCK;
bp->dr->wValue = cpu_to_le16 (0);
bp->dr->wIndex = cpu_to_le16 (channelid | AUH_DIRECT | AUH_UNSPLIT);
bp->dr->wLength = cpu_to_le16 (bytecount);
FILL_CONTROL_URB (bp->urbp, cp->usbdev, usb_rcvctrlpipe (cp->usbdev, 0),
(unsigned char*)bp->dr, bp->bufp, bytecount,
(usb_complete_t)auerswald_ctrlread_complete,bp);
......@@ -1773,11 +1773,11 @@ static ssize_t auerchar_write (struct file *file, const char *buf, size_t len, l
/* Set the transfer Parameters */
bp->len = len+AUH_SIZE;
bp->dr->requesttype = AUT_WREQ;
bp->dr->request = AUV_WBLOCK;
bp->dr->value = cpu_to_le16 (0);
bp->dr->index = cpu_to_le16 (ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT);
bp->dr->length = cpu_to_le16 (len+AUH_SIZE);
bp->dr->bRequestType = AUT_WREQ;
bp->dr->bRequest = AUV_WBLOCK;
bp->dr->wValue = cpu_to_le16 (0);
bp->dr->wIndex = cpu_to_le16 (ccp->scontext.id | AUH_DIRECT | AUH_UNSPLIT);
bp->dr->wLength = cpu_to_le16 (len+AUH_SIZE);
FILL_CONTROL_URB (bp->urbp, cp->usbdev, usb_sndctrlpipe (cp->usbdev, 0),
(unsigned char*)bp->dr, bp->bufp, len+AUH_SIZE,
auerchar_ctrlwrite_complete, bp);
......
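All of the auerswald renames converge on the field spellings of struct usb_ctrlrequest, the kernel's mirror of a USB chapter-9 SETUP packet. For reference, its layout (the 16-bit fields are little-endian on the wire, which is why every store above goes through cpu_to_le16()):

	struct usb_ctrlrequest {
		__u8  bRequestType;	/* direction, type, recipient */
		__u8  bRequest;		/* the request code itself */
		__u16 wValue;		/* request-specific value */
		__u16 wIndex;		/* e.g. interface or endpoint */
		__u16 wLength;		/* length of the data stage */
	} __attribute__ ((packed));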
......@@ -5,9 +5,9 @@
O_TARGET :=
obj-$(CONFIG_EHCI_HCD) += ehci-hcd.o
# obj-$(CONFIG_OHCI_HCD) += ohci-hcd.o
# obj-$(CONFIG_UHCI_HCD) += uhci-hcd.o
obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
# obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o
# obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
# Extract lists of the multi-part drivers.
# The 'int-*' lists are the intermediate files used to build the multi's.
......
......@@ -39,12 +39,6 @@
* buffer low/full speed data so the host collects it at high speed.
*/
#ifdef EHCI_SOFT_RETRIES
static int soft_retries = EHCI_SOFT_RETRIES;
MODULE_PARM (soft_retries, "i");
MODULE_PARM_DESC (soft_retries, "Number of software retries for endpoint i/o");
#endif
/*-------------------------------------------------------------------------*/
/* fill a qtd, returning how much of the buffer we were able to queue up */
......@@ -134,8 +128,9 @@ static inline void qtd_copy_status (struct urb *urb, size_t length, u32 token)
urb->status = -EPIPE;
else /* unknown */
urb->status = -EPROTO;
dbg ("devpath %s ep %d-%s qtd token %x --> status %d",
urb->dev->devpath, usb_pipeendpoint (urb->pipe),
dbg ("ep %d-%s qtd token %08x --> status %d",
/* devpath */
usb_pipeendpoint (urb->pipe),
usb_pipein (urb->pipe) ? "in" : "out",
token, urb->status);
......@@ -148,8 +143,8 @@ static inline void qtd_copy_status (struct urb *urb, size_t length, u32 token)
usb_pipeendpoint (pipe),
usb_pipeout (pipe));
if (urb->dev->tt && !usb_pipeint (pipe)) {
err ("must CLEAR_TT_BUFFER, hub %s port %d%s addr %d ep %d",
urb->dev->tt->hub->devpath, urb->dev->ttport,
err ("must CLEAR_TT_BUFFER, hub port %d%s addr %d ep %d",
urb->dev->ttport, /* devpath */
urb->dev->tt->multi ? "" : " (all-ports TT)",
urb->dev->devnum, usb_pipeendpoint (urb->pipe));
// FIXME something (khubd?) should make the hub
......@@ -228,12 +223,10 @@ qh_completions (
struct list_head *qtd_list,
int freeing
) {
struct ehci_qtd *qtd = 0;
struct list_head *next = 0;
u32 token;
struct ehci_qtd *qtd, *last;
struct list_head *next;
struct ehci_qh *qh = 0;
struct urb *urb = 0;
int halted = 0;
int unlink = 0, halted = 0;
unsigned long flags;
int retval = 0;
......@@ -243,89 +236,116 @@ qh_completions (
return retval;
}
for (qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
/* scan QTDs till end of list, or we reach an active one */
for (qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list),
last = 0, next = 0;
next != qtd_list;
qtd = list_entry (next, struct ehci_qtd, qtd_list)) {
token = le32_to_cpu (qtd->hw_token);
if (!qh) {
urb = qtd->urb;
last = qtd, qtd = list_entry (next,
struct ehci_qtd, qtd_list)) {
struct urb *urb = qtd->urb;
u32 token = 0;
/* qh is non-null iff these qtds were queued to the HC */
qh = (struct ehci_qh *) urb->hcpriv;
/* clean up any state from previous QTD ...*/
if (last) {
if (likely (last->urb != urb)) {
/* complete() can reenter this HCD */
spin_unlock_irqrestore (&ehci->lock, flags);
if (likely (freeing != 0))
ehci_urb_done (ehci, last->buf_dma,
last->urb);
else
ehci_urb_complete (ehci, last->buf_dma,
last->urb);
spin_lock_irqsave (&ehci->lock, flags);
retval++;
}
/* qh overlays can have HC's old cached copies of
* next qtd ptrs, if an URB was queued afterwards.
*/
if (qh && cpu_to_le32 (last->qtd_dma) == qh->hw_current
&& last->hw_next != qh->hw_qtd_next) {
qh->hw_alt_next = last->hw_alt_next;
qh->hw_qtd_next = last->hw_next;
}
if (likely (freeing != 0))
ehci_qtd_free (ehci, last);
last = 0;
}
next = qtd->qtd_list.next;
/* if these qtds were queued to the HC, some may be active.
* else we're cleaning up after a failed URB submission.
*/
if (likely (qh != 0)) {
int qh_halted;
qh_halted = __constant_cpu_to_le32 (QTD_STS_HALT)
& qh->hw_token;
token = le32_to_cpu (qtd->hw_token);
halted = halted
|| qh_halted
|| (ehci->hcd.state == USB_STATE_HALT)
|| (qh->qh_state == QH_STATE_IDLE);
if (unlikely ((token & QTD_STS_HALT) != 0)) {
#ifdef EHCI_SOFT_RETRIES
/* extra soft retries for protocol errors */
if (!halted
&& qh->retries < soft_retries
&& (QTD_STS_HALT|QTD_STS_XACT)
== (token & 0xff)
&& QTD_CERR (token) == 0) {
if (qh->retries == 0)
dbg ("soft retry, qh %p qtd %p",
qh, qtd);
qh->retries++;
token &= ~0x0ff;
token |= QTD_STS_ACTIVE;
token |= (EHCI_TUNE_CERR << 10);
/* qtd update not needed */
qh->hw_token = cpu_to_le32 (token);
spin_unlock_irqrestore (&ehci->lock,
flags);
return;
} else if (qh->retries >= soft_retries
&& soft_retries) {
dbg ("retried %d times, qh %p qtd %p",
qh->retries, qh, qtd);
}
#endif /* EHCI_SOFT_RETRIES */
halted = 1;
}
if (unlikely ((token & QTD_STS_ACTIVE) != 0)) {
/* stop scan if qtd is visible to the HC */
if (!halted) {
urb = 0;
break;
}
/* QH halts only because of fault or unlink; in both
* cases, queued URBs get unlinked. But for unlink,
* URBs at the head of the queue can stay linked.
*/
if (unlikely (halted != 0)) {
/* continue cleanup if HC is halted */
/* unlink everything because of HC shutdown? */
if (ehci->hcd.state == USB_STATE_HALT) {
freeing = unlink = 1;
urb->status = -ESHUTDOWN;
goto scrub;
/* explicit unlink, starting here? */
} else if (qh->qh_state == QH_STATE_IDLE
&& (urb->status == -ECONNRESET
|| urb->status == -ENOENT)) {
freeing = unlink = 1;
/* unlink everything because of error? */
} else if (qh_halted
&& !(token & QTD_STS_HALT)) {
freeing = unlink = 1;
if (urb->status == -EINPROGRESS)
urb->status = -ECONNRESET;
/* unlink the rest? */
} else if (unlink) {
urb->status = -ECONNRESET;
/* QH halted to unlink urbs after this? */
} else if ((token & QTD_STS_ACTIVE) != 0) {
qtd = 0;
continue;
}
/* stall? some other urb was unlinked? */
if (urb->status == -EINPROGRESS) {
dbg ("?why? qh %p, qtd %p halted, urb %p, token %8x, len %d",
qh, qtd, urb, token, urb->actual_length);
spin_unlock_irqrestore (&ehci->lock, flags);
return retval;
/*
* FIXME: write this code. When one queued urb is unlinked,
* unlink every succeeding urb.
/* Else QH is active, so we must not modify QTDs
* that HC may be working on. Break from loop.
*/
} else if (unlikely ((token & QTD_STS_ACTIVE) != 0)) {
next = qtd_list;
qtd = 0;
continue;
}
/* else stopped for some other reason */
}
scrub:
spin_lock (&urb->lock);
qtd_copy_status (urb, qtd->length, token);
spin_unlock (&urb->lock);
}
next = qtd->qtd_list.next;
/*
* NOTE: this won't work right with interrupt urbs that
* need multiple qtds ... only the first scan of qh->qtd_list
* starts at the right qtd, yet multiple scans could happen
* for transfers that are scheduled across multiple uframes.
* (Such schedules are not currently allowed!)
*/
if (likely (freeing != 0))
list_del (&qtd->qtd_list);
......@@ -347,8 +367,6 @@ return retval;
qtd->hw_buf [0] |= cpu_to_le32 (0x0fff & qtd->buf_dma);
}
spin_unlock_irqrestore (&ehci->lock, flags);
#if 0
if (urb->status == -EINPROGRESS)
vdbg (" qtd %p ok, urb %p, token %8x, len %d",
......@@ -364,21 +382,6 @@ return retval;
pci_unmap_single (ehci->hcd.pdev,
qtd->buf_dma, sizeof (struct usb_ctrlrequest),
PCI_DMA_TODEVICE);
/* another queued urb? */
if (unlikely (qtd->urb != urb)) {
if (likely (freeing != 0))
ehci_urb_done (ehci, qtd->buf_dma, urb);
else
ehci_urb_complete (ehci, qtd->buf_dma, urb);
retval++;
urb = qtd->urb;
}
if (likely (freeing != 0))
ehci_qtd_free (ehci, qtd);
spin_lock_irqsave (&ehci->lock, flags);
qtd = list_entry (next, struct ehci_qtd, qtd_list);
}
/* patch up list head? */
......@@ -389,11 +392,12 @@ return retval;
spin_unlock_irqrestore (&ehci->lock, flags);
/* last urb's completion might still need calling */
if (likely (qtd && urb)) {
if (likely (freeing != 0))
ehci_urb_done (ehci, qtd->buf_dma, urb);
else
ehci_urb_complete (ehci, qtd->buf_dma, urb);
if (likely (last != 0)) {
if (likely (freeing != 0)) {
ehci_urb_done (ehci, last->buf_dma, last->urb);
ehci_qtd_free (ehci, last);
} else
ehci_urb_complete (ehci, last->buf_dma, last->urb);
retval++;
}
return retval;
......@@ -749,7 +753,9 @@ submit_async (
/* is an URB queued to this qh already? */
if (unlikely (!list_empty (&qh->qtd_list))) {
struct ehci_qtd *last_qtd;
int short_rx = 0;
/* update the last qtd's "next" pointer */
// dbg_qh ("non-empty qh", ehci, qh);
last_qtd = list_entry (qh->qtd_list.prev,
struct ehci_qtd, qtd_list);
......@@ -760,6 +766,21 @@ submit_async (
&& (epnum & 0x10)) {
// only the last QTD for now
last_qtd->hw_alt_next = hw_next;
short_rx = 1;
}
/* Adjust any old copies in qh overlay too.
* Interrupt code must cope with case of HC having it
* cached, and clobbering these updates.
* ... complicates getting rid of extra interrupts!
*/
if (qh->hw_current == cpu_to_le32 (last_qtd->qtd_dma)) {
wmb ();
qh->hw_qtd_next = hw_next;
if (short_rx)
qh->hw_alt_next = hw_next
| (qh->hw_alt_next & 0x1e);
vdbg ("queue to qh %p, patch", qh);
}
/* no URB queued */
......@@ -822,8 +843,8 @@ static void end_unlink_async (struct ehci_hcd *ehci)
qh_completions (ehci, &qh->qtd_list, 1);
// FIXME unlink any urb should unlink all following urbs,
// so that this will never happen
// unlink any urb should now unlink all following urbs, so that
// relinking only happens for urbs before the unlinked ones.
if (!list_empty (&qh->qtd_list)
&& HCD_IS_RUNNING (ehci->hcd.state))
qh_link_async (ehci, qh);
......
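The qh_completions() rework above replaces an implicit look-back (completing an URB only when a qtd from a different URB shows up) with an explicit 'last' pointer: each iteration finishes the previous qtd only after the current one has been inspected, which is what lets the new code patch the qh overlay using both neighbours. The bare skeleton of that idiom, with complete_one() as a hypothetical stand-in for ehci_urb_done()/ehci_qtd_free():

	struct ehci_qtd *qtd, *last = NULL;
	struct list_head *pos;

	for (pos = qtd_list->next; pos != qtd_list; pos = pos->next) {
		qtd = list_entry(pos, struct ehci_qtd, qtd_list);
		if (last && last->urb != qtd->urb)
			complete_one(last);	/* previous URB fully scanned */
		last = qtd;
	}
	if (last)
		complete_one(last);		/* tail still pending after the loop */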
......@@ -429,7 +429,7 @@ void ext3_put_super (struct super_block * sb)
J_ASSERT(list_empty(&sbi->s_orphan));
invalidate_bdev(sb->s_bdev, 0);
if (sbi->journal_bdev != sb->s_bdev) {
if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
/*
* Invalidate the journal device's buffers. We don't want them
* floating about in memory - the physical journal device may
......
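A hedged reading of the ext3 one-liner: with an internal journal there is no separate journal device, so sbi->journal_bdev is NULL; the old test (NULL != sb->s_bdev) was then true, and the body, which invalidates the journal device's buffers, was handed the NULL pointer. The fixed condition only fires for a real, separate journal device:

	if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev)
		/* external journal only: flush its buffers separately */
		invalidate_bdev(sbi->journal_bdev, 0);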
......@@ -1570,6 +1570,7 @@
#define PCI_DEVICE_ID_INTEL_82434 0x04a3
#define PCI_DEVICE_ID_INTEL_I960 0x0960
#define PCI_DEVICE_ID_INTEL_82562ET 0x1031
#define PCI_DEVICE_ID_INTEL_82801CAM 0x1038
#define PCI_DEVICE_ID_INTEL_82559ER 0x1209
#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
#define PCI_DEVICE_ID_INTEL_82092AA_1 0x1222
......
......@@ -305,11 +305,7 @@ struct task_struct {
prio_array_t *array;
unsigned int time_slice;
unsigned long sleep_timestamp, run_timestamp;
#define SLEEP_HIST_SIZE 4
int sleep_hist[SLEEP_HIST_SIZE];
int sleep_idx;
unsigned long swap_cnt_last;
unsigned long policy;
unsigned long cpus_allowed;
......
......@@ -330,7 +330,6 @@ static void rest_init(void)
{
kernel_thread(init, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
unlock_kernel();
init_idle(); /* This will also wait for all other CPUs */
cpu_idle();
}
......@@ -418,6 +417,16 @@ asmlinkage void __init start_kernel(void)
* make syscalls (and thus be locked).
*/
smp_init();
/*
* Finally, we wait for all other CPUs, and initialize this
* thread that will become the idle thread for the boot CPU.
* After this, the scheduler is fully initialized, and we can
* start creating and running new threads.
*/
init_idle();
/* Do the rest non-__init'ed, we're now alive */
rest_init();
}
......
......@@ -705,9 +705,6 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
current->time_slice = 1;
expire_task(current);
}
p->sleep_timestamp = p->run_timestamp = jiffies;
memset(p->sleep_hist, 0, sizeof(p->sleep_hist[0])*SLEEP_HIST_SIZE);
p->sleep_idx = 0;
__restore_flags(flags);
/*
......
......@@ -46,8 +46,9 @@ struct prio_array {
static struct runqueue {
int cpu;
spinlock_t lock;
unsigned long nr_running, nr_switches, last_rt_event;
unsigned long nr_running, nr_switches;
task_t *curr, *idle;
unsigned long swap_cnt;
prio_array_t *active, *expired, arrays[2];
char __pad [SMP_CACHE_BYTES];
} runqueues [NR_CPUS] __cacheline_aligned;
......@@ -91,115 +92,19 @@ static inline void enqueue_task(struct task_struct *p, prio_array_t *array)
p->array = array;
}
/*
* This is the per-process load estimator. Processes that generate
* more load than the system can handle get a priority penalty.
*
* The estimator uses a 4-entry load-history ringbuffer which is
* updated whenever a task is moved to/from the runqueue. The load
* estimate is also updated from the timer tick to get an accurate
* estimation of currently executing tasks as well.
*/
#define NEXT_IDX(idx) (((idx) + 1) % SLEEP_HIST_SIZE)
static inline void update_sleep_avg_deactivate(task_t *p)
{
unsigned int idx;
unsigned long j = jiffies, last_sample = p->run_timestamp / HZ,
curr_sample = j / HZ, delta = curr_sample - last_sample;
if (unlikely(delta)) {
if (delta < SLEEP_HIST_SIZE) {
for (idx = 0; idx < delta; idx++) {
p->sleep_idx++;
p->sleep_idx %= SLEEP_HIST_SIZE;
p->sleep_hist[p->sleep_idx] = 0;
}
} else {
for (idx = 0; idx < SLEEP_HIST_SIZE; idx++)
p->sleep_hist[idx] = 0;
p->sleep_idx = 0;
}
}
p->sleep_timestamp = j;
}
#if SLEEP_HIST_SIZE != 4
# error update this code.
#endif
static inline unsigned int get_sleep_avg(task_t *p, unsigned long j)
{
unsigned int sum;
sum = p->sleep_hist[0];
sum += p->sleep_hist[1];
sum += p->sleep_hist[2];
sum += p->sleep_hist[3];
return sum * HZ / ((SLEEP_HIST_SIZE-1)*HZ + (j % HZ));
}
static inline void update_sleep_avg_activate(task_t *p, unsigned long j)
{
unsigned int idx;
unsigned long delta_ticks, last_sample = p->sleep_timestamp / HZ,
curr_sample = j / HZ, delta = curr_sample - last_sample;
if (unlikely(delta)) {
if (delta < SLEEP_HIST_SIZE) {
p->sleep_hist[p->sleep_idx] += HZ - (p->sleep_timestamp % HZ);
p->sleep_idx++;
p->sleep_idx %= SLEEP_HIST_SIZE;
for (idx = 1; idx < delta; idx++) {
p->sleep_idx++;
p->sleep_idx %= SLEEP_HIST_SIZE;
p->sleep_hist[p->sleep_idx] = HZ;
}
} else {
for (idx = 0; idx < SLEEP_HIST_SIZE; idx++)
p->sleep_hist[idx] = HZ;
p->sleep_idx = 0;
}
p->sleep_hist[p->sleep_idx] = 0;
delta_ticks = j % HZ;
} else
delta_ticks = j - p->sleep_timestamp;
p->sleep_hist[p->sleep_idx] += delta_ticks;
p->run_timestamp = j;
}
static inline void activate_task(task_t *p, runqueue_t *rq)
{
prio_array_t *array = rq->active;
unsigned long j = jiffies;
unsigned int sleep, load;
int penalty;
if (likely(p->run_timestamp == j))
goto enqueue;
/*
* Give the process a priority penalty if it has not slept often
* enough in the past. We scale the priority penalty according
* to the current load of the runqueue, and the 'load history'
* this process has. Eg. if the CPU has 3 processes running
* right now then a process that has slept more than two-thirds
* of the time is considered to be 'interactive'. The higher
* the load of the CPUs is, the easier it is for a process to
get a non-interactivity penalty.
*/
#define MAX_PENALTY (MAX_USER_PRIO/3)
update_sleep_avg_activate(p, j);
sleep = get_sleep_avg(p, j);
load = HZ - sleep;
penalty = (MAX_PENALTY * load)/HZ;
if (!rt_task(p)) {
p->prio = NICE_TO_PRIO(p->__nice) + penalty;
if (p->prio > MAX_PRIO-1)
p->prio = MAX_PRIO-1;
unsigned long prio_bonus = rq->swap_cnt - p->swap_cnt_last;
if (prio_bonus > MAX_PRIO)
prio_bonus = MAX_PRIO;
p->prio -= prio_bonus;
if (p->prio < MAX_RT_PRIO)
p->prio = MAX_RT_PRIO;
}
enqueue:
enqueue_task(p, array);
rq->nr_running++;
}
......@@ -209,7 +114,7 @@ static inline void deactivate_task(struct task_struct *p, runqueue_t *rq)
rq->nr_running--;
dequeue_task(p, p->array);
p->array = NULL;
update_sleep_avg_deactivate(p);
p->swap_cnt_last = rq->swap_cnt;
}
static inline void resched_task(task_t *p)
......@@ -505,7 +410,8 @@ static void load_balance(runqueue_t *this_rq)
spin_unlock(&busiest->lock);
}
#define REBALANCE_TICK (HZ/100)
/* Rebalance every 250 msecs */
#define REBALANCE_TICK (HZ/4)
void idle_tick(void)
{
......@@ -532,39 +438,18 @@ void expire_task(task_t *p)
*/
spin_lock_irqsave(&rq->lock, flags);
if ((p->policy != SCHED_FIFO) && !--p->time_slice) {
unsigned int time_slice;
p->need_resched = 1;
if (rt_task(p))
p->time_slice = RT_PRIO_TO_TIMESLICE(p->prio);
else
p->time_slice = PRIO_TO_TIMESLICE(p->prio);
/*
* Timeslice used up - discard any possible
* priority penalty:
*/
dequeue_task(p, rq->active);
/*
* Tasks that have nice values of -20 ... -15 are put
* back into the active array. If they use up too much
* CPU time then they'll get a priority penalty anyway
* so this can not starve other processes accidentally.
* Otherwise this is pretty handy for sysadmins ...
*/
if (p->prio <= MAX_RT_PRIO + MAX_PENALTY/2)
enqueue_task(p, rq->active);
else
enqueue_task(p, rq->expired);
} else {
/*
* Deactivate + activate the task so that the
* load estimator gets updated properly:
*/
time_slice = RT_PRIO_TO_TIMESLICE(p->prio);
if (!rt_task(p)) {
deactivate_task(p, rq);
activate_task(p, rq);
time_slice = PRIO_TO_TIMESLICE(p->prio);
if (++p->prio >= MAX_PRIO)
p->prio = MAX_PRIO - 1;
}
p->time_slice = time_slice;
enqueue_task(p, rq->expired);
}
load_balance(rq);
spin_unlock_irqrestore(&rq->lock, flags);
}
......@@ -616,6 +501,7 @@ asmlinkage void schedule(void)
rq->active = rq->expired;
rq->expired = array;
array = rq->active;
rq->swap_cnt++;
}
idx = sched_find_first_zero_bit(array->bitmap);
......@@ -1301,6 +1187,7 @@ void __init sched_init(void)
rq->expired = rq->arrays + 1;
spin_lock_init(&rq->lock);
rq->cpu = i;
rq->swap_cnt = 0;
for (j = 0; j < 2; j++) {
array = rq->arrays + j;
......
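Taken together, the scheduler hunks swap the 4-entry per-task sleep-history estimator for a far cheaper proxy: rq->swap_cnt counts active/expired array swaps, each task snapshots it when dequeued, and the difference at wakeup becomes a priority bonus. A worked example under the diff's definitions, with illustrative numbers:

	/* deactivate_task(): remember the swap epoch */
	p->swap_cnt_last = rq->swap_cnt;	/* say, 7 */

	/* ... the runqueue swaps arrays three times while p sleeps ... */

	/* activate_task(): bonus = swaps missed while sleeping */
	unsigned long prio_bonus = rq->swap_cnt - p->swap_cnt_last;	/* 10 - 7 = 3 */
	if (prio_bonus > MAX_PRIO)
		prio_bonus = MAX_PRIO;
	p->prio -= prio_bonus;		/* lower value == scheduled sooner */
	if (p->prio < MAX_RT_PRIO)
		p->prio = MAX_RT_PRIO;	/* never promoted into the RT band */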
......@@ -490,19 +490,35 @@ int ircomm_proc_read(char *buf, char **start, off_t offset, int len)
{
struct ircomm_cb *self;
unsigned long flags;
int i=0;
save_flags(flags);
cli();
len = 0;
len += sprintf(buf+len, "Instance %d:\n", i++);
self = (struct ircomm_cb *) hashbin_get_first(ircomm);
while (self != NULL) {
ASSERT(self->magic == IRCOMM_MAGIC, return len;);
if(self->line < 0x10)
len += sprintf(buf+len, "ircomm%d", self->line);
else
len += sprintf(buf+len, "irlpt%d", self->line - 0x10);
len += sprintf(buf+len, " state: %s, ",
ircomm_state[ self->state]);
len += sprintf(buf+len,
"slsap_sel: %#02x, dlsap_sel: %#02x, mode:",
self->slsap_sel, self->dlsap_sel);
if(self->service_type & IRCOMM_3_WIRE_RAW)
len += sprintf(buf+len, " 3-wire-raw");
if(self->service_type & IRCOMM_3_WIRE)
len += sprintf(buf+len, " 3-wire");
if(self->service_type & IRCOMM_9_WIRE)
len += sprintf(buf+len, " 9-wire");
if(self->service_type & IRCOMM_CENTRONICS)
len += sprintf(buf+len, " Centronics");
len += sprintf(buf+len, "\n");
self = (struct ircomm_cb *) hashbin_get_next(ircomm);
}
restore_flags(flags);
......
......@@ -103,12 +103,30 @@ int ircomm_lmp_connect_request(struct ircomm_cb *self,
*
*
*/
int ircomm_lmp_connect_response(struct ircomm_cb *self, struct sk_buff *skb)
int ircomm_lmp_connect_response(struct ircomm_cb *self, struct sk_buff *userdata)
{
struct sk_buff *skb;
int ret;
IRDA_DEBUG(0, __FUNCTION__"()\n");
/* Any userdata supplied? */
if (userdata == NULL) {
skb = dev_alloc_skb(64);
if (!skb)
return -ENOMEM;
/* Reserve space for MUX and LAP header */
skb_reserve(skb, LMP_MAX_HEADER);
} else {
skb = userdata;
/*
* Check that the client has reserved enough space for
* headers
*/
ASSERT(skb_headroom(skb) >= LMP_MAX_HEADER, return -1;);
}
ret = irlmp_connect_response(self->lsap, skb);
return 0;
......
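ircomm_lmp_connect_response() now synthesizes a response skb when the caller supplies none; the important detail is reserving LMP_MAX_HEADER of headroom so IrLMP and IrLAP can prepend their headers in place rather than reallocating. The idiom in isolation:

	struct sk_buff *skb = dev_alloc_skb(64);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, LMP_MAX_HEADER);	/* lower layers skb_push() into this gap */
	/* any payload would then be appended with skb_put() */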
......@@ -80,6 +80,7 @@ const char *infrared_mode[] = {
"TV_REMOTE",
};
#ifdef CONFIG_IRDA_DEBUG
static const char *task_state[] = {
"IRDA_TASK_INIT",
"IRDA_TASK_DONE",
......@@ -91,6 +92,7 @@ static const char *task_state[] = {
"IRDA_TASK_CHILD_WAIT",
"IRDA_TASK_CHILD_DONE",
};
#endif /* CONFIG_IRDA_DEBUG */
static void irda_task_timer_expired(void *data);
......
......@@ -41,6 +41,7 @@
#include <net/irda/iriap_event.h>
#include <net/irda/iriap.h>
#ifdef CONFIG_IRDA_DEBUG
/* FIXME: This one should go in irlmp.c */
static const char *ias_charset_types[] = {
"CS_ASCII",
......@@ -55,6 +56,7 @@ static const char *ias_charset_types[] = {
"CS_ISO_8859_9",
"CS_UNICODE"
};
#endif /* CONFIG_IRDA_DEBUG */
static hashbin_t *iriap = NULL;
static __u32 service_handle;
......
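This hunk, like the irlap.c, irlap_event.c, and irlmp_event.c hunks below, wraps name tables in #ifdef CONFIG_IRDA_DEBUG. The rationale is inferred, not stated in the commit: the tables are referenced only from IRDA_DEBUG() expansions, which compile to nothing in non-debug builds, so the unguarded arrays were dead data and drew unused warnings. The shape of the change:

	#ifdef CONFIG_IRDA_DEBUG
	static const char *ias_charset_types[] = {
		"CS_ASCII", /* ... */
	};
	#endif /* CONFIG_IRDA_DEBUG */

	/* the only users are debug statements, compiled out together
	 * with the table (the charset index here is illustrative) */
	IRDA_DEBUG(2, "charset %s\n", ias_charset_types[charset]);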
......@@ -317,8 +317,15 @@ void irlan_connect_indication(void *instance, void *sap, struct qos_info *qos,
del_timer(&self->watchdog_timer);
irlan_do_provider_event(self, IRLAN_DATA_CONNECT_INDICATION, skb);
irlan_do_client_event(self, IRLAN_DATA_CONNECT_INDICATION, skb);
/* If you want to pass the skb to *both* state machines, you will
* need to skb_clone() it, so that you don't free it twice.
* As the state machines don't need it, get rid of it here...
* Jean II */
if (skb)
dev_kfree_skb(skb);
irlan_do_provider_event(self, IRLAN_DATA_CONNECT_INDICATION, NULL);
irlan_do_client_event(self, IRLAN_DATA_CONNECT_INDICATION, NULL);
if (self->provider.access_type == ACCESS_PEER) {
/*
......@@ -421,6 +428,13 @@ void irlan_disconnect_indication(void *instance, void *sap, LM_REASON reason,
break;
}
/* If you want to pass the skb to *both* state machines, you will
* need to skb_clone() it, so that you don't free it twice.
* As the state machines don't need it, get rid of it here...
* Jean II */
if (userdata)
dev_kfree_skb(userdata);
irlan_do_client_event(self, IRLAN_LMP_DISCONNECT, NULL);
irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL);
......
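Jean II's comment states the rule the old code broke: handing one skb to two consumers that each free it is a double free, so the skb is now freed once here and both state machines get NULL. If both sides ever did need the data, the clone pattern the comment alludes to would look like this (a sketch, not what the patch does):

	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
	if (clone)
		irlan_do_provider_event(self, IRLAN_DATA_CONNECT_INDICATION, clone);
	irlan_do_client_event(self, IRLAN_DATA_CONNECT_INDICATION, skb);
	/* each path now owns exactly one reference and frees its own */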
......@@ -61,7 +61,16 @@ int irlan_eth_init(struct net_device *dev)
dev->hard_start_xmit = irlan_eth_xmit;
dev->get_stats = irlan_eth_get_stats;
dev->set_multicast_list = irlan_eth_set_multicast_list;
dev->features |= NETIF_F_DYNALLOC;
/* NETIF_F_DYNALLOC feature was set by irlan_eth_init() and would
* cause the unregister_netdev() to do asynchronous completion _and_
* kfree self->dev afterwards, which is really bad because the
* netdevice was not allocated separately but is embedded in
* our control block and therefore gets freed with *self.
* The only reason why this would have been enabled is to hide
* some netdev refcount issues. If unregister_netdev() blocks
* forever, tell us about it... */
//dev->features |= NETIF_F_DYNALLOC;
ether_setup(dev);
......
......@@ -59,6 +59,7 @@ int sysctl_warn_noreply_time = 3;
extern void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb);
static void __irlap_close(struct irlap_cb *self);
#ifdef CONFIG_IRDA_DEBUG
static char *lap_reasons[] = {
"ERROR, NOT USED",
"LAP_DISC_INDICATION",
......@@ -69,6 +70,7 @@ static char *lap_reasons[] = {
"LAP_PRIMARY_CONFLICT",
"ERROR, NOT USED",
};
#endif /* CONFIG_IRDA_DEBUG */
#ifdef CONFIG_PROC_FS
int irlap_proc_read(char *, char **, off_t, int);
......
......@@ -77,6 +77,7 @@ static int irlap_state_sclose (struct irlap_cb *self, IRLAP_EVENT event,
static int irlap_state_reset_check(struct irlap_cb *, IRLAP_EVENT event,
struct sk_buff *, struct irlap_info *);
#ifdef CONFIG_IRDA_DEBUG
static const char *irlap_event[] = {
"DISCOVERY_REQUEST",
"CONNECT_REQUEST",
......@@ -117,6 +118,7 @@ static const char *irlap_event[] = {
"BACKOFF_TIMER_EXPIRED",
"MEDIA_BUSY_TIMER_EXPIRED",
};
#endif /* CONFIG_IRDA_DEBUG */
const char *irlap_state[] = {
"LAP_NDM",
......@@ -312,7 +314,6 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
{
discovery_t *discovery_rsp;
int ret = 0;
int i;
ASSERT(self != NULL, return -1;);
ASSERT(self->magic == LAP_MAGIC, return -1;);
......@@ -478,6 +479,8 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
break;
#ifdef CONFIG_IRDA_ULTRA
case SEND_UI_FRAME:
{
int i;
/* Only allowed to repeat an operation twice */
for (i=0; ((i<2) && (self->media_busy == FALSE)); i++) {
skb = skb_dequeue(&self->txq_ultra);
......@@ -492,6 +495,7 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event,
irda_device_set_media_busy(self->netdev, TRUE);
}
break;
}
case RECV_UI_FRAME:
/* Only accept broadcast frames in NDM mode */
if (info->caddr != CBROADCAST) {
......
......@@ -49,6 +49,7 @@ const char *irlsap_state[] = {
"LSAP_SETUP_PEND",
};
#ifdef CONFIG_IRDA_DEBUG
static const char *irlmp_event[] = {
"LM_CONNECT_REQUEST",
"LM_CONNECT_CONFIRM",
......@@ -75,6 +76,7 @@ static const char *irlmp_event[] = {
"LM_LAP_DISCOVERY_CONFIRM",
"LM_LAP_IDLE_TIMEOUT",
};
#endif /* CONFIG_IRDA_DEBUG */
/* LAP Connection control proto declarations */
static void irlmp_state_standby (struct lap_cb *, IRLMP_EVENT,
......
......@@ -197,7 +197,7 @@ int __init irda_init(void)
return 0;
}
static void __exit irda_cleanup(void)
void __exit irda_cleanup(void)
{
#ifdef CONFIG_SYSCTL
irda_sysctl_unregister();
......