Commit 54de58e9 authored by Linus Torvalds

Merge http://gkernel.bkbits.net/misc-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents cff20aab 801783b6
......@@ -1483,12 +1483,13 @@ vortex_up(struct net_device *dev)
mii_reg1 = mdio_read(dev, vp->phys[0], 1);
mii_reg5 = mdio_read(dev, vp->phys[0], 5);
if (mii_reg5 == 0xffff || mii_reg5 == 0x0000) {
; /* No MII device or no link partner report */
netif_carrier_off(dev); /* No MII device or no link partner report */
} else {
mii_reg5 &= vp->advertising;
if ((mii_reg5 & 0x0100) != 0 /* 100baseTx-FD */
|| (mii_reg5 & 0x00C0) == 0x0040) /* 10T-FD, but not 100-HD */
vp->full_duplex = 1;
netif_carrier_on(dev);
}
vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
if (vortex_debug > 1)
......@@ -1692,13 +1693,16 @@ vortex_timer(unsigned long data)
switch (dev->if_port) {
case XCVR_10baseT: case XCVR_100baseTx: case XCVR_100baseFx:
if (media_status & Media_LnkBeat) {
netif_carrier_on(dev);
ok = 1;
if (vortex_debug > 1)
printk(KERN_DEBUG "%s: Media %s has link beat, %x.\n",
dev->name, media_tbl[dev->if_port].name, media_status);
} else if (vortex_debug > 1)
} else if (vortex_debug > 1) {
netif_carrier_off(dev);
printk(KERN_DEBUG "%s: Media %s has no link beat, %x.\n",
dev->name, media_tbl[dev->if_port].name, media_status);
}
break;
case XCVR_MII: case XCVR_NWAY:
{
......@@ -1707,7 +1711,7 @@ vortex_timer(unsigned long data)
if (vortex_debug > 2)
printk(KERN_DEBUG "%s: MII transceiver has status %4.4x.\n",
dev->name, mii_status);
if (mii_status & 0x0004) {
if (mii_status & BMSR_LSTATUS) {
int mii_reg5 = mdio_read(dev, vp->phys[0], 5);
if (! vp->force_fd && mii_reg5 != 0xffff) {
int duplex;
......@@ -1731,6 +1735,9 @@ vortex_timer(unsigned long data)
/* AKPM: bug: should reset Tx and Rx after setting Duplex. Page 180 */
}
}
netif_carrier_on(dev);
} else {
netif_carrier_off(dev);
}
}
break;
......
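The 3c59x hunks above wire the driver's existing MII link checks into the generic carrier notifications (netif_carrier_on()/netif_carrier_off()) and replace the bare 0x0004 mask with the BMSR_LSTATUS constant. A minimal sketch of the resulting pattern, assuming a driver-local mdio_read() helper like the one in 3c59x, not the literal vortex code:

/*
 * Sketch only: report MII link state to the network core.
 * BMSR_LSTATUS and MII_BMSR come from <linux/mii.h>,
 * netif_carrier_on()/netif_carrier_off() from <linux/netdevice.h>.
 */
#include <linux/mii.h>
#include <linux/netdevice.h>

static int mdio_read(struct net_device *dev, int phy_id, int location);

static void report_link(struct net_device *dev, int phy_id)
{
	int bmsr = mdio_read(dev, phy_id, MII_BMSR);	/* basic mode status */

	if (bmsr & BMSR_LSTATUS)
		netif_carrier_on(dev);	/* link beat present */
	else
		netif_carrier_off(dev);	/* no link partner */
}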
......@@ -1226,17 +1226,6 @@ CONFIG_NATSEMI
More specific information and updates are available from
<http://www.scyld.com/network/natsemi.html>.
CONFIG_NATSEMI_CABLE_MAGIC
Some systems see lots of errors with NatSemi ethernet controllers
on certain cables. If you are seeing lots of errors, try turning
this option on. Some boards have incorrect values for supporting
resistors that can cause this change to break. If you turn this
option on and your network suddenly stops working, turn this
option off.
Say N unless you are certain you need this option.
Vendors should not enable this option by default.
CONFIG_SK_G16
If you have a network (Ethernet) card of this type, say Y and read
the Ethernet-HOWTO, available from
......
......@@ -165,9 +165,6 @@ if [ "$CONFIG_NET_ETHERNET" = "y" ]; then
dep_tristate ' Mylex EISA LNE390A/B support (EXPERIMENTAL)' CONFIG_LNE390 $CONFIG_EISA $CONFIG_EXPERIMENTAL
dep_tristate ' Myson MTD-8xx PCI Ethernet support' CONFIG_FEALNX $CONFIG_PCI
dep_tristate ' National Semiconductor DP8381x series PCI Ethernet support' CONFIG_NATSEMI $CONFIG_PCI
if [ "$CONFIG_NATSEMI" = "y" -o "$CONFIG_NATSEMI" = "m" ]; then
bool ' NatSemi workaround for high errors' CONFIG_NATSEMI_CABLE_MAGIC
fi
dep_tristate ' PCI NE2000 and clones support (see help)' CONFIG_NE2K_PCI $CONFIG_PCI
dep_tristate ' Novell/Eagle/Microdyne NE3210 EISA support (EXPERIMENTAL)' CONFIG_NE3210 $CONFIG_EISA $CONFIG_EXPERIMENTAL
dep_tristate ' Racal-Interlan EISA ES3210 support (EXPERIMENTAL)' CONFIG_ES3210 $CONFIG_EISA $CONFIG_EXPERIMENTAL
......
......@@ -158,7 +158,7 @@ static int act200l_change_speed(struct irda_task *task)
}
break;
case IRDA_TASK_CHILD_WAIT:
WARNING(__FUNCTION__ "(), resetting dongle timed out!\n");
WARNING("%s(), resetting dongle timed out!\n", __FUNCTION__);
ret = -1;
break;
case IRDA_TASK_CHILD_DONE:
......@@ -203,7 +203,7 @@ static int act200l_change_speed(struct irda_task *task)
self->speed_task = NULL;
break;
default:
ERROR(__FUNCTION__ "(), unknown state %d\n", task->state);
ERROR("%s(), unknown state %d\n", __FUNCTION__, task->state);
irda_task_next_state(task, IRDA_TASK_DONE);
self->speed_task = NULL;
ret = -1;
......@@ -269,7 +269,7 @@ static int act200l_reset(struct irda_task *task)
self->reset_task = NULL;
break;
default:
ERROR(__FUNCTION__ "(), unknown state %d\n", task->state);
ERROR("%s(), unknown state %d\n", __FUNCTION__, task->state);
irda_task_next_state(task, IRDA_TASK_DONE);
self->reset_task = NULL;
ret = -1;
......
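The act200l.c hunks above show the change that repeats through every IrDA driver in this merge (actisys, ali_ircc, girbil, irport, irtty, mcp2120 and nsc_ircc below): __FUNCTION__ can no longer be pasted onto an adjacent string literal, because gcc 3 defines it as a const char[] variable rather than a literal (the pasting only worked as a gcc 2.x extension), so the function name is now passed through a "%s" conversion instead. A small user-space illustration, with printf() standing in for the kernel's WARNING()/ERROR() macros:

/* Illustration of the __FUNCTION__ change, outside the kernel. */
#include <stdio.h>

static void change_speed(int state)
{
	/* Old style, relying on __FUNCTION__ being a string literal:
	 *	printf(__FUNCTION__ "(), unknown state %d\n", state);
	 * gcc 3.x rejects this, since __FUNCTION__ is now a variable. */

	/* New style used throughout this merge: */
	printf("%s(), unknown state %d\n", __FUNCTION__, state);
}

int main(void)
{
	change_speed(42);	/* prints: change_speed(), unknown state 42 */
	return 0;
}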
......@@ -259,7 +259,7 @@ static int actisys_reset(struct irda_task *task)
self->speed = 9600; /* That's the default */
break;
default:
ERROR(__FUNCTION__ "(), unknown state %d\n", task->state);
ERROR("%s(), unknown state %d\n", __FUNCTION__, task->state);
irda_task_next_state(task, IRDA_TASK_DONE);
self->reset_task = NULL;
ret = -1;
......
......@@ -141,12 +141,12 @@ int __init ali_ircc_init(void)
int reg, revision;
int i = 0;
IRDA_DEBUG(2, __FUNCTION__ "(), ---------------- Start ----------------\n");
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
/* Probe for all the ALi chipsets we know about */
for (chip= chips; chip->name; chip++, i++)
{
IRDA_DEBUG(2, __FUNCTION__"(), Probing for %s ...\n", chip->name);
IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __FUNCTION__, chip->name);
/* Try all config registers for this chip */
for (cfg=0; cfg<2; cfg++)
......@@ -176,13 +176,11 @@ int __init ali_ircc_init(void)
if (reg == chip->cid_value)
{
IRDA_DEBUG(2, __FUNCTION__
"(), Chip found at 0x%03x\n", cfg_base);
IRDA_DEBUG(2, "%s(), Chip found at 0x%03x\n", __FUNCTION__, cfg_base);
outb(0x1F, cfg_base);
revision = inb(cfg_base+1);
IRDA_DEBUG(2, __FUNCTION__
"(), Found %s chip, revision=%d\n",
IRDA_DEBUG(2, "%s(), Found %s chip, revision=%d\n", __FUNCTION__,
chip->name, revision);
/*
......@@ -205,15 +203,14 @@ int __init ali_ircc_init(void)
}
else
{
IRDA_DEBUG(2, __FUNCTION__
"(), No %s chip at 0x%03x\n", chip->name, cfg_base);
IRDA_DEBUG(2, "%s(), No %s chip at 0x%03x\n", __FUNCTION__, chip->name, cfg_base);
}
/* Exit configuration */
outb(0xbb, cfg_base);
}
}
IRDA_DEBUG(2, __FUNCTION__ "(), ----------------- End -----------------\n");
IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
return ret;
}
......@@ -227,7 +224,7 @@ static void __exit ali_ircc_cleanup(void)
{
int i;
IRDA_DEBUG(2, __FUNCTION__ "(), ---------------- Start ----------------\n");
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
pm_unregister_all(ali_ircc_pmproc);
......@@ -236,7 +233,7 @@ static void __exit ali_ircc_cleanup(void)
ali_ircc_close(dev_self[i]);
}
IRDA_DEBUG(2, __FUNCTION__ "(), ----------------- End -----------------\n");
IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
}
/*
......@@ -251,9 +248,10 @@ static int ali_ircc_open(int i, chipio_t *info)
struct ali_ircc_cb *self;
struct pm_dev *pmdev;
int dongle_id;
int ret;
int err;
IRDA_DEBUG(2, __FUNCTION__ "(), ---------------- Start ----------------\n");
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
/* Set FIR FIFO and DMA Threshold */
if ((ali_ircc_setup(info)) == -1)
......@@ -263,7 +261,7 @@ static int ali_ircc_open(int i, chipio_t *info)
self = kmalloc(sizeof(struct ali_ircc_cb), GFP_KERNEL);
if (self == NULL)
{
ERROR(__FUNCTION__ "(), can't allocate memory for control block!\n");
ERROR("%s(), can't allocate memory for control block!\n", __FUNCTION__);
return -ENOMEM;
}
memset(self, 0, sizeof(struct ali_ircc_cb));
......@@ -285,7 +283,7 @@ static int ali_ircc_open(int i, chipio_t *info)
/* Reserve the ioports that we need */
ret = check_region(self->io.fir_base, self->io.fir_ext);
if (ret < 0) {
WARNING(__FUNCTION__ "(), can't get iobase of 0x%03x\n",
WARNING("%s(), can't get iobase of 0x%03x\n", __FUNCTION__,
self->io.fir_base);
dev_self[i] = NULL;
kfree(self);
......@@ -339,7 +337,7 @@ static int ali_ircc_open(int i, chipio_t *info)
self->tx_fifo.tail = self->tx_buff.head;
if (!(dev = dev_alloc("irda%d", &err))) {
ERROR(__FUNCTION__ "(), dev_alloc() failed!\n");
ERROR("%s(), dev_alloc() failed!\n", __FUNCTION__);
return -ENOMEM;
}
......@@ -358,14 +356,14 @@ static int ali_ircc_open(int i, chipio_t *info)
err = register_netdevice(dev);
rtnl_unlock();
if (err) {
ERROR(__FUNCTION__ "(), register_netdev() failed!\n");
ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
return -1;
}
MESSAGE("IrDA: Registered device %s\n", dev->name);
/* Check dongle id */
dongle_id = ali_ircc_read_dongle_id(i, info);
MESSAGE(__FUNCTION__ "(), %s, Found dongle: %s\n", driver_name, dongle_types[dongle_id]);
MESSAGE("%s(), %s, Found dongle: %s\n", __FUNCTION__, driver_name, dongle_types[dongle_id]);
self->io.dongle_id = dongle_id;
......@@ -373,7 +371,7 @@ static int ali_ircc_open(int i, chipio_t *info)
if (pmdev)
pmdev->data = self;
IRDA_DEBUG(2, __FUNCTION__ "(), ----------------- End -----------------\n");
IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
return 0;
}
......@@ -389,7 +387,7 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self)
{
int iobase;
IRDA_DEBUG(4, __FUNCTION__ "(), ---------------- Start ----------------\n");
IRDA_DEBUG(4, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
ASSERT(self != NULL, return -1;);
......@@ -403,7 +401,7 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self)
}
/* Release the PORT that this driver is using */
IRDA_DEBUG(4, __FUNCTION__ "(), Releasing Region %03x\n", self->io.fir_base);
IRDA_DEBUG(4, "%s(), Releasing Region %03x\n", __FUNCTION__, self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
......@@ -415,7 +413,7 @@ static int __exit ali_ircc_close(struct ali_ircc_cb *self)
dev_self[self->index] = NULL;
kfree(self);
IRDA_DEBUG(2, __FUNCTION__ "(), ----------------- End -----------------\n");
IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
return 0;
}
......@@ -458,7 +456,7 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
int cfg_base = info->cfg_base;
int hi, low, reg;
IRDA_DEBUG(2, __FUNCTION__ "(), ---------------- Start ----------------\n");
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
/* Enter Configuration */
outb(chip->entr1, cfg_base);
......@@ -477,13 +475,13 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
info->sir_base = info->fir_base;
IRDA_DEBUG(2, __FUNCTION__ "(), probing fir_base=0x%03x\n", info->fir_base);
IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __FUNCTION__, info->fir_base);
/* Read IRQ control register */
outb(0x70, cfg_base);
reg = inb(cfg_base+1);
info->irq = reg & 0x0f;
IRDA_DEBUG(2, __FUNCTION__ "(), probing irq=%d\n", info->irq);
IRDA_DEBUG(2, "%s(), probing irq=%d\n", __FUNCTION__, info->irq);
/* Read DMA channel */
outb(0x74, cfg_base);
......@@ -491,26 +489,26 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info)
info->dma = reg & 0x07;
if(info->dma == 0x04)
WARNING(__FUNCTION__ "(), No DMA channel assigned !\n");
WARNING("%s(), No DMA channel assigned !\n", __FUNCTION__);
else
IRDA_DEBUG(2, __FUNCTION__ "(), probing dma=%d\n", info->dma);
IRDA_DEBUG(2, "%s(), probing dma=%d\n", __FUNCTION__, info->dma);
/* Read Enabled Status */
outb(0x30, cfg_base);
reg = inb(cfg_base+1);
info->enabled = (reg & 0x80) && (reg & 0x01);
IRDA_DEBUG(2, __FUNCTION__ "(), probing enabled=%d\n", info->enabled);
IRDA_DEBUG(2, "%s(), probing enabled=%d\n", __FUNCTION__, info->enabled);
/* Read Power Status */
outb(0x22, cfg_base);
reg = inb(cfg_base+1);
info->suspended = (reg & 0x20);
IRDA_DEBUG(2, __FUNCTION__ "(), probing suspended=%d\n", info->suspended);
IRDA_DEBUG(2, "%s(), probing suspended=%d\n", __FUNCTION__, info->suspended);
/* Exit configuration */
outb(0xbb, cfg_base);
IRDA_DEBUG(2, __FUNCTION__ "(), ----------------- End -----------------\n");
IRDA_DEBUG(2, "%s(), ----------------- End -----------------\n", __FUNCTION__);
return 0;
}
......@@ -528,8 +526,13 @@ static int ali_ircc_setup(chipio_t *info)
int version;
int iobase = info->fir_base;
IRDA_DEBUG(2, __FUNCTION__ "(), ---------------- Start ----------------\n");
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
/* Locking comments :
* Most operations here need to be protected. We are called before
* the device instance is created in ali_ircc_open(), therefore
* nobody can bother us - Jean II */
/* Switch to FIR space */
SIR2FIR(iobase);
......@@ -584,7 +587,7 @@ static int ali_ircc_setup(chipio_t *info)
// outb(UART_IER_RDI, iobase+UART_IER); //benjamin 2000/11/23 01:25PM
// Turn on the interrupts in ali_ircc_net_open
IRDA_DEBUG(2, __FUNCTION__ "(), ----------------- End ------------------\n");
IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__);
return 0;
}
......@@ -601,7 +604,7 @@ static int ali_ircc_read_dongle_id (int i, chipio_t *info)
int dongle_id, reg;
int cfg_base = info->cfg_base;
IRDA_DEBUG(2, __FUNCTION__ "(), ---------------- Start ----------------\n");
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
/* Enter Configuration */
outb(chips[i].entr1, cfg_base);
......@@ -615,13 +618,13 @@ static int ali_ircc_read_dongle_id (int i, chipio_t *info)
outb(0xf0, cfg_base);
reg = inb(cfg_base+1);
dongle_id = ((reg>>6)&0x02) | ((reg>>5)&0x01);
IRDA_DEBUG(2, __FUNCTION__ "(), probing dongle_id=%d, dongle_types=%s\n",
IRDA_DEBUG(2, "%s(), probing dongle_id=%d, dongle_types=%s\n", __FUNCTION__,
dongle_id, dongle_types[dongle_id]);
/* Exit configuration */
outb(0xbb, cfg_base);
IRDA_DEBUG(2, __FUNCTION__ "(), ----------------- End ------------------\n");
IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__);
return dongle_id;
}
......@@ -637,7 +640,7 @@ static void ali_ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
struct net_device *dev = (struct net_device *) dev_id;
struct ali_ircc_cb *self;
IRDA_DEBUG(2, __FUNCTION__ "(), ---------------- Start ----------------\n");
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
if (!dev) {
WARNING("%s: irq %d for unknown device.\n", driver_name, irq);
......@@ -656,7 +659,7 @@ static void ali_ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
spin_unlock(&self->lock);
IRDA_DEBUG(2, __FUNCTION__ "(), ----------------- End ------------------\n");
IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__);
}
/*
* Function ali_ircc_fir_interrupt(irq, struct ali_ircc_cb *self, regs)
......@@ -669,7 +672,7 @@ static void ali_ircc_fir_interrupt(int irq, struct ali_ircc_cb *self, struct pt_
__u8 eir, OldMessageCount;
int iobase, tmp;
IRDA_DEBUG(1, __FUNCTION__ "(), ---------------- Start ----------------\n");
IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
iobase = self->io.fir_base;
......@@ -682,10 +685,10 @@ static void ali_ircc_fir_interrupt(int irq, struct ali_ircc_cb *self, struct pt_
//self->ier = inb(iobase+FIR_IER); 2000/12/1 04:32PM
eir = self->InterruptID & self->ier; /* Mask out the interesting ones */
IRDA_DEBUG(1, __FUNCTION__ "(), self->InterruptID = %x\n",self->InterruptID);
IRDA_DEBUG(1, __FUNCTION__ "(), self->LineStatus = %x\n",self->LineStatus);
IRDA_DEBUG(1, __FUNCTION__ "(), self->ier = %x\n",self->ier);
IRDA_DEBUG(1, __FUNCTION__ "(), eir = %x\n",eir);
IRDA_DEBUG(1, "%s(), self->InterruptID = %x\n", __FUNCTION__,self->InterruptID);
IRDA_DEBUG(1, "%s(), self->LineStatus = %x\n", __FUNCTION__,self->LineStatus);
IRDA_DEBUG(1, "%s(), self->ier = %x\n", __FUNCTION__,self->ier);
IRDA_DEBUG(1, "%s(), eir = %x\n", __FUNCTION__,eir);
/* Disable interrupts */
SetCOMInterrupts(self, FALSE);
......@@ -696,7 +699,7 @@ static void ali_ircc_fir_interrupt(int irq, struct ali_ircc_cb *self, struct pt_
{
if (self->io.direction == IO_XMIT) /* TX */
{
IRDA_DEBUG(1, __FUNCTION__ "(), ******* IIR_EOM (Tx) *******\n");
IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Tx) *******\n", __FUNCTION__);
if(ali_ircc_dma_xmit_complete(self))
{
......@@ -715,23 +718,23 @@ static void ali_ircc_fir_interrupt(int irq, struct ali_ircc_cb *self, struct pt_
}
else /* RX */
{
IRDA_DEBUG(1, __FUNCTION__ "(), ******* IIR_EOM (Rx) *******\n");
IRDA_DEBUG(1, "%s(), ******* IIR_EOM (Rx) *******\n", __FUNCTION__);
if(OldMessageCount > ((self->LineStatus+1) & 0x07))
{
self->rcvFramesOverflow = TRUE;
IRDA_DEBUG(1, __FUNCTION__ "(), ******* self->rcvFramesOverflow = TRUE ******** \n");
IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******** \n", __FUNCTION__);
}
if (ali_ircc_dma_receive_complete(self))
{
IRDA_DEBUG(1, __FUNCTION__ "(), ******* receive complete ******** \n");
IRDA_DEBUG(1, "%s(), ******* receive complete ******** \n", __FUNCTION__);
self->ier = IER_EOM;
}
else
{
IRDA_DEBUG(1, __FUNCTION__ "(), ******* Not receive complete ******** \n");
IRDA_DEBUG(1, "%s(), ******* Not receive complete ******** \n", __FUNCTION__);
self->ier = IER_EOM | IER_TIMER;
}
......@@ -744,7 +747,7 @@ static void ali_ircc_fir_interrupt(int irq, struct ali_ircc_cb *self, struct pt_
if(OldMessageCount > ((self->LineStatus+1) & 0x07))
{
self->rcvFramesOverflow = TRUE;
IRDA_DEBUG(1, __FUNCTION__ "(), ******* self->rcvFramesOverflow = TRUE ******* \n");
IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******* \n", __FUNCTION__);
}
/* Disable Timer */
switch_bank(iobase, BANK1);
......@@ -776,7 +779,7 @@ static void ali_ircc_fir_interrupt(int irq, struct ali_ircc_cb *self, struct pt_
/* Restore Interrupt */
SetCOMInterrupts(self, TRUE);
IRDA_DEBUG(1, __FUNCTION__ "(), ----------------- End ---------------\n");
IRDA_DEBUG(1, "%s(), ----------------- End ---------------\n", __FUNCTION__);
}
/*
......@@ -790,7 +793,7 @@ static void ali_ircc_sir_interrupt(int irq, struct ali_ircc_cb *self, struct pt_
int iobase;
int iir, lsr;
IRDA_DEBUG(2, __FUNCTION__ "(), ---------------- Start ----------------\n");
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
iobase = self->io.sir_base;
......@@ -799,14 +802,13 @@ static void ali_ircc_sir_interrupt(int irq, struct ali_ircc_cb *self, struct pt_
/* Clear interrupt */
lsr = inb(iobase+UART_LSR);
IRDA_DEBUG(4, __FUNCTION__
"(), iir=%02x, lsr=%02x, iobase=%#x\n",
IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", __FUNCTION__,
iir, lsr, iobase);
switch (iir)
{
case UART_IIR_RLSI:
IRDA_DEBUG(2, __FUNCTION__ "(), RLSI\n");
IRDA_DEBUG(2, "%s(), RLSI\n", __FUNCTION__);
break;
case UART_IIR_RDI:
/* Receive interrupt */
......@@ -820,14 +822,14 @@ static void ali_ircc_sir_interrupt(int irq, struct ali_ircc_cb *self, struct pt_
}
break;
default:
IRDA_DEBUG(0, __FUNCTION__ "(), unhandled IIR=%#x\n", iir);
IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", __FUNCTION__, iir);
break;
}
}
IRDA_DEBUG(2, __FUNCTION__ "(), ----------------- End ------------------\n");
IRDA_DEBUG(2, "%s(), ----------------- End ------------------\n", __FUNCTION__);
}
......@@ -842,7 +844,7 @@ static void ali_ircc_sir_receive(struct ali_ircc_cb *self)
int boguscount = 0;
int iobase;
IRDA_DEBUG(2, __FUNCTION__ "(), ---------------- Start ----------------\n");
IRDA_DEBUG(2, "%s(), ---------------- Start ----------------\n", __FUNCTION__);
ASSERT(self != NULL, return;);
iobase = self->io.sir_base;
......@@ -857,7 +859,7 @@ static void ali_ircc_sir_receive(struct ali_ircc_cb *self)
/* Make sure we don't stay here to long */
if (boguscount++ > 32) {
IRDA_DEBUG(2,__FUNCTION__ "(), breaking!\n");
IRDA_DEBUG(2,"%s(), breaking!\n", __FUNCTION__);
break;
}
} while (inb(iobase+UART_LSR) & UART_LSR_DR);
......@@ -937,6 +939,9 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
IRDA_DEBUG(2, __FUNCTION__ "(), setting speed = %d \n", baud);
/* This function *must* be called with irq off and spin-lock.
* - Jean II */
iobase = self->io.fir_base;
SetCOMInterrupts(self, FALSE); // 2000/11/24 11:43AM
......@@ -1084,7 +1089,6 @@ static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv;
int iobase,dongle_id;
unsigned long flags;
int tmp = 0;
IRDA_DEBUG(1, __FUNCTION__ "(), ---------------- Start ----------------\n");
......@@ -1092,8 +1096,7 @@ static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
iobase = self->io.fir_base; /* or iobase = self->io.sir_base; */
dongle_id = self->io.dongle_id;
save_flags(flags);
cli();
/* We are already locked, no need to do it again */
IRDA_DEBUG(1, __FUNCTION__ "(), Set Speed for %s , Speed = %d\n", dongle_types[dongle_id], speed);
......@@ -1259,8 +1262,6 @@ static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
switch_bank(iobase, BANK0);
restore_flags(flags);
IRDA_DEBUG(1, __FUNCTION__ "(), ----------------- End ------------------\n");
}
......@@ -1440,20 +1441,26 @@ static int ali_ircc_fir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
/* Make sure tests *& speed change are atomic */
spin_lock_irqsave(&self->lock, flags);
/* Note : you should make sure that speed changes are not going
* to corrupt any outgoing frame. Look at nsc-ircc for the gory
* details - Jean II */
/* Check if we need to change the speed */
speed = irda_get_next_speed(skb);
if ((speed != self->io.speed) && (speed != -1)) {
/* Check for empty frame */
if (!skb->len) {
ali_ircc_change_speed(self, speed);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
return 0;
} else
self->new_speed = speed;
}
spin_lock_irqsave(&self->lock, flags);
/* Register and copy this frame to DMA memory */
self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
......@@ -1651,7 +1658,7 @@ static int ali_ircc_dma_xmit_complete(struct ali_ircc_cb *self)
if((inb(iobase+FIR_LSR) & LSR_FRAME_ABORT) == LSR_FRAME_ABORT)
{
ERROR(__FUNCTION__ "(), ********* LSR_FRAME_ABORT *********\n");
ERROR("%s(), ********* LSR_FRAME_ABORT *********\n", __FUNCTION__);
self->stats.tx_errors++;
self->stats.tx_fifo_errors++;
}
......@@ -1898,8 +1905,8 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
skb = dev_alloc_skb(len+1);
if (skb == NULL)
{
WARNING(__FUNCTION__ "(), memory squeeze, "
"dropping frame.\n");
WARNING("%s(), memory squeeze, "
"dropping frame.\n", __FUNCTION__);
self->stats.rx_dropped++;
return FALSE;
......@@ -1957,20 +1964,26 @@ static int ali_ircc_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
/* Make sure tests *& speed change are atomic */
spin_lock_irqsave(&self->lock, flags);
/* Note : you should make sure that speed changes are not going
* to corrupt any outgoing frame. Look at nsc-ircc for the gory
* details - Jean II */
/* Check if we need to change the speed */
speed = irda_get_next_speed(skb);
if ((speed != self->io.speed) && (speed != -1)) {
/* Check for empty frame */
if (!skb->len) {
ali_ircc_change_speed(self, speed);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
return 0;
} else
self->new_speed = speed;
}
spin_lock_irqsave(&self->lock, flags);
/* Init tx buffer */
self->tx_buff.data = self->tx_buff.head;
......@@ -2016,10 +2029,6 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
IRDA_DEBUG(2, __FUNCTION__ "(), %s, (cmd=0x%X)\n", dev->name, cmd);
/* Disable interrupts & save flags */
save_flags(flags);
cli();
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
IRDA_DEBUG(1, __FUNCTION__ "(), SIOCSBANDWIDTH\n");
......@@ -2031,7 +2040,9 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!in_interrupt() && !capable(CAP_NET_ADMIN))
return -EPERM;
spin_lock_irqsave(&self->lock, flags);
ali_ircc_change_speed(self, irq->ifr_baudrate);
spin_unlock_irqrestore(&self->lock, flags);
break;
case SIOCSMEDIABUSY: /* Set media busy */
IRDA_DEBUG(1, __FUNCTION__ "(), SIOCSMEDIABUSY\n");
......@@ -2041,14 +2052,13 @@ static int ali_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
break;
case SIOCGRECEIVING: /* Check if we are receiving right now */
IRDA_DEBUG(2, __FUNCTION__ "(), SIOCGRECEIVING\n");
/* This is protected */
irq->ifr_receiving = ali_ircc_is_receiving(self);
break;
default:
ret = -EOPNOTSUPP;
}
restore_flags(flags);
IRDA_DEBUG(2, __FUNCTION__ "(), ----------------- End ------------------\n");
return ret;
......@@ -2219,19 +2229,16 @@ static void SetCOMInterrupts(struct ali_ircc_cb *self , unsigned char enable)
static void SIR2FIR(int iobase)
{
//unsigned char tmp;
unsigned long flags;
IRDA_DEBUG(1, __FUNCTION__ "(), ---------------- Start ----------------\n");
save_flags(flags);
cli();
/* Already protected (change_speed() or setup()), no need to lock.
* Jean II */
outb(0x28, iobase+UART_MCR);
outb(0x68, iobase+UART_MCR);
outb(0x88, iobase+UART_MCR);
restore_flags(flags);
outb(0x60, iobase+FIR_MCR); /* Master Reset */
outb(0x20, iobase+FIR_MCR); /* Master Interrupt Enable */
......@@ -2245,12 +2252,11 @@ static void SIR2FIR(int iobase)
static void FIR2SIR(int iobase)
{
unsigned char val;
unsigned long flags;
IRDA_DEBUG(1, __FUNCTION__ "(), ---------------- Start ----------------\n");
save_flags(flags);
cli();
/* Already protected (change_speed() or setup()), no need to lock.
* Jean II */
outb(0x20, iobase+FIR_MCR); /* IRQ to low */
outb(0x00, iobase+UART_IER);
......@@ -2263,8 +2269,6 @@ static void FIR2SIR(int iobase)
val = inb(iobase+UART_LSR);
val = inb(iobase+UART_MSR);
restore_flags(flags);
IRDA_DEBUG(1, __FUNCTION__ "(), ----------------- End ------------------\n");
}
......
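The ali_ircc.c changes above follow one rule: save_flags()/cli()/restore_flags(), which only mask interrupts on the local CPU, go away, and each path either relies on a lock the caller already holds (as the Jean II comments note for change_speed()/setup()) or takes the per-device spinlock itself, as the SIOCSLINESPEED ioctl now does. A hedged sketch of that conversion, with a hypothetical control block rather than the real ali_ircc_cb:

/* Sketch of the save_flags()/cli() -> spin_lock_irqsave() conversion. */
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_ircc_cb {
	spinlock_t	lock;	/* protects speed and chip registers */
	__u32		speed;
};

static void my_set_speed(struct my_ircc_cb *self, __u32 baud)
{
	unsigned long flags;

	/* Old style, removed in this merge (UP-only):
	 *	save_flags(flags); cli();
	 *	...program the chip...
	 *	restore_flags(flags);
	 */
	spin_lock_irqsave(&self->lock, flags);
	self->speed = baud;	/* ...program the chip here... */
	spin_unlock_irqrestore(&self->lock, flags);
}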
......@@ -129,7 +129,7 @@ static int girbil_change_speed(struct irda_task *task)
}
break;
case IRDA_TASK_CHILD_WAIT:
WARNING(__FUNCTION__ "(), resetting dongle timed out!\n");
WARNING("%s(), resetting dongle timed out!\n", __FUNCTION__);
ret = -1;
break;
case IRDA_TASK_CHILD_DONE:
......@@ -168,7 +168,7 @@ static int girbil_change_speed(struct irda_task *task)
self->speed_task = NULL;
break;
default:
ERROR(__FUNCTION__ "(), unknown state %d\n", task->state);
ERROR("%s(), unknown state %d\n", __FUNCTION__, task->state);
irda_task_next_state(task, IRDA_TASK_DONE);
self->speed_task = NULL;
ret = -1;
......@@ -221,7 +221,7 @@ static int girbil_reset(struct irda_task *task)
self->reset_task = NULL;
break;
default:
ERROR(__FUNCTION__ "(), unknown state %d\n", task->state);
ERROR("%s(), unknown state %d\n", __FUNCTION__, task->state);
irda_task_next_state(task, IRDA_TASK_DONE);
self->reset_task = NULL;
ret = -1;
......
......@@ -1171,7 +1171,7 @@ static inline int irda_usb_open(struct irda_usb_cb *self)
irda_usb_init_qos(self);
/* Initialise list of skb beeing curently transmitted */
self->tx_list = hashbin_new(HB_GLOBAL);
self->tx_list = hashbin_new(HB_NOLOCK); /* unused */
/* Allocate the buffer for speed changes */
/* Don't change this buffer size and allocation without doing
......
......@@ -124,7 +124,7 @@ static void __exit irport_cleanup(void)
{
int i;
IRDA_DEBUG( 4, __FUNCTION__ "()\n");
IRDA_DEBUG( 4, "%s()\n", __FUNCTION__);
for (i=0; i < 4; i++) {
if (dev_self[i])
......@@ -140,15 +140,15 @@ irport_open(int i, unsigned int iobase, unsigned int irq)
void *ret;
int err;
IRDA_DEBUG(0, __FUNCTION__ "()\n");
IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
/*
* Allocate new instance of the driver
*/
self = kmalloc(sizeof(struct irport_cb), GFP_KERNEL);
if (!self) {
ERROR(__FUNCTION__ "(), can't allocate memory for "
"control block!\n");
ERROR("%s(), can't allocate memory for "
"control block!\n", __FUNCTION__);
return NULL;
}
memset(self, 0, sizeof(struct irport_cb));
......@@ -168,8 +168,8 @@ irport_open(int i, unsigned int iobase, unsigned int irq)
/* Lock the port that we need */
ret = request_region(self->io.sir_base, self->io.sir_ext, driver_name);
if (!ret) {
IRDA_DEBUG(0, __FUNCTION__ "(), can't get iobase of 0x%03x\n",
self->io.sir_base);
IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
__FUNCTION__, self->io.sir_base);
return NULL;
}
......@@ -212,7 +212,7 @@ irport_open(int i, unsigned int iobase, unsigned int irq)
self->mode = IRDA_IRLAP;
if (!(dev = dev_alloc("irda%d", &err))) {
ERROR(__FUNCTION__ "(), dev_alloc() failed!\n");
ERROR("%s(), dev_alloc() failed!\n", __FUNCTION__);
return NULL;
}
self->netdev = dev;
......@@ -240,7 +240,7 @@ irport_open(int i, unsigned int iobase, unsigned int irq)
err = register_netdevice(dev);
rtnl_unlock();
if (err) {
ERROR(__FUNCTION__ "(), register_netdev() failed!\n");
ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
return NULL;
}
MESSAGE("IrDA: Registered device %s\n", dev->name);
......@@ -265,8 +265,8 @@ int irport_close(struct irport_cb *self)
}
/* Release the IO-port that this driver is using */
IRDA_DEBUG(0 , __FUNCTION__ "(), Releasing Region %03x\n",
self->io.sir_base);
IRDA_DEBUG(0 , "%s(), Releasing Region %03x\n",
__FUNCTION__, self->io.sir_base);
release_region(self->io.sir_base, self->io.sir_ext);
if (self->tx_buff.head)
......@@ -284,14 +284,13 @@ int irport_close(struct irport_cb *self)
void irport_start(struct irport_cb *self)
{
unsigned long flags;
int iobase;
iobase = self->io.sir_base;
irport_stop(self);
spin_lock_irqsave(&self->lock, flags);
/* We can't lock, we may be called from a FIR driver - Jean II */
/* Initialize UART */
outb(UART_LCR_WLEN8, iobase+UART_LCR); /* Reset DLAB */
......@@ -299,26 +298,21 @@ void irport_start(struct irport_cb *self)
/* Turn on interrups */
outb(UART_IER_RLSI | UART_IER_RDI |UART_IER_THRI, iobase+UART_IER);
spin_unlock_irqrestore(&self->lock, flags);
}
void irport_stop(struct irport_cb *self)
{
unsigned long flags;
int iobase;
iobase = self->io.sir_base;
spin_lock_irqsave(&self->lock, flags);
/* We can't lock, we may be called from a FIR driver - Jean II */
/* Reset UART */
outb(0, iobase+UART_MCR);
/* Turn off interrupts */
outb(0, iobase+UART_IER);
spin_unlock_irqrestore(&self->lock, flags);
}
/*
......@@ -329,7 +323,7 @@ void irport_stop(struct irport_cb *self)
*/
int irport_probe(int iobase)
{
IRDA_DEBUG(4, __FUNCTION__ "(), iobase=%#x\n", iobase);
IRDA_DEBUG(4, "%s(), iobase=%#x\n", __FUNCTION__, iobase);
return 0;
}
......@@ -339,27 +333,28 @@ int irport_probe(int iobase)
*
* Set speed of IrDA port to specified baudrate
*
* This function should be called with irq off and spin-lock.
*/
void irport_change_speed(void *priv, __u32 speed)
{
struct irport_cb *self = (struct irport_cb *) priv;
unsigned long flags;
int iobase;
int fcr; /* FIFO control reg */
int lcr; /* Line control reg */
int divisor;
IRDA_DEBUG(0, __FUNCTION__ "(), Setting speed to: %d\n", speed);
ASSERT(self != NULL, return;);
IRDA_DEBUG(1, "%s(), Setting speed to: %d - iobase=%#x\n",
__FUNCTION__, speed, self->io.sir_base);
/* We can't lock, we may be called from a FIR driver - Jean II */
iobase = self->io.sir_base;
/* Update accounting for new speed */
self->io.speed = speed;
spin_lock_irqsave(&self->lock, flags);
/* Turn off interrupts */
outb(0, iobase+UART_IER);
......@@ -387,9 +382,9 @@ void irport_change_speed(void *priv, __u32 speed)
outb(fcr, iobase+UART_FCR); /* Enable FIFO's */
/* Turn on interrups */
outb(/*UART_IER_RLSI|*/UART_IER_RDI/*|UART_IER_THRI*/, iobase+UART_IER);
spin_unlock_irqrestore(&self->lock, flags);
/* This will generate a fatal interrupt storm.
* People calling us will do that properly - Jean II */
//outb(/*UART_IER_RLSI|*/UART_IER_RDI/*|UART_IER_THRI*/, iobase+UART_IER);
}
/*
......@@ -397,19 +392,33 @@ void irport_change_speed(void *priv, __u32 speed)
*
* State machine for changing speed of the device. We do it this way since
* we cannot use schedule_timeout() when we are in interrupt context
*
*/
int __irport_change_speed(struct irda_task *task)
{
struct irport_cb *self;
__u32 speed = (__u32) task->param;
unsigned long flags = 0;
int wasunlocked = 0;
int ret = 0;
IRDA_DEBUG(2, __FUNCTION__ "(), <%ld>\n", jiffies);
IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);
self = (struct irport_cb *) task->instance;
ASSERT(self != NULL, return -1;);
/* Locking notes : this function may be called from irq context with
* spinlock, via irport_write_wakeup(), or from non-interrupt without
* spinlock (from the task timer). Yuck !
* This is ugly, and unsafe if the spinlock is not already acquired.
* This will be fixed when irda-task get rewritten.
* Jean II */
if (!spin_is_locked(&self->lock)) {
spin_lock_irqsave(&self->lock, flags);
wasunlocked = 1;
}
switch (task->state) {
case IRDA_TASK_INIT:
case IRDA_TASK_WAIT:
......@@ -446,8 +455,7 @@ int __irport_change_speed(struct irda_task *task)
irda_task_next_state(task, IRDA_TASK_CHILD_DONE);
break;
case IRDA_TASK_CHILD_WAIT:
WARNING(__FUNCTION__
"(), changing speed of dongle timed out!\n");
WARNING("%s(), changing speed of dongle timed out!\n", __FUNCTION__);
ret = -1;
break;
case IRDA_TASK_CHILD_DONE:
......@@ -457,11 +465,16 @@ int __irport_change_speed(struct irda_task *task)
irda_task_next_state(task, IRDA_TASK_DONE);
break;
default:
ERROR(__FUNCTION__ "(), unknown state %d\n", task->state);
ERROR("%s(), unknown state %d\n", __FUNCTION__, task->state);
irda_task_next_state(task, IRDA_TASK_DONE);
ret = -1;
break;
}
/* Put things back in the state we found them - Jean II */
if(wasunlocked) {
spin_unlock_irqrestore(&self->lock, flags);
}
return ret;
}
......@@ -480,7 +493,7 @@ static void irport_write_wakeup(struct irport_cb *self)
ASSERT(self != NULL, return;);
IRDA_DEBUG(4, __FUNCTION__ "()\n");
IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
iobase = self->io.sir_base;
......@@ -491,6 +504,9 @@ static void irport_write_wakeup(struct irport_cb *self)
self->tx_buff.data, self->tx_buff.len);
self->tx_buff.data += actual;
self->tx_buff.len -= actual;
/* Turn on transmit finished interrupt. */
outb(UART_IER_THRI, iobase+UART_IER);
} else {
/*
* Now serial buffer is almost free & we can start
......@@ -498,11 +514,12 @@ static void irport_write_wakeup(struct irport_cb *self)
* if we need to change the speed of the hardware
*/
if (self->new_speed) {
IRDA_DEBUG(5, __FUNCTION__ "(), Changing speed!\n");
IRDA_DEBUG(5, "%s(), Changing speed!\n", __FUNCTION__);
irda_task_execute(self, __irport_change_speed,
irport_change_speed_complete,
NULL, (void *) self->new_speed);
self->new_speed = 0;
IRDA_DEBUG(5, "%s(), Speed changed!\n", __FUNCTION__ );
} else {
/* Tell network layer that we want more frames */
netif_wake_queue(self->netdev);
......@@ -538,7 +555,7 @@ static int irport_write(int iobase, int fifo_size, __u8 *buf, int len)
/* Tx FIFO should be empty! */
if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) {
IRDA_DEBUG(0, __FUNCTION__ "(), failed, fifo not empty!\n");
IRDA_DEBUG(0, "%s(), failed, fifo not empty!\n", __FUNCTION__);
return 0;
}
......@@ -563,7 +580,7 @@ static int irport_change_speed_complete(struct irda_task *task)
{
struct irport_cb *self;
IRDA_DEBUG(0, __FUNCTION__ "()\n");
IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
self = (struct irport_cb *) task->instance;
......@@ -589,13 +606,19 @@ static void irport_timeout(struct net_device *dev)
{
struct irport_cb *self;
int iobase;
unsigned long flags;
self = (struct irport_cb *) dev->priv;
iobase = self->io.sir_base;
WARNING("%s: transmit timed out\n", dev->name);
spin_lock_irqsave(&self->lock, flags);
irport_start(self);
self->change_speed(self->priv, self->io.speed);
/* This will re-enable irqs */
outb(/*UART_IER_RLSI|*/UART_IER_RDI/*|UART_IER_THRI*/, iobase+UART_IER);
spin_unlock_irqrestore(&self->lock, flags);
dev->trans_start = jiffies;
netif_wake_queue(dev);
}
......@@ -614,7 +637,7 @@ int irport_hard_xmit(struct sk_buff *skb, struct net_device *dev)
int iobase;
s32 speed;
IRDA_DEBUG(0, __FUNCTION__ "()\n");
IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
ASSERT(dev != NULL, return 0;);
......@@ -625,22 +648,25 @@ int irport_hard_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
/* Make sure tests *& speed change are atomic */
spin_lock_irqsave(&self->lock, flags);
/* Check if we need to change the speed */
speed = irda_get_next_speed(skb);
if ((speed != self->io.speed) && (speed != -1)) {
/* Check for empty frame */
if (!skb->len) {
/* Better go there already locked - Jean II */
irda_task_execute(self, __irport_change_speed,
irport_change_speed_complete,
NULL, (void *) speed);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
return 0;
} else
self->new_speed = speed;
}
spin_lock_irqsave(&self->lock, flags);
/* Init tx buffer */
self->tx_buff.data = self->tx_buff.head;
......@@ -685,7 +711,7 @@ static void irport_receive(struct irport_cb *self)
/* Make sure we don't stay here to long */
if (boguscount++ > 32) {
IRDA_DEBUG(2,__FUNCTION__ "(), breaking!\n");
IRDA_DEBUG(2,"%s(), breaking!\n", __FUNCTION__);
break;
}
} while (inb(iobase+UART_LSR) & UART_LSR_DR);
......@@ -705,7 +731,7 @@ void irport_interrupt(int irq, void *dev_id, struct pt_regs *regs)
int iir, lsr;
if (!dev) {
WARNING(__FUNCTION__ "() irq %d for unknown device.\n", irq);
WARNING("%s() irq %d for unknown device.\n", __FUNCTION__, irq);
return;
}
self = (struct irport_cb *) dev->priv;
......@@ -719,13 +745,12 @@ void irport_interrupt(int irq, void *dev_id, struct pt_regs *regs)
/* Clear interrupt */
lsr = inb(iobase+UART_LSR);
IRDA_DEBUG(4, __FUNCTION__
"(), iir=%02x, lsr=%02x, iobase=%#x\n",
iir, lsr, iobase);
IRDA_DEBUG(4, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
__FUNCTION__, iir, lsr, iobase);
switch (iir) {
case UART_IIR_RLSI:
IRDA_DEBUG(2, __FUNCTION__ "(), RLSI\n");
IRDA_DEBUG(2, "%s(), RLSI\n", __FUNCTION__);
break;
case UART_IIR_RDI:
/* Receive interrupt */
......@@ -737,7 +762,7 @@ void irport_interrupt(int irq, void *dev_id, struct pt_regs *regs)
irport_write_wakeup(self);
break;
default:
IRDA_DEBUG(0, __FUNCTION__ "(), unhandled IIR=%#x\n", iir);
IRDA_DEBUG(0, "%s(), unhandled IIR=%#x\n", __FUNCTION__, iir);
break;
}
......@@ -771,8 +796,9 @@ int irport_net_open(struct net_device *dev)
struct irport_cb *self;
int iobase;
char hwname[16];
unsigned long flags;
IRDA_DEBUG(0, __FUNCTION__ "()\n");
IRDA_DEBUG(1, "%s()\n", __FUNCTION__);
ASSERT(dev != NULL, return -1;);
self = (struct irport_cb *) dev->priv;
......@@ -781,12 +807,14 @@ int irport_net_open(struct net_device *dev)
if (request_irq(self->io.irq, self->interrupt, 0, dev->name,
(void *) dev)) {
IRDA_DEBUG(0, __FUNCTION__ "(), unable to allocate irq=%d\n",
self->io.irq);
IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n",
__FUNCTION__, self->io.irq);
return -EAGAIN;
}
spin_lock_irqsave(&self->lock, flags);
irport_start(self);
spin_unlock_irqrestore(&self->lock, flags);
/* Give self a hardware name */
......@@ -818,8 +846,9 @@ int irport_net_close(struct net_device *dev)
{
struct irport_cb *self;
int iobase;
unsigned long flags;
IRDA_DEBUG(4, __FUNCTION__ "()\n");
IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
ASSERT(dev != NULL, return -1;);
self = (struct irport_cb *) dev->priv;
......@@ -836,7 +865,9 @@ int irport_net_close(struct net_device *dev)
irlap_close(self->irlap);
self->irlap = NULL;
spin_lock_irqsave(&self->lock, flags);
irport_stop(self);
spin_unlock_irqrestore(&self->lock, flags);
free_irq(self->io.irq, dev);
......@@ -860,7 +891,7 @@ void irport_wait_until_sent(struct irport_cb *self)
/* Wait until Tx FIFO is empty */
while (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) {
IRDA_DEBUG(2, __FUNCTION__ "(), waiting!\n");
IRDA_DEBUG(2, "%s(), waiting!\n", __FUNCTION__);
current->state = TASK_INTERRUPTIBLE;
schedule_timeout(MSECS_TO_JIFFIES(60));
}
......@@ -915,7 +946,7 @@ static int irport_raw_write(struct net_device *dev, __u8 *buf, int len)
/* Tx FIFO should be empty! */
if (!(inb(iobase+UART_LSR) & UART_LSR_THRE)) {
IRDA_DEBUG( 0, __FUNCTION__ "(), failed, fifo not empty!\n");
IRDA_DEBUG( 0, "%s(), failed, fifo not empty!\n", __FUNCTION__);
return -1;
}
......@@ -949,11 +980,7 @@ static int irport_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
ASSERT(self != NULL, return -1;);
IRDA_DEBUG(2, __FUNCTION__ "(), %s, (cmd=0x%X)\n", dev->name, cmd);
/* Disable interrupts & save flags */
save_flags(flags);
cli();
IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
......@@ -979,14 +1006,16 @@ static int irport_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
dongle->write = irport_raw_write;
dongle->set_dtr_rts = irport_set_dtr_rts;
self->dongle = dongle;
/* Now initialize the dongle! */
dongle->issue->open(dongle, &self->qos);
/* Reset dongle */
irda_task_execute(dongle, dongle->issue->reset, NULL, NULL,
NULL);
/* Make dongle available to driver only now to avoid
* race conditions - Jean II */
self->dongle = dongle;
break;
case SIOCSMEDIABUSY: /* Set media busy */
if (!capable(CAP_NET_ADMIN)) {
......@@ -1005,14 +1034,15 @@ static int irport_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
break;
}
/* No real need to lock... */
spin_lock_irqsave(&self->lock, flags);
irport_set_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
spin_unlock_irqrestore(&self->lock, flags);
break;
default:
ret = -EOPNOTSUPP;
}
restore_flags(flags);
return ret;
}
......
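In irport.c the locking moves outward: irport_start(), irport_stop() and irport_change_speed() no longer take the lock themselves, since a FIR driver layered on top may already hold it, so irport_net_open(), irport_net_close() and irport_timeout() wrap the calls in spin_lock_irqsave(), and __irport_change_speed() falls back to the spin_is_locked() test that its own comment flags as unsafe until the irda-task code is rewritten. A sketch of the caller-holds-the-lock convention, with hypothetical names:

/* Sketch: helpers that assume the caller holds the per-port lock. */
#include <linux/spinlock.h>
#include <linux/serial_reg.h>
#include <asm/io.h>

struct my_port {
	spinlock_t	lock;
	unsigned int	iobase;
};

/* Must be called with self->lock held: touches UART registers only. */
static void my_port_start(struct my_port *self)
{
	outb(UART_LCR_WLEN8, self->iobase + UART_LCR);	/* 8N1, DLAB off */
}

/* Entry points take the lock once around the whole sequence. */
static void my_port_open(struct my_port *self)
{
	unsigned long flags;

	spin_lock_irqsave(&self->lock, flags);
	my_port_start(self);
	spin_unlock_irqrestore(&self->lock, flags);
}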
......@@ -74,8 +74,10 @@ char *driver_name = "irtty";
int __init irtty_init(void)
{
int status;
irtty = hashbin_new( HB_LOCAL);
/* Probably no need to lock here because all operations done in
* open()/close() which are already safe - Jean II */
irtty = hashbin_new( HB_NOLOCK);
if ( irtty == NULL) {
printk( KERN_WARNING "IrDA: Can't allocate irtty hashbin!\n");
return -ENOMEM;
......@@ -118,9 +120,8 @@ static void __exit irtty_cleanup(void)
/* Unregister tty line-discipline */
if ((ret = tty_register_ldisc(N_IRDA, NULL))) {
ERROR(__FUNCTION__
"(), can't unregister line discipline (err = %d)\n",
ret);
ERROR("%s(), can't unregister line discipline (err = %d)\n",
__FUNCTION__, ret);
}
/*
......@@ -163,6 +164,7 @@ static int irtty_open(struct tty_struct *tty)
return -ENOMEM;
}
memset(self, 0, sizeof(struct irtty_cb));
spin_lock_init(&self->lock);
self->tty = tty;
tty->disc_data = self;
......@@ -226,7 +228,7 @@ static int irtty_open(struct tty_struct *tty)
self->rx_buff.data = self->rx_buff.head;
if (!(dev = dev_alloc("irda%d", &err))) {
ERROR(__FUNCTION__ "(), dev_alloc() failed!\n");
ERROR("%s(), dev_alloc() failed!\n", __FUNCTION__);
return -ENOMEM;
}
......@@ -245,7 +247,7 @@ static int irtty_open(struct tty_struct *tty)
err = register_netdevice(dev);
rtnl_unlock();
if (err) {
ERROR(__FUNCTION__ "(), register_netdev() failed!\n");
ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
return -1;
}
......@@ -266,11 +268,12 @@ static int irtty_open(struct tty_struct *tty)
static void irtty_close(struct tty_struct *tty)
{
struct irtty_cb *self = (struct irtty_cb *) tty->disc_data;
unsigned long flags;
/* First make sure we're connected. */
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRTTY_MAGIC, return;);
/* Stop tty */
tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
tty->disc_data = 0;
......@@ -287,6 +290,11 @@ static void irtty_close(struct tty_struct *tty)
rtnl_unlock();
}
self = hashbin_remove(irtty, (int) self, NULL);
/* Protect access to self->task and self->?x_buff - Jean II */
spin_lock_irqsave(&self->lock, flags);
/* Remove speed changing task if any */
if (self->task)
irda_task_delete(self->task);
......@@ -294,13 +302,12 @@ static void irtty_close(struct tty_struct *tty)
self->tty = NULL;
self->magic = 0;
self = hashbin_remove(irtty, (int) self, NULL);
if (self->tx_buff.head)
kfree(self->tx_buff.head);
if (self->rx_buff.head)
kfree(self->rx_buff.head);
spin_unlock_irqrestore(&self->lock, flags);
kfree(self);
......@@ -326,6 +333,7 @@ static void irtty_stop_receiver(struct irtty_cb *self, int stop)
else
cflag |= CREAD;
/* This is unsafe, but currently under discussion - Jean II */
self->tty->termios->c_cflag = cflag;
self->tty->driver.set_termios(self->tty, &old_termios);
}
......@@ -378,6 +386,7 @@ static void __irtty_change_speed(struct irtty_cb *self, __u32 speed)
break;
}
/* This is unsafe, but currently under discussion - Jean II */
self->tty->termios->c_cflag = cflag;
self->tty->driver.set_termios(self->tty, &old_termios);
......@@ -393,6 +402,7 @@ static void __irtty_change_speed(struct irtty_cb *self, __u32 speed)
static int irtty_change_speed(struct irda_task *task)
{
struct irtty_cb *self;
unsigned long flags;
__u32 speed = (__u32) task->param;
int ret = 0;
......@@ -401,12 +411,17 @@ static int irtty_change_speed(struct irda_task *task)
self = (struct irtty_cb *) task->instance;
ASSERT(self != NULL, return -1;);
/* Protect access to self->task - Jean II */
spin_lock_irqsave(&self->lock, flags);
/* Check if busy */
if (self->task && self->task != task) {
IRDA_DEBUG(0, __FUNCTION__ "(), busy!\n");
spin_unlock_irqrestore(&self->lock, flags);
return MSECS_TO_JIFFIES(10);
} else
self->task = task;
spin_unlock_irqrestore(&self->lock, flags);
switch (task->state) {
case IRDA_TASK_INIT:
......@@ -451,8 +466,7 @@ static int irtty_change_speed(struct irda_task *task)
irda_task_next_state(task, IRDA_TASK_CHILD_DONE);
break;
case IRDA_TASK_CHILD_WAIT:
WARNING(__FUNCTION__
"(), changing speed of dongle timed out!\n");
WARNING("%s(), changing speed of dongle timed out!\n", __FUNCTION__);
ret = -1;
break;
case IRDA_TASK_CHILD_DONE:
......@@ -463,7 +477,7 @@ static int irtty_change_speed(struct irda_task *task)
self->task = NULL;
break;
default:
ERROR(__FUNCTION__ "(), unknown state %d\n", task->state);
ERROR("%s(), unknown state %d\n", __FUNCTION__, task->state);
irda_task_next_state(task, IRDA_TASK_DONE);
self->task = NULL;
ret = -1;
......@@ -501,6 +515,7 @@ static int irtty_ioctl(struct tty_struct *tty, void *file, int cmd, void *arg)
switch (cmd) {
case TCGETS:
case TCGETA:
/* Unsure about locking here, to check - Jean II */
return n_tty_ioctl(tty, (struct file *) file, cmd,
(unsigned long) arg);
break;
......@@ -516,15 +531,16 @@ static int irtty_ioctl(struct tty_struct *tty, void *file, int cmd, void *arg)
dongle->write = irtty_raw_write;
dongle->set_dtr_rts = irtty_set_dtr_rts;
/* Bind dongle */
self->dongle = dongle;
/* Now initialize the dongle! */
dongle->issue->open(dongle, &self->qos);
/* Reset dongle */
irda_task_execute(dongle, dongle->issue->reset, NULL, NULL,
NULL);
/* Make dongle available to driver only now to avoid
* race conditions - Jean II */
self->dongle = dongle;
break;
case IRTTY_IOCGET:
ASSERT(self->netdev != NULL, return -1;);
......@@ -559,6 +575,9 @@ static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
return;
}
// Are we in interrupt context ? What locking is done ? - Jean II
//spin_lock_irqsave(&self->lock, flags);
/* Read the characters out of the buffer */
while (count--) {
/*
......@@ -589,6 +608,7 @@ static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
break;
}
}
//spin_unlock_irqrestore(&self->lock, flags);
}
/*
......@@ -626,11 +646,13 @@ static int irtty_hard_xmit(struct sk_buff *skb, struct net_device *dev)
struct irtty_cb *self;
int actual = 0;
__s32 speed;
unsigned long flags;
self = (struct irtty_cb *) dev->priv;
ASSERT(self != NULL, return 0;);
/* Lock transmit buffer */
/* Lock transmit buffer
* this serialises operations, no need to spinlock - Jean II */
netif_stop_queue(dev);
/* Check if we need to change the speed */
......@@ -647,6 +669,9 @@ static int irtty_hard_xmit(struct sk_buff *skb, struct net_device *dev)
self->new_speed = speed;
}
/* Protect access to self->tx_buff - Jean II */
spin_lock_irqsave(&self->lock, flags);
/* Init tx buffer*/
self->tx_buff.data = self->tx_buff.head;
......@@ -667,6 +692,8 @@ static int irtty_hard_xmit(struct sk_buff *skb, struct net_device *dev)
self->tx_buff.data += actual;
self->tx_buff.len -= actual;
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
return 0;
......@@ -695,6 +722,7 @@ static void irtty_write_wakeup(struct tty_struct *tty)
{
struct irtty_cb *self = (struct irtty_cb *) tty->disc_data;
int actual = 0;
unsigned long flags;
/*
* First make sure we're connected.
......@@ -702,6 +730,11 @@ static void irtty_write_wakeup(struct tty_struct *tty)
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRTTY_MAGIC, return;);
/* Protected via netif_stop_queue(dev); - Jean II */
/* Protect access to self->tx_buff - Jean II */
spin_lock_irqsave(&self->lock, flags);
/* Finished with frame? */
if (self->tx_buff.len > 0) {
/* Write data left in transmit buffer */
......@@ -710,6 +743,7 @@ static void irtty_write_wakeup(struct tty_struct *tty)
self->tx_buff.data += actual;
self->tx_buff.len -= actual;
spin_unlock_irqrestore(&self->lock, flags);
} else {
/*
* Now serial buffer is almost free & we can start
......@@ -721,6 +755,9 @@ static void irtty_write_wakeup(struct tty_struct *tty)
tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
/* Don't change speed with irq off */
spin_unlock_irqrestore(&self->lock, flags);
if (self->new_speed) {
IRDA_DEBUG(5, __FUNCTION__ "(), Changing speed!\n");
irda_task_execute(self, irtty_change_speed,
......@@ -755,12 +792,17 @@ static int irtty_set_dtr_rts(struct net_device *dev, int dtr, int rts)
{
struct irtty_cb *self;
struct tty_struct *tty;
//unsigned long flags;
mm_segment_t fs;
int arg = 0;
self = (struct irtty_cb *) dev->priv;
tty = self->tty;
/* Was protected in ioctl handler, but the serial driver doesn't
* like it. This may need to change. - Jean II */
//spin_lock_irqsave(&self->lock, flags);
#ifdef TIOCM_OUT2 /* Not defined for ARM */
arg = TIOCM_OUT2;
#endif
......@@ -780,11 +822,14 @@ static int irtty_set_dtr_rts(struct net_device *dev, int dtr, int rts)
fs = get_fs();
set_fs(get_ds());
/* This is probably unsafe, but currently under discussion - Jean II */
if (tty->driver.ioctl(tty, NULL, TIOCMSET, (unsigned long) &arg)) {
IRDA_DEBUG(2, __FUNCTION__ "(), error doing ioctl!\n");
}
set_fs(fs);
//spin_unlock_irqrestore(&self->lock, flags);
return 0;
}
......@@ -799,13 +844,17 @@ static int irtty_set_dtr_rts(struct net_device *dev, int dtr, int rts)
int irtty_set_mode(struct net_device *dev, int mode)
{
struct irtty_cb *self;
unsigned long flags;
self = (struct irtty_cb *) dev->priv;
ASSERT(self != NULL, return -1;);
IRDA_DEBUG(2, __FUNCTION__ "(), mode=%s\n", infrared_mode[mode]);
/* Protect access to self->rx_buff - Jean II */
spin_lock_irqsave(&self->lock, flags);
/* save status for driver */
self->mode = mode;
......@@ -814,6 +863,8 @@ int irtty_set_mode(struct net_device *dev, int mode)
self->rx_buff.len = 0;
self->rx_buff.state = OUTSIDE_FRAME;
spin_unlock_irqrestore(&self->lock, flags);
return 0;
}
......@@ -955,7 +1006,6 @@ static int irtty_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
struct if_irda_req *irq = (struct if_irda_req *) rq;
struct irtty_cb *self;
dongle_t *dongle;
unsigned long flags;
int ret = 0;
ASSERT(dev != NULL, return -1;);
......@@ -971,8 +1021,7 @@ static int irtty_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
* irda_device_dongle_init() can't be locked.
* irda_task_execute() doesn't need to be locked (but
* irtty_change_speed() should protect itself).
* As this driver doesn't have spinlock protection, keep
* old fashion locking :-(
* Other calls protect themselves.
* Jean II
*/
......@@ -1025,20 +1074,14 @@ static int irtty_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!capable(CAP_NET_ADMIN))
ret = -EPERM;
else {
save_flags(flags);
cli();
irtty_set_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
restore_flags(flags);
}
break;
case SIOCSMODE:
if (!capable(CAP_NET_ADMIN))
ret = -EPERM;
else {
save_flags(flags);
cli();
irtty_set_mode(dev, irq->ifr_mode);
restore_flags(flags);
}
break;
default:
......
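irtty.c gains its own spinlock (initialised in irtty_open()) to protect self->task and the tx/rx buffers, while the termios and DTR/RTS paths stay unlocked because the serial driver does not tolerate being called with the lock held, as the new comments note. The busy check in irtty_change_speed() becomes a claim-under-the-lock; a rough, hypothetical sketch:

/* Sketch of claiming the speed-change task under the driver lock. */
#include <linux/spinlock.h>

struct my_tty_cb {
	spinlock_t	 lock;	/* protects task and buffers */
	void		*task;	/* current speed-change task */
};

/* Returns 0 when claimed, or a retry delay when another task is active. */
static int my_claim_task(struct my_tty_cb *self, void *task)
{
	unsigned long flags;

	spin_lock_irqsave(&self->lock, flags);
	if (self->task && self->task != task) {
		spin_unlock_irqrestore(&self->lock, flags);
		return 10;	/* caller retries later */
	}
	self->task = task;
	spin_unlock_irqrestore(&self->lock, flags);
	return 0;
}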
......@@ -109,7 +109,7 @@ static int mcp2120_change_speed(struct irda_task *task)
}
break;
case IRDA_TASK_CHILD_WAIT:
WARNING(__FUNCTION__ "(), resetting dongle timed out!\n");
WARNING("%s(), resetting dongle timed out!\n", __FUNCTION__);
ret = -1;
break;
case IRDA_TASK_CHILD_DONE:
......@@ -157,7 +157,7 @@ static int mcp2120_change_speed(struct irda_task *task)
//printk("mcp2120_change_speed irda_task_wait\n");
break;
default:
ERROR(__FUNCTION__ "(), unknown state %d\n", task->state);
ERROR("%s(), unknown state %d\n", __FUNCTION__, task->state);
irda_task_next_state(task, IRDA_TASK_DONE);
self->speed_task = NULL;
ret = -1;
......@@ -212,7 +212,7 @@ static int mcp2120_reset(struct irda_task *task)
self->reset_task = NULL;
break;
default:
ERROR(__FUNCTION__ "(), unknown state %d\n", task->state);
ERROR("%s(), unknown state %d\n", __FUNCTION__, task->state);
irda_task_next_state(task, IRDA_TASK_DONE);
self->reset_task = NULL;
ret = -1;
......
......@@ -161,7 +161,7 @@ int __init nsc_ircc_init(void)
/* Probe for all the NSC chipsets we know about */
for (chip=chips; chip->name ; chip++) {
IRDA_DEBUG(2, __FUNCTION__"(), Probing for %s ...\n",
IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __FUNCTION__,
chip->name);
/* Try all config registers for this chip */
......@@ -179,8 +179,7 @@ int __init nsc_ircc_init(void)
/* Read index register */
reg = inb(cfg_base);
if (reg == 0xff) {
IRDA_DEBUG(2, __FUNCTION__
"() no chip at 0x%03x\n", cfg_base);
IRDA_DEBUG(2, "%s() no chip at 0x%03x\n", __FUNCTION__, cfg_base);
continue;
}
......@@ -188,9 +187,8 @@ int __init nsc_ircc_init(void)
outb(chip->cid_index, cfg_base);
id = inb(cfg_base+1);
if ((id & chip->cid_mask) == chip->cid_value) {
IRDA_DEBUG(2, __FUNCTION__
"() Found %s chip, revision=%d\n",
chip->name, id & ~chip->cid_mask);
IRDA_DEBUG(2, "%s() Found %s chip, revision=%d\n",
__FUNCTION__, chip->name, id & ~chip->cid_mask);
/*
* If the user supplies the base address, then
* we init the chip, if not we probe the values
......@@ -205,8 +203,7 @@ int __init nsc_ircc_init(void)
ret = 0;
i++;
} else {
IRDA_DEBUG(2, __FUNCTION__
"(), Wrong chip id=0x%02x\n", id);
IRDA_DEBUG(2, "%s(), Wrong chip id=0x%02x\n", __FUNCTION__, id);
}
}
......@@ -247,7 +244,7 @@ static int __init nsc_ircc_open(int i, chipio_t *info)
void *ret;
int err;
IRDA_DEBUG(2, __FUNCTION__ "()\n");
IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
MESSAGE("%s, Found chip at base=0x%03x\n", driver_name,
info->cfg_base);
......@@ -260,8 +257,8 @@ static int __init nsc_ircc_open(int i, chipio_t *info)
/* Allocate new instance of the driver */
self = kmalloc(sizeof(struct nsc_ircc_cb), GFP_KERNEL);
if (self == NULL) {
ERROR(__FUNCTION__ "(), can't allocate memory for "
"control block!\n");
ERROR("%s(), can't allocate memory for "
"control block!\n", __FUNCTION__);
return -ENOMEM;
}
memset(self, 0, sizeof(struct nsc_ircc_cb));
......@@ -282,8 +279,8 @@ static int __init nsc_ircc_open(int i, chipio_t *info)
/* Reserve the ioports that we need */
ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name);
if (!ret) {
WARNING(__FUNCTION__ "(), can't get iobase of 0x%03x\n",
self->io.fir_base);
WARNING("%s(), can't get iobase of 0x%03x\n",
__FUNCTION__, self->io.fir_base);
dev_self[i] = NULL;
kfree(self);
return -ENODEV;
......@@ -333,7 +330,7 @@ static int __init nsc_ircc_open(int i, chipio_t *info)
self->tx_fifo.tail = self->tx_buff.head;
if (!(dev = dev_alloc("irda%d", &err))) {
ERROR(__FUNCTION__ "(), dev_alloc() failed!\n");
ERROR("%s(), dev_alloc() failed!\n", __FUNCTION__);
return -ENOMEM;
}
......@@ -352,7 +349,7 @@ static int __init nsc_ircc_open(int i, chipio_t *info)
err = register_netdevice(dev);
rtnl_unlock();
if (err) {
ERROR(__FUNCTION__ "(), register_netdev() failed!\n");
ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
return -1;
}
MESSAGE("IrDA: Registered device %s\n", dev->name);
......@@ -388,7 +385,7 @@ static int __exit nsc_ircc_close(struct nsc_ircc_cb *self)
{
int iobase;
IRDA_DEBUG(4, __FUNCTION__ "()\n");
IRDA_DEBUG(4, "%s()\n", __FUNCTION__);
ASSERT(self != NULL, return -1;);
......@@ -402,8 +399,8 @@ static int __exit nsc_ircc_close(struct nsc_ircc_cb *self)
}
/* Release the PORT that this driver is using */
IRDA_DEBUG(4, __FUNCTION__ "(), Releasing Region %03x\n",
self->io.fir_base);
IRDA_DEBUG(4, "%s(), Releasing Region %03x\n",
__FUNCTION__, self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
......@@ -439,7 +436,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
case 0x2e8: outb(0x15, cfg_base+1); break;
case 0x3f8: outb(0x16, cfg_base+1); break;
case 0x2f8: outb(0x17, cfg_base+1); break;
default: ERROR(__FUNCTION__ "(), invalid base_address");
default: ERROR("%s(), invalid base_address", __FUNCTION__);
}
/* Control Signal Routing Register (CSRT) */
......@@ -451,7 +448,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
case 9: temp = 0x05; break;
case 11: temp = 0x06; break;
case 15: temp = 0x07; break;
default: ERROR(__FUNCTION__ "(), invalid irq");
default: ERROR("%s(), invalid irq", __FUNCTION__);
}
outb(1, cfg_base);
......@@ -459,7 +456,7 @@ static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info)
case 0: outb(0x08+temp, cfg_base+1); break;
case 1: outb(0x10+temp, cfg_base+1); break;
case 3: outb(0x18+temp, cfg_base+1); break;
default: ERROR(__FUNCTION__ "(), invalid dma");
default: ERROR("%s(), invalid dma", __FUNCTION__);
}
outb(2, cfg_base); /* Mode Control Register (MCTL) */
......@@ -498,7 +495,7 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
break;
}
info->sir_base = info->fir_base;
IRDA_DEBUG(2, __FUNCTION__ "(), probing fir_base=0x%03x\n",
IRDA_DEBUG(2, "%s(), probing fir_base=0x%03x\n", __FUNCTION__,
info->fir_base);
/* Read control signals routing register (CSRT) */
......@@ -531,7 +528,7 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
info->irq = 15;
break;
}
IRDA_DEBUG(2, __FUNCTION__ "(), probing irq=%d\n", info->irq);
IRDA_DEBUG(2, "%s(), probing irq=%d\n", __FUNCTION__, info->irq);
/* Currently we only read Rx DMA but it will also be used for Tx */
switch ((reg >> 3) & 0x03) {
......@@ -548,7 +545,7 @@ static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info)
info->dma = 3;
break;
}
IRDA_DEBUG(2, __FUNCTION__ "(), probing dma=%d\n", info->dma);
IRDA_DEBUG(2, "%s(), probing dma=%d\n", __FUNCTION__, info->dma);
/* Read mode control register (MCTL) */
outb(CFG_MCTL, cfg_base);
......@@ -694,8 +691,8 @@ static int nsc_ircc_setup(chipio_t *info)
switch_bank(iobase, BANK3);
version = inb(iobase+MID);
IRDA_DEBUG(2, __FUNCTION__ "() Driver %s Found chip version %02x\n",
driver_name, version);
IRDA_DEBUG(2, "%s() Driver %s Found chip version %02x\n",
__FUNCTION__, driver_name, version);
/* Should be 0x2? */
if (0x20 != (version & 0xf0)) {
......@@ -797,39 +794,39 @@ static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id)
switch (dongle_id) {
case 0x00: /* same as */
case 0x01: /* Differential serial interface */
IRDA_DEBUG(0, __FUNCTION__ "(), %s not defined by irda yet\n",
dongle_types[dongle_id]);
IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
__FUNCTION__, dongle_types[dongle_id]);
break;
case 0x02: /* same as */
case 0x03: /* Reserved */
IRDA_DEBUG(0, __FUNCTION__ "(), %s not defined by irda yet\n",
dongle_types[dongle_id]);
IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
__FUNCTION__, dongle_types[dongle_id]);
break;
case 0x04: /* Sharp RY5HD01 */
break;
case 0x05: /* Reserved, but this is what the Thinkpad reports */
IRDA_DEBUG(0, __FUNCTION__ "(), %s not defined by irda yet\n",
dongle_types[dongle_id]);
IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
__FUNCTION__, dongle_types[dongle_id]);
break;
case 0x06: /* Single-ended serial interface */
IRDA_DEBUG(0, __FUNCTION__ "(), %s not defined by irda yet\n",
dongle_types[dongle_id]);
IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
__FUNCTION__, dongle_types[dongle_id]);
break;
case 0x07: /* Consumer-IR only */
IRDA_DEBUG(0, __FUNCTION__ "(), %s is not for IrDA mode\n",
dongle_types[dongle_id]);
IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
__FUNCTION__, dongle_types[dongle_id]);
break;
case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
IRDA_DEBUG(0, __FUNCTION__ "(), %s\n",
dongle_types[dongle_id]);
IRDA_DEBUG(0, "%s(), %s\n",
__FUNCTION__, dongle_types[dongle_id]);
break;
case 0x09: /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
outb(0x28, iobase+7); /* Set irsl[0-2] as output */
break;
case 0x0A: /* same as */
case 0x0B: /* Reserved */
IRDA_DEBUG(0, __FUNCTION__ "(), %s not defined by irda yet\n",
dongle_types[dongle_id]);
IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
__FUNCTION__, dongle_types[dongle_id]);
break;
case 0x0C: /* same as */
case 0x0D: /* HP HSDL-1100/HSDL-2100 */
......@@ -843,15 +840,15 @@ static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id)
outb(0x28, iobase+7); /* Set irsl[0-2] as output */
break;
case 0x0F: /* No dongle connected */
IRDA_DEBUG(0, __FUNCTION__ "(), %s\n",
dongle_types[dongle_id]);
IRDA_DEBUG(0, "%s(), %s\n",
__FUNCTION__, dongle_types[dongle_id]);
switch_bank(iobase, BANK0);
outb(0x62, iobase+MCR);
break;
default:
IRDA_DEBUG(0, __FUNCTION__ "(), invalid dongle_id %#x",
dongle_id);
IRDA_DEBUG(0, "%s(), invalid dongle_id %#x",
__FUNCTION__, dongle_id);
}
/* IRCFG1: IRSL1 and 2 are set to IrDA mode */
......@@ -870,7 +867,6 @@ static void nsc_ircc_init_dongle_interface (int iobase, int dongle_id)
*/
static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
{
unsigned long flags;
__u8 bank;
/* Save current bank */
......@@ -883,31 +879,31 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
switch (dongle_id) {
case 0x00: /* same as */
case 0x01: /* Differential serial interface */
IRDA_DEBUG(0, __FUNCTION__ "(), %s not defined by irda yet\n",
dongle_types[dongle_id]);
IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
__FUNCTION__, dongle_types[dongle_id]);
break;
case 0x02: /* same as */
case 0x03: /* Reserved */
IRDA_DEBUG(0, __FUNCTION__ "(), %s not defined by irda yet\n",
dongle_types[dongle_id]);
IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
__FUNCTION__, dongle_types[dongle_id]);
break;
case 0x04: /* Sharp RY5HD01 */
break;
case 0x05: /* Reserved */
IRDA_DEBUG(0, __FUNCTION__ "(), %s not defined by irda yet\n",
dongle_types[dongle_id]);
IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
__FUNCTION__, dongle_types[dongle_id]);
break;
case 0x06: /* Single-ended serial interface */
IRDA_DEBUG(0, __FUNCTION__ "(), %s not defined by irda yet\n",
dongle_types[dongle_id]);
IRDA_DEBUG(0, "%s(), %s not defined by irda yet\n",
__FUNCTION__, dongle_types[dongle_id]);
break;
case 0x07: /* Consumer-IR only */
IRDA_DEBUG(0, __FUNCTION__ "(), %s is not for IrDA mode\n",
dongle_types[dongle_id]);
IRDA_DEBUG(0, "%s(), %s is not for IrDA mode\n",
__FUNCTION__, dongle_types[dongle_id]);
break;
case 0x08: /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
IRDA_DEBUG(0, __FUNCTION__ "(), %s\n",
dongle_types[dongle_id]);
IRDA_DEBUG(0, "%s(), %s\n",
__FUNCTION__, dongle_types[dongle_id]);
outb(0x00, iobase+4);
if (speed > 115200)
outb(0x01, iobase+4);
......@@ -916,11 +912,10 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
outb(0x01, iobase+4);
if (speed == 4000000) {
save_flags(flags);
cli();
/* There was a cli() there, but we now are already
* under spin_lock_irqsave() - JeanII */
outb(0x81, iobase+4);
outb(0x80, iobase+4);
restore_flags(flags);
} else
outb(0x00, iobase+4);
break;
......@@ -1538,8 +1533,8 @@ static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase)
skb = dev_alloc_skb(len+1);
if (skb == NULL) {
WARNING(__FUNCTION__ "(), memory squeeze, "
"dropping frame.\n");
WARNING("%s(), memory squeeze, "
"dropping frame.\n", __FUNCTION__);
self->stats.rx_dropped++;
/* Restore bank register */
......@@ -1960,33 +1955,30 @@ static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
IRDA_DEBUG(2, __FUNCTION__ "(), %s, (cmd=0x%X)\n", dev->name, cmd);
/* Disable interrupts & save flags */
save_flags(flags);
cli();
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
if (!capable(CAP_NET_ADMIN)) {
ret = -EPERM;
goto out;
break;
}
spin_lock_irqsave(&self->lock, flags);
nsc_ircc_change_speed(self, irq->ifr_baudrate);
spin_unlock_irqrestore(&self->lock, flags);
break;
case SIOCSMEDIABUSY: /* Set media busy */
if (!capable(CAP_NET_ADMIN)) {
ret = -EPERM;
goto out;
break;
}
irda_device_set_media_busy(self->netdev, TRUE);
break;
case SIOCGRECEIVING: /* Check if we are receiving right now */
/* This is already protected */
irq->ifr_receiving = nsc_ircc_is_receiving(self);
break;
default:
ret = -EOPNOTSUPP;
}
out:
restore_flags(flags);
return ret;
}
......
......@@ -431,6 +431,7 @@ static int __init ircc_open(unsigned int fir_base, unsigned int sir_base)
struct ircc_cb *self;
struct irport_cb *irport;
unsigned char low, high, chip, config, dma, irq, version;
unsigned long flags;
IRDA_DEBUG(0, __FUNCTION__ "\n");
......@@ -484,7 +485,6 @@ static int __init ircc_open(unsigned int fir_base, unsigned int sir_base)
return -ENOMEM;
}
memset(self, 0, sizeof(struct ircc_cb));
spin_lock_init(&self->lock);
/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
self->rx_buff.truesize = 4000;
......@@ -555,6 +555,9 @@ static int __init ircc_open(unsigned int fir_base, unsigned int sir_base)
request_region(self->io->fir_base, CHIP_IO_EXTENT, driver_name);
/* Don't allow irport to change under us - Jean II */
spin_lock_irqsave(&self->irport->lock, flags);
/* Initialize QoS for this device */
irda_init_max_qos_capabilies(&irport->qos);
......@@ -581,6 +584,7 @@ static int __init ircc_open(unsigned int fir_base, unsigned int sir_base)
self->netdev->stop = &ircc_net_close;
irport_start(self->irport);
spin_unlock_irqrestore(&self->irport->lock, flags);
self->pmdev = pm_register(PM_SYS_DEV, PM_SYS_IRDA, ircc_pmproc);
if (self->pmdev)
......@@ -598,6 +602,7 @@ static int __init ircc_open(unsigned int fir_base, unsigned int sir_base)
*
* Change the speed of the device
*
* This function should be called with irqs off and the spin-lock held.
*/
static void ircc_change_speed(void *priv, u32 speed)
{
......@@ -658,6 +663,7 @@ static void ircc_change_speed(void *priv, u32 speed)
/* Make special FIR init if necessary */
if (speed > 115200) {
/* No need to lock, already locked - Jean II */
irport_stop(self->irport);
/* Install FIR transmit handler */
......@@ -674,6 +680,7 @@ static void ircc_change_speed(void *priv, u32 speed)
} else {
/* Install SIR transmit handler */
dev->hard_start_xmit = &irport_hard_xmit;
/* No need to lock, already locked - Jean II */
irport_start(self->irport);
IRDA_DEBUG(0, __FUNCTION__
......@@ -727,20 +734,26 @@ static int ircc_hard_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
/* Make sure tests & speed change are atomic */
spin_lock_irqsave(&self->irport->lock, flags);
/* Note : you should make sure that speed changes are not going
* to corrupt any outgoing frame. Look at nsc-ircc for the gory
* details - Jean II */
/* Check if we need to change the speed after this frame */
speed = irda_get_next_speed(skb);
if ((speed != self->io->speed) && (speed != -1)) {
/* Check for empty frame */
if (!skb->len) {
ircc_change_speed(self, speed);
spin_unlock_irqrestore(&self->irport->lock, flags);
dev_kfree_skb(skb);
return 0;
} else
self->new_speed = speed;
}
spin_lock_irqsave(&self->lock, flags);
memcpy(self->tx_buff.head, skb->data, skb->len);
self->tx_buff.len = skb->len;
......@@ -763,7 +776,7 @@ static int ircc_hard_xmit(struct sk_buff *skb, struct net_device *dev)
/* Transmit frame */
ircc_dma_xmit(self, iobase, 0);
}
spin_unlock_irqrestore(&self->lock, flags);
spin_unlock_irqrestore(&self->irport->lock, flags);
dev_kfree_skb(skb);
return 0;
......@@ -936,14 +949,14 @@ static void ircc_dma_receive_complete(struct ircc_cb *self, int iobase)
len -= 4;
if ((len < 2) || (len > 2050)) {
WARNING(__FUNCTION__ "(), bogus len=%d\n", len);
WARNING("%s(), bogus len=%d\n", __FUNCTION__, len);
return;
}
IRDA_DEBUG(2, __FUNCTION__ ": msgcnt = %d, len=%d\n", msgcnt, len);
skb = dev_alloc_skb(len+1);
if (!skb) {
WARNING(__FUNCTION__ "(), memory squeeze, dropping frame.\n");
WARNING("%s(), memory squeeze, dropping frame.\n", __FUNCTION__);
return;
}
/* Make sure IP header gets aligned */
......@@ -985,12 +998,13 @@ static void ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
/* Check if we should use the SIR interrupt handler */
if (self->io->speed < 576000) {
/* Will spinlock itself - Jean II */
irport_interrupt(irq, dev_id, regs);
return;
}
iobase = self->io->fir_base;
spin_lock(&self->lock);
spin_lock(&self->irport->lock);
register_bank(iobase, 0);
iir = inb(iobase+IRCC_IIR);
......@@ -1013,7 +1027,7 @@ static void ircc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
register_bank(iobase, 0);
outb(IRCC_IER_ACTIVE_FRAME|IRCC_IER_EOM, iobase+IRCC_IER);
spin_unlock(&self->lock);
spin_unlock(&self->irport->lock);
}
#if 0 /* unused */
......@@ -1072,7 +1086,7 @@ static int ircc_net_open(struct net_device *dev)
if (request_dma(self->io->dma, dev->name)) {
irport_net_close(dev);
WARNING(__FUNCTION__ "(), unable to allocate DMA=%d\n", self->io->dma);
WARNING("%s(), unable to allocate DMA=%d\n", __FUNCTION__, self->io->dma);
return -EAGAIN;
}
......@@ -1093,7 +1107,7 @@ static int ircc_net_close(struct net_device *dev)
struct ircc_cb *self;
int iobase;
IRDA_DEBUG(0, __FUNCTION__ "\n");
IRDA_DEBUG(0, "%s()\n", __FUNCTION__);
ASSERT(dev != NULL, return -1;);
irport = (struct irport_cb *) dev->priv;
......@@ -1128,17 +1142,15 @@ static void ircc_suspend(struct ircc_cb *self)
static void ircc_wakeup(struct ircc_cb *self)
{
unsigned long flags;
if (!self->io->suspended)
return;
save_flags(flags);
cli();
/* The code was doing a "cli()" here, but this can't be right.
* If you need protection, do it in net_open with a spinlock
* or give a good reason. - Jean II */
ircc_net_open(self->netdev);
restore_flags(flags);
MESSAGE("%s, Waking up\n", driver_name);
}
......@@ -1174,6 +1186,7 @@ static int __exit ircc_close(struct ircc_cb *self)
iobase = self->irport->io.fir_base;
/* This will destroy irport */
irport_close(self->irport);
/* Stop interrupts */
......@@ -1187,6 +1200,7 @@ static int __exit ircc_close(struct ircc_cb *self)
outb(IRCC_CFGA_IRDA_SIR_A|IRCC_CFGA_TX_POLARITY, iobase+IRCC_SCE_CFGA);
outb(IRCC_CFGB_IR, iobase+IRCC_SCE_CFGB);
#endif
/* Release the PORT that this driver is using */
IRDA_DEBUG(0, __FUNCTION__ "(), releasing 0x%03x\n", iobase);
......
......@@ -161,7 +161,7 @@ static int tekram_change_speed(struct irda_task *task)
irda_task_next_state(task, IRDA_TASK_CHILD_DONE);
break;
case IRDA_TASK_CHILD_WAIT:
WARNING(__FUNCTION__ "(), resetting dongle timed out!\n");
WARNING("%s(), resetting dongle timed out!\n", __FUNCTION__);
ret = -1;
break;
case IRDA_TASK_CHILD_DONE:
......@@ -187,7 +187,7 @@ static int tekram_change_speed(struct irda_task *task)
self->speed_task = NULL;
break;
default:
ERROR(__FUNCTION__ "(), unknown state %d\n", task->state);
ERROR("%s(), unknown state %d\n", __FUNCTION__, task->state);
irda_task_next_state(task, IRDA_TASK_DONE);
self->speed_task = NULL;
ret = -1;
......@@ -255,7 +255,7 @@ int tekram_reset(struct irda_task *task)
self->reset_task = NULL;
break;
default:
ERROR(__FUNCTION__ "(), unknown state %d\n", task->state);
ERROR("%s(), unknown state %d\n", __FUNCTION__, task->state);
irda_task_next_state(task, IRDA_TASK_DONE);
self->reset_task = NULL;
ret = -1;
......
......@@ -421,8 +421,8 @@ toshoboe_interrupt (int irq, void *dev_id, struct pt_regs *regs)
}
else
{
printk (KERN_INFO __FUNCTION__
"(), memory squeeze, dropping frame.\n");
printk (KERN_INFO
"%s(), memory squeeze, dropping frame.\n", __FUNCTION__);
}
self->taskfile->recv[self->rxs].control = 0x83;
......@@ -824,7 +824,7 @@ toshoboe_probe (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
if (!(dev = dev_alloc("irda%d", &err))) {
ERROR(__FUNCTION__ "(), dev_alloc() failed!\n");
ERROR("%s(), dev_alloc() failed!\n", __FUNCTION__);
err = -ENOMEM;
goto freebufs;
}
......@@ -843,7 +843,7 @@ toshoboe_probe (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
err = register_netdevice(dev);
rtnl_unlock();
if (err) {
ERROR(__FUNCTION__ "(), register_netdev() failed!\n");
ERROR("%s(), register_netdev() failed!\n", __FUNCTION__);
/* XXX there is no freeing of dev? */
goto freebufs;
}
......
......@@ -175,6 +175,7 @@ int w83977af_open(int i, unsigned int iobase, unsigned int irq,
return -ENOMEM;
}
memset(self, 0, sizeof(struct w83977af_ir));
spin_lock_init(&self->lock);
/* Need to store self somewhere */
dev_self[i] = self;
......@@ -236,7 +237,7 @@ int w83977af_open(int i, unsigned int iobase, unsigned int irq,
self->rx_buff.data = self->rx_buff.head;
if (!(dev = dev_alloc("irda%d", &err))) {
ERROR(__FUNCTION__ "(), dev_alloc() failed!\n");
ERROR("%s(), dev_alloc() failed!\n", __FUNCTION__);
return -ENOMEM;
}
dev->priv = (void *) self;
......@@ -254,7 +255,7 @@ int w83977af_open(int i, unsigned int iobase, unsigned int irq,
err = register_netdevice(dev);
rtnl_unlock();
if (err) {
ERROR(__FUNCTION__ "(), register_netdevice() failed!\n");
ERROR("%s(), register_netdevice() failed!\n", __FUNCTION__);
return -1;
}
MESSAGE("IrDA: Registered device %s\n", dev->name);
......@@ -603,8 +604,7 @@ static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
switch_bank(iobase, SET2);
outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
save_flags(flags);
cli();
spin_lock_irqsave(&self->lock, flags);
disable_dma(self->io.dma);
clear_dma_ff(self->io.dma);
......@@ -623,7 +623,7 @@ static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
hcr = inb(iobase+HCR);
outb(hcr | HCR_EN_DMA, iobase+HCR);
enable_dma(self->io.dma);
restore_flags(flags);
spin_unlock_irqrestore(&self->lock, flags);
#else
outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
#endif
......@@ -761,8 +761,7 @@ int w83977af_dma_receive(struct w83977af_ir *self)
self->rx_buff.data = self->rx_buff.head;
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
save_flags(flags);
cli();
spin_lock_irqsave(&self->lock, flags);
disable_dma(self->io.dma);
clear_dma_ff(self->io.dma);
......@@ -788,7 +787,7 @@ int w83977af_dma_receive(struct w83977af_ir *self)
hcr = inb(iobase+HCR);
outb(hcr | HCR_EN_DMA, iobase+HCR);
enable_dma(self->io.dma);
restore_flags(flags);
spin_unlock_irqrestore(&self->lock, flags);
#else
outb(inb(iobase+HCR) | HCR_EN_DMA, iobase+HCR);
#endif
......@@ -892,8 +891,8 @@ int w83977af_dma_receive_complete(struct w83977af_ir *self)
skb = dev_alloc_skb(len+1);
if (skb == NULL) {
printk(KERN_INFO __FUNCTION__
"(), memory squeeze, dropping frame.\n");
printk(KERN_INFO
"%s(), memory squeeze, dropping frame.\n", __FUNCTION__);
/* Restore set register */
outb(set, iobase+SSR);
......@@ -1334,10 +1333,8 @@ static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
IRDA_DEBUG(2, __FUNCTION__ "(), %s, (cmd=0x%X)\n", dev->name, cmd);
/* Disable interrupts & save flags */
save_flags(flags);
cli();
spin_lock_irqsave(&self->lock, flags);
switch (cmd) {
case SIOCSBANDWIDTH: /* Set bandwidth */
if (!capable(CAP_NET_ADMIN)) {
......@@ -1360,7 +1357,7 @@ static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
ret = -EOPNOTSUPP;
}
out:
restore_flags(flags);
spin_unlock_irqrestore(&self->lock, flags);
return ret;
}
......
......@@ -100,20 +100,65 @@
* ETHTOOL_* further support (Tim Hockin)
version 1.0.13:
* ETHTOOL_[GS]EEPROM support (Tim Hockin)
* ETHTOOL_[G]EEPROM support (Tim Hockin)
version 1.0.13:
* crc cleanup (Matt Domsch <Matt_Domsch@dell.com>)
version 1.0.14:
* Cleanup some messages and autoneg in ethtool (Tim Hockin)
version 1.0.15:
* Get rid of cable_magic flag
* use new (National provided) solution for cable magic issue
version 1.0.16:
* call netdev_rx() for RxErrors (Manfred Spraul)
* formatting and cleanups
* change options and full_duplex arrays to be zero
initialized
* enable only the WoL and PHY interrupts in wol mode
TODO:
* big endian support with CFG:BEM instead of cpu_to_le32
* support for an external PHY
* flow control
*/
#if !defined(__OPTIMIZE__)
#warning You must compile this file with the correct options!
#warning See the last lines of the source file.
#error You must compile this driver with "-O".
#endif
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/rtnetlink.h>
#include <linux/mii.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#define DRV_NAME "natsemi"
#define DRV_VERSION "1.07+LK1.0.13"
#define DRV_RELDATE "Nov 12, 2001"
#define DRV_VERSION "1.07+LK1.0.16"
#define DRV_RELDATE "Aug 28, 2002"
/* Updated to recommendations in pci-skeleton v2.03. */
......@@ -132,7 +177,12 @@ c-help: http://www.scyld.com/network/natsemi.html
/* The user-configurable values.
These may be modified when a driver module is loaded.*/
static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
#define NATSEMI_DEF_MSG (NETIF_MSG_DRV | \
NETIF_MSG_LINK | \
NETIF_MSG_WOL | \
NETIF_MSG_RX_ERR | \
NETIF_MSG_TX_ERR)
static int debug = NATSEMI_DEF_MSG;
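/*
 * With this change 'debug' is a netif_msg_* bitmask rather than a 0-7
 * level; a minimal sketch of how the per-category helpers gate output:
 *
 *	if (netif_msg_link(np))
 *		printk(KERN_INFO "%s: link up.\n", dev->name);
 *
 * The per-device copy is kept in np->msg_enable and may be changed at
 * run time via ETHTOOL_SMSGLVL (handled in netdev_ethtool_ioctl below).
 */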
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;
......@@ -152,8 +202,8 @@ static int rx_copybreak;
The media type is usually passed in 'options[]'.
*/
#define MAX_UNITS 8 /* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int options[MAX_UNITS];
static int full_duplex[MAX_UNITS];
/* Operational parameters that are set at compile time. */
......@@ -164,7 +214,7 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE 16
#define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */
#define RX_RING_SIZE 64
#define RX_RING_SIZE 32
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
......@@ -183,37 +233,6 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
#if !defined(__OPTIMIZE__)
#warning You must compile this file with the correct options!
#warning See the last lines of the source file.
#error You must compile this driver with "-O".
#endif
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/rtnetlink.h>
#include <linux/mii.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v1.07 1/9/2001 Written by Donald Becker <becker@scyld.com>\n"
......@@ -232,7 +251,7 @@ MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM_DESC(max_interrupt_work, "DP8381x maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
MODULE_PARM_DESC(debug, "DP8381x debug level (0-5)");
MODULE_PARM_DESC(debug, "DP8381x default debug bitmask");
MODULE_PARM_DESC(rx_copybreak, "DP8381x copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(options, "DP8381x: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
......@@ -394,14 +413,12 @@ enum register_offsets {
SDCFG = 0xF8
};
/* the values for the 'magic' registers above (PGSEL=1) */
#ifdef CONFIG_NATSEMI_CABLE_MAGIC
#define PMDCSR_VAL 0x1898
#else
#define PMDCSR_VAL 0x189C
#endif
#define PMDCSR_VAL 0x189c /* enable preferred adaptation circuitry */
#define TSTDAT_VAL 0x0
#define DSPCFG_VAL 0x5040
#define SDCFG_VAL 0x008c
#define SDCFG_VAL 0x008c /* set voltage thresholds for Signal Detect */
#define DSPCFG_LOCK 0x20 /* coefficient lock bit in DSPCFG */
#define TSTDAT_FIXED 0xe8 /* magic number for bad coefficients */
/* misc PCI space registers */
enum pci_register_offsets {
......@@ -421,6 +438,7 @@ enum ChipCmd_bits {
enum ChipConfig_bits {
CfgPhyDis = 0x200,
CfgPhyRst = 0x400,
CfgExtPhy = 0x1000,
CfgAnegEnable = 0x2000,
CfgAneg100 = 0x4000,
CfgAnegFull = 0x8000,
......@@ -630,10 +648,13 @@ struct netdev_private {
u32 SavedClkRun;
/* silicon revision */
u32 srr;
/* expected DSPCFG value */
u16 dspcfg;
/* MII transceiver section. */
u16 advertising; /* NWay media advertisement */
unsigned int iosize;
spinlock_t lock;
u32 msg_enable;
};
static int eeprom_read(long ioaddr, int location);
......@@ -643,6 +664,8 @@ static void natsemi_reset(struct net_device *dev);
static void natsemi_reload_eeprom(struct net_device *dev);
static void natsemi_stop_rxtx(struct net_device *dev);
static int netdev_open(struct net_device *dev);
static void do_cable_magic(struct net_device *dev);
static void undo_cable_magic(struct net_device *dev);
static void check_link(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
......@@ -753,6 +776,7 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
np->iosize = iosize;
spin_lock_init(&np->lock);
np->msg_enable = debug;
/* Reset the chip to erase previous misconfiguration. */
natsemi_reload_eeprom(dev);
......@@ -763,14 +787,15 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
option = dev->mem_start;
/* The lower four bits are the media type. */
if (option > 0) {
if (option) {
if (option & 0x200)
np->full_duplex = 1;
if (option & 15)
printk(KERN_INFO "%s: ignoring user supplied media type %d",
printk(KERN_INFO
"%s: ignoring user supplied media type %d",
dev->name, option & 15);
}
if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
if (find_cnt < MAX_UNITS && full_duplex[find_cnt])
np->full_duplex = 1;
/* The chip-specific entries in the device structure. */
......@@ -796,14 +821,17 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
}
netif_carrier_off(dev);
printk(KERN_INFO "%s: %s at 0x%lx, ",
dev->name, natsemi_pci_info[chip_idx].name, ioaddr);
for (i = 0; i < ETH_ALEN-1; i++)
printk("%2.2x:", dev->dev_addr[i]);
printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
if (netif_msg_drv(np)) {
printk(KERN_INFO "%s: %s at %#08lx, ",
dev->name, natsemi_pci_info[chip_idx].name, ioaddr);
for (i = 0; i < ETH_ALEN-1; i++)
printk("%02x:", dev->dev_addr[i]);
printk("%02x, IRQ %d.\n", dev->dev_addr[i], irq);
}
np->advertising = mdio_read(dev, 1, MII_ADVERTISE);
if ((readl(ioaddr + ChipConfig) & 0xe000) != 0xe000) {
if ((readl(ioaddr + ChipConfig) & 0xe000) != 0xe000
&& netif_msg_probe(np)) {
u32 chip_config = readl(ioaddr + ChipConfig);
printk(KERN_INFO "%s: Transceiver default autonegotiation %s "
"10%s %s duplex.\n",
......@@ -812,12 +840,18 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
chip_config & CfgAneg100 ? "0" : "",
chip_config & CfgAnegFull ? "full" : "half");
}
printk(KERN_INFO "%s: Transceiver status 0x%4.4x advertising %4.4x.\n",
dev->name, mdio_read(dev, 1, MII_BMSR),
np->advertising);
if (netif_msg_probe(np))
printk(KERN_INFO
"%s: Transceiver status %#04x advertising %#04x.\n",
dev->name, mdio_read(dev, 1, MII_BMSR),
np->advertising);
/* save the silicon revision for later querying */
np->srr = readl(ioaddr + SiliconRev);
if (netif_msg_hw(np))
printk(KERN_INFO "%s: silicon revision %#04x.\n",
dev->name, np->srr);
return 0;
}
......@@ -914,6 +948,7 @@ static void natsemi_reset(struct net_device *dev)
u32 rfcr;
u16 pmatch[3];
u16 sopass[3];
struct netdev_private *np = dev->priv;
/*
* Resetting the chip causes some registers to be lost.
......@@ -947,10 +982,10 @@ static void natsemi_reset(struct net_device *dev)
break;
udelay(5);
}
if (i==NATSEMI_HW_TIMEOUT && debug) {
if (i==NATSEMI_HW_TIMEOUT && netif_msg_hw(np)) {
printk(KERN_INFO "%s: reset did not complete in %d usec.\n",
dev->name, i*5);
} else if (debug > 2) {
} else if (netif_msg_hw(np)) {
printk(KERN_DEBUG "%s: reset completed in %d usec.\n",
dev->name, i*5);
}
......@@ -979,6 +1014,7 @@ static void natsemi_reset(struct net_device *dev)
static void natsemi_reload_eeprom(struct net_device *dev)
{
struct netdev_private *np = dev->priv;
int i;
writel(EepromReload, dev->base_addr + PCIBusCfg);
......@@ -987,10 +1023,10 @@ static void natsemi_reload_eeprom(struct net_device *dev)
break;
udelay(5);
}
if (i==NATSEMI_HW_TIMEOUT && debug) {
if (i==NATSEMI_HW_TIMEOUT && netif_msg_hw(np)) {
printk(KERN_INFO "%s: EEPROM did not reload in %d usec.\n",
dev->name, i*5);
} else if (debug > 2) {
} else if (netif_msg_hw(np)) {
printk(KERN_DEBUG "%s: EEPROM reloaded in %d usec.\n",
dev->name, i*5);
}
......@@ -999,6 +1035,7 @@ static void natsemi_reload_eeprom(struct net_device *dev)
static void natsemi_stop_rxtx(struct net_device *dev)
{
long ioaddr = dev->base_addr;
struct netdev_private *np = dev->priv;
int i;
writel(RxOff | TxOff, ioaddr + ChipCmd);
......@@ -1007,10 +1044,10 @@ static void natsemi_stop_rxtx(struct net_device *dev)
break;
udelay(5);
}
if (i==NATSEMI_HW_TIMEOUT && debug) {
if (i==NATSEMI_HW_TIMEOUT && netif_msg_hw(np)) {
printk(KERN_INFO "%s: Tx/Rx process did not stop in %d usec.\n",
dev->name, i*5);
} else if (debug > 2) {
} else if (netif_msg_hw(np)) {
printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n",
dev->name, i*5);
}
......@@ -1028,7 +1065,7 @@ static int netdev_open(struct net_device *dev)
i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
if (i) return i;
if (debug > 1)
if (netif_msg_ifup(np))
printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
dev->name, dev->irq);
i = alloc_ring(dev);
......@@ -1043,8 +1080,8 @@ static int netdev_open(struct net_device *dev)
netif_start_queue(dev);
if (debug > 2)
printk(KERN_DEBUG "%s: Done netdev_open(), status: %x.\n",
if (netif_msg_ifup(np))
printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n",
dev->name, (int)readl(ioaddr + ChipCmd));
/* Set the timer to check for link beat. */
......@@ -1057,6 +1094,54 @@ static int netdev_open(struct net_device *dev)
return 0;
}
static void do_cable_magic(struct net_device *dev)
{
/*
* 100 MBit links with short cables can trip an issue with the chip.
* The problem manifests as lots of CRC errors and/or a flickering
* activity LED while idle. This process is based on instructions
* from engineers at National.
*/
if (readl(dev->base_addr + ChipConfig) & CfgSpeed100) {
u16 data;
writew(1, dev->base_addr + PGSEL);
/*
* coefficient visibility should already be enabled via
* DSPCFG | 0x1000
*/
data = readw(dev->base_addr + TSTDAT) & 0xff;
/*
* the value must be negative, and within certain values
* (these values all come from National)
*/
if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) {
struct netdev_private *np = dev->priv;
/* the bug has been triggered - fix the coefficient */
writew(TSTDAT_FIXED, dev->base_addr + TSTDAT);
/* lock the value */
data = readw(dev->base_addr + DSPCFG);
np->dspcfg = data | DSPCFG_LOCK;
writew(np->dspcfg, dev->base_addr + DSPCFG);
}
writew(0, dev->base_addr + PGSEL);
}
}
static void undo_cable_magic(struct net_device *dev)
{
u16 data;
struct netdev_private *np = dev->priv;
writew(1, dev->base_addr + PGSEL);
/* make sure the lock bit is clear */
data = readw(dev->base_addr + DSPCFG);
np->dspcfg = data & ~DSPCFG_LOCK;
writew(np->dspcfg, dev->base_addr + DSPCFG);
writew(0, dev->base_addr + PGSEL);
}
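/*
 * Both helpers are meant to bracket the carrier state; a sketch of the
 * expected call pattern (as used by check_link() below) is
 *
 *	netif_carrier_on(dev);  do_cable_magic(dev);
 *	...
 *	netif_carrier_off(dev); undo_cable_magic(dev);
 *
 * so the fixed-up coefficient is only held while a 100 Mbit link is up.
 */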
static void check_link(struct net_device *dev)
{
struct netdev_private *np = dev->priv;
......@@ -1064,30 +1149,32 @@ static void check_link(struct net_device *dev)
int duplex;
int chipcfg = readl(ioaddr + ChipConfig);
if(!(chipcfg & CfgLink)) {
if (!(chipcfg & CfgLink)) {
if (netif_carrier_ok(dev)) {
if (debug)
printk(KERN_INFO "%s: no link. Disabling watchdog.\n",
if (netif_msg_link(np))
printk(KERN_NOTICE "%s: link down.\n",
dev->name);
netif_carrier_off(dev);
undo_cable_magic(dev);
}
return;
}
if (!netif_carrier_ok(dev)) {
if (debug)
printk(KERN_INFO "%s: link is back. Enabling watchdog.\n",
dev->name);
if (netif_msg_link(np))
printk(KERN_NOTICE "%s: link up.\n", dev->name);
netif_carrier_on(dev);
do_cable_magic(dev);
}
duplex = np->full_duplex || (chipcfg & CfgFullDuplex ? 1 : 0);
/* if duplex is set then bit 28 must be set, too */
if (duplex ^ !!(np->rx_config & RxAcceptTx)) {
if (debug)
printk(KERN_INFO "%s: Setting %s-duplex based on negotiated link"
" capability.\n", dev->name,
duplex ? "full" : "half");
if (netif_msg_link(np))
printk(KERN_INFO
"%s: Setting %s-duplex based on negotiated "
"link capability.\n", dev->name,
duplex ? "full" : "half");
if (duplex) {
np->rx_config |= RxAcceptTx;
np->tx_config |= TxCarrierIgn | TxHeartIgn;
......@@ -1106,17 +1193,12 @@ static void init_registers(struct net_device *dev)
long ioaddr = dev->base_addr;
int i;
/* save the silicon revision for later */
if (debug > 4)
printk(KERN_DEBUG "%s: found silicon revision %xh.\n",
dev->name, np->srr);
for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
if (readl(dev->base_addr + ChipConfig) & CfgAnegDone)
break;
udelay(10);
}
if (i==NATSEMI_HW_TIMEOUT && debug) {
if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) {
printk(KERN_INFO
"%s: autonegotiation did not complete in %d usec.\n",
dev->name, i*10);
......@@ -1135,6 +1217,7 @@ static void init_registers(struct net_device *dev)
writew(DSPCFG_VAL, ioaddr + DSPCFG);
writew(SDCFG_VAL, ioaddr + SDCFG);
writew(0, ioaddr + PGSEL);
np->dspcfg = DSPCFG_VAL;
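/* The value written to DSPCFG is remembered in np->dspcfg; netdev_timer()
 * later re-reads the register and treats any mismatch as a sign that the
 * PHY has silently reset itself (see the dspcfg check in the timer). */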
/* Enable PHY Specific event based interrupts. Link state change
and Auto-Negotiation Completion are among the affected.
......@@ -1181,8 +1264,8 @@ static void init_registers(struct net_device *dev)
* nothing will be written to memory. */
np->SavedClkRun = readl(ioaddr + ClkRun);
writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun);
if (np->SavedClkRun & PMEStatus) {
printk(KERN_NOTICE "%s: Wake-up event %8.8x\n",
if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) {
printk(KERN_NOTICE "%s: Wake-up event %#08x\n",
dev->name, readl(ioaddr + WOLCmd));
}
......@@ -1197,8 +1280,10 @@ static void init_registers(struct net_device *dev)
writel(StatsClear, ioaddr + StatsCtrl); /* Clear Stats */
}
/*
* The frequency on this has been increased because of a nasty little problem.
/*
* Purpose:
* check for sudden death of the NIC:
*
* It seems that a reference set for this chip went out with incorrect info,
* and there exist boards that aren't quite right. An unexpected voltage drop
* can cause the PHY to get itself in a weird state (basically reset..).
......@@ -1212,7 +1297,7 @@ static void netdev_timer(unsigned long data)
long ioaddr = dev->base_addr;
u16 dspcfg;
if (debug > 3) {
if (netif_msg_timer(np)) {
/* DO NOT read the IntrStatus register,
* a read clears any pending interrupts.
*/
......@@ -1220,15 +1305,18 @@ static void netdev_timer(unsigned long data)
dev->name);
}
spin_lock_irq(&np->lock);
/* check for a nasty random phy-reset - use dspcfg as a flag */
writew(1, ioaddr+PGSEL);
dspcfg = readw(ioaddr+DSPCFG);
writew(0, ioaddr+PGSEL);
if (dspcfg != DSPCFG_VAL) {
if (dspcfg != np->dspcfg) {
if (!netif_queue_stopped(dev)) {
printk(KERN_INFO
"%s: possible phy reset: re-initializing\n",
dev->name);
spin_unlock_irq(&np->lock);
if (netif_msg_hw(np))
printk(KERN_NOTICE "%s: possible phy reset: "
"re-initializing\n", dev->name);
disable_irq(dev->irq);
spin_lock_irq(&np->lock);
init_registers(dev);
......@@ -1237,10 +1325,10 @@ static void netdev_timer(unsigned long data)
} else {
/* hurry back */
next_tick = HZ;
spin_unlock_irq(&np->lock);
}
} else {
/* init_registers() calls check_link() for the above case */
spin_lock_irq(&np->lock);
check_link(dev);
spin_unlock_irq(&np->lock);
}
......@@ -1251,18 +1339,18 @@ static void dump_ring(struct net_device *dev)
{
struct netdev_private *np = dev->priv;
if (debug > 2) {
if (netif_msg_pktdata(np)) {
int i;
printk(KERN_DEBUG " Tx ring at %p:\n", np->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++) {
printk(KERN_DEBUG " #%d desc. %8.8x %8.8x %8.8x.\n",
printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
i, np->tx_ring[i].next_desc,
np->tx_ring[i].cmd_status,
np->tx_ring[i].addr);
}
printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++) {
printk(KERN_DEBUG " #%d desc. %8.8x %8.8x %8.8x.\n",
printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
i, np->rx_ring[i].next_desc,
np->rx_ring[i].cmd_status,
np->rx_ring[i].addr);
......@@ -1278,9 +1366,11 @@ static void tx_timeout(struct net_device *dev)
disable_irq(dev->irq);
spin_lock_irq(&np->lock);
if (netif_device_present(dev)) {
printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
" resetting...\n",
dev->name, readl(ioaddr + IntrStatus));
if (netif_msg_tx_err(np))
printk(KERN_WARNING
"%s: Transmit timed out, status %#08x,"
" resetting...\n",
dev->name, readl(ioaddr + IntrStatus));
dump_ring(dev);
natsemi_reset(dev);
......@@ -1304,8 +1394,8 @@ static int alloc_ring(struct net_device *dev)
{
struct netdev_private *np = dev->priv;
np->rx_ring = pci_alloc_consistent(np->pci_dev,
sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
&np->ring_dma);
sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
&np->ring_dma);
if (!np->rx_ring)
return -ENOMEM;
np->tx_ring = &np->rx_ring[RX_RING_SIZE];
......@@ -1438,7 +1528,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
if (debug > 4) {
if (netif_msg_tx_queued(np)) {
printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
dev->name, np->cur_tx, entry);
}
......@@ -1451,14 +1541,11 @@ static void netdev_tx_done(struct net_device *dev)
for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
int entry = np->dirty_tx % TX_RING_SIZE;
if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn)) {
if (debug > 4)
printk(KERN_DEBUG "%s: tx frame #%d is busy.\n",
dev->name, np->dirty_tx);
if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
break;
}
if (debug > 4)
printk(KERN_DEBUG "%s: tx frame #%d finished with status %8.8xh.\n",
if (netif_msg_tx_done(np))
printk(KERN_DEBUG
"%s: tx frame #%d finished, status %#08x.\n",
dev->name, np->dirty_tx,
le32_to_cpu(np->tx_ring[entry].cmd_status));
if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
......@@ -1495,30 +1582,31 @@ static void netdev_tx_done(struct net_device *dev)
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
struct net_device *dev = dev_instance;
struct netdev_private *np;
long ioaddr;
struct netdev_private *np = dev->priv;
long ioaddr = dev->base_addr;
int boguscnt = max_interrupt_work;
ioaddr = dev->base_addr;
np = dev->priv;
if (!netif_device_present(dev))
return;
do {
/* Reading automatically acknowledges all int sources. */
u32 intr_status = readl(ioaddr + IntrStatus);
if (debug > 4)
printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
if (netif_msg_intr(np))
printk(KERN_DEBUG "%s: Interrupt, status %#08x.\n",
dev->name, intr_status);
if (intr_status == 0)
break;
if (intr_status & (IntrRxDone | IntrRxIntr | RxStatusFIFOOver | IntrRxErr | IntrRxOverrun ))
if (intr_status &
(IntrRxDone | IntrRxIntr | RxStatusFIFOOver |
IntrRxErr | IntrRxOverrun)) {
netdev_rx(dev);
}
if (intr_status & (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr) ) {
if (intr_status &
(IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) {
spin_lock(&np->lock);
netdev_tx_done(dev);
spin_unlock(&np->lock);
......@@ -1529,16 +1617,16 @@ static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
netdev_error(dev, intr_status);
if (--boguscnt < 0) {
printk(KERN_WARNING "%s: Too much work at interrupt, "
"status=0x%4.4x.\n",
dev->name, intr_status);
if (netif_msg_intr(np))
printk(KERN_WARNING
"%s: Too much work at interrupt, "
"status=%#08x.\n", dev->name, intr_status);
break;
}
} while (1);
if (debug > 4)
printk(KERN_DEBUG "%s: exiting interrupt.\n",
dev->name);
if (netif_msg_intr(np))
printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name);
}
/* This routine is logically part of the interrupt handler, but separated
......@@ -1552,22 +1640,24 @@ static void netdev_rx(struct net_device *dev)
/* If the driver owns the next entry it's a new packet. Send it up. */
while (desc_status < 0) { /* e.g. & DescOwn */
if (debug > 4)
printk(KERN_DEBUG " In netdev_rx() entry %d status was %8.8x.\n",
entry, desc_status);
if (netif_msg_rx_status(np))
printk(KERN_DEBUG
" netdev_rx() entry %d status was %#08x.\n",
entry, desc_status);
if (--boguscnt < 0)
break;
if ((desc_status & (DescMore|DescPktOK|DescRxLong)) != DescPktOK) {
if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
if (desc_status & DescMore) {
printk(KERN_WARNING "%s: Oversized(?) Ethernet frame spanned "
"multiple buffers, entry %#x status %x.\n",
dev->name, np->cur_rx, desc_status);
if (netif_msg_rx_err(np))
printk(KERN_WARNING
"%s: Oversized(?) Ethernet "
"frame spanned multiple "
"buffers, entry %#08x "
"status %#08x.\n", dev->name,
np->cur_rx, desc_status);
np->stats.rx_length_errors++;
} else {
/* There was a error. */
if (debug > 2)
printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
desc_status);
/* There was an error. */
np->stats.rx_errors++;
if (desc_status & (DescRxAbort|DescRxOver))
np->stats.rx_over_errors++;
......@@ -1582,8 +1672,8 @@ static void netdev_rx(struct net_device *dev)
struct sk_buff *skb;
/* Omit CRC size. */
int pkt_len = (desc_status & DescSizeMask) - 4;
/* Check if the packet is long enough to accept without copying
to a minimally-sized skbuff. */
/* Check if the packet is long enough to accept
* without copying to a minimally-sized skbuff. */
if (pkt_len < rx_copybreak
&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
skb->dev = dev;
......@@ -1646,11 +1736,16 @@ static void netdev_error(struct net_device *dev, int intr_status)
spin_lock(&np->lock);
if (intr_status & LinkChange) {
printk(KERN_NOTICE
"%s: Link changed: Autonegotiation advertising"
" %4.4x partner %4.4x.\n", dev->name,
(int)mdio_read(dev, 1, MII_ADVERTISE),
(int)mdio_read(dev, 1, MII_LPA));
u16 adv = mdio_read(dev, 1, MII_ADVERTISE);
u16 lpa = mdio_read(dev, 1, MII_LPA);
if (mdio_read(dev, 1, MII_BMCR) & BMCR_ANENABLE
&& netif_msg_link(np)) {
printk(KERN_INFO
"%s: Autonegotiation advertising"
" %#04x partner %#04x.\n", dev->name,
adv, lpa);
}
/* read MII int status to clear the flag */
readw(ioaddr + MIntrStatus);
check_link(dev);
......@@ -1661,29 +1756,28 @@ static void netdev_error(struct net_device *dev, int intr_status)
if (intr_status & IntrTxUnderrun) {
if ((np->tx_config & TxDrthMask) < 62)
np->tx_config += 2;
if (debug > 2)
printk(KERN_NOTICE "%s: increasing Tx threshold, new tx cfg %8.8xh.\n",
dev->name, np->tx_config);
if (netif_msg_tx_err(np))
printk(KERN_NOTICE
"%s: increased Tx threshold, txcfg %#08x.\n",
dev->name, np->tx_config);
writel(np->tx_config, ioaddr + TxConfig);
}
if (intr_status & WOLPkt) {
if (intr_status & WOLPkt && netif_msg_wol(np)) {
int wol_status = readl(ioaddr + WOLCmd);
printk(KERN_NOTICE "%s: Link wake-up event %8.8x\n",
printk(KERN_NOTICE "%s: Link wake-up event %#08x\n",
dev->name, wol_status);
}
if (intr_status & RxStatusFIFOOver) {
if (debug >= 2) {
printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
if (netif_msg_rx_err(np) && netif_msg_intr(np)) {
printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
dev->name);
}
np->stats.rx_fifo_errors++;
}
/* Hmmmmm, it's not clear how to recover from PCI faults. */
if (intr_status & IntrPCIErr) {
if (debug) {
printk(KERN_NOTICE "%s: PCI error %08x\n", dev->name,
intr_status & IntrPCIErr);
}
printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
intr_status & IntrPCIErr);
np->stats.tx_fifo_errors++;
np->stats.rx_fifo_errors++;
}
......@@ -1761,7 +1855,8 @@ static void __set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
/* Unconditionally log net taps. */
printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
dev->name);
rx_mode = RxFilterEnable | AcceptBroadcast
| AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
} else if ((dev->mc_count > multicast_filter_limit)
......@@ -1896,7 +1991,7 @@ static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
/* get message-level */
case ETHTOOL_GMSGLVL: {
struct ethtool_value edata = {ETHTOOL_GMSGLVL};
edata.data = debug;
edata.data = np->msg_enable;
if (copy_to_user(useraddr, &edata, sizeof(edata)))
return -EFAULT;
return 0;
......@@ -1906,7 +2001,7 @@ static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
struct ethtool_value edata;
if (copy_from_user(&edata, useraddr, sizeof(edata)))
return -EFAULT;
debug = edata.data;
np->msg_enable = edata.data;
return 0;
}
/* restart autonegotiation */
......@@ -1941,6 +2036,9 @@ static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
return -EFAULT;
if (eeprom.offset > eeprom.offset+eeprom.len)
return -EINVAL;
if ((eeprom.offset+eeprom.len) > NATSEMI_EEPROM_SIZE) {
eeprom.len = NATSEMI_EEPROM_SIZE-eeprom.offset;
}
......@@ -2096,18 +2194,22 @@ static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
ecmd->supported =
(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg | SUPPORTED_TP);
SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);
/* only supports twisted-pair */
ecmd->port = PORT_TP;
/* only supports twisted-pair or MII */
tmp = readl(dev->base_addr + ChipConfig);
if (tmp & CfgExtPhy)
ecmd->port = PORT_MII;
else
ecmd->port = PORT_TP;
/* only supports internal transceiver */
ecmd->transceiver = XCVR_INTERNAL;
/* this isn't fully supported at higher layers */
/* not sure what this is for */
ecmd->phy_address = readw(dev->base_addr + PhyCtrl) & PhyAddrMask;
ecmd->advertising = ADVERTISED_TP;
ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
tmp = mdio_read(dev, 1, MII_ADVERTISE);
if (tmp & ADVERTISE_10HALF)
ecmd->advertising |= ADVERTISED_10baseT_Half;
......@@ -2118,20 +2220,21 @@ static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
if (tmp & ADVERTISE_100FULL)
ecmd->advertising |= ADVERTISED_100baseT_Full;
tmp = readl(dev->base_addr + ChipConfig);
if (tmp & CfgAnegEnable) {
tmp = mdio_read(dev, 1, MII_BMCR);
if (tmp & BMCR_ANENABLE) {
ecmd->advertising |= ADVERTISED_Autoneg;
ecmd->autoneg = AUTONEG_ENABLE;
} else {
ecmd->autoneg = AUTONEG_DISABLE;
}
tmp = readl(dev->base_addr + ChipConfig);
if (tmp & CfgSpeed100) {
ecmd->speed = SPEED_100;
} else {
ecmd->speed = SPEED_10;
}
if (tmp & CfgFullDuplex) {
ecmd->duplex = DUPLEX_FULL;
} else {
......@@ -2152,7 +2255,7 @@ static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
return -EINVAL;
if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
return -EINVAL;
if (ecmd->port != PORT_TP)
if (ecmd->port != PORT_TP && ecmd->port != PORT_MII)
return -EINVAL;
if (ecmd->transceiver != XCVR_INTERNAL)
return -EINVAL;
......@@ -2162,39 +2265,22 @@ static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
/* WHEW! now let's bang some bits */
tmp = mdio_read(dev, 1, MII_BMCR);
if (ecmd->autoneg == AUTONEG_ENABLE) {
/* advertise only what has been requested */
tmp = readl(dev->base_addr + ChipConfig);
tmp &= ~(CfgAneg100 | CfgAnegFull);
tmp |= CfgAnegEnable;
if (ecmd->advertising & ADVERTISED_100baseT_Half
|| ecmd->advertising & ADVERTISED_100baseT_Full) {
tmp |= CfgAneg100;
}
if (ecmd->advertising & ADVERTISED_10baseT_Full
|| ecmd->advertising & ADVERTISED_100baseT_Full) {
tmp |= CfgAnegFull;
}
writel(tmp, dev->base_addr + ChipConfig);
/* turn on autonegotiation, and force a renegotiate */
tmp = mdio_read(dev, 1, MII_BMCR);
tmp |= (BMCR_ANENABLE | BMCR_ANRESTART);
mdio_write(dev, 1, MII_BMCR, tmp);
/* turn on autonegotiation */
tmp |= BMCR_ANENABLE;
np->advertising = mdio_read(dev, 1, MII_ADVERTISE);
} else {
/* turn off auto negotiation, set speed and duplexity */
tmp = mdio_read(dev, 1, MII_BMCR);
tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
if (ecmd->speed == SPEED_100) {
if (ecmd->speed == SPEED_100)
tmp |= BMCR_SPEED100;
}
if (ecmd->duplex == DUPLEX_FULL) {
if (ecmd->duplex == DUPLEX_FULL)
tmp |= BMCR_FULLDPLX;
} else {
else
np->full_duplex = 0;
}
mdio_write(dev, 1, MII_BMCR, tmp);
}
mdio_write(dev, 1, MII_BMCR, tmp);
return 0;
}
......@@ -2229,7 +2315,7 @@ static int netdev_get_regs(struct net_device *dev, u8 *buf)
/* the interrupt status is clear-on-read - see if we missed any */
if (rbuf[4] & rbuf[5]) {
printk(KERN_WARNING
"%s: shoot, we dropped an interrupt (0x%x)\n",
"%s: shoot, we dropped an interrupt (%#08x)\n",
dev->name, rbuf[4] & rbuf[5]);
}
......@@ -2296,7 +2382,7 @@ static void enable_wol_mode(struct net_device *dev, int enable_intr)
long ioaddr = dev->base_addr;
struct netdev_private *np = dev->priv;
if (debug > 1)
if (netif_msg_wol(np))
printk(KERN_INFO "%s: remaining active for wake-on-lan\n",
dev->name);
......@@ -2319,7 +2405,8 @@ static void enable_wol_mode(struct net_device *dev, int enable_intr)
/* enable the WOL interrupt.
* Could be used to send a netlink message.
*/
writel(readl(ioaddr + IntrMask) | WOLPkt, ioaddr + IntrMask);
writel(WOLPkt | LinkChange, ioaddr + IntrMask);
writel(1, ioaddr + IntrEnable);
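/* In wake-on-lan mode only the WOL-packet and link-change sources stay
 * unmasked, matching the "enable only the WoL and PHY interrupts in wol
 * mode" note in the 1.0.16 changelog above. */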
}
}
......@@ -2331,12 +2418,15 @@ static int netdev_close(struct net_device *dev)
netif_stop_queue(dev);
netif_carrier_off(dev);
if (debug > 1) {
printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
dev->name, (int)readl(ioaddr + ChipCmd));
printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
}
if (netif_msg_ifdown(np))
printk(KERN_DEBUG
"%s: Shutting down ethercard, status was %#04x.\n",
dev->name, (int)readl(ioaddr + ChipCmd));
if (netif_msg_pktdata(np))
printk(KERN_DEBUG
"%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
dev->name, np->cur_tx, np->dirty_tx,
np->cur_rx, np->dirty_rx);
del_timer_sync(&np->timer);
......@@ -2375,7 +2465,7 @@ static int netdev_close(struct net_device *dev)
drain_ring(dev);
free_ring(dev);
{
{
u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
if (wol) {
/* restart the NIC in WOL mode.
......
......@@ -24,19 +24,27 @@
Version 1.02 (D-Link):
- Add new board to PCI ID list
- Fix multicast bug
Version 1.03 (D-Link):
- New Rx scheme, reduce Rx congestion
- Option to disable flow control
Version 1.04 (D-Link):
- Tx timeout recovery
- More support for ethtool.
*/
#define DRV_NAME "sundance"
#define DRV_VERSION "1.02"
#define DRV_RELDATE "17-Jan-2002"
#define DRV_VERSION "1.04"
#define DRV_RELDATE "19-Aug-2002"
/* The user-configurable values.
These may be modified when a driver module is loaded.*/
static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 30;
static int max_interrupt_work = 0;
static int mtu;
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
Typical is a 64 element hash table based on the Ethernet CRC. */
......@@ -47,6 +55,8 @@ static int multicast_filter_limit = 32;
This chip can receive into offset buffers, so the Alpha does not
need a copy-align. */
static int rx_copybreak;
static int tx_coalesce=1;
static int flowctrl=1;
/* media[] specifies the media type the NIC operates at.
autosense Autosensing active media.
......@@ -70,9 +80,10 @@ static char *media[MAX_UNITS];
bonding and packet priority, and more than 128 requires modifying the
Tx error recovery.
Large receive rings merely waste memory. */
#define TX_RING_SIZE 16
#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
#define RX_RING_SIZE 32
#define TX_RING_SIZE 64
#define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
#define RX_RING_SIZE 64
#define RX_BUDGET 32
#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
......@@ -107,13 +118,17 @@ static char *media[MAX_UNITS];
#include <linux/init.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <asm/uaccess.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/bitops.h>
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#ifndef _LOCAL_CRC32
#include <linux/crc32.h>
#else
#include "crc32.h"
#endif
/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
......@@ -129,10 +144,12 @@ MODULE_PARM(mtu, "i");
MODULE_PARM(debug, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(media, "1-" __MODULE_STRING(MAX_UNITS) "s");
MODULE_PARM(flowctrl, "i");
MODULE_PARM_DESC(max_interrupt_work, "Sundance Alta maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "Sundance Alta MTU (all boards)");
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
/*
Theory of Operation
......@@ -207,7 +224,6 @@ IVc. Errata
*/
enum pci_id_flags_bits {
/* Set PCI command register bits before calling probe1(). */
......@@ -290,20 +306,24 @@ static struct pci_id_info pci_id_tbl[] = {
enum alta_offsets {
DMACtrl = 0x00,
TxListPtr = 0x04,
TxDMACtrl = 0x08,
TxDescPoll = 0x0a,
TxDMABurstThresh = 0x08,
TxDMAUrgentThresh = 0x09,
TxDMAPollPeriod = 0x0a,
RxDMAStatus = 0x0c,
RxListPtr = 0x10,
RxDMACtrl = 0x14,
RxDescPoll = 0x16,
RxDMABurstThresh = 0x14,
RxDMAUrgentThresh = 0x15,
RxDMAPollPeriod = 0x16,
LEDCtrl = 0x1a,
ASICCtrl = 0x30,
EEData = 0x34,
EECtrl = 0x36,
TxThreshold = 0x3c,
TxStartThresh = 0x3c,
RxEarlyThresh = 0x3e,
FlashAddr = 0x40,
FlashData = 0x44,
TxStatus = 0x46,
TxFrameId = 0x47,
DownCounter = 0x18,
IntrClear = 0x4a,
IntrEnable = 0x4c,
......@@ -337,6 +357,16 @@ enum alta_offsets {
/* Aliased and bogus values! */
RxStatus = 0x0c,
};
enum ASICCtrl_HiWord_bit {
GlobalReset = 0x0001,
RxReset = 0x0002,
TxReset = 0x0004,
DMAReset = 0x0008,
FIFOReset = 0x0010,
NetworkReset = 0x0020,
HostReset = 0x0040,
ResetBusy = 0x0400,
};
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
......@@ -399,19 +429,20 @@ struct netdev_private {
struct timer_list timer; /* Media monitoring timer. */
/* Frequently used values: keep some adjacent for cache effect. */
spinlock_t lock;
spinlock_t rx_lock; /* Group with Tx control cache line. */
int chip_id, drv_flags;
unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
unsigned int rx_buf_sz; /* Based on MTU+slack. */
spinlock_t txlock; /* Group with Tx control cache line. */
struct netdev_desc *last_tx; /* Last Tx descriptor used. */
unsigned int cur_tx, dirty_tx;
unsigned int tx_full:1; /* The Tx queue is full. */
/* These values keep track of the transceiver/media in use. */
unsigned int full_duplex:1; /* Full-duplex operation requested. */
unsigned int medialock:1; /* Do not sense media. */
unsigned int flowctrl:1;
unsigned int default_port:4; /* Last dev->if_port value. */
unsigned int an_enable:1;
unsigned int speed;
struct tasklet_struct rx_tasklet;
int budget;
/* Multicast and receive mode. */
spinlock_t mcastlock; /* SMP lock multicast updates. */
u16 mcast_filter[4];
......@@ -424,6 +455,9 @@ struct netdev_private {
/* The station address location in the EEPROM. */
#define EEPROM_SA_OFFSET 0x10
#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
IntrDrvRqst | IntrTxDone | StatsMax | \
LinkChange)
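/* DEFAULT_INTR collects the interrupt sources that netdev_open() and
 * tx_timeout() previously listed inline; both now write this single mask
 * to IntrEnable (see the writew(DEFAULT_INTR, ...) calls below). */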
static int eeprom_read(long ioaddr, int location);
static int mdio_read(struct net_device *dev, int phy_id, int location);
......@@ -434,9 +468,11 @@ static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int start_tx(struct sk_buff *skb, struct net_device *dev);
static int reset_tx (struct net_device *dev, int irq);
static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static void rx_poll(unsigned long data);
static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static int netdev_rx(struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
......@@ -502,6 +538,7 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
np->pci_dev = pdev;
spin_lock_init(&np->lock);
tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
......@@ -582,6 +619,12 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
np->an_enable = 1;
}
}
if (tx_coalesce < 1)
tx_coalesce = 1;
else if (tx_coalesce > TX_QUEUE_LEN - 1)
tx_coalesce = TX_QUEUE_LEN - 1;
if (flowctrl == 0)
np->flowctrl = 0;
}
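/* tx_coalesce is clamped to [1, TX_QUEUE_LEN - 1]; a value of n requests a
 * Tx-done interrupt only on every n-th descriptor queued (see the
 * "entry % tx_coalesce" test in start_tx()). Hypothetical module usage,
 * assuming a tx_coalesce module parameter is exported alongside flowctrl:
 *
 *	modprobe sundance tx_coalesce=8 flowctrl=0
 */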
/* Fibre PHY? */
......@@ -742,7 +785,6 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
return;
}
static int netdev_open(struct net_device *dev)
{
struct netdev_private *np = dev->priv;
......@@ -779,14 +821,10 @@ static int netdev_open(struct net_device *dev)
writew(0, ioaddr + IntrEnable);
writew(0, ioaddr + DownCounter);
/* Set the chip to poll every N*320nsec. */
writeb(100, ioaddr + RxDescPoll);
writeb(127, ioaddr + TxDescPoll);
writeb(100, ioaddr + RxDMAPollPeriod);
writeb(127, ioaddr + TxDMAPollPeriod);
netif_start_queue(dev);
/* Enable interrupts by setting the interrupt mask. */
writew(IntrRxDone | IntrRxDMADone | IntrPCIErr | IntrDrvRqst | IntrTxDone
| StatsMax | LinkChange, ioaddr + IntrEnable);
writew(StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
if (debug > 2)
......@@ -802,6 +840,9 @@ static int netdev_open(struct net_device *dev)
np->timer.data = (unsigned long)dev;
np->timer.function = &netdev_timer; /* timer handler */
add_timer(&np->timer);
/* Enable interrupts by setting the interrupt mask. */
writew(DEFAULT_INTR, ioaddr + IntrEnable);
return 0;
}
......@@ -855,9 +896,12 @@ static void tx_timeout(struct net_device *dev)
{
struct netdev_private *np = dev->priv;
long ioaddr = dev->base_addr;
unsigned long flag;
printk(KERN_WARNING "%s: Transmit timed out, status %2.2x,"
" resetting...\n", dev->name, readb(ioaddr + TxStatus));
printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
"TxFrameId %2.2x,"
" resetting...\n", dev->name, readb(ioaddr + TxStatus),
readb(ioaddr + TxFrameId));
{
int i;
......@@ -866,22 +910,24 @@ static void tx_timeout(struct net_device *dev)
printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
printk(" %4.4x", np->tx_ring[i].status);
printk(" %8.8x", np->tx_ring[i].status);
printk("\n");
}
spin_lock_irqsave(&np->lock, flag);
reset_tx(dev, 0);
spin_unlock_irqrestore(&np->lock, flag);
/* Perhaps we should reinitialize the hardware here. */
dev->if_port = 0;
/* Stop and restart the chip's Tx processes. */
/* Trigger an immediate transmit demand. */
writew(IntrRxDone | IntrRxDMADone | IntrPCIErr | IntrDrvRqst | IntrTxDone
| StatsMax | LinkChange, ioaddr + IntrEnable);
writew(DEFAULT_INTR, ioaddr + IntrEnable);
dev->trans_start = jiffies;
np->stats.tx_errors++;
if (!np->tx_full)
if (!netif_queue_stopped(dev))
netif_wake_queue(dev);
}
......@@ -892,7 +938,6 @@ static void init_ring(struct net_device *dev)
struct netdev_private *np = dev->priv;
int i;
np->tx_full = 0;
np->cur_rx = np->cur_tx = 0;
np->dirty_rx = np->dirty_tx = 0;
......@@ -929,15 +974,16 @@ static void init_ring(struct net_device *dev)
return;
}
static int start_tx(struct sk_buff *skb, struct net_device *dev)
static int
start_tx (struct sk_buff *skb, struct net_device *dev)
{
struct netdev_private *np = dev->priv;
struct netdev_private *np = (struct netdev_private *) dev->priv;
struct netdev_desc *txdesc;
unsigned entry;
long ioaddr = dev->base_addr;
/* Note: Ordering is important here, set the field with the
"ownership" bit last, and only then increment cur_tx. */
/* Calculate the next Tx descriptor entry. */
entry = np->cur_tx % TX_RING_SIZE;
np->tx_skbuff[entry] = skb;
......@@ -945,11 +991,17 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
txdesc->next_desc = 0;
/* Note: disable the interrupt generation here before releasing. */
txdesc->status =
cpu_to_le32((entry<<2) | DescIntrOnDMADone | DescIntrOnTx | DisableAlign);
txdesc->frag[0].addr = cpu_to_le32(pci_map_single(np->pci_dev,
skb->data, skb->len, PCI_DMA_TODEVICE));
txdesc->frag[0].length = cpu_to_le32(skb->len | LastFrag);
if (entry % tx_coalesce == 0) {
txdesc->status = cpu_to_le32 ((entry << 2) |
DescIntrOnTx | DisableAlign);
} else {
txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
}
txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
skb->len,
PCI_DMA_TODEVICE));
txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
if (np->last_tx)
np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
entry*sizeof(struct netdev_desc));
......@@ -957,24 +1009,63 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
np->cur_tx++;
/* On some architectures: explicitly flush cache lines here. */
if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1) {
if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
&& !netif_queue_stopped(dev)) {
/* do nothing */
} else {
np->tx_full = 1;
netif_stop_queue(dev);
netif_stop_queue (dev);
}
/* Side effect: The read wakes the potentially-idle transmit channel. */
if (readl(dev->base_addr + TxListPtr) == 0)
writel(np->tx_ring_dma + entry*sizeof(*np->tx_ring),
if (readl (dev->base_addr + TxListPtr) == 0)
writel (np->tx_ring_dma + entry*sizeof(*np->tx_ring),
dev->base_addr + TxListPtr);
dev->trans_start = jiffies;
if (debug > 4) {
printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
dev->name, np->cur_tx, entry);
printk (KERN_DEBUG
"%s: Transmit frame #%d queued in slot %d.\n",
dev->name, np->cur_tx, entry);
}
if (tx_coalesce > 1)
writel (1000, ioaddr + DownCounter);
return 0;
}
static int
reset_tx (struct net_device *dev, int irq)
{
struct netdev_private *np = (struct netdev_private*) dev->priv;
long ioaddr = dev->base_addr;
int i;
int frame_id;
frame_id = readb(ioaddr + TxFrameId);
writew (TxReset | DMAReset | FIFOReset | NetworkReset,
ioaddr + ASICCtrl + 2);
for (i=50; i > 0; i--) {
if ((readw(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
break;
mdelay(1);
}
for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
int entry = np->dirty_tx % TX_RING_SIZE;
struct sk_buff *skb;
if (!(np->tx_ring[entry].status & 0x00010000))
break;
skb = np->tx_skbuff[entry];
/* Free the original skb. */
pci_unmap_single(np->pci_dev,
np->tx_ring[entry].frag[0].addr,
skb->len, PCI_DMA_TODEVICE);
if (irq)
dev_kfree_skb_irq (np->tx_skbuff[entry]);
else
dev_kfree_skb (np->tx_skbuff[entry]);
np->tx_skbuff[entry] = 0;
}
writel (np->tx_ring_dma + frame_id * sizeof(*np->tx_ring),
dev->base_addr + TxListPtr);
return 0;
}
......@@ -989,83 +1080,88 @@ static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
ioaddr = dev->base_addr;
np = dev->priv;
spin_lock(&np->lock);
do {
int intr_status = readw(ioaddr + IntrStatus);
writew(intr_status & (IntrRxDone | IntrRxDMADone | IntrPCIErr |
IntrDrvRqst | IntrTxDone | IntrTxDMADone | StatsMax |
LinkChange), ioaddr + IntrStatus);
writew(intr_status, ioaddr + IntrStatus);
if (debug > 4)
printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
dev->name, intr_status);
if (intr_status == 0)
if (!(intr_status & DEFAULT_INTR))
break;
if (intr_status & (IntrRxDone|IntrRxDMADone))
netdev_rx(dev);
if (intr_status & (IntrRxDMADone)) {
writew(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
ioaddr + IntrEnable);
if (np->budget < 0)
np->budget = RX_BUDGET;
tasklet_schedule(&np->rx_tasklet);
}
if (intr_status & IntrTxDone) {
if (intr_status & (IntrTxDone | IntrDrvRqst)) {
int boguscnt = 32;
int tx_status = readw(ioaddr + TxStatus);
int tx_status = readw (ioaddr + TxStatus);
while (tx_status & 0x80) {
if (debug > 4)
printk("%s: Transmit status is %2.2x.\n",
dev->name, tx_status);
printk
("%s: Transmit status is %2.2x.\n",
dev->name, tx_status);
if (tx_status & 0x1e) {
np->stats.tx_errors++;
if (tx_status & 0x10) np->stats.tx_fifo_errors++;
if (tx_status & 0x10)
np->stats.tx_fifo_errors++;
#ifdef ETHER_STATS
if (tx_status & 0x08) np->stats.collisions16++;
if (tx_status & 0x08)
np->stats.collisions16++;
#else
if (tx_status & 0x08) np->stats.collisions++;
if (tx_status & 0x08)
np->stats.collisions++;
#endif
if (tx_status & 0x04) np->stats.tx_fifo_errors++;
if (tx_status & 0x02) np->stats.tx_window_errors++;
if (tx_status & 0x02)
np->stats.tx_window_errors++;
/* This reset has not been verified! */
if (tx_status & 0x10) { /* Reset the Tx. */
writew(0x001c, ioaddr + ASICCtrl + 2);
#if 0 /* Do we need to reset the Tx pointer here? */
writel(np->tx_ring_dma
+ np->dirty_tx*sizeof(*np->tx_ring),
dev->base_addr + TxListPtr);
#endif
if (tx_status & 0x10) { /* Reset the Tx. */
np->stats.tx_fifo_errors++;
spin_lock(&np->lock);
reset_tx(dev, 1);
spin_unlock(&np->lock);
}
if (tx_status & 0x1e) /* Restart the Tx. */
writew(TxEnable, ioaddr + MACCtrl1);
if (tx_status & 0x1e) /* Restart the Tx. */
writew (TxEnable,
ioaddr + MACCtrl1);
}
/* Yup, this is a documentation bug. It cost me *hours*. */
writew(0, ioaddr + TxStatus);
tx_status = readb(ioaddr + TxStatus);
writew (0, ioaddr + TxStatus);
tx_status = readw (ioaddr + TxStatus);
if (--boguscnt < 0)
break;
}
}
spin_lock(&np->lock);
for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
int entry = np->dirty_tx % TX_RING_SIZE;
struct sk_buff *skb;
if ( ! (np->tx_ring[entry].status & 0x00010000))
if (!(np->tx_ring[entry].status & 0x00010000))
break;
skb = np->tx_skbuff[entry];
/* Free the original skb. */
pci_unmap_single(np->pci_dev,
np->tx_ring[entry].frag[0].addr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
dev_kfree_skb_irq (np->tx_skbuff[entry]);
np->tx_skbuff[entry] = 0;
}
if (np->tx_full
&& np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
spin_unlock(&np->lock);
if (netif_queue_stopped(dev) &&
np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
/* The ring is no longer full, clear tbusy. */
np->tx_full = 0;
netif_wake_queue(dev);
netif_wake_queue (dev);
}
/* Abnormal error summary/uncommon events handlers. */
if (intr_status & (IntrDrvRqst | IntrPCIErr | LinkChange | StatsMax))
if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
netdev_error(dev, intr_status);
if (--boguscnt < 0) {
get_stats(dev);
......@@ -1073,49 +1169,41 @@ static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
printk(KERN_WARNING "%s: Too much work at interrupt, "
"status=0x%4.4x / 0x%4.4x.\n",
dev->name, intr_status, readw(ioaddr + IntrClear));
/* Re-enable us in 3.2msec. */
writew(0, ioaddr + IntrEnable);
writew(1000, ioaddr + DownCounter);
writew(IntrDrvRqst, ioaddr + IntrEnable);
break;
}
} while (1);
if (debug > 3)
printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
dev->name, readw(ioaddr + IntrStatus));
if (np->cur_tx - np->dirty_tx > 0 && tx_coalesce > 1)
writel(100, ioaddr + DownCounter);
spin_unlock(&np->lock);
}
/* This routine is logically part of the interrupt handler, but separated
for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
static void rx_poll(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct netdev_private *np = dev->priv;
int entry = np->cur_rx % RX_RING_SIZE;
int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
if (debug > 4) {
printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
entry, np->rx_ring[entry].status);
}
int boguscnt = np->budget;
long ioaddr = dev->base_addr;
int received = 0;
/* If EOP is set on the next entry, it's a new packet. Send it up. */
while (1) {
struct netdev_desc *desc = &(np->rx_ring[entry]);
u32 frame_status;
u32 frame_status = le32_to_cpu(desc->status);
int pkt_len;
if (--boguscnt < 0) {
goto not_done;
}
if (!(desc->status & DescOwn))
break;
frame_status = le32_to_cpu(desc->status);
pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
if (debug > 4)
printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
frame_status);
if (--boguscnt < 0)
break;
pci_dma_sync_single(np->pci_dev, desc->frag[0].addr,
np->rx_buf_sz, PCI_DMA_FROMDEVICE);
......@@ -1136,7 +1224,6 @@ static int netdev_rx(struct net_device *dev)
}
} else {
struct sk_buff *skb;
#ifndef final_version
if (debug > 4)
printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
......@@ -1164,11 +1251,36 @@ static int netdev_rx(struct net_device *dev)
netif_rx(skb);
dev->last_rx = jiffies;
}
entry = (++np->cur_rx) % RX_RING_SIZE;
entry = (entry + 1) % RX_RING_SIZE;
received++;
}
np->cur_rx = entry;
refill_rx (dev);
np->budget -= received;
writew(DEFAULT_INTR, ioaddr + IntrEnable);
return;
not_done:
np->cur_rx = entry;
refill_rx (dev);
if (!received)
received = 1;
np->budget -= received;
if (np->budget <= 0)
np->budget = RX_BUDGET;
tasklet_schedule(&np->rx_tasklet);
return;
}
static void refill_rx (struct net_device *dev)
{
struct netdev_private *np = dev->priv;
int entry;
int cnt = 0;
/* Refill the Rx ring buffers. */
for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
struct sk_buff *skb;
entry = np->dirty_rx % RX_RING_SIZE;
if (np->rx_skbuff[entry] == NULL) {
......@@ -1186,56 +1298,51 @@ static int netdev_rx(struct net_device *dev)
np->rx_ring[entry].frag[0].length =
cpu_to_le32(np->rx_buf_sz | LastFrag);
np->rx_ring[entry].status = 0;
cnt++;
}
/* No need to restart Rx engine, it will poll. */
return 0;
return;
}
static void netdev_error(struct net_device *dev, int intr_status)
{
long ioaddr = dev->base_addr;
struct netdev_private *np = dev->priv;
u16 mii_ctl, mii_advertise, mii_lpa;
int speed;
if (intr_status & IntrDrvRqst) {
/* Stop the down counter and turn interrupts back on. */
if (debug > 1)
printk("%s: Turning interrupts back on.\n", dev->name);
writew(0, ioaddr + IntrEnable);
writew(0, ioaddr + DownCounter);
writew(IntrRxDone | IntrRxDMADone | IntrPCIErr | IntrDrvRqst |
IntrTxDone | StatsMax | LinkChange, ioaddr + IntrEnable);
/* Ack buggy InRequest */
writew (IntrDrvRqst, ioaddr + IntrStatus);
}
if (intr_status & LinkChange) {
if (np->an_enable) {
mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
mii_lpa= mdio_read (dev, np->phys[0], MII_LPA);
mii_advertise &= mii_lpa;
printk (KERN_INFO "%s: Link changed: ", dev->name);
if (mii_advertise & ADVERTISE_100FULL)
if (mii_advertise & ADVERTISE_100FULL) {
np->speed = 100;
printk ("100Mbps, full duplex\n");
else if (mii_advertise & ADVERTISE_100HALF)
} else if (mii_advertise & ADVERTISE_100HALF) {
np->speed = 100;
printk ("100Mbps, half duplex\n");
else if (mii_advertise & ADVERTISE_10FULL)
} else if (mii_advertise & ADVERTISE_10FULL) {
np->speed = 10;
printk ("10Mbps, full duplex\n");
else if (mii_advertise & ADVERTISE_10HALF)
} else if (mii_advertise & ADVERTISE_10HALF) {
np->speed = 10;
printk ("10Mbps, half duplex\n");
else
} else
printk ("\n");
} else {
mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
np->speed = speed;
printk (KERN_INFO "%s: Link changed: %dMbps, ",
dev->name, speed);
printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
"full" : "half");
}
check_duplex (dev);
if (np->flowctrl == 0)
writew(readw(ioaddr + MACCtrl0) & ~EnbFlowCtrl,
ioaddr + MACCtrl0);
}
if (intr_status & StatsMax) {
get_stats(dev);
......@@ -1319,24 +1426,136 @@ static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
{
struct netdev_private *np = dev->priv;
u32 ethcmd;
long ioaddr = dev->base_addr;
if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
return -EFAULT;
switch (ethcmd) {
case ETHTOOL_GDRVINFO: {
struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
strcpy(info.driver, DRV_NAME);
strcpy(info.version, DRV_VERSION);
strcpy(info.bus_info, np->pci_dev->slot_name);
if (copy_to_user(useraddr, &info, sizeof(info)))
case ETHTOOL_GDRVINFO: {
struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
strcpy(info.driver, DRV_NAME);
strcpy(info.version, DRV_VERSION);
strcpy(info.bus_info, np->pci_dev->slot_name);
memset(&info.fw_version, 0, sizeof(info.fw_version));
if (copy_to_user(useraddr, &info, sizeof(info)))
return -EFAULT;
return 0;
}
case ETHTOOL_GSET: {
struct ethtool_cmd cmd = { ETHTOOL_GSET };
if (readl (ioaddr + ASICCtrl) & 0x80) {
/* fiber device */
cmd.supported = SUPPORTED_Autoneg |
SUPPORTED_FIBRE;
cmd.advertising= ADVERTISED_Autoneg |
ADVERTISED_FIBRE;
cmd.port = PORT_FIBRE;
cmd.transceiver = XCVR_INTERNAL;
} else {
/* copper device */
cmd.supported = SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_MII;
cmd.advertising = ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full |
ADVERTISED_Autoneg |
ADVERTISED_MII;
cmd.port = PORT_MII;
cmd.transceiver = XCVR_INTERNAL;
}
if (readb(ioaddr + MIICtrl) & 0x80) {
cmd.speed = np->speed;
cmd.duplex = np->full_duplex ?
DUPLEX_FULL : DUPLEX_HALF;
} else {
cmd.speed = -1;
cmd.duplex = -1;
}
if ( np->an_enable)
cmd.autoneg = AUTONEG_ENABLE;
else
cmd.autoneg = AUTONEG_DISABLE;
cmd.phy_address = np->phys[0];
if (copy_to_user(useraddr, &cmd,
sizeof(cmd)))
return -EFAULT;
return 0;
}
case ETHTOOL_SSET: {
struct ethtool_cmd cmd;
if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
return -EFAULT;
netif_carrier_off(dev);
if (cmd.autoneg == AUTONEG_ENABLE) {
if (np->an_enable)
return 0;
else {
np->an_enable = 1;
/* Reset PHY */
mdio_write (dev, np->phys[0], MII_BMCR,
BMCR_RESET);
mdelay (300);
/* Start auto negotiation */
mdio_write (dev, np->phys[0], MII_BMCR,
BMCR_ANENABLE|BMCR_ANRESTART);
return 0;
}
} else {
/* Reset PHY */
mdio_write (dev, np->phys[0], MII_BMCR,
BMCR_RESET);
mdelay (300);
np->an_enable = 0;
switch(cmd.speed + cmd.duplex){
case SPEED_10 + DUPLEX_HALF:
np->speed = 10;
np->full_duplex = 0;
break;
case SPEED_10 + DUPLEX_FULL:
np->speed = 10;
np->full_duplex = 1;
break;
case SPEED_100 + DUPLEX_HALF:
np->speed = 100;
np->full_duplex = 0;
break;
case SPEED_100 + DUPLEX_FULL:
np->speed = 100;
np->full_duplex = 1;
break;
default:
return -EINVAL;
}
mdio_write (dev, np->phys[0], MII_BMCR,
((np->speed == 100) ? BMCR_SPEED100 : 0) |
((np->full_duplex) ? BMCR_FULLDPLX : 0) );
}
return 0;
}
#ifdef ETHTOOL_GLINK
case ETHTOOL_GLINK:{
struct ethtool_value link = { ETHTOOL_GLINK };
link.data = readb(ioaddr + MIICtrl) & 0x80;
if (copy_to_user(useraddr, &link, sizeof(link)))
return -EFAULT;
return 0;
}
}
#endif
default:
return -EOPNOTSUPP;
}
return -EOPNOTSUPP;
}
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
......@@ -1460,10 +1679,10 @@ static void __devexit sundance_remove1 (struct pci_dev *pdev)
}
static struct pci_driver sundance_driver = {
.name = DRV_NAME,
.id_table = sundance_pci_tbl,
.probe = sundance_probe1,
.remove = __devexit_p(sundance_remove1),
name: DRV_NAME,
id_table: sundance_pci_tbl,
probe: sundance_probe1,
remove: __devexit_p(sundance_remove1),
};
static int __init sundance_init(void)
......@@ -1482,3 +1701,5 @@ static void __exit sundance_exit(void)
module_init(sundance_init);
module_exit(sundance_exit);
......@@ -59,7 +59,7 @@ nbd_end_request(struct request *req)
blk_finished_io(nsect);
req->bio = bio->bi_next;
bio->bi_next = NULL;
bio_endio(bio, uptodate);
bio_endio(bio, nsect << 9, uptodate ? 0 : -EIO);
}
blk_put_request(req);
spin_unlock_irqrestore(q->queue_lock, flags);
......
......@@ -691,6 +691,8 @@ enum {
NETIF_MSG_TX_DONE = 0x0400,
NETIF_MSG_RX_STATUS = 0x0800,
NETIF_MSG_PKTDATA = 0x1000,
NETIF_MSG_HW = 0x2000,
NETIF_MSG_WOL = 0x4000,
};
#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
......@@ -706,6 +708,8 @@ enum {
#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
/* Schedule rx intr now? */
......
......@@ -55,8 +55,8 @@ struct irda_sock {
__u16 mask; /* Hint bits mask */
__u16 hints; /* Hint bits */
__u32 ckey; /* IrLMP client handle */
__u32 skey; /* IrLMP service handle */
void *ckey; /* IrLMP client handle */
void *skey; /* IrLMP service handle */
struct ias_object *ias_obj; /* Our service name + lsap in IAS */
struct iriap_cb *iriap; /* Used to query remote IAS */
......
......@@ -86,8 +86,8 @@ struct ircomm_tty_cb {
struct iriap_cb *iriap; /* Instance used for querying remote IAS */
struct ias_object* obj;
int skey;
int ckey;
void *skey;
void *ckey;
struct termios normal_termios;
struct termios callout_termios;
......@@ -104,6 +104,14 @@ struct ircomm_tty_cb {
long pgrp; /* pgrp of opening process */
int open_count;
int blocked_open; /* # of blocked opens */
/* Protect concurrent access to:
* o self->open_count
* o self->ctrl_skb
* o self->tx_skb
* Maybe other things would benefit from being protected as well...
* Jean II */
spinlock_t spinlock;
};
void ircomm_tty_start(struct tty_struct *tty);
......
......@@ -183,7 +183,6 @@ struct irlmp_cb {
hashbin_t *services;
hashbin_t *cachelog; /* Current discovery log */
spinlock_t log_lock; /* discovery log spinlock */
int running;
......@@ -197,12 +196,12 @@ struct lsap_cb *irlmp_open_lsap(__u8 slsap, notify_t *notify, __u8 pid);
void irlmp_close_lsap( struct lsap_cb *self);
__u16 irlmp_service_to_hint(int service);
__u32 irlmp_register_service(__u16 hints);
int irlmp_unregister_service(__u32 handle);
__u32 irlmp_register_client(__u16 hint_mask, DISCOVERY_CALLBACK1 disco_clb,
void *irlmp_register_service(__u16 hints);
int irlmp_unregister_service(void *handle);
void *irlmp_register_client(__u16 hint_mask, DISCOVERY_CALLBACK1 disco_clb,
DISCOVERY_CALLBACK1 expir_clb, void *priv);
int irlmp_unregister_client(__u32 handle);
int irlmp_update_client(__u32 handle, __u16 hint_mask,
int irlmp_unregister_client(void *handle);
int irlmp_update_client(void *handle, __u16 hint_mask,
DISCOVERY_CALLBACK1 disco_clb,
DISCOVERY_CALLBACK1 expir_clb, void *priv);
......@@ -221,7 +220,7 @@ void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason,
struct sk_buff *userdata);
int irlmp_disconnect_request(struct lsap_cb *, struct sk_buff *userdata);
void irlmp_discovery_confirm(hashbin_t *discovery_log, DISCOVERY_MODE);
void irlmp_discovery_confirm(hashbin_t *discovery_log, DISCOVERY_MODE mode);
void irlmp_discovery_request(int nslots);
struct irda_device_info *irlmp_get_discoveries(int *pn, __u16 mask, int nslots);
void irlmp_do_expiry(void);
......@@ -258,8 +257,6 @@ extern int sysctl_discovery;
extern int sysctl_lap_keepalive_time; /* in ms, default is LM_IDLE_TIMEOUT */
extern struct irlmp_cb *irlmp;
static inline hashbin_t *irlmp_get_cachelog(void) { return irlmp->cachelog; }
/* Check if LAP queue is full.
* Used by IrTTP for flow control, see comments in irlap.h - Jean II */
static inline int irlmp_lap_tx_queue_full(struct lsap_cb *self)
......
......@@ -36,12 +36,12 @@
#define NAME_SIZE 32
/*
* Hash types
* Hash types (some flags can be combined)
* See comments in irqueue.c for which one to use...
*/
#define HB_NOLOCK 0
#define HB_GLOBAL 1
#define HB_LOCAL 2
#define HB_SORTED 4
#define HB_NOLOCK 0 /* No concurrent access prevention */
#define HB_LOCK 1 /* Prevent concurrent write with global lock */
#define HB_SORTED 4 /* Not yet supported */
/*
* Hash defines
......@@ -57,17 +57,12 @@
typedef void (*FREE_FUNC)(void *arg);
/*
* Hashbin
*/
#define GET_HASHBIN(x) ( x & HASHBIN_MASK )
struct irda_queue {
struct irda_queue *q_next;
struct irda_queue *q_prev;
char q_name[NAME_SIZE];
__u32 q_hash;
long q_hash; /* Must be able to hold a (void *) */
};
typedef struct irda_queue irda_queue_t;
......@@ -75,8 +70,9 @@ typedef struct hashbin_t {
__u32 magic;
int hb_type;
int hb_size;
spinlock_t hb_mutex[HASHBIN_SIZE] IRDA_ALIGN;
irda_queue_t *hb_queue[HASHBIN_SIZE] IRDA_ALIGN;
spinlock_t hb_spinlock; /* HB_LOCK - Can be used by the user */
irda_queue_t* hb_queue[HASHBIN_SIZE] IRDA_ALIGN;
irda_queue_t* hb_current;
} hashbin_t;
......@@ -84,19 +80,18 @@ typedef struct hashbin_t {
hashbin_t *hashbin_new(int type);
int hashbin_delete(hashbin_t* hashbin, FREE_FUNC func);
int hashbin_clear(hashbin_t* hashbin, FREE_FUNC free_func);
void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, __u32 hashv,
void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv,
char* name);
void* hashbin_find(hashbin_t* hashbin, __u32 hashv, char* name);
void* hashbin_remove(hashbin_t* hashbin, __u32 hashv, char* name);
void* hashbin_remove(hashbin_t* hashbin, long hashv, char* name);
void* hashbin_remove_first(hashbin_t *hashbin);
void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry);
void* hashbin_find(hashbin_t* hashbin, long hashv, char* name);
void* hashbin_lock_find(hashbin_t* hashbin, long hashv, char* name);
void* hashbin_find_next(hashbin_t* hashbin, long hashv, char* name,
void ** pnext);
irda_queue_t *hashbin_get_first(hashbin_t *hashbin);
irda_queue_t *hashbin_get_next(hashbin_t *hashbin);
void enqueue_last(irda_queue_t **queue, irda_queue_t* element);
void enqueue_first(irda_queue_t **queue, irda_queue_t* element);
irda_queue_t *dequeue_first(irda_queue_t **queue);
#define HASHBIN_GET_SIZE(hashbin) hashbin->hb_size
#endif
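
The irqueue.h hunk above widens hashbin keys from __u32 to long (wide enough to hold a pointer) and replaces the per-bucket lock array with a single hb_spinlock that the hashbin takes itself when created with HB_LOCK. A rough usage sketch follows, assuming <linux/slab.h> and the modified <net/irda/irqueue.h>; the my_entry structure, my_bin and my_init() are invented for illustration and are not part of this patch — only the hashbin calls come from the declarations above.

	/* Illustrative sketch only -- my_entry/my_bin/my_init are made up.
	 * The irda_queue_t must be the first member so that the
	 * (irda_queue_t *) casts used throughout the IrDA code stay valid. */
	struct my_entry {
		irda_queue_t q;		/* queue header, must come first */
		int payload;
	};

	static hashbin_t *my_bin;

	static int my_init(void)
	{
		struct my_entry *e, *found;

		my_bin = hashbin_new(HB_LOCK);	/* hashbin grabs hb_spinlock itself */
		if (my_bin == NULL)
			return -ENOMEM;

		e = kmalloc(sizeof(*e), GFP_KERNEL);
		if (e == NULL)
			return -ENOMEM;
		e->payload = 42;

		/* Keys are now long, so a pointer can be used directly */
		hashbin_insert(my_bin, (irda_queue_t *) e, (long) e, NULL);

		/* Locked lookup: takes and releases hb_spinlock internally */
		found = (struct my_entry *) hashbin_lock_find(my_bin, (long) e, NULL);
		return found ? 0 : -ENOENT;
	}
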
......@@ -62,6 +62,9 @@ struct irtty_cb {
struct qos_info qos; /* QoS capabilities for this device */
dongle_t *dongle; /* Dongle driver */
spinlock_t lock; /* For serializing operations */
__u32 new_speed;
__u32 flags; /* Interface flags */
......
......@@ -165,7 +165,9 @@ struct ircc_cb {
struct irport_cb *irport;
spinlock_t lock; /* For serializing operations */
/* Locking : half of our operations are done with irport, so we
* use the irport spinlock to make sure *everything* is properly
* synchronised - Jean II */
__u32 new_speed;
__u32 flags; /* Interface flags */
......
......@@ -179,6 +179,11 @@ struct w83977af_ir {
chipio_t io; /* IrDA controller information */
iobuff_t tx_buff; /* Transmit buffer */
iobuff_t rx_buff; /* Receive buffer */
/* Note : currently locking is *very* incomplete, but this
* will get you started. Check in nsc-ircc.c for a proper
* locking strategy. - Jean II */
spinlock_t lock; /* For serializing operations */
__u32 flags; /* Interface flags */
__u32 new_speed;
......
......@@ -61,7 +61,7 @@ void irlmp_add_discovery(hashbin_t *cachelog, discovery_t *new)
/* Set time of first discovery if node is new (see below) */
new->first_timestamp = new->timestamp;
spin_lock_irqsave(&irlmp->log_lock, flags);
spin_lock_irqsave(&cachelog->hb_spinlock, flags);
/*
* Remove all discoveries of devices that have previously been
......@@ -95,13 +95,13 @@ void irlmp_add_discovery(hashbin_t *cachelog, discovery_t *new)
/* Insert the new and updated version */
hashbin_insert(cachelog, (irda_queue_t *) new, new->daddr, NULL);
spin_unlock_irqrestore(&irlmp->log_lock, flags);
spin_unlock_irqrestore(&cachelog->hb_spinlock, flags);
}
/*
* Function irlmp_add_discovery_log (cachelog, log)
*
* Merge a disovery log into the cachlog.
* Merge a discovery log into the cachelog.
*
*/
void irlmp_add_discovery_log(hashbin_t *cachelog, hashbin_t *log)
......@@ -115,11 +115,17 @@ void irlmp_add_discovery_log(hashbin_t *cachelog, hashbin_t *log)
* discovery, so restart discovery again with just the half timeout
* of the normal one.
*/
/* Well... It means that there was nobody out there - Jean II */
if (log == NULL) {
/* irlmp_start_discovery_timer(irlmp, 150); */
return;
}
/*
* Locking : we are the only owner of this discovery log, so
* no need to lock it.
* We just need to lock the global log in irlmp_add_discovery().
*/
discovery = (discovery_t *) hashbin_remove_first(log);
while (discovery != NULL) {
irlmp_add_discovery(cachelog, discovery);
......@@ -146,7 +152,7 @@ void irlmp_expire_discoveries(hashbin_t *log, __u32 saddr, int force)
IRDA_DEBUG(4, __FUNCTION__ "()\n");
spin_lock_irqsave(&irlmp->log_lock, flags);
spin_lock_irqsave(&log->hb_spinlock, flags);
discovery = (discovery_t *) hashbin_get_first(log);
while (discovery != NULL) {
......@@ -169,7 +175,7 @@ void irlmp_expire_discoveries(hashbin_t *log, __u32 saddr, int force)
}
}
spin_unlock_irqrestore(&irlmp->log_lock, flags);
spin_unlock_irqrestore(&log->hb_spinlock, flags);
}
/*
......@@ -230,13 +236,13 @@ struct irda_device_info *irlmp_copy_discoveries(hashbin_t *log, int *pn, __u16 m
return NULL;
/* Save spin lock - spinlock should be discovery specific */
spin_lock_irqsave(&irlmp->log_lock, flags);
spin_lock_irqsave(&log->hb_spinlock, flags);
/* Create the client specific buffer */
n = HASHBIN_GET_SIZE(log);
buffer = kmalloc(n * sizeof(struct irda_device_info), GFP_ATOMIC);
if (buffer == NULL) {
spin_unlock_irqrestore(&irlmp->log_lock, flags);
spin_unlock_irqrestore(&log->hb_spinlock, flags);
return NULL;
}
......@@ -257,7 +263,7 @@ struct irda_device_info *irlmp_copy_discoveries(hashbin_t *log, int *pn, __u16 m
discovery = (discovery_t *) hashbin_get_next(log);
}
spin_unlock_irqrestore(&irlmp->log_lock, flags);
spin_unlock_irqrestore(&log->hb_spinlock, flags);
/* Get the actual number of device in the buffer and return */
*pn = i;
......@@ -276,7 +282,7 @@ __u32 irlmp_find_device(hashbin_t *cachelog, char *name, __u32 *saddr)
unsigned long flags;
discovery_t *d;
spin_lock_irqsave(&irlmp->log_lock, flags);
spin_lock_irqsave(&cachelog->hb_spinlock, flags);
/* Look at all discoveries for that link */
d = (discovery_t *) hashbin_get_first(cachelog);
......@@ -288,13 +294,13 @@ __u32 irlmp_find_device(hashbin_t *cachelog, char *name, __u32 *saddr)
if (strcmp(name, d->nickname) == 0) {
*saddr = d->saddr;
spin_unlock_irqrestore(&irlmp->log_lock, flags);
spin_unlock_irqrestore(&cachelog->hb_spinlock, flags);
return d->daddr;
}
d = (discovery_t *) hashbin_get_next(cachelog);
}
spin_unlock_irqrestore(&irlmp->log_lock, flags);
spin_unlock_irqrestore(&cachelog->hb_spinlock, flags);
return 0;
}
......@@ -310,7 +316,7 @@ int discovery_proc_read(char *buf, char **start, off_t offset, int length,
{
discovery_t *discovery;
unsigned long flags;
hashbin_t *cachelog = irlmp_get_cachelog();
hashbin_t *cachelog = irlmp->cachelog;
int len = 0;
if (!irlmp)
......@@ -318,7 +324,7 @@ int discovery_proc_read(char *buf, char **start, off_t offset, int length,
len = sprintf(buf, "IrLMP: Discovery log:\n\n");
spin_lock_irqsave(&irlmp->log_lock, flags);
spin_lock_irqsave(&cachelog->hb_spinlock, flags);
discovery = (discovery_t *) hashbin_get_first(cachelog);
while (( discovery != NULL) && (len < length)) {
......@@ -362,7 +368,7 @@ int discovery_proc_read(char *buf, char **start, off_t offset, int length,
discovery = (discovery_t *) hashbin_get_next(cachelog);
}
spin_unlock_irqrestore(&irlmp->log_lock, flags);
spin_unlock_irqrestore(&cachelog->hb_spinlock, flags);
return len;
}
......@@ -61,7 +61,7 @@ hashbin_t *ircomm = NULL;
int __init ircomm_init(void)
{
ircomm = hashbin_new(HB_LOCAL);
ircomm = hashbin_new(HB_LOCK);
if (ircomm == NULL) {
ERROR(__FUNCTION__ "(), can't allocate hashbin!\n");
return -ENOMEM;
......@@ -505,11 +505,10 @@ int ircomm_proc_read(char *buf, char **start, off_t offset, int len)
struct ircomm_cb *self;
unsigned long flags;
save_flags(flags);
cli();
len = 0;
spin_lock_irqsave(&ircomm->hb_spinlock, flags);
self = (struct ircomm_cb *) hashbin_get_first(ircomm);
while (self != NULL) {
ASSERT(self->magic == IRCOMM_MAGIC, break;);
......@@ -535,7 +534,7 @@ int ircomm_proc_read(char *buf, char **start, off_t offset, int len)
self = (struct ircomm_cb *) hashbin_get_next(ircomm);
}
restore_flags(flags);
spin_unlock_irqrestore(&ircomm->hb_spinlock, flags);
return len;
}
......
......@@ -177,7 +177,7 @@ void ircomm_lmp_flow_control(struct sk_buff *skb)
line = cb->line;
self = (struct ircomm_cb *) hashbin_find(ircomm, line, NULL);
self = (struct ircomm_cb *) hashbin_lock_find(ircomm, line, NULL);
if (!self) {
IRDA_DEBUG(2, __FUNCTION__ "(), didn't find myself\n");
return;
......
......@@ -99,6 +99,8 @@ pi_param_info_t ircomm_param_info = { pi_major_call_table, 3, 0x0f, 4 };
*/
int ircomm_param_flush(struct ircomm_tty_cb *self)
{
/* we should lock here, but I guess this function is unused...
* Jean II */
if (self->ctrl_skb) {
ircomm_control_request(self->ircomm, self->ctrl_skb);
self->ctrl_skb = NULL;
......@@ -132,14 +134,13 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush)
if (self->service_type == IRCOMM_3_WIRE_RAW)
return 0;
save_flags(flags);
cli();
spin_lock_irqsave(&self->spinlock, flags);
skb = self->ctrl_skb;
if (!skb) {
skb = dev_alloc_skb(256);
if (!skb) {
restore_flags(flags);
spin_unlock_irqrestore(&self->spinlock, flags);
return -ENOMEM;
}
......@@ -154,12 +155,12 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush)
&ircomm_param_info);
if (count < 0) {
WARNING(__FUNCTION__ "(), no room for parameter!\n");
restore_flags(flags);
spin_unlock_irqrestore(&self->spinlock, flags);
return -1;
}
skb_put(skb, count);
restore_flags(flags);
spin_unlock_irqrestore(&self->spinlock, flags);
IRDA_DEBUG(2, __FUNCTION__ "(), skb->len=%d\n", skb->len);
......
......@@ -90,7 +90,7 @@ hashbin_t *ircomm_tty = NULL;
*/
int __init ircomm_tty_init(void)
{
ircomm_tty = hashbin_new(HB_LOCAL);
ircomm_tty = hashbin_new(HB_LOCK);
if (ircomm_tty == NULL) {
ERROR(__FUNCTION__ "(), can't allocate hashbin!\n");
return -ENOMEM;
......@@ -308,22 +308,25 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
__FILE__,__LINE__, tty->driver.name, self->open_count );
save_flags(flags); cli();
/* As far as I can see, we protect open_count - Jean II */
spin_lock_irqsave(&self->spinlock, flags);
if (!tty_hung_up_p(filp)) {
extra_count = 1;
self->open_count--;
}
restore_flags(flags);
spin_unlock_irqrestore(&self->spinlock, flags);
self->blocked_open++;
while (1) {
if (!(self->flags & ASYNC_CALLOUT_ACTIVE) &&
(tty->termios->c_cflag & CBAUD)) {
save_flags(flags); cli();
/* Here, we used to lock those two guys, but
* as ircomm_param_request() does it itself,
* I don't see the point (and I see the deadlock).
* Jean II */
self->settings.dte |= IRCOMM_RTS + IRCOMM_DTR;
ircomm_param_request(self, IRCOMM_DTE, TRUE);
restore_flags(flags);
}
current->state = TASK_INTERRUPTIBLE;
......@@ -361,8 +364,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
__set_current_state(TASK_RUNNING);
remove_wait_queue(&self->open_wait, &wait);
if (extra_count)
if (extra_count) {
/* ++ is not atomic, so this should be protected - Jean II */
spin_lock_irqsave(&self->spinlock, flags);
self->open_count++;
spin_unlock_irqrestore(&self->spinlock, flags);
}
self->blocked_open--;
IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
......@@ -385,6 +392,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
{
struct ircomm_tty_cb *self;
unsigned int line;
unsigned long flags;
int ret;
IRDA_DEBUG(2, __FUNCTION__ "()\n");
......@@ -397,7 +405,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
}
/* Check if instance already exists */
self = hashbin_find(ircomm_tty, line, NULL);
self = hashbin_lock_find(ircomm_tty, line, NULL);
if (!self) {
/* No, so make new instance */
self = kmalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL);
......@@ -423,6 +431,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
init_timer(&self->watchdog_timer);
init_waitqueue_head(&self->open_wait);
init_waitqueue_head(&self->close_wait);
spin_lock_init(&self->spinlock);
/*
* Force TTY into raw mode by default which is usually what
......@@ -435,10 +444,13 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
/* Insert into hash */
hashbin_insert(ircomm_tty, (irda_queue_t *) self, line, NULL);
}
/* ++ is not atomic, so this should be protected - Jean II */
spin_lock_irqsave(&self->spinlock, flags);
self->open_count++;
tty->driver_data = self;
self->tty = tty;
spin_unlock_irqrestore(&self->spinlock, flags);
IRDA_DEBUG(1, __FUNCTION__"(), %s%d, count = %d\n", tty->driver.name,
self->line, self->open_count);
......@@ -526,12 +538,11 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
ASSERT(self != NULL, return;);
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
save_flags(flags);
cli();
spin_lock_irqsave(&self->spinlock, flags);
if (tty_hung_up_p(filp)) {
MOD_DEC_USE_COUNT;
restore_flags(flags);
spin_unlock_irqrestore(&self->spinlock, flags);
IRDA_DEBUG(0, __FUNCTION__ "(), returning 1\n");
return;
......@@ -559,13 +570,19 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
}
if (self->open_count) {
MOD_DEC_USE_COUNT;
restore_flags(flags);
spin_unlock_irqrestore(&self->spinlock, flags);
IRDA_DEBUG(0, __FUNCTION__ "(), open count > 0\n");
return;
}
self->flags |= ASYNC_CLOSING;
/* We need to unlock here (we used to unlock at the end of this
* function), because tty_wait_until_sent() may schedule.
* I don't know if the rest should be locked somehow,
* so someone should check. - Jean II */
spin_unlock_irqrestore(&self->spinlock, flags);
/*
* Now we wait for the transmit buffer to clear; and we notify
* the line discipline to only process XON/XOFF characters.
......@@ -597,7 +614,6 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
wake_up_interruptible(&self->close_wait);
MOD_DEC_USE_COUNT;
restore_flags(flags);
}
/*
......@@ -645,13 +661,12 @@ static void ircomm_tty_do_softint(void *private_)
return;
/* Unlink control buffer */
save_flags(flags);
cli();
spin_lock_irqsave(&self->spinlock, flags);
ctrl_skb = self->ctrl_skb;
self->ctrl_skb = NULL;
restore_flags(flags);
spin_unlock_irqrestore(&self->spinlock, flags);
/* Flush control buffer if any */
if (ctrl_skb && self->flow == FLOW_START)
......@@ -661,13 +676,12 @@ static void ircomm_tty_do_softint(void *private_)
return;
/* Unlink transmit buffer */
save_flags(flags);
cli();
spin_lock_irqsave(&self->spinlock, flags);
skb = self->tx_skb;
self->tx_skb = NULL;
restore_flags(flags);
spin_unlock_irqrestore(&self->spinlock, flags);
/* Flush transmit buffer if any */
if (skb)
......@@ -720,8 +734,7 @@ static int ircomm_tty_write(struct tty_struct *tty, int from_user,
return len;
}
save_flags(flags);
cli();
spin_lock_irqsave(&self->spinlock, flags);
/* Fetch current transmit buffer */
skb = self->tx_skb;
......@@ -768,7 +781,7 @@ static int ircomm_tty_write(struct tty_struct *tty, int from_user,
skb = dev_alloc_skb(self->max_data_size+
self->max_header_size);
if (!skb) {
restore_flags(flags);
spin_unlock_irqrestore(&self->spinlock, flags);
return -ENOBUFS;
}
skb_reserve(skb, self->max_header_size);
......@@ -785,7 +798,7 @@ static int ircomm_tty_write(struct tty_struct *tty, int from_user,
len += size;
}
restore_flags(flags);
spin_unlock_irqrestore(&self->spinlock, flags);
/*
* Schedule a new thread which will transmit the frame as soon
......@@ -824,13 +837,12 @@ static int ircomm_tty_write_room(struct tty_struct *tty)
(self->max_header_size == IRCOMM_TTY_HDR_UNITIALISED))
ret = 0;
else {
save_flags(flags);
cli();
spin_lock_irqsave(&self->spinlock, flags);
if (self->tx_skb)
ret = self->max_data_size - self->tx_skb->len;
else
ret = self->max_data_size;
restore_flags(flags);
spin_unlock_irqrestore(&self->spinlock, flags);
}
IRDA_DEBUG(2, __FUNCTION__ "(), ret=%d\n", ret);
......@@ -946,13 +958,12 @@ static int ircomm_tty_chars_in_buffer(struct tty_struct *tty)
ASSERT(self != NULL, return -1;);
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
save_flags(flags);
cli();
spin_lock_irqsave(&self->spinlock, flags);
if (self->tx_skb)
len = self->tx_skb->len;
restore_flags(flags);
spin_unlock_irqrestore(&self->spinlock, flags);
return len;
}
......@@ -969,8 +980,7 @@ static void ircomm_tty_shutdown(struct ircomm_tty_cb *self)
if (!(self->flags & ASYNC_INITIALIZED))
return;
save_flags(flags);
cli();
spin_lock_irqsave(&self->spinlock, flags);
del_timer(&self->watchdog_timer);
......@@ -994,7 +1004,7 @@ static void ircomm_tty_shutdown(struct ircomm_tty_cb *self)
}
self->flags &= ~ASYNC_INITIALIZED;
restore_flags(flags);
spin_unlock_irqrestore(&self->spinlock, flags);
}
/*
......@@ -1007,6 +1017,7 @@ static void ircomm_tty_shutdown(struct ircomm_tty_cb *self)
static void ircomm_tty_hangup(struct tty_struct *tty)
{
struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data;
unsigned long flags;
IRDA_DEBUG(0, __FUNCTION__"()\n");
......@@ -1019,9 +1030,13 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
/* ircomm_tty_flush_buffer(tty); */
ircomm_tty_shutdown(self);
/* I guess we need to lock here - Jean II */
spin_lock_irqsave(&self->spinlock, flags);
self->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CALLOUT_ACTIVE);
self->tty = 0;
self->open_count = 0;
spin_unlock_irqrestore(&self->spinlock, flags);
wake_up_interruptible(&self->open_wait);
}
......@@ -1362,11 +1377,14 @@ static int ircomm_tty_read_proc(char *buf, char **start, off_t offset, int len,
struct ircomm_tty_cb *self;
int count = 0, l;
off_t begin = 0;
unsigned long flags;
spin_lock_irqsave(&ircomm_tty->hb_spinlock, flags);
self = (struct ircomm_tty_cb *) hashbin_get_first(ircomm_tty);
while ((self != NULL) && (count < 4000)) {
if (self->magic != IRCOMM_TTY_MAGIC)
return 0;
break;
l = ircomm_tty_line_info(self, buf + count);
count += l;
......@@ -1381,6 +1399,8 @@ static int ircomm_tty_read_proc(char *buf, char **start, off_t offset, int len,
}
*eof = 1;
done:
spin_unlock_irqrestore(&ircomm_tty->hb_spinlock, flags);
if (offset >= count+begin)
return 0;
*start = buf + (offset-begin);
......
......@@ -331,6 +331,8 @@ static void ircomm_tty_discovery_indication(discovery_t *discovery,
info.daddr = discovery->daddr;
info.saddr = discovery->saddr;
/* FIXME. We probably need to use hashbin_find_next(), but we first
* need to ensure that "line" is unique. - Jean II */
self = (struct ircomm_tty_cb *) hashbin_get_first(ircomm_tty);
while (self != NULL) {
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
......
......@@ -91,13 +91,13 @@ int irda_device_proc_read(char *buf, char **start, off_t offset, int len,
int __init irda_device_init( void)
{
dongles = hashbin_new(HB_GLOBAL);
dongles = hashbin_new(HB_LOCK);
if (dongles == NULL) {
printk(KERN_WARNING "IrDA: Can't allocate dongles hashbin!\n");
return -ENOMEM;
}
tasks = hashbin_new(HB_GLOBAL);
tasks = hashbin_new(HB_LOCK);
if (tasks == NULL) {
printk(KERN_WARNING "IrDA: Can't allocate tasks hashbin!\n");
return -ENOMEM;
......@@ -438,7 +438,7 @@ dongle_t *irda_device_dongle_init(struct net_device *dev, int type)
}
#endif
if (!(reg = hashbin_find(dongles, type, NULL))) {
if (!(reg = hashbin_lock_find(dongles, type, NULL))) {
ERROR("IrDA: Unable to find requested dongle\n");
return NULL;
}
......@@ -477,7 +477,7 @@ int irda_device_dongle_cleanup(dongle_t *dongle)
int irda_device_register_dongle(struct dongle_reg *new)
{
/* Check if this dongle has been registered before */
if (hashbin_find(dongles, new->type, NULL)) {
if (hashbin_lock_find(dongles, new->type, NULL)) {
MESSAGE("%s: Dongle already registered\n", __FUNCTION__);
return 0;
}
......
......@@ -58,7 +58,7 @@ static const char *ias_charset_types[] = {
#endif /* CONFIG_IRDA_DEBUG */
static hashbin_t *iriap = NULL;
static __u32 service_handle;
static void *service_handle;
extern char *lmp_reasons[];
......@@ -91,11 +91,12 @@ int __init iriap_init(void)
__u16 hints;
/* Allocate master array */
iriap = hashbin_new(HB_LOCAL);
iriap = hashbin_new(HB_LOCK);
if (!iriap)
return -ENOMEM;
objects = hashbin_new(HB_LOCAL);
/* Object repository - defined in irias_object.c */
objects = hashbin_new(HB_LOCK);
if (!objects) {
WARNING("%s: Can't allocate objects hashbin!\n", __FUNCTION__);
return -ENOMEM;
......@@ -182,7 +183,7 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv,
init_timer(&self->watchdog_timer);
hashbin_insert(iriap, (irda_queue_t *) self, (int) self, NULL);
hashbin_insert(iriap, (irda_queue_t *) self, (long) self, NULL);
/* Initialize state machines */
iriap_next_client_state(self, S_DISCONNECT);
......@@ -235,7 +236,7 @@ void iriap_close(struct iriap_cb *self)
self->lsap = NULL;
}
entry = (struct iriap_cb *) hashbin_remove(iriap, (int) self, NULL);
entry = (struct iriap_cb *) hashbin_remove(iriap, (long) self, NULL);
ASSERT(entry == self, return;);
__iriap_close(self);
......@@ -973,13 +974,12 @@ int irias_proc_read(char *buf, char **start, off_t offset, int len)
ASSERT( objects != NULL, return 0;);
save_flags( flags);
cli();
len = 0;
len += sprintf(buf+len, "LM-IAS Objects:\n");
spin_lock_irqsave(&objects->hb_spinlock, flags);
/* List all objects */
obj = (struct ias_object *) hashbin_get_first(objects);
while ( obj != NULL) {
......@@ -989,6 +989,11 @@ int irias_proc_read(char *buf, char **start, off_t offset, int len)
len += sprintf(buf+len, "id=%d", obj->id);
len += sprintf(buf+len, "\n");
/* Careful for priority inversions here!
* All other uses of attrib spinlock are independent of
* the object spinlock, so we are safe. Jean II */
spin_lock(&obj->attribs->hb_spinlock);
/* List all attributes for this object */
attrib = (struct ias_attrib *)
hashbin_get_first(obj->attribs);
......@@ -1025,9 +1030,11 @@ int irias_proc_read(char *buf, char **start, off_t offset, int len)
attrib = (struct ias_attrib *)
hashbin_get_next(obj->attribs);
}
spin_unlock(&obj->attribs->hb_spinlock);
obj = (struct ias_object *) hashbin_get_next(objects);
}
restore_flags(flags);
spin_unlock_irqrestore(&objects->hb_spinlock, flags);
return len;
}
......
......@@ -93,7 +93,10 @@ struct ias_object *irias_new_object( char *name, int id)
obj->name = strndup(name, IAS_MAX_CLASSNAME);
obj->id = id;
obj->attribs = hashbin_new(HB_LOCAL);
/* Locking notes: the attrib spinlock has lower precedence
* than the objects spinlock. Never grab the objects spinlock
* while holding any attrib spinlock (risk of deadlock). Jean II */
obj->attribs = hashbin_new(HB_LOCK);
return obj;
}
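
The lock-ordering note added above matches the nesting already used in the irias_proc_read hunk earlier in this patch: the global objects hashbin lock is the outer lock, each object's attribs lock is the inner one, and the reverse order is never taken. A minimal sketch of that nesting follows; walk_objects() and its body are invented purely to show the order, only the locks and accessors come from the patch.

	/* Illustration only (walk_objects is not part of the patch): always
	 * take the global objects lock before any per-object attribs lock. */
	static void walk_objects(void)
	{
		struct ias_object *obj;
		unsigned long flags;

		spin_lock_irqsave(&objects->hb_spinlock, flags);	/* outer lock */
		obj = (struct ias_object *) hashbin_get_first(objects);
		while (obj != NULL) {
			spin_lock(&obj->attribs->hb_spinlock);		/* inner lock */
			/* ... read obj->attribs under both locks ... */
			spin_unlock(&obj->attribs->hb_spinlock);
			obj = (struct ias_object *) hashbin_get_next(objects);
		}
		spin_unlock_irqrestore(&objects->hb_spinlock, flags);
	}
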
......@@ -147,7 +150,7 @@ int irias_delete_object(struct ias_object *obj)
ASSERT(obj != NULL, return -1;);
ASSERT(obj->magic == IAS_OBJECT_MAGIC, return -1;);
node = hashbin_remove(objects, 0, obj->name);
node = hashbin_remove_this(objects, (irda_queue_t *) obj);
if (!node)
return 0; /* Already removed */
......@@ -172,7 +175,7 @@ int irias_delete_attrib(struct ias_object *obj, struct ias_attrib *attrib)
ASSERT(attrib != NULL, return -1;);
/* Remove attribute from object */
node = hashbin_remove(obj->attribs, 0, attrib->name);
node = hashbin_remove_this(obj->attribs, (irda_queue_t *) attrib);
if (!node)
return 0; /* Already removed or non-existent */
......@@ -211,7 +214,8 @@ struct ias_object *irias_find_object(char *name)
{
ASSERT(name != NULL, return NULL;);
return hashbin_find(objects, 0, name);
/* Unsafe (locking), object might change */
return hashbin_lock_find(objects, 0, name);
}
/*
......@@ -228,10 +232,11 @@ struct ias_attrib *irias_find_attrib(struct ias_object *obj, char *name)
ASSERT(obj->magic == IAS_OBJECT_MAGIC, return NULL;);
ASSERT(name != NULL, return NULL;);
attrib = hashbin_find(obj->attribs, 0, name);
attrib = hashbin_lock_find(obj->attribs, 0, name);
if (attrib == NULL)
return NULL;
/* Unsafe (locking), attrib might change */
return attrib;
}
......@@ -267,26 +272,32 @@ int irias_object_change_attribute(char *obj_name, char *attrib_name,
{
struct ias_object *obj;
struct ias_attrib *attrib;
unsigned long flags;
/* Find object */
obj = hashbin_find(objects, 0, obj_name);
obj = hashbin_lock_find(objects, 0, obj_name);
if (obj == NULL) {
WARNING("%s: Unable to find object: %s\n", __FUNCTION__,
obj_name);
return -1;
}
/* Slightly unsafe (obj might get removed under us) */
spin_lock_irqsave(&obj->attribs->hb_spinlock, flags);
/* Find attribute */
attrib = hashbin_find(obj->attribs, 0, attrib_name);
if (attrib == NULL) {
WARNING("%s: Unable to find attribute: %s\n", __FUNCTION__,
attrib_name);
spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags);
return -1;
}
if ( attrib->value->type != new_value->type) {
IRDA_DEBUG( 0, __FUNCTION__
"(), changing value type not allowed!\n");
spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags);
return -1;
}
......@@ -297,6 +308,7 @@ int irias_object_change_attribute(char *obj_name, char *attrib_name,
attrib->value = new_value;
/* Success */
spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags);
return 0;
}
......
......@@ -124,7 +124,7 @@ int __init irlan_init(void)
IRDA_DEBUG(0, __FUNCTION__ "()\n");
/* Allocate master structure */
irlan = hashbin_new(HB_LOCAL);
irlan = hashbin_new(HB_LOCK); /* protect from /proc */
if (irlan == NULL) {
printk(KERN_WARNING "IrLAN: Can't allocate hashbin!\n");
return -ENOMEM;
......@@ -1089,11 +1089,10 @@ static int irlan_proc_read(char *buf, char **start, off_t offset, int len)
unsigned long flags;
ASSERT(irlan != NULL, return 0;);
save_flags(flags);
cli();
len = 0;
spin_lock_irqsave(&irlan->hb_spinlock, flags);
len += sprintf(buf+len, "IrLAN instances:\n");
self = (struct irlan_cb *) hashbin_get_first(irlan);
......@@ -1129,7 +1128,7 @@ static int irlan_proc_read(char *buf, char **start, off_t offset, int len)
self = (struct irlan_cb *) hashbin_get_next(irlan);
}
restore_flags(flags);
spin_unlock_irqrestore(&irlan->hb_spinlock, flags);
return len;
}
......
......@@ -80,7 +80,7 @@ int irlap_proc_read(char *, char **, off_t, int);
int __init irlap_init(void)
{
/* Allocate master array */
irlap = hashbin_new(HB_LOCAL);
irlap = hashbin_new(HB_LOCK);
if (irlap == NULL) {
ERROR("%s: can't allocate irlap hashbin!\n", __FUNCTION__);
return -ENOMEM;
......@@ -139,7 +139,15 @@ struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
skb_queue_head_init(&self->wx_list);
/* My unique IrLAP device address! */
get_random_bytes(&self->saddr, sizeof(self->saddr));
/* We don't want the broadcast address, nor the NULL address
* (most often used to signify "invalid"), and we don't want an
* address already in use (otherwise connect won't be able
* to select the proper link). - Jean II */
do {
get_random_bytes(&self->saddr, sizeof(self->saddr));
} while ((self->saddr == 0x0) || (self->saddr == BROADCAST) ||
(hashbin_lock_find(irlap, self->saddr, NULL)) );
/* Copy to the driver */
memcpy(dev->dev_addr, &self->saddr, 4);
init_timer(&self->slot_timer);
......@@ -522,7 +530,8 @@ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery)
self->discovery_log = NULL;
}
self->discovery_log= hashbin_new(HB_LOCAL);
/* All operations will occur at predictable times, no need to lock */
self->discovery_log= hashbin_new(HB_NOLOCK);
info.S = discovery->nslots; /* Number of slots */
info.s = 0; /* Current slot */
......@@ -1084,15 +1093,14 @@ int irlap_proc_read(char *buf, char **start, off_t offset, int len)
unsigned long flags;
int i = 0;
save_flags(flags);
cli();
spin_lock_irqsave(&irlap->hb_spinlock, flags);
len = 0;
self = (struct irlap_cb *) hashbin_get_first(irlap);
while (self != NULL) {
ASSERT(self != NULL, return -ENODEV;);
ASSERT(self->magic == LAP_MAGIC, return -EBADR;);
ASSERT(self != NULL, break;);
ASSERT(self->magic == LAP_MAGIC, break;);
len += sprintf(buf+len, "irlap%d ", i++);
len += sprintf(buf+len, "state: %s\n",
......@@ -1164,7 +1172,7 @@ int irlap_proc_read(char *buf, char **start, off_t offset, int len)
self = (struct irlap_cb *) hashbin_get_next(irlap);
}
restore_flags(flags);
spin_unlock_irqrestore(&irlap->hb_spinlock, flags);
return len;
}
......
......@@ -83,13 +83,13 @@ int __init irlmp_init(void)
memset(irlmp, 0, sizeof(struct irlmp_cb));
irlmp->magic = LMP_MAGIC;
spin_lock_init(&irlmp->log_lock);
irlmp->clients = hashbin_new(HB_GLOBAL);
irlmp->services = hashbin_new(HB_GLOBAL);
irlmp->links = hashbin_new(HB_GLOBAL);
irlmp->unconnected_lsaps = hashbin_new(HB_GLOBAL);
irlmp->cachelog = hashbin_new(HB_GLOBAL);
irlmp->clients = hashbin_new(HB_LOCK);
irlmp->services = hashbin_new(HB_LOCK);
irlmp->links = hashbin_new(HB_LOCK);
irlmp->unconnected_lsaps = hashbin_new(HB_LOCK);
irlmp->cachelog = hashbin_new(HB_NOLOCK);
spin_lock_init(&irlmp->cachelog->hb_spinlock);
irlmp->free_lsap_sel = 0x10; /* Reserved 0x00-0x0f */
strcpy(sysctl_devname, "Linux");
......@@ -177,8 +177,8 @@ struct lsap_cb *irlmp_open_lsap(__u8 slsap_sel, notify_t *notify, __u8 pid)
self->lsap_state = LSAP_DISCONNECTED;
/* Insert into queue of unconnected LSAPs */
hashbin_insert(irlmp->unconnected_lsaps, (irda_queue_t *) self, (int) self,
NULL);
hashbin_insert(irlmp->unconnected_lsaps, (irda_queue_t *) self,
(long) self, NULL);
return self;
}
......@@ -238,7 +238,7 @@ void irlmp_close_lsap(struct lsap_cb *self)
LM_LAP_DISCONNECT_REQUEST, NULL);
}
/* Now, remove from the link */
lsap = hashbin_remove(lap->lsaps, (int) self, NULL);
lsap = hashbin_remove(lap->lsaps, (long) self, NULL);
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
lap->cache.valid = FALSE;
#endif
......@@ -246,7 +246,7 @@ void irlmp_close_lsap(struct lsap_cb *self)
self->lap = NULL;
/* Check if we found the LSAP! If not then try the unconnected lsaps */
if (!lsap) {
lsap = hashbin_remove(irlmp->unconnected_lsaps, (int) self,
lsap = hashbin_remove(irlmp->unconnected_lsaps, (long) self,
NULL);
}
if (!lsap) {
......@@ -286,7 +286,7 @@ void irlmp_register_link(struct irlap_cb *irlap, __u32 saddr, notify_t *notify)
lap->magic = LMP_LAP_MAGIC;
lap->saddr = saddr;
lap->daddr = DEV_ADDR_ANY;
lap->lsaps = hashbin_new(HB_GLOBAL);
lap->lsaps = hashbin_new(HB_LOCK);
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
lap->cache.valid = FALSE;
#endif
......@@ -347,7 +347,6 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
struct sk_buff *skb = NULL;
struct lap_cb *lap;
struct lsap_cb *lsap;
discovery_t *discovery;
ASSERT(self != NULL, return -EBADR;);
ASSERT(self->magic == LMP_LSAP_MAGIC, return -EBADR;);
......@@ -388,6 +387,10 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
* device with the given daddr
*/
if ((!saddr) || (saddr == DEV_ADDR_ANY)) {
discovery_t *discovery;
unsigned long flags;
spin_lock_irqsave(&irlmp->cachelog->hb_spinlock, flags);
if (daddr != DEV_ADDR_ANY)
discovery = hashbin_find(irlmp->cachelog, daddr, NULL);
else {
......@@ -400,8 +403,9 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
saddr = discovery->saddr;
daddr = discovery->daddr;
}
spin_unlock_irqrestore(&irlmp->cachelog->hb_spinlock, flags);
}
lap = hashbin_find(irlmp->links, saddr, NULL);
lap = hashbin_lock_find(irlmp->links, saddr, NULL);
if (lap == NULL) {
IRDA_DEBUG(1, __FUNCTION__ "(), Unable to find a usable link!\n");
return -EHOSTUNREACH;
......@@ -411,11 +415,8 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
if (lap->daddr == DEV_ADDR_ANY)
lap->daddr = daddr;
else if (lap->daddr != daddr) {
struct lsap_cb *any_lsap;
/* Check if some LSAPs are active on this LAP */
any_lsap = (struct lsap_cb *) hashbin_get_first(lap->lsaps);
if (any_lsap == NULL) {
if (HASHBIN_GET_SIZE(lap->lsaps) == 0) {
/* No active connection, but LAP hasn't been
* disconnected yet (waiting for timeout in LAP).
* Maybe we could give LAP a bit of help in this case.
......@@ -436,14 +437,15 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel,
* Remove LSAP from list of unconnected LSAPs and insert it into the
* list of connected LSAPs for the particular link
*/
lsap = hashbin_remove(irlmp->unconnected_lsaps, (int) self, NULL);
lsap = hashbin_remove(irlmp->unconnected_lsaps, (long) self, NULL);
ASSERT(lsap != NULL, return -1;);
ASSERT(lsap->magic == LMP_LSAP_MAGIC, return -1;);
ASSERT(lsap->lap != NULL, return -1;);
ASSERT(lsap->lap->magic == LMP_LAP_MAGIC, return -1;);
hashbin_insert(self->lap->lsaps, (irda_queue_t *) self, (int) self, NULL);
hashbin_insert(self->lap->lsaps, (irda_queue_t *) self, (long) self,
NULL);
set_bit(0, &self->connected); /* TRUE */
......@@ -574,29 +576,41 @@ void irlmp_connect_confirm(struct lsap_cb *self, struct sk_buff *skb)
struct lsap_cb *irlmp_dup(struct lsap_cb *orig, void *instance)
{
struct lsap_cb *new;
unsigned long flags;
IRDA_DEBUG(1, __FUNCTION__ "()\n");
spin_lock_irqsave(&irlmp->unconnected_lsaps->hb_spinlock, flags);
/* Only allowed to duplicate unconnected LSAP's */
if (!hashbin_find(irlmp->unconnected_lsaps, (int) orig, NULL)) {
if (!hashbin_find(irlmp->unconnected_lsaps, (long) orig, NULL)) {
IRDA_DEBUG(0, __FUNCTION__ "(), unable to find LSAP\n");
spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock,
flags);
return NULL;
}
/* Allocate a new instance */
new = kmalloc(sizeof(struct lsap_cb), GFP_ATOMIC);
if (!new) {
IRDA_DEBUG(0, __FUNCTION__ "(), unable to kmalloc\n");
spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock,
flags);
return NULL;
}
/* Dup */
memcpy(new, orig, sizeof(struct lsap_cb));
new->notify.instance = instance;
/* new->lap = orig->lap; => done in the memcpy() */
/* new->slsap_sel = orig->slsap_sel; => done in the memcpy() */
spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, flags);
/* Not everything is the same */
new->notify.instance = instance;
init_timer(&new->watchdog_timer);
hashbin_insert(irlmp->unconnected_lsaps, (irda_queue_t *) new, (int) new,
NULL);
hashbin_insert(irlmp->unconnected_lsaps, (irda_queue_t *) new,
(long) new, NULL);
/* Make sure that we invalidate the cache */
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
......@@ -646,7 +660,7 @@ int irlmp_disconnect_request(struct lsap_cb *self, struct sk_buff *userdata)
ASSERT(self->lap->magic == LMP_LAP_MAGIC, return -1;);
ASSERT(self->lap->lsaps != NULL, return -1;);
lsap = hashbin_remove(self->lap->lsaps, (int) self, NULL);
lsap = hashbin_remove(self->lap->lsaps, (long) self, NULL);
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
self->lap->cache.valid = FALSE;
#endif
......@@ -655,8 +669,8 @@ int irlmp_disconnect_request(struct lsap_cb *self, struct sk_buff *userdata)
ASSERT(lsap->magic == LMP_LSAP_MAGIC, return -1;);
ASSERT(lsap == self, return -1;);
hashbin_insert(irlmp->unconnected_lsaps, (irda_queue_t *) self, (int) self,
NULL);
hashbin_insert(irlmp->unconnected_lsaps, (irda_queue_t *) self,
(long) self, NULL);
/* Reset some values */
self->dlsap_sel = LSAP_ANY;
......@@ -699,15 +713,15 @@ void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason,
ASSERT(self->lap != NULL, return;);
ASSERT(self->lap->lsaps != NULL, return;);
lsap = hashbin_remove(self->lap->lsaps, (int) self, NULL);
lsap = hashbin_remove(self->lap->lsaps, (long) self, NULL);
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
self->lap->cache.valid = FALSE;
#endif
ASSERT(lsap != NULL, return;);
ASSERT(lsap == self, return;);
hashbin_insert(irlmp->unconnected_lsaps, (irda_queue_t *) lsap, (int) lsap,
NULL);
hashbin_insert(irlmp->unconnected_lsaps, (irda_queue_t *) lsap,
(long) lsap, NULL);
self->dlsap_sel = LSAP_ANY;
self->lap = NULL;
......@@ -886,7 +900,7 @@ void irlmp_check_services(discovery_t *discovery)
*/
while ((service = service_log[i++]) != S_END) {
IRDA_DEBUG( 4, "service=%02x\n", service);
client = hashbin_find(irlmp->registry, service, NULL);
client = hashbin_lock_find(irlmp->registry, service, NULL);
if (entry && entry->discovery_callback) {
IRDA_DEBUG( 4, "discovery_callback!\n");
......@@ -903,6 +917,7 @@ void irlmp_check_services(discovery_t *discovery)
kfree(service_log);
}
#endif
/*
* Function irlmp_notify_client (log)
*
......@@ -930,6 +945,12 @@ irlmp_notify_client(irlmp_client_t *client,
/*
* Now, check all discovered devices (if any), and notify client
* only about the services that the client is interested in
* Note : most often, we will get called immediately following
* a discovery, so the log is not going to expire.
* On the other hand, coming here through irlmp_discovery_request()
* is *very* problematic - Jean II
* Can't use hashbin_find_next(), key is not unique. I'm running
* out of options :-( - Jean II
*/
discovery = (discovery_t *) hashbin_get_first(log);
while (discovery != NULL) {
......@@ -956,6 +977,7 @@ irlmp_notify_client(irlmp_client_t *client,
void irlmp_discovery_confirm(hashbin_t *log, DISCOVERY_MODE mode)
{
irlmp_client_t *client;
irlmp_client_t *client_next;
IRDA_DEBUG(3, __FUNCTION__ "()\n");
......@@ -965,11 +987,12 @@ void irlmp_discovery_confirm(hashbin_t *log, DISCOVERY_MODE mode)
return;
client = (irlmp_client_t *) hashbin_get_first(irlmp->clients);
while (client != NULL) {
while (NULL != hashbin_find_next(irlmp->clients, (long) client, NULL,
(void *) &client_next) ) {
/* Check if we should notify client */
irlmp_notify_client(client, log, mode);
client = (irlmp_client_t *) hashbin_get_next(irlmp->clients);
client = client_next;
}
}
......@@ -987,13 +1010,15 @@ void irlmp_discovery_confirm(hashbin_t *log, DISCOVERY_MODE mode)
void irlmp_discovery_expiry(discovery_t *expiry)
{
irlmp_client_t *client;
irlmp_client_t *client_next;
IRDA_DEBUG(3, __FUNCTION__ "()\n");
ASSERT(expiry != NULL, return;);
client = (irlmp_client_t *) hashbin_get_first(irlmp->clients);
while (client != NULL) {
while (NULL != hashbin_find_next(irlmp->clients, (long) client, NULL,
(void *) &client_next) ) {
/* Check if we should notify client */
if ((client->expir_callback) &&
(client->hint_mask & expiry->hints.word & 0x7f7f))
......@@ -1001,7 +1026,7 @@ void irlmp_discovery_expiry(discovery_t *expiry)
client->priv);
/* Next client */
client = (irlmp_client_t *) hashbin_get_next(irlmp->clients);
client = client_next;
}
}
......@@ -1196,11 +1221,9 @@ void irlmp_status_indication(struct lap_cb *self,
struct lsap_cb *curr;
/* Send status_indication to all LSAPs using this link */
next = (struct lsap_cb *) hashbin_get_first( self->lsaps);
while (next != NULL ) {
curr = next;
next = (struct lsap_cb *) hashbin_get_next(self->lsaps);
curr = (struct lsap_cb *) hashbin_get_first( self->lsaps);
while (NULL != hashbin_find_next(self->lsaps, (long) curr, NULL,
(void *) &next) ) {
ASSERT(curr->magic == LMP_LSAP_MAGIC, return;);
/*
* Inform service user if he has requested it
......@@ -1210,6 +1233,8 @@ void irlmp_status_indication(struct lap_cb *self,
link, lock);
else
IRDA_DEBUG(2, __FUNCTION__ "(), no handler\n");
curr = next;
}
}
......@@ -1245,29 +1270,15 @@ void irlmp_flow_indication(struct lap_cb *self, LOCAL_FLOW flow)
(IRLAP_GET_TX_QUEUE_LEN(self->irlap) < LAP_HIGH_THRESHOLD)) {
/* Try to find the next lsap we should poll. */
next = self->flow_next;
if(next != NULL) {
/* Note that if there is only one LSAP on the LAP
* (most common case), self->flow_next is always NULL,
* so we always avoid this loop. - Jean II */
IRDA_DEBUG(4, __FUNCTION__ "() : searching my LSAP\n");
/* We look again in hashbins, because the lsap
* might have gone away... - Jean II */
curr = (struct lsap_cb *) hashbin_get_first(self->lsaps);
while((curr != NULL ) && (curr != next))
curr = (struct lsap_cb *) hashbin_get_next(self->lsaps);
} else
curr = NULL;
/* If we have no lsap, restart from first one */
if(curr == NULL)
curr = (struct lsap_cb *) hashbin_get_first(self->lsaps);
if(next == NULL)
next = (struct lsap_cb *) hashbin_get_first(self->lsaps);
/* Verify current one and find the next one */
curr = hashbin_find_next(self->lsaps, (long) next, NULL,
(void *) &self->flow_next);
/* Uh-oh... Paranoia */
if(curr == NULL)
break;
/* Next time, we will get the next one (or the first one) */
self->flow_next = (struct lsap_cb *) hashbin_get_next(self->lsaps);
IRDA_DEBUG(4, __FUNCTION__ "() : curr is %p, next was %p and is now %p, still %d to go - queue len = %d\n", curr, next, self->flow_next, lsap_todo, IRLAP_GET_TX_QUEUE_LEN(self->irlap));
/* Inform lsap user that it can send one more packet. */
......@@ -1414,20 +1425,12 @@ __u16 irlmp_service_to_hint(int service)
* Register local service with IrLMP
*
*/
__u32 irlmp_register_service(__u16 hints)
void *irlmp_register_service(__u16 hints)
{
irlmp_service_t *service;
__u32 handle;
IRDA_DEBUG(4, __FUNCTION__ "(), hints = %04x\n", hints);
/* Get a unique handle for this service */
get_random_bytes(&handle, sizeof(handle));
while (hashbin_find(irlmp->services, handle, NULL) || !handle)
get_random_bytes(&handle, sizeof(handle));
irlmp->hints.word |= hints;
/* Make a new registration */
service = kmalloc(sizeof(irlmp_service_t), GFP_ATOMIC);
if (!service) {
......@@ -1435,9 +1438,12 @@ __u32 irlmp_register_service(__u16 hints)
return 0;
}
service->hints = hints;
hashbin_insert(irlmp->services, (irda_queue_t *) service, handle, NULL);
hashbin_insert(irlmp->services, (irda_queue_t *) service,
(long) service, NULL);
irlmp->hints.word |= hints;
return handle;
return (void *)service;
}
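A hedged caller-side sketch may help here (it is not part of the patch): handles returned by irlmp_register_service() are now opaque pointers rather than random __u32 values, but the calling convention is otherwise unchanged. The S_LAN service type and irlmp_service_to_hint() come from the IrDA headers; my_service, my_service_start() and my_service_stop() are invented names.

/* Hypothetical caller sketch, not part of this patch: exercising the new
 * opaque-pointer handle returned by irlmp_register_service(). */
#include <linux/errno.h>
#include <net/irda/irlmp.h>

static void *my_service;		/* was a __u32 handle before this patch */

static int my_service_start(void)
{
	__u16 hints = irlmp_service_to_hint(S_LAN);

	my_service = irlmp_register_service(hints);
	if (!my_service)
		return -ENOMEM;		/* kmalloc failed inside IrLMP */
	return 0;
}

static void my_service_stop(void)
{
	/* A NULL or stale handle is now tolerated by IrLMP. */
	irlmp_unregister_service(my_service);
	my_service = NULL;
}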
/*
......@@ -1447,35 +1453,38 @@ __u32 irlmp_register_service(__u16 hints)
*
* Returns: 0 on success, -1 on error
*/
int irlmp_unregister_service(__u32 handle)
int irlmp_unregister_service(void *handle)
{
irlmp_service_t *service;
unsigned long flags;
IRDA_DEBUG(4, __FUNCTION__ "()\n");
if (!handle)
return -1;
service = hashbin_find(irlmp->services, handle, NULL);
/* Caller may call with invalid handle (it's legal) - Jean II */
service = hashbin_lock_find(irlmp->services, (long) handle, NULL);
if (!service) {
IRDA_DEBUG(1, __FUNCTION__ "(), Unknown service!\n");
return -1;
}
service = hashbin_remove(irlmp->services, handle, NULL);
if (service)
kfree(service);
hashbin_remove_this(irlmp->services, (irda_queue_t *) service);
kfree(service);
/* Remove old hint bits */
irlmp->hints.word = 0;
/* Refresh current hint bits */
spin_lock_irqsave(&irlmp->services->hb_spinlock, flags);
service = (irlmp_service_t *) hashbin_get_first(irlmp->services);
while (service) {
irlmp->hints.word |= service->hints;
service = (irlmp_service_t *)hashbin_get_next(irlmp->services);
}
spin_unlock_irqrestore(&irlmp->services->hb_spinlock, flags);
return 0;
}
......@@ -1488,20 +1497,14 @@ int irlmp_unregister_service(__u32 handle)
*
* Returns: handle on success, NULL on error
*/
__u32 irlmp_register_client(__u16 hint_mask, DISCOVERY_CALLBACK1 disco_clb,
void *irlmp_register_client(__u16 hint_mask, DISCOVERY_CALLBACK1 disco_clb,
DISCOVERY_CALLBACK1 expir_clb, void *priv)
{
irlmp_client_t *client;
__u32 handle;
IRDA_DEBUG(1, __FUNCTION__ "()\n");
ASSERT(irlmp != NULL, return 0;);
/* Get a unique handle for this client */
get_random_bytes(&handle, sizeof(handle));
while (hashbin_find(irlmp->clients, handle, NULL) || !handle)
get_random_bytes(&handle, sizeof(handle));
/* Make a new registration */
client = kmalloc(sizeof(irlmp_client_t), GFP_ATOMIC);
if (!client) {
......@@ -1515,9 +1518,10 @@ __u32 irlmp_register_client(__u16 hint_mask, DISCOVERY_CALLBACK1 disco_clb,
client->expir_callback = expir_clb;
client->priv = priv;
hashbin_insert(irlmp->clients, (irda_queue_t *) client, handle, NULL);
hashbin_insert(irlmp->clients, (irda_queue_t *) client,
(long) client, NULL);
return handle;
return (void *) client;
}
/*
......@@ -1528,7 +1532,7 @@ __u32 irlmp_register_client(__u16 hint_mask, DISCOVERY_CALLBACK1 disco_clb,
*
* Returns: 0 on success, -1 on error
*/
int irlmp_update_client(__u32 handle, __u16 hint_mask,
int irlmp_update_client(void *handle, __u16 hint_mask,
DISCOVERY_CALLBACK1 disco_clb,
DISCOVERY_CALLBACK1 expir_clb, void *priv)
{
......@@ -1537,7 +1541,7 @@ int irlmp_update_client(__u32 handle, __u16 hint_mask,
if (!handle)
return -1;
client = hashbin_find(irlmp->clients, handle, NULL);
client = hashbin_lock_find(irlmp->clients, (long) handle, NULL);
if (!client) {
IRDA_DEBUG(1, __FUNCTION__ "(), Unknown client!\n");
return -1;
......@@ -1557,7 +1561,7 @@ int irlmp_update_client(__u32 handle, __u16 hint_mask,
* Returns: 0 on success, -1 on error
*
*/
int irlmp_unregister_client(__u32 handle)
int irlmp_unregister_client(void *handle)
{
struct irlmp_client *client;
......@@ -1566,16 +1570,16 @@ int irlmp_unregister_client(__u32 handle)
if (!handle)
return -1;
client = hashbin_find(irlmp->clients, handle, NULL);
/* Caller may call with invalid handle (it's legal) - Jean II */
client = hashbin_lock_find(irlmp->clients, (long) handle, NULL);
if (!client) {
IRDA_DEBUG(1, __FUNCTION__ "(), Unknown client!\n");
return -1;
}
IRDA_DEBUG( 4, __FUNCTION__ "(), removing client!\n");
client = hashbin_remove( irlmp->clients, handle, NULL);
if (client)
kfree(client);
hashbin_remove_this(irlmp->clients, (irda_queue_t *) client);
kfree(client);
return 0;
}
......@@ -1589,6 +1593,7 @@ int irlmp_slsap_inuse(__u8 slsap_sel)
{
struct lsap_cb *self;
struct lap_cb *lap;
unsigned long flags;
ASSERT(irlmp != NULL, return TRUE;);
ASSERT(irlmp->magic == LMP_MAGIC, return TRUE;);
......@@ -1611,10 +1616,16 @@ int irlmp_slsap_inuse(__u8 slsap_sel)
* every IrLAP connection and check every LSAP associated with
* the connection.
*/
spin_lock_irqsave(&irlmp->links->hb_spinlock, flags);
lap = (struct lap_cb *) hashbin_get_first(irlmp->links);
while (lap != NULL) {
ASSERT(lap->magic == LMP_LAP_MAGIC, return TRUE;);
/* Careful for priority inversions here !
* All other uses of attrib spinlock are independent of
* the object spinlock, so we are safe. Jean II */
spin_lock(&lap->lsaps->hb_spinlock);
self = (struct lsap_cb *) hashbin_get_first(lap->lsaps);
while (self != NULL) {
ASSERT(self->magic == LMP_LSAP_MAGIC, return TRUE;);
......@@ -1626,8 +1637,11 @@ int irlmp_slsap_inuse(__u8 slsap_sel)
}
self = (struct lsap_cb*) hashbin_get_next(lap->lsaps);
}
spin_unlock(&lap->lsaps->hb_spinlock);
/* Next LAP */
lap = (struct lap_cb *) hashbin_get_next(irlmp->links);
}
spin_unlock_irqrestore(&irlmp->links->hb_spinlock, flags);
return FALSE;
}
......@@ -1736,15 +1750,13 @@ int irlmp_proc_read(char *buf, char **start, off_t offset, int len)
ASSERT(irlmp != NULL, return 0;);
save_flags( flags);
cli();
len = 0;
len += sprintf( buf+len, "Unconnected LSAPs:\n");
spin_lock_irqsave(&irlmp->unconnected_lsaps->hb_spinlock, flags);
self = (struct lsap_cb *) hashbin_get_first( irlmp->unconnected_lsaps);
while (self != NULL) {
ASSERT(self->magic == LMP_LSAP_MAGIC, return 0;);
ASSERT(self->magic == LMP_LSAP_MAGIC, break;);
len += sprintf(buf+len, "lsap state: %s, ",
irlsap_state[ self->lsap_state]);
len += sprintf(buf+len,
......@@ -1756,9 +1768,10 @@ int irlmp_proc_read(char *buf, char **start, off_t offset, int len)
self = (struct lsap_cb *) hashbin_get_next(
irlmp->unconnected_lsaps);
}
spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, flags);
len += sprintf(buf+len, "\nRegistered Link Layers:\n");
spin_lock_irqsave(&irlmp->links->hb_spinlock, flags);
lap = (struct lap_cb *) hashbin_get_first(irlmp->links);
while (lap != NULL) {
len += sprintf(buf+len, "lap state: %s, ",
......@@ -1770,10 +1783,15 @@ int irlmp_proc_read(char *buf, char **start, off_t offset, int len)
HASHBIN_GET_SIZE(lap->lsaps));
len += sprintf(buf+len, "\n");
/* Careful for priority inversions here !
* All other uses of attrib spinlock are independent of
* the object spinlock, so we are safe. Jean II */
spin_lock(&lap->lsaps->hb_spinlock);
len += sprintf(buf+len, "\n Connected LSAPs:\n");
self = (struct lsap_cb *) hashbin_get_first(lap->lsaps);
while (self != NULL) {
ASSERT(self->magic == LMP_LSAP_MAGIC, return 0;);
ASSERT(self->magic == LMP_LSAP_MAGIC, break;);
len += sprintf(buf+len, " lsap state: %s, ",
irlsap_state[ self->lsap_state]);
len += sprintf(buf+len,
......@@ -1785,11 +1803,12 @@ int irlmp_proc_read(char *buf, char **start, off_t offset, int len)
self = (struct lsap_cb *) hashbin_get_next(
lap->lsaps);
}
spin_unlock(&lap->lsaps->hb_spinlock);
len += sprintf(buf+len, "\n");
lap = (struct lap_cb *) hashbin_get_next(irlmp->links);
}
restore_flags(flags);
spin_unlock_irqrestore(&irlmp->links->hb_spinlock, flags);
return len;
}
......
......@@ -207,6 +207,43 @@ void irlmp_idle_timer_expired(void *data)
irlmp_do_lap_event(self, LM_LAP_IDLE_TIMEOUT, NULL);
}
/*
* Send an event on all LSAPs attached to this LAP.
*/
static inline void
irlmp_do_all_lsap_event(hashbin_t * lsap_hashbin,
IRLMP_EVENT event)
{
struct lsap_cb *lsap;
struct lsap_cb *lsap_next;
/* Note : this function uses the new hashbin_find_next()
* function, instead of the old hashbin_get_next().
* This makes sure that we are always pointing one lsap
* ahead, so that if the current lsap is removed as the
* result of sending the event, we don't care.
* Also, as we store the context ourselves, if an enumeration
* of the same lsap hashbin happens as the result of sending the
* event, we don't care.
* The only problem is if the next lsap is removed. In that case,
* hashbin_find_next() will return NULL and we will abort the
* enumeration. - Jean II */
/* Also : we don't accept any skb in input. We can *NOT* pass
* the same skb to multiple clients safely, we would need to
* skb_clone() it. - Jean II */
lsap = (struct lsap_cb *) hashbin_get_first(lsap_hashbin);
while (NULL != hashbin_find_next(lsap_hashbin,
(long) lsap,
NULL,
(void *) &lsap_next) ) {
irlmp_do_lsap_event(lsap, event, NULL);
lsap = lsap_next;
}
}
/*********************************************************************
*
* LAP connection control states
......@@ -274,9 +311,6 @@ static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event,
static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event,
struct sk_buff *skb)
{
struct lsap_cb *lsap;
struct lsap_cb *lsap_current;
IRDA_DEBUG(2, __FUNCTION__ "(), event=%s\n", irlmp_event[event]);
switch (event) {
......@@ -290,11 +324,9 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event,
/* Just accept connection TODO, this should be fixed */
irlap_connect_response(self->irlap, skb);
lsap = (struct lsap_cb *) hashbin_get_first(self->lsaps);
while (lsap != NULL) {
irlmp_do_lsap_event(lsap, LM_LAP_CONNECT_CONFIRM, NULL);
lsap = (struct lsap_cb*) hashbin_get_next(self->lsaps);
}
/* Tell LSAPs that they can start sending data */
irlmp_do_all_lsap_event(self->lsaps, LM_LAP_CONNECT_CONFIRM);
/* Note : by the time we get there (LAP retries and co),
* the lsaps may already have gone. This avoids getting stuck
* forever in LAP_ACTIVE state - Jean II */
......@@ -310,11 +342,9 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event,
/* For all lsap_ce E Associated do LS_Connect_confirm */
irlmp_next_lap_state(self, LAP_ACTIVE);
lsap = (struct lsap_cb *) hashbin_get_first(self->lsaps);
while (lsap != NULL) {
irlmp_do_lsap_event(lsap, LM_LAP_CONNECT_CONFIRM, NULL);
lsap = (struct lsap_cb*) hashbin_get_next(self->lsaps);
}
/* Tell LSAPs that they can start sending data */
irlmp_do_all_lsap_event(self->lsaps, LM_LAP_CONNECT_CONFIRM);
/* Note : by the time we get there (LAP retries and co),
* the lsaps may already have gone. This avoids getting stuck
* forever in LAP_ACTIVE state - Jean II */
......@@ -328,18 +358,8 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event,
irlmp_next_lap_state(self, LAP_STANDBY);
/* Send disconnect event to all LSAPs using this link */
lsap = (struct lsap_cb *) hashbin_get_first( self->lsaps);
while (lsap != NULL ) {
ASSERT(lsap->magic == LMP_LSAP_MAGIC, return;);
lsap_current = lsap;
/* Be sure to stay one item ahead */
lsap = (struct lsap_cb *) hashbin_get_next(self->lsaps);
irlmp_do_lsap_event(lsap_current,
LM_LAP_DISCONNECT_INDICATION,
NULL);
}
irlmp_do_all_lsap_event(self->lsaps,
LM_LAP_DISCONNECT_INDICATION);
break;
case LM_LAP_DISCONNECT_REQUEST:
IRDA_DEBUG(4, __FUNCTION__ "(), LM_LAP_DISCONNECT_REQUEST\n");
......@@ -368,9 +388,6 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event,
static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event,
struct sk_buff *skb)
{
struct lsap_cb *lsap;
struct lsap_cb *lsap_current;
IRDA_DEBUG(4, __FUNCTION__ "()\n");
switch (event) {
......@@ -383,22 +400,11 @@ static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event,
* notify all LSAPs using this LAP, but that should be safe to
* do anyway.
*/
lsap = (struct lsap_cb *) hashbin_get_first(self->lsaps);
while (lsap != NULL) {
irlmp_do_lsap_event(lsap, LM_LAP_CONNECT_CONFIRM, NULL);
lsap = (struct lsap_cb*) hashbin_get_next(self->lsaps);
}
irlmp_do_all_lsap_event(self->lsaps, LM_LAP_CONNECT_CONFIRM);
/* Needed by connect indication */
lsap = (struct lsap_cb *) hashbin_get_first(irlmp->unconnected_lsaps);
while (lsap != NULL) {
lsap_current = lsap;
/* Be sure to stay one item ahead */
lsap = (struct lsap_cb*) hashbin_get_next(irlmp->unconnected_lsaps);
irlmp_do_lsap_event(lsap_current,
LM_LAP_CONNECT_CONFIRM, NULL);
}
irlmp_do_all_lsap_event(irlmp->unconnected_lsaps,
LM_LAP_CONNECT_CONFIRM);
/* Keep state */
break;
case LM_LAP_DISCONNECT_REQUEST:
......@@ -447,18 +453,8 @@ static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event,
/*
* Inform all connected LSAP's using this link
*/
lsap = (struct lsap_cb *) hashbin_get_first(self->lsaps);
while (lsap != NULL ) {
ASSERT(lsap->magic == LMP_LSAP_MAGIC, return;);
lsap_current = lsap;
/* Be sure to stay one item ahead */
lsap = (struct lsap_cb *) hashbin_get_next(self->lsaps);
irlmp_do_lsap_event(lsap_current,
LM_LAP_DISCONNECT_INDICATION,
NULL);
}
irlmp_do_all_lsap_event(self->lsaps,
LM_LAP_DISCONNECT_INDICATION);
/* Force an expiry of the discovery log.
* Now that the LAP is free, the system may attempt to
......@@ -581,15 +577,15 @@ static int irlmp_state_connect(struct lsap_cb *self, IRLMP_EVENT event,
* Bind this LSAP to the IrLAP link where the connect was
* received
*/
lsap = hashbin_remove(irlmp->unconnected_lsaps, (int) self,
lsap = hashbin_remove(irlmp->unconnected_lsaps, (long) self,
NULL);
ASSERT(lsap == self, return -1;);
ASSERT(self->lap != NULL, return -1;);
ASSERT(self->lap->lsaps != NULL, return -1;);
hashbin_insert(self->lap->lsaps, (irda_queue_t *) self, (int) self,
NULL);
hashbin_insert(self->lap->lsaps, (irda_queue_t *) self,
(long) self, NULL);
irlmp_send_lcf_pdu(self->lap, self->dlsap_sel,
self->slsap_sel, CONNECT_CNF, skb);
......
......@@ -210,6 +210,7 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb)
__u8 dlsap_sel; /* Destination LSAP address */
__u8 pid; /* Protocol identifier */
__u8 *fp;
unsigned long flags;
IRDA_DEBUG(4, __FUNCTION__ "()\n");
......@@ -242,6 +243,8 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb)
return;
}
/* Search the connectionless LSAP */
spin_lock_irqsave(&irlmp->unconnected_lsaps->hb_spinlock, flags);
lsap = (struct lsap_cb *) hashbin_get_first(irlmp->unconnected_lsaps);
while (lsap != NULL) {
/*
......@@ -255,6 +258,8 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb)
}
lsap = (struct lsap_cb *) hashbin_get_next(irlmp->unconnected_lsaps);
}
spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, flags);
if (lsap)
irlmp_connless_data_indication(lsap, skb);
else {
......@@ -374,6 +379,7 @@ void irlmp_link_discovery_indication(struct lap_cb *self,
ASSERT(self != NULL, return;);
ASSERT(self->magic == LMP_LAP_MAGIC, return;);
/* Add to main log, cleanup */
irlmp_add_discovery(irlmp->cachelog, discovery);
/* Just handle it the same way as a discovery confirm,
......@@ -396,6 +402,7 @@ void irlmp_link_discovery_confirm(struct lap_cb *self, hashbin_t *log)
ASSERT(self != NULL, return;);
ASSERT(self->magic == LMP_LAP_MAGIC, return;);
/* Add to main log, cleanup */
irlmp_add_discovery_log(irlmp->cachelog, log);
/* Propagate event to various LSAPs registered for it.
......@@ -411,6 +418,8 @@ void irlmp_link_discovery_confirm(struct lap_cb *self, hashbin_t *log)
static inline void irlmp_update_cache(struct lap_cb *lap,
struct lsap_cb *lsap)
{
/* Prevent concurrent reads from getting garbage */
lap->cache.valid = FALSE;
/* Update cache entry */
lap->cache.dlsap_sel = lsap->dlsap_sel;
lap->cache.slsap_sel = lsap->slsap_sel;
......@@ -441,6 +450,7 @@ static struct lsap_cb *irlmp_find_lsap(struct lap_cb *self, __u8 dlsap_sel,
hashbin_t *queue)
{
struct lsap_cb *lsap;
unsigned long flags;
/*
* Optimize for the common case. We assume that the last frame
......@@ -455,6 +465,9 @@ static struct lsap_cb *irlmp_find_lsap(struct lap_cb *self, __u8 dlsap_sel,
return (self->cache.lsap);
}
#endif
spin_lock_irqsave(&queue->hb_spinlock, flags);
lsap = (struct lsap_cb *) hashbin_get_first(queue);
while (lsap != NULL) {
/*
......@@ -465,29 +478,27 @@ static struct lsap_cb *irlmp_find_lsap(struct lap_cb *self, __u8 dlsap_sel,
*/
if ((status == CONNECT_CMD) &&
(lsap->slsap_sel == slsap_sel) &&
(lsap->dlsap_sel == LSAP_ANY))
{
(lsap->dlsap_sel == LSAP_ANY)) {
/* This is where the dest lsap sel is set on incoming
* lsaps */
lsap->dlsap_sel = dlsap_sel;
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
irlmp_update_cache(self, lsap);
#endif
return lsap;
break;
}
/*
* Check if source LSAP and dest LSAP selectors match.
*/
if ((lsap->slsap_sel == slsap_sel) &&
(lsap->dlsap_sel == dlsap_sel))
{
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
irlmp_update_cache(self, lsap);
#endif
return lsap;
}
break;
lsap = (struct lsap_cb *) hashbin_get_next(queue);
}
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
if(lsap)
irlmp_update_cache(self, lsap);
#endif
spin_unlock_irqrestore(&queue->hb_spinlock, flags);
/* Sorry not found! */
return NULL;
/* Return what we've found or NULL */
return lsap;
}
......@@ -34,11 +34,412 @@
*
********************************************************************/
/*
* NOTE :
* There are various problems with this package :
* o the hash function for ints is pathetic (but could be changed)
* o locking is sometimes suspicious (especially during enumeration)
* o most users have only a few elements (== overhead)
* o most users never use search, so don't benefit from hashing
* Problems already fixed :
* o not 64 bit compliant (most users do hashv = (int) self)
* o hashbin_remove() is broken => use hashbin_remove_this()
* I think most users would be better served by a simple linked list
* (like include/linux/list.h) with a global spinlock per list.
* Jean II
*/
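As a rough illustration of the simpler alternative suggested above (purely hypothetical, not part of the patch), a plain include/linux/list.h list guarded by one spinlock might look like the sketch below; my_item, my_list and the helper functions are invented names.

/* Hypothetical sketch of the "simple linked list + one spinlock" scheme
 * mentioned above, using the standard list.h helpers of that era. */
#include <linux/list.h>
#include <linux/spinlock.h>

struct my_item {
	struct list_head node;		/* linkage into my_list */
	long key;
};

static LIST_HEAD(my_list);				/* the whole repository */
static spinlock_t my_list_lock = SPIN_LOCK_UNLOCKED;	/* one lock per list */

static void my_item_add(struct my_item *item)
{
	unsigned long flags;

	spin_lock_irqsave(&my_list_lock, flags);
	list_add(&item->node, &my_list);
	spin_unlock_irqrestore(&my_list_lock, flags);
}

static struct my_item *my_item_find(long key)
{
	struct my_item *item;
	struct list_head *pos;
	unsigned long flags;

	spin_lock_irqsave(&my_list_lock, flags);
	list_for_each(pos, &my_list) {
		item = list_entry(pos, struct my_item, node);
		if (item->key == key)
			goto out;	/* found, return with the lock dropped */
	}
	item = NULL;			/* not found */
out:
	spin_unlock_irqrestore(&my_list_lock, flags);
	return item;
}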
/*
* Notes on the concurrent access to hashbin and other SMP issues
* -------------------------------------------------------------
* Hashbins are very often in the IrDA stack a global repository of
* information, and therefore used in a very asynchronous manner following
* various events (driver calls, timers, user calls...).
* Therefore, very often it is highly important to consider the
* management of concurrent access to the hashbin and how to guarantee the
* consistency of the operations on it.
*
* First, we need to define the objective of locking :
* 1) Protect user data (content pointed by the hashbin)
* 2) Protect hashbin structure itself (linked list in each bin)
*
* OLD LOCKING
* -----------
*
* The previous locking strategies, HB_LOCAL and HB_GLOBAL, were
* both inadequate in *both* aspects.
* o HB_GLOBAL was using a spinlock for each bin (local locking).
* o HB_LOCAL was disabling irq on *all* CPUs, so it used a single
* global semaphore.
* The problems were :
* A) Global irq disabling is no longer supported by the kernel
* B) No protection for the hashbin struct global data
* o hashbin_delete()
* o hb_current
* C) No protection for user data in some cases
*
* A) HB_LOCAL uses global irq disabling, so it doesn't work on kernel
* 2.5.X. Even when it is supported (kernel 2.4.X and earlier), its
* performance is not satisfactory on SMP setups. Most hashbins were
* HB_LOCAL, so (A) definitely needs fixing.
* B) HB_LOCAL could be modified to fix (B). However, because HB_GLOBAL
* locks only the individual bins, it will never be able to lock the
* global data, so it can't do (B).
* C) Some functions return pointer to data that is still in the
* hashbin :
* o hashbin_find()
* o hashbin_get_first()
* o hashbin_get_next()
* As the data is still in the hashbin, it may be changed or free'd
* while the caller is examining the data. In those cases, locking can't
* be done within the hashbin, but must include use of the data within
* the caller.
* The caller can easily do this with HB_LOCAL (just disable irqs).
* However, this is impossible with HB_GLOBAL because the caller has no
* way to know the proper bin, so it doesn't know which spinlock to use.
*
* Quick summary : can no longer use HB_LOCAL, and HB_GLOBAL is
* fundamentally broken and will never work.
*
* NEW LOCKING
* -----------
*
* To fix those problems, I've introduced a few changes in the
* hashbin locking :
* 1) New HB_LOCK scheme
* 2) hashbin->hb_spinlock
* 3) New hashbin usage policy
*
* HB_LOCK :
* -------
* HB_LOCK is a locking scheme intermediate between the old HB_LOCAL
* and HB_GLOBAL. It uses a single spinlock to protect the whole content
* of the hashbin. As it is a single spinlock, it can protect the global
* data of the hashbin and not only the bins themselves.
* HB_LOCK can only protect some of the hashbin calls, so it only locks
* calls that can be made 100% safe and leaves other calls unprotected.
* HB_LOCK in theory is slower than HB_GLOBAL, but as the hashbin
* content is always small, contention is not high, so it doesn't matter
* much. HB_LOCK is probably faster than HB_LOCAL.
*
* hashbin->hb_spinlock :
* --------------------
* The spinlock that HB_LOCK uses is available to the caller, so that
* the caller can protect unprotected calls (see below).
* If the caller wants to do its own locking entirely (HB_NOLOCK), it
* can do so and may safely use this spinlock.
* Locking is done like this :
* spin_lock_irqsave(&hashbin->hb_spinlock, flags);
* Releasing the lock :
* spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
*
* Safe & Protected calls :
* ----------------------
* The following calls are safe or protected via HB_LOCK :
* o hashbin_new() -> safe
* o hashbin_delete()
* o hashbin_insert()
* o hashbin_remove_first()
* o hashbin_remove()
* o hashbin_remove_this()
* o HASHBIN_GET_SIZE() -> atomic
*
* The following calls only protect the hashbin itself :
* o hashbin_lock_find()
* o hashbin_find_next()
*
* Unprotected calls :
* -----------------
* The following calls need to be protected by the caller :
* o hashbin_find()
* o hashbin_get_first()
* o hashbin_get_next()
*
* Locking Policy :
* --------------
* If the hashbin is used only in a single thread of execution
* (explicitly or implicitly), you can use HB_NOLOCK.
* If the calling module already provides concurrent access protection,
* you may use HB_NOLOCK.
*
* In all other cases, you need to use HB_LOCK and lock the hashbin
* every time before calling one of the unprotected calls. You must also
* use the pointer returned by the unprotected call within the locked
* region.
*
* Extra care for enumeration :
* --------------------------
* hashbin_get_first() and hashbin_get_next() use the hashbin to
* store the current position, in hb_current.
* As long as the hashbin remains locked, this is safe. If you unlock
* the hashbin, the current position may change if anybody else modifies
* or enumerates the hashbin.
* Summary : do the full enumeration while locked.
*
* Alternatively, you may use hashbin_find_next(). But, this will
* be slower, is more complex to use and doesn't protect the hashbin
* content. So, care is needed here as well.
*
* Other issues :
* ------------
* I believe that we are overdoing it by using spin_lock_irqsave()
* and we should use only spin_lock_bh() or similar. But, I don't have
* the balls to try it out.
* Don't believe that because hashbins are now (somewhat) SMP safe
* that the rest of the code is. Higher layers tend to be safest,
* but LAP and LMP would need some serious dedicated love.
*
* Jean II
*/
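To make the policy above concrete, here is a hedged caller-side sketch (not taken from the patch) of a complete enumeration done inside one critical section of hb_spinlock, as the "Extra care for enumeration" note requires; struct my_entry and my_handle_entry() are invented names.

/* Hypothetical caller of a HB_LOCK hashbin: hashbin_get_first() and
 * hashbin_get_next() keep the cursor (hb_current) inside the hashbin,
 * so the whole walk must stay under the hashbin's own spinlock. */
static void my_dump_hashbin(hashbin_t *hashbin)
{
	struct my_entry *entry;		/* hypothetical element type */
	unsigned long flags;

	spin_lock_irqsave(&hashbin->hb_spinlock, flags);

	entry = (struct my_entry *) hashbin_get_first(hashbin);
	while (entry != NULL) {
		/* Pointers into the hashbin are only valid while locked. */
		my_handle_entry(entry);

		entry = (struct my_entry *) hashbin_get_next(hashbin);
	}

	spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
}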
#include <net/irda/irda.h>
#include <net/irda/irqueue.h>
static irda_queue_t *dequeue_general( irda_queue_t **queue, irda_queue_t* element);
static __u32 hash( char* name);
/************************ QUEUE SUBROUTINES ************************/
/*
* Hashbin
*/
#define GET_HASHBIN(x) ( x & HASHBIN_MASK )
/*
* Function hash (name)
*
* This function hashes the input string 'name' using the ELF hash
* function for strings.
*/
static __u32 hash( char* name)
{
__u32 h = 0;
__u32 g;
while(*name) {
h = (h<<4) + *name++;
if ((g = (h & 0xf0000000)))
h ^=g>>24;
h &=~g;
}
return h;
}
/*
* Function enqueue_first (queue, proc)
*
* Insert item first in queue.
*
*/
static void enqueue_first(irda_queue_t **queue, irda_queue_t* element)
{
IRDA_DEBUG( 4, __FUNCTION__ "()\n");
/*
* Check if queue is empty.
*/
if ( *queue == NULL ) {
/*
* Queue is empty. Insert one element into the queue.
*/
element->q_next = element->q_prev = *queue = element;
} else {
/*
* Queue is not empty. Insert element into front of queue.
*/
element->q_next = (*queue);
(*queue)->q_prev->q_next = element;
element->q_prev = (*queue)->q_prev;
(*queue)->q_prev = element;
(*queue) = element;
}
}
#ifdef HASHBIN_UNUSED
/*
* Function enqueue_last (queue, proc)
*
* Insert item into end of queue.
*
*/
static void __enqueue_last( irda_queue_t **queue, irda_queue_t* element)
{
IRDA_DEBUG( 4, __FUNCTION__ "()\n");
/*
* Check if queue is empty.
*/
if ( *queue == NULL ) {
/*
* Queue is empty. Insert one element into the queue.
*/
element->q_next = element->q_prev = *queue = element;
} else {
/*
* Queue is not empty. Insert element into end of queue.
*/
element->q_prev = (*queue)->q_prev;
element->q_prev->q_next = element;
(*queue)->q_prev = element;
element->q_next = *queue;
}
}
static inline void enqueue_last( irda_queue_t **queue, irda_queue_t* element)
{
unsigned long flags;
save_flags(flags);
cli();
__enqueue_last( queue, element);
restore_flags(flags);
}
/*
* Function enqueue_queue (queue, list)
*
* Insert a queue (list) into the start of the first queue
*
*/
static void enqueue_queue( irda_queue_t** queue, irda_queue_t** list )
{
irda_queue_t* tmp;
/*
* Check if queue is empty
*/
if ( *queue ) {
(*list)->q_prev->q_next = (*queue);
(*queue)->q_prev->q_next = (*list);
tmp = (*list)->q_prev;
(*list)->q_prev = (*queue)->q_prev;
(*queue)->q_prev = tmp;
} else {
*queue = (*list);
}
(*list) = NULL;
}
/*
* Function enqueue_second (queue, proc)
*
* Insert item behind head of queue.
*
*/
static void enqueue_second(irda_queue_t **queue, irda_queue_t* element)
{
IRDA_DEBUG( 0, "enqueue_second()\n");
/*
* Check if queue is empty.
*/
if ( *queue == NULL ) {
/*
* Queue is empty. Insert one element into the queue.
*/
element->q_next = element->q_prev = *queue = element;
} else {
/*
* Queue is not empty. Insert element behind head of queue.
*/
element->q_prev = (*queue);
(*queue)->q_next->q_prev = element;
element->q_next = (*queue)->q_next;
(*queue)->q_next = element;
}
}
#endif /* HASHBIN_UNUSED */
/*
* Function dequeue (queue)
*
* Remove first entry in queue
*
*/
static irda_queue_t *dequeue_first(irda_queue_t **queue)
{
irda_queue_t *ret;
IRDA_DEBUG( 4, "dequeue_first()\n");
/*
* Set return value
*/
ret = *queue;
if ( *queue == NULL ) {
/*
* Queue was empty.
*/
} else if ( (*queue)->q_next == *queue ) {
/*
* Queue only contained a single element. It will now be
* empty.
*/
*queue = NULL;
} else {
/*
* Queue contained several elements. Remove the first one.
*/
(*queue)->q_prev->q_next = (*queue)->q_next;
(*queue)->q_next->q_prev = (*queue)->q_prev;
*queue = (*queue)->q_next;
}
/*
* Return the removed entry (or NULL if the queue was empty).
*/
return ret;
}
/*
* Function dequeue_general (queue, element)
*
*
*/
static irda_queue_t *dequeue_general(irda_queue_t **queue, irda_queue_t* element)
{
irda_queue_t *ret;
IRDA_DEBUG( 4, "dequeue_general()\n");
/*
* Set return value
*/
ret = *queue;
if ( *queue == NULL ) {
/*
* Queue was empty.
*/
} else if ( (*queue)->q_next == *queue ) {
/*
* Queue only contained a single element. It will now be
* empty.
*/
*queue = NULL;
} else {
/*
* Remove specific element.
*/
element->q_prev->q_next = element->q_next;
element->q_next->q_prev = element->q_prev;
if ( (*queue) == element)
(*queue) = element->q_next;
}
/*
* Return the removed entry (or NULL if the queue was empty).
*/
return ret;
}
/************************ HASHBIN MANAGEMENT ************************/
/*
* Function hashbin_create ( type, name )
......@@ -49,7 +450,6 @@ static __u32 hash( char* name);
hashbin_t *hashbin_new(int type)
{
hashbin_t* hashbin;
int i;
/*
* Allocate new hashbin
......@@ -64,14 +464,17 @@ hashbin_t *hashbin_new(int type)
memset(hashbin, 0, sizeof(hashbin_t));
hashbin->hb_type = type;
hashbin->magic = HB_MAGIC;
//hashbin->hb_current = NULL;
/* Make sure all spinlock's are unlocked */
for (i=0;i<HASHBIN_SIZE;i++)
hashbin->hb_mutex[i] = SPIN_LOCK_UNLOCKED;
if ( hashbin->hb_type & HB_LOCK ) {
spin_lock_init(&hashbin->hb_spinlock);
}
return hashbin;
}
#ifdef HASHBIN_UNUSED
/*
* Function hashbin_clear (hashbin, free_func)
*
......@@ -102,7 +505,7 @@ int hashbin_clear( hashbin_t* hashbin, FREE_FUNC free_func)
return 0;
}
#endif /* HASHBIN_UNUSED */
/*
* Function hashbin_delete (hashbin, free_func)
......@@ -114,11 +517,17 @@ int hashbin_clear( hashbin_t* hashbin, FREE_FUNC free_func)
int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
{
irda_queue_t* queue;
unsigned long flags = 0;
int i;
ASSERT(hashbin != NULL, return -1;);
ASSERT(hashbin->magic == HB_MAGIC, return -1;);
/* Synchronize */
if ( hashbin->hb_type & HB_LOCK ) {
spin_lock_irqsave(&hashbin->hb_spinlock, flags);
}
/*
* Free the entries in the hashbin, TODO: use hashbin_clear when
* it has been shown to work
......@@ -133,22 +542,32 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
}
}
/* Cleanup local data */
hashbin->hb_current = NULL;
hashbin->magic = ~HB_MAGIC;
/* Release lock */
if ( hashbin->hb_type & HB_LOCK) {
spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
}
/*
* Free the hashbin structure
*/
hashbin->magic = ~HB_MAGIC;
kfree(hashbin);
return 0;
}
/********************* HASHBIN LIST OPERATIONS *********************/
/*
* Function hashbin_insert (hashbin, entry, name)
*
* Insert an entry into the hashbin
*
*/
void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, __u32 hashv, char* name)
void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv, char* name)
{
unsigned long flags = 0;
int bin;
......@@ -166,12 +585,8 @@ void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, __u32 hashv, char*
bin = GET_HASHBIN( hashv );
/* Synchronize */
if ( hashbin->hb_type & HB_GLOBAL ) {
spin_lock_irqsave( &hashbin->hb_mutex[ bin ], flags);
} else if ( hashbin->hb_type & HB_LOCAL ) {
save_flags(flags);
cli();
if ( hashbin->hb_type & HB_LOCK ) {
spin_lock_irqsave(&hashbin->hb_spinlock, flags);
} /* Default is no-lock */
/*
......@@ -194,102 +609,61 @@ void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, __u32 hashv, char*
hashbin->hb_size++;
/* Release lock */
if ( hashbin->hb_type & HB_GLOBAL) {
spin_unlock_irqrestore( &hashbin->hb_mutex[ bin], flags);
} else if ( hashbin->hb_type & HB_LOCAL) {
restore_flags( flags);
}
if ( hashbin->hb_type & HB_LOCK ) {
spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
} /* Default is no-lock */
}
/*
* Function hashbin_find (hashbin, hashv, name)
/*
* Function hashbin_remove_first (hashbin)
*
* Find item with the given hashv or name
* Remove first entry of the hashbin
*
* Note : this function no longer uses hashbin_remove(), but does things
* similar to hashbin_remove_this(), so can be considered safe.
* Jean II
*/
void* hashbin_find( hashbin_t* hashbin, __u32 hashv, char* name )
void *hashbin_remove_first( hashbin_t *hashbin)
{
int bin, found = FALSE;
unsigned long flags = 0;
irda_queue_t* entry;
IRDA_DEBUG( 4, "hashbin_find()\n");
ASSERT( hashbin != NULL, return NULL;);
ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
irda_queue_t *entry = NULL;
/*
* Locate hashbin
*/
if ( name )
hashv = hash( name );
bin = GET_HASHBIN( hashv );
/* Synchronize */
if ( hashbin->hb_type & HB_GLOBAL ) {
spin_lock_irqsave( &hashbin->hb_mutex[ bin ], flags);
} else if ( hashbin->hb_type & HB_LOCAL ) {
save_flags(flags);
cli();
if ( hashbin->hb_type & HB_LOCK ) {
spin_lock_irqsave(&hashbin->hb_spinlock, flags);
} /* Default is no-lock */
/*
* Search for entry
*/
entry = hashbin->hb_queue[ bin];
if ( entry ) {
do {
/*
* Check for key
*/
if ( entry->q_hash == hashv ) {
/*
* Name compare too?
*/
if ( name ) {
if ( strcmp( entry->q_name, name ) == 0 ) {
found = TRUE;
break;
}
} else {
found = TRUE;
break;
}
}
entry = entry->q_next;
} while ( entry != hashbin->hb_queue[ bin ] );
}
/* Release lock */
if ( hashbin->hb_type & HB_GLOBAL) {
spin_unlock_irqrestore( &hashbin->hb_mutex[ bin], flags);
} else if ( hashbin->hb_type & HB_LOCAL) {
restore_flags( flags);
}
if ( found )
return entry;
else
return NULL;
}
void *hashbin_remove_first( hashbin_t *hashbin)
{
unsigned long flags;
irda_queue_t *entry = NULL;
entry = hashbin_get_first( hashbin);
if ( entry != NULL) {
int bin;
long hashv;
/*
* Locate hashbin
*/
hashv = entry->q_hash;
bin = GET_HASHBIN( hashv );
save_flags(flags);
cli();
/*
* Dequeue the entry...
*/
dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
(irda_queue_t*) entry );
hashbin->hb_size--;
entry->q_next = NULL;
entry->q_prev = NULL;
entry = hashbin_get_first( hashbin);
if ( entry != NULL)
hashbin_remove( hashbin, entry->q_hash, NULL);
/*
* Check if this item is the currently selected item, and in
* that case we must reset hb_current
*/
if ( entry == hashbin->hb_current)
hashbin->hb_current = NULL;
}
restore_flags( flags);
/* Release lock */
if ( hashbin->hb_type & HB_LOCK ) {
spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
} /* Default is no-lock */
return entry;
}
......@@ -300,8 +674,16 @@ void *hashbin_remove_first( hashbin_t *hashbin)
*
* Remove entry with the given name
*
* The use of this function is highly discouraged, because the whole
* concept behind hashbin_remove() is broken. In many cases, it's not
* possible to guarantee the uniqueness of the index (either hashv or name),
* leading to removing the WRONG entry.
* The only simple safe use is :
* hashbin_remove(hashbin, (long) self, NULL);
* In other cases, you must think hard to guarantee uniqueness of the index.
* Jean II
*/
void* hashbin_remove( hashbin_t* hashbin, __u32 hashv, char* name)
void* hashbin_remove( hashbin_t* hashbin, long hashv, char* name)
{
int bin, found = FALSE;
unsigned long flags = 0;
......@@ -320,12 +702,8 @@ void* hashbin_remove( hashbin_t* hashbin, __u32 hashv, char* name)
bin = GET_HASHBIN( hashv );
/* Synchronize */
if ( hashbin->hb_type & HB_GLOBAL ) {
spin_lock_irqsave( &hashbin->hb_mutex[ bin ], flags);
} else if ( hashbin->hb_type & HB_LOCAL ) {
save_flags(flags);
cli();
if ( hashbin->hb_type & HB_LOCK ) {
spin_lock_irqsave(&hashbin->hb_spinlock, flags);
} /* Default is no-lock */
/*
......@@ -373,12 +751,9 @@ void* hashbin_remove( hashbin_t* hashbin, __u32 hashv, char* name)
}
/* Release lock */
if ( hashbin->hb_type & HB_GLOBAL) {
spin_unlock_irqrestore( &hashbin->hb_mutex[ bin], flags);
} else if ( hashbin->hb_type & HB_LOCAL) {
restore_flags( flags);
}
if ( hashbin->hb_type & HB_LOCK ) {
spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
} /* Default is no-lock */
/* Return */
......@@ -390,7 +765,7 @@ void* hashbin_remove( hashbin_t* hashbin, __u32 hashv, char* name)
}
/*
* Function hashbin_remove (hashbin, hashv, name)
* Function hashbin_remove_this (hashbin, entry)
*
* Remove entry with the given name
*
......@@ -404,7 +779,7 @@ void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry)
{
unsigned long flags = 0;
int bin;
__u32 hashv;
long hashv;
IRDA_DEBUG( 4, __FUNCTION__ "()\n");
......@@ -412,6 +787,11 @@ void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry)
ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
ASSERT( entry != NULL, return NULL;);
/* Synchronize */
if ( hashbin->hb_type & HB_LOCK ) {
spin_lock_irqsave(&hashbin->hb_spinlock, flags);
} /* Default is no-lock */
/* Check if valid and not already removed... */
if((entry->q_next == NULL) || (entry->q_prev == NULL)) {
	if ( hashbin->hb_type & HB_LOCK )
		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
	return NULL;
}
......@@ -422,38 +802,148 @@ void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry)
hashv = entry->q_hash;
bin = GET_HASHBIN( hashv );
/* Synchronize */
if ( hashbin->hb_type & HB_GLOBAL ) {
spin_lock_irqsave( &hashbin->hb_mutex[ bin ], flags);
/*
* Dequeue the entry...
*/
dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
(irda_queue_t*) entry );
hashbin->hb_size--;
entry->q_next = NULL;
entry->q_prev = NULL;
/*
* Check if this item is the currently selected item, and in
* that case we must reset hb_current
*/
if ( entry == hashbin->hb_current)
hashbin->hb_current = NULL;
/* Release lock */
if ( hashbin->hb_type & HB_LOCK ) {
spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
} /* Default is no-lock */
return entry;
}
/*********************** HASHBIN ENUMERATION ***********************/
/*
* Function hashbin_find (hashbin, hashv, name)
*
* Find item with the given hashv or name
*
*/
void* hashbin_find( hashbin_t* hashbin, long hashv, char* name )
{
int bin;
irda_queue_t* entry;
IRDA_DEBUG( 4, "hashbin_find()\n");
ASSERT( hashbin != NULL, return NULL;);
ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
/*
* Locate hashbin
*/
if ( name )
hashv = hash( name );
bin = GET_HASHBIN( hashv );
/*
* Search for entry
*/
entry = hashbin->hb_queue[ bin];
if ( entry ) {
do {
/*
* Check for key
*/
if ( entry->q_hash == hashv ) {
/*
* Name compare too?
*/
if ( name ) {
if ( strcmp( entry->q_name, name ) == 0 ) {
return entry;
}
} else {
return entry;
}
}
entry = entry->q_next;
} while ( entry != hashbin->hb_queue[ bin ] );
}
return NULL;
}
/*
* Function hashbin_lock_find (hashbin, hashv, name)
*
* Find item with the given hashv or name
*
* Same, but with spinlock protection...
* I call it safe, but it's only safe with respect to the hashbin, not its
* content. - Jean II
*/
void* hashbin_lock_find( hashbin_t* hashbin, long hashv, char* name )
{
unsigned long flags = 0;
irda_queue_t* entry;
/* Synchronize */
spin_lock_irqsave(&hashbin->hb_spinlock, flags);
/*
* Search for entry
*/
entry = (irda_queue_t* ) hashbin_find( hashbin, hashv, name );
/* Release lock */
spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
return entry;
}
/*
* Function hashbin_find_next (hashbin, hashv, name, pnext)
*
* Find an item with the given hashv or name, and its successor
*
* This function allows concurrent enumerations without the
* need to lock over the whole session, because the caller keeps the
* context of the search. On the other hand, it might fail and return
* NULL if the entry is removed. - Jean II
*/
void* hashbin_find_next( hashbin_t* hashbin, long hashv, char* name,
void ** pnext)
{
unsigned long flags = 0;
irda_queue_t* entry;
} else if ( hashbin->hb_type & HB_LOCAL ) {
save_flags(flags);
cli();
} /* Default is no-lock */
/* Synchronize */
spin_lock_irqsave(&hashbin->hb_spinlock, flags);
/*
* Dequeue the entry...
* Search for current entry
* This allows us to check whether the current item is still in the
* hashbin or has been removed.
*/
dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
(irda_queue_t*) entry );
hashbin->hb_size--;
entry->q_next = NULL;
entry->q_prev = NULL;
entry = (irda_queue_t* ) hashbin_find( hashbin, hashv, name );
/*
* Check if this item is the currently selected item, and in
* that case we must reset hb_current
* Trick hashbin_get_next() to return what we want
*/
if ( entry == hashbin->hb_current)
hashbin->hb_current = NULL;
if(entry) {
hashbin->hb_current = entry;
*pnext = hashbin_get_next( hashbin );
} else
*pnext = NULL;
/* Release lock */
if ( hashbin->hb_type & HB_GLOBAL) {
spin_unlock_irqrestore( &hashbin->hb_mutex[ bin], flags);
} else if ( hashbin->hb_type & HB_LOCAL) {
restore_flags( flags);
}
spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
return entry;
}
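A hedged usage sketch (not part of the patch): with hashbin_find_next() the caller keeps its own cursor, staying one element ahead exactly as irlmp_do_all_lsap_event() does earlier in this patch, so the element currently being processed may safely disappear; struct my_entry and my_handle_entry() are invented names.

/* Hypothetical caller: enumerate without holding the lock across the
 * callback, tolerating removal of the element currently processed. */
static void my_walk_hashbin(hashbin_t *hashbin)
{
	struct my_entry *curr;
	struct my_entry *next;

	curr = (struct my_entry *) hashbin_get_first(hashbin);
	while (NULL != hashbin_find_next(hashbin, (long) curr, NULL,
					 (void *) &next)) {
		my_handle_entry(curr);	/* may remove curr from the hashbin */
		curr = next;
	}
}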
......@@ -496,6 +986,8 @@ irda_queue_t *hashbin_get_first( hashbin_t* hashbin)
* be started by a call to hashbin_get_first(). The function returns
* NULL when all items have been traversed
*
* The context of the search is stored within the hashbin, so you must
* protect yourself from concurrent enumerations. - Jean II
*/
irda_queue_t *hashbin_get_next( hashbin_t *hashbin)
{
......@@ -543,240 +1035,3 @@ irda_queue_t *hashbin_get_next( hashbin_t *hashbin)
}
return NULL;
}
/*
* Function enqueue_last (queue, proc)
*
* Insert item into end of queue.
*
*/
static void __enqueue_last( irda_queue_t **queue, irda_queue_t* element)
{
IRDA_DEBUG( 4, __FUNCTION__ "()\n");
/*
* Check if queue is empty.
*/
if ( *queue == NULL ) {
/*
* Queue is empty. Insert one element into the queue.
*/
element->q_next = element->q_prev = *queue = element;
} else {
/*
* Queue is not empty. Insert element into end of queue.
*/
element->q_prev = (*queue)->q_prev;
element->q_prev->q_next = element;
(*queue)->q_prev = element;
element->q_next = *queue;
}
}
inline void enqueue_last( irda_queue_t **queue, irda_queue_t* element)
{
unsigned long flags;
save_flags(flags);
cli();
__enqueue_last( queue, element);
restore_flags(flags);
}
/*
* Function enqueue_first (queue, proc)
*
* Insert item first in queue.
*
*/
void enqueue_first(irda_queue_t **queue, irda_queue_t* element)
{
IRDA_DEBUG( 4, __FUNCTION__ "()\n");
/*
* Check if queue is empty.
*/
if ( *queue == NULL ) {
/*
* Queue is empty. Insert one element into the queue.
*/
element->q_next = element->q_prev = *queue = element;
} else {
/*
* Queue is not empty. Insert element into front of queue.
*/
element->q_next = (*queue);
(*queue)->q_prev->q_next = element;
element->q_prev = (*queue)->q_prev;
(*queue)->q_prev = element;
(*queue) = element;
}
}
/*
* Function enqueue_queue (queue, list)
*
* Insert a queue (list) into the start of the first queue
*
*/
void enqueue_queue( irda_queue_t** queue, irda_queue_t** list )
{
irda_queue_t* tmp;
/*
* Check if queue is empty
*/
if ( *queue ) {
(*list)->q_prev->q_next = (*queue);
(*queue)->q_prev->q_next = (*list);
tmp = (*list)->q_prev;
(*list)->q_prev = (*queue)->q_prev;
(*queue)->q_prev = tmp;
} else {
*queue = (*list);
}
(*list) = NULL;
}
/*
* Function enqueue_second (queue, proc)
*
* Insert item behind head of queue.
*
*/
#if 0
static void enqueue_second(irda_queue_t **queue, irda_queue_t* element)
{
IRDA_DEBUG( 0, "enqueue_second()\n");
/*
* Check if queue is empty.
*/
if ( *queue == NULL ) {
/*
* Queue is empty. Insert one element into the queue.
*/
element->q_next = element->q_prev = *queue = element;
} else {
/*
* Queue is not empty. Insert element into ..
*/
element->q_prev = (*queue);
(*queue)->q_next->q_prev = element;
element->q_next = (*queue)->q_next;
(*queue)->q_next = element;
}
}
#endif
/*
* Function dequeue (queue)
*
* Remove first entry in queue
*
*/
irda_queue_t *dequeue_first(irda_queue_t **queue)
{
irda_queue_t *ret;
IRDA_DEBUG( 4, "dequeue_first()\n");
/*
* Set return value
*/
ret = *queue;
if ( *queue == NULL ) {
/*
* Queue was empty.
*/
} else if ( (*queue)->q_next == *queue ) {
/*
* Queue only contained a single element. It will now be
* empty.
*/
*queue = NULL;
} else {
/*
* Queue contained several elements. Remove the first one.
*/
(*queue)->q_prev->q_next = (*queue)->q_next;
(*queue)->q_next->q_prev = (*queue)->q_prev;
*queue = (*queue)->q_next;
}
/*
* Return the removed entry (or NULL if the queue was empty).
*/
return ret;
}
/*
* Function dequeue_general (queue, element)
*
*
*/
static irda_queue_t *dequeue_general(irda_queue_t **queue, irda_queue_t* element)
{
irda_queue_t *ret;
IRDA_DEBUG( 4, "dequeue_general()\n");
/*
* Set return value
*/
ret = *queue;
if ( *queue == NULL ) {
/*
* Queue was empty.
*/
} else if ( (*queue)->q_next == *queue ) {
/*
* Queue only contained a single element. It will now be
* empty.
*/
*queue = NULL;
} else {
/*
* Remove specific element.
*/
element->q_prev->q_next = element->q_next;
element->q_next->q_prev = element->q_prev;
if ( (*queue) == element)
(*queue) = element->q_next;
}
/*
* Return the removed entry (or NULL if the queue was empty).
*/
return ret;
}
/*
* Function hash (name)
*
* This function hashes the input string 'name' using the ELF hash
* function for strings.
*/
static __u32 hash( char* name)
{
__u32 h = 0;
__u32 g;
while(*name) {
h = (h<<4) + *name++;
if ((g = (h & 0xf0000000)))
h ^=g>>24;
h &=~g;
}
return h;
}
......@@ -132,12 +132,14 @@ EXPORT_SYMBOL(irlmp_dup);
EXPORT_SYMBOL(lmp_reasons);
/* Queue */
EXPORT_SYMBOL(hashbin_find);
EXPORT_SYMBOL(hashbin_new);
EXPORT_SYMBOL(hashbin_insert);
EXPORT_SYMBOL(hashbin_delete);
EXPORT_SYMBOL(hashbin_remove);
EXPORT_SYMBOL(hashbin_remove_this);
EXPORT_SYMBOL(hashbin_find);
EXPORT_SYMBOL(hashbin_lock_find);
EXPORT_SYMBOL(hashbin_find_next);
EXPORT_SYMBOL(hashbin_get_next);
EXPORT_SYMBOL(hashbin_get_first);
......@@ -328,7 +330,8 @@ void __exit irda_cleanup(void)
* On the other hand, it needs to be initialised *after* the basic
* networking, the /proc/net filesystem and sysctl module. Those are
* currently initialised in .../init/main.c (before initcalls).
* Also, it needs to be initialised *after* the random number generator.
* Also, IrDA drivers need to be initialised *after* the random number
* generator (main stack and higher layer init don't need it anymore).
*
* Jean II
*/
......
......@@ -91,7 +91,7 @@ int __init irttp_init(void)
irttp->magic = TTP_MAGIC;
irttp->tsaps = hashbin_new(HB_LOCAL);
irttp->tsaps = hashbin_new(HB_LOCK);
if (!irttp->tsaps) {
ERROR("%s: can't allocate IrTTP hashbin!\n", __FUNCTION__);
return -ENOMEM;
......@@ -433,7 +433,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
self->notify = *notify;
self->lsap = lsap;
hashbin_insert(irttp->tsaps, (irda_queue_t *) self, (int) self, NULL);
hashbin_insert(irttp->tsaps, (irda_queue_t *) self, (long) self, NULL);
if (credit > TTP_RX_MAX_CREDIT)
self->initial_credit = TTP_RX_MAX_CREDIT;
......@@ -503,7 +503,7 @@ int irttp_close_tsap(struct tsap_cb *self)
return 0; /* Will be back! */
}
tsap = hashbin_remove(irttp->tsaps, (int) self, NULL);
tsap = hashbin_remove(irttp->tsaps, (long) self, NULL);
ASSERT(tsap == self, return -1;);
......@@ -1365,31 +1365,44 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance)
{
struct tsap_cb *new;
unsigned long flags;
IRDA_DEBUG(1, __FUNCTION__ "()\n");
/* Protect our access to the old tsap instance */
spin_lock_irqsave(&irttp->tsaps->hb_spinlock, flags);
/* Find the old instance */
if (!hashbin_find(irttp->tsaps, (int) orig, NULL)) {
IRDA_DEBUG(0, __FUNCTION__ "(), unable to find TSAP\n");
spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
return NULL;
}
/* Allocate a new instance */
new = kmalloc(sizeof(struct tsap_cb), GFP_ATOMIC);
if (!new) {
IRDA_DEBUG(0, __FUNCTION__ "(), unable to kmalloc\n");
spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
return NULL;
}
/* Dup */
memcpy(new, orig, sizeof(struct tsap_cb));
new->notify.instance = instance;
new->lsap = irlmp_dup(orig->lsap, new);
/* We don't need the old instance any more */
spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
/* Not everything should be copied */
new->notify.instance = instance;
new->lsap = irlmp_dup(orig->lsap, new);
init_timer(&new->todo_timer);
skb_queue_head_init(&new->rx_queue);
skb_queue_head_init(&new->tx_queue);
skb_queue_head_init(&new->rx_fragments);
hashbin_insert(irttp->tsaps, (irda_queue_t *) new, (int) new, NULL);
/* This is locked */
hashbin_insert(irttp->tsaps, (irda_queue_t *) new, (long) new, NULL);
return new;
}
......@@ -1723,8 +1736,8 @@ int irttp_proc_read(char *buf, char **start, off_t offset, int len)
len = 0;
save_flags(flags);
cli();
/* Protect our access to the tsap list */
spin_lock_irqsave(&irttp->tsaps->hb_spinlock, flags);
self = (struct tsap_cb *) hashbin_get_first(irttp->tsaps);
while (self != NULL) {
......@@ -1770,7 +1783,7 @@ int irttp_proc_read(char *buf, char **start, off_t offset, int len)
self = (struct tsap_cb *) hashbin_get_next(irttp->tsaps);
}
restore_flags(flags);
spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags);
return len;
}
......