Commit 2d1a3871 authored by Linus Torvalds

Merge http://gkernel.bkbits.net/irda-2.5

into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
parents 9959faa8 d1230b63
@@ -55,12 +55,22 @@ CONFIG_NSC_FIR
<file:Documentation/modules.txt>. The module will be called
nsc-ircc.o.
CONFIG_TOSHIBA_OLD
Say Y here if you want to build support for the Toshiba Type-O IR
chipset. This chipset is used by the Toshiba Libretto 100CT and
many more laptops. This driver is obsolete and no longer
maintained; it will be removed in favor of the new driver.
If you want to compile it as a module, say M here and read
<file:Documentation/modules.txt>.
The module will be called toshoboe.o.
CONFIG_TOSHIBA_FIR
Say Y here if you want to build support for the Toshiba Type-O IR
and Donau oboe chipsets. These chipsets are used by the Toshiba
Libretto 100/110CT, Tecra 8100, Portege 7020 and many more laptops.
If you want to compile it as a module, say M here and read
<file:Documentation/modules.txt>.
The module will be called donauboe.o.
CONFIG_SMC_IRCC_FIR
Say Y here if you want to build support for the SMC Infrared
@@ -165,3 +175,18 @@ CONFIG_ACT200L_DONGLE
the normal 9-pin serial port connector, and can currently only be
used by IrTTY. To activate support for ACTiSYS IR-200L dongles
you will have to start irattach like this: "irattach -d act200l".
Mobile Action MA600 dongle (Experimental)
CONFIG_MA600_DONGLE
Say Y here if you want to build support for the Mobile Action MA600
dongle. If you want to compile it as a module, say M here and read
<file:Documentation/modules.txt>. The MA600 dongle attaches to
the normal 9-pin serial port connector, and can currently only be
tested on IrCOMM. To activate support for MA600 dongles you will
have to insert "irattach -d ma600" in the /etc/irda/drivers script.
Note: irutils 0.9.15 requires no modification; irutils 0.9.9 needs
modification. For more information, download the tar-gzip file below.
There is a pre-compiled module at
<http://engsvr.ust.hk/~eetwl95/download/ma600-2.4.x.tar.gz>
@@ -28,6 +28,7 @@ comment 'FIR device drivers'
dep_tristate 'IrDA USB dongles (EXPERIMENTAL)' CONFIG_USB_IRDA $CONFIG_IRDA $CONFIG_USB $CONFIG_EXPERIMENTAL
dep_tristate 'NSC PC87108/PC87338' CONFIG_NSC_FIR $CONFIG_IRDA
dep_tristate 'Winbond W83977AF (IR)' CONFIG_WINBOND_FIR $CONFIG_IRDA
dep_tristate 'Toshiba Type-O IR Port (old driver)' CONFIG_TOSHIBA_OLD $CONFIG_IRDA
dep_tristate 'Toshiba Type-O IR Port' CONFIG_TOSHIBA_FIR $CONFIG_IRDA
if [ "$CONFIG_EXPERIMENTAL" != "n" ]; then
dep_tristate 'SMC IrCC (EXPERIMENTAL)' CONFIG_SMC_IRCC_FIR $CONFIG_IRDA
@@ -13,7 +13,8 @@ obj-$(CONFIG_USB_IRDA) += irda-usb.o
obj-$(CONFIG_NSC_FIR) += nsc-ircc.o
obj-$(CONFIG_WINBOND_FIR) += w83977af_ir.o
obj-$(CONFIG_SA1100_FIR) += sa1100_ir.o
obj-$(CONFIG_TOSHIBA_FIR) += toshoboe.o
obj-$(CONFIG_TOSHIBA_OLD) += toshoboe.o
obj-$(CONFIG_TOSHIBA_FIR) += donauboe.o
obj-$(CONFIG_SMC_IRCC_FIR) += smc-ircc.o irport.o
obj-$(CONFIG_ALI_FIR) += ali-ircc.o
obj-$(CONFIG_VLSI_FIR) += vlsi_ir.o
/*****************************************************************
*
* Filename: donauboe.c
* Version: 2.17
* Description: Driver for the Toshiba OBOE (or type-O or 701)
* FIR Chipset, also supports the DONAUOBOE (type-DO
* or d01) FIR chipset which as far as I know is
* register compatible.
* Status: Experimental.
* Author: James McKenzie <james@fishsoup.dhs.org>
* Created at: Sat May 8 12:35:27 1999
* Modified: Paul Bristow <paul.bristow@technologist.com>
* Modified: Mon Nov 11 19:10:05 1999
* Modified: James McKenzie <james@fishsoup.dhs.org>
* Modified: Thu Mar 16 12:49:00 2000 (Substantial rewrite)
* Modified: Sat Apr 29 00:23:03 2000 (Added DONAUOBOE support)
* Modified: Wed May 24 23:45:02 2000 (Fixed chipio_t structure)
* Modified: 2.13 Christian Gennerat <christian.gennerat@polytechnique.org>
* Modified: 2.13 dim jan 07 21:57:39 2001 (tested with kernel 2.4 & irnet/ppp)
* Modified: 2.14 Christian Gennerat <christian.gennerat@polytechnique.org>
* Modified: 2.14 lun fev 05 17:55:59 2001 (adapted to patch-2.4.1-pre8-irda1)
* Modified: 2.15 Martin Lucina <mato@kotelna.sk>
* Modified: 2.15 Fri Jun 21 20:40:59 2002 (sync with 2.4.18, substantial fixes)
* Modified: 2.16 Martin Lucina <mato@kotelna.sk>
* Modified: 2.16 Sat Jun 22 18:54:29 2002 (fix freeregion, default to verbose)
* Modified: 2.17 Christian Gennerat <christian.gennerat@polytechnique.org>
* Modified: 2.17 jeu sep 12 08:50:20 2002 (save_flags();cli(); replaced by spinlocks)
*
* Copyright (c) 1999 James McKenzie, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* Neither James McKenzie nor Cambridge University admit liability nor
* provide warranty for any of this software. This material is
* provided "AS-IS" and at no charge.
*
* Applicable Models : Libretto 100/110CT and many more.
* Toshiba refers to this chip as the type-O IR port,
* or the type-DO IR port.
*
********************************************************************/
/* Look at donauboe.h (included below) for details of */
/* where to get documentation on the chip */
static char *rcsid =
"$Id: donauboe.c V2.17 jeu sep 12 08:50:20 2002 $";
/* See below for a description of the logic in this driver */
/* Is irda_crc16_table[] exported? not yet */
/* define this if you get errors about multiple defns of irda_crc16_table */
#undef CRC_EXPORTED
/* User serviceable parts */
/* Enable the code which probes the chip and does a few tests */
/* Probe code is very useful for understanding how the hardware works */
/* Use it with various combinations of TT_LEN, RX_LEN */
/* Strongly recommended; disable it if the probe fails on your machine */
/* and send me <james@fishsoup.dhs.org> the output of dmesg */
#define DO_PROBE 1
/* Trace Transmit ring, interrupts, Receive ring or not ? */
#define PROBE_VERBOSE 1
/* Debug option, examine sent and received raw data */
/* Irdadump is better, but does not see all packets. Enable this if you want. */
#undef DUMP_PACKETS
/* MIR mode has not been tested. Some behaviour is different */
/* Seems to work against an Ericsson R520 for me. -Martin */
#define USE_MIR
/* Schedule back-to-back hardware transmits wherever possible, otherwise */
/* we need an interrupt for every frame. Unset this if oboe works for a bit */
/* and then hangs */
#define OPTIMIZE_TX
/* Set the number of slots in the rings */
/* If you get rx/tx fifo overflows at high bitrates, you can try increasing */
/* these */
#define RING_SIZE (OBOE_RING_SIZE_RX8 | OBOE_RING_SIZE_TX8)
#define TX_SLOTS 8
#define RX_SLOTS 8
/* Less user serviceable parts below here */
/* Test, Transmit and receive buffer sizes, adjust at your peril */
/* remarks: nfs usually needs 1k blocks */
/* remarks: in SIR mode, CRC is received, -> RX_LEN=TX_LEN+2 */
/* remarks: test accepts large blocks. Standard is 0x80 */
/* When TT_LEN > RX_LEN (SIR mode) data is stored in successive slots. */
/* When 3 or more slots are needed for each test packet, */
/* data received in the first slots is overwritten, even */
/* if OBOE_CTL_RX_HW_OWNS is not set, without any error! */
#define TT_LEN 0x80
#define TX_LEN 0xc00
#define RX_LEN 0xc04
/* Real transmitted length (SIR mode) is about 14+(2% of TX_LEN) longer */
/* than the user-defined length (see async_wrap_skb) and is less than 4K */
/* Real received length (max RX_LEN) differs from the user-defined */
/* length only by the CRC (2 or 4 bytes) */
#define BUF_SAFETY 0x7a
#define RX_BUF_SZ (RX_LEN)
#define TX_BUF_SZ (TX_LEN+BUF_SAFETY)
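/* As a sanity check on these numbers: TX_LEN = 0xc00 = 3072 bytes and */
/* BUF_SAFETY = 0x7a = 122 bytes, so TX_BUF_SZ = 3194 bytes; the SIR */
/* wrapping overhead estimated above (about 14 + 2% of TX_LEN, i.e. */
/* roughly 75 bytes) fits comfortably inside BUF_SAFETY */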
/* Logic of the netdev part of this driver */
/* The RX ring is filled with buffers, when a packet arrives */
/* it is DMA'd into the buffer which is marked used and RxDone called */
/* RxDone forms an skb (and checks the CRC if in SIR mode) and ships */
/* the packet off upstairs */
/* The transmitter on the oboe chip can work in one of two modes */
/* for each ring->tx[] the transmitter can either */
/* a) transmit the packet, leave the transmitter enabled and proceed to */
/* the next ring slot */
/* OR */
/* b) transmit the packet, switch off the transmitter and issue TxDone */
/* All packets are entered into the ring in mode b), if the ring was */
/* empty the transmitter is started. */
/* If OPTIMIZE_TX is defined then in TxDone if the ring contains */
/* more than one packet, all but the last are set to mode a) [HOWEVER */
/* the hardware may not notice this, this is why we start in mode b) ] */
/* then restart the transmitter */
/* If OPTIMIZE_TX is not defined then we just restart the transmitter */
/* if the ring isn't empty */
/* Speed changes are delayed until the TxRing is empty */
/* mtt is handled by generating packets with bad CRCs, before the data */
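/* A rough sketch of the two modes in terms of the slot control bits */
/* (see donauboe.h): mode b), one TxDone interrupt per frame, is how */
/* frames are first queued: */
/* ring->tx[i].control = OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX; */
/* mode a), back-to-back with the transmitter left running, is the */
/* same word with OBOE_CTL_TX_RTCENTX cleared. TxDone flips all but */
/* the last queued slot from b) to a) by clearing that bit, as the */
/* interrupt handler below does. */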
/* TODO: */
/* check the mtt works ok */
/* finish the watchdog */
/* No user serviceable parts below here */
#define STATIC static
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/rtnetlink.h>
#include <asm/system.h>
#include <asm/io.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda.h>
//#include <net/irda/irmod.h>
//#include <net/irda/irlap_frame.h>
#include <net/irda/irda_device.h>
#include <net/irda/crc.h>
#include "donauboe.h"
#define INB(port) inb_p(port)
#define OUTB(val,port) outb_p(val,port)
#define OUTBP(val,port) outb_p(val,port)
#define PROMPT OUTB(OBOE_PROMPT_BIT,OBOE_PROMPT);
#if PROBE_VERBOSE
#define PROBE_DEBUG(args...) (printk (args))
#else
#define PROBE_DEBUG(args...) ;
#endif
/* Set the DMA to be byte at a time */
#define CONFIG0H_DMA_OFF OBOE_CONFIG0H_RCVANY
#define CONFIG0H_DMA_ON_NORX CONFIG0H_DMA_OFF | OBOE_CONFIG0H_ENDMAC
#define CONFIG0H_DMA_ON CONFIG0H_DMA_ON_NORX | OBOE_CONFIG0H_ENRX
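/* With the bit values from donauboe.h these work out to: */
/* CONFIG0H_DMA_OFF = OBOE_CONFIG0H_RCVANY = 0x02 (byte-at-a-time, */
/* DMA controller held in reset), CONFIG0H_DMA_ON_NORX = 0x06 and */
/* CONFIG0H_DMA_ON = 0x0e (DMA controller and receiver enabled) */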
static struct pci_device_id toshoboe_pci_tbl[] __initdata = {
{ PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIR701, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIRD01, PCI_ANY_ID, PCI_ANY_ID, },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, toshoboe_pci_tbl);
#define DRIVER_NAME "toshoboe"
static char *driver_name = DRIVER_NAME;
static int max_baud = 4000000;
static int do_probe = DO_PROBE;
/**********************************************************************/
/* Fcs code */
#ifdef CRC_EXPORTED
extern __u16 const irda_crc16_table[];
#else
static __u16 const irda_crc16_table[256] = {
0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
};
#endif
STATIC int
toshoboe_checkfcs (unsigned char *buf, int len)
{
int i;
union
{
__u16 value;
__u8 bytes[2];
}
fcs;
fcs.value = INIT_FCS;
for (i = 0; i < len; ++i)
fcs.value = irda_fcs (fcs.value, *(buf++));
return (fcs.value == GOOD_FCS);
}
/***********************************************************************/
/* Generic chip handling code */
#ifdef DUMP_PACKETS
static unsigned char dump[50];
STATIC void
_dumpbufs (unsigned char *data, int len, char tete)
{
int i,j;
char head=tete;
for (i=0;i<len;i+=16) {
for (j=0;j<16 && i+j<len;j++) { sprintf(&dump[3*j],"%02x.",data[i+j]); }
dump [3*j]=0;
IRDA_DEBUG (2, "%c%s\n",head , dump);
head='+';
}
}
#endif
/* Dump the registers */
STATIC void
toshoboe_dumpregs (struct toshoboe_cb *self)
{
__u32 ringbase;
IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
ringbase = INB (OBOE_RING_BASE0) << 10;
ringbase |= INB (OBOE_RING_BASE1) << 18;
ringbase |= INB (OBOE_RING_BASE2) << 26;
printk (KERN_ERR DRIVER_NAME ": Register dump:\n");
printk (KERN_ERR "Interrupts: Tx:%d Rx:%d TxUnder:%d RxOver:%d Sip:%d\n",
self->int_tx, self->int_rx, self->int_txunder, self->int_rxover,
self->int_sip);
printk (KERN_ERR "RX %02x TX %02x RingBase %08x\n",
INB (OBOE_RXSLOT), INB (OBOE_TXSLOT), ringbase);
printk (KERN_ERR "RING_SIZE %02x IER %02x ISR %02x\n",
INB (OBOE_RING_SIZE), INB (OBOE_IER), INB (OBOE_ISR));
printk (KERN_ERR "CONFIG1 %02x STATUS %02x\n",
INB (OBOE_CONFIG1), INB (OBOE_STATUS));
printk (KERN_ERR "CONFIG0 %02x%02x ENABLE %02x%02x\n",
INB (OBOE_CONFIG0H), INB (OBOE_CONFIG0L),
INB (OBOE_ENABLEH), INB (OBOE_ENABLEL));
printk (KERN_ERR "NEW_PCONFIG %02x%02x CURR_PCONFIG %02x%02x\n",
INB (OBOE_NEW_PCONFIGH), INB (OBOE_NEW_PCONFIGL),
INB (OBOE_CURR_PCONFIGH), INB (OBOE_CURR_PCONFIGL));
printk (KERN_ERR "MAXLEN %02x%02x RXCOUNT %02x%02x\n",
INB (OBOE_MAXLENH), INB (OBOE_MAXLENL),
INB (OBOE_RXCOUNTL), INB (OBOE_RXCOUNTH));
if (self->ring)
{
int i;
ringbase = virt_to_bus (self->ring);
printk (KERN_ERR "Ring at %08x:\n", ringbase);
printk (KERN_ERR "RX:");
for (i = 0; i < RX_SLOTS; ++i)
printk (" (%d,%02x)",self->ring->rx[i].len,self->ring->rx[i].control);
printk ("\n");
printk (KERN_ERR "TX:");
for (i = 0; i < TX_SLOTS; ++i)
printk (" (%d,%02x)",self->ring->tx[i].len,self->ring->tx[i].control);
printk ("\n");
}
}
/*Don't let the chip look at memory */
STATIC void
toshoboe_disablebm (struct toshoboe_cb *self)
{
__u8 command;
IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
pci_read_config_byte (self->pdev, PCI_COMMAND, &command);
command &= ~PCI_COMMAND_MASTER;
pci_write_config_byte (self->pdev, PCI_COMMAND, command);
}
/* Shutdown the chip and point the taskfile reg somewhere else */
STATIC void
toshoboe_stopchip (struct toshoboe_cb *self)
{
IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
/*Disable interrupts */
OUTB (0x0, OBOE_IER);
/*Disable DMA, Disable Rx, Disable Tx */
OUTB (CONFIG0H_DMA_OFF, OBOE_CONFIG0H);
/*Disable SIR MIR FIR, Tx and Rx */
OUTB (0x00, OBOE_ENABLEH);
/*Point the ring somewhere safe */
OUTB (0x3f, OBOE_RING_BASE2);
OUTB (0xff, OBOE_RING_BASE1);
OUTB (0xff, OBOE_RING_BASE0);
OUTB (RX_LEN >> 8, OBOE_MAXLENH);
OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
/*Acknowledge any pending interrupts */
OUTB (0xff, OBOE_ISR);
/*Why */
OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
/*switch it off */
OUTB (OBOE_CONFIG1_OFF, OBOE_CONFIG1);
toshoboe_disablebm (self);
}
/* Transmitter initialization */
STATIC void
toshoboe_start_DMA (struct toshoboe_cb *self, int opts)
{
OUTB (0x0, OBOE_ENABLEH);
OUTB (CONFIG0H_DMA_ON | opts, OBOE_CONFIG0H);
OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
PROMPT;
}
/*Set the baud rate */
STATIC void
toshoboe_setbaud (struct toshoboe_cb *self)
{
__u16 pconfig = 0;
__u8 config0l = 0;
IRDA_DEBUG (2, "%s(%d/%d)\n", __FUNCTION__, self->speed, self->io.speed);
switch (self->speed)
{
case 2400:
case 4800:
case 9600:
case 19200:
case 38400:
case 57600:
case 115200:
#ifdef USE_MIR
case 1152000:
#endif
case 4000000:
break;
default:
printk (KERN_ERR DRIVER_NAME ": switch to unsupported baudrate %d\n",
self->speed);
return;
}
switch (self->speed)
{
/* For SIR the preamble is done by adding XBOFs */
/* to the packet */
/* set to filtered SIR mode, filter looks for BOF and EOF */
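/* The BAUD values below appear to follow the pattern */
/* (115200 / speed) - 1, e.g. 2400 baud -> 115200/2400 - 1 = 47 */
/* and 115200 baud -> 1 - 1 = 0; the constant WIDTH value of 25 */
/* presumably sets the SIR pulse width */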
case 2400:
pconfig |= 47 << OBOE_PCONFIG_BAUDSHIFT;
pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
break;
case 4800:
pconfig |= 23 << OBOE_PCONFIG_BAUDSHIFT;
pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
break;
case 9600:
pconfig |= 11 << OBOE_PCONFIG_BAUDSHIFT;
pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
break;
case 19200:
pconfig |= 5 << OBOE_PCONFIG_BAUDSHIFT;
pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
break;
case 38400:
pconfig |= 2 << OBOE_PCONFIG_BAUDSHIFT;
pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
break;
case 57600:
pconfig |= 1 << OBOE_PCONFIG_BAUDSHIFT;
pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
break;
case 115200:
pconfig |= 0 << OBOE_PCONFIG_BAUDSHIFT;
pconfig |= 25 << OBOE_PCONFIG_WIDTHSHIFT;
break;
default:
/*Set to packet based reception */
OUTB (RX_LEN >> 8, OBOE_MAXLENH);
OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
break;
}
switch (self->speed)
{
case 2400:
case 4800:
case 9600:
case 19200:
case 38400:
case 57600:
case 115200:
config0l = OBOE_CONFIG0L_ENSIR;
if (self->async)
{
/*Set to character based reception */
/*System will lock if MAXLEN=0 */
/*so have to be careful */
OUTB (0x01, OBOE_MAXLENH);
OUTB (0x01, OBOE_MAXLENL);
OUTB (0x00, OBOE_MAXLENH);
}
else
{
/*Set to packet based reception */
config0l |= OBOE_CONFIG0L_ENSIRF;
OUTB (RX_LEN >> 8, OBOE_MAXLENH);
OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
}
break;
#ifdef USE_MIR
/* MIR mode */
/* Set for 16 bit CRC and enable MIR */
/* Preamble now handled by the chip */
case 1152000:
pconfig |= 0 << OBOE_PCONFIG_BAUDSHIFT;
pconfig |= 8 << OBOE_PCONFIG_WIDTHSHIFT;
pconfig |= 1 << OBOE_PCONFIG_PREAMBLESHIFT;
config0l = OBOE_CONFIG0L_CRC16 | OBOE_CONFIG0L_ENMIR;
break;
#endif
/* FIR mode */
/* Set for 32 bit CRC and enable FIR */
/* Preamble handled by the chip */
case 4000000:
pconfig |= 0 << OBOE_PCONFIG_BAUDSHIFT;
/* Documentation says 14, but Toshiba uses 15 in their drivers */
pconfig |= 15 << OBOE_PCONFIG_PREAMBLESHIFT;
config0l = OBOE_CONFIG0L_ENFIR;
break;
}
/* Copy into new PHY config buffer */
OUTBP (pconfig >> 8, OBOE_NEW_PCONFIGH);
OUTB (pconfig & 0xff, OBOE_NEW_PCONFIGL);
OUTB (config0l, OBOE_CONFIG0L);
/* Now make OBOE copy from new PHY to current PHY */
OUTB (0x0, OBOE_ENABLEH);
OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
PROMPT;
/* speed change executed */
self->new_speed = 0;
self->io.speed = self->speed;
}
/*Let the chip look at memory */
STATIC void
toshoboe_enablebm (struct toshoboe_cb *self)
{
IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
pci_set_master (self->pdev);
}
/*setup the ring */
STATIC void
toshoboe_initring (struct toshoboe_cb *self)
{
int i;
IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
for (i = 0; i < TX_SLOTS; ++i)
{
self->ring->tx[i].len = 0;
self->ring->tx[i].control = 0x00;
self->ring->tx[i].address = virt_to_bus (self->tx_bufs[i]);
}
for (i = 0; i < RX_SLOTS; ++i)
{
self->ring->rx[i].len = 0;
self->ring->rx[i].address = virt_to_bus (self->rx_bufs[i]);
self->ring->rx[i].control = OBOE_CTL_RX_HW_OWNS;
}
}
STATIC void
toshoboe_resetptrs (struct toshoboe_cb *self)
{
/* Can reset pointers by twiddling DMA */
OUTB (0x0, OBOE_ENABLEH);
OUTBP (CONFIG0H_DMA_OFF, OBOE_CONFIG0H);
OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
self->rxs = inb_p (OBOE_RXSLOT) & OBOE_SLOT_MASK;
self->txs = inb_p (OBOE_TXSLOT) & OBOE_SLOT_MASK;
}
/* Called in locked state */
STATIC void
toshoboe_initptrs (struct toshoboe_cb *self)
{
/* spin_lock_irqsave(self->spinlock, flags); */
/* save_flags (flags); */
/* Can reset pointers by twiddling DMA */
toshoboe_resetptrs (self);
OUTB (0x0, OBOE_ENABLEH);
OUTB (CONFIG0H_DMA_ON, OBOE_CONFIG0H);
OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
self->txpending = 0;
/* spin_unlock_irqrestore(self->spinlock, flags); */
/* restore_flags (flags); */
}
/* Wake the chip up and get it looking at the rings */
/* Called in locked state */
STATIC void
toshoboe_startchip (struct toshoboe_cb *self)
{
__u32 physaddr;
IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
toshoboe_initring (self);
toshoboe_enablebm (self);
OUTBP (OBOE_CONFIG1_RESET, OBOE_CONFIG1);
OUTBP (OBOE_CONFIG1_ON, OBOE_CONFIG1);
/* Stop the clocks */
OUTB (0, OBOE_ENABLEH);
/*Set size of rings */
OUTB (RING_SIZE, OBOE_RING_SIZE);
/*Acknowledge any pending interrupts */
OUTB (0xff, OBOE_ISR);
/*Enable ints */
OUTB (OBOE_INT_TXDONE | OBOE_INT_RXDONE |
OBOE_INT_TXUNDER | OBOE_INT_RXOVER | OBOE_INT_SIP , OBOE_IER);
/*Acknowledge any pending interrupts */
OUTB (0xff, OBOE_ISR);
/*Set the maximum packet length to RX_LEN (0xc04) */
OUTB (RX_LEN >> 8, OBOE_MAXLENH);
OUTB (RX_LEN & 0xff, OBOE_MAXLENL);
/*Shutdown DMA */
OUTB (CONFIG0H_DMA_OFF, OBOE_CONFIG0H);
/*Find out where the rings live */
physaddr = virt_to_bus (self->ring);
ASSERT ((physaddr & 0x3ff) == 0,
printk (KERN_ERR DRIVER_NAME "ring not correctly aligned\n");
return;);
OUTB ((physaddr >> 10) & 0xff, OBOE_RING_BASE0);
OUTB ((physaddr >> 18) & 0xff, OBOE_RING_BASE1);
OUTB ((physaddr >> 26) & 0x3f, OBOE_RING_BASE2);
/*Enable DMA controller in byte mode and RX */
OUTB (CONFIG0H_DMA_ON, OBOE_CONFIG0H);
/* Start up the clocks */
OUTB (OBOE_ENABLEH_PHYANDCLOCK, OBOE_ENABLEH);
/*set to sensible speed */
self->speed = 9600;
toshoboe_setbaud (self);
toshoboe_initptrs (self);
}
STATIC void
toshoboe_isntstuck (struct toshoboe_cb *self)
{
}
STATIC void
toshoboe_checkstuck (struct toshoboe_cb *self)
{
unsigned long flags;
if (0)
{
spin_lock_irqsave(&self->spinlock, flags);
/* This will reset the chip completely */
printk (KERN_ERR DRIVER_NAME ": Resetting chip\n");
toshoboe_stopchip (self);
toshoboe_startchip (self);
spin_unlock_irqrestore(&self->spinlock, flags);
}
}
/*Generate a packet about mtt us long */
STATIC int
toshoboe_makemttpacket (struct toshoboe_cb *self, void *buf, int mtt)
{
int xbofs;
xbofs = ((int) (mtt/100)) * (int) (self->speed);
xbofs=xbofs/80000; /*Eight bits per byte, and mtt is in us*/
xbofs++;
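/* Worked example: mtt = 10000 us at 115200 baud gives */
/* (10000/100) * 115200 / 80000 + 1 = 145 XBOFs, i.e. roughly */
/* mtt * speed / 8e6 -- one byte per eight bit-times of delay */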
IRDA_DEBUG (2, DRIVER_NAME
": generated mtt of %d bytes for %d us at %d baud\n"
, xbofs,mtt,self->speed);
if (xbofs > TX_LEN)
{
printk (KERN_ERR DRIVER_NAME ": wanted %d bytes MTT but TX_LEN is %d\n",
xbofs, TX_LEN);
xbofs = TX_LEN;
}
/*xbofs will do for SIR, MIR and FIR; SIR mode doesn't generate a checksum anyway */
memset (buf, XBOF, xbofs);
return xbofs;
}
/***********************************************************************/
/* Probe code */
STATIC void
toshoboe_dumptx (struct toshoboe_cb *self)
{
int i;
PROBE_DEBUG(KERN_WARNING "TX:");
for (i = 0; i < TX_SLOTS; ++i)
PROBE_DEBUG(" (%d,%02x)",self->ring->tx[i].len,self->ring->tx[i].control);
PROBE_DEBUG(" [%d]\n",self->speed);
}
STATIC void
toshoboe_dumprx (struct toshoboe_cb *self, int score)
{
int i;
PROBE_DEBUG(" %d\nRX:",score);
for (i = 0; i < RX_SLOTS; ++i)
PROBE_DEBUG(" (%d,%02x)",self->ring->rx[i].len,self->ring->rx[i].control);
PROBE_DEBUG("\n");
}
static inline int
stuff_byte (__u8 byte, __u8 * buf)
{
switch (byte)
{
case BOF: /* FALLTHROUGH */
case EOF: /* FALLTHROUGH */
case CE:
/* Insert transparently coded */
buf[0] = CE; /* Send link escape */
buf[1] = byte ^ IRDA_TRANS; /* Complement bit 5 */
return 2;
/* break; */
default:
/* Non-special value, no transparency required */
buf[0] = byte;
return 1;
/* break; */
}
}
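/* For example, stuffing BOF (0xc0) emits CE (0x7d) followed by */
/* 0xc0 ^ IRDA_TRANS = 0xe0 and returns 2; any non-special byte is */
/* copied through unchanged with a return length of 1 */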
STATIC int toshoboe_invalid_dev(int irq)
{
printk (KERN_WARNING DRIVER_NAME ": irq %d for unknown device.\n", irq);
return 1;
}
STATIC void
toshoboe_probeinterrupt (int irq, void *dev_id, struct pt_regs *regs)
{
struct toshoboe_cb *self = (struct toshoboe_cb *) dev_id;
__u8 irqstat;
if (self == NULL && toshoboe_invalid_dev(irq))
return;
irqstat = INB (OBOE_ISR);
/* was it us */
if (!(irqstat & OBOE_INT_MASK))
return;
/* Ack all the interrupts */
OUTB (irqstat, OBOE_ISR);
if (irqstat & OBOE_INT_TXDONE)
{
int txp;
self->int_tx++;
PROBE_DEBUG("T");
txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK;
if (self->ring->tx[txp].control & OBOE_CTL_TX_HW_OWNS)
{
self->int_tx+=100;
PROBE_DEBUG("S");
toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
}
}
if (irqstat & OBOE_INT_RXDONE) {
self->int_rx++;
PROBE_DEBUG("R"); }
if (irqstat & OBOE_INT_TXUNDER) {
self->int_txunder++;
PROBE_DEBUG("U"); }
if (irqstat & OBOE_INT_RXOVER) {
self->int_rxover++;
PROBE_DEBUG("O"); }
if (irqstat & OBOE_INT_SIP) {
self->int_sip++;
PROBE_DEBUG("I"); }
}
STATIC int
toshoboe_maketestpacket (unsigned char *buf, int badcrc, int fir)
{
int i;
int len = 0;
union
{
__u16 value;
__u8 bytes[2];
}
fcs;
if (fir)
{
memset (buf, 0, TT_LEN);
return (TT_LEN);
}
fcs.value = INIT_FCS;
memset (buf, XBOF, 10);
len += 10;
buf[len++] = BOF;
for (i = 0; i < TT_LEN; ++i)
{
len += stuff_byte (i, buf + len);
fcs.value = irda_fcs (fcs.value, i);
}
len += stuff_byte (fcs.bytes[0] ^ badcrc, buf + len);
len += stuff_byte (fcs.bytes[1] ^ badcrc, buf + len);
buf[len++] = EOF;
len++;
return len;
}
STATIC int
toshoboe_probefail (struct toshoboe_cb *self, char *msg)
{
printk (KERN_ERR DRIVER_NAME "probe(%d) failed %s\n",self-> speed, msg);
toshoboe_dumpregs (self);
toshoboe_stopchip (self);
free_irq (self->io.irq, (void *) self);
return 0;
}
STATIC int
toshoboe_numvalidrcvs (struct toshoboe_cb *self)
{
int i, ret = 0;
for (i = 0; i < RX_SLOTS; ++i)
if ((self->ring->rx[i].control & 0xe0) == 0)
ret++;
return ret;
}
STATIC int
toshoboe_numrcvs (struct toshoboe_cb *self)
{
int i, ret = 0;
for (i = 0; i < RX_SLOTS; ++i)
if (!(self->ring->rx[i].control & OBOE_CTL_RX_HW_OWNS))
ret++;
return ret;
}
STATIC int
toshoboe_probe (struct toshoboe_cb *self)
{
int i, j, n;
#ifdef USE_MIR
int bauds[] = { 9600, 115200, 4000000, 1152000 };
#else
int bauds[] = { 9600, 115200, 4000000 };
#endif
unsigned long flags;
IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
if (request_irq (self->io.irq, toshoboe_probeinterrupt,
self->io.irqflags, "toshoboe", (void *) self))
{
printk (KERN_ERR DRIVER_NAME ": probe failed to allocate irq %d\n",
self->io.irq);
return 0;
}
/* test 1: SIR filter and back to back */
for (j = 0; j < (sizeof (bauds) / sizeof (int)); ++j)
{
int fir = (j > 1);
toshoboe_stopchip (self);
spin_lock_irqsave(&self->spinlock, flags);
/*Address is already setup */
toshoboe_startchip (self);
self->int_rx = self->int_tx = 0;
self->speed = bauds[j];
toshoboe_setbaud (self);
toshoboe_initptrs (self);
spin_unlock_irqrestore(&self->spinlock, flags);
self->ring->tx[self->txs].control =
/* (FIR only) OBOE_CTL_TX_SIP needed for switching to next slot */
/* MIR: all received data is stored in one slot */
(fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX
: OBOE_CTL_TX_HW_OWNS ;
self->ring->tx[self->txs].len =
toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
self->txs++;
self->txs %= TX_SLOTS;
self->ring->tx[self->txs].control =
(fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_SIP
: OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX ;
self->ring->tx[self->txs].len =
toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
self->txs++;
self->txs %= TX_SLOTS;
self->ring->tx[self->txs].control =
(fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX
: OBOE_CTL_TX_HW_OWNS ;
self->ring->tx[self->txs].len =
toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
self->txs++;
self->txs %= TX_SLOTS;
self->ring->tx[self->txs].control =
(fir) ? OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX
| OBOE_CTL_TX_SIP | OBOE_CTL_TX_BAD_CRC
: OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX ;
self->ring->tx[self->txs].len =
toshoboe_maketestpacket (self->tx_bufs[self->txs], 0, fir);
self->txs++;
self->txs %= TX_SLOTS;
toshoboe_dumptx (self);
/* Turn on TX and RX and loopback */
toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
i = 0;
n = fir ? 1 : 4;
while (toshoboe_numvalidrcvs (self) != n)
{
if (i > 4800)
return toshoboe_probefail (self, "filter test");
udelay ((9600*(TT_LEN+16))/self->speed);
i++;
}
n = fir ? 203 : 102;
while ((toshoboe_numrcvs(self) != self->int_rx) || (self->int_tx != n))
{
if (i > 4800)
return toshoboe_probefail (self, "interrupt test");
udelay ((9600*(TT_LEN+16))/self->speed);
i++;
}
toshoboe_dumprx (self,i);
}
/* test 2: SIR in char at a time */
toshoboe_stopchip (self);
self->int_rx = self->int_tx = 0;
spin_lock_irqsave(&self->spinlock, flags);
toshoboe_startchip (self);
spin_unlock_irqrestore(&self->spinlock, flags);
self->async = 1;
self->speed = 115200;
toshoboe_setbaud (self);
self->ring->tx[self->txs].control =
OBOE_CTL_TX_RTCENTX | OBOE_CTL_TX_HW_OWNS;
self->ring->tx[self->txs].len = 4;
((unsigned char *) self->tx_bufs[self->txs])[0] = 'f';
((unsigned char *) self->tx_bufs[self->txs])[1] = 'i';
((unsigned char *) self->tx_bufs[self->txs])[2] = 's';
((unsigned char *) self->tx_bufs[self->txs])[3] = 'h';
toshoboe_dumptx (self);
toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
i = 0;
while (toshoboe_numvalidrcvs (self) != 4)
{
if (i > 100)
return toshoboe_probefail (self, "Async test");
udelay (100);
i++;
}
while ((toshoboe_numrcvs (self) != self->int_rx) || (self->int_tx != 1))
{
if (i > 100)
return toshoboe_probefail (self, "Async interrupt test");
udelay (100);
i++;
}
toshoboe_dumprx (self,i);
self->async = 0;
self->speed = 9600;
toshoboe_setbaud (self);
toshoboe_stopchip (self);
free_irq (self->io.irq, (void *) self);
printk (KERN_WARNING DRIVER_NAME ": Self test passed ok\n");
return 1;
}
/******************************************************************/
/* Netdev style code */
/* Transmit something */
STATIC int
toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
{
struct toshoboe_cb *self;
__s32 speed;
int mtt, len, ctl;
unsigned long flags;
struct irda_skb_cb *cb = (struct irda_skb_cb *) skb->cb;
self = (struct toshoboe_cb *) dev->priv;
ASSERT (self != NULL, return 0; );
IRDA_DEBUG (1, "%s.tx:%x(%x)%x\n", __FUNCTION__
,skb->len,self->txpending,INB (OBOE_ENABLEH));
if (!cb->magic) {
IRDA_DEBUG (2, "%s.Not IrLAP:%x\n", __FUNCTION__, cb->magic);
#ifdef DUMP_PACKETS
_dumpbufs(skb->data,skb->len,'>');
#endif
}
/* change speed pending, wait for its execution */
if (self->new_speed)
return -EBUSY;
/* device stopped (apm) wait for restart */
if (self->stopped)
return -EBUSY;
toshoboe_checkstuck (self);
/* Check if we need to change the speed */
/* But not now. Wait after transmission if mtt not required */
speed=irda_get_next_speed(skb);
if ((speed != self->io.speed) && (speed != -1))
{
spin_lock_irqsave(&self->spinlock, flags);
if (self->txpending || skb->len)
{
self->new_speed = speed;
IRDA_DEBUG (1, "%s: Queued TxDone scheduled speed change %d\n" ,
__FUNCTION__, speed);
/* if no data, that's all! */
if (!skb->len)
{
spin_unlock_irqrestore(&self->spinlock, flags);
dev_kfree_skb (skb);
return 0;
}
/* True packet, go on, but */
/* do not accept anything before change speed execution */
netif_stop_queue(dev);
/* ready to process TxDone interrupt */
spin_unlock_irqrestore(&self->spinlock, flags);
}
else
{
/* idle and no data, change speed now */
self->speed = speed;
toshoboe_setbaud (self);
spin_unlock_irqrestore(&self->spinlock, flags);
dev_kfree_skb (skb);
return 0;
}
}
if ((mtt = irda_get_mtt(skb)))
{
/* This is fair since the queue should be empty anyway */
spin_lock_irqsave(&self->spinlock, flags);
if (self->txpending)
{
spin_unlock_irqrestore(&self->spinlock, flags);
return -EBUSY;
}
/* If in SIR mode we need to generate a string of XBOFs */
/* In MIR and FIR we need to generate a string of data */
/* which we will add a wrong checksum to */
mtt = toshoboe_makemttpacket (self, self->tx_bufs[self->txs], mtt);
IRDA_DEBUG (1, "%s.mtt:%x(%x)%d\n", __FUNCTION__
,skb->len,mtt,self->txpending);
if (mtt)
{
self->ring->tx[self->txs].len = mtt & 0xfff;
ctl = OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX;
if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_FIRON)
{
ctl |= OBOE_CTL_TX_BAD_CRC | OBOE_CTL_TX_SIP ;
}
#ifdef USE_MIR
else if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_MIRON)
{
ctl |= OBOE_CTL_TX_BAD_CRC;
}
#endif
self->ring->tx[self->txs].control = ctl;
OUTB (0x0, OBOE_ENABLEH);
/* It is only a timer. Do not send mtt packet outside! */
toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX | OBOE_CONFIG0H_LOOP);
self->txpending++;
self->txs++;
self->txs %= TX_SLOTS;
}
else
{
printk(KERN_ERR DRIVER_NAME ": problem with mtt packet - ignored\n");
}
spin_unlock_irqrestore(&self->spinlock, flags);
}
#ifdef DUMP_PACKETS
_dumpbufs(skb->data,skb->len,'>');
#endif
spin_lock_irqsave(&self->spinlock, flags);
if (self->ring->tx[self->txs].control & OBOE_CTL_TX_HW_OWNS)
{
IRDA_DEBUG (0, "%s.ful:%x(%x)%x\n", __FUNCTION__
,skb->len, self->ring->tx[self->txs].control, self->txpending);
toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
spin_unlock_irqrestore(&self->spinlock, flags);
return -EBUSY;
}
if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_SIRON)
{
len = async_wrap_skb (skb, self->tx_bufs[self->txs], TX_BUF_SZ);
}
else
{
len = skb->len;
memcpy (self->tx_bufs[self->txs], skb->data, len);
}
self->ring->tx[self->txs].len = len & 0x0fff;
/*Sometimes the HW doesn't see us assert RTCENTX in the interrupt code */
/*later; to play safe, we guarantee the last packet to be transmitted */
/*has RTCENTX set */
ctl = OBOE_CTL_TX_HW_OWNS | OBOE_CTL_TX_RTCENTX;
if (INB (OBOE_ENABLEH) & OBOE_ENABLEH_FIRON)
{
ctl |= OBOE_CTL_TX_SIP ;
}
self->ring->tx[self->txs].control = ctl;
/* If transmitter is idle start in one-shot mode */
if (!self->txpending)
toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
self->txpending++;
self->txs++;
self->txs %= TX_SLOTS;
spin_unlock_irqrestore(&self->spinlock, flags);
dev_kfree_skb (skb);
return 0;
}
/*interrupt handler */
STATIC void
toshoboe_interrupt (int irq, void *dev_id, struct pt_regs *regs)
{
struct toshoboe_cb *self = (struct toshoboe_cb *) dev_id;
__u8 irqstat;
struct sk_buff *skb = NULL;
if (self == NULL && toshoboe_invalid_dev(irq))
return;
irqstat = INB (OBOE_ISR);
/* was it us */
if (!(irqstat & OBOE_INT_MASK))
return;
/* Ack all the interrupts */
OUTB (irqstat, OBOE_ISR);
toshoboe_isntstuck (self);
/* Txdone */
if (irqstat & OBOE_INT_TXDONE)
{
int txp, txpc;
int i;
txp = self->txpending;
self->txpending = 0;
for (i = 0; i < TX_SLOTS; ++i)
{
if (self->ring->tx[i].control & OBOE_CTL_TX_HW_OWNS)
self->txpending++;
}
IRDA_DEBUG (1, "%s.txd(%x)%x/%x\n", __FUNCTION__
,irqstat,txp,self->txpending);
txp = INB (OBOE_TXSLOT) & OBOE_SLOT_MASK;
/* Got anything queued ? start it together */
if (self->ring->tx[txp].control & OBOE_CTL_TX_HW_OWNS)
{
txpc = txp;
#ifdef OPTIMIZE_TX
while (self->ring->tx[txpc].control & OBOE_CTL_TX_HW_OWNS)
{
txp = txpc;
txpc++;
txpc %= TX_SLOTS;
self->stats.tx_packets++;
if (self->ring->tx[txpc].control & OBOE_CTL_TX_HW_OWNS)
self->ring->tx[txp].control &= ~OBOE_CTL_TX_RTCENTX;
}
self->stats.tx_packets--;
#else
self->stats.tx_packets++;
#endif
toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
}
if ((!self->txpending) && (self->new_speed))
{
self->speed = self->new_speed;
IRDA_DEBUG (1, "%s: Executed TxDone scheduled speed change %d\n",
__FUNCTION__, self->speed);
toshoboe_setbaud (self);
}
/* Tell network layer that we want more frames */
if (!self->new_speed)
netif_wake_queue(self->netdev);
}
if (irqstat & OBOE_INT_RXDONE)
{
while (!(self->ring->rx[self->rxs].control & OBOE_CTL_RX_HW_OWNS))
{
int len = self->ring->rx[self->rxs].len;
skb = NULL;
IRDA_DEBUG (3, "%s.rcv:%x(%x)\n", __FUNCTION__
,len,self->ring->rx[self->rxs].control);
#ifdef DUMP_PACKETS
_dumpbufs(self->rx_bufs[self->rxs],len,'<');
#endif
if (self->ring->rx[self->rxs].control == 0)
{
__u8 enable = INB (OBOE_ENABLEH);
/* In SIR mode we need to check the CRC as this */
/* hasn't been done by the hardware */
if (enable & OBOE_ENABLEH_SIRON)
{
if (!toshoboe_checkfcs (self->rx_bufs[self->rxs], len))
len = 0;
/*Trim off the CRC */
if (len > 1)
len -= 2;
else
len = 0;
IRDA_DEBUG (1, "%s.SIR:%x(%x)\n", __FUNCTION__, len,enable);
}
#ifdef USE_MIR
else if (enable & OBOE_ENABLEH_MIRON)
{
if (len > 1)
len -= 2;
else
len = 0;
IRDA_DEBUG (2, "%s.MIR:%x(%x)\n", __FUNCTION__, len,enable);
}
#endif
else if (enable & OBOE_ENABLEH_FIRON)
{
if (len > 3)
len -= 4; /*FIXME: check this */
else
len = 0;
IRDA_DEBUG (1, "%s.FIR:%x(%x)\n", __FUNCTION__, len,enable);
}
else
IRDA_DEBUG (0, "%s.?IR:%x(%x)\n", __FUNCTION__, len,enable);
if (len)
{
skb = dev_alloc_skb (len + 1);
if (skb)
{
skb_reserve (skb, 1);
skb_put (skb, len);
memcpy (skb->data, self->rx_bufs[self->rxs], len);
self->stats.rx_packets++;
skb->dev = self->netdev;
skb->mac.raw = skb->data;
skb->protocol = htons (ETH_P_IRDA);
}
else
{
printk (KERN_INFO
"%s(), memory squeeze, dropping frame.\n",
__FUNCTION__);
}
}
}
else
{
/* TODO: =========================================== */
/* if OBOE_CTL_RX_LENGTH is set, our buffers are too small */
/* (MIR or FIR) data is lost. */
/* (SIR) data is split across several slots. */
/* we would have to join all the received buffers */
/* into one large buffer before checking the CRC. */
IRDA_DEBUG (0, "%s.err:%x(%x)\n", __FUNCTION__
,len,self->ring->rx[self->rxs].control);
}
self->ring->rx[self->rxs].len = 0x0;
self->ring->rx[self->rxs].control = OBOE_CTL_RX_HW_OWNS;
self->rxs++;
self->rxs %= RX_SLOTS;
if (skb)
netif_rx (skb);
}
}
if (irqstat & OBOE_INT_TXUNDER)
{
printk (KERN_WARNING DRIVER_NAME ": tx fifo underflow\n");
}
if (irqstat & OBOE_INT_RXOVER)
{
printk (KERN_WARNING DRIVER_NAME ": rx fifo overflow\n");
}
/* This must be useful for something... */
if (irqstat & OBOE_INT_SIP)
{
self->int_sip++;
IRDA_DEBUG (1, "%s.sip:%x(%x)%x\n", __FUNCTION__
,self->int_sip,irqstat,self->txpending);
}
}
STATIC int
toshoboe_net_init (struct net_device *dev)
{
IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
/* Setup to be a normal IrDA network device driver */
irda_device_setup (dev);
/* Insert overrides below this line! */
return 0;
}
STATIC int
toshoboe_net_open (struct net_device *dev)
{
struct toshoboe_cb *self;
unsigned long flags;
IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
ASSERT (dev != NULL, return -1; );
self = (struct toshoboe_cb *) dev->priv;
ASSERT (self != NULL, return 0; );
if (self->async)
return -EBUSY;
if (self->stopped)
return 0;
if (request_irq (self->io.irq, toshoboe_interrupt,
SA_SHIRQ | SA_INTERRUPT, dev->name, (void *) self))
{
return -EAGAIN;
}
spin_lock_irqsave(&self->spinlock, flags);
toshoboe_startchip (self);
spin_unlock_irqrestore(&self->spinlock, flags);
/* Ready to play! */
netif_start_queue(dev);
/*
* Open new IrLAP layer instance, now that everything should be
* initialized properly
*/
self->irlap = irlap_open (dev, &self->qos, driver_name);
self->irdad = 1;
MOD_INC_USE_COUNT;
return 0;
}
STATIC int
toshoboe_net_close (struct net_device *dev)
{
struct toshoboe_cb *self;
IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
ASSERT (dev != NULL, return -1; );
self = (struct toshoboe_cb *) dev->priv;
/* Stop device */
netif_stop_queue(dev);
/* Stop and remove instance of IrLAP */
if (self->irlap)
irlap_close (self->irlap);
self->irlap = NULL;
self->irdad = 0;
free_irq (self->io.irq, (void *) self);
if (!self->stopped)
{
toshoboe_stopchip (self);
}
MOD_DEC_USE_COUNT;
return 0;
}
/*
* Function toshoboe_net_ioctl (dev, rq, cmd)
*
* Process IOCTL commands for this device
*
*/
STATIC int
toshoboe_net_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
struct if_irda_req *irq = (struct if_irda_req *) rq;
struct toshoboe_cb *self;
unsigned long flags;
int ret = 0;
ASSERT (dev != NULL, return -1; );
self = dev->priv;
ASSERT (self != NULL, return -1; );
IRDA_DEBUG (5, "%s(), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
/* Disable interrupts & save flags */
spin_lock_irqsave(&self->spinlock, flags);
switch (cmd)
{
case SIOCSBANDWIDTH: /* Set bandwidth */
/* This function will also be used by IrLAP to change the
* speed, so we still must allow for speed change within
* interrupt context.
*/
IRDA_DEBUG (1, "%s(BANDWIDTH), %s, (%X/%ld\n", __FUNCTION__
,dev->name, INB (OBOE_STATUS), irq->ifr_baudrate );
if (!in_interrupt () && !capable (CAP_NET_ADMIN))
return -EPERM;
/* self->speed=irq->ifr_baudrate; */
/* toshoboe_setbaud(self); */
/* Just change speed once - inserted by Paul Bristow */
self->new_speed = irq->ifr_baudrate;
break;
case SIOCSMEDIABUSY: /* Set media busy */
IRDA_DEBUG (1, "%s(MEDIABUSY), %s, (%X/%x)\n", __FUNCTION__
,dev->name, INB (OBOE_STATUS), capable (CAP_NET_ADMIN) );
if (!capable (CAP_NET_ADMIN))
return -EPERM;
irda_device_set_media_busy (self->netdev, TRUE);
break;
case SIOCGRECEIVING: /* Check if we are receiving right now */
irq->ifr_receiving = (INB (OBOE_STATUS) & OBOE_STATUS_RXBUSY) ? 1 : 0;
IRDA_DEBUG (3, "%s(RECEIVING), %s, (%X/%x)\n", __FUNCTION__
,dev->name, INB (OBOE_STATUS), irq->ifr_receiving );
break;
default:
IRDA_DEBUG (1, "%s(?), %s, (cmd=0x%X)\n", __FUNCTION__, dev->name, cmd);
ret = -EOPNOTSUPP;
}
spin_unlock_irqrestore(&self->spinlock, flags);
return ret;
}
MODULE_DESCRIPTION("Toshiba OBOE IrDA Device Driver");
MODULE_AUTHOR("James McKenzie <james@fishsoup.dhs.org>");
MODULE_LICENSE("GPL");
MODULE_PARM (max_baud, "i");
MODULE_PARM_DESC(max_baud, "Maximum baud rate");
MODULE_PARM (do_probe, "i");
MODULE_PARM_DESC(do_probe, "Enable/disable chip probing and self-test");
STATIC void
toshoboe_close (struct pci_dev *pci_dev)
{
int i;
struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
ASSERT (self != NULL, return; );
if (!self->stopped)
{
toshoboe_stopchip (self);
}
release_region (self->io.fir_base, self->io.fir_ext);
for (i = 0; i < TX_SLOTS; ++i)
{
kfree (self->tx_bufs[i]);
self->tx_bufs[i] = NULL;
}
for (i = 0; i < RX_SLOTS; ++i)
{
kfree (self->rx_bufs[i]);
self->rx_bufs[i] = NULL;
}
if (self->netdev)
{
/* Remove netdevice */
rtnl_lock ();
unregister_netdevice (self->netdev);
rtnl_unlock ();
}
kfree (self->ringbuf);
self->ringbuf = NULL;
self->ring = NULL;
return;
}
STATIC int
toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
{
struct toshoboe_cb *self;
struct net_device *dev;
int i = 0;
int ok = 0;
int err;
IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
if ((err=pci_enable_device(pci_dev)))
return err;
self = kmalloc (sizeof (struct toshoboe_cb), GFP_KERNEL);
if (self == NULL)
{
printk (KERN_ERR DRIVER_NAME ": can't allocate memory for "
"IrDA control block\n");
return -ENOMEM;
}
memset (self, 0, sizeof (struct toshoboe_cb));
self->pdev = pci_dev;
self->base = pci_resource_start(pci_dev,0);
self->io.fir_base = self->base;
self->io.fir_ext = OBOE_IO_EXTENT;
self->io.irq = pci_dev->irq;
self->io.irqflags = SA_SHIRQ | SA_INTERRUPT;
self->speed = self->io.speed = 9600;
self->async = 0;
/* Lock the port that we need */
if (NULL==request_region (self->io.fir_base, self->io.fir_ext, driver_name))
{
printk (KERN_ERR DRIVER_NAME ": can't get iobase of 0x%03x\n"
,self->io.fir_base);
err = -EBUSY;
goto freeself;
}
spin_lock_init(&self->spinlock);
irda_init_max_qos_capabilies (&self->qos);
self->qos.baud_rate.bits = 0;
if (max_baud >= 2400)
self->qos.baud_rate.bits |= IR_2400;
/*if (max_baud>=4800) idev->qos.baud_rate.bits|=IR_4800; */
if (max_baud >= 9600)
self->qos.baud_rate.bits |= IR_9600;
if (max_baud >= 19200)
self->qos.baud_rate.bits |= IR_19200;
if (max_baud >= 115200)
self->qos.baud_rate.bits |= IR_115200;
#ifdef USE_MIR
if (max_baud >= 1152000)
{
self->qos.baud_rate.bits |= IR_1152000;
self->flags |= IFF_MIR;
}
#endif
if (max_baud >= 4000000)
{
self->qos.baud_rate.bits |= (IR_4000000 << 8);
self->flags |= IFF_FIR;
}
/*FIXME: work this out... */
self->qos.min_turn_time.bits = 0xff;
irda_qos_bits_to_value (&self->qos);
self->flags |= IFF_SIR | IFF_DMA | IFF_PIO; /* don't clobber the MIR/FIR bits set above */
/* Allocate twice the size to guarantee alignment */
self->ringbuf = (void *) kmalloc (OBOE_RING_LEN << 1, GFP_KERNEL);
if (!self->ringbuf)
{
printk (KERN_ERR DRIVER_NAME ": can't allocate DMA buffers\n");
err = -ENOMEM;
goto freeregion;
}
/*We need to align the taskfile on a taskfile size boundary */
{
__u32 addr;
addr = (__u32) self->ringbuf;
addr &= ~(OBOE_RING_LEN - 1);
addr += OBOE_RING_LEN;
self->ring = (struct OboeRing *) addr;
}
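/* Worked example of the rounding above: if kmalloc returned */
/* 0x12345678, masking with ~(OBOE_RING_LEN - 1) gives 0x12345400 */
/* and adding OBOE_RING_LEN gives 0x12345800 -- ring-aligned and */
/* guaranteed to lie within the doubled allocation */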
memset (self->ring, 0, OBOE_RING_LEN);
self->io.mem_base = (__u32) self->ring;
ok = 1;
for (i = 0; i < TX_SLOTS; ++i)
{
self->tx_bufs[i] = kmalloc (TX_BUF_SZ, GFP_KERNEL);
if (!self->tx_bufs[i])
ok = 0;
}
for (i = 0; i < RX_SLOTS; ++i)
{
self->rx_bufs[i] = kmalloc (RX_BUF_SZ, GFP_KERNEL);
if (!self->rx_bufs[i])
ok = 0;
}
if (!ok)
{
printk (KERN_ERR DRIVER_NAME ": can't allocate rx/tx buffers\n");
err = -ENOMEM;
goto freebufs;
}
if (do_probe)
if (!toshoboe_probe (self))
{
err = -ENODEV;
goto freebufs;
}
if (!(dev = dev_alloc ("irda%d", &err)))
{
printk (KERN_ERR DRIVER_NAME ": dev_alloc() failed\n");
err = -ENOMEM;
goto freebufs;
}
dev->priv = (void *) self;
self->netdev = dev;
printk (KERN_INFO "IrDA: Registered device %s\n", dev->name);
dev->init = toshoboe_net_init;
dev->hard_start_xmit = toshoboe_hard_xmit;
dev->open = toshoboe_net_open;
dev->stop = toshoboe_net_close;
dev->do_ioctl = toshoboe_net_ioctl;
rtnl_lock ();
err = register_netdevice (dev);
rtnl_unlock ();
if (err)
{
printk (KERN_ERR DRIVER_NAME ": register_netdev() failed\n");
err = -ENOMEM;
goto freebufs;
}
pci_set_drvdata(pci_dev,self);
printk (KERN_INFO DRIVER_NAME ": Using multiple tasks, version %s\n", rcsid);
return 0;
freebufs:
for (i = 0; i < TX_SLOTS; ++i)
if (self->tx_bufs[i])
kfree (self->tx_bufs[i]);
for (i = 0; i < RX_SLOTS; ++i)
if (self->rx_bufs[i])
kfree (self->rx_bufs[i]);
kfree(self->ringbuf);
freeregion:
release_region (self->io.fir_base, self->io.fir_ext);
freeself:
kfree (self);
return err;
}
STATIC int
toshoboe_gotosleep (struct pci_dev *pci_dev, u32 crap)
{
struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
unsigned long flags;
int i = 10;
IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
if (!self || self->stopped)
return 0;
if ((!self->irdad) && (!self->async))
return 0;
/* Flush all packets */
while ((i--) && (self->txpending))
udelay (10000);
spin_lock_irqsave(&self->spinlock, flags);
toshoboe_stopchip (self);
self->stopped = 1;
self->txpending = 0;
spin_unlock_irqrestore(&self->spinlock, flags);
return 0;
}
STATIC int
toshoboe_wakeup (struct pci_dev *pci_dev)
{
struct toshoboe_cb *self = (struct toshoboe_cb*)pci_get_drvdata(pci_dev);
unsigned long flags;
IRDA_DEBUG (4, "%s()\n", __FUNCTION__);
if (!self || !self->stopped)
return 0;
if ((!self->irdad) && (!self->async))
return 0;
spin_lock_irqsave(&self->spinlock, flags);
toshoboe_startchip (self);
self->stopped = 0;
netif_wake_queue(self->netdev);
spin_unlock_irqrestore(&self->spinlock, flags);
return 0;
}
static struct pci_driver toshoboe_pci_driver = {
name : "toshoboe",
id_table : toshoboe_pci_tbl,
probe : toshoboe_open,
remove : toshoboe_close,
suspend : toshoboe_gotosleep,
resume : toshoboe_wakeup
};
int __init
toshoboe_init (void)
{
return pci_module_init(&toshoboe_pci_driver);
}
STATIC void __exit
toshoboe_cleanup (void)
{
pci_unregister_driver(&toshoboe_pci_driver);
}
module_init(toshoboe_init);
module_exit(toshoboe_cleanup);
/*********************************************************************
*
* Filename: toshoboe.h
* Version: 2.16
* Description: Driver for the Toshiba OBOE (or type-O or 701)
* FIR Chipset, also supports the DONAUOBOE (type-DO
* or d01) FIR chipset which as far as I know is
* register compatible.
* Status: Experimental.
* Author: James McKenzie <james@fishsoup.dhs.org>
* Created at: Sat May 8 12:35:27 1999
* Modified: 2.16 Martin Lucina <mato@kotelna.sk>
* Modified: 2.16 Sat Jun 22 18:54:29 2002 (sync headers)
* Modified: 2.17 Christian Gennerat <christian.gennerat@polytechnique.org>
* Modified: 2.17 jeu sep 12 08:50:20 2002 (add lock to be used by spinlocks)
*
* Copyright (c) 1999 James McKenzie, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* Neither James McKenzie nor Cambridge University admit liability nor
* provide warranty for any of this software. This material is
* provided "AS-IS" and at no charge.
*
* Applicable Models : Libretto 100/110CT and many more.
* Toshiba refers to this chip as the type-O IR port,
* or the type-DO IR port.
*
* IrDA chip set list from Toshiba Computer Engineering Corp.
* model method maker controller Version
* Portege 320CT FIR,SIR Toshiba Oboe(Triangle)
* Portege 3010CT FIR,SIR Toshiba Oboe(Sydney)
* Portege 3015CT FIR,SIR Toshiba Oboe(Sydney)
* Portege 3020CT FIR,SIR Toshiba Oboe(Sydney)
* Portege 7020CT FIR,SIR ? ?
*
* Satell. 4090XCDT FIR,SIR ? ?
*
* Libretto 100CT FIR,SIR Toshiba Oboe
* Libretto 1000CT FIR,SIR Toshiba Oboe
*
* TECRA750DVD FIR,SIR Toshiba Oboe(Triangle) REV ID=14h
* TECRA780 FIR,SIR Toshiba Oboe(Sandlot) REV ID=32h,33h
* TECRA750CDT FIR,SIR Toshiba Oboe(Triangle) REV ID=13h,14h
* TECRA8000 FIR,SIR Toshiba Oboe(ISKUR) REV ID=23h
*
********************************************************************/
/* The documentation for this chip is allegedly released */
/* However I have not seen it, nor have I managed to contact */
/* anyone who has. HOWEVER the chip bears a striking resemblance */
/* to the IrDA controller in the Toshiba RISC TMPR3922 chip */
/* the documentation for this is freely available at */
/* http://www.toshiba.com/taec/components/Generic/TMPR3922.shtml */
/* The mapping between the registers in that document and the */
/* Registers in the 701 oboe chip are as follows */
/* 3922 reg 701 regs, by bit numbers */
/* 7- 0 15- 8 24-16 31-25 */
/* $28 0x0 0x1 */
/* $2c SEE NOTE 1 */
/* $30 0x6 0x7 */
/* $34 0x8 0x9 SEE NOTE 2 */
/* $38 0x10 0x11 */
/* $3C 0xe SEE NOTE 3 */
/* $40 0x12 0x13 */
/* $44 0x14 0x15 */
/* $48 0x16 0x17 */
/* $4c 0x18 0x19 */
/* $50 0x1a 0x1b */
/* FIXME: could be 0x1b 0x1a here */
/* $54 0x1d 0x1c */
/* $5C 0xf SEE NOTE 4 */
/* $130 SEE NOTE 5 */
/* $134 SEE NOTE 6 */
/* */
/* NOTES: */
/* 1. The pointer to the ring is packed in most unceremoniously */
/* 701 Register Address bits (A9-A0 must be zero) */
/* 0x4: A17 A16 A15 A14 A13 A12 A11 A10 */
/* 0x5: A25 A24 A23 A22 A21 A20 A19 A18 */
/* 0x2: 0 0 A31 A30 A29 A28 A27 A26 */
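/* In C the driver packs a 32-bit physical ring address into */
/* these three registers (this is what toshoboe_startchip does): */
/* OUTB ((physaddr >> 10) & 0xff, OBOE_RING_BASE0); */
/* OUTB ((physaddr >> 18) & 0xff, OBOE_RING_BASE1); */
/* OUTB ((physaddr >> 26) & 0x3f, OBOE_RING_BASE2); */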
/* */
/* 2. The M$ drivers do a write 0x1 to 0x9, however the 3922 */
/* documentation would suggest that a write of 0x1 to 0x8 */
/* would be more appropriate. */
/* */
/* 3. This assignment is tenuous at best, register 0xe seems to */
/* have bits arranged 0 0 0 R/W R/W R/W R/W R/W */
/* if either of the lower two bits are set the chip seems to */
/* switch off */
/* */
/* 4. Bits 7-4 seem to be different; bit 4 seems just to be a */
/* generic receiver busy flag */
/* */
/* 5. and 6. The IER and ISR have a different bit assignment */
/* The lower three bits of both read back as ones */
/* ISR is register 0xc, IER is register 0xd */
/* 7 6 5 4 3 2 1 0 */
/* 0xc: TxDone RxDone TxUndr RxOver SipRcv 1 1 1 */
/* 0xd: TxDone RxDone TxUndr RxOver SipRcv 1 1 1 */
/* TxDone xmit done (generated only if generate interrupt bit */
/* is set in the ring) */
/* RxDone recv completed (or other recv condition if you set it */
/* up) */
/* TxUnder underflow in Transmit FIFO */
/* RxOver overflow in Recv FIFO */
/* SipRcv received serial gap (or other condition you set) */
/* Interrupts are enabled by writing a one to the IER register */
/* Interrupts are cleared by writing a one to the ISR register */
/* */
/* 7. The remaining registers: 0x6 and 0x3 appear to be */
/* reserved parts of 16 or 32 bit registers; the remainder */
/* 0xa 0xb 0x1e 0x1f could possibly be (by their behaviour) */
/* the Unicast Filter register at $58. */
/* */
/* 8. While the core obviously expects 32 bit accesses all the */
/* M$ drivers do 8 bit accesses, in fact the Miniport ones */
/* write and read back the byte several times (why?) */
#ifndef TOSHOBOE_H
#define TOSHOBOE_H
/* Registers */
#define OBOE_IO_EXTENT 0x1f
/*Receive and transmit slot pointers */
#define OBOE_REG(i) (i+(self->base))
#define OBOE_RXSLOT OBOE_REG(0x0)
#define OBOE_TXSLOT OBOE_REG(0x1)
#define OBOE_SLOT_MASK 0x3f
#define OBOE_TXRING_OFFSET 0x200
#define OBOE_TXRING_OFFSET_IN_SLOTS 0x40
/*pointer to the ring */
#define OBOE_RING_BASE0 OBOE_REG(0x4)
#define OBOE_RING_BASE1 OBOE_REG(0x5)
#define OBOE_RING_BASE2 OBOE_REG(0x2)
#define OBOE_RING_BASE3 OBOE_REG(0x3)
/*Number of slots in the ring */
#define OBOE_RING_SIZE OBOE_REG(0x7)
#define OBOE_RING_SIZE_RX4 0x00
#define OBOE_RING_SIZE_RX8 0x01
#define OBOE_RING_SIZE_RX16 0x03
#define OBOE_RING_SIZE_RX32 0x07
#define OBOE_RING_SIZE_RX64 0x0f
#define OBOE_RING_SIZE_TX4 0x00
#define OBOE_RING_SIZE_TX8 0x10
#define OBOE_RING_SIZE_TX16 0x30
#define OBOE_RING_SIZE_TX32 0x70
#define OBOE_RING_SIZE_TX64 0xf0
#define OBOE_RING_MAX_SIZE 64
/*Causes the gubbins to re-examine the ring */
#define OBOE_PROMPT OBOE_REG(0x9)
#define OBOE_PROMPT_BIT 0x1
/* Interrupt Status Register */
#define OBOE_ISR OBOE_REG(0xc)
/* Interrupt Enable Register */
#define OBOE_IER OBOE_REG(0xd)
/* Interrupt bits for IER and ISR */
#define OBOE_INT_TXDONE 0x80
#define OBOE_INT_RXDONE 0x40
#define OBOE_INT_TXUNDER 0x20
#define OBOE_INT_RXOVER 0x10
#define OBOE_INT_SIP 0x08
#define OBOE_INT_MASK 0xf8
/*Reset Register */
#define OBOE_CONFIG1 OBOE_REG(0xe)
#define OBOE_CONFIG1_RST 0x01
#define OBOE_CONFIG1_DISABLE 0x02
#define OBOE_CONFIG1_4 0x08
#define OBOE_CONFIG1_8 0x08
#define OBOE_CONFIG1_ON 0x8
#define OBOE_CONFIG1_RESET 0xf
#define OBOE_CONFIG1_OFF 0xe
#define OBOE_STATUS OBOE_REG(0xf)
#define OBOE_STATUS_RXBUSY 0x10
#define OBOE_STATUS_FIRRX 0x04
#define OBOE_STATUS_MIRRX 0x02
#define OBOE_STATUS_SIRRX 0x01
/*Speed control registers */
#define OBOE_CONFIG0L OBOE_REG(0x10)
#define OBOE_CONFIG0H OBOE_REG(0x11)
#define OBOE_CONFIG0H_TXONLOOP 0x80 /*Transmit when looping (dangerous) */
#define OBOE_CONFIG0H_LOOP 0x40 /*Loopback Tx->Rx */
#define OBOE_CONFIG0H_ENTX 0x10 /*Enable Tx */
#define OBOE_CONFIG0H_ENRX 0x08 /*Enable Rx */
#define OBOE_CONFIG0H_ENDMAC 0x04 /*Enable/reset* the DMA controller */
#define OBOE_CONFIG0H_RCVANY 0x02 /*DMA mode 1=bytes, 0=dwords */
#define OBOE_CONFIG0L_CRC16 0x80 /*CRC 1=16 bit 0=32 bit */
#define OBOE_CONFIG0L_ENFIR 0x40 /*Enable FIR */
#define OBOE_CONFIG0L_ENMIR 0x20 /*Enable MIR */
#define OBOE_CONFIG0L_ENSIR 0x10 /*Enable SIR */
#define OBOE_CONFIG0L_ENSIRF 0x08 /*Enable SIR framer */
#define OBOE_CONFIG0L_SIRTEST 0x04 /*Enable SIR framer in MIR and FIR */
#define OBOE_CONFIG0L_INVERTTX 0x02 /*Invert Tx Line */
#define OBOE_CONFIG0L_INVERTRX 0x01 /*Invert Rx Line */
#define OBOE_BOF OBOE_REG(0x12)
#define OBOE_EOF OBOE_REG(0x13)
#define OBOE_ENABLEL OBOE_REG(0x14)
#define OBOE_ENABLEH OBOE_REG(0x15)
#define OBOE_ENABLEH_PHYANDCLOCK 0x80 /*Toggle low to copy config in */
#define OBOE_ENABLEH_CONFIGERR 0x40
#define OBOE_ENABLEH_FIRON 0x20
#define OBOE_ENABLEH_MIRON 0x10
#define OBOE_ENABLEH_SIRON 0x08
#define OBOE_ENABLEH_ENTX 0x04
#define OBOE_ENABLEH_ENRX 0x02
#define OBOE_ENABLEH_CRC16 0x01
#define OBOE_ENABLEL_BROADCAST 0x01
#define OBOE_CURR_PCONFIGL OBOE_REG(0x16) /*Current config */
#define OBOE_CURR_PCONFIGH OBOE_REG(0x17)
#define OBOE_NEW_PCONFIGL OBOE_REG(0x18)
#define OBOE_NEW_PCONFIGH OBOE_REG(0x19)
#define OBOE_PCONFIGH_BAUDMASK 0xfc
#define OBOE_PCONFIGH_WIDTHMASK 0x04
#define OBOE_PCONFIGL_WIDTHMASK 0xe0
#define OBOE_PCONFIGL_PREAMBLEMASK 0x1f
#define OBOE_PCONFIG_BAUDMASK 0xfc00
#define OBOE_PCONFIG_BAUDSHIFT 10
#define OBOE_PCONFIG_WIDTHMASK 0x04e0
#define OBOE_PCONFIG_WIDTHSHIFT 5
#define OBOE_PCONFIG_PREAMBLEMASK 0x001f
#define OBOE_PCONFIG_PREAMBLESHIFT 0
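/* Worked example: toshoboe_setbaud() composes the 9600 baud SIR */
/* config as pconfig = (11 << OBOE_PCONFIG_BAUDSHIFT) | */
/* (25 << OBOE_PCONFIG_WIDTHSHIFT) = 0x2c00 | 0x0320 = 0x2f20, */
/* written out high byte to NEW_PCONFIGH, low byte to NEW_PCONFIGL */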
#define OBOE_MAXLENL OBOE_REG(0x1a)
#define OBOE_MAXLENH OBOE_REG(0x1b)
#define OBOE_RXCOUNTH OBOE_REG(0x1c) /*Reset on receipt */
#define OBOE_RXCOUNTL OBOE_REG(0x1d) /*of whole packet */
/* The PCI ID of the OBOE chip */
#ifndef PCI_DEVICE_ID_FIR701
#define PCI_DEVICE_ID_FIR701 0x0701
#endif
#ifndef PCI_DEVICE_ID_FIRD01
#define PCI_DEVICE_ID_FIRD01 0x0d01
#endif
struct OboeSlot
{
__u16 len; /*Twelve bits of packet length */
__u8 unused;
__u8 control; /*Slot control/status see below */
__u32 address; /*Slot buffer address */
}
__attribute__ ((packed));
#define OBOE_NTASKS OBOE_TXRING_OFFSET_IN_SLOTS
struct OboeRing
{
struct OboeSlot rx[OBOE_NTASKS];
struct OboeSlot tx[OBOE_NTASKS];
};
#define OBOE_RING_LEN (sizeof(struct OboeRing))
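/* Size check: a packed OboeSlot is 2+1+1+4 = 8 bytes, so with */
/* OBOE_NTASKS = 0x40 slots per direction OBOE_RING_LEN comes to */
/* 2 * 64 * 8 = 1024 bytes, consistent with OBOE_TXRING_OFFSET */
/* (0x200 = 64 * 8) and the A9-A0 alignment requirement in NOTE 1 */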
#define OBOE_CTL_TX_HW_OWNS 0x80 /*W/R This slot owned by the hardware */
#define OBOE_CTL_TX_DISTX_CRC 0x40 /*W Disable CRC generation for [FM]IR */
#define OBOE_CTL_TX_BAD_CRC 0x20 /*W Generate bad CRC */
#define OBOE_CTL_TX_SIP 0x10 /*W Generate an SIP after transmission */
#define OBOE_CTL_TX_MKUNDER 0x08 /*W Generate an underrun error */
#define OBOE_CTL_TX_RTCENTX 0x04 /*W Enable receiver and generate TXdone */
/* After this slot is processed */
#define OBOE_CTL_TX_UNDER 0x01 /*R Set by hardware to indicate underrun */
#define OBOE_CTL_RX_HW_OWNS 0x80 /*W/R This slot owned by hardware */
#define OBOE_CTL_RX_PHYERR 0x40 /*R Decoder error on reception */
#define OBOE_CTL_RX_CRCERR 0x20 /*R CRC error only set for [FM]IR */
#define OBOE_CTL_RX_LENGTH 0x10 /*R Packet > max Rx length */
#define OBOE_CTL_RX_OVER 0x08 /*R set to indicate an overflow */
#define OBOE_CTL_RX_SIRBAD 0x04 /*R SIR had BOF in packet or ABORT sequence */
#define OBOE_CTL_RX_RXEOF 0x02 /*R Finished receiving on this slot */
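/* Sketch, not in the original header (oboe_rx_slot_ok is a made-up
 * helper): a consumer could test whether the hardware has handed an
 * rx slot back holding a complete, error-free frame like this.
 */
static inline int
oboe_rx_slot_ok (struct OboeSlot *slot)
{
  return !(slot->control & OBOE_CTL_RX_HW_OWNS)	/* CPU owns it again */
    && (slot->control & OBOE_CTL_RX_RXEOF)	/* whole frame received */
    && !(slot->control & (OBOE_CTL_RX_PHYERR | OBOE_CTL_RX_CRCERR
			  | OBOE_CTL_RX_OVER | OBOE_CTL_RX_LENGTH
			  | OBOE_CTL_RX_SIRBAD));
}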
struct toshoboe_cb
{
struct net_device *netdev; /* Yes! we are some kind of netdevice */
struct net_device_stats stats;
struct tty_driver ttydev;
struct irlap_cb *irlap; /* The link layer we are bound to */
chipio_t io; /* IrDA controller information */
struct qos_info qos; /* QoS capabilities for this device */
__u32 flags; /* Interface flags */
struct pci_dev *pdev; /*PCI device */
int base; /*IO base */
int txpending; /*how many tx's are pending */
int txs, rxs; /*Which slots are we at */
int irdad; /*Driver under control of netdev end */
int async; /*Driver under control of async end */
int stopped; /*Stopped by some or other APM stuff */
int filter; /*In SIR mode do we want to receive
frames or byte ranges */
void *ringbuf; /*The ring buffer */
struct OboeRing *ring; /*The ring */
void *tx_bufs[OBOE_RING_MAX_SIZE]; /*The buffers */
void *rx_bufs[OBOE_RING_MAX_SIZE];
int speed; /*Current setting of the speed */
int new_speed; /*Set to request a speed change */
/* The spinlock protects critical parts of the driver.
 * Locking is done like this:
 * spin_lock_irqsave(&self->spinlock, flags);
 * Releasing the lock:
 * spin_unlock_irqrestore(&self->spinlock, flags);
 */
spinlock_t spinlock;
/* Used for the probe and diagnostics code */
int int_rx;
int int_tx;
int int_txunder;
int int_rxover;
int int_sip;
};
#endif
......@@ -2,9 +2,7 @@
*
* vlsi_ir.c: VLSI82C147 PCI IrDA controller driver for Linux
*
* Version: 0.3a, Nov 10, 2001
*
* Copyright (c) 2001 Martin Diehl
* Copyright (c) 2001-2002 Martin Diehl
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
......@@ -25,6 +23,17 @@
#include <linux/module.h>
MODULE_DESCRIPTION("IrDA SIR/MIR/FIR driver for VLSI 82C147");
MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
MODULE_LICENSE("GPL");
EXPORT_NO_SYMBOLS;
#define DRIVER_NAME "vlsi_ir"
#define DRIVER_VERSION "v0.4"
/********************************************************/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>
......@@ -33,6 +42,9 @@
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <net/irda/irda.h>
#include <net/irda/irda_device.h>
......@@ -40,17 +52,9 @@
#include <net/irda/vlsi_ir.h>
/********************************************************/
MODULE_DESCRIPTION("IrDA SIR/MIR/FIR driver for VLSI 82C147");
MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
MODULE_LICENSE("GPL");
static /* const */ char drivername[] = "vlsi_ir";
static /* const */ char drivername[] = DRIVER_NAME;
#define PCI_CLASS_WIRELESS_IRDA 0x0d00
......@@ -64,13 +68,8 @@ static struct pci_device_id vlsi_irda_table [] __devinitdata = { {
MODULE_DEVICE_TABLE(pci, vlsi_irda_table);
/********************************************************/
MODULE_PARM(clksrc, "i");
MODULE_PARM_DESC(clksrc, "clock input source selection");
/* clksrc: which clock source to be used
* 0: auto - try PLL, fallback to 40MHz XCLK
* 1: on-chip 48MHz PLL
......@@ -78,12 +77,10 @@ MODULE_PARM_DESC(clksrc, "clock input source selection");
* 3: external 40MHz XCLK (HP OB-800)
*/
MODULE_PARM(clksrc, "i");
MODULE_PARM_DESC(clksrc, "clock input source selection");
static int clksrc = 0; /* default is 0(auto) */
MODULE_PARM(ringsize, "1-2i");
MODULE_PARM_DESC(ringsize, "TX, RX ring descriptor size");
/* ringsize: size of the tx and rx descriptor rings
* independent for tx and rx
* specify as ringsize=tx[,rx]
......@@ -92,11 +89,9 @@ MODULE_PARM_DESC(ringsize, "TX, RX ring descriptor size");
* there should be no gain when using rings larger than 8
*/
static int ringsize[] = {8,8}; /* default is tx=rx=8 */
MODULE_PARM(sirpulse, "i");
MODULE_PARM_DESC(sirpulse, "SIR pulse width tuning");
MODULE_PARM(ringsize, "1-2i");
MODULE_PARM_DESC(ringsize, "TX, RX ring descriptor size");
static int ringsize[] = {8,8}; /* default is tx=8 / rx=8 */
/* sirpulse: tuning of the SIR pulse width within IrPHY 1.3 limits
* 0: very short, 1.5us (exception: 6us at 2.4 kbaud)
......@@ -107,346 +102,771 @@ MODULE_PARM_DESC(sirpulse, "SIR pulse width tuning");
* pulse width saves more than 90% of the transmitted IR power.
*/
MODULE_PARM(sirpulse, "i");
MODULE_PARM_DESC(sirpulse, "SIR pulse width tuning");
static int sirpulse = 1; /* default is 3/16 bittime */
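/* Usage sketch (hypothetical values; module name per DRIVER_NAME above):
 *
 *	modprobe vlsi_ir clksrc=3 ringsize=8,8 sirpulse=1
 *
 * would force the external 40MHz XCLK, 8-entry tx/rx rings and
 * 3/16-bittime SIR pulses.
 */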
MODULE_PARM(qos_mtt_bits, "i");
MODULE_PARM_DESC(qos_mtt_bits, "IrLAP bitfield representing min-turn-time");
/* qos_mtt_bits: encoded min-turn-time value we require the peer device
* to use before transmitting to us. "Type 1" (per-station)
* bitfield according to IrLAP definition (section 6.6.8)
* The HP HDLS-1100 requires 1 msec - don't even know
* if this is the one which is used by my OB800
* Don't know which transceiver is used by my OB800 - the
* pretty common HP HDLS-1100 requires 1 msec - so let's use this.
*/
MODULE_PARM(qos_mtt_bits, "i");
MODULE_PARM_DESC(qos_mtt_bits, "IrLAP bitfield representing min-turn-time");
static int qos_mtt_bits = 0x04; /* default is 1 ms */
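/* Note (encoding as used by the Linux IrDA stack's qos handling, stated
 * here for orientation): in the Type-1 min-turn-time bitfield bit 0
 * stands for 10 ms, bit 1 for 5 ms, bit 2 for 1 ms, bit 3 for 0.5 ms,
 * and so on - hence the 0x04 default above asks the peer for 1 ms.
 */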
/********************************************************/
/* some helpers for operations on ring descriptors */
static inline int rd_is_active(struct vlsi_ring *r, unsigned i)
static void vlsi_reg_debug(unsigned iobase, const char *s)
{
return ((r->hw[i].rd_status & RD_STAT_ACTIVE) != 0);
}
int i;
static inline void rd_activate(struct vlsi_ring *r, unsigned i)
{
r->hw[i].rd_status |= RD_STAT_ACTIVE;
printk(KERN_DEBUG "%s: ", s);
for (i = 0; i < 0x20; i++)
printk("%02x", (unsigned)inb((iobase+i)));
printk("\n");
}
static inline void rd_set_addr_status(struct vlsi_ring *r, unsigned i, dma_addr_t a, u8 s)
static void vlsi_ring_debug(struct vlsi_ring *r)
{
struct ring_descr *rd = r->hw +i;
/* ordering is important for two reasons:
* - overlaid: writing addr overwrites status
* - we want to write status last so we have valid address in
* case status has RD_STAT_ACTIVE set
*/
if ((a & ~DMA_MASK_MSTRPAGE) != MSTRPAGE_VALUE)
BUG();
struct ring_descr *rd;
unsigned i;
a &= DMA_MASK_MSTRPAGE; /* clear highbyte to make sure we won't write
* to status - just in case MSTRPAGE_VALUE!=0
*/
rd->rd_addr = a;
wmb();
rd->rd_status = s; /* potentially passes ownership to the hardware */
printk(KERN_DEBUG "%s - ring %p / size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
__FUNCTION__, r, r->size, r->mask, r->len, r->dir, r->rd[0].hw);
printk(KERN_DEBUG "%s - head = %d / tail = %d\n", __FUNCTION__,
atomic_read(&r->head) & r->mask, atomic_read(&r->tail) & r->mask);
for (i = 0; i < r->size; i++) {
rd = &r->rd[i];
printk(KERN_DEBUG "%s - ring descr %u: ", __FUNCTION__, i);
printk("skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
printk(KERN_DEBUG "%s - hw: status=%02x count=%u addr=0x%08x\n",
__FUNCTION__, (unsigned) rd_get_status(rd),
(unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
}
}
static inline void rd_set_status(struct vlsi_ring *r, unsigned i, u8 s)
{
r->hw[i].rd_status = s;
}
/********************************************************/
static inline void rd_set_count(struct vlsi_ring *r, unsigned i, u16 c)
{
r->hw[i].rd_count = c;
}
#ifdef CONFIG_PROC_FS
static inline u8 rd_get_status(struct vlsi_ring *r, unsigned i)
static int vlsi_proc_pdev(struct pci_dev *pdev, char *buf, int len)
{
return r->hw[i].rd_status;
}
unsigned iobase = pci_resource_start(pdev, 0);
unsigned i;
char *out = buf;
static inline dma_addr_t rd_get_addr(struct vlsi_ring *r, unsigned i)
{
dma_addr_t a;
if (len < 500)
return 0;
a = (r->hw[i].rd_addr & DMA_MASK_MSTRPAGE) | (MSTRPAGE_VALUE << 24);
return a;
out += sprintf(out, "\n%s (vid/did: %04x/%04x)\n",
pdev->name, (int)pdev->vendor, (int)pdev->device);
out += sprintf(out, "pci-power-state: %u\n", (unsigned) pdev->current_state);
out += sprintf(out, "resources: irq=%u / io=0x%04x / dma_mask=0x%016Lx\n",
pdev->irq, (unsigned)pci_resource_start(pdev, 0), (u64)pdev->dma_mask);
out += sprintf(out, "hw registers: ");
for (i = 0; i < 0x20; i++)
out += sprintf(out, "%02x", (unsigned)inb((iobase+i)));
out += sprintf(out, "\n");
return out - buf;
}
static inline u16 rd_get_count(struct vlsi_ring *r, unsigned i)
static int vlsi_proc_ndev(struct net_device *ndev, char *buf, int len)
{
return r->hw[i].rd_count;
}
vlsi_irda_dev_t *idev = ndev->priv;
char *out = buf;
u8 byte;
u16 word;
unsigned delta1, delta2;
struct timeval now;
unsigned iobase = ndev->base_addr;
/* producer advances r->head when descriptor was added for processing by hw */
if (len < 1000)
return 0;
static inline void ring_put(struct vlsi_ring *r)
{
r->head = (r->head + 1) & r->mask;
out += sprintf(out, "\n%s link state: %s / %s / %s / %s\n", ndev->name,
netif_device_present(ndev) ? "attached" : "detached",
netif_running(ndev) ? "running" : "not running",
netif_carrier_ok(ndev) ? "carrier ok" : "no carrier",
netif_queue_stopped(ndev) ? "queue stopped" : "queue running");
if (!netif_running(ndev))
return out - buf;
out += sprintf(out, "\nhw-state:\n");
pci_read_config_byte(idev->pdev, VLSI_PCI_IRMISC, &byte);
out += sprintf(out, "IRMISC:%s%s%s UART%s",
(byte&IRMISC_IRRAIL) ? " irrail" : "",
(byte&IRMISC_IRPD) ? " irpd" : "",
(byte&IRMISC_UARTTST) ? " uarttest" : "",
(byte&IRMISC_UARTEN) ? "" : " disabled\n");
if (byte&IRMISC_UARTEN) {
out += sprintf(out, "@0x%s\n",
(byte&2) ? ((byte&1) ? "3e8" : "2e8")
: ((byte&1) ? "3f8" : "2f8"));
}
pci_read_config_byte(idev->pdev, VLSI_PCI_CLKCTL, &byte);
out += sprintf(out, "CLKCTL: PLL %s%s%s / clock %s / wakeup %s\n",
(byte&CLKCTL_PD_INV) ? "powered" : "down",
(byte&CLKCTL_LOCK) ? " locked" : "",
(byte&CLKCTL_EXTCLK) ? ((byte&CLKCTL_XCKSEL)?" / 40 MHz XCLK":" / 48 MHz XCLK") : "",
(byte&CLKCTL_CLKSTP) ? "stopped" : "running",
(byte&CLKCTL_WAKE) ? "enabled" : "disabled");
pci_read_config_byte(idev->pdev, VLSI_PCI_MSTRPAGE, &byte);
out += sprintf(out, "MSTRPAGE: 0x%02x\n", (unsigned)byte);
byte = inb(iobase+VLSI_PIO_IRINTR);
out += sprintf(out, "IRINTR:%s%s%s%s%s%s%s%s\n",
(byte&IRINTR_ACTEN) ? " ACTEN" : "",
(byte&IRINTR_RPKTEN) ? " RPKTEN" : "",
(byte&IRINTR_TPKTEN) ? " TPKTEN" : "",
(byte&IRINTR_OE_EN) ? " OE_EN" : "",
(byte&IRINTR_ACTIVITY) ? " ACTIVITY" : "",
(byte&IRINTR_RPKTINT) ? " RPKTINT" : "",
(byte&IRINTR_TPKTINT) ? " TPKTINT" : "",
(byte&IRINTR_OE_INT) ? " OE_INT" : "");
word = inw(iobase+VLSI_PIO_RINGPTR);
out += sprintf(out, "RINGPTR: rx=%u / tx=%u\n", RINGPTR_GET_RX(word), RINGPTR_GET_TX(word));
word = inw(iobase+VLSI_PIO_RINGBASE);
out += sprintf(out, "RINGBASE: busmap=0x%08x\n",
((unsigned)word << 10)|(MSTRPAGE_VALUE<<24));
word = inw(iobase+VLSI_PIO_RINGSIZE);
out += sprintf(out, "RINGSIZE: rx=%u / tx=%u\n", RINGSIZE_TO_RXSIZE(word),
RINGSIZE_TO_TXSIZE(word));
word = inw(iobase+VLSI_PIO_IRCFG);
out += sprintf(out, "IRCFG:%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
(word&IRCFG_LOOP) ? " LOOP" : "",
(word&IRCFG_ENTX) ? " ENTX" : "",
(word&IRCFG_ENRX) ? " ENRX" : "",
(word&IRCFG_MSTR) ? " MSTR" : "",
(word&IRCFG_RXANY) ? " RXANY" : "",
(word&IRCFG_CRC16) ? " CRC16" : "",
(word&IRCFG_FIR) ? " FIR" : "",
(word&IRCFG_MIR) ? " MIR" : "",
(word&IRCFG_SIR) ? " SIR" : "",
(word&IRCFG_SIRFILT) ? " SIRFILT" : "",
(word&IRCFG_SIRTEST) ? " SIRTEST" : "",
(word&IRCFG_TXPOL) ? " TXPOL" : "",
(word&IRCFG_RXPOL) ? " RXPOL" : "");
word = inw(iobase+VLSI_PIO_IRENABLE);
out += sprintf(out, "IRENABLE:%s%s%s%s%s%s%s%s\n",
(word&IRENABLE_IREN) ? " IRENABLE" : "",
(word&IRENABLE_CFGER) ? " CFGERR" : "",
(word&IRENABLE_FIR_ON) ? " FIR_ON" : "",
(word&IRENABLE_MIR_ON) ? " MIR_ON" : "",
(word&IRENABLE_SIR_ON) ? " SIR_ON" : "",
(word&IRENABLE_ENTXST) ? " ENTXST" : "",
(word&IRENABLE_ENRXST) ? " ENRXST" : "",
(word&IRENABLE_CRC16_ON) ? " CRC16_ON" : "");
word = inw(iobase+VLSI_PIO_PHYCTL);
out += sprintf(out, "PHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n",
(unsigned)PHYCTL_TO_BAUD(word),
(unsigned)PHYCTL_TO_PLSWID(word),
(unsigned)PHYCTL_TO_PREAMB(word));
word = inw(iobase+VLSI_PIO_NPHYCTL);
out += sprintf(out, "NPHYCTL: baud-divisor=%u / pulsewidth=%u / preamble=%u\n",
(unsigned)PHYCTL_TO_BAUD(word),
(unsigned)PHYCTL_TO_PLSWID(word),
(unsigned)PHYCTL_TO_PREAMB(word));
word = inw(iobase+VLSI_PIO_MAXPKT);
out += sprintf(out, "MAXPKT: max. rx packet size = %u\n", word);
word = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
out += sprintf(out, "RCVBCNT: rx-fifo filling level = %u\n", word);
out += sprintf(out, "\nsw-state:\n");
out += sprintf(out, "IrPHY setup: %d baud - %s encoding\n", idev->baud,
(idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR"));
do_gettimeofday(&now);
if (now.tv_usec >= idev->last_rx.tv_usec) {
delta2 = now.tv_usec - idev->last_rx.tv_usec;
delta1 = 0;
}
else {
delta2 = 1000000 + now.tv_usec - idev->last_rx.tv_usec;
delta1 = 1;
}
out += sprintf(out, "last rx: %lu.%06u sec\n",
now.tv_sec - idev->last_rx.tv_sec - delta1, delta2);
out += sprintf(out, "RX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu",
idev->stats.rx_packets, idev->stats.rx_bytes, idev->stats.rx_errors,
idev->stats.rx_dropped);
out += sprintf(out, " / overrun=%lu / length=%lu / frame=%lu / crc=%lu\n",
idev->stats.rx_over_errors, idev->stats.rx_length_errors,
idev->stats.rx_frame_errors, idev->stats.rx_crc_errors);
out += sprintf(out, "TX: packets=%lu / bytes=%lu / errors=%lu / dropped=%lu / fifo=%lu\n",
idev->stats.tx_packets, idev->stats.tx_bytes, idev->stats.tx_errors,
idev->stats.tx_dropped, idev->stats.tx_fifo_errors);
return out - buf;
}
/* consumer advances r->tail when descriptor was removed after getting processed by hw */
static inline void ring_get(struct vlsi_ring *r)
static int vlsi_proc_ring(struct vlsi_ring *r, char *buf, int len)
{
r->tail = (r->tail + 1) & r->mask;
}
struct ring_descr *rd;
unsigned i, j;
int h, t;
char *out = buf;
/********************************************************/
if (len < 3000)
return 0;
/* the memory required to hold the 2 descriptor rings */
out += sprintf(out, "size %u / mask 0x%04x / len %u / dir %d / hw %p\n",
r->size, r->mask, r->len, r->dir, r->rd[0].hw);
h = atomic_read(&r->head) & r->mask;
t = atomic_read(&r->tail) & r->mask;
out += sprintf(out, "head = %d / tail = %d ", h, t);
if (h == t)
out += sprintf(out, "(empty)\n");
else {
if (((t+1)&r->mask) == h)
out += sprintf(out, "(full)\n");
else
out += sprintf(out, "(level = %d)\n", ((unsigned)(t-h) & r->mask));
rd = &r->rd[h];
j = (unsigned) rd_get_count(rd);
out += sprintf(out, "current: rd = %d / status = %02x / len = %u\n",
h, (unsigned)rd_get_status(rd), j);
if (j > 0) {
out += sprintf(out, " data:");
if (j > 20)
j = 20;
for (i = 0; i < j; i++)
out += sprintf(out, " %02x", (unsigned)((unsigned char *)rd->buf)[i]);
out += sprintf(out, "\n");
}
}
for (i = 0; i < r->size; i++) {
rd = &r->rd[i];
out += sprintf(out, "> ring descr %u: ", i);
out += sprintf(out, "skb=%p data=%p hw=%p\n", rd->skb, rd->buf, rd->hw);
out += sprintf(out, " hw: status=%02x count=%u busaddr=0x%08x\n",
(unsigned) rd_get_status(rd),
(unsigned) rd_get_count(rd), (unsigned) rd_get_addr(rd));
}
return out - buf;
}
#define RING_AREA_SIZE (2 * MAX_RING_DESCR * sizeof(struct ring_descr))
static int vlsi_proc_print(struct net_device *ndev, char *buf, int len)
{
vlsi_irda_dev_t *idev;
unsigned long flags;
char *out = buf;
/* the memory required to hold the rings' buffer entries */
if (!ndev || !ndev->priv) {
printk(KERN_ERR "%s: invalid ptr!\n", __FUNCTION__);
return 0;
}
#define RING_ENTRY_SIZE (2 * MAX_RING_DESCR * sizeof(struct ring_entry))
idev = ndev->priv;
/********************************************************/
if (len < 8000)
return 0;
/* just dump all registers */
out += sprintf(out, "\n%s %s\n\n", DRIVER_NAME, DRIVER_VERSION);
out += sprintf(out, "clksrc: %s\n",
(clksrc>=2) ? ((clksrc==3)?"40MHz XCLK":"48MHz XCLK")
: ((clksrc==1)?"48MHz PLL":"autodetect"));
out += sprintf(out, "ringsize: tx=%d / rx=%d\n",
ringsize[0], ringsize[1]);
out += sprintf(out, "sirpulse: %s\n", (sirpulse)?"3/16 bittime":"short");
out += sprintf(out, "qos_mtt_bits: 0x%02x\n", (unsigned)qos_mtt_bits);
static void vlsi_reg_debug(unsigned iobase, const char *s)
{
int i;
spin_lock_irqsave(&idev->lock, flags);
if (idev->pdev != NULL) {
out += vlsi_proc_pdev(idev->pdev, out, len - (out-buf));
if (idev->pdev->current_state == 0)
out += vlsi_proc_ndev(ndev, out, len - (out-buf));
else
out += sprintf(out, "\nPCI controller down - resume_ok = %d\n",
idev->resume_ok);
if (netif_running(ndev) && idev->rx_ring && idev->tx_ring) {
out += sprintf(out, "\n--------- RX ring -----------\n\n");
out += vlsi_proc_ring(idev->rx_ring, out, len - (out-buf));
out += sprintf(out, "\n--------- TX ring -----------\n\n");
out += vlsi_proc_ring(idev->tx_ring, out, len - (out-buf));
}
}
out += sprintf(out, "\n");
spin_unlock_irqrestore(&idev->lock, flags);
mb();
printk(KERN_DEBUG "%s: ", s);
for (i = 0; i < 0x20; i++)
printk("%02x", (unsigned)inb((iobase+i)));
printk("\n");
return out - buf;
}
/********************************************************/
static struct proc_dir_entry *vlsi_proc_root = NULL;
static int vlsi_set_clock(struct pci_dev *pdev)
{
u8 clkctl, lock;
int i, count;
struct vlsi_proc_data {
int size;
char *data;
};
if (clksrc < 2) { /* auto or PLL: try PLL */
clkctl = CLKCTL_NO_PD | CLKCTL_CLKSTP;
pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
/* most of the proc-fops code borrowed from usb/uhci */
/* procedure to detect PLL lock synchronisation:
* after 0.5 msec initial delay we expect to find 3 PLL lock
* indications within 10 msec for successful PLL detection.
*/
udelay(500);
count = 0;
for (i = 500; i <= 10000; i += 50) { /* max 10 msec */
pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &lock);
if (lock&CLKCTL_LOCK) {
if (++count >= 3)
break;
}
udelay(50);
}
if (count < 3) {
if (clksrc == 1) { /* explicitly asked for PLL hence bail out */
printk(KERN_ERR "%s: no PLL or failed to lock!\n",
__FUNCTION__);
clkctl = CLKCTL_CLKSTP;
pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
return -1;
}
else /* was: clksrc=0(auto) */
clksrc = 3; /* fallback to 40MHz XCLK (OB800) */
static int vlsi_proc_open(struct inode *inode, struct file *file)
{
const struct proc_dir_entry *pde = PDE(inode);
struct net_device *ndev = pde->data;
vlsi_irda_dev_t *idev = ndev->priv;
struct vlsi_proc_data *procdata;
const int maxdata = 8000;
printk(KERN_INFO "%s: PLL not locked, fallback to clksrc=%d\n",
__FUNCTION__, clksrc);
}
else { /* got successful PLL lock */
clksrc = 1;
return 0;
lock_kernel();
procdata = kmalloc(sizeof(*procdata), GFP_KERNEL);
if (!procdata) {
unlock_kernel();
return -ENOMEM;
}
procdata->data = kmalloc(maxdata, GFP_KERNEL);
if (!procdata->data) {
kfree(procdata);
unlock_kernel();
return -ENOMEM;
}
/* we get here if either no PLL was detected in auto-mode or
an external clock source was explicitly specified */
clkctl = CLKCTL_EXTCLK | CLKCTL_CLKSTP;
if (clksrc == 3)
clkctl |= CLKCTL_XCKSEL;
pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
down(&idev->sem);
procdata->size = vlsi_proc_print(ndev, procdata->data, maxdata);
up(&idev->sem);
/* no way to test for working XCLK */
file->private_data = procdata;
return 0;
}
static void vlsi_start_clock(struct pci_dev *pdev)
static loff_t vlsi_proc_lseek(struct file *file, loff_t off, int whence)
{
u8 clkctl;
struct vlsi_proc_data *procdata;
loff_t new = -1;
printk(KERN_INFO "%s: start clock using %s as input\n", __FUNCTION__,
(clksrc&2)?((clksrc&1)?"40MHz XCLK":"48MHz XCLK"):"48MHz PLL");
pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
clkctl &= ~CLKCTL_CLKSTP;
pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
}
lock_kernel();
procdata = file->private_data;
switch (whence) {
case 0:
new = off;
break;
case 1:
new = file->f_pos + off;
break;
}
if (new < 0 || new > procdata->size) {
unlock_kernel();
return -EINVAL;
}
unlock_kernel();
return (file->f_pos = new);
}
static void vlsi_stop_clock(struct pci_dev *pdev)
static ssize_t vlsi_proc_read(struct file *file, char *buf, size_t nbytes,
loff_t *ppos)
{
u8 clkctl;
struct vlsi_proc_data *procdata = file->private_data;
unsigned int pos;
unsigned int size;
pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
clkctl |= CLKCTL_CLKSTP;
pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
}
pos = *ppos;
size = procdata->size;
if (pos >= size)
return 0;
if (nbytes >= size)
nbytes = size;
if (pos + nbytes > size)
nbytes = size - pos;
if (!access_ok(VERIFY_WRITE, buf, nbytes))
return -EINVAL;
static void vlsi_unset_clock(struct pci_dev *pdev)
{
u8 clkctl;
copy_to_user(buf, procdata->data + pos, nbytes);
pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
if (!(clkctl&CLKCTL_CLKSTP))
/* make sure clock is already stopped */
vlsi_stop_clock(pdev);
*ppos += nbytes;
clkctl &= ~(CLKCTL_EXTCLK | CLKCTL_NO_PD);
pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
return nbytes;
}
/********************************************************/
static int vlsi_proc_release(struct inode *inode, struct file *file)
{
struct vlsi_proc_data *procdata = file->private_data;
/* ### FIXME: don't use old virt_to_bus() anymore! */
kfree(procdata->data);
kfree(procdata);
return 0;
}
static void vlsi_arm_rx(struct vlsi_ring *r)
{
unsigned i;
dma_addr_t ba;
static struct file_operations vlsi_proc_fops = {
open: vlsi_proc_open,
llseek: vlsi_proc_lseek,
read: vlsi_proc_read,
release: vlsi_proc_release,
};
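/* How these fops get wired up is outside this hunk; a plausible sketch
 * (vlsi_proc_root as declared above, entry name taken from the netdev):
 *
 *	ent = create_proc_entry(ndev->name, S_IFREG|S_IRUGO, vlsi_proc_root);
 *	if (ent) {
 *		ent->data = ndev;
 *		ent->proc_fops = &vlsi_proc_fops;
 *	}
 *
 * vlsi_proc_open() then recovers the net_device via PDE(inode)->data.
 */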
#endif
for (i = 0; i < r->size; i++) {
if (r->buf[i].data == NULL)
BUG();
ba = virt_to_bus(r->buf[i].data);
rd_set_addr_status(r, i, ba, RD_STAT_ACTIVE);
}
}
/********************************************************/
static int vlsi_alloc_ringbuf(struct vlsi_ring *r)
static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr_hw *hwmap,
unsigned size, unsigned len, int dir)
{
struct vlsi_ring *r;
struct ring_descr *rd;
unsigned i, j;
r->head = r->tail = 0;
r->mask = r->size - 1;
for (i = 0; i < r->size; i++) {
r->buf[i].skb = NULL;
r->buf[i].data = kmalloc(XFER_BUF_SIZE, GFP_KERNEL|GFP_DMA);
if (r->buf[i].data == NULL) {
dma_addr_t busaddr;
if (!size || ((size-1)&size)!=0) /* must be >0 and power of 2 */
return NULL;
r = kmalloc(sizeof(*r) + size * sizeof(struct ring_descr), GFP_KERNEL);
if (!r)
return NULL;
memset(r, 0, sizeof(*r));
r->pdev = pdev;
r->dir = dir;
r->len = len;
r->rd = (struct ring_descr *)(r+1);
r->mask = size - 1;
r->size = size;
atomic_set(&r->head, 0);
atomic_set(&r->tail, 0);
for (i = 0; i < size; i++) {
rd = r->rd + i;
memset(rd, 0, sizeof(*rd));
rd->hw = hwmap + i;
rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
if (rd->buf == NULL) {
for (j = 0; j < i; j++) {
kfree(r->buf[j].data);
r->buf[j].data = NULL;
rd = r->rd + j;
busaddr = rd_get_addr(rd);
rd_set_addr_status(rd, 0, 0);
if (busaddr)
pci_unmap_single(pdev, busaddr, len, dir);
kfree(rd->buf);
rd->buf = NULL;
}
return -ENOMEM;
kfree(r);
return NULL;
}
busaddr = pci_map_single(pdev, rd->buf, len, dir);
if (!busaddr) {
printk(KERN_ERR "%s: failed to create PCI-MAP for %p",
__FUNCTION__, rd->buf);
BUG();
}
rd_set_addr_status(rd, busaddr, 0);
pci_dma_sync_single(pdev, busaddr, len, dir);
/* initially, the dma buffer is owned by the CPU */
rd->skb = NULL;
}
return 0;
return r;
}
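/* For orientation - the ring accessors used below live in vlsi_ir.h;
 * their semantics are roughly (a summary, not the actual code):
 *	ring_first(r)	oldest busy descriptor, or NULL if ring empty
 *	ring_get(r)	advance consumer (head), return new first/NULL
 *	ring_last(r)	next free descriptor, or NULL if ring full
 *	ring_put(r)	advance producer (tail), return new last/NULL
 */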
static void vlsi_free_ringbuf(struct vlsi_ring *r)
static int vlsi_free_ring(struct vlsi_ring *r)
{
struct ring_descr *rd;
unsigned i;
dma_addr_t busaddr;
for (i = 0; i < r->size; i++) {
if (r->buf[i].data == NULL)
continue;
if (r->buf[i].skb) {
dev_kfree_skb(r->buf[i].skb);
r->buf[i].skb = NULL;
}
else
kfree(r->buf[i].data);
r->buf[i].data = NULL;
rd = r->rd + i;
if (rd->skb)
dev_kfree_skb_any(rd->skb);
busaddr = rd_get_addr(rd);
rd_set_addr_status(rd, 0, 0);
if (busaddr)
pci_unmap_single(r->pdev, busaddr, r->len, r->dir);
if (rd->buf)
kfree(rd->buf);
}
kfree(r);
return 0;
}
static int vlsi_init_ring(vlsi_irda_dev_t *idev)
static int vlsi_create_hwif(vlsi_irda_dev_t *idev)
{
char *ringarea;
struct ring_descr_hw *hwmap;
idev->virtaddr = NULL;
idev->busaddr = 0;
ringarea = pci_alloc_consistent(idev->pdev, RING_AREA_SIZE, &idev->busaddr);
ringarea = pci_alloc_consistent(idev->pdev, HW_RING_AREA_SIZE, &idev->busaddr);
if (!ringarea) {
printk(KERN_ERR "%s: insufficient memory for descriptor rings\n",
__FUNCTION__);
return -ENOMEM;
goto out;
}
memset(ringarea, 0, RING_AREA_SIZE);
memset(ringarea, 0, HW_RING_AREA_SIZE);
#if 0
printk(KERN_DEBUG "%s: (%d,%d)-ring %p / %p\n", __FUNCTION__,
ringsize[0], ringsize[1], ringarea,
(void *)(unsigned)idev->busaddr);
#endif
hwmap = (struct ring_descr_hw *)ringarea;
idev->rx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[1],
XFER_BUF_SIZE, PCI_DMA_FROMDEVICE);
if (idev->rx_ring == NULL)
goto out_unmap;
hwmap += MAX_RING_DESCR;
idev->tx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[0],
XFER_BUF_SIZE, PCI_DMA_TODEVICE);
if (idev->tx_ring == NULL)
goto out_free_rx;
idev->rx_ring.size = ringsize[1];
idev->rx_ring.hw = (struct ring_descr *)ringarea;
if (!vlsi_alloc_ringbuf(&idev->rx_ring)) {
idev->tx_ring.size = ringsize[0];
idev->tx_ring.hw = idev->rx_ring.hw + MAX_RING_DESCR;
if (!vlsi_alloc_ringbuf(&idev->tx_ring)) {
idev->virtaddr = ringarea;
return 0;
}
vlsi_free_ringbuf(&idev->rx_ring);
}
pci_free_consistent(idev->pdev, RING_AREA_SIZE,
ringarea, idev->busaddr);
printk(KERN_ERR "%s: insufficient memory for ring buffers\n",
__FUNCTION__);
return -1;
out_free_rx:
vlsi_free_ring(idev->rx_ring);
out_unmap:
idev->rx_ring = idev->tx_ring = NULL;
pci_free_consistent(idev->pdev, HW_RING_AREA_SIZE, ringarea, idev->busaddr);
idev->busaddr = 0;
out:
return -ENOMEM;
}
static int vlsi_destroy_hwif(vlsi_irda_dev_t *idev)
{
vlsi_free_ring(idev->rx_ring);
vlsi_free_ring(idev->tx_ring);
idev->rx_ring = idev->tx_ring = NULL;
if (idev->busaddr)
pci_free_consistent(idev->pdev,HW_RING_AREA_SIZE,idev->virtaddr,idev->busaddr);
/********************************************************/
idev->virtaddr = NULL;
idev->busaddr = 0;
return 0;
}
/********************************************************/
static int vlsi_set_baud(struct net_device *ndev)
static int vlsi_process_rx(struct vlsi_ring *r, struct ring_descr *rd)
{
u16 status;
int crclen, len = 0;
struct sk_buff *skb;
int ret = 0;
struct net_device *ndev = (struct net_device *)pci_get_drvdata(r->pdev);
vlsi_irda_dev_t *idev = ndev->priv;
unsigned long flags;
u16 nphyctl;
unsigned iobase;
u16 config;
unsigned mode;
int ret;
int baudrate;
baudrate = idev->new_baud;
iobase = ndev->base_addr;
printk(KERN_DEBUG "%s: %d -> %d\n", __FUNCTION__, idev->baud, idev->new_baud);
spin_lock_irqsave(&idev->lock, flags);
outw(0, iobase+VLSI_PIO_IRENABLE);
if (baudrate == 4000000) {
mode = IFF_FIR;
config = IRCFG_FIR;
nphyctl = PHYCTL_FIR;
pci_dma_sync_single(r->pdev, rd_get_addr(rd), r->len, r->dir);
/* dma buffer now owned by the CPU */
status = rd_get_status(rd);
if (status & RD_RX_ERROR) {
if (status & RD_RX_OVER)
ret |= VLSI_RX_OVER;
if (status & RD_RX_LENGTH)
ret |= VLSI_RX_LENGTH;
if (status & RD_RX_PHYERR)
ret |= VLSI_RX_FRAME;
if (status & RD_RX_CRCERR)
ret |= VLSI_RX_CRC;
}
else {
len = rd_get_count(rd);
crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16);
len -= crclen; /* remove trailing CRC */
if (len <= 0) {
printk(KERN_ERR "%s: strange frame (len=%d)\n",
__FUNCTION__, len);
ret |= VLSI_RX_DROP;
}
else if (!rd->skb) {
printk(KERN_ERR "%s: rx packet dropped\n", __FUNCTION__);
ret |= VLSI_RX_DROP;
}
else {
skb = rd->skb;
rd->skb = NULL;
skb->dev = ndev;
memcpy(skb_put(skb,len), rd->buf, len);
skb->mac.raw = skb->data;
if (in_interrupt())
netif_rx(skb);
else
netif_rx_ni(skb);
ndev->last_rx = jiffies;
}
}
rd_set_status(rd, 0);
rd_set_count(rd, 0);
/* buffer still owned by CPU */
return (ret) ? -ret : len;
}
static void vlsi_fill_rx(struct vlsi_ring *r)
{
struct ring_descr *rd;
for (rd = ring_last(r); rd != NULL; rd = ring_put(r)) {
if (rd_is_active(rd)) {
BUG();
break;
}
if (!rd->skb) {
rd->skb = dev_alloc_skb(IRLAP_SKB_ALLOCSIZE);
if (rd->skb) {
skb_reserve(rd->skb,1);
rd->skb->protocol = htons(ETH_P_IRDA);
}
else
break; /* probably not worth logging? */
}
/* give dma buffer back to busmaster */
pci_dma_prep_single(r->pdev, rd_get_addr(rd), r->len, r->dir);
rd_activate(rd);
}
}
static void vlsi_rx_interrupt(struct net_device *ndev)
{
vlsi_irda_dev_t *idev = ndev->priv;
struct vlsi_ring *r = idev->rx_ring;
struct ring_descr *rd;
int ret;
for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
if (rd_is_active(rd))
break;
ret = vlsi_process_rx(r, rd);
if (ret < 0) {
ret = -ret;
idev->stats.rx_errors++;
if (ret & VLSI_RX_DROP)
idev->stats.rx_dropped++;
if (ret & VLSI_RX_OVER)
idev->stats.rx_over_errors++;
if (ret & VLSI_RX_LENGTH)
idev->stats.rx_length_errors++;
if (ret & VLSI_RX_FRAME)
idev->stats.rx_frame_errors++;
if (ret & VLSI_RX_CRC)
idev->stats.rx_crc_errors++;
}
else if (ret > 0) {
idev->stats.rx_packets++;
idev->stats.rx_bytes += ret;
}
}
do_gettimeofday(&idev->last_rx); /* remember "now" for later mtt delay */
vlsi_fill_rx(r);
if (ring_first(r) == NULL) {
/* we are in big trouble if this should ever happen */
printk(KERN_ERR "%s: rx ring exhausted!\n", __FUNCTION__);
vlsi_ring_debug(r);
}
else
outw(0, ndev->base_addr+VLSI_PIO_PROMPT);
}
/* caller must have stopped the controller from busmastering */
static void vlsi_unarm_rx(vlsi_irda_dev_t *idev)
{
struct vlsi_ring *r = idev->rx_ring;
struct ring_descr *rd;
int ret;
for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
ret = 0;
if (rd_is_active(rd)) {
rd_set_status(rd, 0);
if (rd_get_count(rd)) {
printk(KERN_INFO "%s - dropping rx packet\n", __FUNCTION__);
ret = -VLSI_RX_DROP;
}
rd_set_count(rd, 0);
pci_dma_sync_single(r->pdev, rd_get_addr(rd), r->len, r->dir);
if (rd->skb) {
dev_kfree_skb_any(rd->skb);
rd->skb = NULL;
}
}
else
ret = vlsi_process_rx(r, rd);
if (ret < 0) {
ret = -ret;
idev->stats.rx_errors++;
if (ret & VLSI_RX_DROP)
idev->stats.rx_dropped++;
if (ret & VLSI_RX_OVER)
idev->stats.rx_over_errors++;
if (ret & VLSI_RX_LENGTH)
idev->stats.rx_length_errors++;
if (ret & VLSI_RX_FRAME)
idev->stats.rx_frame_errors++;
if (ret & VLSI_RX_CRC)
idev->stats.rx_crc_errors++;
}
else if (ret > 0) {
idev->stats.rx_packets++;
idev->stats.rx_bytes += ret;
}
}
}
/********************************************************/
static int vlsi_process_tx(struct vlsi_ring *r, struct ring_descr *rd)
{
u16 status;
int len;
int ret;
pci_dma_sync_single(r->pdev, rd_get_addr(rd), r->len, r->dir);
/* dma buffer now owned by the CPU */
status = rd_get_status(rd);
if (status & RD_TX_UNDRN)
ret = VLSI_TX_FIFO;
else
ret = 0;
rd_set_status(rd, 0);
if (rd->skb) {
len = rd->skb->len;
dev_kfree_skb_any(rd->skb);
rd->skb = NULL;
}
else /* tx-skb already freed? - should never happen */
len = rd_get_count(rd); /* incorrect for SIR! (due to wrapping) */
rd_set_count(rd, 0);
/* dma buffer still owned by the CPU */
return (ret) ? -ret : len;
}
static int vlsi_set_baud(struct net_device *ndev, int dolock)
{
vlsi_irda_dev_t *idev = ndev->priv;
unsigned long flags;
u16 nphyctl;
unsigned iobase;
u16 config;
unsigned mode;
unsigned idle_retry;
int ret;
int baudrate;
int fifocnt = 0; /* Keep compiler happy */
baudrate = idev->new_baud;
iobase = ndev->base_addr;
#if 0
printk(KERN_DEBUG "%s: %d -> %d\n", __FUNCTION__, idev->baud, idev->new_baud);
#endif
if (baudrate == 4000000) {
mode = IFF_FIR;
config = IRCFG_FIR;
nphyctl = PHYCTL_FIR;
}
else if (baudrate == 1152000) {
mode = IFF_MIR;
......@@ -473,6 +893,32 @@ static int vlsi_set_baud(struct net_device *ndev)
}
}
if (dolock)
spin_lock_irqsave(&idev->lock, flags);
else
flags = 0xdead; /* prevent bogus warning about possible uninitialized use */
for (idle_retry=0; idle_retry < 100; idle_retry++) {
fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
if (fifocnt == 0)
break;
if (!idle_retry)
printk(KERN_WARNING "%s: waiting for rx fifo to become empty(%d)\n",
__FUNCTION__, fifocnt);
if (dolock) {
spin_unlock_irqrestore(&idev->lock, flags);
udelay(100);
spin_lock_irqsave(&idev->lock, flags);
}
else
udelay(100);
}
if (fifocnt != 0)
printk(KERN_ERR "%s: rx fifo not empty(%d)\n", __FUNCTION__, fifocnt);
outw(0, iobase+VLSI_PIO_IRENABLE);
wmb();
config |= IRCFG_MSTR | IRCFG_ENRX;
outw(config, iobase+VLSI_PIO_IRCFG);
......@@ -480,10 +926,12 @@ static int vlsi_set_baud(struct net_device *ndev)
outw(nphyctl, iobase+VLSI_PIO_NPHYCTL);
wmb();
outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
mb();
/* chip fetches IRCFG on next rising edge of its 8MHz clock */
udelay(1); /* chip applies IRCFG on next rising edge of its 8MHz clock */
/* read back settings for validation */
mb();
config = inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_MASK;
if (mode == IFF_FIR)
......@@ -493,7 +941,6 @@ static int vlsi_set_baud(struct net_device *ndev)
else
config ^= IRENABLE_SIR_ON;
if (config != (IRENABLE_IREN|IRENABLE_ENRXST)) {
printk(KERN_ERR "%s: failed to set %s mode!\n", __FUNCTION__,
(mode==IFF_SIR)?"SIR":((mode==IFF_MIR)?"MIR":"FIR"));
......@@ -512,6 +959,7 @@ static int vlsi_set_baud(struct net_device *ndev)
ret = 0;
}
}
if (dolock)
spin_unlock_irqrestore(&idev->lock, flags);
if (ret)
......@@ -520,278 +968,396 @@ static int vlsi_set_baud(struct net_device *ndev)
return ret;
}
static inline int vlsi_set_baud_lock(struct net_device *ndev)
{
return vlsi_set_baud(ndev, 1);
}
static inline int vlsi_set_baud_nolock(struct net_device *ndev)
{
return vlsi_set_baud(ndev, 0);
}
static int vlsi_init_chip(struct net_device *ndev)
static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
vlsi_irda_dev_t *idev = ndev->priv;
unsigned iobase;
u16 ptr;
struct vlsi_ring *r = idev->tx_ring;
struct ring_descr *rd;
unsigned long flags;
unsigned iobase = ndev->base_addr;
u8 status;
u16 config;
int mtt;
int len, speed;
struct timeval now, ready;
iobase = ndev->base_addr;
speed = irda_get_next_speed(skb);
if (speed != -1 && speed != idev->baud) {
netif_stop_queue(ndev);
idev->new_baud = speed;
if (!skb->len) {
dev_kfree_skb_any(skb);
/* due to the completely asynchronous tx operation we might have
* IrLAP racing with the hardware here, f.e. if the controller
* is just sending the last packet with current speed while
* the LAP is already switching the speed using synchronous
* len=0 packet. Immediate execution would lead to hw lockup
* requiring a powercycle to reset. Good candidate to trigger
* this is the final UA:RSP packet after receiving a DISC:CMD
* when getting the LAP down.
* Note that we are not protected by the queue_stop approach
* because the final UA:RSP arrives _without_ request to apply
* new-speed-after-this-packet - hence the driver doesn't know
* this was the last packet and doesn't stop the queue. So the
* forced switch to default speed from LAP gets through as fast
* as only some 10 usec later while the UA:RSP is still processed
* by the hardware and we would get screwed.
* Note: no locking required since we (netdev->xmit) are the only
* supplier for tx and the network layer provides serialization
*/
spin_lock_irqsave(&idev->lock, flags);
if (ring_first(idev->tx_ring) == NULL) {
/* no race - tx-ring already empty */
vlsi_set_baud_nolock(ndev);
netif_wake_queue(ndev);
}
else
; /* keep the speed change pending like it would
* for any len>0 packet. tx completion interrupt
* will apply it when the tx ring becomes empty.
*/
spin_unlock_irqrestore(&idev->lock, flags);
return 0;
}
status = RD_TX_CLRENTX; /* stop tx-ring after this frame */
}
else
status = 0;
outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending IRQ, disable all INT */
if (skb->len == 0) {
printk(KERN_ERR "%s: dropping len=0 packet\n", __FUNCTION__);
goto drop;
}
outw(0, iobase+VLSI_PIO_IRENABLE); /* disable IrPHY-interface */
/* sanity checks - should never happen!
* simply BUGging the violation and dropping the packet
*/
/* disable everything, particularly IRCFG_MSTR - which resets the RING_PTR */
rd = ring_last(r);
if (!rd) { /* ring full - queue should have been stopped! */
BUG();
goto drop;
}
outw(0, iobase+VLSI_PIO_IRCFG);
wmb();
outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
if (rd_is_active(rd)) { /* entry still owned by hw! */
BUG();
goto drop;
}
mb();
if (!rd->buf) { /* no memory for this tx entry - weird! */
BUG();
goto drop;
}
outw(0, iobase+VLSI_PIO_IRENABLE);
if (rd->skb) { /* hm, associated old skb still there */
BUG();
goto drop;
}
outw(MAX_PACKET_LENGTH, iobase+VLSI_PIO_MAXPKT); /* max possible value=0x0fff */
/* tx buffer already owned by CPU due to pci_dma_sync_single() either
* after initial pci_map_single or after subsequent tx-completion
*/
outw(BUS_TO_RINGBASE(idev->busaddr), iobase+VLSI_PIO_RINGBASE);
if (idev->mode == IFF_SIR) {
status |= RD_TX_DISCRC; /* no hw-crc creation */
len = async_wrap_skb(skb, rd->buf, r->len);
/* Some rare worst case situation in SIR mode might lead to
* potential buffer overflow. The wrapper detects this, returns
* with a shortened frame (without FCS/EOF) but doesn't provide
* any error indication about the invalid packet which we are
* going to transmit.
* Therefore we log if the buffer got filled to the point where the
* wrapper would abort, i.e. when there are fewer than 5 bytes left to
* allow appending the FCS/EOF.
*/
outw(TX_RX_TO_RINGSIZE(idev->tx_ring.size, idev->rx_ring.size),
iobase+VLSI_PIO_RINGSIZE);
if (len >= r->len-5)
printk(KERN_WARNING "%s: possible buffer overflow with SIR wrapping!\n",
__FUNCTION__);
}
else {
/* hw deals with MIR/FIR mode wrapping */
status |= RD_TX_PULSE; /* send 2 us highspeed indication pulse */
len = skb->len;
if (len > r->len) {
printk(KERN_ERR "%s: no space - skb too big (%d)\n",
__FUNCTION__, skb->len);
goto drop;
}
else
memcpy(rd->buf, skb->data, len);
}
ptr = inw(iobase+VLSI_PIO_RINGPTR);
idev->rx_ring.head = idev->rx_ring.tail = RINGPTR_GET_RX(ptr);
idev->tx_ring.head = idev->tx_ring.tail = RINGPTR_GET_TX(ptr);
/* do mtt delay before we need to disable interrupts! */
outw(IRCFG_MSTR, iobase+VLSI_PIO_IRCFG); /* ready for memory access */
wmb();
outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
if ((mtt = irda_get_mtt(skb)) > 0) {
mb();
ready.tv_usec = idev->last_rx.tv_usec + mtt;
ready.tv_sec = idev->last_rx.tv_sec;
if (ready.tv_usec >= 1000000) {
ready.tv_usec -= 1000000;
ready.tv_sec++; /* IrLAP 1.1: mtt always < 1 sec */
}
for(;;) {
do_gettimeofday(&now);
if (now.tv_sec > ready.tv_sec
|| (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
break;
udelay(100);
/* must not sleep here - we are called under xmit_lock! */
}
}
idev->new_baud = 9600; /* start with IrPHY using 9600(SIR) mode */
vlsi_set_baud(ndev);
rd->skb = skb; /* remember skb for tx-complete stats */
outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* just in case - w/c pending IRQ's */
wmb();
rd_set_count(rd, len);
rd_set_status(rd, status); /* not yet active! */
/* DO NOT BLINDLY ENABLE IRINTR_ACTEN!
* basically every received pulse fires an ACTIVITY-INT
* leading to >>1000 INT's per second instead of few 10
/* give dma buffer back to busmaster-hw (flush caches to make
* CPU-driven changes visible from the pci bus).
*/
outb(IRINTR_RPKTEN|IRINTR_TPKTEN, iobase+VLSI_PIO_IRINTR);
wmb();
return 0;
}
pci_dma_prep_single(r->pdev, rd_get_addr(rd), r->len, r->dir);
/*
* We need to disable IR output in order to switch to TX mode.
* Better not do this blindly anytime we want to transmit something
* because TX may already run. However we are racing with the controller
* which may stop TX at any time when fetching an inactive descriptor
* or one with CLR_ENTX set. So we switch on TX only, if TX was not running
* _after_ the new descriptor was activated on the ring. This ensures
* we will either find TX already stopped or we can be sure, there
* will be a TX-complete interrupt even if the chip stopped doing
* TX just after we found it still running. The ISR will then find
* the non-empty ring and restart TX processing. The enclosing
* spinlock provides the correct serialization to prevent race with isr.
*/
/**************************************************************/
spin_lock_irqsave(&idev->lock,flags);
rd_activate(rd);
static void vlsi_refill_rx(struct vlsi_ring *r)
{
do {
if (rd_is_active(r, r->head))
BUG();
rd_activate(r, r->head);
ring_put(r);
} while (r->head != r->tail);
}
if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
int fifocnt;
fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
if (fifocnt != 0)
printk(KERN_WARNING "%s: rx fifo not empty(%d)\n",
__FUNCTION__, fifocnt);
static int vlsi_rx_interrupt(struct net_device *ndev)
{
vlsi_irda_dev_t *idev = ndev->priv;
struct vlsi_ring *r;
int len;
u8 status;
struct sk_buff *skb;
int crclen;
config = inw(iobase+VLSI_PIO_IRCFG);
rmb();
outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
mb();
outw(0, iobase+VLSI_PIO_PROMPT);
}
ndev->trans_start = jiffies;
r = &idev->rx_ring;
while (!rd_is_active(r, r->tail)) {
status = rd_get_status(r, r->tail);
if (status & RX_STAT_ERROR) {
idev->stats.rx_errors++;
if (status & RX_STAT_OVER)
idev->stats.rx_over_errors++;
if (status & RX_STAT_LENGTH)
idev->stats.rx_length_errors++;
if (status & RX_STAT_PHYERR)
idev->stats.rx_frame_errors++;
if (status & RX_STAT_CRCERR)
idev->stats.rx_crc_errors++;
}
else {
len = rd_get_count(r, r->tail);
crclen = (idev->mode==IFF_FIR) ? sizeof(u32) : sizeof(u16);
if (len < crclen)
printk(KERN_ERR "%s: strange frame (len=%d)\n",
__FUNCTION__, len);
else
len -= crclen; /* remove trailing CRC */
skb = dev_alloc_skb(len+1);
if (skb) {
skb->dev = ndev;
skb_reserve(skb,1);
memcpy(skb_put(skb,len), r->buf[r->tail].data, len);
idev->stats.rx_packets++;
idev->stats.rx_bytes += len;
skb->mac.raw = skb->data;
skb->protocol = htons(ETH_P_IRDA);
netif_rx(skb);
ndev->last_rx = jiffies;
}
else {
idev->stats.rx_dropped++;
printk(KERN_ERR "%s: rx packet dropped\n", __FUNCTION__);
}
}
rd_set_count(r, r->tail, 0);
rd_set_status(r, r->tail, 0);
ring_get(r);
if (r->tail == r->head) {
printk(KERN_WARNING "%s: rx ring exhausted\n", __FUNCTION__);
break;
}
if (ring_put(r) == NULL) {
netif_stop_queue(ndev);
printk(KERN_DEBUG "%s: tx ring full - queue stopped\n", __FUNCTION__);
}
do_gettimeofday(&idev->last_rx); /* remember "now" for later mtt delay */
vlsi_refill_rx(r);
mb();
outw(0, ndev->base_addr+VLSI_PIO_PROMPT);
spin_unlock_irqrestore(&idev->lock, flags);
return 0;
}
drop:
dev_kfree_skb_any(skb);
idev->stats.tx_errors++;
idev->stats.tx_dropped++;
return 1;
}
static int vlsi_tx_interrupt(struct net_device *ndev)
static void vlsi_tx_interrupt(struct net_device *ndev)
{
vlsi_irda_dev_t *idev = ndev->priv;
struct vlsi_ring *r;
struct vlsi_ring *r = idev->tx_ring;
struct ring_descr *rd;
unsigned iobase;
int ret;
u16 config;
u16 status;
r = &idev->tx_ring;
while (!rd_is_active(r, r->tail)) {
if (r->tail == r->head)
break; /* tx ring empty - nothing to send anymore */
for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
if (rd_is_active(rd))
break;
ret = vlsi_process_tx(r, rd);
status = rd_get_status(r, r->tail);
if (status & TX_STAT_UNDRN) {
if (ret < 0) {
ret = -ret;
idev->stats.tx_errors++;
if (ret & VLSI_TX_DROP)
idev->stats.tx_dropped++;
if (ret & VLSI_TX_FIFO)
idev->stats.tx_fifo_errors++;
}
else {
else if (ret > 0) {
idev->stats.tx_packets++;
idev->stats.tx_bytes += rd_get_count(r, r->tail); /* not correct for SIR */
}
rd_set_count(r, r->tail, 0);
rd_set_status(r, r->tail, 0);
if (r->buf[r->tail].skb) {
rd_set_addr_status(r, r->tail, 0, 0);
dev_kfree_skb(r->buf[r->tail].skb);
r->buf[r->tail].skb = NULL;
r->buf[r->tail].data = NULL;
idev->stats.tx_bytes += ret;
}
ring_get(r);
}
ret = 0;
iobase = ndev->base_addr;
if (idev->new_baud && rd == NULL) /* tx ring empty and speed change pending */
vlsi_set_baud_lock(ndev);
if (r->head == r->tail) { /* tx ring empty: re-enable rx */
outw(0, iobase+VLSI_PIO_IRENABLE);
iobase = ndev->base_addr;
config = inw(iobase+VLSI_PIO_IRCFG);
mb();
if (rd == NULL) /* tx ring empty: re-enable rx */
outw((config & ~IRCFG_ENTX) | IRCFG_ENRX, iobase+VLSI_PIO_IRCFG);
wmb();
outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
else if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
int fifocnt;
fifocnt = inw(iobase+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
if (fifocnt != 0)
printk(KERN_WARNING "%s: rx fifo not empty(%d)\n",
__FUNCTION__, fifocnt);
outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
}
else
ret = 1; /* no speed-change-check */
mb();
outw(0, iobase+VLSI_PIO_PROMPT);
if (netif_queue_stopped(ndev)) {
if (netif_queue_stopped(ndev) && !idev->new_baud) {
netif_wake_queue(ndev);
printk(KERN_DEBUG "%s: queue awoken\n", __FUNCTION__);
}
return ret;
}
/* caller must have stopped the controller from busmastering */
#if 0 /* disable ACTIVITY handling for now */
static int vlsi_act_interrupt(struct net_device *ndev)
static void vlsi_unarm_tx(vlsi_irda_dev_t *idev)
{
printk(KERN_DEBUG "%s\n", __FUNCTION__);
return 0;
}
#endif
struct vlsi_ring *r = idev->tx_ring;
struct ring_descr *rd;
int ret;
static void vlsi_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
struct net_device *ndev = dev_instance;
vlsi_irda_dev_t *idev = ndev->priv;
unsigned iobase;
u8 irintr;
int boguscount = 32;
int no_speed_check = 0;
unsigned got_act;
unsigned long flags;
for (rd = ring_first(r); rd != NULL; rd = ring_get(r)) {
got_act = 0;
iobase = ndev->base_addr;
spin_lock_irqsave(&idev->lock,flags);
do {
irintr = inb(iobase+VLSI_PIO_IRINTR);
rmb();
outb(irintr, iobase+VLSI_PIO_IRINTR); /* acknowledge asap */
wmb();
ret = 0;
if (rd_is_active(rd)) {
rd_set_status(rd, 0);
rd_set_count(rd, 0);
pci_dma_sync_single(r->pdev, rd_get_addr(rd), r->len, r->dir);
if (rd->skb) {
dev_kfree_skb_any(rd->skb);
rd->skb = NULL;
}
printk(KERN_INFO "%s - dropping tx packet\n", __FUNCTION__);
ret = -VLSI_TX_DROP;
}
else
ret = vlsi_process_tx(r, rd);
if (!(irintr&=IRINTR_INT_MASK)) /* not our INT - probably shared */
break;
if (ret < 0) {
ret = -ret;
idev->stats.tx_errors++;
if (ret & VLSI_TX_DROP)
idev->stats.tx_dropped++;
if (ret & VLSI_TX_FIFO)
idev->stats.tx_fifo_errors++;
}
else if (ret > 0) {
idev->stats.tx_packets++;
idev->stats.tx_bytes += ret;
}
}
// vlsi_reg_debug(iobase,__FUNCTION__);
}
if (irintr&IRINTR_RPKTINT)
no_speed_check |= vlsi_rx_interrupt(ndev);
/********************************************************/
if (irintr&IRINTR_TPKTINT)
no_speed_check |= vlsi_tx_interrupt(ndev);
static int vlsi_start_clock(struct pci_dev *pdev)
{
u8 clkctl, lock;
int i, count;
#if 0 /* disable ACTIVITY handling for now */
if (clksrc < 2) { /* auto or PLL: try PLL */
clkctl = CLKCTL_PD_INV | CLKCTL_CLKSTP;
pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
if (got_act && irintr==IRINTR_ACTIVITY) /* nothing new */
/* procedure to detect PLL lock synchronisation:
* after 0.5 msec initial delay we expect to find 3 PLL lock
* indications within 10 msec for successful PLL detection.
*/
udelay(500);
count = 0;
for (i = 500; i <= 10000; i += 50) { /* max 10 msec */
pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &lock);
if (lock&CLKCTL_LOCK) {
if (++count >= 3)
break;
}
udelay(50);
}
if (count < 3) {
if (clksrc == 1) { /* explicitly asked for PLL hence bail out */
printk(KERN_ERR "%s: no PLL or failed to lock!\n",
__FUNCTION__);
clkctl = CLKCTL_CLKSTP;
pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
return -1;
}
else /* was: clksrc=0(auto) */
clksrc = 3; /* fallback to 40MHz XCLK (OB800) */
if ((irintr&IRINTR_ACTIVITY) && !(irintr^IRINTR_ACTIVITY) ) {
no_speed_check |= vlsi_act_interrupt(ndev);
got_act = 1;
printk(KERN_INFO "%s: PLL not locked, fallback to clksrc=%d\n",
__FUNCTION__, clksrc);
}
else
clksrc = 1; /* got successful PLL lock */
}
#endif
if (irintr & ~(IRINTR_RPKTINT|IRINTR_TPKTINT|IRINTR_ACTIVITY))
printk(KERN_DEBUG "%s: IRINTR = %02x\n",
__FUNCTION__, (unsigned)irintr);
} while (--boguscount > 0);
spin_unlock_irqrestore(&idev->lock,flags);
if (clksrc != 1) {
/* we get here if either no PLL was detected in auto-mode or
an external clock source was explicitly specified */
if (boguscount <= 0)
printk(KERN_ERR "%s: too much work in interrupt!\n", __FUNCTION__);
clkctl = CLKCTL_EXTCLK | CLKCTL_CLKSTP;
if (clksrc == 3)
clkctl |= CLKCTL_XCKSEL;
pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
else if (!no_speed_check) {
if (idev->new_baud)
vlsi_set_baud(ndev);
/* no way to test for working XCLK */
}
else
pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
/* ok, now going to connect the chip with the clock source */
clkctl &= ~CLKCTL_CLKSTP;
pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
return 0;
}
static void vlsi_stop_clock(struct pci_dev *pdev)
{
u8 clkctl;
/**************************************************************/
/* disconnect chip from clock source */
pci_read_config_byte(pdev, VLSI_PCI_CLKCTL, &clkctl);
clkctl |= CLKCTL_CLKSTP;
pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
/* disable all clock sources */
clkctl &= ~(CLKCTL_EXTCLK | CLKCTL_PD_INV);
pci_write_config_byte(pdev, VLSI_PCI_CLKCTL, clkctl);
}
/********************************************************/
/* writing all-zero to the VLSI PCI IO register area seems to prevent
* some occasional situations where the hardware fails (symptoms are
......@@ -811,283 +1377,150 @@ static inline void vlsi_clear_regs(unsigned iobase)
outw(0, iobase + i);
}
static int vlsi_open(struct net_device *ndev)
static int vlsi_init_chip(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
vlsi_irda_dev_t *idev = ndev->priv;
struct pci_dev *pdev = idev->pdev;
int err;
char hwname[32];
if (pci_request_regions(pdev,drivername)) {
printk(KERN_ERR "%s: io resource busy\n", __FUNCTION__);
return -EAGAIN;
}
/* on some rare occasions the chip apparently comes up
* with IRQs pending. So we get interrupts invoked much too early
* which will immediately kill us again :-(
* so we better w/c pending IRQs and disable them all
*/
outb(IRINTR_INT_MASK, ndev->base_addr+VLSI_PIO_IRINTR);
unsigned iobase;
u16 ptr;
if (request_irq(ndev->irq, vlsi_interrupt, SA_SHIRQ,
drivername, ndev)) {
printk(KERN_ERR "%s: couldn't get IRQ: %d\n",
__FUNCTION__, ndev->irq);
pci_release_regions(pdev);
return -EAGAIN;
}
printk(KERN_INFO "%s: got resources for %s - irq=%d / io=%04lx\n",
__FUNCTION__, ndev->name, ndev->irq, ndev->base_addr );
/* start the clock and clean the registers */
if (vlsi_set_clock(pdev)) {
if (vlsi_start_clock(pdev)) {
printk(KERN_ERR "%s: no valid clock source\n",
__FUNCTION__);
free_irq(ndev->irq,ndev);
pci_release_regions(pdev);
return -EIO;
}
vlsi_start_clock(pdev);
vlsi_clear_regs(ndev->base_addr);
err = vlsi_init_ring(idev);
if (err) {
vlsi_unset_clock(pdev);
free_irq(ndev->irq,ndev);
pci_release_regions(pdev);
return err;
pci_disable_device(pdev);
return -1;
}
vlsi_init_chip(ndev);
printk(KERN_INFO "%s: IrPHY setup: %d baud (%s), %s SIR-pulses\n",
__FUNCTION__, idev->baud,
(idev->mode==IFF_SIR)?"SIR":((idev->mode==IFF_MIR)?"MIR":"FIR"),
(sirpulse)?"3/16 bittime":"short");
vlsi_arm_rx(&idev->rx_ring);
do_gettimeofday(&idev->last_rx); /* first mtt may start from now on */
sprintf(hwname, "VLSI-FIR @ 0x%04x", (unsigned)ndev->base_addr);
idev->irlap = irlap_open(ndev,&idev->qos,hwname);
netif_start_queue(ndev);
outw(0, ndev->base_addr+VLSI_PIO_PROMPT); /* kick hw state machine */
printk(KERN_INFO "%s: device %s operational using (%d,%d) tx,rx-ring\n",
__FUNCTION__, ndev->name, ringsize[0], ringsize[1]);
return 0;
}
static int vlsi_close(struct net_device *ndev)
{
vlsi_irda_dev_t *idev = ndev->priv;
struct pci_dev *pdev = idev->pdev;
u8 cmd;
unsigned iobase;
iobase = ndev->base_addr;
netif_stop_queue(ndev);
vlsi_clear_regs(iobase);
if (idev->irlap)
irlap_close(idev->irlap);
idev->irlap = NULL;
outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending + disable further IRQ */
wmb();
outw(0, iobase+VLSI_PIO_IRENABLE);
outw(0, iobase+VLSI_PIO_IRCFG); /* disable everything */
wmb();
outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
mb(); /* ... from now on */
outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending IRQ, disable all INT */
outw(0, iobase+VLSI_PIO_IRENABLE);
wmb();
outw(0, iobase+VLSI_PIO_IRENABLE); /* disable IrPHY-interface */
vlsi_clear_regs(ndev->base_addr);
/* disable everything, particularly IRCFG_MSTR - (also resetting the RING_PTR) */
vlsi_stop_clock(pdev);
outw(0, iobase+VLSI_PIO_IRCFG);
wmb();
vlsi_unset_clock(pdev);
outw(MAX_PACKET_LENGTH, iobase+VLSI_PIO_MAXPKT); /* max possible value=0x0fff */
free_irq(ndev->irq,ndev);
outw(BUS_TO_RINGBASE(idev->busaddr), iobase+VLSI_PIO_RINGBASE);
vlsi_free_ringbuf(&idev->rx_ring);
vlsi_free_ringbuf(&idev->tx_ring);
outw(TX_RX_TO_RINGSIZE(idev->tx_ring->size, idev->rx_ring->size),
iobase+VLSI_PIO_RINGSIZE);
if (idev->busaddr)
pci_free_consistent(idev->pdev,RING_AREA_SIZE,idev->virtaddr,idev->busaddr);
ptr = inw(iobase+VLSI_PIO_RINGPTR);
atomic_set(&idev->rx_ring->head, RINGPTR_GET_RX(ptr));
atomic_set(&idev->rx_ring->tail, RINGPTR_GET_RX(ptr));
atomic_set(&idev->tx_ring->head, RINGPTR_GET_TX(ptr));
atomic_set(&idev->tx_ring->tail, RINGPTR_GET_TX(ptr));
idev->virtaddr = NULL;
idev->busaddr = 0;
vlsi_set_baud_lock(ndev); /* idev->new_baud used as provided by caller */
pci_read_config_byte(pdev, PCI_COMMAND, &cmd);
cmd &= ~PCI_COMMAND_MASTER;
pci_write_config_byte(pdev, PCI_COMMAND, cmd);
outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* just in case - w/c pending IRQ's */
wmb();
pci_release_regions(pdev);
/* DO NOT BLINDLY ENABLE IRINTR_ACTEN!
* basically every received pulse fires an ACTIVITY-INT
* leading to >>1000 INT's per second instead of few 10
*/
printk(KERN_INFO "%s: device %s stopped\n", __FUNCTION__, ndev->name);
outb(IRINTR_RPKTEN|IRINTR_TPKTEN, iobase+VLSI_PIO_IRINTR);
return 0;
}
static struct net_device_stats * vlsi_get_stats(struct net_device *ndev)
{
vlsi_irda_dev_t *idev = ndev->priv;
return &idev->stats;
}
static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
static int vlsi_start_hw(vlsi_irda_dev_t *idev)
{
vlsi_irda_dev_t *idev = ndev->priv;
struct vlsi_ring *r;
unsigned long flags;
unsigned iobase;
u8 status;
u16 config;
int mtt;
int len, speed;
struct timeval now, ready;
struct pci_dev *pdev = idev->pdev;
struct net_device *ndev = pci_get_drvdata(pdev);
unsigned iobase = ndev->base_addr;
u8 byte;
/* we don't use the legacy UART, disable its address decoding */
status = 0;
pci_read_config_byte(pdev, VLSI_PCI_IRMISC, &byte);
byte &= ~(IRMISC_UARTEN | IRMISC_UARTTST);
pci_write_config_byte(pdev, VLSI_PCI_IRMISC, byte);
speed = irda_get_next_speed(skb);
/* enable PCI busmaster access to our 16MB page */
if (speed != -1 && speed != idev->baud) {
idev->new_baud = speed;
if (!skb->len) {
dev_kfree_skb(skb);
vlsi_set_baud(ndev);
return 0;
}
status = TX_STAT_CLRENTX; /* stop tx-ring after this frame */
}
if (skb->len == 0) {
printk(KERN_ERR "%s: blocking 0-size packet???\n",
__FUNCTION__);
dev_kfree_skb(skb);
return 0;
}
pci_write_config_byte(pdev, VLSI_PCI_MSTRPAGE, MSTRPAGE_VALUE);
pci_set_master(pdev);
r = &idev->tx_ring;
vlsi_init_chip(pdev);
if (rd_is_active(r, r->head))
BUG();
vlsi_fill_rx(idev->rx_ring);
if (idev->mode == IFF_SIR) {
status |= TX_STAT_DISCRC;
len = async_wrap_skb(skb, r->buf[r->head].data, XFER_BUF_SIZE);
}
else { /* hw deals with MIR/FIR mode */
len = skb->len;
memcpy(r->buf[r->head].data, skb->data, len);
}
do_gettimeofday(&idev->last_rx); /* first mtt may start from now on */
rd_set_count(r, r->head, len);
rd_set_addr_status(r, r->head, virt_to_bus(r->buf[r->head].data), status);
outw(0, iobase+VLSI_PIO_PROMPT); /* kick hw state machine */
/* new entry not yet activated! */
return 0;
}
#if 0
printk(KERN_DEBUG "%s: dump entry %d: %u %02x %08x\n",
__FUNCTION__, r->head,
idev->ring_hw[r->head].rd_count,
(unsigned)idev->ring_hw[r->head].rd_status,
idev->ring_hw[r->head].rd_addr & 0xffffffff);
vlsi_reg_debug(iobase,__FUNCTION__);
#endif
static int vlsi_stop_hw(vlsi_irda_dev_t *idev)
{
struct pci_dev *pdev = idev->pdev;
struct net_device *ndev = pci_get_drvdata(pdev);
unsigned iobase = ndev->base_addr;
unsigned long flags;
spin_lock_irqsave(&idev->lock,flags);
outw(0, iobase+VLSI_PIO_IRENABLE);
outw(0, iobase+VLSI_PIO_IRCFG); /* disable everything */
wmb();
/* let mtt delay pass before we need to acquire the spinlock! */
outb(IRINTR_INT_MASK, iobase+VLSI_PIO_IRINTR); /* w/c pending + disable further IRQ */
mb();
spin_unlock_irqrestore(&idev->lock,flags);
if ((mtt = irda_get_mtt(skb)) > 0) {
vlsi_unarm_tx(idev);
vlsi_unarm_rx(idev);
ready.tv_usec = idev->last_rx.tv_usec + mtt;
ready.tv_sec = idev->last_rx.tv_sec;
if (ready.tv_usec >= 1000000) {
ready.tv_usec -= 1000000;
ready.tv_sec++; /* IrLAP 1.1: mtt always < 1 sec */
}
for(;;) {
do_gettimeofday(&now);
if (now.tv_sec > ready.tv_sec
|| (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
break;
udelay(100);
}
}
vlsi_clear_regs(iobase);
vlsi_stop_clock(pdev);
/*
* race window ahead, due to concurrent controller processing!
*
* We need to disable IR output in order to switch to TX mode.
* Better not do this blindly anytime we want to transmit something
* because TX may already run. However the controller may stop TX
* at any time when fetching an inactive descriptor or one with
* CLR_ENTX set. So we switch on TX only, if TX was not running
* _after_ the new descriptor was activated on the ring. This ensures
* we will either find TX already stopped or we can be sure, there
* will be a TX-complete interrupt even if the chip stopped doing
* TX just after we found it still running. The ISR will then find
* the non-empty ring and restart TX processing. The enclosing
* spinlock is required to get serialization with the ISR right.
*/
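/* Illustrative sketch (not part of this patch): a condensed view of
 * the tx-start serialization described above. All identifiers appear
 * elsewhere in this driver; the surrounding function context and
 * error handling are elided.
 */
	spin_lock_irqsave(&idev->lock, flags);
	rd_activate(r, r->head);	/* make the new entry visible to the hw first */
	ring_put(r);
	if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
		/* TX found idle _after_ activation: safe to restart it.
		 * If the chip stopped just before, we still get a
		 * TX-complete IRQ and the ISR restarts ring processing.
		 */
		outw(0, iobase+VLSI_PIO_IRENABLE);
		outw(inw(iobase+VLSI_PIO_IRCFG) | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
		wmb();
		outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
		mb();
		outw(0, iobase+VLSI_PIO_PROMPT);	/* kick hw state machine */
	}
	spin_unlock_irqrestore(&idev->lock, flags);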
pci_disable_device(pdev);
return 0;
}
iobase = ndev->base_addr;
/**************************************************************/
spin_lock_irqsave(&idev->lock,flags);
static struct net_device_stats * vlsi_get_stats(struct net_device *ndev)
{
vlsi_irda_dev_t *idev = ndev->priv;
rd_activate(r, r->head);
ring_put(r);
return &idev->stats;
}
if (!(inw(iobase+VLSI_PIO_IRENABLE) & IRENABLE_ENTXST)) {
static void vlsi_tx_timeout(struct net_device *ndev)
{
vlsi_irda_dev_t *idev = ndev->priv;
outw(0, iobase+VLSI_PIO_IRENABLE);
config = inw(iobase+VLSI_PIO_IRCFG);
rmb();
outw(config | IRCFG_ENTX, iobase+VLSI_PIO_IRCFG);
wmb();
outw(IRENABLE_IREN, iobase+VLSI_PIO_IRENABLE);
mb();
outw(0, iobase+VLSI_PIO_PROMPT);
wmb();
}
vlsi_reg_debug(ndev->base_addr, __FUNCTION__);
vlsi_ring_debug(idev->tx_ring);
if (r->head == r->tail) {
if (netif_running(ndev))
netif_stop_queue(ndev);
printk(KERN_DEBUG "%s: tx ring full - queue stopped: %d/%d\n",
__FUNCTION__, r->head, r->tail);
#if 0
printk(KERN_INFO "%s: dump stalled entry %d: %u %02x %08x\n",
__FUNCTION__, r->tail,
r->hw[r->tail].rd_count,
(unsigned)r->hw[r->tail].rd_status,
r->hw[r->tail].rd_addr & 0xffffffff);
#endif
vlsi_reg_debug(iobase,__FUNCTION__);
}
spin_unlock_irqrestore(&idev->lock, flags);
vlsi_stop_hw(idev);
dev_kfree_skb(skb);
/* now simply restart the whole thing */
return 0;
}
if (!idev->new_baud)
idev->new_baud = idev->baud; /* keep current baudrate */
if (vlsi_start_hw(idev))
printk(KERN_CRIT "%s: failed to restart hw - %s(%s) unusable!\n",
__FUNCTION__, idev->pdev->name, ndev->name);
else
netif_start_queue(ndev);
}
static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
......@@ -1097,14 +1530,20 @@ static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
u16 fifocnt;
int ret = 0;
spin_lock_irqsave(&idev->lock,flags);
switch (cmd) {
case SIOCSBANDWIDTH:
if (!capable(CAP_NET_ADMIN)) {
ret = -EPERM;
break;
}
spin_lock_irqsave(&idev->lock, flags);
idev->new_baud = irq->ifr_baudrate;
/* when called from userland there might be a minor race window here
* if the stack tries to change speed concurrently - which would be
* pretty strange anyway with the userland having full control...
*/
vlsi_set_baud_nolock(ndev);
spin_unlock_irqrestore(&idev->lock, flags);
break;
case SIOCSMEDIABUSY:
if (!capable(CAP_NET_ADMIN)) {
......@@ -1116,7 +1555,7 @@ static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
case SIOCGRECEIVING:
/* the best we can do: check whether there are any bytes in rx fifo.
* The trustable window (in case some data arrives just afterwards)
* may be as short as 1usec or so at 4Mbps - no way for future-telling.
* may be as short as 1usec or so at 4Mbps.
*/
fifocnt = inw(ndev->base_addr+VLSI_PIO_RCVBCNT) & RCVBCNT_MASK;
irq->ifr_receiving = (fifocnt!=0) ? 1 : 0;
......@@ -1126,42 +1565,159 @@ static int vlsi_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
__FUNCTION__, cmd);
ret = -EOPNOTSUPP;
}
spin_unlock_irqrestore(&idev->lock,flags);
return ret;
}
/********************************************************/
static void vlsi_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
struct net_device *ndev = dev_instance;
vlsi_irda_dev_t *idev = ndev->priv;
unsigned iobase;
u8 irintr;
int boguscount = 32;
unsigned got_act;
unsigned long flags;
got_act = 0;
iobase = ndev->base_addr;
do {
spin_lock_irqsave(&idev->lock,flags);
irintr = inb(iobase+VLSI_PIO_IRINTR);
rmb();
outb(irintr, iobase+VLSI_PIO_IRINTR); /* acknowledge asap */
spin_unlock_irqrestore(&idev->lock,flags);
if (!(irintr&=IRINTR_INT_MASK)) /* not our INT - probably shared */
break;
if (irintr&IRINTR_RPKTINT)
vlsi_rx_interrupt(ndev);
if (irintr&IRINTR_TPKTINT)
vlsi_tx_interrupt(ndev);
if (!(irintr & ~IRINTR_ACTIVITY))
break; /* done if only activity remaining */
if (irintr & ~(IRINTR_RPKTINT|IRINTR_TPKTINT|IRINTR_ACTIVITY)) {
printk(KERN_DEBUG "%s: IRINTR = %02x\n",
__FUNCTION__, (unsigned)irintr);
vlsi_reg_debug(iobase,__FUNCTION__);
}
} while (--boguscount > 0);
if (boguscount <= 0)
printk(KERN_WARNING "%s: too much work in interrupt!\n", __FUNCTION__);
}
/********************************************************/
int vlsi_irda_init(struct net_device *ndev)
static int vlsi_open(struct net_device *ndev)
{
vlsi_irda_dev_t *idev = ndev->priv;
struct pci_dev *pdev = idev->pdev;
u8 byte;
int err = -EAGAIN;
char hwname[32];
if (pci_request_regions(idev->pdev, drivername)) {
printk(KERN_ERR "%s: io resource busy\n", __FUNCTION__);
goto errout;
}
ndev->base_addr = pci_resource_start(idev->pdev,0);
ndev->irq = idev->pdev->irq;
/* on some rare occasions the chip apparently comes up with
* IRQs pending. We had better w/c pending IRQs and disable them all
*/
outb(IRINTR_INT_MASK, ndev->base_addr+VLSI_PIO_IRINTR);
if (request_irq(ndev->irq, vlsi_interrupt, SA_SHIRQ,
drivername, ndev)) {
printk(KERN_ERR "%s: couldn't get IRQ: %d\n",
__FUNCTION__, ndev->irq);
goto errout_io;
}
if ((err = vlsi_create_hwif(idev)) != 0)
goto errout_irq;
sprintf(hwname, "VLSI-FIR @ 0x%04x", (unsigned)ndev->base_addr);
idev->irlap = irlap_open(ndev,&idev->qos,hwname);
if (!idev->irlap)
goto errout_free_ring;
do_gettimeofday(&idev->last_rx); /* first mtt may start from now on */
idev->new_baud = 9600; /* start with IrPHY using 9600(SIR) mode */
if ((err = vlsi_start_hw(idev)) != 0)
goto errout_close_irlap;
netif_start_queue(ndev);
printk(KERN_INFO "%s: device %s operational\n", __FUNCTION__, ndev->name);
return 0;
errout_close_irlap:
irlap_close(idev->irlap);
errout_free_ring:
vlsi_destroy_hwif(idev);
errout_irq:
free_irq(ndev->irq,ndev);
errout_io:
pci_release_regions(idev->pdev);
errout:
return err;
}
static int vlsi_close(struct net_device *ndev)
{
vlsi_irda_dev_t *idev = ndev->priv;
netif_stop_queue(ndev);
if (idev->irlap)
irlap_close(idev->irlap);
idev->irlap = NULL;
vlsi_stop_hw(idev);
vlsi_destroy_hwif(idev);
free_irq(ndev->irq,ndev);
pci_release_regions(idev->pdev);
printk(KERN_INFO "%s: device %s stopped\n", __FUNCTION__, ndev->name);
return 0;
}
static int vlsi_irda_init(struct net_device *ndev)
{
vlsi_irda_dev_t *idev = ndev->priv;
struct pci_dev *pdev = idev->pdev;
SET_MODULE_OWNER(ndev);
ndev->irq = pdev->irq;
ndev->base_addr = pci_resource_start(pdev,0);
/* PCI busmastering - see include file for details! */
/* PCI busmastering
* see include file for details why we need these 2 masks, in this order!
*/
if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW)) {
if (pci_set_dma_mask(pdev,DMA_MASK_USED_BY_HW)
|| pci_set_dma_mask(pdev,DMA_MASK_MSTRPAGE)) {
printk(KERN_ERR "%s: aborting due to PCI BM-DMA address limitations\n",
__FUNCTION__);
return -1;
}
pci_set_master(pdev);
pdev->dma_mask = DMA_MASK_MSTRPAGE;
pci_write_config_byte(pdev, VLSI_PCI_MSTRPAGE, MSTRPAGE_VALUE);
/* we don't use the legacy UART, disable its address decoding */
pci_read_config_byte(pdev, VLSI_PCI_IRMISC, &byte);
byte &= ~(IRMISC_UARTEN | IRMISC_UARTTST);
pci_write_config_byte(pdev, VLSI_PCI_IRMISC, byte);
irda_init_max_qos_capabilies(&idev->qos);
......@@ -1187,6 +1743,8 @@ int vlsi_irda_init(struct net_device *ndev)
ndev->get_stats = vlsi_get_stats;
ndev->hard_start_xmit = vlsi_hard_start_xmit;
ndev->do_ioctl = vlsi_ioctl;
ndev->tx_timeout = vlsi_tx_timeout;
ndev->watchdog_timeo = 500*HZ/1000; /* max. allowed turn time for IrLAP */
return 0;
}
......@@ -1203,6 +1761,8 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (pci_enable_device(pdev))
goto out;
else
pdev->current_state = 0; /* hw must be running now */
printk(KERN_INFO "%s: IrDA PCI controller %s detected\n",
drivername, pdev->name);
......@@ -1228,6 +1788,8 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ndev->priv = (void *) idev;
spin_lock_init(&idev->lock);
init_MUTEX(&idev->sem);
down(&idev->sem);
idev->pdev = pdev;
ndev->init = vlsi_irda_init;
strcpy(ndev->name,"irda%d");
......@@ -1236,13 +1798,36 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
__FUNCTION__);
goto out_freedev;
}
#ifdef CONFIG_PROC_FS
{
struct proc_dir_entry *ent;
ent = create_proc_entry(ndev->name, S_IFREG|S_IRUGO, vlsi_proc_root);
if (!ent) {
printk(KERN_ERR "%s: failed to create proc entry\n", __FUNCTION__);
goto out_unregister;
}
ent->data = ndev;
ent->proc_fops = &vlsi_proc_fops;
ent->size = 0;
idev->proc_entry = ent;
}
#endif
printk(KERN_INFO "%s: registered device %s\n", drivername, ndev->name);
pci_set_drvdata(pdev, ndev);
up(&idev->sem);
return 0;
out_unregister:
up(&idev->sem);
unregister_netdev(ndev);
goto out_disable;
out_freedev:
up(&idev->sem);
kfree(ndev);
out_disable:
pci_disable_device(pdev);
......@@ -1254,37 +1839,145 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
static void __devexit vlsi_irda_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
vlsi_irda_dev_t *idev;
if (!ndev) {
printk(KERN_CRIT "%s: lost netdevice?\n", drivername);
return;
}
if (ndev) {
printk(KERN_INFO "%s: unregister device %s\n",
drivername, ndev->name);
idev = ndev->priv;
down(&idev->sem);
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
#ifdef CONFIG_PROC_FS
if (idev->proc_entry) {
remove_proc_entry(ndev->name, vlsi_proc_root);
idev->proc_entry = NULL;
}
#endif
up(&idev->sem);
unregister_netdev(ndev);
/* do not free - async completed by unregister_netdev()
* ndev->destructor called (if present) when going to free
*/
}
else
printk(KERN_CRIT "%s: lost netdevice?\n", drivername);
pci_set_drvdata(pdev, NULL);
printk(KERN_INFO "%s: %s removed\n", drivername, pdev->name);
}
pci_disable_device(pdev);
printk(KERN_INFO "%s: %s disabled\n", drivername, pdev->name);
#ifdef CONFIG_PM
/* The Controller doesn't provide PCI PM capabilities as defined by PCI specs.
* Some of the Linux PCI-PM code however depends on this, for example in
* pci_set_power_state(). So we have to take care to perform the required
* operations on our own (particularly reflecting the pdev->current_state)
* otherwise we might get cheated by pci-pm.
*/
static int vlsi_irda_save_state(struct pci_dev *pdev, u32 state)
{
if (state < 1 || state > 3 ) {
printk( KERN_ERR "%s - %s: invalid pm state request: %u\n",
__FUNCTION__, pdev->name, state);
return -1;
}
return 0;
}
static int vlsi_irda_suspend(struct pci_dev *pdev, u32 state)
{
printk(KERN_ERR "%s - %s\n", __FUNCTION__, pdev->name);
struct net_device *ndev = pci_get_drvdata(pdev);
vlsi_irda_dev_t *idev;
if (state < 1 || state > 3 ) {
printk( KERN_ERR "%s - %s: invalid pm state request: %u\n",
__FUNCTION__, pdev->name, state);
return 0;
}
if (!ndev) {
printk(KERN_ERR "%s - %s: no netdevice \n", __FUNCTION__, pdev->name);
return 0;
}
idev = ndev->priv;
down(&idev->sem);
if (pdev->current_state != 0) { /* already suspended */
if (state > pdev->current_state) { /* simply go deeper */
pci_set_power_state(pdev,state);
pdev->current_state = state;
}
else
printk(KERN_ERR "%s - %s: invalid suspend request %u -> %u\n",
__FUNCTION__, pdev->name, pdev->current_state, state);
up(&idev->sem);
return 0;
}
if (netif_running(ndev)) {
netif_device_detach(ndev);
vlsi_stop_hw(idev);
pci_save_state(pdev, idev->cfg_space);
if (!idev->new_baud)
/* remember speed settings to restore on resume */
idev->new_baud = idev->baud;
}
pci_set_power_state(pdev,state);
pdev->current_state = state;
idev->resume_ok = 1;
up(&idev->sem);
return 0;
}
static int vlsi_irda_resume(struct pci_dev *pdev)
{
printk(KERN_ERR "%s - %s\n", __FUNCTION__, pdev->name);
struct net_device *ndev = pci_get_drvdata(pdev);
vlsi_irda_dev_t *idev;
if (!ndev) {
printk(KERN_ERR "%s - %s: no netdevice \n", __FUNCTION__, pdev->name);
return 0;
}
idev = ndev->priv;
down(&idev->sem);
if (pdev->current_state == 0) {
up(&idev->sem);
printk(KERN_ERR "%s - %s: already resumed\n", __FUNCTION__, pdev->name);
return 0;
}
pci_set_power_state(pdev, 0);
pdev->current_state = 0;
if (!idev->resume_ok) {
/* should be obsolete now - but used to happen due to:
* - pci layer initially setting pdev->current_state = 4 (unknown)
* - pci layer did not walk the save_state-tree (might be APM problem)
* so we could not refuse to suspend from undefined state
* - vlsi_irda_suspend detected invalid state and refused to save
* configuration for resume - but was too late to stop suspending
* - vlsi_irda_resume got screwed when trying to resume from garbage
*
* now we explicitly set pdev->current_state = 0 after enabling the
* device and independently resume_ok should catch any garbage config.
*/
printk(KERN_ERR "%s - hm, nothing to resume?\n", __FUNCTION__);
up(&idev->sem);
return 0;
}
if (netif_running(ndev)) {
pci_restore_state(pdev, idev->cfg_space);
vlsi_start_hw(idev);
netif_device_attach(ndev);
}
idev->resume_ok = 0;
up(&idev->sem);
return 0;
}
#endif /* CONFIG_PM */
/*********************************************************/
static struct pci_driver vlsi_irda_driver = {
......@@ -1292,13 +1985,20 @@ static struct pci_driver vlsi_irda_driver = {
.id_table = vlsi_irda_table,
.probe = vlsi_irda_probe,
.remove = __devexit_p(vlsi_irda_remove),
#ifdef CONFIG_PM
.save_state = vlsi_irda_save_state,
.suspend = vlsi_irda_suspend,
.resume = vlsi_irda_resume,
#endif
};
#ifdef CONFIG_PROC_FS
#define PROC_DIR ("driver/" DRIVER_NAME)
#endif
static int __init vlsi_mod_init(void)
{
int i;
int i, ret;
if (clksrc < 0 || clksrc > 3) {
printk(KERN_ERR "%s: invalid clksrc=%d\n", drivername, clksrc);
......@@ -1324,14 +2024,27 @@ static int __init vlsi_mod_init(void)
sirpulse = !!sirpulse;
return pci_module_init(&vlsi_irda_driver);
#ifdef CONFIG_PROC_FS
vlsi_proc_root = create_proc_entry(PROC_DIR, S_IFDIR, 0);
if (!vlsi_proc_root)
return -ENOMEM;
#endif
ret = pci_module_init(&vlsi_irda_driver);
#ifdef CONFIG_PROC_FS
if (ret)
remove_proc_entry(PROC_DIR, 0);
#endif
return ret;
}
static void __exit vlsi_mod_exit(void)
{
pci_unregister_driver(&vlsi_irda_driver);
remove_proc_entry(PROC_DIR, 0);
}
module_init(vlsi_mod_init);
module_exit(vlsi_mod_exit);
......@@ -28,6 +28,6 @@ static inline __u16 irda_fcs(__u16 fcs, __u8 c)
}
/* Recompute the FCS with len bytes appended. */
unsigned short crc_calc( __u16 fcs, __u8 const *buf, size_t len);
unsigned short irda_calc_crc16( __u16 fcs, __u8 const *buf, size_t len);
#endif
......@@ -48,7 +48,9 @@
/* This is used as an initial value to max_header_size before the proper
* value is filled in (5 for ttp, 4 for lmp). This allow us to detect
* the state of the underlying connection. - Jean II */
#define IRCOMM_TTY_HDR_UNITIALISED 32
#define IRCOMM_TTY_HDR_UNINITIALISED 16
/* Same for payload size. See qos.c for the smallest max data size */
#define IRCOMM_TTY_DATA_UNINITIALISED (64 - IRCOMM_TTY_HDR_UNINITIALISED)
/*
* IrCOMM TTY driver state
......@@ -83,6 +85,7 @@ struct ircomm_tty_cb {
__u32 max_data_size; /* Max data we can transmit in one packet */
__u32 max_header_size; /* The amount of header space we must reserve */
__u32 tx_data_size; /* Max data size of current tx_skb */
struct iriap_cb *iriap; /* Instance used for querying remote IAS */
struct ias_object* obj;
......
......@@ -3,9 +3,9 @@
*
* vlsi_ir.h: VLSI82C147 PCI IrDA controller driver for Linux
*
* Version: 0.3, Sep 30, 2001
* Version: 0.4
*
* Copyright (c) 2001 Martin Diehl
* Copyright (c) 2001-2002 Martin Diehl
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
......@@ -27,6 +27,26 @@
#ifndef IRDA_VLSI_FIR_H
#define IRDA_VLSI_FIR_H
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,4)
#ifdef CONFIG_PROC_FS
/* PDE() introduced in 2.5.4 */
#define PDE(inode) ((inode)->u.generic_ip)
#endif
#endif
/*
* #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,xx)
*
* missing pci-dma api call to give streaming dma buffer back to hw
* patch floating on lkml - probably present in 2.5.26 or later
* otherwise defining it as noop is ok, since the vlsi-ir is only
* used on two oldish x86-based notebooks which are cache-coherent
*/
#define pci_dma_prep_single(dev, addr, size, direction) /* nothing */
/*
* #endif
*/
/* ================================================================ */
/* non-standard PCI registers */
......@@ -58,20 +78,20 @@ enum vlsi_pci_clkctl {
/* PLL control */
CLKCTL_NO_PD = 0x04, /* PD# (inverted power down) signal,
* i.e. PLL is powered, if NO_PD set */
CLKCTL_PD_INV = 0x04, /* PD#: inverted power down signal,
* i.e. PLL is powered, if PD_INV set */
CLKCTL_LOCK = 0x40, /* (ro) set, if PLL is locked */
/* clock source selection */
CLKCTL_EXTCLK = 0x20, /* set to select external clock input */
CLKCTL_XCKSEL = 0x10, /* set to indicate 40MHz EXTCLK, not 48MHz */
CLKCTL_EXTCLK = 0x20, /* set to select external clock input, not PLL */
CLKCTL_XCKSEL = 0x10, /* set to indicate EXTCLK is 40MHz, not 48MHz */
/* IrDA block control */
CLKCTL_CLKSTP = 0x80, /* set to disconnect from selected clock source */
CLKCTL_WAKE = 0x08 /* set to enable wakeup feature: whenever IR activity
* is detected, NO_PD gets set and CLKSTP cleared */
* is detected, PD_INV gets set(?) and CLKSTP cleared */
};
/* ------------------------------------------ */
......@@ -82,10 +102,9 @@ enum vlsi_pci_clkctl {
#define DMA_MASK_MSTRPAGE 0x00ffffff
#define MSTRPAGE_VALUE (DMA_MASK_MSTRPAGE >> 24)
/* PCI busmastering is somewhat special for this guy - in short:
*
* We select to operate using MSTRPAGE=0 fixed, use ISA DMA
* We select to operate using fixed MSTRPAGE=0, use ISA DMA
* address restrictions to make the PCI BM api aware of this,
* but ensure the hardware is dealing with real 32bit access.
*
......@@ -151,7 +170,6 @@ enum vlsi_pci_irmisc {
IRMISC_UARTSEL_2e8 = 0x03
};
/* ================================================================ */
/* registers mapped to 32 byte PCI IO space */
......@@ -350,22 +368,17 @@ enum vlsi_pio_irenable {
#define IRENABLE_MASK 0xff00 /* Read mask */
/* ------------------------------------------ */
/* VLSI_PIO_PHYCTL: IR Physical Layer Current Control Register (u16, ro) */
/* read-back of the currently applied physical layer status.
* applied from VLSI_PIO_NPHYCTL at rising edge of IRENABLE_IREN
* contents identical to VLSI_PIO_NPHYCTL (see below)
*/
/* ------------------------------------------ */
/* VLSI_PIO_NPHYCTL: IR Physical Layer Next Control Register (u16, rw) */
/* latched during IRENABLE_IREN=0 and applied at 0-1 transition
......@@ -382,10 +395,10 @@ enum vlsi_pio_irenable {
* fixed for all SIR speeds at 40MHz input clock (PLSWID=24 at 48MHz).
* IrPHY also allows shorter pulses down to the nominal pulse duration
* at 115.2kbaud (minus some tolerance) which is 1.41 usec.
* Using the expression PLSWID = 12/(BAUD+1)-1 (multiplied by to for 48MHz)
* Using the expression PLSWID = 12/(BAUD+1)-1 (multiplied by two for 48MHz)
* we get the minimum acceptable PLSWID values according to the VLSI
* specification, which provides 1.5 usec pulse width for all speeds (except
* for 2.4kbaud getting 6usec). This is well inside IrPHY v1.3 specs and
* for 2.4kbaud getting 6usec). This is fine with IrPHY v1.3 specs and
* reduces the transceiver power which drains the battery. At 9.6kbaud for
* example this amounts to more than 90% battery power saving!
*
......@@ -399,7 +412,21 @@ enum vlsi_pio_irenable {
* PREAMB = 15
*/
#define BWP_TO_PHYCTL(B,W,P) ((((B)&0x3f)<<10) | (((W)&0x1f)<<5) | (((P)&0x1f)<<0))
#define PHYCTL_BAUD_SHIFT 10
#define PHYCTL_BAUD_MASK 0xfc00
#define PHYCTL_PLSWID_SHIFT 5
#define PHYCTL_PLSWID_MASK 0x03e0
#define PHYCTL_PREAMB_SHIFT 0
#define PHYCTL_PREAMB_MASK 0x001f
#define PHYCTL_TO_BAUD(bwp) (((bwp)&PHYCTL_BAUD_MASK)>>PHYCTL_BAUD_SHIFT)
#define PHYCTL_TO_PLSWID(bwp) (((bwp)&PHYCTL_PLSWID_MASK)>>PHYCTL_PLSWID_SHIFT)
#define PHYCTL_TO_PREAMB(bwp) (((bwp)&PHYCTL_PREAMB_MASK)>>PHYCTL_PREAMB_SHIFT)
#define BWP_TO_PHYCTL(b,w,p) ((((b)<<PHYCTL_BAUD_SHIFT)&PHYCTL_BAUD_MASK) \
| (((w)<<PHYCTL_PLSWID_SHIFT)&PHYCTL_PLSWID_MASK) \
| (((p)<<PHYCTL_PREAMB_SHIFT)&PHYCTL_PREAMB_MASK))
#define BAUD_BITS(br) ((115200/(br))-1)
static inline unsigned
......@@ -417,7 +444,6 @@ calc_width_bits(unsigned baudrate, unsigned widthselect, unsigned clockselect)
return (tmp>0) ? (tmp-1) : 0;
}
#define PHYCTL_SIR(br,ws,cs) BWP_TO_PHYCTL(BAUD_BITS(br),calc_width_bits((br),(ws),(cs)),0)
#define PHYCTL_MIR(cs) BWP_TO_PHYCTL(0,((cs)?9:10),1)
#define PHYCTL_FIR BWP_TO_PHYCTL(0,0,15)
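/* Worked example (illustrative; the width value 23 is just a sample,
 * see calc_width_bits() for the real derivation):
 *
 *	BAUD_BITS(9600)		 = 115200/9600 - 1	= 11
 *	BWP_TO_PHYCTL(11, 23, 0) = (11<<10)|(23<<5)|0	= 0x2ee0
 *
 * and unpacking the very same value again:
 *
 *	PHYCTL_TO_BAUD(0x2ee0)	 = 11	(i.e. 115200/(11+1) = 9600 baud)
 *	PHYCTL_TO_PLSWID(0x2ee0) = 23
 *	PHYCTL_TO_PREAMB(0x2ee0) = 0
 */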
......@@ -445,42 +471,61 @@ calc_width_bits(unsigned baudrate, unsigned widthselect, unsigned clockselect)
/* VLSI_PIO_MAXPKT: Maximum Packet Length register (u16, rw) */
/* specifies the maximum legth (up to 4k - or (4k-1)? - bytes), which a
* received frame may have - i.e. the size of the corresponding
* receive buffers. For simplicity we use the same length for
* receive and submit buffers and increase transfer buffer size
* byond IrDA-MTU = 2048 so we have sufficient space left when
* packet size increases during wrapping due to XBOFs and CE's.
* Even for receiving unwrapped frames we need >MAX_PACKET_LEN
* space since the controller appends FCS/CRC (2 or 4 bytes)
* so we use 2*IrDA-MTU for both directions and cover even the
* worst case, where all data bytes have to be escaped when wrapping.
* well, this wastes some memory - anyway, later we will
* either map skb's directly or use pci_pool allocator...
/* maximum acceptable length for received packets */
/* hw imposed limitation - register uses only [11:0] */
#define MAX_PACKET_LENGTH 0x0fff
/* IrLAP I-field (apparently not defined elsewhere) */
#define IRDA_MTU 2048
/* complete packet consists of A(1)+C(1)+I(<=IRDA_MTU) */
#define IRLAP_SKB_ALLOCSIZE (1+1+IRDA_MTU)
/* the buffers we use to exchange frames with the hardware need to be
* larger than IRLAP_SKB_ALLOCSIZE because we may have up to 4 bytes FCS
* appended and, in SIR mode, a lot of frame wrapping bytes. The worst
* case appears to be a SIR packet with I-size==IRDA_MTU and all bytes
* needing to be escaped to provide transparency. Furthermore, the peer
* might ask for quite a number of additional XBOFs:
* up to 115+48 XBOFS 163
* regular BOF 1
* A-field 1
* C-field 1
* I-field, IRDA_MTU, all escaped 4096
* FCS (16 bit at SIR, escaped) 4
* EOF 1
* AFAICS nothing in IrLAP guarantees A/C field not to need escaping
* (f.e. 0xc0/0xc1 - i.e. BOF/EOF - are legal values there) so in the
* worst case we have 163+1+2+2+4096+4+1 = 4269 bytes total frame size
* (counting the A and C fields as escaped, too).
* However, the VLSI uses only 12 bits for all buffer length values,
* which limits the maximum usable buffer size to <= 4095.
* Note this is not a limitation in the receive case because we use
* the SIR filtering mode where the hw unwraps the frame and only the
* bare packet+fcs is stored into the buffer - in contrast to the SIR
* tx case where we have to pass frame-wrapped packets to the hw.
* If this would ever become an issue in real life, the only workaround
* I see would be using the legacy UART emulation in SIR mode.
*/
#define IRDA_MTU 2048 /* seems to be undefined elsewhere */
#define XFER_BUF_SIZE (2*IRDA_MTU)
#define MAX_PACKET_LENGTH (XFER_BUF_SIZE-1) /* register uses only [11:0] */
#define XFER_BUF_SIZE MAX_PACKET_LENGTH
/* ------------------------------------------ */
/* VLSI_PIO_RCVBCNT: Receive Byte Count Register (u16, ro) */
/* recive packet counter gets incremented on every non-filtered
/* receive packet counter gets incremented on every non-filtered
* byte which was put in the receive fifo and reset for each
* new packet. Used to decide whether we are just in the middle
* of receiving
*/
/* better apply the [11:0] mask when reading, as some docs say the
* reserved [15:12] would return 1 when reading - which is wrong AFAICS
*/
#define RCVBCNT_MASK 0x0fff
/* ================================================================ */
/******************************************************************/
/* descriptors for rx/tx ring
*
......@@ -494,10 +539,10 @@ calc_width_bits(unsigned baudrate, unsigned widthselect, unsigned clockselect)
*
* Attention: Writing addr overwrites status!
*
* ### FIXME: we depend on endianess here
* ### FIXME: depends on endianess (but there ain't no non-i586 ob800 ;-)
*/
struct ring_descr {
struct ring_descr_hw {
volatile u16 rd_count; /* tx/rx count [11:0] */
u16 reserved;
union {
......@@ -505,60 +550,168 @@ struct ring_descr {
struct {
u8 addr_res[3];
volatile u8 status; /* descriptor status */
} rd_s;
} rd_u;
};
} rd_s __attribute__((packed));
} rd_u __attribute__((packed));
} __attribute__ ((packed));
#define rd_addr rd_u.addr
#define rd_status rd_u.rd_s.status
/* ring descriptor status bits */
#define RD_STAT_ACTIVE 0x80 /* descriptor owned by hw (both TX,RX) */
#define RD_ACTIVE 0x80 /* descriptor owned by hw (both TX,RX) */
/* TX ring descriptor status */
#define TX_STAT_DISCRC 0x40 /* do not send CRC (for SIR) */
#define TX_STAT_BADCRC 0x20 /* force a bad CRC */
#define TX_STAT_PULSE 0x10 /* send indication pulse after this frame (MIR/FIR) */
#define TX_STAT_FRCEUND 0x08 /* force underrun */
#define TX_STAT_CLRENTX 0x04 /* clear ENTX after this frame */
#define TX_STAT_UNDRN 0x01 /* TX fifo underrun (probably PCI problem) */
#define RD_TX_DISCRC 0x40 /* do not send CRC (for SIR) */
#define RD_TX_BADCRC 0x20 /* force a bad CRC */
#define RD_TX_PULSE 0x10 /* send indication pulse after this frame (MIR/FIR) */
#define RD_TX_FRCEUND 0x08 /* force underrun */
#define RD_TX_CLRENTX 0x04 /* clear ENTX after this frame */
#define RD_TX_UNDRN 0x01 /* TX fifo underrun (probably PCI problem) */
/* RX ring descriptor status */
#define RX_STAT_PHYERR 0x40 /* physical encoding error */
#define RX_STAT_CRCERR 0x20 /* CRC error (MIR/FIR) */
#define RX_STAT_LENGTH 0x10 /* frame exceeds buffer length */
#define RX_STAT_OVER 0x08 /* RX fifo overrun (probably PCI problem) */
#define RX_STAT_SIRBAD 0x04 /* EOF missing: BOF follows BOF (SIR, filtered) */
#define RD_RX_PHYERR 0x40 /* physical encoding error */
#define RD_RX_CRCERR 0x20 /* CRC error (MIR/FIR) */
#define RD_RX_LENGTH 0x10 /* frame exceeds buffer length */
#define RD_RX_OVER 0x08 /* RX fifo overrun (probably PCI problem) */
#define RD_RX_SIRBAD 0x04 /* EOF missing: BOF follows BOF (SIR, filtered) */
#define RX_STAT_ERROR 0x7c /* any error in frame */
#define RD_RX_ERROR 0x7c /* any error in received frame */
/* the memory required to hold the 2 descriptor rings */
#define HW_RING_AREA_SIZE (2 * MAX_RING_DESCR * sizeof(struct ring_descr_hw))
/* ------------------------------------------ */
/******************************************************************/
/* contains the objects we've put into the ring descriptors
* static buffers for now - probably skb's later
/* sw-ring descriptors consist of a bus-mapped transfer buffer with
* associated skb and a pointer to the hw entry descriptor
*/
struct ring_entry {
struct ring_descr {
struct ring_descr_hw *hw;
struct sk_buff *skb;
void *data;
void *buf;
};
/* wrappers for operations on hw-exposed ring descriptors
* access to the hw-part of the descriptors must use these.
*/
static inline int rd_is_active(struct ring_descr *rd)
{
return ((rd->hw->rd_status & RD_ACTIVE) != 0);
}
static inline void rd_activate(struct ring_descr *rd)
{
rd->hw->rd_status |= RD_ACTIVE;
}
static inline void rd_set_status(struct ring_descr *rd, u8 s)
{
rd->hw->rd_status = s; /* may pass ownership to the hardware */
}
static inline void rd_set_addr_status(struct ring_descr *rd, dma_addr_t a, u8 s)
{
/* order is important for two reasons:
* - overlaid: writing addr overwrites status
* - we want to write status last so we have valid address in
* case status has RD_ACTIVE set
*/
if ((a & ~DMA_MASK_MSTRPAGE)>>24 != MSTRPAGE_VALUE) {
BUG();
return;
}
a &= DMA_MASK_MSTRPAGE; /* clear highbyte to make sure we won't write
* to status - just in case MSTRPAGE_VALUE!=0
*/
rd->hw->rd_addr = a;
wmb();
rd_set_status(rd, s); /* may pass ownership to the hardware */
}
static inline void rd_set_count(struct ring_descr *rd, u16 c)
{
rd->hw->rd_count = c;
}
static inline u8 rd_get_status(struct ring_descr *rd)
{
return rd->hw->rd_status;
}
static inline dma_addr_t rd_get_addr(struct ring_descr *rd)
{
dma_addr_t a;
a = (rd->hw->rd_addr & DMA_MASK_MSTRPAGE) | (MSTRPAGE_VALUE << 24);
return a;
}
static inline u16 rd_get_count(struct ring_descr *rd)
{
return rd->hw->rd_count;
}
/******************************************************************/
/* sw descriptor rings for rx, tx:
*
* operations follow producer-consumer paradigm, with the hw
* in the middle doing the processing.
* ring size must be power of two.
*
* producer advances r->tail after inserting for processing
* consumer advances r->head after removing processed rd
* ring is empty if head==tail / full if (tail+1)==head
*/
struct vlsi_ring {
struct pci_dev *pdev;
int dir;
unsigned len;
unsigned size;
unsigned mask;
unsigned head, tail;
struct ring_descr *hw;
struct ring_entry buf[MAX_RING_DESCR];
atomic_t head, tail;
struct ring_descr *rd;
};
/* ------------------------------------------ */
/* ring processing helpers */
static inline struct ring_descr *ring_last(struct vlsi_ring *r)
{
int t;
t = atomic_read(&r->tail) & r->mask;
return (((t+1) & r->mask) == (atomic_read(&r->head) & r->mask)) ? NULL : &r->rd[t];
}
static inline struct ring_descr *ring_put(struct vlsi_ring *r)
{
atomic_inc(&r->tail);
return ring_last(r);
}
static inline struct ring_descr *ring_first(struct vlsi_ring *r)
{
int h;
h = atomic_read(&r->head) & r->mask;
return (h == (atomic_read(&r->tail) & r->mask)) ? NULL : &r->rd[h];
}
static inline struct ring_descr *ring_get(struct vlsi_ring *r)
{
atomic_inc(&r->head);
return ring_first(r);
}
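/* Minimal usage sketch (illustrative only) of the producer side.
 * Filling the transfer buffer is elided; busaddr/len are assumed to
 * describe an already prepared buffer inside the MSTRPAGE window.
 */
static inline int ring_produce_sketch(struct vlsi_ring *r, dma_addr_t busaddr, u16 len)
{
	struct ring_descr *rd = ring_last(r);	/* next free slot, NULL if full */

	if (!rd || rd_is_active(rd))
		return -1;		/* ring full or hw still owns the slot */
	rd_set_count(rd, len);
	rd_set_addr_status(rd, busaddr, RD_ACTIVE);	/* passes ownership to the hw */
	ring_put(r);			/* producer advances tail */
	return 0;
}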
/******************************************************************/
/* our private compound VLSI-PCI-IRDA device information */
......@@ -575,15 +728,40 @@ typedef struct vlsi_irda_dev {
dma_addr_t busaddr;
void *virtaddr;
struct vlsi_ring tx_ring, rx_ring;
struct vlsi_ring *tx_ring, *rx_ring;
struct timeval last_rx;
spinlock_t lock;
struct semaphore sem;
u32 cfg_space[64/sizeof(u32)];
u8 resume_ok;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_entry;
#endif
} vlsi_irda_dev_t;
/********************************************************/
/* the remapped error flags we use for returning from frame
* post-processing in vlsi_process_tx/rx() after it was completed
* by the hardware. These functions either return the >=0 number
* of transfered bytes in case of success or the negative (-)
* of the or'ed error flags.
*/
#define VLSI_TX_DROP 0x0001
#define VLSI_TX_FIFO 0x0002
#define VLSI_RX_DROP 0x0100
#define VLSI_RX_OVER 0x0200
#define VLSI_RX_LENGTH 0x0400
#define VLSI_RX_FRAME 0x0800
#define VLSI_RX_CRC 0x1000
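/* Decoding sketch (illustrative only): how a caller would split the
 * vlsi_process_rx() result back into byte count vs. error flags:
 */
static inline void vlsi_rx_post_sketch(int ret, struct net_device_stats *stats)
{
	if (ret >= 0) {			/* success: number of transferred bytes */
		stats->rx_bytes += ret;
		return;
	}
	ret = -ret;			/* negated or'ed VLSI_RX_* flags */
	if (ret & VLSI_RX_CRC)
		stats->rx_crc_errors++;
	if (ret & VLSI_RX_OVER)
		stats->rx_over_errors++;
	if (ret & VLSI_RX_LENGTH)
		stats->rx_length_errors++;
}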
/********************************************************/
#endif /* IRDA_VLSI_FIR_H */
......@@ -57,7 +57,7 @@ __u16 const irda_crc16_table[256] =
0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
};
unsigned short crc_calc( __u16 fcs, __u8 const *buf, size_t len)
unsigned short irda_calc_crc16( __u16 fcs, __u8 const *buf, size_t len)
{
while (len--)
fcs = irda_fcs(fcs, *buf++);
......
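/* Usage sketch (illustrative only): verifying a received frame with
 * the renamed helper. INIT_FCS (0xffff) and GOOD_FCS (0xf0b8) are the
 * usual constants from <net/irda/crc.h>; the CRC is run over the
 * payload plus the appended 16 bit FCS, and a good frame leaves the
 * GOOD_FCS residue.
 */
static inline int fcs_ok_sketch(__u8 const *frame, size_t len_incl_fcs)
{
	return irda_calc_crc16(INIT_FCS, frame, len_incl_fcs) == GOOD_FCS;
}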
......@@ -220,9 +220,16 @@ static int ircomm_param_service_type(void *instance, irda_param_t *param,
/*
* Now the line is ready for some communication. Check if we are a
* server, and send over some initial parameters
* server, and send over some initial parameters.
* Clients do it in ircomm_tty_state_setup().
* Note : we may get called from ircomm_tty_getvalue_confirm(),
* therefore before we have even opened any socket. And self->client
* is initialised to TRUE only later. So, we check if the link is
* really initialised. - Jean II
*/
if (!self->client && (self->settings.service_type != IRCOMM_3_WIRE_RAW))
if ((self->max_header_size != IRCOMM_TTY_HDR_UNINITIALISED) &&
(!self->client) &&
(self->settings.service_type != IRCOMM_3_WIRE_RAW))
{
/* Init connection */
ircomm_tty_send_initial_parameters(self);
......
......@@ -421,8 +421,8 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
self->line = line;
INIT_WORK(&self->tqueue, ircomm_tty_do_softint, self);
self->max_header_size = IRCOMM_TTY_HDR_UNITIALISED;
self->max_data_size = 64-self->max_header_size;
self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED;
self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED;
self->close_delay = 5*HZ/10;
self->closing_wait = 30*HZ;
......@@ -719,16 +719,26 @@ static int ircomm_tty_write(struct tty_struct *tty, int from_user,
/* We may receive packets from the TTY even before we have finished
* our setup. Not cool.
* The problem is that we would allocate a skb with bogus header and
* data size, and when adding data to it later we would get
* confused.
* Better to not accept data until we are properly setup. Use bogus
* header size to check that (safest way to detect it).
* The problem is that we don't know the final header and data size
* to create the proper skb, so any skb we would create would have
* bogus header and data size, so care is needed.
* We use a bogus header size to safely detect this condition.
* Another problem is that hw_stopped was set to 0 way before it
* should be, so we would drop this skb. It should now be fixed.
* One option is to not accept data until we are properly setup.
* But, I suspect that when it happens, the ppp line discipline
* just "drops" the data, which might screw up connect scripts.
* The second option is to create a "safe skb", with large header
* and small size (see ircomm_tty_open() for values).
* We just need to make sure that when the real values get filled,
* we don't mess up the original "safe skb" (see tx_data_size).
* Jean II */
if (self->max_header_size == IRCOMM_TTY_HDR_UNITIALISED) {
/* TTY will retry */
IRDA_DEBUG(2, "%s() : not initialised\n", __FUNCTION__ );
return len;
if (self->max_header_size == IRCOMM_TTY_HDR_UNINITIALISED) {
IRDA_DEBUG(1, "%s() : not initialised\n", __FUNCTION__);
#ifdef IRCOMM_NO_TX_BEFORE_INIT
/* We didn't consume anything, TTY will retry */
return 0;
#endif
}
spin_lock_irqsave(&self->spinlock, flags);
......@@ -761,8 +771,11 @@ static int ircomm_tty_write(struct tty_struct *tty, int from_user,
* transmit buffer? Cannot use skb_tailroom, since
* dev_alloc_skb gives us a larger skb than we
* requested
* Note : use tx_data_size, because max_data_size
* may have changed and we don't want to overwrite
* the skb. - Jean II
*/
if ((tailroom = (self->max_data_size-skb->len)) > 0) {
if ((tailroom = (self->tx_data_size - skb->len)) > 0) {
/* Adjust data to tailroom */
if (size > tailroom)
size = tailroom;
......@@ -783,6 +796,9 @@ static int ircomm_tty_write(struct tty_struct *tty, int from_user,
}
skb_reserve(skb, self->max_header_size);
self->tx_skb = skb;
/* Remember skb size because max_data_size may
* change later on - Jean II */
self->tx_data_size = self->max_data_size;
}
/* Copy data */
......@@ -825,17 +841,22 @@ static int ircomm_tty_write_room(struct tty_struct *tty)
ASSERT(self != NULL, return -1;);
ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;);
#ifdef IRCOMM_NO_TX_BEFORE_INIT
/* max_header_size tells us if the channel is initialised or not. */
if (self->max_header_size == IRCOMM_TTY_HDR_UNINITIALISED)
/* Don't bother us yet */
return 0;
#endif
/* Check if we are allowed to transmit any data.
* hw_stopped is the regular flow control.
* max_header_size tells us if the channel is initialised or not.
* Jean II */
if ((tty->hw_stopped) ||
(self->max_header_size == IRCOMM_TTY_HDR_UNITIALISED))
if (tty->hw_stopped)
ret = 0;
else {
spin_lock_irqsave(&self->spinlock, flags);
if (self->tx_skb)
ret = self->max_data_size - self->tx_skb->len;
ret = self->tx_data_size - self->tx_skb->len;
else
ret = self->max_data_size;
spin_unlock_irqrestore(&self->spinlock, flags);
......
......@@ -517,6 +517,23 @@ void ircomm_tty_link_established(struct ircomm_tty_cb *self)
del_timer(&self->watchdog_timer);
/* Remove LM-IAS object now so it is not reused.
* IrCOMM deals very poorly with multiple incoming connections.
* It should look a lot more like IrNET, and "dup" a server TSAP
* to the application TSAP (based on various rules).
* This is a cheap workaround allowing multiple clients to
* connect to us. It will not always work.
* Each IrCOMM socket has an IAS entry. Incoming connections will
* pick the first one found. So, when we are fully connected,
* we remove our IAS entries so that the next IAS entry is used.
* We do that for *both* client and server, because a server
* can also create client instances.
* Jean II */
if (self->obj) {
irias_delete_object(self->obj);
self->obj = NULL;
}
/*
* IrCOMM link is now up, and if we are not using hardware
* flow-control, then declare the hardware as running. Otherwise we
......@@ -527,7 +544,7 @@ void ircomm_tty_link_established(struct ircomm_tty_cb *self)
IRDA_DEBUG(0, "%s(), waiting for CTS ...\n", __FUNCTION__ );
return;
} else {
IRDA_DEBUG(2, "%s(), starting hardware!\n", __FUNCTION__ );
IRDA_DEBUG(1, "%s(), starting hardware!\n", __FUNCTION__ );
self->tty->hw_stopped = 0;
......
......@@ -514,10 +514,10 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_next_lsap_state(self, LSAP_SETUP_PEND);
irlmp_do_lap_event(self->lap, LM_LAP_CONNECT_REQUEST, NULL);
/* Start watchdog timer (5 secs for now) */
irlmp_start_watchdog_timer(self, 5*HZ);
irlmp_do_lap_event(self->lap, LM_LAP_CONNECT_REQUEST, NULL);
break;
case LM_CONNECT_INDICATION:
if (self->conn_skb) {
......@@ -529,8 +529,6 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_next_lsap_state(self, LSAP_CONNECT_PEND);
irlmp_do_lap_event(self->lap, LM_LAP_CONNECT_REQUEST, NULL);
/* Start watchdog timer
* This is not mentioned in the spec, but there is a rare
* race condition that can get the socket stuck.
......@@ -543,10 +541,12 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event,
* a backup plan. 1 second is plenty (should be immediate).
* Jean II */
irlmp_start_watchdog_timer(self, 1*HZ);
irlmp_do_lap_event(self->lap, LM_LAP_CONNECT_REQUEST, NULL);
break;
default:
IRDA_DEBUG(2, "%s(), Unknown event %s\n",
__FUNCTION__, irlmp_event[event]);
IRDA_DEBUG(1, "%s(), Unknown event %s on LSAP %#02x\n",
__FUNCTION__, irlmp_event[event], self->slsap_sel);
if (skb)
dev_kfree_skb(skb);
break;
......@@ -604,8 +604,8 @@ static int irlmp_state_connect(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
break;
default:
IRDA_DEBUG(0, "%s(), Unknown event %s\n",
__FUNCTION__, irlmp_event[event]);
IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n",
__FUNCTION__, irlmp_event[event], self->slsap_sel);
if (skb)
dev_kfree_skb(skb);
break;
......@@ -666,8 +666,8 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_next_lsap_state(self, LSAP_DISCONNECTED);
break;
default:
IRDA_DEBUG(0, "%s(), Unknown event %s\n",
__FUNCTION__, irlmp_event[event]);
IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n",
__FUNCTION__, irlmp_event[event], self->slsap_sel);
if (skb)
dev_kfree_skb(skb);
break;
......@@ -757,8 +757,8 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_disconnect_indication(self, reason, skb);
break;
default:
IRDA_DEBUG(0, "%s(), Unknown event %s\n",
__FUNCTION__, irlmp_event[event]);
IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n",
__FUNCTION__, irlmp_event[event], self->slsap_sel);
if (skb)
dev_kfree_skb(skb);
break;
......@@ -830,8 +830,8 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_disconnect_indication(self, LM_CONNECT_FAILURE, NULL);
break;
default:
IRDA_DEBUG(0, "%s(), Unknown event %s\n",
__FUNCTION__, irlmp_event[event]);
IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n",
__FUNCTION__, irlmp_event[event], self->slsap_sel);
if (skb)
dev_kfree_skb(skb);
break;
......@@ -889,8 +889,8 @@ static int irlmp_state_setup_pend(struct lsap_cb *self, IRLMP_EVENT event,
irlmp_disconnect_indication(self, reason, NULL);
break;
default:
IRDA_DEBUG(0, "%s(), Unknown event %s\n",
__FUNCTION__, irlmp_event[event]);
IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n",
__FUNCTION__, irlmp_event[event], self->slsap_sel);
if (skb)
dev_kfree_skb(skb);
break;
......
......@@ -42,6 +42,7 @@
#include <net/irda/wrapper.h>
#include <net/irda/timer.h>
#include <net/irda/parameters.h>
#include <net/irda/crc.h>
extern struct proc_dir_entry *proc_irda;
......@@ -163,6 +164,7 @@ EXPORT_SYMBOL(irda_task_delete);
EXPORT_SYMBOL(async_wrap_skb);
EXPORT_SYMBOL(async_unwrap_char);
EXPORT_SYMBOL(irda_calc_crc16);
EXPORT_SYMBOL(irda_start_timer);
EXPORT_SYMBOL(setup_dma);
EXPORT_SYMBOL(infrared_mode);
......
......@@ -35,7 +35,7 @@
#define NET_IRDA 412 /* Random number */
enum { DISCOVERY=1, DEVNAME, DEBUG, FAST_POLL, DISCOVERY_SLOTS,
DISCOVERY_TIMEOUT, SLOT_TIMEOUT, MAX_BAUD_RATE, MIN_TX_TURN_TIME,
MAX_TX_DATA_SIZE, MAX_NOREPLY_TIME, WARN_NOREPLY_TIME,
MAX_TX_DATA_SIZE, MAX_TX_WINDOW, MAX_NOREPLY_TIME, WARN_NOREPLY_TIME,
LAP_KEEPALIVE_TIME };
extern int sysctl_discovery;
......@@ -48,6 +48,7 @@ extern char sysctl_devname[];
extern int sysctl_max_baud_rate;
extern int sysctl_min_tx_turn_time;
extern int sysctl_max_tx_data_size;
extern int sysctl_max_tx_window;
extern int sysctl_max_noreply_time;
extern int sysctl_warn_noreply_time;
extern int sysctl_lap_keepalive_time;
......@@ -69,6 +70,8 @@ static int max_min_tx_turn_time = 10000; /* See qos.c - IrLAP spec */
static int min_min_tx_turn_time = 0;
static int max_max_tx_data_size = 2048; /* See qos.c - IrLAP spec */
static int min_max_tx_data_size = 64;
static int max_max_tx_window = 7; /* See qos.c - IrLAP spec */
static int min_max_tx_window = 1;
static int max_max_noreply_time = 40; /* See qos.c - IrLAP spec */
static int min_max_noreply_time = 3;
static int max_warn_noreply_time = 3; /* 3s == standard */
......@@ -125,6 +128,9 @@ static ctl_table irda_table[] = {
{ MAX_TX_DATA_SIZE, "max_tx_data_size", &sysctl_max_tx_data_size,
sizeof(int), 0644, NULL, &proc_dointvec_minmax, &sysctl_intvec,
NULL, &min_max_tx_data_size, &max_max_tx_data_size },
{ MAX_TX_WINDOW, "max_tx_window", &sysctl_max_tx_window,
sizeof(int), 0644, NULL, &proc_dointvec_minmax, &sysctl_intvec,
NULL, &min_max_tx_window, &max_max_tx_window },
{ MAX_NOREPLY_TIME, "max_noreply_time", &sysctl_max_noreply_time,
sizeof(int), 0644, NULL, &proc_dointvec_minmax, &sysctl_intvec,
NULL, &min_max_noreply_time, &max_max_noreply_time },
......
......@@ -200,11 +200,13 @@ static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi,
{
irda_param_t p;
int n = 0;
int extract_len; /* Real length we extract */
int err;
p.pi = pi; /* In case handler needs to know */
p.pl = buf[1]; /* Extract length of value */
p.pv.i = 0; /* Clear value */
extract_len = p.pl; /* Default : extract all */
/* Check if buffer is long enough for parsing */
if (len < (2+p.pl)) {
......@@ -217,18 +219,30 @@ static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi,
/*
* Check that the integer length is what we expect it to be. If the
* handler wants a 16 bit integer then a 32 bit one is not good enough
* PV_INTEGER means that the handler is flexible.
*/
if (((type & PV_MASK) != PV_INTEGER) && ((type & PV_MASK) != p.pl)) {
ERROR("%s: invalid parameter length! "
"Expected %d bytes, but value had %d bytes!\n",
__FUNCTION__, type & PV_MASK, p.pl);
/* Most parameters are bit/byte fields or little endian,
* so it's ok to only extract a subset of it (the subset
* that the handler expects). This is necessary, as some
* broken implementations seem to add extra undefined bits.
* If the parameter is shorter than we expect or is big
* endian, we can't play those tricks. Jean II */
if((p.pl < (type & PV_MASK)) || (type & PV_BIG_ENDIAN)) {
/* Skip parameter */
return p.pl+2;
} else {
/* Extract subset of it, fallthrough */
extract_len = type & PV_MASK;
}
}
switch (p.pl) {
switch (extract_len) {
case 1:
n += irda_param_unpack(buf+2, "b", &p.pv.i);
break;
......
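/* Worked example of the subset extraction above (illustrative): if
 * the handler declared a 1 byte parameter (type & PV_MASK == 1) but a
 * broken peer sends p.pl == 2, extract_len is reduced to 1, so only
 * buf[2] - the least significant byte in little endian order - gets
 * unpacked, while the parser still advances past the full p.pl bytes
 * on the wire (return p.pl+2, as in the skip branch above).
 */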
......@@ -70,13 +70,18 @@ unsigned sysctl_min_tx_turn_time = 10;
* 1.2, chapt 5.3.2.1, p41). But, this number includes the LAP header
* (2 bytes), and CRC (32 bits at 4 Mb/s). So, for the I field (LAP
* payload), that's only 2042 bytes. Oups !
* I've had trouble trouble transmitting 2048 bytes frames with USB
* dongles and nsc-ircc at 4 Mb/s, so adjust to 2042... I don't know
* if this bug applies only for 2048 bytes frames or all negociated
* frame sizes, but all hardware seem to support "2048 bytes" frames.
* You can use the sysctl to play with this value anyway.
* My nsc-ircc hardware has trouble receiving 2048 byte frames at 4 Mb/s,
* so adjust to 2042... I don't know if this bug applies only to 2048
* byte frames or to all negotiated frame sizes, but you can use the sysctl
* to play with this value anyway.
* Jean II */
unsigned sysctl_max_tx_data_size = 2042;
/*
* Maximum transmit window, i.e. number of LAP frames between turn-around.
* This allows us to override what the peer told us. Some peers are buggy and
* don't always support what they tell us.
* Jean II */
unsigned sysctl_max_tx_window = 7;
static int irlap_param_baud_rate(void *instance, irda_param_t *param, int get);
static int irlap_param_link_disconnect(void *instance, irda_param_t *parm,
......@@ -185,6 +190,18 @@ int msb_index (__u16 word)
__u16 msb = 0x8000;
int index = 15; /* Current MSB */
/* Check for buggy peers.
* Note : there is a small probability that it could be us, but I
* would expect driver authors to catch that pretty early and be
* able to check precisely what's going on. If an end user sees this,
* it's very likely the peer. - Jean II */
if (word == 0) {
WARNING("%s(), Detected buggy peer, adjust null PV to 0x1!\n",
__FUNCTION__);
/* The only safe choice (we don't know the array size) */
word = 0x1;
}
while (msb) {
if (word & msb)
break; /* Found it! */
......@@ -335,10 +352,14 @@ void irlap_adjust_qos_settings(struct qos_info *qos)
/*
* Make sure the mintt is sensible.
* Main culprit : Ericsson T39. - Jean II
*/
if (sysctl_min_tx_turn_time > qos->min_turn_time.value) {
int i;
WARNING("%s(), Detected buggy peer, adjust mtt to %dus!\n",
__FUNCTION__, sysctl_min_tx_turn_time);
/* We don't really need bits, but easier this way */
i = value_highest_bit(sysctl_min_tx_turn_time, min_turn_times,
8, &qos->min_turn_time.bits);
......@@ -398,6 +419,11 @@ void irlap_adjust_qos_settings(struct qos_info *qos)
if (qos->data_size.value > sysctl_max_tx_data_size)
/* Allow non-discrete adjustment to avoid losing capacity */
qos->data_size.value = sysctl_max_tx_data_size;
/*
* Override Tx window if user request it. - Jean II
*/
if (qos->window_size.value > sysctl_max_tx_window)
qos->window_size.value = sysctl_max_tx_window;
}
/*
......