Commit 38b1ae7e authored by Linus Torvalds

After doing too many last-minute updates of critical code that we really
shouldn't have left this late (*), I'm going to calm things down. I've
released 2.3.33 which fixes a few smaller problems with 2.3.32, and I'll
let it quiet down a bit for a while.

We're obviously not going to have a 2.4 this millennium, but let's get the
pre-2.4 series going this year, with the real release Q1 of 2000.

		Linus

(*) Both the mm layer and the SCSI layer were changed quite a lot: we'll be
better for it, but I'd have been happier if we hadn't needed to.
parent 620b47d6
VERSION = 2
PATCHLEVEL = 3
SUBLEVEL = 32
SUBLEVEL = 33
EXTRAVERSION =
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
......
......@@ -29,23 +29,32 @@ ifeq ($(have_mcpu),y)
# the host compiler might have on by default. Given that EV4 and EV5
# have the same instruction set, prefer EV5 because an EV5 schedule is
# more likely to keep an EV4 processor busy than vice-versa.
mcpu_done :=
ifeq ($(CONFIG_ALPHA_GENERIC),y)
CFLAGS := $(CFLAGS) -mcpu=ev5
mcpu_done := y
endif
ifeq ($(CONFIG_ALPHA_EV4),y)
CFLAGS := $(CFLAGS) -mcpu=ev4
endif
ifeq ($(CONFIG_ALPHA_PYXIS),y)
ifeq ($(mcpu_done)$(CONFIG_ALPHA_PYXIS),y)
CFLAGS := $(CFLAGS) -mcpu=ev56
mcpu_done := y
endif
ifeq ($(CONFIG_ALPHA_POLARIS),y)
ifeq ($(mcpu_done)$(CONFIG_ALPHA_POLARIS),y)
ifeq ($(have_mcpu_pca56),y)
CFLAGS := $(CFLAGS) -mcpu=pca56
else
CFLAGS := $(CFLAGS) -mcpu=ev56
endif
mcpu_done := y
endif
ifeq ($(mcpu_done)$(CONFIG_ALPHA_NAUTILUS)$(have_mcpu_ev67),yy)
CFLAGS := $(CFLAGS) -mcpu=ev67
mcpu_done := y
endif
ifeq ($(mcpu_done)$(CONFIG_ALPHA_EV4),y)
CFLAGS := $(CFLAGS) -mcpu=ev4
mcpu_done := y
endif
ifeq ($(CONFIG_ALPHA_EV6),y)
ifeq ($(mcpu_done)$(CONFIG_ALPHA_EV6),y)
ifeq ($(have_mcpu_ev6),y)
CFLAGS := $(CFLAGS) -mcpu=ev6
else
......@@ -55,6 +64,7 @@ ifeq ($(have_mcpu),y)
CFLAGS := $(CFLAGS) -mcpu=ev56
endif
endif
mcpu_done := y
endif
endif
......
......@@ -36,7 +36,6 @@
extern struct hwrpb_struct *hwrpb;
extern void dump_thread(struct pt_regs *, struct user *);
extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
extern void ___delay(void);
/* these are C runtime functions with special calling conventions: */
extern void __divl (void);
......@@ -151,11 +150,6 @@ EXPORT_SYMBOL_NOVERS(__down_failed);
EXPORT_SYMBOL_NOVERS(__down_failed_interruptible);
EXPORT_SYMBOL_NOVERS(__up_wakeup);
/*
* This is called specially from __delay.
*/
EXPORT_SYMBOL_NOVERS(___delay);
/*
* SMP-specific symbols.
*/
......
......@@ -338,6 +338,7 @@ irongate_init_arch(void)
{
struct pci_controler *hose;
IRONGATE0->stat_cmd = IRONGATE0->stat_cmd & ~0x100;
irongate_pci_clr_err();
irongate_register_dump(__FUNCTION__);
......
......@@ -306,7 +306,7 @@ mcpcia_probe_hose(int h)
mb();
draina();
wrmces(7);
mcheck_expected(cpu) = 1;
mcheck_expected(cpu) = 2; /* indicates probing */
mcheck_taken(cpu) = 0;
mcheck_extra(cpu) = mid;
mb();
......@@ -415,7 +415,7 @@ mcpcia_startup_hose(struct pci_controler *hose)
#if 0
tmp = *(vuip)MCPCIA_INT_CTL(mid);
printk("mcpcia_init_arch: INT_CTL was 0x%x\n", tmp);
printk("mcpcia_startup_hose: INT_CTL was 0x%x\n", tmp);
*(vuip)MCPCIA_INT_CTL(mid) = 1U;
mb();
tmp = *(vuip)MCPCIA_INT_CTL(mid);
......@@ -548,30 +548,37 @@ mcpcia_machine_check(unsigned long vector, unsigned long la_ptr,
struct el_common *mchk_header;
struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout;
unsigned int cpu = smp_processor_id();
int expected;
mchk_header = (struct el_common *)la_ptr;
mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr;
expected = mcheck_expected(cpu);
mb();
mb(); /* magic */
draina();
if (mcheck_expected(cpu)) {
mcpcia_pci_clr_err(mcheck_extra(cpu));
} else {
switch (expected) {
case 0:
/* FIXME: how do we figure out which hose the
error was on? */
struct pci_controler *hose;
for (hose = hose_head; hose; hose = hose->next)
mcpcia_pci_clr_err(hose2mid(hose->index));
break;
case 1:
mcpcia_pci_clr_err(mcheck_extra(cpu));
break;
default:
/* Otherwise, we're being called from mcpcia_probe_hose
and there's no hose clear an error from. */
break;
}
wrmces(0x7);
mb();
if (mcheck_expected(cpu)) {
process_mcheck_info(vector, la_ptr, regs, "MCPCIA", 1);
} else {
process_mcheck_info(vector, la_ptr, regs, "MCPCIA", 0);
if (vector != 0x620 && vector != 0x630)
mcpcia_print_uncorrectable(mchk_logout);
}
process_mcheck_info(vector, la_ptr, regs, "MCPCIA", expected != 0);
if (!expected && vector != 0x620 && vector != 0x630)
mcpcia_print_uncorrectable(mchk_logout);
}
......@@ -95,21 +95,3 @@ halt:
.prologue 0
call_pal PAL_halt
.end halt
#
# Having the delay loop out of line guarantees that we wont
# run into weird alignment conditions (on new processors)
# that vary the speed of the loop.
#
.align 5
.globl ___delay
.ent ___delay
___delay:
.set noat
.frame $30,0,$28,0
.prologue 0
1: subq $0,1,$0
bge $0,1b
ret $31,($28),0
.set at
.end ___delay
......@@ -57,6 +57,17 @@ quirk_isa_bridge(struct pci_dev *dev)
dev->class = PCI_CLASS_BRIDGE_ISA;
}
static void __init
quirk_ali_ide_ports(struct pci_dev *dev)
{
if (dev->resource[0].end == 0xffff)
dev->resource[0].end = dev->resource[0].start + 7;
if (dev->resource[2].end == 0xffff)
dev->resource[2].end = dev->resource[2].start + 7;
if (dev->resource[3].end == 0xffff)
dev->resource[3].end = dev->resource[3].start + 7;
}
static void __init
quirk_vga_enable_rom(struct pci_dev *dev)
{
......@@ -82,6 +93,8 @@ struct pci_fixup pcibios_fixups[] __initdata = {
quirk_eisa_bridge },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378,
quirk_isa_bridge },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M5229,
quirk_ali_ide_ports },
{ PCI_FIXUP_FINAL, PCI_ANY_ID, PCI_ANY_ID, quirk_vga_enable_rom },
{ 0 }
};
......@@ -131,13 +144,7 @@ pcibios_align_resource(void *data, struct resource *res, unsigned long size)
/* Align to multiple of size of minimum base. */
alignto = MAX(0x1000, size);
start = ALIGN(start, alignto);
if (size > 7 * 16*MB) {
printk(KERN_WARNING "PCI: dev %s "
"requests %ld bytes of contiguous "
"address space---don't use sparse "
"memory accesses on this device!\n",
dev->name, size);
} else {
if (size <= 7 * 16*MB) {
if (((start / (16*MB)) & 0x7) == 0) {
start &= ~(128*MB - 1);
start += 16*MB;
......
......@@ -88,13 +88,28 @@ nautilus_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
void
nautilus_kill_arch(int mode)
{
u8 tmp;
switch (mode) {
case LINUX_REBOOT_CMD_RESTART:
{
u8 t8;
pcibios_read_config_byte(0, 0x38, 0x43, &t8);
pcibios_write_config_byte(0, 0x38, 0x43, t8 | 0x80);
outb(1, 0x92);
outb(0, 0x92);
/* NOTREACHED */
}
break;
if (mode == LINUX_REBOOT_CMD_RESTART) {
pcibios_read_config_byte(0, 0x38, 0x43, &tmp);
pcibios_write_config_byte(0, 0x38, 0x43, tmp | 0x80);
outb(1, 0x92);
outb(0, 0x92);
case LINUX_REBOOT_CMD_POWER_OFF:
{
u32 pmuport;
pcibios_read_config_dword(0, 0x88, 0x10, &pmuport);
pmuport &= 0xfffe;
outl(0xffff, pmuport); /* clear pending events */
outw(0x2000, pmuport+4); /* power off */
/* NOTREACHED */
}
break;
}
}
......@@ -435,8 +450,8 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr,
Add to that the two levels of severity - correctable or not. */
if (vector == SCB_Q_SYSMCHK
&& ((IRONGATE0->dramms & 0x3FF) == 0x300)) {
unsigned long nmi_ctl, temp;
&& ((IRONGATE0->dramms & 0x300) == 0x300)) {
unsigned long nmi_ctl;
/* Clear ALI NMI */
nmi_ctl = inb(0x61);
......@@ -445,15 +460,15 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr,
nmi_ctl &= ~0x0c;
outb(nmi_ctl, 0x61);
temp = IRONGATE0->stat_cmd;
IRONGATE0->stat_cmd = temp; /* write again clears error bits */
/* Write again clears error bits. */
IRONGATE0->stat_cmd = IRONGATE0->stat_cmd & ~0x100;
mb();
temp = IRONGATE0->stat_cmd; /* re-read to force write */
IRONGATE0->stat_cmd;
temp = IRONGATE0->dramms;
IRONGATE0->dramms = temp; /* write again clears error bits */
/* Write again clears error bits. */
IRONGATE0->dramms = IRONGATE0->dramms;
mb();
temp = IRONGATE0->dramms; /* re-read to force write */
IRONGATE0->dramms;
draina();
wrmces(0x7);
......
......@@ -308,6 +308,7 @@ mem_init(void)
{
max_mapnr = num_physpages = max_low_pfn;
totalram_pages += free_all_bootmem();
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
printk_memory_info();
}
......
......@@ -57,9 +57,7 @@ else
bool ' AEC6210 Tuning support (EXPERIMENTAL)' CONFIG_BLK_DEV_AEC6210_TUNING
fi
if [ "$CONFIG_IDEDMA_PCI_EXPERIMENTAL" = "y" ]; then
if [ "$CONFIG_X86" = "y" ]; then
bool ' ALI M15x3 chipset support (EXPERIMENTAL)' CONFIG_BLK_DEV_ALI15X3
fi
bool ' ALI M15x3 chipset support (EXPERIMENTAL)' CONFIG_BLK_DEV_ALI15X3
bool ' CMD646 chipset support (EXPERIMENTAL)' CONFIG_BLK_DEV_CMD646
bool ' CY82C693 chipset support (EXPERIMENTAL)' CONFIG_BLK_DEV_CY82C693
fi
......
......@@ -134,13 +134,13 @@ struct agp_bridge_data {
#endif
};
#define OUTREG32(mmap, addr, val) *(volatile u32 *)(mmap + (addr)) = (val)
#define OUTREG16(mmap, addr, val) *(volatile u16 *)(mmap + (addr)) = (val)
#define OUTREG8 (mmap, addr, val) *(volatile u8 *) (mmap + (addr)) = (val)
#define OUTREG32(mmap, addr, val) __raw_writel((val), (mmap)+(addr))
#define OUTREG16(mmap, addr, val) __raw_writew((val), (mmap)+(addr))
#define OUTREG8 (mmap, addr, val) __raw_writeb((val), (mmap)+(addr))
#define INREG32(mmap, addr) *(volatile u32 *)(mmap + (addr))
#define INREG16(mmap, addr) *(volatile u16 *)(mmap + (addr))
#define INREG8 (mmap, addr) *(volatile u8 *) (mmap + (addr))
#define INREG32(mmap, addr) __raw_readl((mmap)+(addr))
#define INREG16(mmap, addr) __raw_readw((mmap)+(addr))
#define INREG8 (mmap, addr) __raw_readb((mmap)+(addr))
#define CACHE_FLUSH agp_bridge.cache_flush
#define A_SIZE_8(x) ((aper_size_info_8 *) x)
......
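A note on the register-access change above: dereferencing a cast volatile pointer only happens to work where MMIO behaves like ordinary memory (as on x86); the __raw_writeX()/__raw_readX() accessors let each architecture supply the right access sequence, which is what makes the driver usable on Alpha. A minimal sketch of the same pattern, assuming <asm/io.h> provides the raw accessors as in this era's tree; clear_status(), mmio_base, and the 0x10 offset are illustrative and not part of this patch:

#include <linux/types.h>
#include <asm/io.h>
#include <asm/system.h>		/* for mb() */

/* Illustrative only: clear bit 0 of a 32-bit status register at offset 0x10. */
static void clear_status(unsigned long mmio_base)
{
	u32 v = __raw_readl(mmio_base + 0x10);	/* raw access: no byte swap, no implied barrier */
	__raw_writel(v & ~0x1, mmio_base + 0x10);
	mb();					/* order the write on weakly-ordered CPUs such as Alpha */
}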
......@@ -62,6 +62,26 @@ static void flush_cache(void);
static struct agp_bridge_data agp_bridge;
static int agp_try_unsupported __initdata = 0;
static inline void flush_cache(void)
{
#if defined(__i386__)
asm volatile ("wbinvd":::"memory");
#elif defined(__alpha__)
/* ??? I wonder if we'll really need to flush caches, or if the
core logic can manage to keep the system coherent. The ARM
speaks only of using `cflush' to get things in memory in
preparation for power failure.
If we do need to call `cflush', we'll need a target page,
as we can only flush one page at a time. */
mb();
#else
#error "Please define flush_cache."
#endif
}
#ifdef __SMP__
static atomic_t cpus_waiting;
......@@ -87,12 +107,6 @@ static void smp_flush_cache(void)
#define global_cache_flush flush_cache
#endif /* __SMP__ */
static void flush_cache(void)
{
asm volatile ("wbinvd":::"memory");
}
int agp_backend_acquire(void)
{
atomic_inc(&agp_bridge.agp_in_use);
......@@ -1356,6 +1370,7 @@ static int amd_irongate_fetch_size(void)
static int amd_irongate_configure(void)
{
aper_size_info_32 *current_size;
unsigned long addr;
u32 temp;
u16 enable_reg;
......@@ -1389,8 +1404,16 @@ static int amd_irongate_configure(void)
/* Get the address for the gart region */
pci_read_config_dword(agp_bridge.dev, AMD_APBASE, &temp);
temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
agp_bridge.gart_bus_addr = temp;
addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
#ifdef __alpha__
/* ??? Presumably what is wanted is the bus address as seen
from the CPU side, since it appears that this value is
exported to userland via an ioctl. The terminology below
is confused, mixing `physical address' with `bus address',
as x86 folk are wont to do. */
addr = virt_to_phys(ioremap(addr, 0));
#endif
agp_bridge.gart_bus_addr = addr;
return 0;
}
......@@ -1894,13 +1917,10 @@ static struct agp_max_table maxes_table[9] =
static int agp_find_max(void)
{
int memory;
float t;
int index;
int result;
long memory, t, index, result;
memory = virt_to_phys(high_memory) / 0x100000;
index = 0;
memory = virt_to_phys(high_memory) >> 20;
index = 1;
while ((memory > maxes_table[index].mem) &&
(index < 8)) {
......@@ -1914,8 +1934,8 @@ static int agp_find_max(void)
(t * (maxes_table[index].agp - maxes_table[index - 1].agp));
printk(KERN_INFO "agpgart: Maximum main memory to use "
"for agp memory: %dM\n", result);
result = (result * 0x100000) / 4096;
"for agp memory: %ldM\n", result);
result = result << (20 - PAGE_SHIFT);
return result;
}
......
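On the agp_find_max() change just above: kernel code avoids floating point (the FPU state is not saved or restored for kernel execution), so the interpolation is redone with longs. The final conversion also becomes page-size independent: result is in megabytes, a megabyte is 2^20 bytes and a page is 2^PAGE_SHIFT bytes, so the page count is result << (20 - PAGE_SHIFT). The old expression (result * 0x100000) / 4096 hard-coded 4 KB pages (PAGE_SHIFT == 12, i.e. result * 256); the shifted form also stays correct for Alpha's 8 KB pages (PAGE_SHIFT == 13, i.e. result * 128).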
......@@ -412,6 +412,15 @@ static inline void handle_mouse_event(unsigned char scancode)
#endif
}
static inline void handle_keyboard_event(unsigned char scancode)
{
#ifdef CONFIG_VT
if (do_acknowledge(scancode))
handle_scancode(scancode, !(scancode & 0x80));
#endif
mark_bh(KEYBOARD_BH);
}
/*
* This reads the keyboard status port, and does the
* appropriate action.
......@@ -428,20 +437,18 @@ static unsigned char handle_kbd_event(void)
unsigned char scancode;
scancode = kbd_read_input();
if (status & KBD_STAT_MOUSE_OBF) {
handle_mouse_event(scancode);
} else {
#ifdef CONFIG_VT
if (do_acknowledge(scancode))
handle_scancode(scancode, !(scancode & 0x80));
#endif
mark_bh(KEYBOARD_BH);
/* Ignore error bytes */
if (!(status & (KBD_STAT_GTO | KBD_STAT_PERR))) {
if (status & KBD_STAT_MOUSE_OBF)
handle_mouse_event(scancode);
else
handle_keyboard_event(scancode);
}
status = kbd_read_status();
if(!work--)
{
if (!--work) {
printk(KERN_ERR "pc_keyb: controller jammed (0x%02X).\n",
status);
break;
......
......@@ -34,10 +34,15 @@ pci_claim_resource(struct pci_dev *dev, int resource)
int err;
err = -EINVAL;
if (root != NULL)
if (root != NULL) {
err = request_resource(root, res);
if (err) {
printk(KERN_ERR "PCI: Address space collision on region %d "
if (err) {
printk(KERN_ERR "PCI: Address space collision on "
"region %d of device %s [%lx:%lx]\n",
resource, dev->name, res->start, res->end);
}
} else {
printk(KERN_ERR "PCI: No parent found for region %d "
"of device %s\n", resource, dev->name);
}
......@@ -72,14 +77,14 @@ pdev_assign_unassigned_resources(struct pci_dev *dev, u32 min_io, u32 min_mem)
continue;
/* Determine the root we allocate from. */
res->end -= res->start;
res->start = 0;
root = pci_find_parent_resource(dev, res);
if (root == NULL)
continue;
min = (res->flags & IORESOURCE_IO ? min_io : min_mem);
min += root->start;
size = res->end - res->start + 1;
size = res->end + 1;
DBGC((" for root[%lx:%lx] min[%lx] size[%lx]\n",
root->start, root->end, min, size));
......
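A worked example of the rebasing done in pdev_assign_unassigned_resources() above (the addresses are illustrative): an unassigned BAR recorded as [0x8000, 0x80ff] becomes [0, 0xff] after res->end -= res->start; res->start = 0, so the allocator now sees only the length, size = res->end + 1 = 0x100, and is free to place the region anywhere at or above min inside the parent resource.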
......@@ -46,11 +46,33 @@
#define SCSI_PA(address) virt_to_bus(address)
#define BAD_DMA(msg, address, length) \
{ \
printk(KERN_CRIT "%s address %p length %d\n", msg, address, length); \
panic("Buffer at physical address > 16Mb used for aha1542"); \
}
static void BAD_DMA(void * address, unsigned int length)
{
printk(KERN_CRIT "buf vaddress %p paddress 0x%lx length %d\n",
address,
SCSI_PA(address),
length);
panic("Buffer at physical address > 16Mb used for aha1542");
}
static void BAD_SG_DMA(Scsi_Cmnd * SCpnt,
struct scatterlist * sgpnt,
int nseg,
int badseg)
{
printk(KERN_CRIT "sgpnt[%d:%d] addr %p/0x%lx alt %p/0x%lx length %d\n",
badseg, nseg,
sgpnt[badseg].address,
SCSI_PA(sgpnt[badseg].address),
sgpnt[badseg].alt_address,
sgpnt[badseg].alt_address ? SCSI_PA(sgpnt[badseg].alt_address) : 0,
sgpnt[badseg].length);
/*
* Not safe to continue.
*/
panic("Buffer at physical address > 16Mb used for aha1542");
}
#include<linux/stat.h>
......@@ -655,7 +677,7 @@ int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
};
any2scsi(cptr[i].dataptr, SCSI_PA(sgpnt[i].address));
if(SCSI_PA(sgpnt[i].address+sgpnt[i].length-1) > ISA_DMA_THRESHOLD)
BAD_DMA("sgpnt", sgpnt[i].address, sgpnt[i].length);
BAD_SG_DMA(SCpnt, sgpnt, SCpnt->use_sg, i);
any2scsi(cptr[i].datalen, sgpnt[i].length);
};
any2scsi(ccb[mbo].datalen, SCpnt->use_sg * sizeof(struct chain));
......@@ -670,7 +692,7 @@ int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
SCpnt->host_scribble = NULL;
any2scsi(ccb[mbo].datalen, bufflen);
if(buff && SCSI_PA(buff+bufflen-1) > ISA_DMA_THRESHOLD)
BAD_DMA("buff", buff, bufflen);
BAD_DMA(buff, bufflen);
any2scsi(ccb[mbo].dataptr, SCSI_PA(buff));
};
ccb[mbo].idlun = (target&7)<<5 | direction | (lun & 7); /*SCSI Target Id*/
......
......@@ -1250,6 +1250,12 @@ static int isp1020_init(struct Scsi_Host *sh)
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 16);
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
#endif
#ifdef __alpha__
/* Force ALPHA to use bus I/O and not bus MEM.
This is to avoid having to use HAE_MEM registers,
which is broken on some platforms and with SMP. */
command &= ~PCI_COMMAND_MEMORY;
#endif
if ((command & PCI_COMMAND_MEMORY) &&
((mem_flags & 1) == 0)) {
......
......@@ -1139,12 +1139,18 @@ Scsi_Cmnd *scsi_allocate_device(Scsi_Device * device, int wait)
* to complete */
atomic_inc(&SCpnt->host->host_active);
SCpnt->buffer = NULL;
SCpnt->bufflen = 0;
SCpnt->request_buffer = NULL;
SCpnt->request_bufflen = 0;
SCpnt->use_sg = 0; /* Reset the scatter-gather flag */
SCpnt->old_use_sg = 0;
SCpnt->transfersize = 0; /* No default transfer size */
SCpnt->cmd_len = 0;
SCpnt->underflow = 0; /* Do not flag underflow conditions */
SCpnt->resid = 0;
SCpnt->state = SCSI_STATE_INITIALIZING;
SCpnt->owner = SCSI_OWNER_HIGHLEVEL;
......@@ -1344,7 +1350,7 @@ int scsi_dispatch_cmd(Scsi_Cmnd * SCpnt)
* need be held upon entry. The old queueing code the lock was
* assumed to be held upon entry.
*
* Returns: Pointer to command descriptor.
* Returns: Nothing.
*
* Notes: Prior to the new queue code, this function was not SMP-safe.
* Also, this function is now only used for queueing requests
......@@ -1482,6 +1488,7 @@ void scsi_done(Scsi_Cmnd * SCpnt)
* etc, etc.
*/
if (!tstatus) {
SCpnt->done_late = 1;
return;
}
/* Set the serial numbers back to zero */
......
......@@ -397,6 +397,11 @@ extern int scsi_partsize(struct buffer_head *bh, unsigned long capacity,
unsigned int *cyls, unsigned int *hds,
unsigned int *secs);
/*
* Prototypes for functions in scsi_merge.c
*/
extern void recount_segments(Scsi_Cmnd * SCpnt);
/*
* Prototypes for functions in scsi_lib.c
*/
......@@ -422,8 +427,6 @@ extern void scsi_wait_cmd(Scsi_Cmnd *, const void *cmnd,
void (*done) (struct scsi_cmnd *),
int timeout, int retries);
extern void scsi_request_fn(request_queue_t * q);
extern Scsi_Cmnd *scsi_allocate_device(Scsi_Device *, int);
extern Scsi_Cmnd *scsi_request_queueable(struct request *, Scsi_Device *);
......@@ -626,6 +629,14 @@ struct scsi_cmnd {
unsigned flags;
/*
* Used to indicate that a command which has timed out also
* completed normally. Typically the completion function will
* do nothing but set this flag in this instance because the
* timeout handler is already running.
*/
unsigned done_late:1;
/*
* These two flags are used to track commands that are in the
* mid-level queue. The idea is that a command can be there for
......@@ -636,11 +647,6 @@ struct scsi_cmnd {
unsigned host_wait:1;
unsigned device_wait:1;
/* These variables are for the cdrom only. Once we have variable size
* buffers in the buffer cache, they will go away. */
int this_count;
/* End of special cdrom variables */
/* Low-level done function - can be used by low-level driver to point
* to completion function. Not used by mid/upper level code. */
void (*scsi_done) (struct scsi_cmnd *);
......
......@@ -208,6 +208,7 @@ int scsi_debug_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
sgcount = 0;
sgpnt = NULL;
#ifdef CONFIG_SMP
/*
* The io_request_lock *must* be held at this point.
*/
......@@ -215,6 +216,7 @@ int scsi_debug_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
{
printk("Warning - io_request_lock is not held in queuecommand\n");
}
#endif
/*
* If we are being notified of the mid-level reposessing a command due to timeout,
......
......@@ -109,6 +109,7 @@ int scsi_insert_special_cmd(Scsi_Cmnd * SCpnt, int at_head)
for (req = q->current_request; req; req = req->next) {
if (req->next == NULL) {
req->next = &SCpnt->request;
break;
}
}
}
......@@ -383,6 +384,16 @@ void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
scsi_free(SCpnt->buffer, SCpnt->bufflen);
}
}
/*
* Zero these out. They now point to freed memory, and it is
* dangerous to hang onto the pointers.
*/
SCpnt->buffer = NULL;
SCpnt->bufflen = 0;
SCpnt->request_buffer = NULL;
SCpnt->request_bufflen = 0;
/*
* Next deal with any sectors which we were able to correctly
* handle.
......@@ -630,9 +641,14 @@ void scsi_request_fn(request_queue_t * q)
/*
* Find the actual device driver associated with this command.
* The SPECIAL requests are things like character device or
* ioctls, which did not originate from ll_rw_blk.
* ioctls, which did not originate from ll_rw_blk. Note that
* the special field is also used to indicate the SCpnt for
* the remainder of a partially fulfilled request that can
* come up when there is a medium error. We have to treat
* these two cases differently. We differentiate by looking
* at request.cmd, as this tells us the real story.
*/
if (req->special != NULL) {
if (req->cmd == SPECIAL) {
STpnt = NULL;
SCpnt = (Scsi_Cmnd *) req->special;
} else {
......@@ -643,7 +659,20 @@ void scsi_request_fn(request_queue_t * q)
/*
* Now try and find a command block that we can use.
*/
SCpnt = scsi_allocate_device(SDpnt, FALSE);
if( req->special != NULL ) {
SCpnt = (Scsi_Cmnd *) req->special;
/*
* We need to recount the number of
* scatter-gather segments here - the
* normal case code assumes this to be
* correct, as it would be a performance
* lose to always recount. Handling
* errors is always unusual, of course.
*/
recount_segments(SCpnt);
} else {
SCpnt = scsi_allocate_device(SDpnt, FALSE);
}
/*
* If so, we are ready to do something. Bump the count
* while the queue is locked and then break out of the loop.
......@@ -689,8 +718,9 @@ void scsi_request_fn(request_queue_t * q)
* in this queue are for the same device.
*/
q->current_request = req->next;
SCpnt->request.next = NULL;
if (req->special == NULL) {
if (req != &SCpnt->request) {
memcpy(&SCpnt->request, req, sizeof(struct request));
/*
......@@ -702,13 +732,15 @@ void scsi_request_fn(request_queue_t * q)
wake_up(&wait_for_request);
}
/*
* Now it is finally safe to release the lock. We are not going
* to noodle the request list until this request has been queued
* and we loop back to queue another.
* Now it is finally safe to release the lock. We are
* not going to noodle the request list until this
* request has been queued and we loop back to queue
* another.
*/
req = NULL;
spin_unlock_irq(&io_request_lock);
if (req->special == NULL) {
if (SCpnt->request.cmd != SPECIAL) {
/*
* This will do a couple of things:
* 1) Fill in the actual SCSI command.
......
......@@ -156,7 +156,7 @@ __inline static int __count_segments(struct request *req,
* the DMA threshold boundary.
*/
if (dma_host &&
virt_to_phys(bh->b_data - 1) == ISA_DMA_THRESHOLD) {
virt_to_phys(bh->b_data) - 1 == ISA_DMA_THRESHOLD) {
ret++;
} else if (CONTIGUOUS_BUFFERS(bh, bh->b_reqnext)) {
/*
......@@ -172,6 +172,43 @@ __inline static int __count_segments(struct request *req,
return ret;
}
/*
* Function: recount_segments()
*
* Purpose: Recount the number of scatter-gather segments for this request.
*
* Arguments: req - request that needs recounting.
*
* Returns: Count of the number of SG segments for the request.
*
* Lock status: Irrelevant.
*
* Notes: This is only used when we have partially completed requests
* and the bit that is leftover is of an indeterminate size.
* This can come up if you get a MEDIUM_ERROR, for example,
* as we will have "completed" all of the sectors up to and
* including the bad sector, and the leftover bit is what
* we have to do now. This tends to be a rare occurence, so
* we aren't busting our butts to instantiate separate versions
* of this function for the 4 different flag values. We
* probably should, however.
*/
void
recount_segments(Scsi_Cmnd * SCpnt)
{
struct request *req;
struct Scsi_Host *SHpnt;
Scsi_Device * SDpnt;
req = &SCpnt->request;
SHpnt = SCpnt->host;
SDpnt = SCpnt->device;
req->nr_segments = __count_segments(req,
CLUSTERABLE_DEVICE(SHpnt, SDpnt),
SHpnt->unchecked_isa_dma);
}
/*
* Function: __scsi_merge_fn()
*
......@@ -236,7 +273,7 @@ __inline static int __scsi_merge_fn(request_queue_t * q,
* the DMA threshold boundary.
*/
if (dma_host &&
virt_to_phys(req->bhtail->b_data - 1) == ISA_DMA_THRESHOLD) {
virt_to_phys(req->bhtail->b_data) - 1 == ISA_DMA_THRESHOLD) {
goto new_segment;
}
if (CONTIGUOUS_BUFFERS(req->bhtail, bh)) {
......@@ -256,7 +293,7 @@ __inline static int __scsi_merge_fn(request_queue_t * q,
* the DMA threshold boundary.
*/
if (dma_host &&
virt_to_phys(bh->b_data - 1) == ISA_DMA_THRESHOLD) {
virt_to_phys(bh->b_data) - 1 == ISA_DMA_THRESHOLD) {
goto new_segment;
}
if (CONTIGUOUS_BUFFERS(bh, req->bh)) {
......@@ -380,7 +417,7 @@ __inline static int __scsi_merge_requests_fn(request_queue_t * q,
* the DMA threshold boundary.
*/
if (dma_host &&
virt_to_phys(req->bhtail->b_data - 1) == ISA_DMA_THRESHOLD) {
virt_to_phys(req->bhtail->b_data) - 1 == ISA_DMA_THRESHOLD) {
goto dont_combine;
}
if (CONTIGUOUS_BUFFERS(req->bhtail, next->bh)) {
......@@ -573,7 +610,7 @@ __inline static int __init_io(Scsi_Cmnd * SCpnt,
bh; bh = bh->b_reqnext) {
if (use_clustering && bhprev != NULL) {
if (dma_host &&
virt_to_phys(bhprev->b_data - 1) == ISA_DMA_THRESHOLD) {
virt_to_phys(bhprev->b_data) - 1 == ISA_DMA_THRESHOLD) {
/* Nothing - fall through */
} else if (CONTIGUOUS_BUFFERS(bhprev, bh)) {
/*
......@@ -612,7 +649,23 @@ __inline static int __init_io(Scsi_Cmnd * SCpnt,
for (i = 0; i < count; i++) {
SCpnt->request_bufflen += sgpnt[i].length;
if (virt_to_phys(sgpnt[i].address) + sgpnt[i].length - 1 >
ISA_DMA_THRESHOLD && !sgpnt[count].alt_address) {
ISA_DMA_THRESHOLD) {
if( scsi_dma_free_sectors <= 10 ) {
/*
* If the DMA pool is nearly empty, then
* let's stop here. Don't make this request
* any larger. This is kind of a safety valve
* that we use - we could get screwed later on
* if we run out completely.
*/
SCpnt->request_bufflen -= sgpnt[i].length;
SCpnt->use_sg = i;
if (i == 0) {
panic("DMA pool exhausted");
}
break;
}
sgpnt[i].alt_address = sgpnt[i].address;
sgpnt[i].address =
(char *) scsi_malloc(sgpnt[i].length);
......
......@@ -981,8 +981,10 @@ static int sg_init()
SCSI_LOG_TIMEOUT(3, printk("sg_init\n"));
sg_dev_arr = (Sg_device *)
scsi_init_malloc((sg_template.dev_noticed + SG_EXTRA_DEVS)
* sizeof(Sg_device), GFP_ATOMIC);
kmalloc((sg_template.dev_noticed + SG_EXTRA_DEVS)
* sizeof(Sg_device), GFP_ATOMIC);
memset(sg_dev_arr, 0, (sg_template.dev_noticed + SG_EXTRA_DEVS)
* sizeof(Sg_device));
if (NULL == sg_dev_arr) {
printk("sg_init: no space for sg_dev_arr\n");
return 1;
......@@ -1085,9 +1087,7 @@ void cleanup_module( void)
if(sg_dev_arr != NULL) {
/* Really worrying situation of writes still pending and get here */
/* Strategy: shorten timeout on release + wait on detach ... */
scsi_init_free((char *) sg_dev_arr,
(sg_template.dev_noticed + SG_EXTRA_DEVS)
* sizeof(Sg_device));
kfree((char *) sg_dev_arr);
sg_dev_arr = NULL;
}
sg_template.dev_max = 0;
......
......@@ -25,7 +25,7 @@
#define DUMMY_ROWS 25
#endif
static const char __init *dummycon_startup(void)
static const char *__init dummycon_startup(void)
{
return "dummy device";
}
......
......@@ -683,7 +683,7 @@ static void fbcon_setup(int con, int init, int logo)
}
if (save) {
q = (unsigned short *)(conp->vc_origin + conp->vc_size_row * old_rows);
memcpy(q, save, logo_lines * nr_cols * 2);
scr_memcpyw(q, save, logo_lines * nr_cols * 2);
conp->vc_y += logo_lines;
conp->vc_pos += logo_lines * conp->vc_size_row;
kfree(save);
......
......@@ -937,7 +937,7 @@ static int tgafb_blank(int blank, struct fb_info_gen *info)
static void tgafb_set_disp(const void *fb_par, struct display *disp,
struct fb_info_gen *info)
{
disp->screen_base = ioremap(fb_info.tga_fb_base, 0);
disp->screen_base = fb_info.tga_fb_base;
switch (fb_info.tga_type) {
#ifdef FBCON_HAS_CFB8
case 0: /* 8-plane */
......@@ -1034,14 +1034,16 @@ int __init tgafb_init(void)
pdev = pci_find_device(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TGA, NULL);
if (!pdev)
return -ENXIO;
fb_info.tga_mem_base = pdev->resource[0].start;
fb_info.tga_mem_base = ioremap(pdev->resource[0].start, 0);
#ifdef DEBUG
printk(KERN_DEBUG "tgafb_init: mem_base 0x%x\n", fb_info.tga_mem_base);
#endif /* DEBUG */
fb_info.tga_type = (readl((unsigned long)fb_info.tga_mem_base) >> 12) & 0x0f;
fb_info.tga_regs_base = ((unsigned long)fb_info.tga_mem_base + TGA_REGS_OFFSET);
fb_info.tga_fb_base = ((unsigned long)fb_info.tga_mem_base + fb_offset_presets[fb_info.tga_type]);
fb_info.tga_type = (readl(fb_info.tga_mem_base) >> 12) & 0x0f;
fb_info.tga_regs_base = fb_info.tga_mem_base + TGA_REGS_OFFSET;
fb_info.tga_fb_base = (fb_info.tga_mem_base
+ fb_offset_presets[fb_info.tga_type]);
/* XXX Why the fuck is it called modename if it identifies the board? */
strcpy (fb_info.gen.info.modename,"DEC 21030 TGA ");
......
......@@ -168,7 +168,7 @@ struct tgafb_info {
/* Device dependent information */
int tga_type; /* TGA type: {8plane, 24plane, 24plusZ} */
unsigned int tga_mem_base;
unsigned long tga_mem_base;
unsigned long tga_fb_base;
unsigned long tga_regs_base;
struct fb_var_screeninfo default_var; /* default video mode */
......
......@@ -15,46 +15,3 @@
* This file will contain the Access Control Lists management for the
* second extended file system.
*/
/*
* ext2_permission ()
*
* Check for access rights
*/
int ext2_permission (struct inode * inode, int mask)
{
unsigned short mode = inode->i_mode;
/*
* Nobody gets write access to a file on a readonly-fs
*/
if ((mask & S_IWOTH) &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) &&
IS_RDONLY(inode))
return -EROFS;
/*
* Nobody gets write access to an immutable file
*/
if ((mask & S_IWOTH) && IS_IMMUTABLE(inode))
return -EACCES;
/*
* If no ACL, checks using the file mode
*/
else if (current->fsuid == inode->i_uid)
mode >>= 6;
else if (in_group_p (inode->i_gid))
mode >>= 3;
/*
* Access is always granted for root. We now check last,
* though, for BSD process accounting correctness
*/
if (((mode & mask & S_IRWXO) == mask) || capable(CAP_DAC_OVERRIDE))
return 0;
if ((mask == S_IROTH) ||
(S_ISDIR(mode) && !(mask & ~(S_IROTH | S_IXOTH))))
if (capable(CAP_DAC_READ_SEARCH))
return 0;
return -EACCES;
}
......@@ -67,7 +67,7 @@ struct inode_operations ext2_dir_inode_operations = {
NULL, /* readpage */
NULL, /* writepage */
NULL, /* truncate */
ext2_permission, /* permission */
NULL, /* permission */
NULL /* revalidate */
};
......
......@@ -177,6 +177,6 @@ struct inode_operations ext2_file_inode_operations = {
block_read_full_page, /* readpage */
block_write_full_page, /* writepage */
ext2_truncate, /* truncate */
ext2_permission, /* permission */
NULL, /* permission */
NULL, /* revalidate */
};
......@@ -12,7 +12,8 @@
* This file is based on:
*
* IronGate management library, (c) 1999 Alpha Processor, Inc.
* Begun 19 January 1999 by Stig Telfer, Alpha Processor, Inc.
* Copyright (C) 1999 Alpha Processor, Inc.,
* (David Daniel, Stig Telfer, Soohoon Lee)
*/
/*
......@@ -24,9 +25,9 @@
*
*/
/* Eh? Not offset from memory? */
#define IRONGATE_DMA_WIN_BASE (0U)
#define IRONGATE_DMA_WIN_SIZE (0U)
#define IRONGATE_DMA_WIN_BASE (0UL)
#define IRONGATE_DMA_WIN_SIZE (0UL)
/*
* Irongate CSR map. Some of the CSRs are 8 or 16 bits, but all access
......@@ -334,9 +335,7 @@ typedef union {
* Memory spaces:
*/
/* ??? the following probably needs fixing */
/* Irongate is consistent with a subset of the Tsunami memory map */
/* XXX: Do we need to conditionalize on this? */
#ifdef USE_48_BIT_KSEG
#define IRONGATE_BIAS 0x80000000000UL
#else
......@@ -349,7 +348,6 @@ typedef union {
#define IRONGATE_IO (IDENT_ADDR | IRONGATE_BIAS | 0x1FC000000UL)
#define IRONGATE_CONF (IDENT_ADDR | IRONGATE_BIAS | 0x1FE000000UL)
#define IRONGATE0 ((Irongate0 *) IRONGATE_CONF)
/*
......
......@@ -9,16 +9,19 @@
* Delay routines, using a pre-computed "loops_per_second" value.
*/
/* We can make the delay loop inline, but we have to be very careful wrt
scheduling for ev6 machines, so that we keep a consistent number of
iterations for all invocations. */
extern __inline__ void
__delay(unsigned long loops)
{
register unsigned long r0 __asm__("$0") = loops;
#ifdef MODULE
__asm__ __volatile__("lda $28,___delay; jsr $28,($28),0"
: "=r"(r0) : "r"(r0) : "$28");
#else
__asm__ __volatile__("bsr $28,___delay" : "=r"(r0) : "r"(r0) : "$28");
#endif
__asm__ __volatile__(
".align 4\n"
"1: subq %0,1,%0\n"
" bge %0,1b\n"
" nop"
: "=r" (loops) : "0"(loops));
}
/*
......
......@@ -53,10 +53,7 @@ static inline void set_hae(unsigned long new_hae)
*/
static inline unsigned long virt_to_phys(volatile void * address)
{
/* Conditionalize this on the CPU? This here is 40 bits,
whereas EV4 only supports 34. But KSEG is farther out
so it shouldn't _really_ matter. */
return 0xffffffffffUL & (unsigned long) address;
return (unsigned long)address - IDENT_ADDR;
}
static inline void * phys_to_virt(unsigned long address)
......@@ -266,6 +263,11 @@ static inline void iounmap(void *addr)
{
}
static inline void * ioremap_nocache(unsigned long offset, unsigned long size)
{
return ioremap(offset, size);
}
/* Indirect back to the macros provided. */
extern unsigned long ___raw_readb(unsigned long addr);
......
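On the virt_to_phys() change above: Alpha kernel pointers in the identity-mapped KSEG segment are formed as IDENT_ADDR plus the physical address, so subtracting IDENT_ADDR recovers the physical address directly. The old 40-bit mask gave the same answer only because the low 40 bits of the usual KSEG base are zero and physical addresses fit in 40 bits; the subtraction also stays correct for larger physical spaces or an alternative KSEG base. For example, assuming the conventional IDENT_ADDR of 0xfffffc0000000000, the kernel address 0xfffffc0000300000 translates to physical 0x300000 under either form.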
......@@ -296,7 +296,7 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define kern_addr_valid(addr) (1)
#define io_remap_page_range(start, busaddr, size, prot) \
remap_page_range(start, virt_to_phys(__ioremap(busaddr)), size, prot)
remap_page_range(start, virt_to_phys(__ioremap(busaddr, 0)), size, prot)
#define pte_ERROR(e) \
printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
......
......@@ -245,6 +245,7 @@ static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1) \
return __r0; \
}
__CALL_PAL_W1(cflush, unsigned long);
__CALL_PAL_R0(rdmces, unsigned long);
__CALL_PAL_R0(rdps, unsigned long);
__CALL_PAL_R0(rdusp, unsigned long);
......
......@@ -146,6 +146,7 @@
#define PCI_DEVICE_ID_NCR_53C875 0x000f
#define PCI_DEVICE_ID_NCR_53C1510 0x0010
#define PCI_DEVICE_ID_NCR_53C875J 0x008f
#define PCI_DEVICE_ID_NCR_YELLOWFIN 0x0701
#define PCI_VENDOR_ID_ATI 0x1002
#define PCI_DEVICE_ID_ATI_68800 0x4158
......@@ -627,6 +628,7 @@
#define PCI_DEVICE_ID_INTERG_1682 0x1682
#define PCI_DEVICE_ID_INTERG_2000 0x2000
#define PCI_DEVICE_ID_INTERG_2010 0x2010
#define PCI_DEVICE_ID_INTERG_5000 0x5000
#define PCI_VENDOR_ID_REALTEK 0x10ec
#define PCI_DEVICE_ID_REALTEK_8029 0x8029
......
......@@ -130,7 +130,7 @@ int ipc_addid(struct ipc_ids* ids, struct ipc_perm* new, int size)
if(ids->seq > ids->seq_max)
ids->seq = 0;
ipc_lock(ids,id);
spin_lock(&ids->ary);
ids->entries[id].p = new;
return id;
}
......
......@@ -16,10 +16,6 @@
#include <linux/sysrq.h>
#include <linux/interrupt.h>
#ifdef __alpha__
#include <asm/machvec.h>
#endif
asmlinkage void sys_sync(void); /* it's really int */
extern void unblank_console(void);
extern int C_A_D;
......@@ -77,10 +73,6 @@ NORET_TYPE void panic(const char * fmt, ...)
}
#ifdef __sparc__
printk("Press L1-A to return to the boot prom\n");
#endif
#ifdef __alpha__
if (alpha_using_srm)
halt();
#endif
sti();
for(;;) {
......
......@@ -55,7 +55,7 @@ int get_resource_list(struct resource *root, char *buf, int size)
int retval;
fmt = " %08lx-%08lx : %s\n";
if (root == &ioport_resource)
if (root->end < 0x10000)
fmt = " %04lx-%04lx : %s\n";
read_lock(&resource_lock);
retval = do_resource_list(root->child, fmt, 8, buf, buf + size) - buf;
......
......@@ -165,11 +165,15 @@ static inline unsigned long move_vma(struct vm_area_struct * vma,
*
* MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
* This option implies MREMAP_MAYMOVE.
*
* "__new_addr" toying in order to not change the saved stack layout
* for old x86 binaries that don't want %edi to change..
*/
asmlinkage unsigned long sys_mremap(unsigned long addr,
unsigned long old_len, unsigned long new_len,
unsigned long flags, unsigned long new_addr)
unsigned long flags, unsigned long __new_addr)
{
unsigned long new_addr = __new_addr;
struct vm_area_struct *vma;
unsigned long ret = -EINVAL;
......