Commit bd81e60e authored by Linus Torvalds

Merge http://kernel-acme.bkbits.net:8080/copy_to_from_user-2.5

into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
parents 73b8dabc 916e75e3
......@@ -88,6 +88,7 @@ prototypes:
void (*read_inode) (struct inode *);
void (*write_inode) (struct inode *, int);
void (*put_inode) (struct inode *);
void (*drop_inode) (struct inode *);
void (*delete_inode) (struct inode *);
void (*put_super) (struct super_block *);
void (*write_super) (struct super_block *);
......@@ -102,6 +103,7 @@ locking rules:
read_inode: yes (see below)
write_inode: no
put_inode: no
drop_inode: no !!!inode_lock!!!
delete_inode: no
clear_inode: no
put_super: yes yes maybe (see below)
......
......@@ -178,6 +178,7 @@ struct super_operations {
void (*read_inode) (struct inode *);
void (*write_inode) (struct inode *, int);
void (*put_inode) (struct inode *);
void (*drop_inode) (struct inode *);
void (*delete_inode) (struct inode *);
int (*notify_change) (struct dentry *, struct iattr *);
void (*put_super) (struct super_block *);
......@@ -204,6 +205,19 @@ or bottom half).
put_inode: called when the VFS inode is removed from the inode
cache. This method is optional
drop_inode: called when the last access to the inode is dropped,
with the inode_lock spinlock held.
This method should be either NULL (normal unix filesystem
semantics) or "generic_delete_inode" (for filesystems that do not
want to cache inodes - causing "delete_inode" to always be
called regardless of the value of i_nlink)
The "generic_delete_inode()" behaviour is equivalent to the
old practice of using "force_delete" in the put_inode() case,
but does not have the races that the "force_delete()" approach
had.
delete_inode: called when the VFS wants to delete an inode
notify_change: called when VFS inode attributes are changed. If this
......
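The conversion this hunk documents is applied throughout the merge (pcihpfs, usbfs, devfs, driverfs, procfs, ramfs, smbfs, ncpfs, shmem below). A minimal sketch of the idiom for a filesystem author, using the 2.5-era labeled-initializer style the diff itself uses; "examplefs" is illustrative, not from this commit:

	#include <linux/fs.h>

	/* Before this merge (racy):
	 *	put_inode:	force_delete,
	 * After this merge, for filesystems that must not cache inodes:
	 */
	static struct super_operations examplefs_ops = {
		statfs:		simple_statfs,
		drop_inode:	generic_delete_inode,
	};

Leaving drop_inode NULL keeps the normal UNIX behaviour (generic_drop_inode); force_delete itself is removed by this merge.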
......@@ -1388,6 +1388,14 @@ M: emoenke@gwdg.de
L: linux-kernel@vger.kernel.org
S: Maintained
SCHEDULER
P: Ingo Molnar
M: mingo@elte.hu
P: Robert Love
M: rml@tech9.net
L: linux-kernel@vger.kernel.org
S: Maintained
SCSI CDROM DRIVER
P: Jens Axboe
M: axboe@suse.de
......
......@@ -359,10 +359,12 @@ static void inline leave_mm (unsigned long cpu)
asmlinkage void smp_invalidate_interrupt (void)
{
unsigned long cpu = smp_processor_id();
unsigned long cpu;
cpu = get_cpu();
if (!test_bit(cpu, &flush_cpumask))
return;
goto out;
/*
* This was a BUG() but until someone can quote me the
* line from the intel manual that guarantees an IPI to
......@@ -383,6 +385,9 @@ asmlinkage void smp_invalidate_interrupt (void)
}
ack_APIC_irq();
clear_bit(cpu, &flush_cpumask);
out:
put_cpu();
}
static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
......@@ -432,16 +437,23 @@ static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
void flush_tlb_current_task(void)
{
struct mm_struct *mm = current->mm;
unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
unsigned long cpu_mask;
preempt_disable();
cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
local_flush_tlb();
if (cpu_mask)
flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
preempt_enable();
}
void flush_tlb_mm (struct mm_struct * mm)
{
unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
unsigned long cpu_mask;
preempt_disable();
cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
if (current->active_mm == mm) {
if (current->mm)
......@@ -451,12 +463,17 @@ void flush_tlb_mm (struct mm_struct * mm)
}
if (cpu_mask)
flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
preempt_enable();
}
void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
unsigned long cpu_mask;
preempt_disable();
cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
if (current->active_mm == mm) {
if(current->mm)
......@@ -467,6 +484,8 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
if (cpu_mask)
flush_tlb_others(cpu_mask, mm, va);
preempt_enable();
}
static inline void do_flush_tlb_all_local(void)
......
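The pattern behind these smp.c changes: smp_processor_id() is only stable while preemption is disabled, so the cpu_vm_mask computation and the local flush now sit inside a preempt_disable()/preempt_enable() pair, and the interrupt handler uses get_cpu()/put_cpu(). A hedged sketch of the general rule, with an illustrative function name (header placement is an assumption for the 2.5 tree of this era):

	#include <linux/smp.h>

	void example_per_cpu_work(void)		/* hypothetical */
	{
		int cpu;

		cpu = get_cpu();	/* disables preemption, returns this CPU's id */
		/* ... per-CPU state for 'cpu' is safe to touch here:
		 * the task cannot be migrated to another CPU ... */
		put_cpu();		/* re-enables preemption */
	}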
......@@ -99,6 +99,17 @@ static void __devinit pci_fixup_ide_trash(struct pci_dev *d)
d->resource[i].start = d->resource[i].end = d->resource[i].flags = 0;
}
static void __devinit pci_fixup_ide_exbar(struct pci_dev *d)
{
/*
* Some new Intel IDE controllers have an EXBAR register for
* MMIO instead of PIO. It's unused, undocumented (though maybe
* functional). BIOSes often assign a conflicting memory address
* to it. Just kill it.
*/
d->resource[5].start = d->resource[5].end = d->resource[5].flags = 0;
}
static void __devinit pci_fixup_latency(struct pci_dev *d)
{
/*
......@@ -174,6 +185,9 @@ struct pci_fixup pcibios_fixups[] = {
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, pci_fixup_latency },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5598, pci_fixup_latency },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, pci_fixup_piix4_acpi },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, pci_fixup_ide_exbar },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_11, pci_fixup_ide_exbar },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_9, pci_fixup_ide_exbar },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, pci_fixup_via_northbridge_bug },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8622, pci_fixup_via_northbridge_bug },
{ PCI_FIXUP_HEADER, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, pci_fixup_via_northbridge_bug },
......
......@@ -290,7 +290,7 @@ static struct inode_operations pcihpfs_dir_inode_operations = {
static struct super_operations pcihpfs_ops = {
statfs: simple_statfs,
put_inode: force_delete,
drop_inode: generic_delete_inode,
};
static int pcihpfs_fill_super(struct super_block *sb, void *data, int silent)
......
......@@ -409,13 +409,14 @@ CONFIG_BLK_DEV_PIIX
the chip to optimum performance.
CONFIG_BLK_DEV_PIIX_TRY133
The ICH2, ICH2-M, ICH3, ICH3-M, ICH3-S and CICH chips can support
UDMA133 in hardware, even though the specifications of the chips
say otherwise. By enabling this option, you allow the driver to
enable the UDMA133 mode on these chips.
If you want to stay on the safe side, say N here.
If you prefer maximum performance, say Y here.
The ICH2, ICH2-M, ICH3, ICH3-M, ICH3-S, ICH-4 and CICH chips can
support UDMA133 in hardware, even though the specifications of
the chips say otherwise. By enabling this option, you allow the
driver to enable the UDMA133 mode on these chips. Note that if
it doesn't work, your data gets lost, you're on your own, don't
expect any help.
Say N here, unless you really know what you are doing.
CONFIG_BLK_DEV_PDC202XX
Promise Ultra33 or PDC20246
......
......@@ -53,7 +53,7 @@ if [ "$CONFIG_BLK_DEV_IDE" != "n" ]; then
dep_bool ' HPT366 chipset support' CONFIG_BLK_DEV_HPT366 $CONFIG_BLK_DEV_IDEDMA_PCI
dep_bool ' Intel and Efar (SMsC) chipset support' CONFIG_BLK_DEV_PIIX $CONFIG_BLK_DEV_IDEDMA_PCI
if [ "$CONFIG_BLK_DEV_PIIX" = "y" ]; then
dep_bool ' Use UDMA133 even on ICH2, ICH3 and CICH chips (EXPERIMENTAL)' CONFIG_BLK_DEV_PIIX_TRY133 $CONFIG_EXPERIMENTAL
dep_bool ' Allow undocumented UDMA133 on ICH chips (EXPERIMENTAL)' CONFIG_BLK_DEV_PIIX_TRY133 $CONFIG_EXPERIMENTAL
fi
if [ "$CONFIG_MIPS_ITE8172" = "y" -o "$CONFIG_MIPS_IVR" = "y" ]; then
dep_mbool ' IT8172 IDE support' CONFIG_BLK_DEV_IT8172 $CONFIG_BLK_DEV_IDEDMA_PCI
......
......@@ -159,4 +159,18 @@ void ata_out_regfile(struct ata_device *drive, struct hd_drive_task_hdr *rf)
OUT_BYTE(rf->high_cylinder, ch->io_ports[IDE_HCYL_OFFSET]);
}
/*
* Read in a complete register file.
*/
void ata_in_regfile(struct ata_device *drive, struct hd_drive_task_hdr *rf)
{
struct ata_channel *ch = drive->channel;
rf->sector_count = IN_BYTE(ch->io_ports[IDE_NSECTOR_OFFSET]);
rf->sector_number = IN_BYTE(ch->io_ports[IDE_SECTOR_OFFSET]);
rf->low_cylinder = IN_BYTE(ch->io_ports[IDE_LCYL_OFFSET]);
rf->high_cylinder = IN_BYTE(ch->io_ports[IDE_HCYL_OFFSET]);
}
MODULE_LICENSE("GPL");
......@@ -253,7 +253,7 @@ static int hpt34x_udma_init(struct ata_device *drive, struct request *rq)
unsigned int count;
u8 cmd;
if (!(count = udma_new_table(ch, rq)))
if (!(count = udma_new_table(drive, rq)))
return 1; /* try PIO instead of DMA */
if (rq_data_dir(rq) == READ)
......
......@@ -275,8 +275,9 @@ static void icside_maskproc(struct ata_device *drive)
#define NR_ENTRIES 256
#define TABLE_SIZE (NR_ENTRIES * 8)
static int ide_build_sglist(struct ata_channel *ch, struct request *rq)
static int ide_build_sglist(struct ata_device *drive, struct request *rq)
{
struct ata_channel *ch = drive->channel;
struct scatterlist *sg = ch->sg_table;
int nents;
......@@ -294,7 +295,7 @@ static int ide_build_sglist(struct ata_channel *ch, struct request *rq)
sg->length = rq->nr_sectors * SECTOR_SIZE;
nents = 1;
} else {
nents = blk_rq_map_sg(rq->q, rq, sg);
nents = blk_rq_map_sg(&drive->queue, rq, sg);
if (rq->q && nents > rq->nr_phys_segments)
printk("icside: received %d segments, build %d\n",
......@@ -586,7 +587,7 @@ static void icside_dma_timeout(struct ata_device *drive)
{
printk(KERN_ERR "ATA: %s: UDMA timeout occured:", drive->name);
ata_status(drive, 0, 0);
ide_dump_status(drive, NULL, "UDMA timeout", drive->status);
ata_dump(drive, NULL, "UDMA timeout");
}
static void icside_irq_lost(struct ata_device *drive)
......
......@@ -614,7 +614,7 @@ static int cdrom_decode_status(ide_startstop_t *startstop, struct ata_device *dr
return 0;
} else if (!pc->quiet) {
/* Otherwise, print an error. */
ide_dump_status(drive, rq, "packet command error", drive->status);
ata_dump(drive, rq, "packet command error");
}
/* Set the error flag and complete the request.
......@@ -662,13 +662,13 @@ static int cdrom_decode_status(ide_startstop_t *startstop, struct ata_device *dr
sense_key == DATA_PROTECT) {
/* No point in retrying after an illegal
request or data protect error.*/
ide_dump_status(drive, rq, "command error", drive->status);
ata_dump(drive, rq, "command error");
cdrom_end_request(drive, rq, 0);
} else if (sense_key == MEDIUM_ERROR) {
/* No point in re-trying a zillion times on a bad
* sector. The error is not correctable at all.
*/
ide_dump_status(drive, rq, "media error (bad sector)", drive->status);
ata_dump(drive, rq, "media error (bad sector)");
cdrom_end_request(drive, rq, 0);
} else if ((err & ~ABRT_ERR) != 0) {
/* Go to the default handler
......
......@@ -405,10 +405,7 @@ void ide_end_drive_cmd(struct ata_device *drive, struct request *rq)
rq->errors = !ata_status(drive, READY_STAT, BAD_STAT);
if (ar) {
ar->taskfile.feature = IN_BYTE(IDE_ERROR_REG);
ar->taskfile.sector_count = IN_BYTE(IDE_NSECTOR_REG);
ar->taskfile.sector_number = IN_BYTE(IDE_SECTOR_REG);
ar->taskfile.low_cylinder = IN_BYTE(IDE_LCYL_REG);
ar->taskfile.high_cylinder = IN_BYTE(IDE_HCYL_REG);
ata_in_regfile(drive, &ar->taskfile);
ar->taskfile.device_head = IN_BYTE(IDE_SELECT_REG);
if ((drive->id->command_set_2 & 0x0400) &&
(drive->id->cfs_enable_2 & 0x0400) &&
......@@ -416,10 +413,7 @@ void ide_end_drive_cmd(struct ata_device *drive, struct request *rq)
/* The following command goes to the hob file! */
OUT_BYTE(0x80, drive->channel->io_ports[IDE_CONTROL_OFFSET]);
ar->hobfile.feature = IN_BYTE(IDE_FEATURE_REG);
ar->hobfile.sector_count = IN_BYTE(IDE_NSECTOR_REG);
ar->hobfile.sector_number = IN_BYTE(IDE_SECTOR_REG);
ar->hobfile.low_cylinder = IN_BYTE(IDE_LCYL_REG);
ar->hobfile.high_cylinder = IN_BYTE(IDE_HCYL_REG);
ata_in_regfile(drive, &ar->hobfile);
}
}
}
......@@ -437,46 +431,46 @@ struct ata_bit_messages {
};
static struct ata_bit_messages ata_status_msgs[] = {
{ BUSY_STAT, BUSY_STAT, "Busy" },
{ READY_STAT, READY_STAT, "DriveReady" },
{ WRERR_STAT, WRERR_STAT, "DeviceFault" },
{ SEEK_STAT, SEEK_STAT, "SeekComplete" },
{ DRQ_STAT, DRQ_STAT, "DataRequest" },
{ ECC_STAT, ECC_STAT, "CorrectedError" },
{ INDEX_STAT, INDEX_STAT, "Index" },
{ ERR_STAT, ERR_STAT, "Error" }
{ BUSY_STAT, BUSY_STAT, "busy" },
{ READY_STAT, READY_STAT, "drive ready" },
{ WRERR_STAT, WRERR_STAT, "device fault" },
{ SEEK_STAT, SEEK_STAT, "seek complete" },
{ DRQ_STAT, DRQ_STAT, "data request" },
{ ECC_STAT, ECC_STAT, "corrected error" },
{ INDEX_STAT, INDEX_STAT, "index" },
{ ERR_STAT, ERR_STAT, "error" }
};
static struct ata_bit_messages ata_error_msgs[] = {
{ ICRC_ERR|ABRT_ERR, ABRT_ERR, "DriveStatusError" },
{ ICRC_ERR|ABRT_ERR, ICRC_ERR, "BadSector" },
{ ICRC_ERR|ABRT_ERR, ICRC_ERR|ABRT_ERR, "BadCRC" },
{ ECC_ERR, ECC_ERR, "UncorrectableError" },
{ ID_ERR, ID_ERR, "SectorIdNotFound" },
{ TRK0_ERR, TRK0_ERR, "TrackZeroNotFound" },
{ MARK_ERR, MARK_ERR, "AddrMarkNotFound" }
{ ICRC_ERR|ABRT_ERR, ABRT_ERR, "drive status error" },
{ ICRC_ERR|ABRT_ERR, ICRC_ERR, "bad sectorr" },
{ ICRC_ERR|ABRT_ERR, ICRC_ERR|ABRT_ERR, "invalid checksum" },
{ ECC_ERR, ECC_ERR, "uncorrectable error" },
{ ID_ERR, ID_ERR, "sector id not found" },
{ TRK0_ERR, TRK0_ERR, "track zero not found" },
{ MARK_ERR, MARK_ERR, "addr mark not found" }
};
static void ata_dump_bits(struct ata_bit_messages *msgs, int nr, byte bits)
static void dump_bits(struct ata_bit_messages *msgs, int nr, byte bits)
{
int i;
printk(" { ");
printk(" [ ");
for (i = 0; i < nr; i++, msgs++)
if ((bits & msgs->mask) == msgs->match)
printk("%s ", msgs->msg);
printk("} ");
printk("] ");
}
#else
# define ata_dump_bits(msgs,nr,bits) do { } while (0)
# define dump_bits(msgs,nr,bits) do { } while (0)
#endif
/*
* Error reporting, in human readable form (luxurious, but a memory hog).
*/
u8 ide_dump_status(struct ata_device *drive, struct request * rq, const char *msg, u8 stat)
u8 ata_dump(struct ata_device *drive, struct request * rq, const char *msg)
{
unsigned long flags;
u8 err = 0;
......@@ -484,16 +478,16 @@ u8 ide_dump_status(struct ata_device *drive, struct request * rq, const char *ms
__save_flags (flags); /* local CPU only */
ide__sti(); /* local CPU only */
printk("%s: %s: status=0x%02x", drive->name, msg, stat);
ata_dump_bits(ata_status_msgs, ARRAY_SIZE(ata_status_msgs), stat);
printk("%s: %s: status=0x%02x", drive->name, msg, drive->status);
dump_bits(ata_status_msgs, ARRAY_SIZE(ata_status_msgs), drive->status);
printk("\n");
if ((stat & (BUSY_STAT|ERR_STAT)) == ERR_STAT) {
if ((drive->status & (BUSY_STAT|ERR_STAT)) == ERR_STAT) {
err = GET_ERR();
printk("%s: %s: error=0x%02x", drive->name, msg, err);
#if FANCY_STATUS_DUMPS
if (drive->type == ATA_DISK) {
ata_dump_bits(ata_error_msgs, ARRAY_SIZE(ata_error_msgs), err);
dump_bits(ata_error_msgs, ARRAY_SIZE(ata_error_msgs), err);
if ((err & (BBD_ERR | ABRT_ERR)) == BBD_ERR || (err & (ECC_ERR|ID_ERR|MARK_ERR))) {
if ((drive->id->command_set_2 & 0x0400) &&
......@@ -592,7 +586,7 @@ ide_startstop_t ata_error(struct ata_device *drive, struct request *rq, const ch
u8 err;
u8 stat = drive->status;
err = ide_dump_status(drive, rq, msg, stat);
err = ata_dump(drive, rq, msg);
if (!drive || !rq)
return ide_stopped;
......@@ -1476,7 +1470,7 @@ EXPORT_SYMBOL(ide_timer_expiry);
EXPORT_SYMBOL(do_ide_request);
EXPORT_SYMBOL(ide_set_handler);
EXPORT_SYMBOL(ide_dump_status);
EXPORT_SYMBOL(ata_dump);
EXPORT_SYMBOL(ata_error);
EXPORT_SYMBOL(ide_wait_stat);
......
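With the renames above, the status decoding moves from CamelCase words inside braces to lower-case words inside brackets. A failing read under the new ata_dump() would log roughly the following; this is constructed from the message tables and printk format strings above (status 0x51 = drive ready | seek complete | error; error 0x40 = ECC_ERR), not captured output:

	hda: media error (bad sector): status=0x51 [ drive ready seek complete error ]
	hda: media error (bad sector): error=0x40 [ uncorrectable error ]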
......@@ -58,8 +58,9 @@ ide_startstop_t ide_dma_intr(struct ata_device *drive, struct request *rq)
* FIXME: taskfiles should be a map of pages, not a long virt address... /jens
* FIXME: I agree with Jens --mdcki!
*/
static int build_sglist(struct ata_channel *ch, struct request *rq)
static int build_sglist(struct ata_device *drive, struct request *rq)
{
struct ata_channel *ch = drive->channel;
struct scatterlist *sg = ch->sg_table;
int nents = 0;
......@@ -69,7 +70,7 @@ static int build_sglist(struct ata_channel *ch, struct request *rq)
unsigned char *virt_addr = rq->buffer;
int sector_count = rq->nr_sectors;
#else
nents = blk_rq_map_sg(rq->q, rq, ch->sg_table);
nents = blk_rq_map_sg(&drive->queue, rq, ch->sg_table);
if (nents > rq->nr_segments)
printk("ide-dma: received %d segments, build %d\n", rq->nr_segments, nents);
......@@ -99,7 +100,7 @@ static int build_sglist(struct ata_channel *ch, struct request *rq)
sg[nents].length = sector_count * SECTOR_SIZE;
++nents;
} else {
nents = blk_rq_map_sg(rq->q, rq, ch->sg_table);
nents = blk_rq_map_sg(&drive->queue, rq, ch->sg_table);
if (rq->q && nents > rq->nr_phys_segments)
printk("ide-dma: received %d phys segments, build %d\n", rq->nr_phys_segments, nents);
......@@ -150,7 +151,7 @@ int ata_start_dma(struct ata_device *drive, struct request *rq)
reading = 1 << 3;
/* try PIO instead of DMA */
if (!udma_new_table(ch, rq))
if (!udma_new_table(drive, rq))
return 1;
outl(ch->dmatable_dma, dma_base + 4); /* PRD table */
......@@ -306,8 +307,9 @@ void udma_pci_enable(struct ata_device *drive, int on, int verbose)
* This prepares a dma request. Returns 0 if all went okay, returns 1
* otherwise. May also be invoked from trm290.c
*/
int udma_new_table(struct ata_channel *ch, struct request *rq)
int udma_new_table(struct ata_device *drive, struct request *rq)
{
struct ata_channel *ch = drive->channel;
unsigned int *table = ch->dmatable_cpu;
#ifdef CONFIG_BLK_DEV_TRM290
unsigned int is_trm290_chipset = (ch->chipset == ide_trm290);
......@@ -318,7 +320,7 @@ int udma_new_table(struct ata_channel *ch, struct request *rq)
int i;
struct scatterlist *sg;
ch->sg_nents = i = build_sglist(ch, rq);
ch->sg_nents = i = build_sglist(drive, rq);
if (!i)
return 0;
......
/**** vi:set ts=8 sts=8 sw=8:************************************************
*
* $Id: piix.c,v 1.3 2002/03/29 16:06:06 vojtech Exp $
/*
* piix.c, v1.5 2002/05/03
*
* Copyright (c) 2000-2002 Vojtech Pavlik
*
......@@ -9,6 +8,7 @@
* Andre Hedrick
*
* Thanks to Daniela Egbert for advice on PIIX bugs.
* Thanks to Ulf Axelsson for noticing that ICH4 only documents UDMA100.
*/
/*
......@@ -85,13 +85,13 @@ static struct piix_ide_chip {
unsigned short id;
unsigned char flags;
} piix_ide_chips[] = {
{ PCI_DEVICE_ID_INTEL_82801DB_9, PIIX_UDMA_133 | PIIX_PINGPONG }, /* Intel 82801DB ICH4 */
{ PCI_DEVICE_ID_INTEL_82801DB_9, PIIX_UDMA_100 | PIIX_PINGPONG }, /* Intel 82801DB ICH4 */
{ PCI_DEVICE_ID_INTEL_82801CA_11, PIIX_UDMA_100 | PIIX_PINGPONG }, /* Intel 82801CA ICH3/ICH3-S */
{ PCI_DEVICE_ID_INTEL_82801CA_10, PIIX_UDMA_100 | PIIX_PINGPONG }, /* Intel 82801CAM ICH3-M */
{ PCI_DEVICE_ID_INTEL_82801E_9, PIIX_UDMA_100 | PIIX_PINGPONG }, /* Intel 82801E C-ICH */
{ PCI_DEVICE_ID_INTEL_82801BA_9, PIIX_UDMA_100 | PIIX_PINGPONG }, /* Intel 82801BA ICH2 */
{ PCI_DEVICE_ID_INTEL_82801BA_8, PIIX_UDMA_100 | PIIX_PINGPONG }, /* Intel 82801BAM ICH2-M */
{ PCI_DEVICE_ID_INTEL_82801AB_1, PIIX_UDMA_33 | PIIX_PINGPONG }, /* Intel 82801AB ICH0 */
{ PCI_DEVICE_ID_INTEL_82801AB_1, PIIX_UDMA_33 | PIIX_PINGPONG}, /* Intel 82801AB ICH0 */
{ PCI_DEVICE_ID_INTEL_82801AA_1, PIIX_UDMA_66 | PIIX_PINGPONG }, /* Intel 82801AA ICH */
{ PCI_DEVICE_ID_INTEL_82372FB_1, PIIX_UDMA_66 }, /* Intel 82372FB PIIX5 */
{ PCI_DEVICE_ID_INTEL_82443MX_1, PIIX_UDMA_33 }, /* Intel 82443MX MPIIX4 */
......@@ -280,6 +280,7 @@ static int piix_udma_setup(struct ata_device *drive)
*/
static unsigned int __init piix_init_chipset(struct pci_dev *dev)
{
struct pci_dev *orion = NULL;
unsigned int u;
unsigned short w;
unsigned char t;
......@@ -293,18 +294,10 @@ static unsigned int __init piix_init_chipset(struct pci_dev *dev)
if (dev->device == piix_config->id)
break;
if (!piix_config->id) {
printk(KERN_WARNING "PIIX: Unknown PIIX/ICH chip %#x, contact Vojtech Pavlik <vojtech@ucw.cz>\n", dev->device);
return -ENODEV;
}
/*
* Check for possibly broken DMA configs.
*/
{
struct pci_dev *orion = NULL;
if (piix_config->flags & PIIX_CHECK_REV) {
pci_read_config_byte(dev, PCI_REVISION_ID, &t);
if (t < 2) {
......@@ -320,29 +313,6 @@ static unsigned int __init piix_init_chipset(struct pci_dev *dev)
piix_config->flags |= PIIX_NODMA;
}
}
}
/*
* Check 80-wire cable presence.
*/
switch (piix_config->flags & PIIX_UDMA) {
case PIIX_UDMA_66:
if (piix_config->flags && PIIX_VICTORY) {
pci_read_config_byte(dev, PIIX_IDESTAT, &t);
piix_80w = ((t & 2) ? 1 : 0) | ((t & 1) ? 2 : 0);
break;
}
#ifndef CONFIG_BLK_DEV_PIIX_TRY133
case PIIX_UDMA_100:
#endif
case PIIX_UDMA_133:
pci_read_config_dword(dev, PIIX_IDECFG, &u);
piix_80w = ((u & 0x30) ? 1 : 0) | ((u & 0xc0) ? 2 : 0);
break;
}
/*
* Enable ping-pong buffers where applicable.
......@@ -377,32 +347,44 @@ static unsigned int __init piix_init_chipset(struct pci_dev *dev)
return 0;
}
static unsigned int __init piix_ata66_check(struct ata_channel *hwif)
static unsigned int __init piix_ata66_check(struct ata_channel *ch)
{
return ((piix_enabled & piix_80w) >> hwif->unit) & 1;
unsigned char t;
unsigned int u;
if ((piix_config->flags & PIIX_UDMA) < PIIX_UDMA_66)
return 0;
if (piix_config->flags & PIIX_VICTORY) {
pci_read_config_byte(ch->pci_dev, PIIX_IDESTAT, &t);
return ch->unit ? (t & 1) : !!(t & 2);
}
pci_read_config_dword(ch->pci_dev, PIIX_IDECFG, &u);
return ch->unit ? !!(u & 0xc0) : !!(u & 0x30);
}
static void __init piix_init_channel(struct ata_channel *hwif)
static void __init piix_init_channel(struct ata_channel *ch)
{
int i;
hwif->tuneproc = &piix_tune_drive;
hwif->speedproc = &piix_set_drive;
hwif->autodma = 0;
hwif->io_32bit = 1;
hwif->unmask = 1;
ch->tuneproc = &piix_tune_drive;
ch->speedproc = &piix_set_drive;
ch->autodma = 0;
ch->io_32bit = 1;
ch->unmask = 1;
for (i = 0; i < 2; i++) {
hwif->drives[i].autotune = 1;
hwif->drives[i].dn = hwif->unit * 2 + i;
ch->drives[i].autotune = 1;
ch->drives[i].dn = ch->unit * 2 + i;
}
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base) {
hwif->highmem = 1;
hwif->udma_setup = piix_udma_setup;
if (ch->dma_base) {
ch->highmem = 1;
ch->udma_setup = piix_udma_setup;
# ifdef CONFIG_IDEDMA_AUTO
if (!noautodma)
hwif->autodma = 1;
ch->autodma = 1;
# endif
}
#endif
......@@ -412,11 +394,11 @@ static void __init piix_init_channel(struct ata_channel *hwif)
* We allow the BM-DMA driver to work only on enabled interfaces,
* and only if DMA is safe with the chip and bridge.
*/
static void __init piix_init_dma(struct ata_channel *hwif, unsigned long dmabase)
static void __init piix_init_dma(struct ata_channel *ch, unsigned long dmabase)
{
if (((piix_enabled >> hwif->unit) & 1)
if (((piix_enabled >> ch->unit) & 1)
&& !(piix_config->flags & PIIX_NODMA))
ata_init_dma(hwif, dmabase);
ata_init_dma(ch, dmabase);
}
......
......@@ -385,7 +385,7 @@ int ide_config_drive_speed(struct ata_device *drive, byte speed)
enable_irq(ch->irq);
if (error) {
ide_dump_status(drive, NULL, "set_drive_speed_status", drive->status);
ata_dump(drive, NULL, "set drive speed");
return error;
}
......
......@@ -247,8 +247,7 @@ static ide_startstop_t service(struct ata_device *drive, struct request *rq)
OUT_BYTE(WIN_QUEUED_SERVICE, IDE_COMMAND_REG);
if (wait_altstat(drive, &stat, BUSY_STAT)) {
printk(KERN_ERR"%s: BUSY clear took too long\n", __FUNCTION__);
ide_dump_status(drive, rq, __FUNCTION__, stat);
ata_dump(drive, rq, "BUSY clear took too long");
tcq_invalidate_queue(drive);
return ide_stopped;
......@@ -262,7 +261,7 @@ static ide_startstop_t service(struct ata_device *drive, struct request *rq)
* FIXME, invalidate queue
*/
if (stat & ERR_STAT) {
ide_dump_status(drive, rq, __FUNCTION__, stat);
ata_dump(drive, rq, "ERR condition");
tcq_invalidate_queue(drive);
return ide_stopped;
......@@ -328,8 +327,7 @@ static ide_startstop_t dmaq_complete(struct ata_device *drive, struct request *r
* must be end of I/O, check status and complete as necessary
*/
if (!ata_status(drive, READY_STAT, drive->bad_wstat | DRQ_STAT)) {
printk(KERN_ERR "%s: %s: error status %x\n", __FUNCTION__, drive->name, drive->status);
ide_dump_status(drive, rq, __FUNCTION__, drive->status);
ata_dump(drive, rq, __FUNCTION__);
tcq_invalidate_queue(drive);
return ide_stopped;
......@@ -557,7 +555,7 @@ ide_startstop_t udma_tcq_taskfile(struct ata_device *drive, struct request *rq)
OUT_BYTE(args->cmd, IDE_COMMAND_REG);
if (wait_altstat(drive, &stat, BUSY_STAT)) {
ide_dump_status(drive, rq, "queued start", stat);
ata_dump(drive, rq, "queued start");
tcq_invalidate_queue(drive);
return ide_stopped;
}
......@@ -567,7 +565,7 @@ ide_startstop_t udma_tcq_taskfile(struct ata_device *drive, struct request *rq)
#endif
if (stat & ERR_STAT) {
ide_dump_status(drive, rq, "tcq_start", stat);
ata_dump(drive, rq, "tcq_start");
return ide_stopped;
}
......
......@@ -217,7 +217,7 @@ static int trm290_udma_init(struct ata_device *drive, struct request *rq)
writing = 0;
}
if (!(count = udma_new_table(ch, rq))) {
if (!(count = udma_new_table(drive, rq))) {
trm290_prepare_drive(drive, 0); /* select PIO xfer */
return 1; /* try PIO instead of DMA */
}
......
......@@ -191,12 +191,6 @@ static char *statsLabels[] = {
#ifndef RUN_AT
#define RUN_AT(x) (jiffies+(x))
#endif
#ifndef PDE
static inline struct proc_dir_entry *PDE(const struct inode *inode)
{
return inode->u.generic_ip;
}
#endif
/* These variables are for insmod, since it seems that the rates
......@@ -845,7 +839,7 @@ struct airo_info {
struct proc_dir_entry *proc_entry;
struct airo_info *next;
spinlock_t aux_lock;
int flags;
unsigned long flags;
#define FLAG_PROMISC IFF_PROMISC
#define FLAG_RADIO_OFF 0x02
#define FLAG_LOCKED 2
......
......@@ -1152,9 +1152,9 @@ static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
// this code is taken from kernel/sched.c:interruptible_sleep_on_timeout
wait.task = current;
init_waitqueue_entry(&wait, current);
wq_write_lock_irqsave(&adpt_wq_i2o_post.lock,flags);
spin_lock_irqsave(&adpt_wq_i2o_post.lock, flags);
__add_wait_queue(&adpt_wq_i2o_post, &wait);
wq_write_unlock(&adpt_wq_i2o_post.lock);
spin_unlock(&adpt_wq_i2o_post.lock);
msg[2] |= 0x80000000 | ((u32)wait_data->id);
timeout *= HZ;
......@@ -1167,9 +1167,9 @@ static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
schedule_timeout(timeout*HZ);
spin_lock_irq(pHba->host->host_lock);
}
wq_write_lock_irq(&adpt_wq_i2o_post.lock);
spin_lock_irq(&adpt_wq_i2o_post.lock);
__remove_wait_queue(&adpt_wq_i2o_post, &wait);
wq_write_unlock_irqrestore(&adpt_wq_i2o_post.lock,flags);
spin_unlock_irqrestore(&adpt_wq_i2o_post.lock, flags);
if(status == -ETIMEDOUT){
printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
......
......@@ -229,10 +229,15 @@ static void hexdump(u8 *x, int len)
printk("]\n");
}
static inline idescsi_scsi_t *idescsi_private(struct Scsi_Host *host)
{
return (idescsi_scsi_t*) &host[1];
}
static int idescsi_end_request(struct ata_device *drive, struct request *rq, int uptodate)
{
struct Scsi_Host *host = drive->driver_data;
idescsi_scsi_t *scsi = (idescsi_scsi_t *) host->hostdata[0];
idescsi_scsi_t *scsi = idescsi_private(host);
struct atapi_packet_command *pc = (struct atapi_packet_command *) rq->special;
int log = test_bit(IDESCSI_LOG_CMD, &scsi->log);
u8 *scsi_buf;
......@@ -289,7 +294,7 @@ static inline unsigned long get_timeout(struct atapi_packet_command *pc)
static ide_startstop_t idescsi_pc_intr(struct ata_device *drive, struct request *rq)
{
struct Scsi_Host *host = drive->driver_data;
idescsi_scsi_t *scsi = (idescsi_scsi_t *) host->hostdata[0];
idescsi_scsi_t *scsi = idescsi_private(host);
u8 ireason;
int bcount;
struct atapi_packet_command *pc=scsi->pc;
......@@ -372,7 +377,7 @@ static ide_startstop_t idescsi_pc_intr(struct ata_device *drive, struct request
static ide_startstop_t idescsi_transfer_pc(struct ata_device *drive, struct request *rq)
{
struct Scsi_Host *host = drive->driver_data;
idescsi_scsi_t *scsi = (idescsi_scsi_t *) host->hostdata[0];
idescsi_scsi_t *scsi = idescsi_private(host);
struct atapi_packet_command *pc = scsi->pc;
byte ireason;
ide_startstop_t startstop;
......@@ -398,7 +403,7 @@ static ide_startstop_t idescsi_issue_pc(struct ata_device *drive, struct request
struct atapi_packet_command *pc)
{
struct Scsi_Host *host = drive->driver_data;
idescsi_scsi_t *scsi = (idescsi_scsi_t *) host->hostdata[0];
idescsi_scsi_t *scsi = idescsi_private(host);
int bcount;
int dma_ok = 0;
......@@ -473,7 +478,6 @@ static int idescsi_cleanup (struct ata_device *drive)
if (ide_unregister_subdriver (drive)) {
return 1;
}
kfree((idescsi_scsi_t *) host->hostdata[0]);
scsi_unregister(host);
return 0;
......@@ -515,7 +519,7 @@ static const char *idescsi_info(struct Scsi_Host *host)
static int idescsi_ioctl(Scsi_Device *dev, int cmd, void *arg)
{
idescsi_scsi_t *scsi = (idescsi_scsi_t *) dev->host->hostdata[0];
idescsi_scsi_t *scsi = idescsi_private(dev->host);
if (cmd == SG_SET_TRANSFORM) {
if (arg)
......@@ -612,7 +616,7 @@ static inline struct bio *idescsi_dma_bio(struct ata_device *drive, struct atapi
static inline int should_transform(struct ata_device *drive, Scsi_Cmnd *cmd)
{
struct Scsi_Host *host = drive->driver_data;
idescsi_scsi_t *scsi = (idescsi_scsi_t *) host->hostdata[0];
idescsi_scsi_t *scsi = idescsi_private(host);
if (major(cmd->request.rq_dev) == SCSI_GENERIC_MAJOR)
return test_bit(IDESCSI_SG_TRANSFORM, &scsi->transform);
......@@ -621,7 +625,7 @@ static inline int should_transform(struct ata_device *drive, Scsi_Cmnd *cmd)
static int idescsi_queue(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
{
idescsi_scsi_t *scsi = (idescsi_scsi_t *) cmd->host->hostdata[0];
idescsi_scsi_t *scsi = idescsi_private(cmd->host);
struct ata_device *drive = scsi->drive;
struct request *rq = NULL;
struct atapi_packet_command *pc = NULL;
......@@ -691,7 +695,7 @@ static int idescsi_device_reset(Scsi_Cmnd *cmd)
static int idescsi_bios(Disk *disk, kdev_t dev, int *parm)
{
idescsi_scsi_t *scsi = (idescsi_scsi_t *) disk->device->host->hostdata[0];
idescsi_scsi_t *scsi = idescsi_private(disk->device->host);
struct ata_device *drive = scsi->drive;
if (drive->bios_cyl && drive->bios_head && drive->bios_sect) {
......@@ -758,8 +762,7 @@ static void idescsi_attach(struct ata_device *drive)
drive->driver_data = host;
drive->ready_stat = 0;
scsi = kmalloc(sizeof(*scsi), GFP_ATOMIC);
host->hostdata[0] = (unsigned long) scsi;
scsi = idescsi_private(host);
memset(scsi,0, sizeof (*scsi));
scsi->drive = drive;
......
......@@ -298,7 +298,7 @@ static struct inode_operations usbfs_dir_inode_operations = {
static struct super_operations usbfs_ops = {
statfs: simple_statfs,
put_inode: force_delete,
drop_inode: generic_delete_inode,
};
static int usbfs_fill_super(struct super_block *sb, void *data, int silent)
......
......@@ -621,7 +621,7 @@ static struct file_operations bm_status_operations = {
static struct super_operations s_ops = {
statfs: simple_statfs,
put_inode: force_delete,
drop_inode: generic_delete_inode,
clear_inode: bm_clear_inode,
};
......
......@@ -2576,7 +2576,7 @@ static void devfs_clear_inode (struct inode *inode)
static struct super_operations devfs_sops =
{
put_inode: force_delete,
drop_inode: generic_delete_inode,
clear_inode: devfs_clear_inode,
statfs: simple_statfs,
};
......
......@@ -442,7 +442,7 @@ static struct dentry_operations driverfs_dentry_file_ops = {
static struct super_operations driverfs_ops = {
statfs: simple_statfs,
put_inode: force_delete,
drop_inode: generic_delete_inode,
};
static int driverfs_fill_super(struct super_block *sb, void *data, int silent)
......
......@@ -782,32 +782,10 @@ void remove_inode_hash(struct inode *inode)
spin_unlock(&inode_lock);
}
/**
* iput - put an inode
* @inode: inode to put
*
* Puts an inode, dropping its usage count. If the inode use count hits
* zero the inode is also then freed and may be destroyed.
*/
void iput(struct inode *inode)
void generic_delete_inode(struct inode *inode)
{
if (inode) {
struct super_block *sb = inode->i_sb;
struct super_operations *op = NULL;
if (inode->i_state == I_CLEAR)
BUG();
if (sb && sb->s_op)
op = sb->s_op;
if (op && op->put_inode)
op->put_inode(inode);
if (!atomic_dec_and_lock(&inode->i_count, &inode_lock))
return;
struct super_operations *op = inode->i_sb->s_op;
if (!inode->i_nlink) {
list_del(&inode->i_hash);
INIT_LIST_HEAD(&inode->i_hash);
list_del(&inode->i_list);
......@@ -829,7 +807,14 @@ void iput(struct inode *inode)
clear_inode(inode);
if (inode->i_state != I_CLEAR)
BUG();
} else {
destroy_inode(inode);
}
EXPORT_SYMBOL(generic_delete_inode);
static void generic_forget_inode(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
if (!list_empty(&inode->i_hash)) {
if (!(inode->i_state & (I_DIRTY|I_LOCK))) {
list_del(&inode->i_list);
......@@ -851,19 +836,65 @@ void iput(struct inode *inode)
if (inode->i_data.nrpages)
truncate_inode_pages(&inode->i_data, 0);
clear_inode(inode);
}
destroy_inode(inode);
}
}
void force_delete(struct inode *inode)
/*
* Normal UNIX filesystem behaviour: delete the
* inode when the usage count drops to zero, and
* i_nlink is zero.
*/
static void generic_drop_inode(struct inode *inode)
{
if (!inode->i_nlink)
generic_delete_inode(inode);
else
generic_forget_inode(inode);
}
/*
* Called when we're dropping the last reference
* to an inode.
*
* Call the FS "drop()" function, defaulting to
* the legacy UNIX filesystem behaviour.
*
* NOTE! NOTE! NOTE! We're called with the inode lock
* held, and the drop function is supposed to release
* the lock!
*/
static inline void iput_final(struct inode *inode)
{
/*
* Kill off unused inodes ... iput() will unhash and
* delete the inode if we set i_nlink to zero.
struct super_operations *op = inode->i_sb->s_op;
void (*drop)(struct inode *) = generic_drop_inode;
if (op && op->drop_inode)
drop = op->drop_inode;
drop(inode);
}
/**
* iput - put an inode
* @inode: inode to put
*
* Puts an inode, dropping its usage count. If the inode use count hits
* zero the inode is also then freed and may be destroyed.
*/
if (atomic_read(&inode->i_count) == 1)
inode->i_nlink = 0;
void iput(struct inode *inode)
{
if (inode) {
struct super_operations *op = inode->i_sb->s_op;
if (inode->i_state == I_CLEAR)
BUG();
if (op && op->put_inode)
op->put_inode(inode);
if (atomic_dec_and_lock(&inode->i_count, &inode_lock))
iput_final(inode);
}
}
/**
......
......@@ -84,7 +84,7 @@ static struct super_operations ncp_sops =
{
alloc_inode: ncp_alloc_inode,
destroy_inode: ncp_destroy_inode,
put_inode: force_delete,
drop_inode: generic_delete_inode,
delete_inode: ncp_delete_inode,
put_super: ncp_put_super,
statfs: ncp_statfs,
......
......@@ -338,7 +338,14 @@ asmlinkage long sys_access(const char * filename, int mode)
current->fsuid = current->uid;
current->fsgid = current->gid;
/* Clear the capabilities if we switch to a non-root user */
/*
* Clear the capabilities if we switch to a non-root user
*
* FIXME: There is a race here against sys_capset. The
* capabilities can change yet we will restore the old
* value below. We should hold task_capabilities_lock,
* but we cannot because user_path_walk can sleep.
*/
if (current->uid)
cap_clear(current->cap_effective);
else
......
......@@ -121,7 +121,7 @@ static struct super_operations proc_sops = {
alloc_inode: proc_alloc_inode,
destroy_inode: proc_destroy_inode,
read_inode: proc_read_inode,
put_inode: force_delete,
drop_inode: generic_delete_inode,
delete_inode: proc_delete_inode,
statfs: simple_statfs,
};
......
......@@ -277,7 +277,7 @@ static struct inode_operations ramfs_dir_inode_operations = {
static struct super_operations ramfs_ops = {
statfs: simple_statfs,
put_inode: force_delete,
drop_inode: generic_delete_inode,
};
static int ramfs_fill_super(struct super_block * sb, void * data, int silent)
......
......@@ -94,7 +94,7 @@ static struct super_operations smb_sops =
{
alloc_inode: smb_alloc_inode,
destroy_inode: smb_destroy_inode,
put_inode: force_delete,
drop_inode: generic_delete_inode,
delete_inode: smb_delete_inode,
put_super: smb_put_super,
statfs: smb_statfs,
......
......@@ -41,6 +41,10 @@ typedef struct __user_cap_data_struct {
#ifdef __KERNEL__
#include <linux/spinlock.h>
extern spinlock_t task_capability_lock;
/* #define STRICT_CAP_T_TYPECHECKS */
#ifdef STRICT_CAP_T_TYPECHECKS
......
......@@ -800,6 +800,7 @@ struct super_operations {
void (*dirty_inode) (struct inode *);
void (*write_inode) (struct inode *, int);
void (*put_inode) (struct inode *);
void (*drop_inode) (struct inode *);
void (*delete_inode) (struct inode *);
void (*put_super) (struct super_block *);
void (*write_super) (struct super_block *);
......@@ -1183,10 +1184,10 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
extern void inode_init_once(struct inode *);
extern void iput(struct inode *);
extern void force_delete(struct inode *);
extern struct inode * igrab(struct inode *);
extern ino_t iunique(struct super_block *, ino_t);
extern int inode_needs_sync(struct inode *inode);
extern void generic_delete_inode(struct inode *inode);
extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
extern struct inode * iget_locked(struct super_block *, unsigned long);
......
......@@ -610,11 +610,7 @@ extern int ide_end_request(struct ata_device *drive, struct request *, int);
extern void ide_set_handler(struct ata_device *drive, ata_handler_t handler,
unsigned long timeout, ata_expiry_t expiry);
/*
* Error reporting, in human readable form (luxurious, but a memory hog).
*/
extern u8 ide_dump_status(struct ata_device *, struct request *rq, const char *, u8);
extern u8 ata_dump(struct ata_device *, struct request *, const char *);
extern ide_startstop_t ata_error(struct ata_device *, struct request *rq, const char *);
extern void ide_fixstring(char *s, const int bytecount, const int byteswap);
......@@ -799,7 +795,7 @@ extern void udma_pci_timeout(struct ata_device *drive);
extern void udma_pci_irq_lost(struct ata_device *);
extern int udma_pci_setup(struct ata_device *);
extern int udma_new_table(struct ata_channel *, struct request *);
extern int udma_new_table(struct ata_device *, struct request *);
extern void udma_destroy_table(struct ata_channel *);
extern void udma_print(struct ata_device *);
......@@ -834,5 +830,6 @@ extern int ata_status(struct ata_device *, u8, u8);
extern int ata_irq_enable(struct ata_device *, int);
extern void ata_reset(struct ata_channel *);
extern void ata_out_regfile(struct ata_device *, struct hd_drive_task_hdr *);
extern void ata_in_regfile(struct ata_device *, struct hd_drive_task_hdr *);
#endif
......@@ -27,42 +27,8 @@ struct __wait_queue {
};
typedef struct __wait_queue wait_queue_t;
/*
* 'dual' spinlock architecture. Can be switched between spinlock_t and
* rwlock_t locks via changing this define. Since waitqueues are quite
* decoupled in the new architecture, lightweight 'simple' spinlocks give
* us slightly better latencies and smaller waitqueue structure size.
*/
#define USE_RW_WAIT_QUEUE_SPINLOCK 0
#if USE_RW_WAIT_QUEUE_SPINLOCK
# define wq_lock_t rwlock_t
# define WAITQUEUE_RW_LOCK_UNLOCKED RW_LOCK_UNLOCKED
# define wq_read_lock read_lock
# define wq_read_lock_irqsave read_lock_irqsave
# define wq_read_unlock_irqrestore read_unlock_irqrestore
# define wq_read_unlock read_unlock
# define wq_write_lock_irq write_lock_irq
# define wq_write_lock_irqsave write_lock_irqsave
# define wq_write_unlock_irqrestore write_unlock_irqrestore
# define wq_write_unlock write_unlock
#else
# define wq_lock_t spinlock_t
# define WAITQUEUE_RW_LOCK_UNLOCKED SPIN_LOCK_UNLOCKED
# define wq_read_lock spin_lock
# define wq_read_lock_irqsave spin_lock_irqsave
# define wq_read_unlock spin_unlock
# define wq_read_unlock_irqrestore spin_unlock_irqrestore
# define wq_write_lock_irq spin_lock_irq
# define wq_write_lock_irqsave spin_lock_irqsave
# define wq_write_unlock_irqrestore spin_unlock_irqrestore
# define wq_write_unlock spin_unlock
#endif
struct __wait_queue_head {
wq_lock_t lock;
spinlock_t lock;
struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;
......@@ -80,7 +46,7 @@ typedef struct __wait_queue_head wait_queue_head_t;
wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
lock: WAITQUEUE_RW_LOCK_UNLOCKED, \
lock: SPIN_LOCK_UNLOCKED, \
task_list: { &(name).task_list, &(name).task_list } }
#define DECLARE_WAIT_QUEUE_HEAD(name) \
......@@ -88,7 +54,7 @@ typedef struct __wait_queue_head wait_queue_head_t;
static inline void init_waitqueue_head(wait_queue_head_t *q)
{
q->lock = WAITQUEUE_RW_LOCK_UNLOCKED;
q->lock = SPIN_LOCK_UNLOCKED;
INIT_LIST_HEAD(&q->task_list);
}
......
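With the wq_* macro layer gone, open-coded waiters manipulate q->lock as a plain spinlock. A hedged sketch of the sleep pattern that the dpt_i2o.c hunk above and the fork.c/sched.c hunks below all follow (queue name and the elided condition are illustrative):

	#include <linux/wait.h>
	#include <linux/spinlock.h>
	#include <linux/sched.h>

	static DECLARE_WAIT_QUEUE_HEAD(example_wq);	/* hypothetical queue */

	void example_sleep(void)
	{
		unsigned long flags;
		wait_queue_t wait;

		init_waitqueue_entry(&wait, current);

		spin_lock_irqsave(&example_wq.lock, flags);
		__add_wait_queue(&example_wq, &wait);
		spin_unlock_irqrestore(&example_wq.lock, flags);

		/* ... set the task state and wait for the event ... */

		spin_lock_irqsave(&example_wq.lock, flags);
		__remove_wait_queue(&example_wq, &wait);
		spin_unlock_irqrestore(&example_wq.lock, flags);
	}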
......@@ -13,7 +13,7 @@ export-objs = signal.o sys.o kmod.o context.o ksyms.o pm.o exec_domain.o \
printk.o platform.o suspend.o
obj-y = sched.o dma.o fork.o exec_domain.o panic.o printk.o \
module.o exit.o itimer.o info.o time.o softirq.o resource.o \
module.o exit.o itimer.o time.o softirq.o resource.o \
sysctl.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o context.o futex.o platform.o
......
......@@ -2,17 +2,21 @@
* linux/kernel/capability.c
*
* Copyright (C) 1997 Andrew Main <zefram@fysh.org>
*
* Integrated into 2.1.97+, Andrew G. Morgan <morgan@transmeta.com>
* 30 May 2002: Cleanup, Robert M. Love <rml@tech9.net>
*/
#include <linux/mm.h>
#include <asm/uaccess.h>
unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */
kernel_cap_t cap_bset = CAP_INIT_EFF_SET;
/* Note: never hold tasklist_lock while spinning for this one */
/*
* This global lock protects task->cap_* for all tasks including current.
* Locking rule: acquire this prior to tasklist_lock.
*/
spinlock_t task_capability_lock = SPIN_LOCK_UNLOCKED;
/*
......@@ -21,23 +25,24 @@ spinlock_t task_capability_lock = SPIN_LOCK_UNLOCKED;
* uninteresting and/or not to be changed.
*/
/*
* sys_capget - get the capabilities of a given process.
*/
asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
{
int error, pid;
int ret = 0;
pid_t pid;
__u32 version;
struct task_struct *target;
task_t *target;
struct __user_cap_data_struct data;
if (get_user(version, &header->version))
return -EFAULT;
error = -EINVAL;
if (version != _LINUX_CAPABILITY_VERSION) {
version = _LINUX_CAPABILITY_VERSION;
if (put_user(version, &header->version))
error = -EFAULT;
return error;
}
if (version != _LINUX_CAPABILITY_VERSION)
if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
return -EFAULT;
return -EINVAL;
if (get_user(pid, &header->pid))
return -EFAULT;
......@@ -45,48 +50,39 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
if (pid < 0)
return -EINVAL;
error = 0;
spin_lock(&task_capability_lock);
if (pid && pid != current->pid) {
read_lock(&tasklist_lock);
target = find_task_by_pid(pid); /* identify target of query */
if (!target)
error = -ESRCH;
} else {
target = current;
target = find_task_by_pid(pid);
if (!target) {
ret = -ESRCH;
goto out;
}
if (!error) {
data.permitted = cap_t(target->cap_permitted);
data.inheritable = cap_t(target->cap_inheritable);
data.effective = cap_t(target->cap_effective);
}
if (target != current)
out:
read_unlock(&tasklist_lock);
spin_unlock(&task_capability_lock);
if (!error) {
if (copy_to_user(dataptr, &data, sizeof data))
if (!ret && copy_to_user(dataptr, &data, sizeof data))
return -EFAULT;
}
return error;
return ret;
}
/* set capabilities for all processes in a given process group */
static void cap_set_pg(int pgrp,
kernel_cap_t *effective,
/*
* cap_set_pg - set capabilities for all processes in a given process
* group. We call this holding task_capability_lock and tasklist_lock.
*/
static inline void cap_set_pg(int pgrp, kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted)
{
struct task_struct *target;
task_t *target;
/* FIXME: do we need to have a write lock here..? */
read_lock(&tasklist_lock);
for_each_task(target) {
if (target->pgrp != pgrp)
continue;
......@@ -94,20 +90,18 @@ static void cap_set_pg(int pgrp,
target->cap_inheritable = *inheritable;
target->cap_permitted = *permitted;
}
read_unlock(&tasklist_lock);
}
/* set capabilities for all processes other than 1 and self */
static void cap_set_all(kernel_cap_t *effective,
/*
* cap_set_all - set capabilities for all processes other than init
* and self. We call this holding task_capability_lock and tasklist_lock.
*/
static inline void cap_set_all(kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted)
{
struct task_struct *target;
task_t *target;
/* FIXME: do we need to have a write lock here..? */
read_lock(&tasklist_lock);
/* ALL means everyone other than self or 'init' */
for_each_task(target) {
if (target == current || target->pid == 1)
continue;
......@@ -115,35 +109,35 @@ static void cap_set_all(kernel_cap_t *effective,
target->cap_inheritable = *inheritable;
target->cap_permitted = *permitted;
}
read_unlock(&tasklist_lock);
}
/*
* sys_capset - set capabilities for a given process, all processes, or all
* processes in a given process group.
*
* The restrictions on setting capabilities are specified as:
*
* [pid is for the 'target' task. 'current' is the calling task.]
*
* I: any raised capabilities must be a subset of the (old current) Permitted
* I: any raised capabilities must be a subset of the (old current) permitted
* P: any raised capabilities must be a subset of the (old current) permitted
* E: must be set to a subset of (new target) Permitted
* E: must be set to a subset of (new target) permitted
*/
asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
{
kernel_cap_t inheritable, permitted, effective;
__u32 version;
struct task_struct *target;
int error, pid;
task_t *target;
int ret;
pid_t pid;
if (get_user(version, &header->version))
return -EFAULT;
if (version != _LINUX_CAPABILITY_VERSION) {
version = _LINUX_CAPABILITY_VERSION;
if (put_user(version, &header->version))
if (version != _LINUX_CAPABILITY_VERSION)
if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
return -EFAULT;
return -EINVAL;
}
if (get_user(pid, &header->pid))
return -EFAULT;
......@@ -156,43 +150,35 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
copy_from_user(&permitted, &data->permitted, sizeof(permitted)))
return -EFAULT;
error = -EPERM;
spin_lock(&task_capability_lock);
read_lock(&tasklist_lock);
if (pid > 0 && pid != current->pid) {
read_lock(&tasklist_lock);
target = find_task_by_pid(pid); /* identify target of query */
target = find_task_by_pid(pid);
if (!target) {
error = -ESRCH;
ret = -ESRCH;
goto out;
}
} else {
} else
target = current;
}
ret = -EPERM;
/* verify restrictions on target's new Inheritable set */
if (!cap_issubset(inheritable,
cap_combine(target->cap_inheritable,
current->cap_permitted))) {
if (!cap_issubset(inheritable, cap_combine(target->cap_inheritable,
current->cap_permitted)))
goto out;
}
/* verify restrictions on target's new Permitted set */
if (!cap_issubset(permitted,
cap_combine(target->cap_permitted,
current->cap_permitted))) {
if (!cap_issubset(permitted, cap_combine(target->cap_permitted,
current->cap_permitted)))
goto out;
}
/* verify the _new_Effective_ is a subset of the _new_Permitted_ */
if (!cap_issubset(effective, permitted)) {
if (!cap_issubset(effective, permitted))
goto out;
}
/* having verified that the proposed changes are legal,
we now put them into effect. */
error = 0;
ret = 0;
if (pid < 0) {
if (pid == -1) /* all procs other than current and init */
......@@ -200,19 +186,15 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
else /* all procs in process group */
cap_set_pg(-pid, &effective, &inheritable, &permitted);
goto spin_out;
} else {
/* FIXME: do we need to have a write lock here..? */
target->cap_effective = effective;
target->cap_inheritable = inheritable;
target->cap_permitted = permitted;
}
out:
if (target != current) {
read_unlock(&tasklist_lock);
}
spin_out:
spin_unlock(&task_capability_lock);
return error;
return ret;
}
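The version handshake above is observable from userspace: if the header version does not match, the kernel writes back the version it speaks and returns -EINVAL. A hedged sketch of a raw-syscall caller querying its own capabilities (no libcap, minimal error handling; not part of this diff):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/capability.h>

	int main(void)
	{
		struct __user_cap_header_struct hdr;
		struct __user_cap_data_struct data;

		hdr.version = _LINUX_CAPABILITY_VERSION;
		hdr.pid = 0;	/* 0 (or our own pid) means the calling task */

		if (syscall(SYS_capget, &hdr, &data) < 0) {
			perror("capget");
			return 1;
		}
		printf("effective=%#x permitted=%#x inheritable=%#x\n",
		       data.effective, data.permitted, data.inheritable);
		return 0;
	}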
......@@ -53,9 +53,9 @@ void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
unsigned long flags;
wait->flags &= ~WQ_FLAG_EXCLUSIVE;
wq_write_lock_irqsave(&q->lock, flags);
spin_lock_irqsave(&q->lock, flags);
__add_wait_queue(q, wait);
wq_write_unlock_irqrestore(&q->lock, flags);
spin_unlock_irqrestore(&q->lock, flags);
}
void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)
......@@ -63,18 +63,18 @@ void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)
unsigned long flags;
wait->flags |= WQ_FLAG_EXCLUSIVE;
wq_write_lock_irqsave(&q->lock, flags);
spin_lock_irqsave(&q->lock, flags);
__add_wait_queue_tail(q, wait);
wq_write_unlock_irqrestore(&q->lock, flags);
spin_unlock_irqrestore(&q->lock, flags);
}
void remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
{
unsigned long flags;
wq_write_lock_irqsave(&q->lock, flags);
spin_lock_irqsave(&q->lock, flags);
__remove_wait_queue(q, wait);
wq_write_unlock_irqrestore(&q->lock, flags);
spin_unlock_irqrestore(&q->lock, flags);
}
void __init fork_init(unsigned long mempages)
......
/*
* linux/kernel/info.c
*
* Copyright (C) 1992 Darren Senn
*/
/* This implements the sysinfo() system call */
#include <linux/mm.h>
#include <linux/unistd.h>
#include <linux/swap.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
asmlinkage long sys_sysinfo(struct sysinfo *info)
{
struct sysinfo val;
memset((char *)&val, 0, sizeof(struct sysinfo));
cli();
val.uptime = jiffies / HZ;
val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
val.loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);
val.procs = nr_threads;
sti();
si_meminfo(&val);
si_swapinfo(&val);
{
unsigned long mem_total, sav_total;
unsigned int mem_unit, bitcount;
/* If the sum of all the available memory (i.e. ram + swap)
* is less than can be stored in a 32 bit unsigned long then
* we can be binary compatible with 2.2.x kernels. If not,
* well, in that case 2.2.x was broken anyways...
*
* -Erik Andersen <andersee@debian.org> */
mem_total = val.totalram + val.totalswap;
if (mem_total < val.totalram || mem_total < val.totalswap)
goto out;
bitcount = 0;
mem_unit = val.mem_unit;
while (mem_unit > 1) {
bitcount++;
mem_unit >>= 1;
sav_total = mem_total;
mem_total <<= 1;
if (mem_total < sav_total)
goto out;
}
/* If mem_total did not overflow, multiply all memory values by
* val.mem_unit and set it to 1. This leaves things compatible
* with 2.2.x, and also retains compatibility with earlier 2.4.x
* kernels... */
val.mem_unit = 1;
val.totalram <<= bitcount;
val.freeram <<= bitcount;
val.sharedram <<= bitcount;
val.bufferram <<= bitcount;
val.totalswap <<= bitcount;
val.freeswap <<= bitcount;
val.totalhigh <<= bitcount;
val.freehigh <<= bitcount;
}
out:
if (copy_to_user(info, &val, sizeof(struct sysinfo)))
return -EFAULT;
return 0;
}
......@@ -140,7 +140,6 @@ EXPORT_SYMBOL(igrab);
EXPORT_SYMBOL(iunique);
EXPORT_SYMBOL(iput);
EXPORT_SYMBOL(inode_init_once);
EXPORT_SYMBOL(force_delete);
EXPORT_SYMBOL(follow_up);
EXPORT_SYMBOL(follow_down);
EXPORT_SYMBOL(lookup_mnt);
......
......@@ -605,7 +605,7 @@ static void load_balance(runqueue_t *this_rq, int idle)
#define CAN_MIGRATE_TASK(p,rq,this_cpu) \
((jiffies - (p)->sleep_timestamp > cache_decay_ticks) && \
((p) != (rq)->curr) && \
((p)->cpus_allowed & (1 << (this_cpu))))
((p)->cpus_allowed & (1UL << (this_cpu))))
if (!CAN_MIGRATE_TASK(tmp, busiest, this_cpu)) {
curr = curr->next;
......@@ -777,8 +777,8 @@ asmlinkage void schedule(void)
spin_lock_irq(&rq->lock);
/*
* if entering from preempt_schedule, off a kernel preemption,
* go straight to picking the next task.
* if entering off a kernel preemption go straight
* to picking the next task.
*/
if (unlikely(preempt_get_count() & PREEMPT_ACTIVE))
goto pick_next_task;
......@@ -854,7 +854,9 @@ asmlinkage void schedule(void)
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption.
* this is the entry point to schedule() from in-kernel preemption
* off of preempt_enable. Kernel preemptions off return from interrupt
* occur there and call schedule directly.
*/
asmlinkage void preempt_schedule(void)
{
......@@ -866,7 +868,6 @@ asmlinkage void preempt_schedule(void)
ti->preempt_count = PREEMPT_ACTIVE;
schedule();
ti->preempt_count = 0;
barrier();
}
#endif /* CONFIG_PREEMPT */
......@@ -903,9 +904,9 @@ void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
if (unlikely(!q))
return;
wq_read_lock_irqsave(&q->lock, flags);
spin_lock_irqsave(&q->lock, flags);
__wake_up_common(q, mode, nr_exclusive);
wq_read_unlock_irqrestore(&q->lock, flags);
spin_unlock_irqrestore(&q->lock, flags);
}
void complete(struct completion *x)
......@@ -944,14 +945,14 @@ void wait_for_completion(struct completion *x)
init_waitqueue_entry(&wait, current);
#define SLEEP_ON_HEAD \
wq_write_lock_irqsave(&q->lock,flags); \
spin_lock_irqsave(&q->lock,flags); \
__add_wait_queue(q, &wait); \
wq_write_unlock(&q->lock);
spin_unlock(&q->lock);
#define SLEEP_ON_TAIL \
wq_write_lock_irq(&q->lock); \
spin_lock_irq(&q->lock); \
__remove_wait_queue(q, &wait); \
wq_write_unlock_irqrestore(&q->lock,flags);
spin_unlock_irqrestore(&q->lock, flags);
void interruptible_sleep_on(wait_queue_head_t *q)
{
......@@ -1661,7 +1662,8 @@ typedef struct {
* is removed from the allowed bitmask.
*
* NOTE: the caller must have a valid reference to the task, the
* task must not exit() & deallocate itself prematurely.
* task must not exit() & deallocate itself prematurely. The
* call is not atomic; no spinlocks may be held.
*/
void set_cpus_allowed(task_t *p, unsigned long new_mask)
{
......
......@@ -13,6 +13,7 @@
* serialize accesses to xtime/lost_ticks).
* Copyright (C) 1998 Andrea Arcangeli
* 1999-03-10 Improved NTP compatibility by Ulrich Windl
* 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love
*/
#include <linux/config.h>
......@@ -605,9 +606,15 @@ static unsigned long count_active_tasks(void)
* imply that avenrun[] is the standard name for this kind of thing.
* Nothing else seems to be standardized: the fractional size etc
* all seem to differ on different machines.
*
* Requires xtime_lock to access.
*/
unsigned long avenrun[3];
/*
* calc_load - given tick count, update the avenrun load estimates.
* This is called while holding a write_lock on xtime_lock.
*/
static inline void calc_load(unsigned long ticks)
{
unsigned long active_tasks; /* fixed-point */
......@@ -627,7 +634,8 @@ static inline void calc_load(unsigned long ticks)
unsigned long wall_jiffies;
/*
* This spinlock protect us from races in SMP while playing with xtime. -arca
* This read-write spinlock protects us from races in SMP while
* playing with xtime and avenrun.
*/
rwlock_t xtime_lock = RW_LOCK_UNLOCKED;
unsigned long last_time_offset;
......@@ -649,8 +657,8 @@ static inline void update_times(void)
update_wall_time(ticks);
}
last_time_offset = 0;
write_unlock_irq(&xtime_lock);
calc_load(ticks);
write_unlock_irq(&xtime_lock);
}
void timer_bh(void)
......@@ -912,3 +920,73 @@ asmlinkage long sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
return 0;
}
/*
* sys_sysinfo - fill in sysinfo struct
*/
asmlinkage long sys_sysinfo(struct sysinfo *info)
{
struct sysinfo val;
unsigned long mem_total, sav_total;
unsigned int mem_unit, bitcount;
memset((char *)&val, 0, sizeof(struct sysinfo));
read_lock_irq(&xtime_lock);
val.uptime = jiffies / HZ;
val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
val.loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);
val.procs = nr_threads;
read_unlock_irq(&xtime_lock);
si_meminfo(&val);
si_swapinfo(&val);
/*
* If the sum of all the available memory (i.e. ram + swap)
* is less than can be stored in a 32 bit unsigned long then
* we can be binary compatible with 2.2.x kernels. If not,
* well, in that case 2.2.x was broken anyways...
*
* -Erik Andersen <andersee@debian.org>
*/
mem_total = val.totalram + val.totalswap;
if (mem_total < val.totalram || mem_total < val.totalswap)
goto out;
bitcount = 0;
mem_unit = val.mem_unit;
while (mem_unit > 1) {
bitcount++;
mem_unit >>= 1;
sav_total = mem_total;
mem_total <<= 1;
if (mem_total < sav_total)
goto out;
}
/*
* If mem_total did not overflow, multiply all memory values by
* val.mem_unit and set it to 1. This leaves things compatible
* with 2.2.x, and also retains compatibility with earlier 2.4.x
* kernels...
*/
val.mem_unit = 1;
val.totalram <<= bitcount;
val.freeram <<= bitcount;
val.sharedram <<= bitcount;
val.bufferram <<= bitcount;
val.totalswap <<= bitcount;
val.freeswap <<= bitcount;
val.totalhigh <<= bitcount;
val.freehigh <<= bitcount;
out:
if (copy_to_user(info, &val, sizeof(struct sysinfo)))
return -EFAULT;
return 0;
}
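A worked example of the scaling loop above, with illustrative numbers: si_meminfo() reports values in pages with mem_unit = PAGE_SIZE, so with 4096-byte pages the loop runs twelve times, setting bitcount = 12 and doubling mem_total each step to prove the byte total still fits in an unsigned long; afterwards every field is shifted left by 12 and mem_unit is reported as 1, the byte-granular layout 2.2.x userspace expects. A standalone sketch of the same arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned long mem_total = 262144;	/* e.g. 1 GiB expressed in 4 KiB units */
		unsigned int mem_unit = 4096, bitcount = 0;

		while (mem_unit > 1) {
			bitcount++;
			mem_unit >>= 1;
			mem_total <<= 1;	/* the kernel checks for overflow here; elided */
		}
		/* bitcount == 12; mem_total == 1073741824 bytes */
		printf("bitcount=%u total=%lu bytes\n", bitcount, mem_total);
		return 0;
	}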
......@@ -1483,7 +1483,7 @@ static struct super_operations shmem_ops = {
remount_fs: shmem_remount_fs,
#endif
delete_inode: shmem_delete_inode,
put_inode: force_delete,
drop_inode: generic_delete_inode,
put_super: shmem_put_super,
};
......