Commit c9ce94ec authored by Linus Torvalds

Merge bk://linux-acpi.bkbits.net/linux-acpi-release-2.6.6

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 2061546a 2b44ea57
......@@ -1308,11 +1308,10 @@ S: USA
N: Benjamin Herrenschmidt
E: benh@kernel.crashing.org
E: benh@mipsys.com
D: Various parts of PPC & PowerMac
S: 122, boulevard Baille
S: 13005 Marseille
S: France
D: Various parts of PPC/PPC64 & PowerMac
S: 312/107 Canberra Avenue
S: Griffith, ACT 2603
S: Australia
N: Sebastian Hetze
E: she@lunetix.de
......
......@@ -589,8 +589,7 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent,
ret = dmabounce_register_dev(&dev->dev, 1024, 4096);
if (ret) {
printk("SA1111: Failed to register %s with dmabounce", dev->dev.bus_id);
kfree(dev);
device_unregister(dev);
device_unregister(&dev->dev);
}
}
}
......@@ -779,8 +778,7 @@ int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
/*
* Check to see if either the start or end are illegal.
*/
return ((addr & ~(*dev->dma_mask))) ||
((addr + size - 1) & ~(*dev->dma_mask));
return ((addr & ~dma_mask)) || ((addr + size - 1) & ~dma_mask);
}
struct sa1111_save_data {
......
......@@ -6,7 +6,7 @@
# To add an entry into this database, please see Documentation/arm/README,
# or contact rmk@arm.linux.org.uk
#
# Last update: Thu Apr 15 10:14:37 2004
# Last update: Thu Apr 29 19:06:33 2004
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
......@@ -242,7 +242,7 @@ killbear SA1100_KILLBEAR KILLBEAR 230
yoho ARCH_YOHO YOHO 231
jasper ARCH_JASPER JASPER 232
dsc25 ARCH_DSC25 DSC25 233
innovator ARCH_INNOVATOR INNOVATOR 234
omap_innovator MACH_OMAP_INNOVATOR OMAP_INNOVATOR 234
ramses ARCH_RAMSES RAMSES 235
s28x ARCH_S28X S28X 236
mport3 ARCH_MPORT3 MPORT3 237
......@@ -390,7 +390,7 @@ espd_4510b ARCH_ESPD_4510B ESPD_4510B 378
mp1x ARCH_MP1X MP1X 379
at91rm9200tb ARCH_AT91RM9200TB AT91RM9200TB 380
adsvgx ARCH_ADSVGX ADSVGX 381
omap_h2 ARCH_OMAP_H2 OMAP_H2 382
omap_h2 MACH_OMAP_H2 OMAP_H2 382
pelee ARCH_PELEE PELEE 383
e740 MACH_E740 E740 384
iq80331 ARCH_IQ80331 IQ80331 385
......@@ -523,3 +523,13 @@ montajade MACH_MONTAJADE MONTAJADE 512
sg560 MACH_SG560 SG560 513
dp1000 MACH_DP1000 DP1000 514
omap_osk MACH_OMAP_OSK OMAP_OSK 515
rg100v3 MACH_RG100V3 RG100V3 516
mx2ads MACH_MX2ADS MX2ADS 517
pxa_kilo MACH_PXA_KILO PXA_KILO 518
ixp4xx_eagle MACH_IXP4XX_EAGLE IXP4XX_EAGLE 519
tosa MACH_TOSA TOSA 520
mb2520f MACH_MB2520F MB2520F 521
emc1000 MACH_EMC1000 EMC1000 522
tidsc25 MACH_TIDSC25 TIDSC25 523
akcpmxl MACH_AKCPMXL AKCPMXL 524
av3xx MACH_AV3XX AV3XX 525
......@@ -36,6 +36,20 @@ static unsigned int nmi_hz = HZ;
unsigned int nmi_perfctr_msr; /* the MSR to reset in NMI handler */
extern void show_registers(struct pt_regs *regs);
/*
* lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
* - it may be reserved by some other driver, or not
* - when not reserved by some other driver, it may be used for
* the NMI watchdog, or not
*
* This is maintained separately from nmi_active because the NMI
* watchdog may also be driven from the I/O APIC timer.
*/
static spinlock_t lapic_nmi_owner_lock = SPIN_LOCK_UNLOCKED;
static unsigned int lapic_nmi_owner;
#define LAPIC_NMI_WATCHDOG (1<<0)
#define LAPIC_NMI_RESERVED (1<<1)
/* nmi_active:
* +1: the lapic NMI watchdog is active, but can be disabled
* 0: the lapic NMI watchdog has not been set up, and cannot
......@@ -102,6 +116,7 @@ int __init check_nmi_watchdog (void)
if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
printk("CPU#%d: NMI appears to be stuck!\n", cpu);
nmi_active = 0;
lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG;
return -1;
}
}
......@@ -151,7 +166,7 @@ static int __init setup_nmi_watchdog(char *str)
__setup("nmi_watchdog=", setup_nmi_watchdog);
void disable_lapic_nmi_watchdog(void)
static void disable_lapic_nmi_watchdog(void)
{
if (nmi_active <= 0)
return;
......@@ -182,7 +197,7 @@ void disable_lapic_nmi_watchdog(void)
nmi_watchdog = 0;
}
void enable_lapic_nmi_watchdog(void)
static void enable_lapic_nmi_watchdog(void)
{
if (nmi_active < 0) {
nmi_watchdog = NMI_LOCAL_APIC;
......@@ -190,6 +205,33 @@ void enable_lapic_nmi_watchdog(void)
}
}
int reserve_lapic_nmi(void)
{
unsigned int old_owner;
spin_lock(&lapic_nmi_owner_lock);
old_owner = lapic_nmi_owner;
lapic_nmi_owner |= LAPIC_NMI_RESERVED;
spin_unlock(&lapic_nmi_owner_lock);
if (old_owner & LAPIC_NMI_RESERVED)
return -EBUSY;
if (old_owner & LAPIC_NMI_WATCHDOG)
disable_lapic_nmi_watchdog();
return 0;
}
void release_lapic_nmi(void)
{
unsigned int new_owner;
spin_lock(&lapic_nmi_owner_lock);
new_owner = lapic_nmi_owner & ~LAPIC_NMI_RESERVED;
lapic_nmi_owner = new_owner;
spin_unlock(&lapic_nmi_owner_lock);
if (new_owner & LAPIC_NMI_WATCHDOG)
enable_lapic_nmi_watchdog();
}
void disable_timer_nmi_watchdog(void)
{
if ((nmi_watchdog != NMI_IO_APIC) || (nmi_active <= 0))
......@@ -243,7 +285,7 @@ static int __init init_lapic_nmi_sysfs(void)
{
int error;
if (nmi_active == 0)
if (nmi_active == 0 || nmi_watchdog != NMI_LOCAL_APIC)
return 0;
error = sysdev_class_register(&nmi_sysclass);
......@@ -373,6 +415,7 @@ void setup_apic_nmi_watchdog (void)
default:
return;
}
lapic_nmi_owner = LAPIC_NMI_WATCHDOG;
nmi_active = 1;
}
......@@ -470,7 +513,7 @@ void nmi_watchdog_tick (struct pt_regs * regs)
EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(disable_lapic_nmi_watchdog);
EXPORT_SYMBOL(enable_lapic_nmi_watchdog);
EXPORT_SYMBOL(reserve_lapic_nmi);
EXPORT_SYMBOL(release_lapic_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);
......@@ -183,7 +183,10 @@ static int nmi_setup(void)
* without actually triggering any NMIs as this will
* break the core code horrifically.
*/
disable_lapic_nmi_watchdog();
if (reserve_lapic_nmi() < 0) {
free_msrs();
return -EBUSY;
}
/* We need to serialize save and setup for HT because the subset
* of msrs are distinct for save and setup operations
*/
......@@ -241,7 +244,7 @@ static void nmi_shutdown(void)
nmi_enabled = 0;
on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
unset_nmi_callback();
enable_lapic_nmi_watchdog();
release_lapic_nmi();
free_msrs();
}
......
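The oprofile hunk above shows the intended calling convention for the new reservation API: claim the LAPIC NMI hardware before touching the performance-counter MSRs, and hand it back on shutdown so the watchdog can resume. A minimal sketch of the same pattern for a hypothetical perfctr-style driver (only reserve_lapic_nmi() and release_lapic_nmi() come from this patch; the surrounding names are illustrative):

static int example_perfctr_init(void)
{
	if (reserve_lapic_nmi() < 0)
		return -EBUSY;		/* watchdog or another driver owns the LAPIC NMI */
	/* ... program counter MSRs, install the NMI callback ... */
	return 0;
}

static void example_perfctr_exit(void)
{
	/* ... stop the counters, remove the NMI callback ... */
	release_lapic_nmi();		/* re-arms the LAPIC NMI watchdog if it was the previous owner */
}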
......@@ -187,6 +187,42 @@ static void __devinit pci_fixup_transparent_bridge(struct pci_dev *dev)
dev->transparent = 1;
}
/*
* Fixup for C1 Halt Disconnect problem on nForce2 systems.
*
* From information provided by "Allen Martin" <AMartin@nvidia.com>:
*
* A hang is caused when the CPU generates a very fast CONNECT/HALT cycle
* sequence. Workaround is to set the SYSTEM_IDLE_TIMEOUT to 80 ns.
* This allows the state-machine and timer to return to a proper state within
* 80 ns of the CONNECT and probe appearing together. Since the CPU will not
* issue another HALT within 80 ns of the initial HALT, the failure condition
* is avoided.
*/
static void __init pci_fixup_nforce2(struct pci_dev *dev)
{
u32 val, fixed_val;
u8 rev;
pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
/*
* Chip Old value New value
* C17 0x1F0FFF01 0x1F01FF01
* C18D 0x9F0FFF01 0x9F01FF01
*
* Northbridge chip version may be determined by
* reading the PCI revision ID (0xC1 or greater is C18D).
*/
fixed_val = rev < 0xC1 ? 0x1F01FF01 : 0x9F01FF01;
pci_read_config_dword(dev, 0x6c, &val);
if (val != fixed_val) {
printk(KERN_WARNING "PCI: nForce2 C1 Halt Disconnect fixup\n");
pci_write_config_dword(dev, 0x6c, fixed_val);
}
}
struct pci_fixup pcibios_fixups[] = {
{
.pass = PCI_FIXUP_HEADER,
......@@ -290,5 +326,11 @@ struct pci_fixup pcibios_fixups[] = {
.device = PCI_ANY_ID,
.hook = pci_fixup_transparent_bridge
},
{
.pass = PCI_FIXUP_HEADER,
.vendor = PCI_VENDOR_ID_NVIDIA,
.device = PCI_DEVICE_ID_NVIDIA_NFORCE2,
.hook = pci_fixup_nforce2
},
{ .pass = 0 }
};
......@@ -733,3 +733,47 @@ valid_phys_addr_range (unsigned long phys_addr, unsigned long *size)
return 0;
}
int __init
efi_uart_console_only(void)
{
efi_status_t status;
char *s, name[] = "ConOut";
efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
efi_char16_t *utf16, name_utf16[32];
unsigned char data[1024];
unsigned long size = sizeof(data);
struct efi_generic_dev_path *hdr, *end_addr;
int uart = 0;
/* Convert to UTF-16 */
utf16 = name_utf16;
s = name;
while (*s)
*utf16++ = *s++ & 0x7f;
*utf16 = 0;
status = efi.get_variable(name_utf16, &guid, NULL, &size, data);
if (status != EFI_SUCCESS) {
printk(KERN_ERR "No EFI %s variable?\n", name);
return 0;
}
hdr = (struct efi_generic_dev_path *) data;
end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size);
while (hdr < end_addr) {
if (hdr->type == EFI_DEV_MSG &&
hdr->sub_type == EFI_DEV_MSG_UART)
uart = 1;
else if (hdr->type == EFI_DEV_END_PATH ||
hdr->type == EFI_DEV_END_PATH2) {
if (!uart)
return 0;
if (hdr->sub_type == EFI_DEV_END_ENTIRE)
return 1;
uart = 0;
}
hdr = (struct efi_generic_dev_path *) ((u8 *) hdr + hdr->length);
}
printk(KERN_ERR "Malformed %s value\n", name);
return 0;
}
......@@ -89,13 +89,14 @@ $(obj)/coffcrt0.o $(obj)/crt0.o: $(common)/crt0.S FORCE
$(call if_changed_dep,as_o_S)
quiet_cmd_gencoffb = COFF $@
cmd_gencoffb = $(LD) -o $@ $(COFF_LD_ARGS) $(filter-out FORCE,$^) && \
cmd_gencoffb = $(LD) -o $@ $(COFF_LD_ARGS) $(COFFOBJS) $< $(LIBS) && \
$(OBJCOPY) $@ $@ -R .comment $(del-ramdisk-sec)
targets += coffboot
$(obj)/coffboot: $(COFFOBJS) $(obj)/image.o $(LIBS) FORCE
$(obj)/coffboot: $(obj)/image.o $(COFFOBJS) $(LIBS) $(boot)/ld.script FORCE
$(call if_changed,gencoffb)
targets += coffboot.initrd
$(obj)/coffboot.initrd: $(COFFOBJS) $(obj)/image.initrd.o $(LIBS) FORCE
$(obj)/coffboot.initrd: $(obj)/image.initrd.o $(COFFOBJS) $(LIBS) \
$(boot)/ld.script FORCE
$(call if_changed,gencoffb)
......@@ -104,10 +105,10 @@ quiet_cmd_gen-coff = COFF $@
$(HACKCOFF) $@ && \
ln -sf $(notdir $@) $(images)/zImage$(initrd).pmac
$(images)/vmlinux.coff: $(obj)/coffboot $(boot)/ld.script
$(images)/vmlinux.coff: $(obj)/coffboot
$(call cmd,gen-coff)
$(images)/vmlinux.initrd.coff: $(obj)/coffboot.initrd $(boot)/ld.script
$(images)/vmlinux.initrd.coff: $(obj)/coffboot.initrd
$(call cmd,gen-coff)
quiet_cmd_gen-elf-pmac = ELF $@
......@@ -116,19 +117,21 @@ quiet_cmd_gen-elf-pmac = ELF $@
$(OBJCOPY) $@ $@ --add-section=.note=$(obj)/note \
-R .comment $(del-ramdisk-sec)
$(images)/vmlinux.elf-pmac: $(obj)/image.o $(NEWWORLDOBJS) $(LIBS) $(obj)/note $(boot)/ld.script
$(images)/vmlinux.elf-pmac: $(obj)/image.o $(NEWWORLDOBJS) $(LIBS) \
$(obj)/note $(boot)/ld.script
$(call cmd,gen-elf-pmac)
$(images)/vmlinux.initrd.elf-pmac: $(obj)/image.initrd.o $(NEWWORLDOBJS) \
$(LIBS) $(obj)/note $(boot)/ld.script
$(call cmd,gen-elf-pmac)
quiet_cmd_gen-chrp = CHRP $@
cmd_gen-chrp = $(LD) $(CHRP_LD_ARGS) -o $@ $^ && \
cmd_gen-chrp = $(LD) $(CHRP_LD_ARGS) -o $@ $(CHRPOBJS) $< $(LIBS) && \
$(OBJCOPY) $@ $@ -R .comment $(del-ramdisk-sec)
$(images)/zImage.chrp: $(CHRPOBJS) $(obj)/image.o $(LIBS) $(boot)/ld.script
$(images)/zImage.chrp: $(obj)/image.o $(CHRPOBJS) $(LIBS) $(boot)/ld.script
$(call cmd,gen-chrp)
$(images)/zImage.initrd.chrp: $(CHRPOBJS) $(obj)/image.initrd.o $(LIBS) $(boot)/ld.script
$(images)/zImage.initrd.chrp: $(obj)/image.initrd.o $(CHRPOBJS) $(LIBS) \
$(boot)/ld.script
$(call cmd,gen-chrp)
quiet_cmd_addnote = ADDNOTE $@
......
......@@ -18,24 +18,37 @@
#include <asm/offsets.h>
#include <asm/cache.h>
_GLOBAL(__power4_cpu_preinit)
_GLOBAL(__970_cpu_preinit)
/*
* On the PPC970, we have to turn off real-mode cache inhibit
* early, before we first turn the MMU off.
* Deal only with PPC970 and PPC970FX.
*/
mfspr r0,SPRN_PVR
srwi r0,r0,16
cmpwi r0,0x39
cmpwi cr0,r0,0x39
cmpwi cr1,r0,0x3c
cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
bnelr
/* Make sure HID4:rm_ci is off before MMU is turned off, that large
* pages are enabled with HID4:61 and clear HID5:DCBZ_size and
* HID5:DCBZ32_ill
*/
li r0,0
mfspr r11,SPRN_HID4
rldimi r11,r0,40,23 /* clear bit 23 (rm_ci) */
rldimi r11,r0,2,61 /* clear bit 61 (lg_pg_en) */
sync
mtspr SPRN_HID4,r0
mtspr SPRN_HID4,r11
isync
sync
mtspr SPRN_HID5,r0
mfspr r11,SPRN_HID5
rldimi r11,r0,6,56 /* clear bits 56 & 57 (DCBZ*) */
sync
mtspr SPRN_HID5,r11
isync
sync
/* Setup some basic HID1 features */
mfspr r0,SPRN_HID1
li r11,0x1200 /* enable i-fetch cacheability */
sldi r11,r11,44 /* and prefetch */
......@@ -43,6 +56,8 @@ _GLOBAL(__power4_cpu_preinit)
mtspr SPRN_HID1,r0
mtspr SPRN_HID1,r0
isync
/* Clear HIOR */
li r0,0
sync
mtspr SPRN_HIOR,0 /* Clear interrupt prefix */
......
......@@ -323,6 +323,17 @@ struct cpu_spec cpu_specs[] = {
32, 32,
__setup_cpu_745x
},
{ /* 7447A */
0xffff0000, 0x80030000, "7447A",
CPU_FTR_COMMON |
CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_CAN_NAP |
CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP |
CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR |
CPU_FTR_HAS_HIGH_BATS,
COMMON_PPC | PPC_FEATURE_ALTIVEC_COMP,
32, 32,
__setup_cpu_745x
},
{ /* 82xx (8240, 8245, 8260 are all 603e cores) */
0x7fff0000, 0x00810000, "82xx",
CPU_FTR_COMMON |
......
......@@ -153,7 +153,7 @@ __start:
* like real mode cache inhibit or exception base
*/
#ifdef CONFIG_POWER4
bl __power4_cpu_preinit
bl __970_cpu_preinit
#endif /* CONFIG_POWER4 */
#ifdef CONFIG_APUS
......
......@@ -578,7 +578,6 @@ heathrow_sleep(struct macio_chip* macio, int secondary)
/* Let things settle */
(void)MACIO_IN32(HEATHROW_FCR);
mdelay(1);
}
static void __pmac
......@@ -2102,7 +2101,7 @@ static struct pmac_mb_def pmac_mb_defs[] __pmacdata = {
0,
},
{ "PowerMac3,6", "PowerMac G4 Windtunnel",
PMAC_TYPE_WINDTUNNEL, rackmac_features,
PMAC_TYPE_WINDTUNNEL, core99_features,
0,
},
{ "PowerBook5,1", "PowerBook G4 17\"",
......@@ -2129,6 +2128,10 @@ static struct pmac_mb_def pmac_mb_defs[] __pmacdata = {
PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
},
{ "PowerBook6,5", "iBook G4",
PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
},
#else /* CONFIG_POWER4 */
{ "PowerMac7,2", "PowerMac G5",
PMAC_TYPE_POWERMAC_G5, g5_features,
......
......@@ -18,31 +18,53 @@
#include <asm/offsets.h>
#include <asm/cache.h>
_GLOBAL(__power4_cpu_preinit)
_GLOBAL(__970_cpu_preinit)
/*
* On the PPC970, we have to turn off real-mode cache inhibit
* early, before we first turn the MMU off.
* Do nothing if not running in HV mode
*/
mfmsr r0
rldicl. r0,r0,4,63
beqlr
/*
* Deal only with PPC970 and PPC970FX.
*/
mfspr r0,SPRN_PVR
srwi r0,r0,16
cmpwi r0,0x39
cmpwi cr0,r0,0x39
cmpwi cr1,r0,0x3c
cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
bnelr
/* Make sure HID4:rm_ci is off before MMU is turned off, that large
* pages are enabled with HID4:61 and clear HID5:DCBZ_size and
* HID5:DCBZ32_ill
*/
li r0,0
mfspr r3,SPRN_HID4
rldimi r3,r0,40,23 /* clear bit 23 (rm_ci) */
rldimi r3,r0,2,61 /* clear bit 61 (lg_pg_en) */
sync
mtspr SPRN_HID4,r0
mtspr SPRN_HID4,r3
isync
sync
mtspr SPRN_HID5,r0
mfspr r3,SPRN_HID5
rldimi r3,r0,6,56 /* clear bits 56 & 57 (DCBZ*) */
sync
mtspr SPRN_HID5,r3
isync
sync
/* Setup some basic HID1 features */
mfspr r0,SPRN_HID1
li r11,0x1200 /* enable i-fetch cacheability */
sldi r11,r11,44 /* and prefetch */
or r0,r0,r11
li r3,0x1200 /* enable i-fetch cacheability */
sldi r3,r3,44 /* and prefetch */
or r0,r0,r3
mtspr SPRN_HID1,r0
mtspr SPRN_HID1,r0
isync
/* Clear HIOR */
li r0,0
sync
mtspr SPRN_HIOR,0 /* Clear interrupt prefix */
......
......@@ -1469,7 +1469,7 @@ _GLOBAL(__start_initialization_pSeries)
mr r23,r3 /* Save phys address we are running at */
/* Setup some critical 970 SPRs before switching MMU off */
bl .__power4_cpu_preinit
bl .__970_cpu_preinit
li r24,0 /* cpu # */
......
......@@ -155,9 +155,20 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
return 0;
}
static void do_slbia(void *unused)
static void flush_segments(void *parm)
{
asm volatile ("isync; slbia; isync":::"memory");
u16 segs = (unsigned long) parm;
unsigned long i;
asm volatile("isync" : : : "memory");
for (i = 0; i < 16; i++) {
if (! (segs & (1U << i)))
continue;
asm volatile("slbie %0" : : "r" (i << SID_SHIFT));
}
asm volatile("isync" : : : "memory");
}
static int prepare_low_seg_for_htlb(struct mm_struct *mm, unsigned long seg)
......@@ -226,10 +237,10 @@ static int open_low_hpage_segs(struct mm_struct *mm, u16 newsegs)
return -EBUSY;
mm->context.htlb_segs |= newsegs;
/* the context change must make it to memory before the slbia,
/* the context change must make it to memory before the flush,
* so that further SLB misses do the right thing. */
mb();
on_each_cpu(do_slbia, NULL, 0, 1);
on_each_cpu(flush_segments, (void *)(unsigned long)newsegs, 0, 1);
return 0;
}
......
......@@ -42,7 +42,10 @@ int bit_map_string_get(struct bit_map *t, int len, int align)
BUG();
spin_lock(&t->lock);
offset = t->last_off & ~align1;
if (len < t->last_size)
offset = t->first_free;
else
offset = t->last_off & ~align1;
count = 0;
for (;;) {
off_new = find_next_zero_bit(t->map, t->size, offset);
......@@ -71,9 +74,14 @@ int bit_map_string_get(struct bit_map *t, int len, int align)
if (i == len) {
for (i = 0; i < len; i++)
__set_bit(offset + i, t->map);
if (offset == t->first_free)
t->first_free = find_next_zero_bit
(t->map, t->size,
t->first_free + len);
if ((t->last_off = offset + len) >= t->size)
t->last_off = 0;
t->used += len;
t->last_size = len;
spin_unlock(&t->lock);
return offset;
}
......@@ -96,6 +104,8 @@ void bit_map_clear(struct bit_map *t, int offset, int len)
BUG();
__clear_bit(offset + i, t->map);
}
if (offset < t->first_free)
t->first_free = offset;
t->used -= len;
spin_unlock(&t->lock);
}
......@@ -111,4 +121,6 @@ void bit_map_init(struct bit_map *t, unsigned long *map, int size)
spin_lock_init(&t->lock);
t->map = map;
t->size = size;
t->last_size = 0;
t->first_free = 0;
}
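The new first_free and last_size fields give bit_map_string_get() a cheap first-fit hint: a request smaller than the previous one restarts the scan at the lowest known free bit, anything else keeps rotating forward from the last allocation, and bit_map_clear() pulls the hint back whenever it frees below it. A condensed sketch of that scan-start decision, lifted from the logic above (illustrative only):

static int scan_start(struct bit_map *t, int len, int align1)
{
	if (len < t->last_size)
		return t->first_free;		/* small request: retry the earliest known hole */
	return t->last_off & ~align1;		/* otherwise continue from the previous allocation */
}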
......@@ -369,9 +369,8 @@ static void __init taint_real_pages(void)
end = start + sp_banks[i].num_bytes;
while (start < end) {
set_bit (start >> 20,
sparc_valid_addr_bitmap);
start += PAGE_SIZE;
set_bit(start >> 20, sparc_valid_addr_bitmap);
start += PAGE_SIZE;
}
}
}
......@@ -400,6 +399,7 @@ void __init mem_init(void)
int codepages = 0;
int datapages = 0;
int initpages = 0;
int reservedpages = 0;
int i;
highmem_start_page = pfn_to_page(highstart_pfn);
......@@ -434,12 +434,14 @@ void __init mem_init(void)
max_mapnr = last_valid_pfn - pfn_base;
high_memory = __va(max_low_pfn << PAGE_SHIFT);
num_physpages = totalram_pages = free_all_bootmem();
totalram_pages = free_all_bootmem();
for (i = 0; sp_banks[i].num_bytes != 0; i++) {
unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
num_physpages += sp_banks[i].num_bytes >> PAGE_SHIFT;
if (end_pfn <= highstart_pfn)
continue;
......@@ -458,13 +460,20 @@ void __init mem_init(void)
initpages = (((unsigned long) &__init_end) - ((unsigned long) &__init_begin));
initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;
printk(KERN_INFO "Memory: %dk available (%dk kernel code, %dk data, %dk init, %ldk highmem) [%08lx,%08lx]\n",
nr_free_pages() << (PAGE_SHIFT-10),
/* Ignore memory holes for the purpose of counting reserved pages */
for (i=0; i < max_low_pfn; i++)
if (test_bit(i >> (20 - PAGE_SHIFT), sparc_valid_addr_bitmap)
&& PageReserved(pfn_to_page(i)))
reservedpages++;
printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
num_physpages << (PAGE_SHIFT - 10),
codepages << (PAGE_SHIFT-10),
reservedpages << (PAGE_SHIFT - 10),
datapages << (PAGE_SHIFT-10),
initpages << (PAGE_SHIFT-10),
totalhigh_pages << (PAGE_SHIFT-10),
(unsigned long)PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));
totalhigh_pages << (PAGE_SHIFT-10));
}
void free_initmem (void)
......
......@@ -276,6 +276,23 @@ static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
((address >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE_SOFT - 1));
}
static unsigned long srmmu_swp_type(swp_entry_t entry)
{
return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}
static unsigned long srmmu_swp_offset(swp_entry_t entry)
{
return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}
static swp_entry_t srmmu_swp_entry(unsigned long type, unsigned long offset)
{
return (swp_entry_t) {
(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}
/*
* size: bytes to allocate in the nocache area.
* align: bytes, number to align at.
......@@ -2205,6 +2222,10 @@ void __init ld_mmu_srmmu(void)
BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__swp_type, srmmu_swp_type, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__swp_offset, srmmu_swp_offset, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__swp_entry, srmmu_swp_entry, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(alloc_thread_info, srmmu_alloc_thread_info, BTFIXUPCALL_NORM);
......
......@@ -1863,6 +1863,23 @@ pte_t *sun4c_pte_offset_kernel(pmd_t * dir, unsigned long address)
((address >> PAGE_SHIFT) & (SUN4C_PTRS_PER_PTE - 1));
}
static unsigned long sun4c_swp_type(swp_entry_t entry)
{
return (entry.val & SUN4C_SWP_TYPE_MASK);
}
static unsigned long sun4c_swp_offset(swp_entry_t entry)
{
return (entry.val >> SUN4C_SWP_OFF_SHIFT) & SUN4C_SWP_OFF_MASK;
}
static swp_entry_t sun4c_swp_entry(unsigned long type, unsigned long offset)
{
return (swp_entry_t) {
(offset & SUN4C_SWP_OFF_MASK) << SUN4C_SWP_OFF_SHIFT
| (type & SUN4C_SWP_TYPE_MASK) };
}
static void sun4c_free_pte_slow(pte_t *pte)
{
free_page((unsigned long)pte);
......@@ -2242,6 +2259,10 @@ void __init ld_mmu_sun4c(void)
BTFIXUPSET_CALL(sparc_mapiorange, sun4c_mapiorange, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(sparc_unmapiorange, sun4c_unmapiorange, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__swp_type, sun4c_swp_type, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__swp_offset, sun4c_swp_offset, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__swp_entry, sun4c_swp_entry, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(alloc_thread_info, sun4c_alloc_thread_info, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(free_thread_info, sun4c_free_thread_info, BTFIXUPCALL_NORM);
......
......@@ -57,7 +57,8 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
/* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
*/
if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
if ((flags & MAP_SHARED) &&
((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
return -EINVAL;
return addr;
}
......
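The sys_sparc change above tightens the MAP_SHARED sanity check: what matters is not absolute SHMLBA alignment of the hint, but that the requested address lands on the same cache colour as the file offset it maps, hence the pgoff term. A one-function sketch of the rule being enforced (illustrative):

static int colour_matches(unsigned long addr, unsigned long pgoff)
{
	/* acceptable only if addr shares the SHMLBA colour of pgoff << PAGE_SHIFT */
	return ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) == 0;
}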
......@@ -33,6 +33,20 @@
#include <asm/proto.h>
#include <asm/kdebug.h>
/*
* lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
* - it may be reserved by some other driver, or not
* - when not reserved by some other driver, it may be used for
* the NMI watchdog, or not
*
* This is maintained separately from nmi_active because the NMI
* watchdog may also be driven from the I/O APIC timer.
*/
static spinlock_t lapic_nmi_owner_lock = SPIN_LOCK_UNLOCKED;
static unsigned int lapic_nmi_owner;
#define LAPIC_NMI_WATCHDOG (1<<0)
#define LAPIC_NMI_RESERVED (1<<1)
/* nmi_active:
* +1: the lapic NMI watchdog is active, but can be disabled
* 0: the lapic NMI watchdog has not been set up, and cannot
......@@ -122,6 +136,7 @@ int __init check_nmi_watchdog (void)
cpu,
cpu_pda[cpu].__nmi_count);
nmi_active = 0;
lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG;
return -1;
}
}
......@@ -157,7 +172,7 @@ int __init setup_nmi_watchdog(char *str)
__setup("nmi_watchdog=", setup_nmi_watchdog);
void disable_lapic_nmi_watchdog(void)
static void disable_lapic_nmi_watchdog(void)
{
if (nmi_active <= 0)
return;
......@@ -174,7 +189,7 @@ void disable_lapic_nmi_watchdog(void)
nmi_watchdog = 0;
}
void enable_lapic_nmi_watchdog(void)
static void enable_lapic_nmi_watchdog(void)
{
if (nmi_active < 0) {
nmi_watchdog = NMI_LOCAL_APIC;
......@@ -182,6 +197,33 @@ void enable_lapic_nmi_watchdog(void)
}
}
int reserve_lapic_nmi(void)
{
unsigned int old_owner;
spin_lock(&lapic_nmi_owner_lock);
old_owner = lapic_nmi_owner;
lapic_nmi_owner |= LAPIC_NMI_RESERVED;
spin_unlock(&lapic_nmi_owner_lock);
if (old_owner & LAPIC_NMI_RESERVED)
return -EBUSY;
if (old_owner & LAPIC_NMI_WATCHDOG)
disable_lapic_nmi_watchdog();
return 0;
}
void release_lapic_nmi(void)
{
unsigned int new_owner;
spin_lock(&lapic_nmi_owner_lock);
new_owner = lapic_nmi_owner & ~LAPIC_NMI_RESERVED;
lapic_nmi_owner = new_owner;
spin_unlock(&lapic_nmi_owner_lock);
if (new_owner & LAPIC_NMI_WATCHDOG)
enable_lapic_nmi_watchdog();
}
void disable_timer_nmi_watchdog(void)
{
if ((nmi_watchdog != NMI_IO_APIC) || (nmi_active <= 0))
......@@ -236,7 +278,7 @@ static int __init init_lapic_nmi_sysfs(void)
{
int error;
if (nmi_active == 0)
if (nmi_active == 0 || nmi_watchdog != NMI_LOCAL_APIC)
return 0;
error = sysdev_class_register(&nmi_sysclass);
......@@ -298,6 +340,7 @@ void setup_apic_nmi_watchdog(void)
default:
return;
}
lapic_nmi_owner = LAPIC_NMI_WATCHDOG;
nmi_active = 1;
}
......@@ -405,8 +448,8 @@ void unset_nmi_callback(void)
EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(disable_lapic_nmi_watchdog);
EXPORT_SYMBOL(enable_lapic_nmi_watchdog);
EXPORT_SYMBOL(reserve_lapic_nmi);
EXPORT_SYMBOL(release_lapic_nmi);
EXPORT_SYMBOL(disable_timer_nmi_watchdog);
EXPORT_SYMBOL(enable_timer_nmi_watchdog);
EXPORT_SYMBOL(touch_nmi_watchdog);
......@@ -994,7 +994,7 @@ void abort_timedouts(unsigned long __opaque)
* packets that have a "complete" function are sent here. This way, the
* completion is run out of kernel context, and doesn't block the rest of
* the stack. */
static int khpsbpkt_pid = -1;
static int khpsbpkt_pid = -1, khpsbpkt_kill;
static DECLARE_COMPLETION(khpsbpkt_complete);
struct sk_buff_head hpsbpkt_queue;
static DECLARE_MUTEX_LOCKED(khpsbpkt_sig);
......@@ -1021,6 +1021,9 @@ static int hpsbpkt_thread(void *__hi)
daemonize("khpsbpkt");
while (!down_interruptible(&khpsbpkt_sig)) {
if (khpsbpkt_kill)
break;
while ((skb = skb_dequeue(&hpsbpkt_queue)) != NULL) {
packet = (struct hpsb_packet *)skb->data;
......@@ -1094,7 +1097,9 @@ static void __exit ieee1394_cleanup(void)
bus_unregister(&ieee1394_bus_type);
if (khpsbpkt_pid >= 0) {
kill_proc(khpsbpkt_pid, SIGTERM, 1);
khpsbpkt_kill = 1;
mb();
up(&khpsbpkt_sig);
wait_for_completion(&khpsbpkt_complete);
}
......
......@@ -117,6 +117,7 @@ struct host_info {
struct semaphore reset_sem;
int pid;
char daemon_name[15];
int kill_me;
};
static int nodemgr_bus_match(struct device * dev, struct device_driver * drv);
......@@ -1478,6 +1479,9 @@ static int nodemgr_host_thread(void *__hi)
unsigned int generation = 0;
int i;
if (hi->kill_me)
break;
/* Pause for 1/4 second in 1/16 second intervals,
* to make sure things settle down. */
for (i = 0; i < 4 ; i++) {
......@@ -1678,7 +1682,9 @@ static void nodemgr_remove_host(struct hpsb_host *host)
if (hi) {
if (hi->pid >= 0) {
kill_proc(hi->pid, SIGTERM, 1);
hi->kill_me = 1;
mb();
up(&hi->reset_sem);
wait_for_completion(&hi->exited);
nodemgr_remove_host_dev(&host->device);
}
......
......@@ -316,7 +316,14 @@ int video_register_device(struct video_device *vfd, int type, int nr)
/* pick a minor number */
down(&videodev_lock);
if (-1 == nr) {
if (nr >= 0 && nr < end-base) {
/* use the one the driver asked for */
i = base+nr;
if (NULL != video_device[i]) {
up(&videodev_lock);
return -ENFILE;
}
} else {
/* use first free */
for(i=base;i<end;i++)
if (NULL == video_device[i])
......@@ -325,13 +332,6 @@ int video_register_device(struct video_device *vfd, int type, int nr)
up(&videodev_lock);
return -ENFILE;
}
} else {
/* use the one the driver asked for */
i = base+nr;
if (NULL != video_device[i]) {
up(&videodev_lock);
return -ENFILE;
}
}
video_device[i]=vfd;
vfd->minor=i;
......
......@@ -104,7 +104,7 @@ static struct net_device **dummies;
/* Number of dummy devices to be set up by this module. */
module_param(numdummies, int, 0);
MODULE_PARM_DESC(numdimmies, "Number of dummy psuedo devices");
MODULE_PARM_DESC(numdummies, "Number of dummy pseudo devices");
static int __init dummy_init_one(int index)
{
......
......@@ -150,6 +150,10 @@
/* ExCA IO offset registers */
#define TI113X_IO_OFFSET(map) (0x36+((map)<<1))
/* EnE test register */
#define ENE_TEST_C9 0xc9 /* 8bit */
#define ENE_TEST_C9_TLTENABLE 0x02
#ifdef CONFIG_CARDBUS
/*
......@@ -160,6 +164,7 @@
#define ti_devctl(socket) ((socket)->private[2])
#define ti_diag(socket) ((socket)->private[3])
#define ti_mfunc(socket) ((socket)->private[4])
#define ene_test_c9(socket) ((socket)->private[5])
/*
* These are the TI specific power management handlers.
......@@ -171,6 +176,9 @@ static void ti_save_state(struct yenta_socket *socket)
ti_cardctl(socket) = config_readb(socket, TI113X_CARD_CONTROL);
ti_devctl(socket) = config_readb(socket, TI113X_DEVICE_CONTROL);
ti_diag(socket) = config_readb(socket, TI1250_DIAGNOSTIC);
if (socket->dev->vendor == PCI_VENDOR_ID_ENE)
ene_test_c9(socket) = config_readb(socket, ENE_TEST_C9);
}
static void ti_restore_state(struct yenta_socket *socket)
......@@ -180,6 +188,9 @@ static void ti_restore_state(struct yenta_socket *socket)
config_writeb(socket, TI113X_CARD_CONTROL, ti_cardctl(socket));
config_writeb(socket, TI113X_DEVICE_CONTROL, ti_devctl(socket));
config_writeb(socket, TI1250_DIAGNOSTIC, ti_diag(socket));
if (socket->dev->vendor == PCI_VENDOR_ID_ENE)
config_writeb(socket, ENE_TEST_C9, ene_test_c9(socket));
}
/*
......@@ -591,6 +602,16 @@ static int ti12xx_override(struct yenta_socket *socket)
config_writel(socket, TI113X_SYSTEM_CONTROL, val);
}
/*
* for EnE bridges only: clear testbit TLTEnable. this makes the
* RME Hammerfall DSP sound card working.
*/
if (socket->dev->vendor == PCI_VENDOR_ID_ENE) {
u8 test_c9 = config_readb(socket, ENE_TEST_C9);
test_c9 &= ~ENE_TEST_C9_TLTENABLE;
config_writeb(socket, ENE_TEST_C9, test_c9);
}
/*
* Yenta expects controllers to use CSCINT to route
* CSC interrupts to PCI rather than INTVAL.
......
......@@ -11,6 +11,7 @@
*/
#include <linux/config.h>
#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/init.h>
......@@ -44,6 +45,7 @@ setup_serial_hcdp(void *tablep)
unsigned long iobase;
hcdp_t hcdp;
int gsi, nr;
static char options[16];
#if 0
static int shift_once = 1;
#endif
......@@ -147,7 +149,7 @@ setup_serial_hcdp(void *tablep)
printk(" gsi = %d, baud rate = %lu, bits = %d, clock = %d\n",
gsi, (unsigned long) hcdp_dev->baud, hcdp_dev->bits,
hcdp_dev->clock_rate);
if (hcdp_dev->base_addr.space_id == ACPI_PCICONF_SPACE)
if (HCDP_PCI_UART(hcdp_dev))
printk(" PCI id: %02x:%02x:%02x, vendor ID=0x%x, "
"dev ID=0x%x\n", hcdp_dev->pci_seg,
hcdp_dev->pci_bus, hcdp_dev->pci_dev,
......@@ -179,16 +181,26 @@ setup_serial_hcdp(void *tablep)
printk(KERN_WARNING"warning: No support for PCI serial console\n");
return;
}
if (HCDP_IRQ_SUPPORTED(hcdp_dev)) {
#ifdef CONFIG_IA64
port.irq = acpi_register_irq(gsi, ACPI_ACTIVE_HIGH,
ACPI_EDGE_SENSITIVE);
if (HCDP_PCI_UART(hcdp_dev))
port.irq = acpi_register_irq(gsi,
ACPI_ACTIVE_LOW, ACPI_LEVEL_SENSITIVE);
else
port.irq = acpi_register_irq(gsi,
ACPI_ACTIVE_HIGH, ACPI_EDGE_SENSITIVE);
#else
port.irq = gsi;
port.irq = gsi;
#endif
port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_RESOURCES;
if (gsi)
port.flags |= UPF_AUTO_IRQ;
if (HCDP_PCI_UART(hcdp_dev))
port.flags |= UPF_SHARE_IRQ;
}
port.flags |= UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_RESOURCES;
/*
* Note: the above memset() initializes port.line to 0,
* so we register this port as ttyS0.
......@@ -197,8 +209,15 @@ setup_serial_hcdp(void *tablep)
printk("setup_serial_hcdp(): early_serial_setup() "
"for HCDP serial console port failed. "
"Will try any additional consoles in HCDP.\n");
memset(&port, 0, sizeof(port));
continue;
}
if (efi_uart_console_only()) {
snprintf(options, sizeof(options), "%lun%d",
hcdp_dev->baud, hcdp_dev->bits);
add_preferred_console("ttyS", port.line, options);
}
break;
}
......
......@@ -77,3 +77,6 @@ typedef struct {
u32 num_entries;
hcdp_dev_t hcdp_dev[MAX_HCDP_DEVICES];
} hcdp_t;
#define HCDP_PCI_UART(x) (x->pci_func & 1UL<<7)
#define HCDP_IRQ_SUPPORTED(x) (x->pci_func & 1UL<<6)
......@@ -20,6 +20,7 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/tty.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
......@@ -305,6 +306,72 @@ static void __devexit pci_plx9050_exit(struct pci_dev *dev)
}
}
/* SBS Technologies Inc. PMC-OCTPRO and P-OCTAL cards */
static int
sbs_setup(struct pci_dev *dev, struct pci_board *board,
struct serial_struct *req, int idx)
{
unsigned int bar, offset = board->first_offset;
bar = 0;
if (idx < 4) {
/* first four channels map to 0, 0x100, 0x200, 0x300 */
offset += idx * board->uart_offset;
} else if (idx < 8) {
/* last four channels map to 0x1000, 0x1100, 0x1200, 0x1300 */
offset += idx * board->uart_offset + 0xC00;
} else /* we have only 8 ports on PMC-OCTALPRO */
return 1;
return setup_port(dev, req, bar, offset, board->reg_shift);
}
/*
* This does initialization for PMC OCTALPRO cards:
* maps the device memory, resets the UARTs (needed, bc
* if the module is removed and inserted again, the card
* is in the sleep mode) and enables global interrupt.
*/
/* global control register offset for SBS PMC-OctalPro */
#define OCT_REG_CR_OFF 0x500
static int __devinit sbs_init(struct pci_dev *dev)
{
u8 * p;
p = ioremap(pci_resource_start(dev, 0),pci_resource_len(dev,0));
if (p == NULL)
return -ENOMEM;
/* Set bit-4 Control Register (UART RESET) in to reset the uarts */
writeb(0x10,p + OCT_REG_CR_OFF);
udelay(50);
writeb(0x0,p + OCT_REG_CR_OFF);
/* Set bit-2 (INTENABLE) of Control Register */
writeb(0x4, p + OCT_REG_CR_OFF);
iounmap(p);
return 0;
}
/*
* Disables the global interrupt of PMC-OctalPro
*/
static void __devexit sbs_exit(struct pci_dev *dev)
{
u8 * p;
p = ioremap(pci_resource_start(dev, 0),pci_resource_len(dev,0));
if (p != NULL) {
writeb(0, p + OCT_REG_CR_OFF);
}
iounmap(p);
}
/*
* SIIG serial cards have an PCI interface chip which also controls
* the UART clocking frequency. Each UART can be clocked independently
......@@ -534,6 +601,15 @@ pci_default_setup(struct pci_dev *dev, struct pci_board *board,
return setup_port(dev, req, bar, offset, board->reg_shift);
}
/* This should be in linux/pci_ids.h */
#define PCI_VENDOR_ID_SBSMODULARIO 0x124B
#define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B
#define PCI_DEVICE_ID_OCTPRO 0x0001
#define PCI_SUBDEVICE_ID_OCTPRO232 0x0108
#define PCI_SUBDEVICE_ID_OCTPRO422 0x0208
#define PCI_SUBDEVICE_ID_POCTAL232 0x0308
#define PCI_SUBDEVICE_ID_POCTAL422 0x0408
/*
* Master list of serial port init/setup/exit quirks.
* This does not describe the general nature of the port.
......@@ -618,6 +694,55 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.setup = pci_default_setup,
.exit = __devexit_p(pci_plx9050_exit),
},
/*
* SBS Technologies, Inc., PMC-OCTALPRO 232
*/
{
.vendor = PCI_VENDOR_ID_SBSMODULARIO,
.device = PCI_DEVICE_ID_OCTPRO,
.subvendor = PCI_SUBVENDOR_ID_SBSMODULARIO,
.subdevice = PCI_SUBDEVICE_ID_OCTPRO232,
.init = sbs_init,
.setup = sbs_setup,
.exit = sbs_exit
},
/*
* SBS Technologies, Inc., PMC-OCTALPRO 422
*/
{
.vendor = PCI_VENDOR_ID_SBSMODULARIO,
.device = PCI_DEVICE_ID_OCTPRO,
.subvendor = PCI_SUBVENDOR_ID_SBSMODULARIO,
.subdevice = PCI_SUBDEVICE_ID_OCTPRO422,
.init = sbs_init,
.setup = sbs_setup,
.exit = sbs_exit
},
/*
* SBS Technologies, Inc., P-Octal 232
*/
{
.vendor = PCI_VENDOR_ID_SBSMODULARIO,
.device = PCI_DEVICE_ID_OCTPRO,
.subvendor = PCI_SUBVENDOR_ID_SBSMODULARIO,
.subdevice = PCI_SUBDEVICE_ID_POCTAL232,
.init = sbs_init,
.setup = sbs_setup,
.exit = sbs_exit
},
/*
* SBS Technologies, Inc., P-Octal 422
*/
{
.vendor = PCI_VENDOR_ID_SBSMODULARIO,
.device = PCI_DEVICE_ID_OCTPRO,
.subvendor = PCI_SUBVENDOR_ID_SBSMODULARIO,
.subdevice = PCI_SUBDEVICE_ID_POCTAL422,
.init = sbs_init,
.setup = sbs_setup,
.exit = sbs_exit
},
/*
* SIIG cards.
* It is not clear whether these could be collapsed.
......@@ -944,8 +1069,19 @@ enum pci_board_num_t {
pbn_computone_4,
pbn_computone_6,
pbn_computone_8,
pbn_sbsxrsio,
};
/*
* uart_offset - the space between channels
* reg_shift - describes how the UART registers are mapped
* to PCI memory by the card.
* For example IER register on SBS, Inc. PMC-OctPro is located at
* offset 0x10 from the UART base, while UART_IER is defined as 1
* in include/linux/serial_reg.h,
* see first lines of serial_in() and serial_out() in 8250.c
*/
static struct pci_board pci_boards[] __devinitdata = {
[pbn_default] = {
.flags = FL_BASE0,
......@@ -1348,6 +1484,13 @@ static struct pci_board pci_boards[] __devinitdata = {
.reg_shift = 2,
.first_offset = 0x200,
},
[pbn_sbsxrsio] = {
.flags = FL_BASE0,
.num_ports = 8,
.base_baud = 460800,
.uart_offset = 256,
.reg_shift = 4,
}
};
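To make the reg_shift comment above concrete: with the pbn_sbsxrsio entry's .reg_shift = 4 and .uart_offset = 256, the 8250 core shifts each register index by reg_shift, and sbs_setup() spaces the eight channels 0x100 apart with a 0xC00 jump after the fourth. Two worked lines (illustrative):

reg_off  = UART_IER << 4;			/* 1 << 4 == 0x10, the IER offset quoted above */
chan_off = idx < 4 ? idx * 0x100		/* ports 0-3 at 0x000, 0x100, 0x200, 0x300     */
		   : idx * 0x100 + 0xc00;	/* ports 4-7 at 0x1000, 0x1100, 0x1200, 0x1300 */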
/*
......@@ -1755,27 +1898,44 @@ static struct pci_device_id serial_pci_tbl[] = {
0x10b5, 0x106a, 0, 0,
pbn_plx_romulus },
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_QSC100,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b1_4_115200 },
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_DSC100,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b1_2_115200 },
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_ESC100D,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b1_8_115200 },
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_ESC100M,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b1_8_115200 },
{ PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_OXSEMI_16PCI954,
PCI_VENDOR_ID_SPECIALIX, PCI_SUBDEVICE_ID_SPECIALIX_SPEED4, 0, 0,
PCI_VENDOR_ID_SPECIALIX, PCI_SUBDEVICE_ID_SPECIALIX_SPEED4, 0, 0,
pbn_b0_4_921600 },
{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b0_4_115200 },
{ PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI952,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b0_bt_2_921600 },
/*
* SBS Technologies, Inc. P-Octal and PMC-OCTPRO cards,
* from skokodyn@yahoo.com
*/
{ PCI_VENDOR_ID_SBSMODULARIO, PCI_DEVICE_ID_OCTPRO,
PCI_SUBVENDOR_ID_SBSMODULARIO, PCI_SUBDEVICE_ID_OCTPRO232, 0, 0,
pbn_sbsxrsio },
{ PCI_VENDOR_ID_SBSMODULARIO, PCI_DEVICE_ID_OCTPRO,
PCI_SUBVENDOR_ID_SBSMODULARIO, PCI_SUBDEVICE_ID_OCTPRO422, 0, 0,
pbn_sbsxrsio },
{ PCI_VENDOR_ID_SBSMODULARIO, PCI_DEVICE_ID_OCTPRO,
PCI_SUBVENDOR_ID_SBSMODULARIO, PCI_SUBDEVICE_ID_POCTAL232, 0, 0,
pbn_sbsxrsio },
{ PCI_VENDOR_ID_SBSMODULARIO, PCI_DEVICE_ID_OCTPRO,
PCI_SUBVENDOR_ID_SBSMODULARIO, PCI_SUBDEVICE_ID_POCTAL422, 0, 0,
pbn_sbsxrsio },
/*
* Digitan DS560-558, from jimd@esoft.com
*/
......
......@@ -982,7 +982,9 @@ static int dtSplitUp(tid_t tid,
split->pxdlist = &pxdlist;
rc = dtSplitRoot(tid, ip, split, &rmp);
DT_PUTPAGE(rmp);
if (!rc)
DT_PUTPAGE(rmp);
DT_PUTPAGE(smp);
goto freeKeyName;
......@@ -1876,6 +1878,9 @@ static int dtSplitRoot(tid_t tid,
xlen = lengthPXD(pxd);
xsize = xlen << JFS_SBI(sb)->l2bsize;
rmp = get_metapage(ip, rbn, xsize, 1);
if (!rmp)
return -EIO;
rp = rmp->data;
BT_MARK_DIRTY(rmp, ip);
......
......@@ -176,7 +176,6 @@ static void dtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck);
static void mapLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
struct tlock * tlck);
static void txAbortCommit(struct commit * cd);
static void txAllocPMap(struct inode *ip, struct maplock * maplock,
struct tblock * tblk);
static void txForce(struct tblock * tblk);
......@@ -1315,7 +1314,7 @@ int txCommit(tid_t tid, /* transaction identifier */
out:
if (rc != 0)
txAbortCommit(&cd);
txAbort(tid, 1);
TheEnd:
jfs_info("txCommit: tid = %d, returning %d", tid, rc);
......@@ -2647,64 +2646,6 @@ void txAbort(tid_t tid, int dirty)
return;
}
/*
* txAbortCommit()
*
* function: abort commit.
*
* frees tlocks of transaction; line-locks and segment locks for all
* segments in comdata structure. frees malloc storage
* sets state of file-system to FM_MDIRTY in super-block.
* log age of page-frames in memory for which caller has
* are reset to 0 (to avoid logwarap).
*/
static void txAbortCommit(struct commit * cd)
{
struct tblock *tblk;
tid_t tid;
lid_t lid, next;
struct metapage *mp;
jfs_warn("txAbortCommit: cd:0x%p", cd);
/*
* free tlocks of the transaction
*/
tid = cd->tid;
tblk = tid_to_tblock(tid);
for (lid = tblk->next; lid; lid = next) {
next = lid_to_tlock(lid)->next;
mp = lid_to_tlock(lid)->mp;
if (mp) {
mp->lid = 0;
/*
* reset lsn of page to avoid logwarap;
*/
if (mp->xflag & COMMIT_PAGE)
LogSyncRelease(mp);
}
/* insert tlock at head of freelist */
TXN_LOCK();
txLockFree(lid);
TXN_UNLOCK();
}
tblk->next = tblk->last = 0;
/* free the transaction block */
txEnd(tid);
/*
* mark filesystem dirty
*/
jfs_error(cd->sb, "txAbortCommit");
}
/*
* txLazyCommit(void)
*
......
......@@ -792,14 +792,14 @@ static int jfs_link(struct dentry *old_dentry,
goto out;
if ((rc = dtSearch(dir, &dname, &ino, &btstack, JFS_CREATE)))
goto out;
goto free_dname;
/*
* create entry for new link in parent directory
*/
ino = ip->i_ino;
if ((rc = dtInsert(tid, dir, &dname, &ino, &btstack)))
goto out;
goto free_dname;
/* update object inode */
ip->i_nlink++; /* for new link */
......@@ -812,6 +812,9 @@ static int jfs_link(struct dentry *old_dentry,
iplist[1] = dir;
rc = txCommit(tid, 2, &iplist[0], 0);
free_dname:
free_UCSname(&dname);
out:
txEnd(tid);
......
......@@ -81,8 +81,8 @@ extern void smp_local_timer_interrupt (struct pt_regs * regs);
extern void setup_boot_APIC_clock (void);
extern void setup_secondary_APIC_clock (void);
extern void setup_apic_nmi_watchdog (void);
extern void disable_lapic_nmi_watchdog(void);
extern void enable_lapic_nmi_watchdog(void);
extern int reserve_lapic_nmi(void);
extern void release_lapic_nmi(void);
extern void disable_timer_nmi_watchdog(void);
extern void enable_timer_nmi_watchdog(void);
extern void nmi_watchdog_tick (struct pt_regs * regs);
......
......@@ -184,4 +184,10 @@ static inline void dma_cache_sync(void *vaddr, size_t size,
{
consistent_sync(vaddr, size, (int)direction);
}
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
return 0;
}
#endif /* __ASM_PPC_DMA_MAPPING_H */
......@@ -290,6 +290,11 @@ pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr,
/* Nothing to do. */
}
static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
{
return 0;
}
/* Return the index of the PCI controller for device PDEV. */
#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
......
......@@ -15,6 +15,8 @@ struct bit_map {
int size;
int used;
int last_off;
int last_size;
int first_free;
};
extern int bit_map_string_get(struct bit_map *t, int len, int align);
......
......@@ -11,6 +11,7 @@
#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <asm/types.h>
#ifdef CONFIG_SUN4
#include <asm/pgtsun4.h>
......@@ -401,9 +402,14 @@ BTFIXUPDEF_CALL(void, sparc_unmapiorange, unsigned long, unsigned int)
extern int invalid_segment;
/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 2) & 0x7f)
#define __swp_offset(x) (((x).val >> 9) & 0x3ffff)
#define __swp_entry(type,offset) ((swp_entry_t) { (((type) & 0x7f) << 2) | (((offset) & 0x3ffff) << 9) })
BTFIXUPDEF_CALL(unsigned long, __swp_type, swp_entry_t)
BTFIXUPDEF_CALL(unsigned long, __swp_offset, swp_entry_t)
BTFIXUPDEF_CALL(swp_entry_t, __swp_entry, unsigned long, unsigned long)
#define __swp_type(__x) BTFIXUP_CALL(__swp_type)(__x)
#define __swp_offset(__x) BTFIXUP_CALL(__swp_offset)(__x)
#define __swp_entry(__type,__off) BTFIXUP_CALL(__swp_entry)(__type,__off)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
......
......@@ -90,6 +90,22 @@
#define SRMMU_CHG_MASK (0xffffff00 | SRMMU_REF | SRMMU_DIRTY)
/* SRMMU swap entry encoding
*
* We use 5 bits for the type and 19 for the offset. This gives us
* 32 swapfiles of 4GB each. Encoding looks like:
*
* oooooooooooooooooootttttRRRRRRRR
* fedcba9876543210fedcba9876543210
*
* The bottom 8 bits are reserved for protection and status bits, especially
* FILE and PRESENT.
*/
#define SRMMU_SWP_TYPE_MASK 0x1f
#define SRMMU_SWP_TYPE_SHIFT SRMMU_PTE_FILE_SHIFT
#define SRMMU_SWP_OFF_MASK 0x7ffff
#define SRMMU_SWP_OFF_SHIFT (SRMMU_PTE_FILE_SHIFT + 5)
/* Some day I will implement true fine grained access bits for
* user pages because the SRMMU gives us the capabilities to
* enforce all the protection levels that vma's can have.
......
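For readers decoding the layout comment above, a round trip through the new macros looks like this; it simply mirrors srmmu_swp_entry()/srmmu_swp_type()/srmmu_swp_offset() added to srmmu.c earlier in this diff, and the 0x1f type mask is what yields the 32 swap areas the comment mentions (illustrative):

val  = ((type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT) |
       ((off  & SRMMU_SWP_OFF_MASK)  << SRMMU_SWP_OFF_SHIFT);
type = (val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
off  = (val >> SRMMU_SWP_OFF_SHIFT)  & SRMMU_SWP_OFF_MASK;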
......@@ -74,6 +74,21 @@
#define SUN4C_PAGE_KERNEL __pgprot(_SUN4C_READABLE|_SUN4C_WRITEABLE|\
_SUN4C_PAGE_DIRTY|_SUN4C_PAGE_PRIV)
/* SUN4C swap entry encoding
*
* We use 5 bits for the type and 19 for the offset. This gives us
* 32 swapfiles of 4GB each. Encoding looks like:
*
* RRRRRRRRooooooooooooooooooottttt
* fedcba9876543210fedcba9876543210
*
* The top 8 bits are reserved for protection and status bits, especially
* FILE and PRESENT.
*/
#define SUN4C_SWP_TYPE_MASK 0x1f
#define SUN4C_SWP_OFF_MASK 0x7ffff
#define SUN4C_SWP_OFF_SHIFT 5
#ifndef __ASSEMBLY__
static inline unsigned long sun4c_get_synchronous_error(void)
......
......@@ -75,8 +75,8 @@ extern void smp_local_timer_interrupt (struct pt_regs * regs);
extern void setup_boot_APIC_clock (void);
extern void setup_secondary_APIC_clock (void);
extern void setup_apic_nmi_watchdog (void);
extern void disable_lapic_nmi_watchdog(void);
extern void enable_lapic_nmi_watchdog(void);
extern int reserve_lapic_nmi(void);
extern void release_lapic_nmi(void);
extern void disable_timer_nmi_watchdog(void);
extern void enable_timer_nmi_watchdog(void);
extern void nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
......
......@@ -212,6 +212,9 @@ typedef efi_status_t efi_set_virtual_address_map_t (unsigned long memory_map_siz
#define UGA_IO_PROTOCOL_GUID \
EFI_GUID( 0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0xb, 0x7, 0xa2 )
#define EFI_GLOBAL_VARIABLE_GUID \
EFI_GUID( 0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c )
typedef struct {
efi_guid_t guid;
unsigned long table;
......@@ -294,6 +297,7 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos
extern u64 efi_get_iobase (void);
extern u32 efi_mem_type (unsigned long phys_addr);
extern u64 efi_mem_attributes (unsigned long phys_addr);
extern int __init efi_uart_console_only (void);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource);
extern efi_status_t phys_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc);
......@@ -322,6 +326,49 @@ extern struct efi_memory_map memmap;
#define EFI_VARIABLE_BOOTSERVICE_ACCESS 0x0000000000000002
#define EFI_VARIABLE_RUNTIME_ACCESS 0x0000000000000004
/*
* EFI Device Path information
*/
#define EFI_DEV_HW 0x01
#define EFI_DEV_PCI 1
#define EFI_DEV_PCCARD 2
#define EFI_DEV_MEM_MAPPED 3
#define EFI_DEV_VENDOR 4
#define EFI_DEV_CONTROLLER 5
#define EFI_DEV_ACPI 0x02
#define EFI_DEV_BASIC_ACPI 1
#define EFI_DEV_EXPANDED_ACPI 2
#define EFI_DEV_MSG 0x03
#define EFI_DEV_MSG_ATAPI 1
#define EFI_DEV_MSG_SCSI 2
#define EFI_DEV_MSG_FC 3
#define EFI_DEV_MSG_1394 4
#define EFI_DEV_MSG_USB 5
#define EFI_DEV_MSG_USB_CLASS 15
#define EFI_DEV_MSG_I20 6
#define EFI_DEV_MSG_MAC 11
#define EFI_DEV_MSG_IPV4 12
#define EFI_DEV_MSG_IPV6 13
#define EFI_DEV_MSG_INFINIBAND 9
#define EFI_DEV_MSG_UART 14
#define EFI_DEV_MSG_VENDOR 10
#define EFI_DEV_MEDIA 0x04
#define EFI_DEV_MEDIA_HARD_DRIVE 1
#define EFI_DEV_MEDIA_CDROM 2
#define EFI_DEV_MEDIA_VENDOR 3
#define EFI_DEV_MEDIA_FILE 4
#define EFI_DEV_MEDIA_PROTOCOL 5
#define EFI_DEV_BIOS_BOOT 0x05
#define EFI_DEV_END_PATH 0x7F
#define EFI_DEV_END_PATH2 0xFF
#define EFI_DEV_END_INSTANCE 0x01
#define EFI_DEV_END_ENTIRE 0xFF
struct efi_generic_dev_path {
u8 type;
u8 sub_type;
u16 length;
} __attribute ((packed));
/*
* efi_dir is allocated in arch/ia64/kernel/efi.c.
......
......@@ -7,6 +7,7 @@
#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
struct workqueue_struct;
......@@ -75,8 +76,12 @@ extern void init_workqueues(void);
*/
static inline int cancel_delayed_work(struct work_struct *work)
{
return del_timer_sync(&work->timer);
int ret;
ret = del_timer_sync(&work->timer);
if (ret)
clear_bit(0, &work->pending);
return ret;
}
#endif
......@@ -231,11 +231,6 @@ const char *sctp_pname(const sctp_subtype_t); /* primitives */
/* This is a table of printable names of sctp_state_t's. */
extern const char *sctp_state_tbl[], *sctp_evttype_tbl[], *sctp_status_tbl[];
/* SCTP reachability state for each address */
#define SCTP_ADDR_NOHB 4
#define SCTP_ADDR_REACHABLE 2
#define SCTP_ADDR_NOT_REACHABLE 1
/* Maximum chunk length considering padding requirements. */
enum { SCTP_MAX_CHUNK_LEN = ((1<<16) - sizeof(__u32)) };
......
/* SCTP kernel reference Implementation
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2003 International Business Machines, Corp.
* Copyright (c) 2002 Intel Corp.
*
* This file is part of the SCTP kernel reference Implementation
......@@ -246,7 +246,7 @@ struct sctp_paddr_change {
* event that happened to the address. They include:
*/
enum sctp_spc_state {
SCTP_ADDR_REACHABLE,
SCTP_ADDR_AVAILABLE,
SCTP_ADDR_UNREACHABLE,
SCTP_ADDR_REMOVED,
SCTP_ADDR_ADDED,
......
......@@ -215,9 +215,11 @@ static void mqueue_delete_inode(struct inode *inode)
clear_inode(inode);
spin_lock(&mq_lock);
queues_count--;
spin_unlock(&mq_lock);
if (info->messages) {
spin_lock(&mq_lock);
queues_count--;
spin_unlock(&mq_lock);
}
}
static int mqueue_create(struct inode *dir, struct dentry *dentry,
......@@ -811,9 +813,9 @@ asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
wait.msg = (void *) msg_ptr;
wait.state = STATE_NONE;
ret = wq_sleep(info, SEND, timeout, &wait);
if (ret < 0)
free_msg(msg_ptr);
}
if (ret < 0)
free_msg(msg_ptr);
} else {
receiver = wq_get_first_waiter(info, RECV);
if (receiver) {
......
......@@ -240,6 +240,7 @@ __setup("log_buf_len=", log_buf_len_setup);
* 7 -- Enable printk's to console
* 8 -- Set level of messages printed to console
* 9 -- Return number of unread characters in the log buffer
* 10 -- Return size of the log buffer
*/
int do_syslog(int type, char __user * buf, int len)
{
......@@ -359,6 +360,9 @@ int do_syslog(int type, char __user * buf, int len)
case 9: /* Number of chars in the log buffer */
error = log_end - log_start;
break;
case 10: /* Size of the log buffer */
error = log_buf_len;
break;
default:
error = -EINVAL;
break;
......
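The new case 10 lets userspace size its buffer before draining the ring. A small userspace sketch using the glibc klogctl() wrapper (command 3 is the existing non-destructive read):

#include <stdlib.h>
#include <sys/klog.h>

int main(void)
{
	int len = klogctl(10, NULL, 0);		/* new command: size of the kernel log buffer */
	char *buf = malloc(len);

	if (buf != NULL)
		klogctl(3, buf, len);		/* read everything currently in the buffer */
	return 0;
}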
......@@ -71,10 +71,11 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
sem->activity = -1;
list_del(&waiter->list);
mb();
tsk = waiter->task;
mb();
waiter->task = NULL;
wake_up_process(tsk);
put_task_struct(tsk);
goto out;
}
......@@ -85,10 +86,11 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
struct list_head *next = waiter->list.next;
list_del(&waiter->list);
mb();
tsk = waiter->task;
mb();
waiter->task = NULL;
wake_up_process(tsk);
put_task_struct(tsk);
woken++;
if (list_empty(&sem->wait_list))
break;
......@@ -115,10 +117,11 @@ static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *
waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
list_del(&waiter->list);
mb();
tsk = waiter->task;
mb();
waiter->task = NULL;
wake_up_process(tsk);
put_task_struct(tsk);
return sem;
}
......@@ -147,6 +150,7 @@ void fastcall __down_read(struct rw_semaphore *sem)
/* set up my own style of waitqueue */
waiter.task = tsk;
waiter.flags = RWSEM_WAITING_FOR_READ;
get_task_struct(tsk);
list_add_tail(&waiter.list,&sem->wait_list);
......@@ -215,6 +219,7 @@ void fastcall __down_write(struct rw_semaphore *sem)
/* set up my own style of waitqueue */
waiter.task = tsk;
waiter.flags = RWSEM_WAITING_FOR_WRITE;
get_task_struct(tsk);
list_add_tail(&waiter.list,&sem->wait_list);
......
......@@ -65,10 +65,11 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
goto readers_only;
list_del(&waiter->list);
mb();
tsk = waiter->task;
mb();
waiter->task = NULL;
wake_up_process(tsk);
put_task_struct(tsk);
goto out;
/* don't want to wake any writers */
......@@ -102,10 +103,11 @@ static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int
for (; loop>0; loop--) {
waiter = list_entry(next,struct rwsem_waiter,list);
next = waiter->list.next;
mb();
tsk = waiter->task;
mb();
waiter->task = NULL;
wake_up_process(tsk);
put_task_struct(tsk);
}
sem->wait_list.next = next;
......@@ -137,6 +139,7 @@ static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore
/* set up my own style of waitqueue */
spin_lock(&sem->wait_lock);
waiter->task = tsk;
get_task_struct(tsk);
list_add_tail(&waiter->list,&sem->wait_list);
......
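The ordering in the rwsem hunks above is subtle enough to spell out: the sleeper may return and pop its stack-based waiter the moment it sees waiter->task == NULL, so the waker must read the task pointer, and already hold the reference taken by get_task_struct() at queue time, before publishing the NULL. Annotated, the waker-side sequence becomes:

tsk = waiter->task;	/* read while the waiter structure is guaranteed to exist     */
mb();			/* make earlier accesses visible before releasing the sleeper */
waiter->task = NULL;	/* from here on the sleeper's stack frame may vanish          */
wake_up_process(tsk);	/* safe: the reference taken at queue time keeps tsk alive    */
put_task_struct(tsk);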
......@@ -240,12 +240,6 @@ int filemap_write_and_wait(struct address_space *mapping)
}
/*
* This adds a page to the page cache, starting out as locked, unreferenced,
* not uptodate and with no errors.
*
* This function is used for two things: adding newly allocated pagecache
* pages and for moving existing anon pages into swapcache.
*
* This function is used to add newly allocated pagecache pages:
* the page is new, so we can just run SetPageLocked() against it.
* The other page state flags were set by rmqueue().
......
......@@ -137,7 +137,8 @@ move_one_page(struct vm_area_struct *vma, unsigned long old_addr,
error = -ENOMEM;
pte_unmap_nested(src);
}
pte_unmap(dst);
if (dst)
pte_unmap(dst);
}
spin_unlock(&mm->page_table_lock);
pte_chain_free(pte_chain);
......
......@@ -39,7 +39,7 @@
* Something oopsable to put for now in the page->mapping
* of an anonymous page, to test that it is ignored.
*/
#define ANON_MAPPING_DEBUG ((struct address_space *) 0xADB)
#define ANON_MAPPING_DEBUG ((struct address_space *) 0)
static inline void clear_page_anon(struct page *page)
{
......
......@@ -2,6 +2,11 @@
* Common framework for low-level network console, dump, and debugger code
*
* Sep 8 2003 Matt Mackall <mpm@selenic.com>
*
* based on the netconsole code from:
*
* Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2002 Red Hat, Inc.
*/
#include <linux/smp_lock.h>
......
......@@ -608,7 +608,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
switch (command) {
case SCTP_TRANSPORT_UP:
transport->active = SCTP_ACTIVE;
spc_state = SCTP_ADDR_REACHABLE;
spc_state = SCTP_ADDR_AVAILABLE;
break;
case SCTP_TRANSPORT_DOWN:
......@@ -936,6 +936,9 @@ void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
void sctp_assoc_update(struct sctp_association *asoc,
struct sctp_association *new)
{
struct sctp_transport *trans;
struct list_head *pos, *temp;
/* Copy in new parameters of peer. */
asoc->c = new->c;
asoc->peer.rwnd = new->peer.rwnd;
......@@ -944,20 +947,19 @@ void sctp_assoc_update(struct sctp_association *asoc,
sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_SIZE,
asoc->peer.i.initial_tsn);
/* FIXME:
* Do we need to copy primary_path etc?
*
* More explicitly, addresses may have been removed and
* this needs accounting for.
*/
/* Remove any peer addresses not present in the new association. */
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
trans = list_entry(pos, struct sctp_transport, transports);
if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr))
sctp_assoc_del_peer(asoc, &trans->ipaddr);
}
/* If the case is A (association restart), use
* initial_tsn as next_tsn. If the case is B, use
* current next_tsn in case data sent to peer
* has been discarded and needs retransmission.
*/
if (sctp_state(asoc, ESTABLISHED)) {
if (asoc->state >= SCTP_STATE_ESTABLISHED) {
asoc->next_tsn = new->next_tsn;
asoc->ctsn_ack_point = new->ctsn_ack_point;
asoc->adv_peer_ack_point = new->adv_peer_ack_point;
......@@ -968,6 +970,15 @@ void sctp_assoc_update(struct sctp_association *asoc,
sctp_ssnmap_clear(asoc->ssnmap);
} else {
/* Add any peer addresses from the new association. */
list_for_each(pos, &new->peer.transport_addr_list) {
trans = list_entry(pos, struct sctp_transport,
transports);
if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
sctp_assoc_add_peer(asoc, &trans->ipaddr,
GFP_ATOMIC);
}
asoc->ctsn_ack_point = asoc->next_tsn - 1;
asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
if (!asoc->ssnmap) {
......@@ -976,7 +987,6 @@ void sctp_assoc_update(struct sctp_association *asoc,
new->ssnmap = NULL;
}
}
}
/* Update the retran path for sending a retransmitted packet.
......
......@@ -69,7 +69,7 @@
/* with 24 and 32 Parity Bits", */
/* IEEE Transactions on Communications, Vol.41, No.6, June 1993 */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
__u32 crc_c[256] = {
static const __u32 crc_c[256] = {
0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4,
0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
......