Commit 692dd963 authored by Linus Torvalds

Merge bk://linux-dj.bkbits.net/agpgart

into home.osdl.org:/home/torvalds/v2.5/linux
parents bfba17d3 5af7d0d9
@@ -879,6 +879,8 @@ W: http://www.developer.ibm.com/welcome/netfinity/serveraid.html
S: Supported
IDE DRIVER [GENERAL]
+P: Bartlomiej Zolnierkiewicz
+M: B.Zolnierkiewicz@elka.pw.edu.pl
L: linux-kernel@vger.kernel.org
L: linux-ide@vger.kernel.org
S: Maintained
......
@@ -32,3 +32,6 @@ $(obj)/tftpboot.img: $(obj)/piggyback $(obj)/System.map $(obj)/image FORCE
$(obj)/btfix.s: $(obj)/btfixupprep vmlinux FORCE
	$(call if_changed,btfix)
+clean:
+	rm $(obj)/System.map
@@ -138,29 +138,61 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_SCSI=y
#
-# SCSI support type (disk, tape, CDrom)
+# SCSI support type (disk, tape, CD-ROM)
#
CONFIG_BLK_DEV_SD=y
-CONFIG_SD_EXTRA_DEVS=40
# CONFIG_CHR_DEV_ST is not set
# CONFIG_CHR_DEV_OSST is not set
CONFIG_BLK_DEV_SR=m
# CONFIG_BLK_DEV_SR_VENDOR is not set
-CONFIG_SR_EXTRA_DEVS=2
CONFIG_CHR_DEV_SG=m
#
# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
#
# CONFIG_SCSI_MULTI_LUN is not set
+CONFIG_SCSI_REPORT_LUNS=y
# CONFIG_SCSI_CONSTANTS is not set
# CONFIG_SCSI_LOGGING is not set
#
# SCSI low-level drivers
#
-CONFIG_SCSI_SUNESP=y
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AACRAID is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_AM53C974 is not set
+# CONFIG_SCSI_MEGARAID is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_CPQFCTS is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_EATA_PIO is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_GENERIC_NCR5380 is not set
+# CONFIG_SCSI_GENERIC_NCR5380_MMIO is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_NCR53C8XX is not set
+# CONFIG_SCSI_PCI2000 is not set
+# CONFIG_SCSI_PCI2220I is not set
+# CONFIG_SCSI_QLOGIC_ISP is not set
+# CONFIG_SCSI_QLOGIC_FC is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
CONFIG_SCSI_QLOGICPTI=m
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_U14_34F is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_DEBUG is not set
+CONFIG_SCSI_SUNESP=y
#
# Fibre Channel support
......
@@ -12,8 +12,9 @@ obj-y := entry.o wof.o wuf.o etrap.o rtrap.o traps.o $(IRQ_OBJS) \
sys_sparc.o sunos_asm.o systbls.o \
time.o windows.o cpu.o devices.o sclow.o \
tadpole.o tick14.o ptrace.o sys_solaris.o \
-unaligned.o muldiv.o pcic.o semaphore.o sparc_ksyms.o
+unaligned.o muldiv.o semaphore.o sparc_ksyms.o
+obj-$(CONFIG_PCI) += pcic.o
obj-$(CONFIG_SUN4) += sun4setup.o
obj-$(CONFIG_SMP) += trampoline.o smp.o sun4m_smp.o sun4d_smp.o
obj-$(CONFIG_SUN_AUXIO) += auxio.o
......
@@ -35,27 +35,6 @@
#include <asm/timer.h>
#include <asm/uaccess.h>
-#ifndef CONFIG_PCI
-asmlinkage int sys_pciconfig_read(unsigned long bus,
-unsigned long dfn,
-unsigned long off,
-unsigned long len,
-unsigned char *buf)
-{
-return -EINVAL;
-}
-asmlinkage int sys_pciconfig_write(unsigned long bus,
-unsigned long dfn,
-unsigned long off,
-unsigned long len,
-unsigned char *buf)
-{
-return -EINVAL;
-}
-#else
struct pci_fixup pcibios_fixups[] = {
{ 0 }
@@ -1044,5 +1023,3 @@ void insl(unsigned long addr, void *dst, unsigned long count) {
}
subsys_initcall(pcic_init);
-#endif
@@ -287,13 +287,22 @@ void show_regs(struct pt_regs *r)
rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]);
}
+/*
+* The show_stack is an external API which we do not use ourselves.
+* The oops is printed in die_if_kernel.
+*/
void show_stack(struct task_struct *tsk, unsigned long *_ksp)
{
unsigned long pc, fp;
-unsigned long task_base = (unsigned long) tsk;
+unsigned long task_base;
struct reg_window *rw;
int count = 0;
+if (tsk != NULL)
+task_base = (unsigned long) tsk->thread_info;
+else
+task_base = (unsigned long) current_thread_info();
fp = (unsigned long) _ksp;
do {
/* Bogus frame pointer? */
@@ -308,13 +317,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
printk("\n");
}
-void show_trace_task(struct task_struct *tsk)
-{
-if (tsk)
-show_stack(tsk,
-(unsigned long *) tsk->thread_info->ksp);
-}
/*
* Note: sparc64 has a pretty intricated thread_saved_pc, check it out.
*/
......
@@ -123,7 +123,10 @@ asmlinkage int sys_ipc (uint call, int first, int second, int third, void __user
if (call <= SEMCTL)
switch (call) {
case SEMOP:
-err = sys_semop (first, (struct sembuf __user *)ptr, second);
+err = sys_semtimedop (first, (struct sembuf __user *)ptr, second, NULL);
+goto out;
+case SEMTIMEDOP:
+err = sys_semtimedop (first, (struct sembuf __user *)ptr, second, (const struct timespec __user *) fifth);
goto out;
case SEMGET:
err = sys_semget (first, second, third);
......
@@ -148,6 +148,9 @@ extern unsigned long fix_kmap_end;
/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
+/* The context table is a nocache user with the biggest alignment needs. */
+#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)
void *srmmu_nocache_pool;
void *srmmu_nocache_bitmap;
static struct bit_map srmmu_nocache_map;
@@ -320,6 +323,10 @@ static unsigned long __srmmu_get_nocache(int size, int align)
printk("Size 0x%x unaligned int nocache request\n", size);
size += SRMMU_NOCACHE_BITMAP_SHIFT-1;
}
+if (align > SRMMU_NOCACHE_ALIGN_MAX) {
+BUG();
+return 0;
+}
offset = bit_map_string_get(&srmmu_nocache_map,
size >> SRMMU_NOCACHE_BITMAP_SHIFT,
@@ -425,7 +432,8 @@ void srmmu_nocache_init(void)
bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;
-srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size, PAGE_SIZE, 0UL);
+srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
+SRMMU_NOCACHE_ALIGN_MAX, 0UL);
memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
......
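For scale, the new alignment constant introduced above works out roughly as follows (a worked example; the 4-byte size of ctxd_t is an assumption about the sparc32 SRMMU context descriptor, not something stated in this diff):

SRMMU_NOCACHE_ALIGN_MAX = sizeof(ctxd_t) * SRMMU_MAX_CONTEXTS ~= 4 * 65536 = 256 KB

That is why srmmu_nocache_init() now passes SRMMU_NOCACHE_ALIGN_MAX instead of PAGE_SIZE to __alloc_bootmem(), and why __srmmu_get_nocache() can BUG() on any request that asks for more alignment than the pool itself guarantees.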
@@ -924,13 +924,6 @@ config DEBUG_SPINLOCK
best used in conjunction with the NMI watchdog so that spinlock
deadlocks are also debuggable.
-config KALLSYMS
-bool "Load all symbols for debugging/ksymoops"
-help
-Say Y here to let the kernel print out symbolic crash information and
-symbolic stack backtraces. This increases the size of the kernel
-somewhat, as all symbols have to be loaded into the kernel image.
config DEBUG_SPINLOCK_SLEEP
bool "Sleep-inside-spinlock checking"
help
......
@@ -1749,8 +1749,8 @@ ret_sys_call:
cmp %o0, -ENOIOCTLCMD
sllx %g2, 32, %g2
bgeu,pn %xcc, 1f
andcc %l0, _TIF_SYSCALL_TRACE, %l6
+80:
andn %g3, %g2, %g3 /* System call success, clear Carry condition code. */
stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
bne,pn %icc, linux_syscall_trace2
@@ -1760,9 +1760,21 @@ ret_sys_call:
stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
1:
+/* Really a failure? Check if force_successful_syscall_return()
+* was invoked.
+*/
+ldx [%curptr + TI_FLAGS], %l0 ! Load
+andcc %l0, _TIF_SYSCALL_SUCCESS, %g0
+be,pt %icc, 1f
+andcc %l0, _TIF_SYSCALL_TRACE, %l6
+andn %l0, _TIF_SYSCALL_SUCCESS, %l0
+ba,pt %xcc, 80b
+stx %l0, [%curptr + TI_FLAGS]
/* System call failure, set Carry condition code.
* Also, get abs(errno) to return to the process.
*/
+1:
sub %g0, %o0, %o0
or %g3, %g2, %g3
stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
......
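The new _TIF_SYSCALL_SUCCESS check above lets a system call hand back a value that happens to fall in the negative-errno range without the exit path treating it as a failure. A minimal sketch of how a C-level syscall would use the force_successful_syscall_return() hook added later in this commit (the syscall name and its return value are hypothetical, for illustration only):

/* Hypothetical example: a call whose legitimate result can look like -errno. */
asmlinkage long sys_example_query(void)
{
	long value = -515;	/* a real datum, not an error code */

	/* Ask the syscall exit path not to set the carry bit or negate the
	 * result: on sparc64 this sets TIF_SYSCALL_SUCCESS, which the
	 * ret_sys_call code above now tests and clears.
	 */
	force_successful_syscall_return();
	return value;
}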
@@ -46,8 +46,10 @@ asmlinkage unsigned long sys_getpagesize(void)
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
-struct vm_area_struct * vmm;
+struct mm_struct *mm = current->mm;
+struct vm_area_struct * vma;
unsigned long task_size = TASK_SIZE;
+unsigned long start_addr;
int do_color_align;
if (flags & MAP_FIXED) {
@@ -63,30 +65,54 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
task_size = 0xf0000000UL;
if (len > task_size || len > -PAGE_OFFSET)
return -ENOMEM;
-if (!addr)
-addr = TASK_UNMAPPED_BASE;
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
+if (addr) {
+if (do_color_align)
+addr = COLOUR_ALIGN(addr, pgoff);
+else
+addr = PAGE_ALIGN(addr);
+vma = find_vma(mm, addr);
+if (task_size - len >= addr &&
+(!vma || addr + len <= vma->vm_start))
+return addr;
+}
+start_addr = addr = mm->free_area_cache;
+task_size -= len;
+full_search:
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
-task_size -= len;
-for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
+for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-/* At this point: (!vmm || addr < vmm->vm_end). */
+/* At this point: (!vma || addr < vma->vm_end). */
if (addr < PAGE_OFFSET && -PAGE_OFFSET - len < addr) {
addr = PAGE_OFFSET;
-vmm = find_vma(current->mm, PAGE_OFFSET);
+vma = find_vma(mm, PAGE_OFFSET);
}
-if (task_size < addr)
+if (task_size < addr) {
+if (start_addr != TASK_UNMAPPED_BASE) {
+start_addr = addr = TASK_UNMAPPED_BASE;
+goto full_search;
+}
return -ENOMEM;
-if (!vmm || addr + len <= vmm->vm_start)
+}
+if (!vma || addr + len <= vma->vm_start) {
+/*
+* Remember the place where we stopped the search:
+*/
+mm->free_area_cache = addr + len;
return addr;
-addr = vmm->vm_end;
+}
+addr = vma->vm_end;
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
}
@@ -182,7 +208,10 @@ asmlinkage int sys_ipc (unsigned call, int first, int second, unsigned long thir
if (call <= SEMCTL)
switch (call) {
case SEMOP:
-err = sys_semop (first, (struct sembuf *)ptr, second);
+err = sys_semtimedop (first, (struct sembuf *)ptr, second, NULL);
+goto out;
+case SEMTIMEDOP:
+err = sys_semtimedop (first, (struct sembuf *)ptr, second, (const struct timespec *) fifth);
goto out;
case SEMGET:
err = sys_semget (first, second, (int)third);
......
@@ -480,7 +480,7 @@ static int do_sys32_semctl(int first, int second, int third, void *uptr)
static int do_sys32_msgsnd (int first, int second, int third, void *uptr)
{
-struct msgbuf *p = kmalloc (second + sizeof (struct msgbuf) + 4, GFP_USER);
+struct msgbuf *p = kmalloc (second + sizeof (struct msgbuf), GFP_USER);
struct msgbuf32 *up = (struct msgbuf32 *)uptr;
mm_segment_t old_fs;
int err;
@@ -522,12 +522,12 @@ static int do_sys32_msgrcv (int first, int second, int msgtyp, int third,
msgtyp = ipck.msgtyp;
}
err = -ENOMEM;
-p = kmalloc (second + sizeof (struct msgbuf) + 4, GFP_USER);
+p = kmalloc (second + sizeof (struct msgbuf), GFP_USER);
if (!p)
goto out;
old_fs = get_fs ();
set_fs (KERNEL_DS);
-err = sys_msgrcv (first, p, second + 4, msgtyp, third);
+err = sys_msgrcv (first, p, second, msgtyp, third);
set_fs (old_fs);
if (err < 0)
goto free_then_out;
@@ -736,6 +736,22 @@ static int do_sys32_shmctl (int first, int second, void *uptr)
return err;
}
+static int sys32_semtimedop(int semid, struct sembuf *tsems, int nsems,
+const struct compat_timespec *timeout32)
+{
+struct compat_timespec t32;
+struct timespec *t64 = compat_alloc_user_space(sizeof(*t64));
+if (copy_from_user(&t32, timeout32, sizeof(t32)))
+return -EFAULT;
+if (put_user(t32.tv_sec, &t64->tv_sec) ||
+put_user(t32.tv_nsec, &t64->tv_nsec))
+return -EFAULT;
+return sys_semtimedop(semid, tsems, nsems, t64);
+}
asmlinkage int sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
{
int version, err;
@@ -747,8 +763,10 @@ asmlinkage int sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u
switch (call) {
case SEMOP:
/* struct sembuf is the same on 32 and 64bit :)) */
-err = sys_semop (first, (struct sembuf *)AA(ptr), second);
+err = sys_semtimedop (first, (struct sembuf *)AA(ptr), second, NULL);
goto out;
+case SEMTIMEDOP:
+err = sys32_semtimedop (first, (struct sembuf *)AA(ptr), second, (const struct compat_timespec *) AA(fifth));
case SEMGET:
err = sys_semget (first, second, third);
goto out;
......
@@ -176,6 +176,9 @@ static int ti_override(struct yenta_socket *socket)
if (new != reg)
exca_writeb(socket, I365_INTCTL, new);
+#if 0
+/* THIS CAUSES HANGS! Disabled for now, do not know why */
/*
* If ISA interrupts don't work, then fall back to routing card
* interrupts to the PCI interrupt of the socket.
@@ -195,6 +198,7 @@ static int ti_override(struct yenta_socket *socket)
config_writel(socket, TI122X_IRQMUX, irqmux);
config_writeb(socket, TI113X_DEVICE_CONTROL, devctl);
}
+#endif
socket->socket.ss_entry->init = ti_init;
return 0;
......
@@ -14,6 +14,7 @@ struct ipc_kludge {
#define SEMOP 1
#define SEMGET 2
#define SEMCTL 3
+#define SEMTIMEDOP 4
#define MSGSND 11
#define MSGRCV 12
#define MSGGET 13
......
@@ -13,6 +13,9 @@
#include <asm/thread_info.h> /* TI_UWINMASK for WINDOW_FLUSH */
#endif
+/* Number of contexts is implementation-dependent; 64k is the most we support */
+#define SRMMU_MAX_CONTEXTS 65536
/* PMD_SHIFT determines the size of the area a second-level page table entry can map */
#define SRMMU_PMD_SHIFT 18
#define SRMMU_PMD_SIZE (1UL << SRMMU_PMD_SHIFT)
......
@@ -14,6 +14,7 @@ struct ipc_kludge {
#define SEMOP 1
#define SEMGET 2
#define SEMCTL 3
+#define SEMTIMEDOP 4
#define MSGSND 11
#define MSGRCV 12
#define MSGGET 13
......
@@ -94,6 +94,8 @@ struct sparc_trapf {
#define STACKFRAME32_SZ sizeof(struct sparc_stackf32)
#ifdef __KERNEL__
+#define force_successful_syscall_return() \
+set_thread_flag(TIF_SYSCALL_SUCCESS)
#define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV))
#define instruction_pointer(regs) ((regs)->tpc)
extern void show_regs(struct pt_regs *);
......
@@ -205,7 +205,12 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define TIF_BLKCOMMIT 9 /* use ASI_BLK_COMMIT_* in copy_user_page */
#define TIF_POLLING_NRFLAG 10
-#define TIF_ABI_PENDING 11
+#define TIF_SYSCALL_SUCCESS 11
+/* NOTE: Thread flags >= 12 should be ones we have no interest
+* in using in assembly, else we can't use the mask as
+* an immediate value in instructions such as andcc.
+*/
+#define TIF_ABI_PENDING 12
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
@@ -219,6 +224,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
#define _TIF_BLKCOMMIT (1<<TIF_BLKCOMMIT)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
+#define _TIF_SYSCALL_SUCCESS (1<<TIF_SYSCALL_SUCCESS)
#define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \
......
@@ -61,7 +61,11 @@ extern struct rt6_info *rt6_lookup(struct in6_addr *daddr,
struct in6_addr *saddr,
int oif, int flags);
-extern struct rt6_info *ip6_dst_alloc(void);
+extern struct dst_entry *ndisc_dst_alloc(struct net_device *dev,
+struct neighbour *neigh,
+int (*output)(struct sk_buff *));
+extern int ndisc_dst_gc(int *more);
+extern void fib6_force_start_gc(void);
/*
* support functions for ND
......
@@ -233,6 +233,8 @@ cond_syscall(compat_sys_futex)
cond_syscall(sys_epoll_create)
cond_syscall(sys_epoll_ctl)
cond_syscall(sys_epoll_wait)
+cond_syscall(sys_pciconfig_read)
+cond_syscall(sys_pciconfig_write)
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
......
@@ -405,34 +405,6 @@ static int do_set_attach_filter(int fd, int level, int optname,
sizeof(struct sock_fprog));
}
-static int do_set_icmpv6_filter(int fd, int level, int optname,
-char *optval, int optlen)
-{
-struct icmp6_filter kfilter;
-mm_segment_t old_fs;
-int ret, i;
-if (optlen < sizeof(kfilter))
-return -EINVAL;
-if (copy_from_user(&kfilter, optval, sizeof(kfilter)))
-return -EFAULT;
-for (i = 0; i < 8; i += 2) {
-u32 tmp = kfilter.data[i];
-kfilter.data[i] = kfilter.data[i + 1];
-kfilter.data[i + 1] = tmp;
-}
-old_fs = get_fs();
-set_fs(KERNEL_DS);
-ret = sys_setsockopt(fd, level, optname,
-(char *) &kfilter, sizeof(kfilter));
-set_fs(old_fs);
-return ret;
-}
static int do_set_sock_timeout(int fd, int level, int optname, char *optval, int optlen)
{
struct compat_timeval *up = (struct compat_timeval *) optval;
@@ -465,9 +437,6 @@ asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
optval, optlen);
if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
return do_set_sock_timeout(fd, level, optname, optval, optlen);
-if (level == SOL_ICMPV6 && optname == ICMPV6_FILTER)
-return do_set_icmpv6_filter(fd, level, optname,
-optval, optlen);
return sys_setsockopt(fd, level, optname, optval, optlen);
}
......
/*
* net-sysfs.c - network device class and attributes
*
-* Copyright (c) 2003 Stephen Hemminber <shemminger@osdl.org>
+* Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
*
*/
......
@@ -375,4 +375,5 @@ config INET_IPCOMP
If unsure, say Y.
source "net/ipv4/netfilter/Kconfig"
+source "net/ipv4/ipvs/Kconfig"
@@ -21,5 +21,6 @@ obj-$(CONFIG_INET_ESP) += esp4.o
obj-$(CONFIG_INET_IPCOMP) += ipcomp.o
obj-$(CONFIG_IP_PNP) += ipconfig.o
obj-$(CONFIG_NETFILTER) += netfilter/
+obj-$(CONFIG_IP_VS) += ipvs/
obj-y += xfrm4_policy.o xfrm4_state.o xfrm4_input.o xfrm4_tunnel.o
#
# Makefile for the IPVS modules on top of IPv4.
#
# Note! Dependencies are done automagically by 'make dep', which also
# removes any old dependencies. DON'T put your own dependencies here
# unless it's something special (ie not a .c file).
#
# Note 2! The CFLAGS definition is now in the main makefile...
# IPVS transport protocol load balancing support
ip_vs_proto-objs-y :=
ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_TCP) += ip_vs_proto_tcp.o
ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_UDP) += ip_vs_proto_udp.o
ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_ESP) += ip_vs_proto_esp.o
ip_vs_proto-objs-$(CONFIG_IP_VS_PROTO_AH) += ip_vs_proto_ah.o
ip_vs-objs := ip_vs_conn.o ip_vs_core.o ip_vs_ctl.o ip_vs_sched.o \
ip_vs_xmit.o ip_vs_app.o ip_vs_sync.o \
ip_vs_est.o ip_vs_proto.o ip_vs_proto_icmp.o \
$(ip_vs_proto-objs-y)
# IPVS core
obj-$(CONFIG_IP_VS) += ip_vs.o
# IPVS schedulers
obj-$(CONFIG_IP_VS_RR) += ip_vs_rr.o
obj-$(CONFIG_IP_VS_WRR) += ip_vs_wrr.o
obj-$(CONFIG_IP_VS_LC) += ip_vs_lc.o
obj-$(CONFIG_IP_VS_WLC) += ip_vs_wlc.o
obj-$(CONFIG_IP_VS_LBLC) += ip_vs_lblc.o
obj-$(CONFIG_IP_VS_LBLCR) += ip_vs_lblcr.o
obj-$(CONFIG_IP_VS_DH) += ip_vs_dh.o
obj-$(CONFIG_IP_VS_SH) += ip_vs_sh.o
obj-$(CONFIG_IP_VS_SED) += ip_vs_sed.o
obj-$(CONFIG_IP_VS_NQ) += ip_vs_nq.o
# IPVS application helpers
obj-$(CONFIG_IP_VS_FTP) += ip_vs_ftp.o
/*
* IPVS: Destination Hashing scheduling module
*
* Version: $Id: ip_vs_dh.c,v 1.5 2002/09/15 08:14:08 wensong Exp $
*
* Authors: Wensong Zhang <wensong@gnuchina.org>
*
* Inspired by the consistent hashing scheduler patch from
* Thomas Proell <proellt@gmx.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Changes:
*
*/
/*
* The dh algorithm is to select server by the hash key of destination IP
* address. The pseudo code is as follows:
*
* n <- servernode[dest_ip];
* if (n is dead) OR
* (n is overloaded) OR (n.weight <= 0) then
* return NULL;
*
* return n;
*
* Notes that servernode is a 256-bucket hash table that maps the hash
* index derived from packet destination IP address to the current server
* array. If the dh scheduler is used in cache cluster, it is good to
* combine it with cache_bypass feature. When the statically assigned
* server is dead or overloaded, the load balancer can bypass the cache
* server and send requests to the original server directly.
*
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <net/ip_vs.h>
/*
* IPVS DH bucket
*/
struct ip_vs_dh_bucket {
struct ip_vs_dest *dest; /* real server (cache) */
};
/*
* for IPVS DH entry hash table
*/
#ifndef CONFIG_IP_VS_DH_TAB_BITS
#define CONFIG_IP_VS_DH_TAB_BITS 8
#endif
#define IP_VS_DH_TAB_BITS CONFIG_IP_VS_DH_TAB_BITS
#define IP_VS_DH_TAB_SIZE (1 << IP_VS_DH_TAB_BITS)
#define IP_VS_DH_TAB_MASK (IP_VS_DH_TAB_SIZE - 1)
/*
* Returns hash value for IPVS DH entry
*/
static inline unsigned ip_vs_dh_hashkey(__u32 addr)
{
return (ntohl(addr)*2654435761UL) & IP_VS_DH_TAB_MASK;
}
/*
* Get ip_vs_dest associated with supplied parameters.
*/
static inline struct ip_vs_dest *
ip_vs_dh_get(struct ip_vs_dh_bucket *tbl, __u32 addr)
{
return (tbl[ip_vs_dh_hashkey(addr)]).dest;
}
/*
* Assign all the hash buckets of the specified table with the service.
*/
static int
ip_vs_dh_assign(struct ip_vs_dh_bucket *tbl, struct ip_vs_service *svc)
{
int i;
struct ip_vs_dh_bucket *b;
struct list_head *p;
struct ip_vs_dest *dest;
b = tbl;
p = &svc->destinations;
for (i=0; i<IP_VS_DH_TAB_SIZE; i++) {
if (list_empty(p)) {
b->dest = NULL;
} else {
if (p == &svc->destinations)
p = p->next;
dest = list_entry(p, struct ip_vs_dest, n_list);
atomic_inc(&dest->refcnt);
b->dest = dest;
p = p->next;
}
b++;
}
return 0;
}
/*
* Flush all the hash buckets of the specified table.
*/
static void ip_vs_dh_flush(struct ip_vs_dh_bucket *tbl)
{
int i;
struct ip_vs_dh_bucket *b;
b = tbl;
for (i=0; i<IP_VS_DH_TAB_SIZE; i++) {
if (b->dest) {
atomic_dec(&b->dest->refcnt);
b->dest = NULL;
}
b++;
}
}
static int ip_vs_dh_init_svc(struct ip_vs_service *svc)
{
struct ip_vs_dh_bucket *tbl;
/* allocate the DH table for this service */
tbl = kmalloc(sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE,
GFP_ATOMIC);
if (tbl == NULL) {
IP_VS_ERR("ip_vs_dh_init_svc(): no memory\n");
return -ENOMEM;
}
svc->sched_data = tbl;
IP_VS_DBG(6, "DH hash table (memory=%dbytes) allocated for "
"current service\n",
sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE);
/* assign the hash buckets with the updated service */
ip_vs_dh_assign(tbl, svc);
return 0;
}
static int ip_vs_dh_done_svc(struct ip_vs_service *svc)
{
struct ip_vs_dh_bucket *tbl = svc->sched_data;
/* got to clean up hash buckets here */
ip_vs_dh_flush(tbl);
/* release the table itself */
kfree(svc->sched_data);
IP_VS_DBG(6, "DH hash table (memory=%dbytes) released\n",
sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE);
return 0;
}
static int ip_vs_dh_update_svc(struct ip_vs_service *svc)
{
struct ip_vs_dh_bucket *tbl = svc->sched_data;
/* got to clean up hash buckets here */
ip_vs_dh_flush(tbl);
/* assign the hash buckets with the updated service */
ip_vs_dh_assign(tbl, svc);
return 0;
}
/*
* If the dest flags is set with IP_VS_DEST_F_OVERLOAD,
* consider that the server is overloaded here.
*/
static inline int is_overloaded(struct ip_vs_dest *dest)
{
return dest->flags & IP_VS_DEST_F_OVERLOAD;
}
/*
* Destination hashing scheduling
*/
static struct ip_vs_dest *
ip_vs_dh_schedule(struct ip_vs_service *svc, struct iphdr *iph)
{
struct ip_vs_dest *dest;
struct ip_vs_dh_bucket *tbl;
IP_VS_DBG(6, "ip_vs_dh_schedule(): Scheduling...\n");
tbl = (struct ip_vs_dh_bucket *)svc->sched_data;
dest = ip_vs_dh_get(tbl, iph->daddr);
if (!dest
|| !(dest->flags & IP_VS_DEST_F_AVAILABLE)
|| atomic_read(&dest->weight) <= 0
|| is_overloaded(dest)) {
return NULL;
}
IP_VS_DBG(6, "DH: destination IP address %u.%u.%u.%u "
"--> server %u.%u.%u.%u:%d\n",
NIPQUAD(iph->daddr),
NIPQUAD(dest->addr),
ntohs(dest->port));
return dest;
}
/*
* IPVS DH Scheduler structure
*/
static struct ip_vs_scheduler ip_vs_dh_scheduler =
{
.name = "dh",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.init_service = ip_vs_dh_init_svc,
.done_service = ip_vs_dh_done_svc,
.update_service = ip_vs_dh_update_svc,
.schedule = ip_vs_dh_schedule,
};
static int __init ip_vs_dh_init(void)
{
INIT_LIST_HEAD(&ip_vs_dh_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_dh_scheduler);
}
static void __exit ip_vs_dh_cleanup(void)
{
unregister_ip_vs_scheduler(&ip_vs_dh_scheduler);
}
module_init(ip_vs_dh_init);
module_exit(ip_vs_dh_cleanup);
MODULE_LICENSE("GPL");
/*
* ip_vs_est.c: simple rate estimator for IPVS
*
* Version: $Id: ip_vs_est.c,v 1.4 2002/11/30 01:50:35 wensong Exp $
*
* Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Changes:
*
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/ip_vs.h>
/*
This code is to estimate rate in a shorter interval (such as 8
seconds) for virtual services and real servers. For measure rate in a
long interval, it is easy to implement a user level daemon which
periodically reads those statistical counters and measure rate.
Currently, the measurement is activated by slow timer handler. Hope
this measurement will not introduce too much load.
We measure rate during the last 8 seconds every 2 seconds:
avgrate = avgrate*(1-W) + rate*W
where W = 2^(-2)
NOTES.
* The stored value for average bps is scaled by 2^5, so that maximal
rate is ~2.15Gbits/s, average pps and cps are scaled by 2^10.
* A lot code is taken from net/sched/estimator.c
*/
struct ip_vs_estimator
{
struct ip_vs_estimator *next;
struct ip_vs_stats *stats;
u32 last_conns;
u32 last_inpkts;
u32 last_outpkts;
u64 last_inbytes;
u64 last_outbytes;
u32 cps;
u32 inpps;
u32 outpps;
u32 inbps;
u32 outbps;
};
static struct ip_vs_estimator *est_list = NULL;
static rwlock_t est_lock = RW_LOCK_UNLOCKED;
static struct timer_list est_timer;
static void estimation_timer(unsigned long arg)
{
struct ip_vs_estimator *e;
struct ip_vs_stats *s;
u32 n_conns;
u32 n_inpkts, n_outpkts;
u64 n_inbytes, n_outbytes;
u32 rate;
read_lock(&est_lock);
for (e = est_list; e; e = e->next) {
s = e->stats;
n_conns = s->conns;
n_inpkts = s->inpkts;
n_outpkts = s->outpkts;
n_inbytes = s->inbytes;
n_outbytes = s->outbytes;
/* scaled by 2^10, but divided 2 seconds */
rate = (n_conns - e->last_conns)<<9;
e->last_conns = n_conns;
e->cps += ((long)rate - (long)e->cps)>>2;
s->cps = (e->cps+0x1FF)>>10;
rate = (n_inpkts - e->last_inpkts)<<9;
e->last_inpkts = n_inpkts;
e->inpps += ((long)rate - (long)e->inpps)>>2;
s->inpps = (e->inpps+0x1FF)>>10;
rate = (n_outpkts - e->last_outpkts)<<9;
e->last_outpkts = n_outpkts;
e->outpps += ((long)rate - (long)e->outpps)>>2;
s->outpps = (e->outpps+0x1FF)>>10;
rate = (n_inbytes - e->last_inbytes)<<4;
e->last_inbytes = n_inbytes;
e->inbps += ((long)rate - (long)e->inbps)>>2;
s->inbps = (e->inbps+0xF)>>5;
rate = (n_outbytes - e->last_outbytes)<<4;
e->last_outbytes = n_outbytes;
e->outbps += ((long)rate - (long)e->outbps)>>2;
s->outbps = (e->outbps+0xF)>>5;
}
read_unlock(&est_lock);
mod_timer(&est_timer, jiffies + 2*HZ);
}
int ip_vs_new_estimator(struct ip_vs_stats *stats)
{
struct ip_vs_estimator *est;
est = kmalloc(sizeof(*est), GFP_KERNEL);
if (est == NULL)
return -ENOMEM;
memset(est, 0, sizeof(*est));
est->stats = stats;
est->last_conns = stats->conns;
est->cps = stats->cps<<10;
est->last_inpkts = stats->inpkts;
est->inpps = stats->inpps<<10;
est->last_outpkts = stats->outpkts;
est->outpps = stats->outpps<<10;
est->last_inbytes = stats->inbytes;
est->inbps = stats->inbps<<5;
est->last_outbytes = stats->outbytes;
est->outbps = stats->outbps<<5;
write_lock_bh(&est_lock);
est->next = est_list;
if (est->next == NULL) {
init_timer(&est_timer);
est_timer.expires = jiffies + 2*HZ;
est_timer.function = estimation_timer;
add_timer(&est_timer);
}
est_list = est;
write_unlock_bh(&est_lock);
return 0;
}
void ip_vs_kill_estimator(struct ip_vs_stats *stats)
{
struct ip_vs_estimator *est, **pest;
int killed = 0;
write_lock_bh(&est_lock);
pest = &est_list;
while ((est=*pest) != NULL) {
if (est->stats != stats) {
pest = &est->next;
continue;
}
*pest = est->next;
kfree(est);
killed++;
}
if (killed && est_list == NULL)
del_timer_sync(&est_timer);
write_unlock_bh(&est_lock);
}
void ip_vs_zero_estimator(struct ip_vs_stats *stats)
{
struct ip_vs_estimator *e;
write_lock_bh(&est_lock);
for (e = est_list; e; e = e->next) {
if (e->stats != stats)
continue;
/* set counters zero */
e->last_conns = 0;
e->last_inpkts = 0;
e->last_outpkts = 0;
e->last_inbytes = 0;
e->last_outbytes = 0;
e->cps = 0;
e->inpps = 0;
e->outpps = 0;
e->inbps = 0;
e->outbps = 0;
}
write_unlock_bh(&est_lock);
}
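The comment at the top of ip_vs_est.c gives the smoothing rule avgrate = avgrate*(1-W) + rate*W with W = 2^(-2), and estimation_timer() implements it in fixed point: per-second connection and packet rates are kept scaled by 2^10 (byte rates by 2^5), and the <<9 (or <<4) shift folds in the division by the 2-second sampling interval. A small standalone sketch of the same update for the connections/sec counter (illustrative only; the helper and sample numbers are not part of the commit):

#include <stdio.h>
#include <stdint.h>

/* One EWMA step, mirroring estimation_timer():
 * rate = delta << 9 is "connections per second, scaled by 2^10"
 * (the missing bit of the shift is the division by the 2 s interval),
 * and e_cps += (rate - e_cps) >> 2 applies W = 1/4.
 */
static uint32_t ewma_cps(uint32_t e_cps, uint32_t delta_conns)
{
	uint32_t rate = delta_conns << 9;
	e_cps += ((int32_t)rate - (int32_t)e_cps) >> 2;
	return e_cps;
}

int main(void)
{
	uint32_t e_cps = 0;

	/* e.g. 100 new connections per 2-second tick: the estimate climbs
	 * toward 50 conns/s; the reported value is rounded back down from
	 * the 2^10 scale, as in s->cps = (e->cps + 0x1FF) >> 10.
	 */
	for (int i = 0; i < 5; i++) {
		e_cps = ewma_cps(e_cps, 100);
		printf("cps estimate: %u\n", (e_cps + 0x1FF) >> 10);
	}
	return 0;
}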
@@ -19,6 +19,12 @@ struct notifier_block;
static struct firewall_ops *fwops;
+#ifdef CONFIG_IP_VS
+/* From ip_vs_core.c */
+extern unsigned int
+check_for_ip_vs_out(struct sk_buff **skb_p, int (*okfn)(struct sk_buff *));
+#endif
/* They call these; we do what they want. */
int register_firewall(int pf, struct firewall_ops *fw)
{
@@ -134,8 +140,14 @@ fw_in(unsigned int hooknum,
return NF_ACCEPT;
case FW_MASQUERADE:
-if (hooknum == NF_IP_FORWARD)
+if (hooknum == NF_IP_FORWARD) {
+#ifdef CONFIG_IP_VS
+/* check if it is for ip_vs */
+if (check_for_ip_vs_out(pskb, okfn) == NF_STOLEN)
+return NF_STOLEN;
+#endif
return do_masquerade(pskb, out);
+}
else return NF_ACCEPT;
case FW_REDIRECT:
......
@@ -496,6 +496,12 @@ static __inline__ void fib6_start_gc(struct rt6_info *rt)
mod_timer(&ip6_fib_timer, jiffies + ip6_rt_gc_interval);
}
+void fib6_force_start_gc(void)
+{
+if (ip6_fib_timer.expires == 0)
+mod_timer(&ip6_fib_timer, jiffies + ip6_rt_gc_interval);
+}
/*
* Add routing information to the routing tree.
* <destination addr>/<source addr>
@@ -1214,6 +1220,7 @@ void fib6_run_gc(unsigned long dummy)
write_lock_bh(&rt6_lock);
+ndisc_dst_gc(&gc_args.more);
fib6_clean_tree(&ip6_routing_table, fib6_age, 0, NULL);
write_unlock_bh(&rt6_lock);
......
@@ -430,6 +430,10 @@ int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock
goto no_dst;
nlk = nlk_sk(sk);
+/* Don't bother queuing skb if kernel socket has no input function */
+if (nlk->pid == 0 && !nlk->data_ready)
+goto no_dst;
#ifdef NL_EMULATE_DEV
if (nlk->handler) {
skb_orphan(skb);
......
@@ -265,6 +265,7 @@ EXPORT_SYMBOL(inet_family_ops);
EXPORT_SYMBOL(in_aton);
EXPORT_SYMBOL(ip_mc_inc_group);
EXPORT_SYMBOL(ip_mc_dec_group);
+EXPORT_SYMBOL(ip_mc_join_group);
EXPORT_SYMBOL(ip_finish_output);
EXPORT_SYMBOL(inet_stream_ops);
EXPORT_SYMBOL(inet_dgram_ops);
......
@@ -855,6 +855,7 @@ xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
{
return x->id.proto == tmpl->id.proto &&
(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
+(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
x->props.mode == tmpl->mode &&
(tmpl->aalgos & (1<<x->props.aalgo)) &&
!(x->props.mode && xfrm_state_addr_cmp(tmpl, x, family));
......