Commit 37b5d10a authored by Linus Torvalds

Merge bk://bk.phunnypharm.org/linux-2.6

into home.osdl.org:/home/torvalds/v2.5/linux
parents 7a272c66 d1ca66cc
@@ -690,7 +690,7 @@ modules_install: _modinst_ _modinst_post
 _modinst_:
 	@if [ -z "`$(DEPMOD) -V | grep module-init-tools`" ]; then \
 		echo "Warning: you may need to install module-init-tools"; \
-		echo "See http://www.codemonkey.org.uk/post-halloween-2.5.txt";\
+		echo "See http://www.codemonkey.org.uk/docs/post-halloween-2.6.txt";\
 		sleep 1; \
 	fi
 	@rm -rf $(MODLIB)/kernel
...
@@ -399,7 +399,7 @@ config X86_USE_3DNOW
 config X86_OOSTORE
 	bool
-	depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6
+	depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6) && MTRR
 	default y
 
 config HPET_TIMER
@@ -1261,11 +1261,6 @@ config FRAME_POINTER
 	  If you don't debug the kernel, you can say N, but we may not be able
 	  to solve problems without frame pointers.
 
-config X86_EXTRA_IRQS
-	bool
-	depends on X86_LOCAL_APIC || X86_VOYAGER
-	default y
-
 config X86_FIND_SMP_CONFIG
 	bool
 	depends on X86_LOCAL_APIC || X86_VOYAGER
...
@@ -1101,7 +1101,6 @@ CONFIG_USB_HIDINPUT=y
 # CONFIG_DEBUG_KERNEL is not set
 CONFIG_DEBUG_SPINLOCK_SLEEP=y
 CONFIG_FRAME_POINTER=y
-CONFIG_X86_EXTRA_IRQS=y
 CONFIG_X86_FIND_SMP_CONFIG=y
 CONFIG_X86_MPPARSE=y
...
@@ -844,6 +844,8 @@ static void apm_cpu_idle(void)
 			idle_percentage *= 100;
 			idle_percentage /= jiffies_since_last_check;
 			use_apm_idle = (idle_percentage > idle_threshold);
+			if (apm_info.forbid_idle)
+				use_apm_idle = 0;
 			last_jiffies = jiffies;
 			last_stime = current->stime;
 		}
...
@@ -352,6 +352,14 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset)
 static int __init mtrr_if_init(void)
 {
+	struct cpuinfo_x86 *c = &boot_cpu_data;
+
+	if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
+	    (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
+	    (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
+	    (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
+		return -ENODEV;
+
 	proc_root_mtrr =
 		create_proc_entry("mtrr", S_IWUSR | S_IRUGO, &proc_root);
 	if (proc_root_mtrr) {
...
@@ -283,6 +283,30 @@ static __init int apm_is_horked(struct dmi_blacklist *d)
 	return 0;
 }
 
+static __init int apm_is_horked_d850md(struct dmi_blacklist *d)
+{
+	if (apm_info.disabled == 0) {
+		apm_info.disabled = 1;
+		printk(KERN_INFO "%s machine detected. Disabling APM.\n", d->ident);
+		printk(KERN_INFO "This bug is fixed in bios P15 which is available for \n");
+		printk(KERN_INFO "download from support.intel.com \n");
+	}
+	return 0;
+}
+
+/*
+ * Some APM bioses hang on APM idle calls
+ */
+static __init int apm_likes_to_melt(struct dmi_blacklist *d)
+{
+	if (apm_info.forbid_idle == 0) {
+		apm_info.forbid_idle = 1;
+		printk(KERN_INFO "%s machine detected. Disabling APM idle calls.\n", d->ident);
+	}
+	return 0;
+}
+
 /*
  * Some machines, usually laptops, can't handle an enabled local APIC.
  * The symptoms include hangs or reboots when suspending or resuming,
@@ -558,6 +582,22 @@ static __initdata struct dmi_blacklist dmi_blacklist[]={
 			MATCH(DMI_PRODUCT_NAME, "Latitude C600"),
 			NO_MATCH, NO_MATCH
 			} },
+	{ set_apm_ints, "Dell Latitude", {	/* Allow interrupts during suspend on Dell Latitude laptops*/
+			MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+			MATCH(DMI_PRODUCT_NAME, "Latitude C510"),
+			NO_MATCH, NO_MATCH
+			} },
+	{ apm_is_horked, "Dell Inspiron 2500", {	/* APM crashes */
+			MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+			MATCH(DMI_PRODUCT_NAME, "Inspiron 2500"),
+			MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
+			MATCH(DMI_BIOS_VERSION,"A11")
+			} },
+	{ set_apm_ints, "Dell Inspiron", {	/* Allow interrupts during suspend on Dell Inspiron laptops*/
+			MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+			MATCH(DMI_PRODUCT_NAME, "Inspiron 4000"),
+			NO_MATCH, NO_MATCH
+			} },
 	{ broken_apm_power, "Dell Inspiron 5000e", {	/* Handle problems with APM on Inspiron 5000e */
 			MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
 			MATCH(DMI_BIOS_VERSION, "A04"),
@@ -568,6 +608,12 @@ static __initdata struct dmi_blacklist dmi_blacklist[]={
 			MATCH(DMI_BIOS_VERSION, "A12"),
 			MATCH(DMI_BIOS_DATE, "02/04/2002"), NO_MATCH
 			} },
+	{ apm_is_horked, "Dell Dimension 4100", {	/* APM crashes */
+			MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
+			MATCH(DMI_PRODUCT_NAME, "XPS-Z"),
+			MATCH(DMI_BIOS_VENDOR,"Intel Corp."),
+			MATCH(DMI_BIOS_VERSION,"A11")
+			} },
 	{ set_realmode_power_off, "Award Software v4.60 PGMA", {	/* broken PM poweroff bios */
 			MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
 			MATCH(DMI_BIOS_VERSION, "4.60 PGMA"),
@@ -578,21 +624,16 @@ static __initdata struct dmi_blacklist dmi_blacklist[]={
 			MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"),
 			NO_MATCH, NO_MATCH
 			} },
-	{ set_bios_reboot, "Dell PowerEdge 300", {	/* Handle problems with rebooting on Dell 1300's */
+	{ set_bios_reboot, "Dell PowerEdge 300", {	/* Handle problems with rebooting on Dell 300's */
 			MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
 			MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"),
 			NO_MATCH, NO_MATCH
 			} },
-	{ set_bios_reboot, "Dell PowerEdge 2400", {	/* Handle problems with rebooting on Dell 300/800's */
+	{ set_bios_reboot, "Dell PowerEdge 2400", {	/* Handle problems with rebooting on Dell 2400's */
 			MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
 			MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
 			NO_MATCH, NO_MATCH
 			} },
-	{ set_apm_ints, "Dell Inspiron", {	/* Allow interrupts during suspend on Dell Inspiron laptops*/
-			MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
-			MATCH(DMI_PRODUCT_NAME, "Inspiron 4000"),
-			NO_MATCH, NO_MATCH
-			} },
 	{ set_apm_ints, "Compaq 12XL125", {	/* Allow interrupts during suspend on Compaq Laptops*/
 			MATCH(DMI_SYS_VENDOR, "Compaq"),
 			MATCH(DMI_PRODUCT_NAME, "Compaq PC"),
@@ -647,6 +688,16 @@ static __initdata struct dmi_blacklist dmi_blacklist[]={
 			MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
 			MATCH(DMI_BIOS_VERSION,"A11")
 			} },
+	{ apm_likes_to_melt, "Jabil AMD", {	/* APM idle hangs */
+			MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
+			MATCH(DMI_BIOS_VERSION, "0AASNP06"),
+			NO_MATCH, NO_MATCH,
+			} },
+	{ apm_likes_to_melt, "AMI Bios", {	/* APM idle hangs */
+			MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
+			MATCH(DMI_BIOS_VERSION, "0AASNP05"),
+			NO_MATCH, NO_MATCH,
+			} },
 	{ sony_vaio_laptop, "Sony Vaio", {	/* This is a Sony Vaio laptop */
 			MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
 			MATCH(DMI_PRODUCT_NAME, "PCG-"),
@@ -775,6 +826,11 @@ static __initdata struct dmi_blacklist dmi_blacklist[]={
 			MATCH(DMI_BIOS_VERSION,"L440GX0.86B.0094.P10"),
 			NO_MATCH, NO_MATCH
 			} },
+	{ broken_pirq, "l44GX Bios", {	/* Bad $PIR */
+			MATCH(DMI_BIOS_VENDOR, "Intel Corporation"),
+			MATCH(DMI_BIOS_VERSION,"L440GX0.86B.0115.P12"),
+			NO_MATCH, NO_MATCH
+			} },
 	{ broken_pirq, "l44GX Bios", {	/* Bad $PIR */
 			MATCH(DMI_BIOS_VENDOR, "Intel Corporation"),
 			MATCH(DMI_BIOS_VERSION,"L440GX0.86B.0120.P12"),
@@ -790,6 +846,12 @@ static __initdata struct dmi_blacklist dmi_blacklist[]={
 			MATCH(DMI_BIOS_VERSION,"L440GX0.86B.0066.P07.9906041405"),
 			NO_MATCH, NO_MATCH
 			} },
+	{ broken_pirq, "IBM xseries 370", {	/* Bad $PIR */
+			MATCH(DMI_BIOS_VENDOR, "IBM"),
+			MATCH(DMI_BIOS_VERSION,"MMKT33AUS"),
+			NO_MATCH, NO_MATCH
+			} },
+
 	/* Intel in disguise - In this case they can't hide and they don't run
 	   too well either... */
...
@@ -259,7 +259,7 @@ static int __init pci_direct_init(void)
 	release_resource(region);
 
  type2:
-	if ((!pci_probe & PCI_PROBE_CONF2) == 0)
+	if ((pci_probe & PCI_PROBE_CONF2) == 0)
 		goto out;
 	region = request_region(0xCF8, 4, "PCI conf2");
 	if (!region)
...
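Annotation: this hunk fixes a C operator-precedence slip that recurs below in the aty128fb, radeonfb, and XFS log-recovery hunks. `!` binds tighter than `&`, so `!pci_probe & PCI_PROBE_CONF2` masks the boolean `!pci_probe` instead of testing the flag. A minimal userspace sketch of the before/after behaviour (the 0x0004 flag value is assumed for the demo):

```c
#include <stdio.h>

#define PCI_PROBE_CONF2 0x0004	/* flag value assumed for the demo */

int main(void)
{
	unsigned int pci_probe = PCI_PROBE_CONF2;	/* conf2 probing requested */

	/* Buggy: '!' binds tighter than '&', so the parenthesised expression
	 * is (!pci_probe) & PCI_PROBE_CONF2 == 0 & 4 == 0, and the
	 * "flag not set" branch fires even though the flag is set. */
	if ((!pci_probe & PCI_PROBE_CONF2) == 0)
		printf("buggy test: conf2 probing skipped\n");

	/* Fixed: mask first, then compare against zero. */
	if ((pci_probe & PCI_PROBE_CONF2) == 0)
		printf("fixed test: conf2 probing skipped\n");
	else
		printf("fixed test: conf2 probing runs\n");

	return 0;
}
```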
@@ -241,12 +241,16 @@ save_user_regs(struct pt_regs *regs, struct mcontext *frame, int sigret)
  * (except for MSR).
  */
 static int
-restore_user_regs(struct pt_regs *regs, struct mcontext __user *sr)
+restore_user_regs(struct pt_regs *regs, struct mcontext __user *sr, int sig)
 {
+	unsigned long save_r2;
 #ifdef CONFIG_ALTIVEC
 	unsigned long msr;
 #endif
 
+	/* backup/restore the TLS as we don't want it to be modified */
+	if (!sig)
+		save_r2 = regs->gpr[2];
+
 	/* copy up to but not including MSR */
 	if (__copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t)))
 		return 1;
@@ -254,6 +258,8 @@ restore_user_regs(struct pt_regs *regs, struct mcontext __user *sr)
 	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
 			     GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
 		return 1;
+	if (!sig)
+		regs->gpr[2] = save_r2;
 
 	/* force the process to reload the FP registers from
 	   current->thread when it next does FP instructions */
@@ -359,7 +365,7 @@ handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
 	force_sig(SIGSEGV, current);
 }
 
-static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs)
+static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
 {
 	sigset_t set;
 	struct mcontext *mcp;
@@ -368,7 +374,7 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs)
 	    || __get_user(mcp, &ucp->uc_regs))
 		return -EFAULT;
 	restore_sigmask(&set);
-	if (restore_user_regs(regs, mcp))
+	if (restore_user_regs(regs, mcp, sig))
 		return -EFAULT;
 
 	return 0;
@@ -376,10 +382,16 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs)
 int sys_swapcontext(struct ucontext __user *old_ctx,
 		    struct ucontext __user *new_ctx,
-		    int r5, int r6, int r7, int r8, struct pt_regs *regs)
+		    int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
 {
 	unsigned char tmp;
 
+	/* Context size is for future use. Right now, we only make sure
+	 * we are passed something we understand
+	 */
+	if (ctx_size < sizeof(struct ucontext))
+		return -EINVAL;
+
 	if (old_ctx != NULL) {
 		if (verify_area(VERIFY_WRITE, old_ctx, sizeof(*old_ctx))
 		    || save_user_regs(regs, &old_ctx->uc_mcontext, 0)
@@ -406,7 +418,7 @@ int sys_swapcontext(struct ucontext __user *old_ctx,
 	 * or if another thread unmaps the region containing the context.
 	 * We kill the task with a SIGSEGV in this situation.
 	 */
-	if (do_setcontext(new_ctx, regs))
+	if (do_setcontext(new_ctx, regs, 0))
 		do_exit(SIGSEGV);
 
 	sigreturn_exit(regs);
 	/* doesn't actually return back to here */
@@ -425,7 +437,7 @@ int sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
 		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
 	if (verify_area(VERIFY_READ, rt_sf, sizeof(struct rt_sigframe)))
 		goto bad;
-	if (do_setcontext(&rt_sf->uc, regs))
+	if (do_setcontext(&rt_sf->uc, regs, 1))
 		goto bad;
 
 	/*
@@ -484,7 +496,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
 	if (save_user_regs(regs, &frame->mctx, __NR_sigreturn))
 		goto badframe;
-	if (put_user(regs->gpr[1], (unsigned long *)newsp))
+	if (put_user(regs->gpr[1], (unsigned long __user *)newsp))
 		goto badframe;
 	regs->gpr[1] = newsp;
 	regs->gpr[3] = sig;
@@ -529,7 +541,7 @@ int sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
 	sr = (struct mcontext *) sigctx.regs;
 	if (verify_area(VERIFY_READ, sr, sizeof(*sr))
-	    || restore_user_regs(regs, sr))
+	    || restore_user_regs(regs, sr, 1))
 		goto badframe;
 
 	sigreturn_exit(regs);	/* doesn't return */
...
@@ -109,10 +109,6 @@ typedef void irqreturn_t;
 #define pci_get_drvdata(pci_dev) (pci_dev)->driver_data
 #endif
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,44)
-#define pci_pool_create(a, b, c, d, e) pci_pool_create(a, b, c, d, e, SLAB_KERNEL)
-#endif
-
 #include "he.h"
 #include "suni.h"
@@ -1986,8 +1982,7 @@ he_service_tbrq(struct he_dev *he_dev, int group)
 		TBRQ_MULTIPLE(he_dev->tbrq_head) ? " MULTIPLE" : "");
 #ifdef USE_TPD_POOL
 	tpd = NULL;
-	p = &he_dev->outstanding_tpds;
-	while ((p = p->next) != &he_dev->outstanding_tpds) {
+	list_for_each(p, &he_dev->outstanding_tpds) {
 		struct he_tpd *__tpd = list_entry(p, struct he_tpd, entry);
 		if (TPD_ADDR(__tpd->status) == TBRQ_TPD(he_dev->tbrq_head)) {
 			tpd = __tpd;
...
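Annotation: the second hunk here swaps an open-coded walk of the circular TPD list for the `list_for_each()` iterator; the behaviour is unchanged, only the idiom. A self-contained userspace sketch of that idiom, with simplified stand-ins for the kernel's `list_head`, `list_for_each` and `list_entry` from include/linux/list.h:

```c
#include <stddef.h>
#include <stdio.h>

/* Circular doubly linked list with a dummy head node. */
struct list_head { struct list_head *next, *prev; };

#define list_for_each(pos, head) \
	for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

/* Recover the containing structure from the embedded list node. */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct item { int id; struct list_head entry; };

int main(void)
{
	struct list_head head = { &head, &head };	/* empty list */
	struct item a = { 1, { NULL, NULL } };
	struct item b = { 2, { NULL, NULL } };
	struct list_head *p;

	/* Link head -> a -> b -> head. */
	head.next = &a.entry;    a.entry.prev = &head;
	a.entry.next = &b.entry; b.entry.prev = &a.entry;
	b.entry.next = &head;    head.prev = &b.entry;

	list_for_each(p, &head) {
		struct item *it = list_entry(p, struct item, entry);
		printf("id=%d\n", it->id);	/* prints 1 then 2 */
	}
	return 0;
}
```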
@@ -3106,7 +3106,8 @@ static inline void md_enter_safemode(mddev_t *mddev)
 {
 	if (!mddev->safemode) return;
 	if (mddev->safemode == 2 &&
-	    (atomic_read(&mddev->writes_pending) || mddev->in_sync))
+	    (atomic_read(&mddev->writes_pending) || mddev->in_sync ||
+	     mddev->recovery_cp != MaxSector))
 		return; /* avoid the lock */
 	mddev_lock_uninterruptible(mddev);
 	if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
...
@@ -29,6 +29,7 @@ extern const struct raid6_calls raid6_intx2;
 extern const struct raid6_calls raid6_intx4;
 extern const struct raid6_calls raid6_intx8;
 extern const struct raid6_calls raid6_intx16;
+extern const struct raid6_calls raid6_intx32;
 extern const struct raid6_calls raid6_mmxx1;
 extern const struct raid6_calls raid6_mmxx2;
 extern const struct raid6_calls raid6_sse1x1;
...
@@ -15,7 +15,7 @@
  *
  * $#-way unrolled portable integer math RAID-6 instruction set
  *
- * This file is postprocessed using unroller.pl
+ * This file is postprocessed using unroll.pl
  */
 
 #include "raid6.h"
...
@@ -1146,7 +1146,7 @@ static int ttusb_dec_boot_dsp(struct ttusb_dec *dec)
 		    0x00, 0x00, 0x00, 0x00,
 		    0x61, 0x00 };
 	u8 b1[] = { 0x61 };
-	u8 b[ARM_PACKET_SIZE];
+	u8 *b;
 	char idstring[21];
 	u8 *firmware = NULL;
 	size_t firmware_size = 0;
@@ -1203,6 +1203,10 @@ static int ttusb_dec_boot_dsp(struct ttusb_dec *dec)
 	trans_count = 0;
 	j = 0;
 
+	b = kmalloc(ARM_PACKET_SIZE, GFP_KERNEL);
+	if (b == NULL)
+		return -ENOMEM;
+
 	for (i = 0; i < firmware_size; i += COMMAND_PACKET_SIZE) {
 		size = firmware_size - i;
 		if (size > COMMAND_PACKET_SIZE)
@@ -1230,6 +1234,8 @@ static int ttusb_dec_boot_dsp(struct ttusb_dec *dec)
 	result = ttusb_dec_send_command(dec, 0x43, sizeof(b1), b1, NULL, NULL);
 
+	kfree(b);
+
 	return result;
 }
...
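Annotation: these hunks move a packet buffer of ARM_PACKET_SIZE bytes off the kernel stack, which is only a few kilobytes per task, onto the heap, and free it before returning; the w9966 hunks below give the same treatment to a W9966_RBUFFER staging buffer. A userspace analogue of the pattern, with malloc/free standing in for kmalloc/kfree and both sizes assumed for the demo:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ARM_PACKET_SIZE 4096	/* size assumed for the demo */

/* Large buffers do not belong on a small fixed-size stack, so
 * allocate on the heap and release on every exit path. */
static int send_firmware(const unsigned char *fw, size_t fw_size)
{
	unsigned char *b = malloc(ARM_PACKET_SIZE);	/* kmalloc() in the driver */
	size_t i;

	if (b == NULL)
		return -1;	/* -ENOMEM in the driver */

	for (i = 0; i < fw_size; i += ARM_PACKET_SIZE) {
		size_t chunk = fw_size - i;

		if (chunk > ARM_PACKET_SIZE)
			chunk = ARM_PACKET_SIZE;
		memcpy(b, fw + i, chunk);
		/* ... hand b off to the device here ... */
	}

	free(b);	/* kfree() in the driver */
	return 0;
}

int main(void)
{
	static unsigned char fw[10000];	/* dummy firmware image */

	return send_firmware(fw, sizeof(fw));
}
```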
@@ -875,6 +875,7 @@ static ssize_t w9966_v4l_read(struct file *file, char *buf,
 	unsigned char addr = 0xa0; // ECP, read, CCD-transfer, 00000
 	unsigned char* dest = (unsigned char*)buf;
 	unsigned long dleft = count;
+	unsigned char *tbuf;
 
 	// Why would anyone want more than this??
 	if (count > cam->width * cam->height * 2)
@@ -894,25 +895,33 @@ static ssize_t w9966_v4l_read(struct file *file, char *buf,
 		w9966_pdev_release(cam);
 		return -EFAULT;
 	}
 
+	tbuf = kmalloc(W9966_RBUFFER, GFP_KERNEL);
+	if (tbuf == NULL) {
+		count = -ENOMEM;
+		goto out;
+	}
+
 	while(dleft > 0)
 	{
 		unsigned long tsize = (dleft > W9966_RBUFFER) ? W9966_RBUFFER : dleft;
-		unsigned char tbuf[W9966_RBUFFER];
 
 		if (parport_read(cam->pport, tbuf, tsize) < tsize) {
-			w9966_pdev_release(cam);
-			return -EFAULT;
+			count = -EFAULT;
+			goto out;
 		}
 		if (copy_to_user(dest, tbuf, tsize) != 0) {
-			w9966_pdev_release(cam);
-			return -EFAULT;
+			count = -EFAULT;
+			goto out;
 		}
 		dest += tsize;
 		dleft -= tsize;
 	}
 
 	w9966_wReg(cam, 0x01, 0x18);	// Disable capture
 
+ out:
+	kfree(tbuf);
+
 	w9966_pdev_release(cam);
 
 	return count;
...
@@ -5093,7 +5093,7 @@ static struct pci_device_id megaraid_pci_tbl[] = {
 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_AMI_MEGARAID3,
 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
-	{PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_AMI_MEGARAID,
+	{PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_AMI_MEGARAID3,
 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{0,}
 };
...
@@ -2102,7 +2102,7 @@ aty128_set_backlight_enable(int on, int level, void *data)
 	reg |= LVDS_BL_MOD_EN | LVDS_BLON;
 	if (on && level > BACKLIGHT_OFF) {
 		reg |= LVDS_DIGION;
-		if (!reg & LVDS_ON) {
+		if (!(reg & LVDS_ON)) {
 			reg &= ~LVDS_BLON;
 			aty_st_le32(LVDS_GEN_CNTL, reg);
 			(void)aty_ld_le32(LVDS_GEN_CNTL);
...
@@ -2319,7 +2319,7 @@ static int radeon_set_backlight_enable(int on, int level, void *data)
 	lvds_gen_cntl |= (LVDS_BL_MOD_EN | LVDS_BLON);
 	if (on && (level > BACKLIGHT_OFF)) {
 		lvds_gen_cntl |= LVDS_DIGON;
-		if (!lvds_gen_cntl & LVDS_ON) {
+		if (!(lvds_gen_cntl & LVDS_ON)) {
 			lvds_gen_cntl &= ~LVDS_BLON;
 			OUTREG(LVDS_GEN_CNTL, lvds_gen_cntl);
 			(void)INREG(LVDS_GEN_CNTL);
...
@@ -407,8 +407,10 @@ map_unwritten(
 	offset <<= PAGE_CACHE_SHIFT;
 	offset += p_offset;
 
-	pb = pagebuf_lookup(iomapp->iomap_target,
-			iomapp->iomap_offset, iomapp->iomap_bsize, 0);
+	/* get an "empty" pagebuf to manage IO completion
+	 * Proper values will be set before returning */
+	pb = pagebuf_lookup(iomapp->iomap_target, 0, 0, 0);
 	if (!pb)
 		return -EAGAIN;
@@ -471,6 +473,11 @@ map_unwritten(
 				nblocks += bs;
 				atomic_add(bs, &pb->pb_io_remaining);
 				convert_page(inode, page, iomapp, pb, startio, all_bh);
+				/* stop if converting the next page might add
+				 * enough blocks that the corresponding byte
+				 * count won't fit in our ulong page buf length */
+				if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
+					goto enough;
 			}
 
 			if (tindex == tlast &&
@@ -481,16 +488,20 @@ map_unwritten(
 				nblocks += bs;
 				atomic_add(bs, &pb->pb_io_remaining);
 				convert_page(inode, page, iomapp, pb, startio, all_bh);
+				if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
+					goto enough;
 			}
 		}
 	}
 
+enough:
 	size = nblocks;		/* NB: using 64bit number here */
 	size <<= block_bits;	/* convert fsb's to byte range */
 
 	XFS_BUF_DATAIO(pb);
 	XFS_BUF_ASYNC(pb);
 	XFS_BUF_SET_SIZE(pb, size);
+	XFS_BUF_SET_COUNT(pb, size);
 	XFS_BUF_SET_OFFSET(pb, offset);
 	XFS_BUF_SET_FSPRIVATE(pb, LINVFS_GET_VP(inode));
 	XFS_BUF_SET_IODONE_FUNC(pb, linvfs_unwritten_convert);
@@ -925,8 +936,10 @@ linvfs_get_block_core(
 	}
 
 	if (blocks) {
-		size = (iomap.iomap_bsize - iomap.iomap_delta);
-		bh_result->b_size = min_t(ssize_t, size, blocks << inode->i_blkbits);
+		loff_t iosize;
+		iosize = (iomap.iomap_bsize - iomap.iomap_delta);
+		bh_result->b_size =
+			(ssize_t)min(iosize, (loff_t)(blocks << inode->i_blkbits));
 	}
 
 	return 0;
...
@@ -66,27 +66,26 @@ typedef enum {
 /*
  * xfs_iomap_t: File system I/O map
  *
- * The iomap_bn, iomap_offset and iomap_length fields are expressed in disk blocks.
- * The iomap_length field specifies the size of the underlying backing store
- * for the particular mapping.
+ * The iomap_bn field is expressed in 512-byte blocks, and is where the
+ * mapping starts on disk.
  *
- * The iomap_bsize, iomap_size and iomap_delta fields are in bytes and indicate
- * the size of the mapping, the number of bytes that are valid to access
- * (read or write), and the offset into the mapping, given the offset
- * supplied to the file I/O map routine. iomap_delta is the offset of the
- * desired data from the beginning of the mapping.
+ * The iomap_offset, iomap_bsize and iomap_delta fields are in bytes.
+ * iomap_offset is the offset of the mapping in the file itself.
+ * iomap_bsize is the size of the mapping, iomap_delta is the
+ * desired data's offset into the mapping, given the offset supplied
+ * to the file I/O map routine.
 *
 * When a request is made to read beyond the logical end of the object,
- * iomap_size may be set to 0, but iomap_offset and iomap_length should be set to
- * the actual amount of underlying storage that has been allocated, if any.
+ * iomap_size may be set to 0, but iomap_offset and iomap_length should be set
+ * to the actual amount of underlying storage that has been allocated, if any.
 */
 typedef struct xfs_iomap {
-	xfs_daddr_t	iomap_bn;
+	xfs_daddr_t	iomap_bn;	/* first 512b blk of mapping */
 	xfs_buftarg_t	*iomap_target;
-	loff_t		iomap_offset;
-	size_t		iomap_delta;
-	size_t		iomap_bsize;
+	loff_t		iomap_offset;	/* offset of mapping, bytes */
+	loff_t		iomap_bsize;	/* size of mapping, bytes */
+	size_t		iomap_delta;	/* offset into mapping, bytes */
 	iomap_flags_t	iomap_flags;
 } xfs_iomap_t;
...
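Annotation: taking the rewritten comments at face value, iomap_bn locates the mapping on disk in 512-byte blocks while iomap_offset locates it in the file in bytes, and iomap_delta is added to either to reach the requested data. A small arithmetic sketch with made-up field values (the shift of 9 follows from the 512-byte block unit):

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Made-up values for one xfs_iomap-style mapping. */
	int64_t   iomap_bn     = 2048;		/* first 512b blk of mapping */
	long long iomap_offset = 1048576;	/* offset of mapping in file, bytes */
	long long iomap_delta  = 512;		/* offset into mapping, bytes */

	/* 512-byte blocks -> bytes is a shift by 9; add the delta to
	 * land on the requested data. */
	long long disk_byte = ((long long)iomap_bn << 9) + iomap_delta;
	long long file_byte = iomap_offset + iomap_delta;

	printf("data lives at disk byte %lld, file byte %lld\n",
	       disk_byte, file_byte);
	return 0;
}
```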
@@ -330,7 +330,7 @@ xfs_bulkstat(
 	 * inode returned; 0 means start of the allocation group.
 	 */
 	rval = 0;
-	while ((ubleft/statstruct_size) > 0 && agno < mp->m_sb.sb_agcount) {
+	while (ubleft >= statstruct_size && agno < mp->m_sb.sb_agcount) {
 		bp = NULL;
 		down_read(&mp->m_peraglock);
 		error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
@@ -415,7 +415,7 @@ xfs_bulkstat(
 			 * Loop through inode btree records in this ag,
 			 * until we run out of inodes or space in the buffer.
 			 */
-			while (irbp < irbufend && icount < (ubleft/statstruct_size)) {
+			while (irbp < irbufend && icount < ubcount) {
 				/*
 				 * Loop as long as we're unable to read the
 				 * inode btree.
@@ -467,7 +467,7 @@ xfs_bulkstat(
 		 */
 		irbufend = irbp;
 		for (irbp = irbuf;
-		     irbp < irbufend && (ubleft/statstruct_size) > 0; irbp++) {
+		     irbp < irbufend && ubleft >= statstruct_size; irbp++) {
 			/*
 			 * Read-ahead the next chunk's worth of inodes.
 			 */
...
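Annotation: in the first and third hunks, for unsigned `ubleft` and a positive `statstruct_size`, `(ubleft/statstruct_size) > 0` and `ubleft >= statstruct_size` always agree, so the rewrite states the intent (room for at least one more record?) directly and drops a division from each loop test. A quick exhaustive check of the equivalence (the record size is made up):

```c
#include <assert.h>

int main(void)
{
	const unsigned long statstruct_size = 136;	/* made-up record size */
	unsigned long ubleft;

	/* For unsigned ubleft and a positive divisor, the quotient is
	 * nonzero exactly when ubleft >= statstruct_size. */
	for (ubleft = 0; ubleft < 100000; ubleft++)
		assert(((ubleft / statstruct_size) > 0) ==
		       (ubleft >= statstruct_size));
	return 0;
}
```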
@@ -1553,7 +1553,7 @@ xlog_recover_reorder_trans(
 		case XFS_LI_BUF:
 		case XFS_LI_6_1_BUF:
 		case XFS_LI_5_3_BUF:
-			if ((!flags & XFS_BLI_CANCEL)) {
+			if (!(flags & XFS_BLI_CANCEL)) {
 				xlog_recover_insert_item_frontq(&trans->r_itemq,
 								itemq);
 				break;
...
@@ -53,6 +53,7 @@ struct apm_info {
 	int	get_power_status_broken;
 	int	get_power_status_swabinminutes;
 	int	allow_ints;
+	int	forbid_idle;
 	int	realmode_power_off;
 	int	disabled;
 };
...
@@ -485,7 +485,7 @@ static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
 #define rq_for_each_bio(_bio, rq)	\
 	if ((rq->bio))			\
-		for (_bio = (rq)->bio; _bio; _bio = bio->bi_next)
+		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
 
 struct sec_size {
 	unsigned block_size;
...
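Annotation: the one-character rq_for_each_bio fix is a macro-hygiene bug. The loop step referenced `bio`, whatever that names at the expansion site, instead of the `_bio` cursor the macro itself sets up, so the iterator either failed to compile or advanced the wrong pointer. A simplified userspace model (the two-field `struct bio` is a stand-in for the kernel's, and the request indirection is dropped):

```c
#include <stdio.h>

struct bio {
	int sector;
	struct bio *bi_next;
};

/* Buggy shape: the step expression names 'bio', which is captured from
 * the caller's scope; with no such variable in scope it does not even
 * compile, and with one it walks that variable's chain instead. */
#define for_each_bio_buggy(_bio, first) \
	for (_bio = (first); _bio; _bio = bio->bi_next)

/* Fixed shape: the step uses the cursor the macro was handed. */
#define for_each_bio_fixed(_bio, first) \
	for (_bio = (first); _bio; _bio = _bio->bi_next)

int main(void)
{
	struct bio c = { 3, NULL };
	struct bio b = { 2, &c };
	struct bio a = { 1, &b };
	struct bio *cursor;

	for_each_bio_fixed(cursor, &a)
		printf("sector %d\n", cursor->sector);	/* 1, 2, 3 */
	return 0;
}
```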
@@ -339,7 +339,7 @@ static void __init setup_per_cpu_areas(void)
 /* Called by boot processor to activate the rest. */
 static void __init smp_init(void)
 {
-	unsigned int i;
+	unsigned int i, j=0;
 
 	/* FIXME: This should be done in userspace --RR */
 	for (i = 0; i < NR_CPUS; i++) {
@@ -348,11 +348,12 @@ static void __init smp_init(void)
 		if (cpu_possible(i) && !cpu_online(i)) {
 			printk("Bringing up %i\n", i);
 			cpu_up(i);
+			j++;
 		}
 	}
 
 	/* Any cleanup work */
-	printk("CPUS done %u\n", max_cpus);
+	printk("CPUS done %u\n", j);
 	smp_cpus_done(max_cpus);
 #if 0
 	/* Get other processors into their bootup holding patterns. */
...
@@ -66,24 +66,18 @@ static struct timer_list flow_hash_rnd_timer;
 struct flow_flush_info {
 	atomic_t cpuleft;
-	cpumask_t cpumap;
 	struct completion completion;
 };
 static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
 
-static DECLARE_MUTEX(flow_cache_cpu_sem);
-static cpumask_t flow_cache_cpu_map;
-static unsigned int flow_cache_cpu_count;
-
 static void flow_cache_new_hashrnd(unsigned long arg)
 {
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_isset(i, flow_cache_cpu_map))
-			flow_hash_rnd_recalc(i) = 1;
+	for_each_cpu(i)
+		flow_hash_rnd_recalc(i) = 1;
 
 	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
 	add_timer(&flow_hash_rnd_timer);
@@ -179,7 +173,9 @@ void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
 	cpu = smp_processor_id();
 	fle = NULL;
 
-	if (!cpu_isset(cpu, flow_cache_cpu_map))
+	/* Packet really early in init? Making flow_cache_init a
+	 * pre-smp initcall would solve this. --RR */
+	if (!flow_table(cpu))
 		goto nocache;
 
 	if (flow_hash_rnd_recalc(cpu))
@@ -278,8 +274,6 @@ static void flow_cache_flush_per_cpu(void *data)
 	struct tasklet_struct *tasklet;
 
 	cpu = smp_processor_id();
-	if (!cpu_isset(cpu, info->cpumap))
-		return;
 
 	tasklet = flow_flush_tasklet(cpu);
 	tasklet->data = (unsigned long)info;
@@ -289,29 +283,23 @@ static void flow_cache_flush_per_cpu(void *data)
 void flow_cache_flush(void)
 {
 	struct flow_flush_info info;
+	static DECLARE_MUTEX(flow_flush_sem);
 
-	down(&flow_cache_cpu_sem);
-	info.cpumap = flow_cache_cpu_map;
-	atomic_set(&info.cpuleft, flow_cache_cpu_count);
-	up(&flow_cache_cpu_sem);
+	/* Don't want cpus going down or up during this, also protects
+	 * against multiple callers. */
+	down(&cpucontrol);
+	atomic_set(&info.cpuleft, num_online_cpus());
 	init_completion(&info.completion);
+	down(&flow_flush_sem);
 
 	local_bh_disable();
 	smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0);
-	if (cpu_isset(smp_processor_id(), info.cpumap))
-		flow_cache_flush_tasklet((unsigned long)&info);
+	flow_cache_flush_tasklet((unsigned long)&info);
 	local_bh_enable();
 
 	wait_for_completion(&info.completion);
+
+	up(&cpucontrol);
+	up(&flow_flush_sem);
 }
 
-static int __devinit flow_cache_cpu_prepare(int cpu)
+static void __devinit flow_cache_cpu_prepare(int cpu)
 {
 	struct tasklet_struct *tasklet;
 	unsigned long order;
@@ -324,9 +312,8 @@ static int __devinit flow_cache_cpu_prepare(int cpu)
 	flow_table(cpu) = (struct flow_cache_entry **)
 		__get_free_pages(GFP_KERNEL, order);
 	if (!flow_table(cpu))
-		return NOTIFY_BAD;
+		panic("NET: failed to allocate flow cache order %lu\n", order);
 
 	memset(flow_table(cpu), 0, PAGE_SIZE << order);
@@ -335,39 +322,8 @@ static int __devinit flow_cache_cpu_prepare(int cpu)
 	tasklet = flow_flush_tasklet(cpu);
 	tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
-
-	return NOTIFY_OK;
 }
 
-static int __devinit flow_cache_cpu_online(int cpu)
-{
-	down(&flow_cache_cpu_sem);
-	cpu_set(cpu, flow_cache_cpu_map);
-	flow_cache_cpu_count++;
-	up(&flow_cache_cpu_sem);
-
-	return NOTIFY_OK;
-}
-
-static int __devinit flow_cache_cpu_notify(struct notifier_block *self,
-					   unsigned long action, void *hcpu)
-{
-	unsigned long cpu = (unsigned long)cpu;
-
-	switch (action) {
-	case CPU_UP_PREPARE:
-		return flow_cache_cpu_prepare(cpu);
-		break;
-	case CPU_ONLINE:
-		return flow_cache_cpu_online(cpu);
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block __devinitdata flow_cache_cpu_nb = {
-	.notifier_call = flow_cache_cpu_notify,
-};
-
 static int __init flow_cache_init(void)
 {
 	int i;
@@ -389,15 +345,8 @@ static int __init flow_cache_init(void)
 	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
 	add_timer(&flow_hash_rnd_timer);
 
-	register_cpu_notifier(&flow_cache_cpu_nb);
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
-		if (flow_cache_cpu_prepare(i) == NOTIFY_OK &&
-		    flow_cache_cpu_online(i) == NOTIFY_OK)
-			continue;
-		panic("NET: failed to initialise flow cache hash table\n");
-	}
+	for_each_cpu(i)
+		flow_cache_cpu_prepare(i);
 
 	return 0;
 }
...
@@ -3030,6 +3030,9 @@ static int addrconf_sysctl_forward_strategy(ctl_table *table,
 			idev = NULL;
 		*valp = new;
 		addrconf_forward_change(idev);
+
+		if (*valp)
+			rt6_purge_dflt_routers(0);
 	} else
 		*valp = new;