Commit 91a2eb28 authored by Greg Kroah-Hartman

Merge ../torvalds-2.6/

parents 53c7d2b7 f5d635f6
@@ -153,10 +153,13 @@ scaling_governor, and by "echoing" the name of another
                         that some governors won't load - they only
                         work on some specific architectures or
                         processors.

 scaling_min_freq and
 scaling_max_freq        show the current "policy limits" (in
                         kHz). By echoing new values into these
                         files, you can change these limits.
+                        NOTE: when setting a policy you need to
+                        first set scaling_max_freq, then
+                        scaling_min_freq.

 If you have selected the "userspace" governor which allows you to
...
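The ordering in the NOTE above is not arbitrary: the cpufreq core rejects a new minimum that would exceed the currently set maximum (see the __cpufreq_set_policy() check added later in this diff), so scaling_max_freq must be written before scaling_min_freq. A minimal userspace sketch of the documented order (the cpu0 sysfs path is standard; the 2 GHz and 800 MHz values are only examples):

    #include <stdio.h>

    /* Write a kHz value into one cpufreq sysfs attribute of cpu0. */
    static int write_khz(const char *attr, unsigned int khz)
    {
            char path[128];
            FILE *f;

            snprintf(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu0/cpufreq/%s", attr);
            f = fopen(path, "w");
            if (f == NULL)
                    return -1;
            fprintf(f, "%u\n", khz);
            return fclose(f);
    }

    int main(void)
    {
            /* Per the NOTE: set the maximum first, then the minimum. */
            if (write_khz("scaling_max_freq", 2000000))
                    return 1;
            if (write_khz("scaling_min_freq", 800000))
                    return 1;
            return 0;
    }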
@@ -96,6 +96,7 @@ config X86_POWERNOW_K8_ACPI

 config X86_GX_SUSPMOD
         tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
+        depends on PCI
         help
          This add the CPUFreq driver for NatSemi Geode processors which
          support suspend modulation.

@@ -202,7 +203,7 @@ config X86_LONGRUN
 config X86_LONGHAUL
         tristate "VIA Cyrix III Longhaul"
         select CPU_FREQ_TABLE
-        depends on BROKEN
+        depends on ACPI_PROCESSOR
         help
          This adds the CPUFreq driver for VIA Samuel/CyrixIII,
          VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T
...
@@ -384,8 +384,7 @@ static int acpi_cpufreq_early_init_acpi(void)
         }

         /* Do initialization in ACPI core */
-        acpi_processor_preregister_performance(acpi_perf_data);
-        return 0;
+        return acpi_processor_preregister_performance(acpi_perf_data);
 }

 static int
...
@@ -29,11 +29,13 @@
 #include <linux/cpufreq.h>
 #include <linux/slab.h>
 #include <linux/string.h>
-#include <linux/pci.h>

 #include <asm/msr.h>
 #include <asm/timex.h>
 #include <asm/io.h>
+#include <asm/acpi.h>
+#include <linux/acpi.h>
+#include <acpi/processor.h>

 #include "longhaul.h"
@@ -56,6 +58,8 @@ static int minvid, maxvid;
 static unsigned int minmult, maxmult;
 static int can_scale_voltage;
 static int vrmrev;
+static struct acpi_processor *pr = NULL;
+static struct acpi_processor_cx *cx = NULL;

 /* Module parameters */
 static int dont_scale_voltage;
@@ -118,84 +122,65 @@ static int longhaul_get_cpu_mult(void)
         return eblcr_table[invalue];
 }

-static void do_powersaver(union msr_longhaul *longhaul,
-                unsigned int clock_ratio_index)
-{
-        struct pci_dev *dev;
-        unsigned long flags;
-        unsigned int tmp_mask;
-        int version;
-        int i;
-        u16 pci_cmd;
-        u16 cmd_state[64];
-
-        switch (cpu_model) {
-        case CPU_EZRA_T:
-                version = 3;
-                break;
-        case CPU_NEHEMIAH:
-                version = 0xf;
-                break;
-        default:
-                return;
-        }
-
-        rdmsrl(MSR_VIA_LONGHAUL, longhaul->val);
-        longhaul->bits.SoftBusRatio = clock_ratio_index & 0xf;
-        longhaul->bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
-        longhaul->bits.EnableSoftBusRatio = 1;
-        longhaul->bits.RevisionKey = 0;
-
-        preempt_disable();
-        local_irq_save(flags);
-
-        /*
-         * get current pci bus master state for all devices
-         * and clear bus master bit
-         */
-        dev = NULL;
-        i = 0;
-        do {
-                dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
-                if (dev != NULL) {
-                        pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
-                        cmd_state[i++] = pci_cmd;
-                        pci_cmd &= ~PCI_COMMAND_MASTER;
-                        pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
-                }
-        } while (dev != NULL);
-
-        tmp_mask=inb(0x21);     /* works on C3. save mask. */
-        outb(0xFE,0x21);        /* TMR0 only */
-        outb(0xFF,0x80);        /* delay */
-
-        safe_halt();
-        wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
-        halt();
-
-        local_irq_disable();
-
-        outb(tmp_mask,0x21);    /* restore mask */
-
-        /* restore pci bus master state for all devices */
-        dev = NULL;
-        i = 0;
-        do {
-                dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
-                if (dev != NULL) {
-                        pci_cmd = cmd_state[i++];
-                        pci_write_config_byte(dev, PCI_COMMAND, pci_cmd);
-                }
-        } while (dev != NULL);
-        local_irq_restore(flags);
-        preempt_enable();
-
-        /* disable bus ratio bit */
-        rdmsrl(MSR_VIA_LONGHAUL, longhaul->val);
-        longhaul->bits.EnableSoftBusRatio = 0;
-        longhaul->bits.RevisionKey = version;
-        wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
-}
+/* For processor with BCR2 MSR */
+static void do_longhaul1(int cx_address, unsigned int clock_ratio_index)
+{
+        union msr_bcr2 bcr2;
+        u32 t;
+
+        rdmsrl(MSR_VIA_BCR2, bcr2.val);
+        /* Enable software clock multiplier */
+        bcr2.bits.ESOFTBF = 1;
+        bcr2.bits.CLOCKMUL = clock_ratio_index;
+
+        /* Sync to timer tick */
+        safe_halt();
+        ACPI_FLUSH_CPU_CACHE();
+        /* Change frequency on next halt or sleep */
+        wrmsrl(MSR_VIA_BCR2, bcr2.val);
+        /* Invoke C3 */
+        inb(cx_address);
+        /* Dummy op - must do something useless after P_LVL3 read */
+        t = inl(acpi_fadt.xpm_tmr_blk.address);
+
+        /* Disable software clock multiplier */
+        local_irq_disable();
+        rdmsrl(MSR_VIA_BCR2, bcr2.val);
+        bcr2.bits.ESOFTBF = 0;
+        wrmsrl(MSR_VIA_BCR2, bcr2.val);
+}
+
+/* For processor with Longhaul MSR */
+static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
+{
+        union msr_longhaul longhaul;
+        u32 t;
+
+        rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+        longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
+        longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf;
+        longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
+        longhaul.bits.EnableSoftBusRatio = 1;
+
+        /* Sync to timer tick */
+        safe_halt();
+        ACPI_FLUSH_CPU_CACHE();
+        /* Change frequency on next halt or sleep */
+        wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+        /* Invoke C3 */
+        inb(cx_address);
+        /* Dummy op - must do something useless after P_LVL3 read */
+        t = inl(acpi_fadt.xpm_tmr_blk.address);
+
+        /* Disable bus ratio bit */
+        local_irq_disable();
+        longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
+        longhaul.bits.EnableSoftBusRatio = 0;
+        longhaul.bits.EnableSoftBSEL = 0;
+        longhaul.bits.EnableSoftVID = 0;
+        wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
+}

 /**
@@ -209,9 +194,9 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
 {
         int speed, mult;
         struct cpufreq_freqs freqs;
-        union msr_longhaul longhaul;
-        union msr_bcr2 bcr2;
         static unsigned int old_ratio=-1;
+        unsigned long flags;
+        unsigned int pic1_mask, pic2_mask;

         if (old_ratio == clock_ratio_index)
                 return;
@@ -234,6 +219,20 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
         dprintk ("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
                         fsb, mult/10, mult%10, print_speed(speed/1000));

+        preempt_disable();
+        local_irq_save(flags);
+
+        pic2_mask = inb(0xA1);
+        pic1_mask = inb(0x21);  /* works on C3. save mask. */
+        outb(0xFF,0xA1);        /* Overkill */
+        outb(0xFE,0x21);        /* TMR0 only */
+
+        /* Disable bus master arbitration */
+        if (pr->flags.bm_check) {
+                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1,
+                                  ACPI_MTX_DO_NOT_LOCK);
+        }
+
         switch (longhaul_version) {

         /*
@@ -245,20 +244,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
          */
         case TYPE_LONGHAUL_V1:
         case TYPE_LONGHAUL_V2:
-                rdmsrl (MSR_VIA_BCR2, bcr2.val);
-                /* Enable software clock multiplier */
-                bcr2.bits.ESOFTBF = 1;
-                bcr2.bits.CLOCKMUL = clock_ratio_index;
-                local_irq_disable();
-                wrmsrl (MSR_VIA_BCR2, bcr2.val);
-                safe_halt();
-
-                /* Disable software clock multiplier */
-                rdmsrl (MSR_VIA_BCR2, bcr2.val);
-                bcr2.bits.ESOFTBF = 0;
-                local_irq_disable();
-                wrmsrl (MSR_VIA_BCR2, bcr2.val);
-                local_irq_enable();
+                do_longhaul1(cx->address, clock_ratio_index);
                 break;

         /*
@@ -273,10 +259,22 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
          * to work in practice.
          */
         case TYPE_POWERSAVER:
-                do_powersaver(&longhaul, clock_ratio_index);
+                do_powersaver(cx->address, clock_ratio_index);
                 break;
         }

+        /* Enable bus master arbitration */
+        if (pr->flags.bm_check) {
+                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0,
+                                  ACPI_MTX_DO_NOT_LOCK);
+        }
+
+        outb(pic2_mask,0xA1);   /* restore mask */
+        outb(pic1_mask,0x21);
+
+        local_irq_restore(flags);
+        preempt_enable();
+
         cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 }
@@ -324,9 +322,11 @@ static int guess_fsb(void)
 static int __init longhaul_get_ranges(void)
 {
         unsigned long invalue;
-        unsigned int multipliers[32]= {
-                50,30,40,100,55,35,45,95,90,70,80,60,120,75,85,65,
-                -1,110,120,-1,135,115,125,105,130,150,160,140,-1,155,-1,145 };
+        unsigned int ezra_t_multipliers[32]= {
+                90,  30,  40, 100,  55,  35,  45,  95,
+                50,  70,  80,  60, 120,  75,  85,  65,
+                -1, 110, 120,  -1, 135, 115, 125, 105,
+                130, 150, 160, 140,  -1, 155,  -1, 145 };
         unsigned int j, k = 0;
         union msr_longhaul longhaul;
         unsigned long lo, hi;
@@ -355,13 +355,13 @@ static int __init longhaul_get_ranges(void)
                 invalue = longhaul.bits.MaxMHzBR;
                 if (longhaul.bits.MaxMHzBR4)
                         invalue += 16;
-                maxmult=multipliers[invalue];
+                maxmult=ezra_t_multipliers[invalue];

                 invalue = longhaul.bits.MinMHzBR;
                 if (longhaul.bits.MinMHzBR4 == 1)
                         minmult = 30;
                 else
-                        minmult = multipliers[invalue];
+                        minmult = ezra_t_multipliers[invalue];

                 fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB];
                 break;
         }
@@ -527,6 +527,18 @@ static unsigned int longhaul_get(unsigned int cpu)
         return calc_speed(longhaul_get_cpu_mult());
 }

+static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
+                                          u32 nesting_level,
+                                          void *context, void **return_value)
+{
+        struct acpi_device *d;
+
+        if ( acpi_bus_get_device(obj_handle, &d) ) {
+                return 0;
+        }
+        *return_value = (void *)acpi_driver_data(d);
+        return 1;
+}
+
 static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 {
@@ -534,6 +546,15 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
         char *cpuname=NULL;
         int ret;

+        /* Check ACPI support for C3 state */
+        acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
+                            &longhaul_walk_callback, NULL, (void *)&pr);
+        if (pr == NULL) goto err_acpi;
+
+        cx = &pr->power.states[ACPI_STATE_C3];
+        if (cx->address == 0 || cx->latency > 1000) goto err_acpi;
+
+        /* Now check what we have on this motherboard */
         switch (c->x86_model) {
         case 6:
                 cpu_model = CPU_SAMUEL;
@@ -634,6 +655,10 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
         cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu);

         return 0;
+
+err_acpi:
+        printk(KERN_ERR PFX "No ACPI support for CPU frequency changes.\n");
+        return -ENODEV;
 }

 static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy)
@@ -666,6 +691,18 @@ static int __init longhaul_init(void)
         if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6)
                 return -ENODEV;

+#ifdef CONFIG_SMP
+        if (num_online_cpus() > 1) {
+                printk(KERN_ERR PFX "More than 1 CPU detected, longhaul disabled.\n");
+                return -ENODEV;
+        }
+#endif
+#ifdef CONFIG_X86_IO_APIC
+        if (cpu_has_apic) {
+                printk(KERN_ERR PFX "APIC detected. Longhaul is currently broken in this configuration.\n");
+                return -ENODEV;
+        }
+#endif
         switch (c->x86_model) {
         case 6 ... 9:
                 return cpufreq_register_driver(&longhaul_driver);

@@ -699,6 +736,6 @@ MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>");
 MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors.");
 MODULE_LICENSE ("GPL");

-module_init(longhaul_init);
+late_initcall(longhaul_init);
 module_exit(longhaul_exit);
@@ -32,32 +32,38 @@
 extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);

-#define MAX_UNCACHED_GRANULES 5
-static int allocated_granules;
+struct uncached_pool {
+        struct gen_pool *pool;
+        struct mutex add_chunk_mutex;   /* serialize adding a converted chunk */
+        int nchunks_added;              /* #of converted chunks added to pool */
+        atomic_t status;                /* smp called function's return status*/
+};
+
+#define MAX_CONVERTED_CHUNKS_PER_NODE 2

-struct gen_pool *uncached_pool[MAX_NUMNODES];
+struct uncached_pool uncached_pools[MAX_NUMNODES];

 static void uncached_ipi_visibility(void *data)
 {
         int status;
+        struct uncached_pool *uc_pool = (struct uncached_pool *)data;

         status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
         if ((status != PAL_VISIBILITY_OK) &&
             (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
-                printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on "
-                       "CPU %i\n", status, raw_smp_processor_id());
+                atomic_inc(&uc_pool->status);
 }

 static void uncached_ipi_mc_drain(void *data)
 {
         int status;
+        struct uncached_pool *uc_pool = (struct uncached_pool *)data;

         status = ia64_pal_mc_drain();
-        if (status)
-                printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
-                       "CPU %i\n", status, raw_smp_processor_id());
+        if (status != PAL_STATUS_SUCCESS)
+                atomic_inc(&uc_pool->status);
 }
@@ -70,21 +76,34 @@ static void uncached_ipi_mc_drain(void *data)
  * This is accomplished by first allocating a granule of cached memory pages
  * and then converting them to uncached memory pages.
  */
-static int uncached_add_chunk(struct gen_pool *pool, int nid)
+static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 {
         struct page *page;
-        int status, i;
+        int status, i, nchunks_added = uc_pool->nchunks_added;
         unsigned long c_addr, uc_addr;

-        if (allocated_granules >= MAX_UNCACHED_GRANULES)
+        if (mutex_lock_interruptible(&uc_pool->add_chunk_mutex) != 0)
+                return -1;      /* interrupted by a signal */
+
+        if (uc_pool->nchunks_added > nchunks_added) {
+                /* someone added a new chunk while we were waiting */
+                mutex_unlock(&uc_pool->add_chunk_mutex);
+                return 0;
+        }
+
+        if (uc_pool->nchunks_added >= MAX_CONVERTED_CHUNKS_PER_NODE) {
+                mutex_unlock(&uc_pool->add_chunk_mutex);
                 return -1;
+        }

         /* attempt to allocate a granule's worth of cached memory pages */

         page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO,
                                 IA64_GRANULE_SHIFT-PAGE_SHIFT);
-        if (!page)
+        if (!page) {
+                mutex_unlock(&uc_pool->add_chunk_mutex);
                 return -1;
+        }

         /* convert the memory pages from cached to uncached */
@@ -102,11 +121,14 @@ static int uncached_add_chunk(struct gen_pool *pool, int nid)
         flush_tlb_kernel_range(uc_addr, uc_adddr + IA64_GRANULE_SIZE);

         status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
-        if (!status) {
-                status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
-                if (status)
+        if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
+                atomic_set(&uc_pool->status, 0);
+                status = smp_call_function(uncached_ipi_visibility, uc_pool,
+                                           0, 1);
+                if (status || atomic_read(&uc_pool->status))
                         goto failed;
-        }
+        } else if (status != PAL_VISIBILITY_OK)
+                goto failed;

         preempt_disable();
@@ -120,20 +142,24 @@ static int uncached_add_chunk(struct gen_pool *pool, int nid)
         preempt_enable();

-        ia64_pal_mc_drain();
-        status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
-        if (status)
+        status = ia64_pal_mc_drain();
+        if (status != PAL_STATUS_SUCCESS)
+                goto failed;
+        atomic_set(&uc_pool->status, 0);
+        status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1);
+        if (status || atomic_read(&uc_pool->status))
                 goto failed;

         /*
          * The chunk of memory pages has been converted to uncached so now we
          * can add it to the pool.
          */
-        status = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid);
+        status = gen_pool_add(uc_pool->pool, uc_addr, IA64_GRANULE_SIZE, nid);
         if (status)
                 goto failed;

-        allocated_granules++;
+        uc_pool->nchunks_added++;
+        mutex_unlock(&uc_pool->add_chunk_mutex);
         return 0;

         /* failed to convert or add the chunk so give it back to the kernel */
@@ -142,6 +168,7 @@ static int uncached_add_chunk(struct gen_pool *pool, int nid)
                 ClearPageUncached(&page[i]);

         free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
+        mutex_unlock(&uc_pool->add_chunk_mutex);
         return -1;
 }
@@ -158,7 +185,7 @@ static int uncached_add_chunk(struct gen_pool *pool, int nid)
 unsigned long uncached_alloc_page(int starting_nid)
 {
         unsigned long uc_addr;
-        struct gen_pool *pool;
+        struct uncached_pool *uc_pool;
         int nid;

         if (unlikely(starting_nid >= MAX_NUMNODES))
@@ -171,14 +198,14 @@ unsigned long uncached_alloc_page(int starting_nid)
         do {
                 if (!node_online(nid))
                         continue;
-                pool = uncached_pool[nid];
-                if (pool == NULL)
+                uc_pool = &uncached_pools[nid];
+                if (uc_pool->pool == NULL)
                         continue;
                 do {
-                        uc_addr = gen_pool_alloc(pool, PAGE_SIZE);
+                        uc_addr = gen_pool_alloc(uc_pool->pool, PAGE_SIZE);
                         if (uc_addr != 0)
                                 return uc_addr;
-                } while (uncached_add_chunk(pool, nid) == 0);
+                } while (uncached_add_chunk(uc_pool, nid) == 0);

         } while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);
@@ -197,7 +224,7 @@ EXPORT_SYMBOL(uncached_alloc_page);
 void uncached_free_page(unsigned long uc_addr)
 {
         int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
-        struct gen_pool *pool = uncached_pool[nid];
+        struct gen_pool *pool = uncached_pools[nid].pool;

         if (unlikely(pool == NULL))
                 return;
@@ -224,7 +251,7 @@ static int __init uncached_build_memmap(unsigned long uc_start,
                                         unsigned long uc_end, void *arg)
 {
         int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
-        struct gen_pool *pool = uncached_pool[nid];
+        struct gen_pool *pool = uncached_pools[nid].pool;
         size_t size = uc_end - uc_start;

         touch_softlockup_watchdog();
@@ -242,7 +269,8 @@ static int __init uncached_init(void)
         int nid;

         for_each_online_node(nid) {
-                uncached_pool[nid] = gen_pool_create(PAGE_SHIFT, nid);
+                uncached_pools[nid].pool = gen_pool_create(PAGE_SHIFT, nid);
+                mutex_init(&uncached_pools[nid].add_chunk_mutex);
         }

         efi_memmap_walk_uc(uncached_build_memmap, NULL);
...
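The reworked uncached_add_chunk() above follows a common concurrency pattern: snapshot a generation counter, sleep on a mutex, then re-check the counter after acquiring it, so a caller that lost the race reuses the chunk a racer just added instead of converting another granule. A stand-alone sketch of that pattern (hypothetical names; pthreads stands in for the kernel mutex):

    #include <pthread.h>
    #include <stdio.h>

    #define MAX_CHUNKS 2        /* mirrors MAX_CONVERTED_CHUNKS_PER_NODE */

    static pthread_mutex_t add_chunk_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int nchunks_added;

    /* Returns 0 when a chunk is available (added by us or by a racer),
     * -1 when the per-node limit has been reached. */
    static int add_chunk(void)
    {
            int seen = nchunks_added;   /* snapshot before blocking */

            pthread_mutex_lock(&add_chunk_mutex);
            if (nchunks_added > seen) {
                    /* A racing thread added a chunk while we waited:
                     * reuse it instead of converting another one. */
                    pthread_mutex_unlock(&add_chunk_mutex);
                    return 0;
            }
            if (nchunks_added >= MAX_CHUNKS) {
                    pthread_mutex_unlock(&add_chunk_mutex);
                    return -1;
            }
            /* ... the expensive allocate-and-convert work goes here ... */
            nchunks_added++;
            pthread_mutex_unlock(&add_chunk_mutex);
            return 0;
    }

    int main(void)
    {
            printf("%d %d %d\n", add_chunk(), add_chunk(), add_chunk());
            return 0;   /* prints "0 0 -1" */
    }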
@@ -284,39 +284,69 @@ EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
  *                          SYSFS INTERFACE                          *
  *********************************************************************/

+static struct cpufreq_governor *__find_governor(const char *str_governor)
+{
+        struct cpufreq_governor *t;
+
+        list_for_each_entry(t, &cpufreq_governor_list, governor_list)
+                if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN))
+                        return t;
+
+        return NULL;
+}
+
 /**
  * cpufreq_parse_governor - parse a governor string
  */
 static int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
                                 struct cpufreq_governor **governor)
 {
+        int err = -EINVAL;
+
         if (!cpufreq_driver)
-                return -EINVAL;
+                goto out;
+
         if (cpufreq_driver->setpolicy) {
                 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
                         *policy = CPUFREQ_POLICY_PERFORMANCE;
-                        return 0;
+                        err = 0;
                 } else if (!strnicmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
                         *policy = CPUFREQ_POLICY_POWERSAVE;
-                        return 0;
+                        err = 0;
                 }
-                return -EINVAL;
-        } else {
+        } else if (cpufreq_driver->target) {
                 struct cpufreq_governor *t;
+
                 mutex_lock(&cpufreq_governor_mutex);
-                if (!cpufreq_driver || !cpufreq_driver->target)
-                        goto out;
-                list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
-                        if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN)) {
-                                *governor = t;
-                                mutex_unlock(&cpufreq_governor_mutex);
-                                return 0;
-                        }
+
+                t = __find_governor(str_governor);
+
+                if (t == NULL) {
+                        char *name = kasprintf(GFP_KERNEL, "cpufreq_%s", str_governor);
+
+                        if (name) {
+                                int ret;
+
+                                mutex_unlock(&cpufreq_governor_mutex);
+                                ret = request_module(name);
+                                mutex_lock(&cpufreq_governor_mutex);
+
+                                if (ret == 0)
+                                        t = __find_governor(str_governor);
+                        }
+
+                        kfree(name);
                 }
-        out:
+
+                if (t != NULL) {
+                        *governor = t;
+                        err = 0;
+                }
+
                 mutex_unlock(&cpufreq_governor_mutex);
         }
-        return -EINVAL;
+out:
+        return err;
 }
@@ -1265,23 +1295,21 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)

 int cpufreq_register_governor(struct cpufreq_governor *governor)
 {
-        struct cpufreq_governor *t;
+        int err;

         if (!governor)
                 return -EINVAL;

         mutex_lock(&cpufreq_governor_mutex);

-        list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
-                if (!strnicmp(governor->name,t->name,CPUFREQ_NAME_LEN)) {
-                        mutex_unlock(&cpufreq_governor_mutex);
-                        return -EBUSY;
-                }
+        err = -EBUSY;
+        if (__find_governor(governor->name) == NULL) {
+                err = 0;
+                list_add(&governor->governor_list, &cpufreq_governor_list);
         }
-        list_add(&governor->governor_list, &cpufreq_governor_list);

         mutex_unlock(&cpufreq_governor_mutex);
-        return 0;
+        return err;
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
@@ -1343,6 +1371,11 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli
         memcpy(&policy->cpuinfo, &data->cpuinfo, sizeof(struct cpufreq_cpuinfo));

+        if (policy->min > data->min && policy->min > policy->max) {
+                ret = -EINVAL;
+                goto error_out;
+        }
+
         /* verify the cpu speed can be set within this limit */
         ret = cpufreq_driver->verify(policy);
         if (ret)
...
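The new cpufreq_parse_governor() path autoloads governors: when no registered governor matches, it drops cpufreq_governor_mutex, requests a module named cpufreq_<governor>, retakes the lock, and searches again. Dropping the lock is essential, because the loaded module's init code registers the governor under that same lock. A condensed stand-alone sketch of the drop-load-retry shape (all names hypothetical; pthreads and a stub loader stand in for the kernel pieces):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    struct governor {
            const char *name;
            struct governor *next;
    };

    static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct governor *registry;

    /* Caller must hold registry_lock. */
    static struct governor *find_governor(const char *name)
    {
            struct governor *g;

            for (g = registry; g != NULL; g = g->next)
                    if (strcmp(g->name, name) == 0)
                            return g;
            return NULL;
    }

    /* Stand-in for request_module(): a real module's init code takes
     * registry_lock itself to register the governor it provides. */
    static int load_module(const char *name)
    {
            static struct governor ondemand = { .name = "ondemand" };

            if (strcmp(name, "ondemand") != 0)
                    return -1;  /* no such module */
            pthread_mutex_lock(&registry_lock);
            ondemand.next = registry;
            registry = &ondemand;
            pthread_mutex_unlock(&registry_lock);
            return 0;
    }

    static struct governor *get_governor(const char *name)
    {
            struct governor *g;

            pthread_mutex_lock(&registry_lock);
            g = find_governor(name);
            if (g == NULL) {
                    /* Drop the lock so the loader's init path can take it. */
                    pthread_mutex_unlock(&registry_lock);
                    if (load_module(name) != 0)
                            return NULL;
                    pthread_mutex_lock(&registry_lock);
                    g = find_governor(name);    /* retry after the load */
            }
            pthread_mutex_unlock(&registry_lock);
            return g;
    }

    int main(void)
    {
            printf("%s\n", get_governor("ondemand") ? "found" : "missing");
            return 0;
    }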
@@ -638,9 +638,6 @@ static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
         if (task->tk_status < 0) {
                 /* RPC error: Re-insert for retransmission */
                 timeout = 10 * HZ;
-        } else if (block->b_done) {
-                /* Block already removed, kill it for real */
-                timeout = 0;
         } else {
                 /* Call was successful, now wait for client callback */
                 timeout = 60 * HZ;
@@ -709,13 +706,10 @@ nlmsvc_retry_blocked(void)
                         break;
                 if (time_after(block->b_when,jiffies))
                         break;
-                dprintk("nlmsvc_retry_blocked(%p, when=%ld, done=%d)\n",
-                        block, block->b_when, block->b_done);
+                dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n",
+                        block, block->b_when);
                 kref_get(&block->b_count);
-                if (block->b_done)
-                        nlmsvc_unlink_block(block);
-                else
-                        nlmsvc_grant_blocked(block);
+                nlmsvc_grant_blocked(block);
                 nlmsvc_release_block(block);
         }
...
@@ -51,7 +51,7 @@ char *nfs_path(const char *base, const struct dentry *dentry,
                 namelen = dentry->d_name.len;
                 buflen -= namelen + 1;
                 if (buflen < 0)
-                        goto Elong;
+                        goto Elong_unlock;
                 end -= namelen;
                 memcpy(end, dentry->d_name.name, namelen);
                 *--end = '/';
@@ -68,6 +68,8 @@ char *nfs_path(const char *base, const struct dentry *dentry,
         end -= namelen;
         memcpy(end, base, namelen);
         return end;
+Elong_unlock:
+        spin_unlock(&dcache_lock);
 Elong:
         return ERR_PTR(-ENAMETOOLONG);
 }
...
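nfs_path() builds its result from the tail of the buffer, prepending one name per dentry while walking toward the root, which is why overflow can be detected mid-loop; the new Elong_unlock label gives that mid-loop exit, which runs under dcache_lock, its own unlocking path. A user-space sketch of the same right-to-left construction (hypothetical names):

    #include <stdio.h>
    #include <string.h>

    /* Build "/a/b/c" by prepending leaf-first components into the tail
     * of buf. Returns a pointer into buf, or NULL if buf is too small. */
    static char *build_path(char *buf, size_t buflen,
                            const char *const comps[], size_t n)
    {
            char *end = buf + buflen;
            size_t room = buflen - 1;   /* reserve the trailing NUL */
            size_t i;

            *--end = '\0';
            for (i = 0; i < n; i++) {
                    size_t namelen = strlen(comps[i]);

                    if (namelen + 1 > room) /* component plus its '/' */
                            return NULL;
                    room -= namelen + 1;
                    end -= namelen;
                    memcpy(end, comps[i], namelen);
                    *--end = '/';
            }
            return end;
    }

    int main(void)
    {
            const char *const comps[] = { "c", "b", "a" };  /* leaf first */
            char buf[32];
            char *p = build_path(buf, sizeof(buf), comps, 3);

            printf("%s\n", p ? p : "(buffer too small)");   /* "/a/b/c" */
            return 0;
    }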
@@ -63,7 +63,7 @@ struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
         return p;
 }

-void nfs_readdata_free(struct nfs_read_data *p)
+static void nfs_readdata_free(struct nfs_read_data *p)
 {
         if (p && (p->pagevec != &p->page_array[0]))
                 kfree(p->pagevec);
...
@@ -137,7 +137,7 @@ struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
         return p;
 }

-void nfs_writedata_free(struct nfs_write_data *p)
+static void nfs_writedata_free(struct nfs_write_data *p)
 {
         if (p && (p->pagevec != &p->page_array[0]))
                 kfree(p->pagevec);
...
@@ -123,7 +123,6 @@ struct nlm_block {
         unsigned int            b_id;           /* block id */
         unsigned char           b_queued;       /* re-queued */
         unsigned char           b_granted;      /* VFS granted lock */
-        unsigned char           b_done;         /* callback complete */
         struct nlm_file *       b_file;         /* file in question */
 };
...
@@ -476,10 +476,9 @@ static inline int nfs_wb_page(struct inode *inode, struct page* page)
 }

 /*
- * Allocate and free nfs_write_data structures
+ * Allocate nfs_write_data structures
  */
 extern struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount);
-extern void nfs_writedata_free(struct nfs_write_data *p);

 /*
  * linux/fs/nfs/read.c
@@ -491,10 +490,9 @@ extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *);
 extern void nfs_readdata_release(void *data);

 /*
- * Allocate and free nfs_read_data structures
+ * Allocate nfs_read_data structures
  */
 extern struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount);
-extern void nfs_readdata_free(struct nfs_read_data *p);

 /*
  * linux/fs/nfs3proc.c
...
@@ -229,7 +229,7 @@ int xprt_reserve_xprt(struct rpc_task *task);
 int xprt_reserve_xprt_cong(struct rpc_task *task);
 int xprt_prepare_transmit(struct rpc_task *task);
 void xprt_transmit(struct rpc_task *task);
-void xprt_abort_transmit(struct rpc_task *task);
+void xprt_end_transmit(struct rpc_task *task);
 int xprt_adjust_timeout(struct rpc_rqst *req);
 void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
 void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
...
@@ -921,26 +921,43 @@ call_transmit(struct rpc_task *task)
         task->tk_status = xprt_prepare_transmit(task);
         if (task->tk_status != 0)
                 return;
+        task->tk_action = call_transmit_status;
         /* Encode here so that rpcsec_gss can use correct sequence number. */
         if (rpc_task_need_encode(task)) {
-                task->tk_rqstp->rq_bytes_sent = 0;
+                BUG_ON(task->tk_rqstp->rq_bytes_sent != 0);
                 call_encode(task);
                 /* Did the encode result in an error condition? */
                 if (task->tk_status != 0)
-                        goto out_nosend;
+                        return;
         }
-        task->tk_action = call_transmit_status;
         xprt_transmit(task);
         if (task->tk_status < 0)
                 return;
-        if (!task->tk_msg.rpc_proc->p_decode) {
-                task->tk_action = rpc_exit_task;
-                rpc_wake_up_task(task);
-        }
-        return;
-out_nosend:
-        /* release socket write lock before attempting to handle error */
-        xprt_abort_transmit(task);
+        /*
+         * On success, ensure that we call xprt_end_transmit() before sleeping
+         * in order to allow access to the socket to other RPC requests.
+         */
+        call_transmit_status(task);
+        if (task->tk_msg.rpc_proc->p_decode != NULL)
+                return;
+        task->tk_action = rpc_exit_task;
+        rpc_wake_up_task(task);
+}
+
+/*
+ * 5a. Handle cleanup after a transmission
+ */
+static void
+call_transmit_status(struct rpc_task *task)
+{
+        task->tk_action = call_status;
+        /*
+         * Special case: if we've been waiting on the socket's write_space()
+         * callback, then don't call xprt_end_transmit().
+         */
+        if (task->tk_status == -EAGAIN)
+                return;
+        xprt_end_transmit(task);
         rpc_task_force_reencode(task);
 }
@@ -992,18 +1009,7 @@ call_status(struct rpc_task *task)
 }

 /*
- * 6a. Handle transmission errors.
- */
-static void
-call_transmit_status(struct rpc_task *task)
-{
-        if (task->tk_status != -EAGAIN)
-                rpc_task_force_reencode(task);
-        call_status(task);
-}
-
-/*
- * 6b. Handle RPC timeout
+ * 6a. Handle RPC timeout
  * We do not release the request slot, so we keep using the
  * same XID for all retransmits.
  */
...
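The clnt.c change above is a re-ordering of a function-pointer state machine: call_transmit now sets tk_action = call_transmit_status before the encode or transmit steps can fail, so the cleanup state runs on every exit path and the transport write lock is always released (except in the deliberate -EAGAIN case). A toy version of that dispatch style (hypothetical names):

    #include <stdio.h>

    /* A task advances by overwriting its own 'action' pointer, the way
     * rpc_task moves through call_transmit -> call_transmit_status ->
     * call_status above. */
    struct task {
            void (*action)(struct task *);
            int status;
    };

    static void state_done(struct task *t)
    {
            printf("done, status=%d\n", t->status);
            t->action = NULL;   /* stop the loop */
    }

    static void state_transmit_status(struct task *t)
    {
            /* Cleanup state: runs whether the transmit worked or not. */
            printf("releasing transport\n");
            t->action = state_done;
    }

    static void state_transmit(struct task *t)
    {
            /* Set the next state before starting the fallible work. */
            t->action = state_transmit_status;
            t->status = 0;      /* pretend the send succeeded */
            printf("transmitting\n");
    }

    int main(void)
    {
            struct task t = { .action = state_transmit };

            while (t.action != NULL)
                    t.action(&t);       /* the rpciod-style dispatch loop */
            return 0;
    }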
@@ -667,10 +667,11 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
                                          RPCAUTH_info, RPCAUTH_EOF);
         if (error)
                 goto err_depopulate;
+        dget(dentry);
 out:
         mutex_unlock(&dir->i_mutex);
         rpc_release_path(&nd);
-        return dget(dentry);
+        return dentry;
 err_depopulate:
         rpc_depopulate(dentry);
         __rpc_rmdir(dir, dentry);
@@ -731,10 +732,11 @@ rpc_mkpipe(char *path, void *private, struct rpc_pipe_ops *ops, int flags)
         rpci->flags = flags;
         rpci->ops = ops;
         inode_dir_notify(dir, DN_CREATE);
+        dget(dentry);
 out:
         mutex_unlock(&dir->i_mutex);
         rpc_release_path(&nd);
-        return dget(dentry);
+        return dentry;
 err_dput:
         dput(dentry);
         dentry = ERR_PTR(-ENOMEM);
...
@@ -707,12 +707,9 @@ int xprt_prepare_transmit(struct rpc_task *task)
         return err;
 }

-void
-xprt_abort_transmit(struct rpc_task *task)
+void xprt_end_transmit(struct rpc_task *task)
 {
-        struct rpc_xprt *xprt = task->tk_xprt;
-
-        xprt_release_write(xprt, task);
+        xprt_release_write(task->tk_xprt, task);
 }

 /**
@@ -761,8 +758,6 @@ void xprt_transmit(struct rpc_task *task)
                         task->tk_status = -ENOTCONN;
                 else if (!req->rq_received)
                         rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
-
-                xprt->ops->release_xprt(xprt, task);
                 spin_unlock_bh(&xprt->transport_lock);
                 return;
         }
@@ -772,18 +767,8 @@ void xprt_transmit(struct rpc_task *task)
          * schedq, and being picked up by a parallel run of rpciod().
          */
         task->tk_status = status;
-
-        switch (status) {
-        case -ECONNREFUSED:
+        if (status == -ECONNREFUSED)
                 rpc_sleep_on(&xprt->sending, task, NULL, NULL);
-        case -EAGAIN:
-        case -ENOTCONN:
-                return;
-        default:
-                break;
-        }
-        xprt_release_write(xprt, task);
-        return;
 }

 static inline void do_xprt_reserve(struct rpc_task *task)
...
@@ -413,6 +413,33 @@ static int xs_tcp_send_request(struct rpc_task *task)
         return status;
 }

+/**
+ * xs_tcp_release_xprt - clean up after a tcp transmission
+ * @xprt: transport
+ * @task: rpc task
+ *
+ * This cleans up if an error causes us to abort the transmission of a request.
+ * In this case, the socket may need to be reset in order to avoid confusing
+ * the server.
+ */
+static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+        struct rpc_rqst *req;
+
+        if (task != xprt->snd_task)
+                return;
+        if (task == NULL)
+                goto out_release;
+        req = task->tk_rqstp;
+        if (req->rq_bytes_sent == 0)
+                goto out_release;
+        if (req->rq_bytes_sent == req->rq_snd_buf.len)
+                goto out_release;
+        set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state);
+out_release:
+        xprt_release_xprt(xprt, task);
+}
+
 /**
  * xs_close - close a socket
  * @xprt: transport
@@ -1250,7 +1277,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
 static struct rpc_xprt_ops xs_tcp_ops = {
         .reserve_xprt = xprt_reserve_xprt,
-        .release_xprt = xs_tcp_release_xprt,
+        .release_xprt = xs_tcp_release_xprt,
         .set_port = xs_set_port,
         .connect = xs_connect,
         .buf_alloc = rpc_malloc,
...
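The new xs_tcp_release_xprt() encodes one property of RPC over TCP: requests are delimited by record marking, so if a send aborts with part of a record on the wire the server can no longer find the next record boundary and the socket must be closed; rq_bytes_sent equal to 0 or to rq_snd_buf.len means the stream is still aligned and the transport can be reused as-is. Reduced to a predicate (hypothetical helper):

    #include <stdbool.h>
    #include <stdio.h>

    /* Decide whether an aborted send leaves a record-marked TCP stream
     * unusable; bytes_sent/len mirror rq_bytes_sent and rq_snd_buf.len. */
    static bool must_close_connection(size_t bytes_sent, size_t len)
    {
            if (bytes_sent == 0)
                    return false;   /* nothing sent: still on a boundary */
            if (bytes_sent == len)
                    return false;   /* whole record sent: still aligned */
            return true;            /* partial record: server cannot resync */
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   must_close_connection(0, 100),
                   must_close_connection(100, 100),
                   must_close_connection(42, 100));     /* prints "0 0 1" */
            return 0;
    }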