Commit 2f57572d authored by Linus Torvalds

Merge http://jfs.bkbits.net/linux-2.5
into home.osdl.org:/home/torvalds/v2.5/linux

parents 469c5855 d6ad3d08
@@ -28,30 +28,34 @@
  * with 'Suspend Modulation OFF Count Register'
  * and 'Suspend Modulation ON Count Register'.
  * These registers are 8bit counters that represent the number of
- * 32us intervals which the SUSP# pin is asserted/de-asserted to the
- * processor.
+ * 32us intervals which the SUSP# pin is asserted(ON)/de-asserted(OFF)
+ * to the processor.
  *
  * These counters define a ratio which is the effective frequency
  * of operation of the system.
  *
- *                  On Count
+ *                 OFF Count
  * F_eff = Fgx * ----------------------
- *               On Count + Off Count
+ *               OFF Count + ON Count
  *
  * 0 <= On Count, Off Count <= 255
  *
  * From these limits, we can get register values
  *
- * on_duration + off_duration <= MAX_DURATION
- * off_duration = on_duration * (stock_freq - freq) / freq
+ * off_duration + on_duration <= MAX_DURATION
+ * on_duration = off_duration * (stock_freq - freq) / freq
  *
- * on_duration = (freq * DURATION) / stock_freq
- * off_duration = DURATION - on_duration
+ * off_duration = (freq * DURATION) / stock_freq
+ * on_duration = DURATION - off_duration
  *
  *
  *---------------------------------------------------------------------------
  *
  * ChangeLog:
+ *    Dec. 12, 2003  Hiroshi Miura <miura@da-cha.org>
+ *        - fix on/off register mistake
+ *        - fix cpu_khz calc when it stops cpu modulation.
+ *
  *    Dec. 11, 2002  Hiroshi Miura <miura@da-cha.org>
  *        - rewrite for Cyrix MediaGX Cx5510/5520 and
  *          NatSemi Geode Cs5530(A).
@@ -233,13 +237,13 @@ static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, u8 *off
     int old_tmp_freq = stock_freq;
     int tmp_freq;

-    *on_duration=1;
-    *off_duration=0;
+    *off_duration=1;
+    *on_duration=0;

     for (i=max_duration; i>0; i--) {
-        tmp_on = ((khz * i) / stock_freq) & 0xff;
-        tmp_off = i - tmp_on;
-        tmp_freq = (stock_freq * tmp_on) / i;
+        tmp_off = ((khz * i) / stock_freq) & 0xff;
+        tmp_on = i - tmp_off;
+        tmp_freq = (stock_freq * tmp_off) / i;
         /* if this relation is closer to khz, use this. If it's equal,
          * prefer it, too - lower latency */
         if (abs(tmp_freq - khz) <= abs(old_tmp_freq - khz)) {
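The corrected loop splits each period of i slots so that the OFF count tracks the requested frequency and the ON count is the remainder, matching the F_eff formula in the header comment. A small user-space sketch of the same search follows; the helper name and the sample numbers are illustrative only, not part of the driver.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: mirror of the duration search above.
 * Picks off/on counts so that stock_freq * off / (off + on)
 * lands as close as possible to the requested khz. */
static unsigned int pick_durations(unsigned int khz, unsigned int stock_freq,
                                   unsigned int max_duration,
                                   unsigned char *off_duration,
                                   unsigned char *on_duration)
{
    unsigned int i, tmp_off, tmp_on, tmp_freq;
    unsigned int best_freq = stock_freq;

    *off_duration = 1;
    *on_duration = 0;

    for (i = max_duration; i > 0; i--) {
        tmp_off = ((khz * i) / stock_freq) & 0xff;   /* OFF slots out of i */
        tmp_on = i - tmp_off;                        /* the rest are ON (suspended) */
        tmp_freq = (stock_freq * tmp_off) / i;       /* effective frequency */
        if (abs((int)tmp_freq - (int)khz) <= abs((int)best_freq - (int)khz)) {
            best_freq = tmp_freq;
            *off_duration = (unsigned char)tmp_off;
            *on_duration = (unsigned char)tmp_on;
        }
    }
    return best_freq;
}

int main(void)
{
    unsigned char off, on;
    /* e.g. a 266 MHz part asked to run near 200 MHz; 0xff is the register maximum */
    unsigned int eff = pick_durations(200000, 266000, 0xff, &off, &on);
    printf("off=%u on=%u effective=%u kHz\n", off, on, eff);
    return 0;
}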
@@ -273,42 +277,37 @@ static void gx_set_cpuspeed(unsigned int khz)
     freqs.new = new_khz;

-    if (new_khz == stock_freq) {  /* if new khz == 100% of CPU speed, it is special case */
-        local_irq_save(flags);
-        cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-        pci_write_config_byte(gx_params->cs55x0, PCI_SUSCFG, (gx_params->pci_suscfg & ~(SUSMOD)));
-        pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &(gx_params->pci_suscfg));
-        local_irq_restore(flags);
-        dprintk("suspend modulation disabled: cpu runs 100 percent speed.\n");
-        cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-        return;
-    }
-
     cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
     local_irq_save(flags);
-    switch (gx_params->cs55x0->device) {
-    case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
-        pmer1 = gx_params->pci_pmer1 | IRQ_SPDUP | VID_SPDUP;
-        /* FIXME: need to test other values -- Zwane,Miura */
-        pci_write_config_byte(gx_params->cs55x0, PCI_IRQTC, 4); /* typical 2 to 4ms */
-        pci_write_config_byte(gx_params->cs55x0, PCI_VIDTC, 100);/* typical 50 to 100ms */
-        pci_write_config_byte(gx_params->cs55x0, PCI_PMER1, pmer1);
-        if (gx_params->pci_rev < 0x10) { /* CS5530(rev 1.2, 1.3) */
-            suscfg = gx_params->pci_suscfg | SUSMOD;
-        } else {                         /* CS5530A,B.. */
-            suscfg = gx_params->pci_suscfg | SUSMOD | PWRSVE;
-        }
-        break;
-    case PCI_DEVICE_ID_CYRIX_5520:
-    case PCI_DEVICE_ID_CYRIX_5510:
-        suscfg = gx_params->pci_suscfg | SUSMOD;
-        break;
-    default:
-        local_irq_restore(flags);
-        dprintk("fatal: try to set unknown chipset.\n");
-        return;
-    }
+    if (new_khz != stock_freq) {  /* if new khz == 100% of CPU speed, it is special case */
+        switch (gx_params->cs55x0->device) {
+        case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
+            pmer1 = gx_params->pci_pmer1 | IRQ_SPDUP | VID_SPDUP;
+            /* FIXME: need to test other values -- Zwane,Miura */
+            pci_write_config_byte(gx_params->cs55x0, PCI_IRQTC, 4); /* typical 2 to 4ms */
+            pci_write_config_byte(gx_params->cs55x0, PCI_VIDTC, 100);/* typical 50 to 100ms */
+            pci_write_config_byte(gx_params->cs55x0, PCI_PMER1, pmer1);
+            if (gx_params->pci_rev < 0x10) { /* CS5530(rev 1.2, 1.3) */
+                suscfg = gx_params->pci_suscfg | SUSMOD;
+            } else {                         /* CS5530A,B.. */
+                suscfg = gx_params->pci_suscfg | SUSMOD | PWRSVE;
+            }
+            break;
+        case PCI_DEVICE_ID_CYRIX_5520:
+        case PCI_DEVICE_ID_CYRIX_5510:
+            suscfg = gx_params->pci_suscfg | SUSMOD;
+            break;
+        default:
+            local_irq_restore(flags);
+            dprintk("fatal: try to set unknown chipset.\n");
+            return;
+        }
+    } else {
+        suscfg = gx_params->pci_suscfg & ~(SUSMOD);
+        gx_params->off_duration = 0;
+        gx_params->on_duration = 0;
+        dprintk("suspend modulation disabled: cpu runs 100 percent speed.\n");
+    }

     pci_write_config_byte(gx_params->cs55x0, PCI_MODOFF, gx_params->off_duration);
......
@@ -246,7 +246,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
 {
     cpufreq_frequency_table_put_attr(policy->cpu);
-    return cpufreq_p4_setdc(policy->cpu, DC_DISABLE);
+    return 0;
 }

 static struct freq_attr* p4clockmod_attr[] = {
......
@@ -91,18 +91,13 @@ static int check_powernow(void)
     struct cpuinfo_x86 *c = cpu_data;
     unsigned int maxei, eax, ebx, ecx, edx;

-    if (c->x86_vendor != X86_VENDOR_AMD) {
-        printk (KERN_INFO PFX "AMD processor not detected.\n");
-        return 0;
-    }
-
-    if (c->x86 !=6) {
+    if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 !=6)) {
+#ifdef MODULE
         printk (KERN_INFO PFX "This module only works with AMD K7 CPUs\n");
+#endif
         return 0;
     }

-    printk (KERN_INFO PFX "AMD K7 CPU detected.\n");
-
     if ((c->x86_model == 6) && (c->x86_mask == 0)) {
         printk (KERN_INFO PFX "K7 660[A0] core detected, enabling errata workarounds\n");
         have_a0 = 1;
......
@@ -31,7 +31,7 @@
 #define PFX "powernow-k8: "
 #define BFX PFX "BIOS error: "
-#define VERSION "version 1.00.08 - September 26, 2003"
+#define VERSION "version 1.00.08a"

 #include "powernow-k8.h"

 #ifdef CONFIG_PREEMPT
@@ -44,10 +44,11 @@ static u32 numps;   /* number of p-states, from PSB */
 static u32 rvo;     /* ramp voltage offset, from PSB */
 static u32 irt;     /* isochronous relief time, from PSB */
 static u32 vidmvs;  /* usable value calculated from mvs, from PSB */
-struct pst_s *ppst; /* array of p states, valid for this part */
 static u32 currvid; /* keep track of the current fid / vid */
 static u32 currfid;

+static struct cpufreq_frequency_table *powernow_table;
+
 /*
 The PSB table supplied by BIOS allows for the definition of the number of
 p-states that can be used when running on a/c, and the number of p-states
@@ -71,30 +72,12 @@ so this is not actually a restriction.
 static u32 batps;   /* limit on the number of p states when on battery */
                     /* - set by BIOS in the PSB/PST */

-static struct cpufreq_driver cpufreq_amd64_driver = {
-    .verify = powernowk8_verify,
-    .target = powernowk8_target,
-    .init = powernowk8_cpu_init,
-    .name = "cpufreq-amd64",
-    .owner = THIS_MODULE,
-};
-
-#define SEARCH_UP    1
-#define SEARCH_DOWN  0
-
 /* Return a frequency in MHz, given an input fid */
-u32
-find_freq_from_fid(u32 fid)
+static u32 find_freq_from_fid(u32 fid)
 {
     return 800 + (fid * 100);
 }

-/* Return a fid matching an input frequency in MHz */
-static u32
-find_fid_from_freq(u32 freq)
-{
-    return (freq - 800) / 100;
-}
-
 /* Return the vco fid for an input fid */
 static u32
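The fid/frequency mapping this hunk keeps, and the inverse that the deleted find_fid_from_freq() computed, follow directly from 800 + (fid * 100). A tiny user-space check, for reference only:

#include <assert.h>
#include <stdio.h>

typedef unsigned int u32;

/* 100 MHz per fid step above an 800 MHz base, as in the hunk above. */
static u32 find_freq_from_fid(u32 fid)
{
    return 800 + (fid * 100);
}

/* Inverse mapping, as the removed helper computed it. */
static u32 find_fid_from_freq(u32 freq)
{
    return (freq - 800) / 100;
}

int main(void)
{
    assert(find_freq_from_fid(0) == 800);      /* fid 0x00 -> 800 MHz */
    assert(find_freq_from_fid(0x0c) == 2000);  /* fid 0x0c -> 2000 MHz */
    assert(find_fid_from_freq(2000) == 0x0c);  /* round trip */
    printf("fid/frequency mapping checks pass\n");
    return 0;
}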
@@ -107,56 +90,27 @@ convert_fid_to_vco_fid(u32 fid)
     }
 }

-/* Sort the fid/vid frequency table into ascending order by fid. The spec */
-/* implies that it will be sorted by BIOS, but, it only implies it, and I */
-/* prefer not to trust when I can check. */
-/* Yes, it is a simple bubble sort, but the PST is really small, so the */
-/* choice of algorithm is pretty irrelevant. */
-static inline void
-sort_pst(struct pst_s *ppst, u32 numpstates)
-{
-    u32 i;
-    u8 tempfid;
-    u8 tempvid;
-    int swaps = 1;
-
-    while (swaps) {
-        swaps = 0;
-        for (i = 0; i < (numpstates - 1); i++) {
-            if (ppst[i].fid > ppst[i + 1].fid) {
-                swaps = 1;
-                tempfid = ppst[i].fid;
-                tempvid = ppst[i].vid;
-                ppst[i].fid = ppst[i + 1].fid;
-                ppst[i].vid = ppst[i + 1].vid;
-                ppst[i + 1].fid = tempfid;
-                ppst[i + 1].vid = tempvid;
-            }
-        }
-    }
-    return;
-}
-
-/* Return 1 if the pending bit is set. Unless we are actually just told the */
-/* processor to transition a state, seeing this bit set is really bad news. */
+/*
+ * Return 1 if the pending bit is set. Unless we are actually just told the
+ * processor to transition a state, seeing this bit set is really bad news.
+ */
 static inline int
 pending_bit_stuck(void)
 {
-    u32 lo;
-    u32 hi;
+    u32 lo, hi;

     rdmsr(MSR_FIDVID_STATUS, lo, hi);
     return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0;
 }

-/* Update the global current fid / vid values from the status msr. Returns 1 */
-/* on error. */
+/*
+ * Update the global current fid / vid values from the status msr. Returns 1
+ * on error.
+ */
 static int
 query_current_values_with_pending_wait(void)
 {
-    u32 lo;
-    u32 hi;
+    u32 lo, hi;
     u32 i = 0;

     lo = MSR_S_LO_CHANGE_PENDING;
@@ -271,9 +225,11 @@ write_new_vid(u32 vid)
     return 0;
 }

-/* Reduce the vid by the max of step or reqvid. */
-/* Decreasing vid codes represent increasing voltages : */
-/* vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of 0x1f is off. */
+/*
+ * Reduce the vid by the max of step or reqvid.
+ * Decreasing vid codes represent increasing voltages:
+ * vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of 0x1f is off.
+ */
 static int
 decrease_vid_code_by_step(u32 reqvid, u32 step)
 {
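The comment pins two points on the vid scale: vid 0 is 1.550 V, vid 0x1e is 0.800 V, and 0x1f is off. Assuming the linear 25 mV step those endpoints imply (an assumed step size, not something the diff states), a vid code converts to millivolts as in this sketch:

#include <stdio.h>

typedef unsigned int u32;

/* Assumption: 25 mV per vid step, derived from the two endpoints above. */
static int vid_to_mv(u32 vid)
{
    if (vid >= 0x1f)
        return 0;                   /* 0x1f switches the output off */
    return 1550 - (int)vid * 25;    /* decreasing vid = increasing voltage */
}

int main(void)
{
    printf("vid 0x00 -> %d mV\n", vid_to_mv(0x00));      /* 1550 */
    printf("vid 0x1e -> %d mV\n", vid_to_mv(0x1e));      /* 800 */
    printf("vid 0x1f -> %d mV (off)\n", vid_to_mv(0x1f));
    return 0;
}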
@@ -316,8 +272,10 @@ transition_fid_vid(u32 reqfid, u32 reqvid)
     return 0;
 }

-/* Phase 1 - core voltage transition ... setup appropriate voltage for the */
-/* fid transition. */
+/*
+ * Phase 1 - core voltage transition ... setup appropriate voltage for the
+ * fid transition.
+ */
 static inline int
 core_voltage_pre_transition(u32 reqvid)
 {
@@ -500,7 +458,9 @@ check_supported_cpu(void)
     }

     if (c->x86_vendor != X86_VENDOR_AMD) {
+#ifdef MODULE
         printk(KERN_INFO PFX "Not an AMD processor\n");
+#endif
         return 0;
     }
@@ -533,20 +493,59 @@ check_supported_cpu(void)
         return 0;
     }

-    printk(KERN_INFO PFX "Found AMD Athlon 64 / Opteron processor "
-           "supporting p-state transitions\n");
+    printk(KERN_INFO PFX "Found AMD64 processor supporting PowerNow (" VERSION ")\n");

     return 1;
 }

+static int check_pst_table(struct pst_s *pst, u8 maxvid)
+{
+    unsigned int j;
+    u8 lastfid = 0xFF;
+
+    for (j = 0; j < numps; j++) {
+        if (pst[j].vid > LEAST_VID) {
+            printk(KERN_ERR PFX "vid %d invalid : 0x%x\n", j, pst[j].vid);
+            return -EINVAL;
+        }
+        if (pst[j].vid < rvo) {    /* vid + rvo >= 0 */
+            printk(KERN_ERR PFX
+                   "BIOS error - 0 vid exceeded with pstate %d\n",
+                   j);
+            return -ENODEV;
+        }
+        if (pst[j].vid < maxvid + rvo) {    /* vid + rvo >= maxvid */
+            printk(KERN_ERR PFX
+                   "BIOS error - maxvid exceeded with pstate %d\n",
+                   j);
+            return -ENODEV;
+        }
+        if ((pst[j].fid > MAX_FID)
+            || (pst[j].fid & 1)
+            || (pst[j].fid < HI_FID_TABLE_BOTTOM)) {
+            printk(KERN_ERR PFX "fid %d invalid : 0x%x\n", j, pst[j].fid);
+            return -EINVAL;
+        }
+        if (pst[j].fid < lastfid)
+            lastfid = pst[j].fid;
+    }
+
+    if (lastfid & 1) {
+        printk(KERN_ERR PFX "lastfid invalid\n");
+        return -EINVAL;
+    }
+    if (lastfid > LO_FID_TABLE_TOP) {
+        printk(KERN_INFO PFX "first fid not from lo freq table\n");
+    }
+
+    return 0;
+}
+
 /* Find and validate the PSB/PST table in BIOS. */
 static inline int
 find_psb_table(void)
 {
     struct psb_s *psb;
     struct pst_s *pst;
-    unsigned i, j;
-    u32 lastfid;
+    unsigned int i, j;
     u32 mvs;
     u8 maxvid;
@@ -573,33 +572,19 @@ find_psb_table(void)
     }

     vstable = psb->voltagestabilizationtime;
-    printk(KERN_INFO PFX "voltage stable time: %d (units 20us)\n",
-           vstable);

     dprintk(KERN_DEBUG PFX "flags2: 0x%x\n", psb->flags2);
     rvo = psb->flags2 & 3;
     irt = ((psb->flags2) >> 2) & 3;
     mvs = ((psb->flags2) >> 4) & 3;
     vidmvs = 1 << mvs;
     batps = ((psb->flags2) >> 6) & 3;
-    printk(KERN_INFO PFX "p states on battery: %d ", batps);
-    switch (batps) {
-    case 0:
-        printk("- all available\n");
-        break;
-    case 1:
-        printk("- only the minimum\n");
-        break;
-    case 2:
-        printk("- only the 2 lowest\n");
-        break;
-    case 3:
-        printk("- only the 3 lowest\n");
-        break;
-    }
-    printk(KERN_INFO PFX "ramp voltage offset: %d\n", rvo);
-    printk(KERN_INFO PFX "isochronous relief time: %d\n", irt);
-    printk(KERN_INFO PFX "maximum voltage step: %d\n", mvs);
+
+    printk(KERN_INFO PFX "voltage stable in %d usec", vstable * 20);
+    if (batps)
+        printk(", only %d lowest states on battery", batps);
+    printk(", ramp voltage offset: %d", rvo);
+    printk(", isochronous relief time: %d", irt);
+    printk(", maximum voltage step: %d\n", mvs);

     dprintk(KERN_DEBUG PFX "numpst: 0x%x\n", psb->numpst);
     if (psb->numpst != 1) {
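The new logging path decodes flags2 as four 2-bit fields, low bits first: rvo, irt, mvs, then batps. A user-space sketch with a made-up sample value shows the unpacking:

#include <stdio.h>

typedef unsigned int u32;
typedef unsigned char u8;

int main(void)
{
    u8 flags2 = 0xb4;                 /* 0b10110100, arbitrary example value */
    u32 rvo    = flags2 & 3;          /* ramp voltage offset        -> 0 */
    u32 irt    = (flags2 >> 2) & 3;   /* isochronous relief time    -> 1 */
    u32 mvs    = (flags2 >> 4) & 3;   /* maximum voltage step       -> 3 */
    u32 batps  = (flags2 >> 6) & 3;   /* p-states usable on battery -> 2 */
    u32 vidmvs = 1 << mvs;            /* usable step value, as in the driver */

    printf("rvo=%u irt=%u mvs=%u (vidmvs=%u) batps=%u\n",
           rvo, irt, mvs, vidmvs, batps);
    return 0;
}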
@@ -610,14 +595,13 @@ find_psb_table(void)
     dprintk(KERN_DEBUG PFX "cpuid: 0x%x\n", psb->cpuid);

     plllock = psb->plllocktime;
-    printk(KERN_INFO PFX "pll lock time: 0x%x\n", plllock);
+    printk(KERN_INFO PFX "pll lock time: 0x%x, ", plllock);

     maxvid = psb->maxvid;
-    printk(KERN_INFO PFX "maxfid: 0x%x\n", psb->maxfid);
-    printk(KERN_INFO PFX "maxvid: 0x%x\n", maxvid);
+    printk("maxfid 0x%x (%d MHz), maxvid 0x%x\n",
+           psb->maxfid, find_freq_from_fid(psb->maxfid), maxvid);

     numps = psb->numpstates;
-    printk(KERN_INFO PFX "numpstates: 0x%x\n", numps);

     if (numps < 2) {
         printk(KERN_ERR BFX "no p states to transition\n");
         return -ENODEV;
@@ -636,78 +620,41 @@ find_psb_table(void)
             "%d p-states\n", numps);
     }

-    if ((numps <= 1) || (batps <= 1)) {
+    if (numps <= 1) {
         printk(KERN_ERR PFX "only 1 p-state to transition\n");
         return -ENODEV;
     }

-    ppst = kmalloc(sizeof (struct pst_s) * numps, GFP_KERNEL);
-    if (!ppst) {
-        printk(KERN_ERR PFX "ppst memory alloc failure\n");
-        return -ENOMEM;
-    }
-
     pst = (struct pst_s *) (psb + 1);
-    for (j = 0; j < numps; j++) {
-        ppst[j].fid = pst[j].fid;
-        ppst[j].vid = pst[j].vid;
-        printk(KERN_INFO PFX
-               "   %d : fid 0x%x, vid 0x%x\n", j,
-               ppst[j].fid, ppst[j].vid);
-    }
-    sort_pst(ppst, numps);
-
-    lastfid = ppst[0].fid;
-    if (lastfid > LO_FID_TABLE_TOP)
-        printk(KERN_INFO BFX "first fid not in lo freq tbl\n");
-
-    if ((lastfid > MAX_FID) || (lastfid & 1) || (ppst[0].vid > LEAST_VID)) {
-        printk(KERN_ERR BFX "first fid/vid bad (0x%x - 0x%x)\n",
-               lastfid, ppst[0].vid);
-        kfree(ppst);
-        return -ENODEV;
-    }
-
-    for (j = 1; j < numps; j++) {
-        if ((lastfid >= ppst[j].fid)
-            || (ppst[j].fid & 1)
-            || (ppst[j].fid < HI_FID_TABLE_BOTTOM)
-            || (ppst[j].fid > MAX_FID)
-            || (ppst[j].vid > LEAST_VID)) {
-            printk(KERN_ERR BFX
-                   "invalid fid/vid in pst(%x %x)\n",
-                   ppst[j].fid, ppst[j].vid);
-            kfree(ppst);
-            return -ENODEV;
-        }
-        lastfid = ppst[j].fid;
-    }
+    if (check_pst_table(pst, maxvid))
+        return -EINVAL;

-    for (j = 0; j < numps; j++) {
-        if (ppst[j].vid < rvo) {    /* vid+rvo >= 0 */
-            printk(KERN_ERR BFX
-                   "0 vid exceeded with pstate %d\n", j);
-            kfree(ppst);
-            return -ENODEV;
-        }
-        if (ppst[j].vid < maxvid+rvo) {    /* vid+rvo >= maxvid */
-            printk(KERN_ERR BFX
-                   "maxvid exceeded with pstate %d\n", j);
-            kfree(ppst);
-            return -ENODEV;
-        }
+    powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) * (numps + 1)), GFP_KERNEL);
+    if (!powernow_table) {
+        printk(KERN_ERR PFX "powernow_table memory alloc failure\n");
+        return -ENOMEM;
+    }
+
+    for (j = 0; j < numps; j++) {
+        printk(KERN_INFO PFX "   %d : fid 0x%x (%d MHz), vid 0x%x\n", j,
+               pst[j].fid, find_freq_from_fid(pst[j].fid), pst[j].vid);
+        powernow_table[j].index = pst[j].fid; /* lower 8 bits */
+        powernow_table[j].index |= (pst[j].vid << 8); /* upper 8 bits */
+        powernow_table[j].frequency = find_freq_from_fid(pst[j].fid);
     }
+    powernow_table[numps].frequency = CPUFREQ_TABLE_END;
+    powernow_table[numps].index = 0;

     if (query_current_values_with_pending_wait()) {
-        kfree(ppst);
+        kfree(powernow_table);
         return -EIO;
     }

-    printk(KERN_INFO PFX "currfid 0x%x, currvid 0x%x\n",
-           currfid, currvid);
+    printk(KERN_INFO PFX "currfid 0x%x (%d MHz), currvid 0x%x\n",
+           currfid, find_freq_from_fid(currfid), currvid);

     for (j = 0; j < numps; j++)
-        if ((ppst[j].fid==currfid) && (ppst[j].vid==currvid))
+        if ((pst[j].fid==currfid) && (pst[j].vid==currvid))
             return 0;

     printk(KERN_ERR BFX "currfid/vid do not match PST, ignoring\n");
@@ -718,112 +665,22 @@ find_psb_table(void)
     return -ENODEV;
 }

-/* Converts a frequency (that might not necessarily be a multiple of 200) */
-/* to a fid. */
-static u32
-find_closest_fid(u32 freq, int searchup)
-{
-    if (searchup == SEARCH_UP)
-        freq += MIN_FREQ_RESOLUTION - 1;
-
-    freq = (freq / MIN_FREQ_RESOLUTION) * MIN_FREQ_RESOLUTION;
-
-    if (freq < MIN_FREQ)
-        freq = MIN_FREQ;
-    else if (freq > MAX_FREQ)
-        freq = MAX_FREQ;
-
-    return find_fid_from_freq(freq);
-}
-
-static int
-find_match(u32 * ptargfreq, u32 * pmin, u32 * pmax, int searchup, u32 * pfid,
-           u32 * pvid)
-{
-    u32 availpstates = batps;
-    u32 targfid = find_closest_fid(*ptargfreq, searchup);
-    u32 minfid = find_closest_fid(*pmin, SEARCH_DOWN);
-    u32 maxfid = find_closest_fid(*pmax, SEARCH_UP);
-    u32 minidx = 0;
-    u32 maxidx = availpstates - 1;
-    u32 targidx = 0xffffffff;
-    int i;
-
-    dprintk(KERN_DEBUG PFX "find match: freq %d MHz, min %d, max %d\n",
-            *ptargfreq, *pmin, *pmax);
-
-    /* Restrict values to the frequency choices in the PST */
-    if (minfid < ppst[0].fid)
-        minfid = ppst[0].fid;
-    if (maxfid > ppst[maxidx].fid)
-        maxfid = ppst[maxidx].fid;
-
-    /* Find appropriate PST index for the minimim fid */
-    for (i = 0; i < (int) availpstates; i++) {
-        if (minfid >= ppst[i].fid)
-            minidx = i;
-    }
-
-    /* Find appropriate PST index for the maximum fid */
-    for (i = availpstates - 1; i >= 0; i--) {
-        if (maxfid <= ppst[i].fid)
-            maxidx = i;
-    }
-
-    if (minidx > maxidx)
-        maxidx = minidx;
-
-    /* Frequency ids are now constrained by limits matching PST entries */
-    minfid = ppst[minidx].fid;
-    maxfid = ppst[maxidx].fid;
-
-    /* Limit the target frequency to these limits */
-    if (targfid < minfid)
-        targfid = minfid;
-    else if (targfid > maxfid)
-        targfid = maxfid;
-
-    /* Find the best target index into the PST, contrained by the range */
-    if (searchup == SEARCH_UP) {
-        for (i = maxidx; i >= (int) minidx; i--) {
-            if (targfid <= ppst[i].fid)
-                targidx = i;
-        }
-    } else {
-        for (i = minidx; i <= (int) maxidx; i++) {
-            if (targfid >= ppst[i].fid)
-                targidx = i;
-        }
-    }
-
-    if (targidx == 0xffffffff) {
-        printk(KERN_ERR PFX "could not find target\n");
-        return 1;
-    }
-
-    *pmin = find_freq_from_fid(minfid);
-    *pmax = find_freq_from_fid(maxfid);
-    *ptargfreq = find_freq_from_fid(ppst[targidx].fid);
-
-    if (pfid)
-        *pfid = ppst[targidx].fid;
-    if (pvid)
-        *pvid = ppst[targidx].vid;
-
-    return 0;
-}
-
 /* Take a frequency, and issue the fid/vid transition command */
 static inline int
-transition_frequency(u32 * preq, u32 * pmin, u32 * pmax, u32 searchup)
+transition_frequency(unsigned int index)
 {
     u32 fid;
     u32 vid;
     int res;
     struct cpufreq_freqs freqs;

-    if (find_match(preq, pmin, pmax, searchup, &fid, &vid))
-        return 1;
+    /* fid are the lower 8 bits of the index we stored into
+     * the cpufreq frequency table in find_psb_table, vid are
+     * the upper 8 bits.
+     */
+    fid = powernow_table[index].index & 0xFF;
+    vid = (powernow_table[index].index & 0xFF00) >> 8;

     dprintk(KERN_DEBUG PFX "table matched fid 0x%x, giving vid 0x%x\n",
             fid, vid);
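transition_frequency() relies on the packing done when the table is built: fid in the low byte of the cpufreq table's .index, vid in the byte above it. A minimal round-trip sketch (the fid/vid pair is invented):

#include <assert.h>
#include <stdio.h>

typedef unsigned int u32;
typedef unsigned char u8;

/* fid in bits 0-7, vid in bits 8-15, matching the table built above. */
static u32 pack_fid_vid(u8 fid, u8 vid)
{
    return (u32)fid | ((u32)vid << 8);
}

static void unpack_fid_vid(u32 index, u8 *fid, u8 *vid)
{
    *fid = index & 0xFF;
    *vid = (index & 0xFF00) >> 8;
}

int main(void)
{
    u8 fid, vid;
    u32 index = pack_fid_vid(0x0c, 0x06);  /* invented fid/vid pair */

    unpack_fid_vid(index, &fid, &vid);
    assert(fid == 0x0c && vid == 0x06);
    printf("index 0x%04x -> fid 0x%02x, vid 0x%02x\n", index, fid, vid);
    return 0;
}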
@@ -867,14 +724,7 @@ powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relati
 {
     u32 checkfid = currfid;
     u32 checkvid = currvid;
-    u32 reqfreq = targfreq / 1000;
-    u32 minfreq = pol->min / 1000;
-    u32 maxfreq = pol->max / 1000;
-
-    if (ppst == 0) {
-        printk(KERN_ERR PFX "targ: ppst 0\n");
-        return -ENODEV;
-    }
+    unsigned int newstate;

     if (pending_bit_stuck()) {
         printk(KERN_ERR PFX "drv targ fail: change pending bit set\n");
@@ -896,9 +746,10 @@ powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relati
             checkfid, currfid, checkvid, currvid);
     }

-    if (transition_frequency(&reqfreq, &minfreq, &maxfreq,
-                             relation ==
-                             CPUFREQ_RELATION_H ? SEARCH_UP : SEARCH_DOWN))
+    if (cpufreq_frequency_table_target(pol, powernow_table, targfreq, relation, &newstate))
+        return -EINVAL;
+
+    if (transition_frequency(newstate))
     {
         printk(KERN_ERR PFX "transition frequency failed\n");
         return 1;
@@ -913,36 +764,12 @@ powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relati
 static int
 powernowk8_verify(struct cpufreq_policy *pol)
 {
-    u32 min = pol->min / 1000;
-    u32 max = pol->max / 1000;
-    u32 targ = min;
-    int res;
-
-    if (ppst == 0) {
-        printk(KERN_ERR PFX "verify - ppst 0\n");
-        return -ENODEV;
-    }
-
     if (pending_bit_stuck()) {
         printk(KERN_ERR PFX "failing verify, change pending bit set\n");
         return -EIO;
     }

-    dprintk(KERN_DEBUG PFX
-            "ver: cpu%d, min %d, max %d, cur %d, pol %d\n", pol->cpu,
-            pol->min, pol->max, pol->cur, pol->policy);
-
-    if (pol->cpu != 0) {
-        printk(KERN_ERR PFX "verify - cpu not 0\n");
-        return -ENODEV;
-    }
-
-    res = find_match(&targ, &min, &max, SEARCH_DOWN, 0, 0);
-    if (!res) {
-        pol->min = min * 1000;
-        pol->max = max * 1000;
-    }
-
-    return res;
+    return cpufreq_frequency_table_verify(pol, powernow_table);
 }

 /* per CPU init entry point to the driver */
@@ -968,10 +795,11 @@ powernowk8_cpu_init(struct cpufreq_policy *pol)
     dprintk(KERN_DEBUG PFX "policy current frequency %d kHz\n", pol->cur);

     /* min/max the cpu is capable of */
-    pol->cpuinfo.min_freq = 1000 * find_freq_from_fid(ppst[0].fid);
-    pol->cpuinfo.max_freq = 1000 * find_freq_from_fid(ppst[numps-1].fid);
-    pol->min = 1000 * find_freq_from_fid(ppst[0].fid);
-    pol->max = 1000 * find_freq_from_fid(ppst[batps - 1].fid);
+    if (cpufreq_frequency_table_cpuinfo(pol, powernow_table)) {
+        printk(KERN_ERR PFX "invalid powernow_table\n");
+        kfree(powernow_table);
+        return -EINVAL;
+    }

     printk(KERN_INFO PFX "cpu_init done, current fid 0x%x, vid 0x%x\n",
            currfid, currvid);
@@ -979,14 +807,33 @@ powernowk8_cpu_init(struct cpufreq_policy *pol)
     return 0;
 }

+static int __exit powernowk8_cpu_exit (struct cpufreq_policy *pol)
+{
+    if (pol->cpu != 0)
+        return -EINVAL;
+
+    if (powernow_table)
+        kfree(powernow_table);
+
+    return 0;
+}
+
+static struct cpufreq_driver cpufreq_amd64_driver = {
+    .verify = powernowk8_verify,
+    .target = powernowk8_target,
+    .init = powernowk8_cpu_init,
+    .exit = powernowk8_cpu_exit,
+    .name = "powernow-k8",
+    .owner = THIS_MODULE,
+};
+
 /* driver entry point for init */
 static int __init
 powernowk8_init(void)
 {
     int rc;

+    printk(KERN_INFO PFX VERSION "\n");
+
     if (check_supported_cpu() == 0)
         return -ENODEV;
@@ -996,7 +843,6 @@ powernowk8_init(void)
     if (pending_bit_stuck()) {
         printk(KERN_ERR PFX "powernowk8_init fail, change pending bit set\n");
-        kfree(ppst);
         return -EIO;
     }
@@ -1010,7 +856,6 @@ powernowk8_exit(void)
     dprintk(KERN_INFO PFX "powernowk8_exit\n");

     cpufreq_unregister_driver(&cpufreq_amd64_driver);
-    kfree(ppst);
 }

 MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com>");
......
@@ -120,7 +120,3 @@ struct pst_s {
 static inline int core_voltage_pre_transition(u32 reqvid);
 static inline int core_voltage_post_transition(u32 reqvid);
 static inline int core_frequency_transition(u32 reqfid);
-static int powernowk8_verify(struct cpufreq_policy *pol);
-static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq,
-                             unsigned relation);
-static int __init powernowk8_cpu_init(struct cpufreq_policy *pol);
@@ -207,17 +207,55 @@ unsigned int speedstep_detect_processor (void)
         if (c->x86_model != 2)
             return 0;

-        if ((c->x86_mask != 4) &&    /* B-stepping [M-P4-M] */
-            (c->x86_mask != 7) &&    /* C-stepping [M-P4-M] */
-            (c->x86_mask != 9))      /* D-stepping [M-P4-M or M-P4/533] */
-            return 0;
-
         ebx = cpuid_ebx(0x00000001);
         ebx &= 0x000000FF;
-        if ((ebx != 0x0e) && (ebx != 0x0f))
-            return 0;

-        return SPEEDSTEP_PROCESSOR_P4M;
+        dprintk(KERN_INFO "ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask);
+
+        switch (c->x86_mask) {
+        case 4:
+            /*
+             * B-stepping [M-P4-M]
+             * sample has ebx = 0x0f, production has 0x0e.
+             */
+            if ((ebx == 0x0e) || (ebx == 0x0f))
+                return SPEEDSTEP_PROCESSOR_P4M;
+            break;
+        case 7:
+            /*
+             * C-stepping [M-P4-M]
+             * needs to have ebx=0x0e, else it's a celeron:
+             * cf. 25130917.pdf / page 7, footnote 5 even
+             * though 25072120.pdf / page 7 doesn't say
+             * samples are only of B-stepping...
+             */
+            if (ebx == 0x0e)
+                return SPEEDSTEP_PROCESSOR_P4M;
+            break;
+        case 9:
+            /*
+             * D-stepping [M-P4-M or M-P4/533]
+             *
+             * this is totally strange: CPUID 0x0F29 is
+             * used by M-P4-M, M-P4/533 and(!) Celeron CPUs.
+             * The latter need to be sorted out as they don't
+             * support speedstep.
+             * Celerons with CPUID 0x0F29 may have either
+             * ebx=0x8 or 0xf -- 25130917.pdf doesn't say anything
+             * specific.
+             * M-P4-Ms may have either ebx=0xe or 0xf [see above]
+             * M-P4/533 have either ebx=0xe or 0xf. [25317607.pdf]
+             * So, how to distinguish all those processors with
+             * ebx=0xf? I don't know. Sort them out, and wait
+             * for someone to complain.
+             */
+            if (ebx == 0x0e)
+                return SPEEDSTEP_PROCESSOR_P4M;
+            break;
+        default:
+            break;
+        }
+        return 0;
     }

     switch (c->x86_model) {
......
@@ -296,12 +296,8 @@ static void __init init_intel(struct cpuinfo_x86 *c)
     } else if (smp_num_siblings > 1 ) {
         index_lsb = 0;
         index_msb = 31;
-        /*
-         * At this point we only support two siblings per
-         * processor package.
-         */
-#define NR_SIBLINGS 2
-        if (smp_num_siblings != NR_SIBLINGS) {
+
+        if (smp_num_siblings > NR_CPUS) {
             printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
             smp_num_siblings = 1;
             goto too_many_siblings;
......
@@ -2,7 +2,7 @@
  * linux/drivers/cpufreq/cpufreq_userspace.c
  *
  * Copyright (C) 2001 Russell King
- *           (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ *           (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -112,7 +112,14 @@ int cpufreq_set(unsigned int freq, unsigned int cpu)
     if (freq > cpu_max_freq[cpu])
         freq = cpu_max_freq[cpu];

-    ret = cpufreq_driver_target(&current_policy[cpu], freq,
+    /*
+     * We're safe from concurrent calls to ->target() here
+     * as we hold the userspace_sem lock. If we were calling
+     * cpufreq_driver_target, a deadlock situation might occur:
+     * A: cpufreq_set (lock userspace_sem) -> cpufreq_driver_target(lock policy->lock)
+     * B: cpufreq_set_policy(lock policy->lock) -> __cpufreq_governor -> cpufreq_governor_userspace (lock userspace_sem)
+     */
+    ret = __cpufreq_driver_target(&current_policy[cpu], freq,
             CPUFREQ_RELATION_L);

  err:
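The comment spells out the hazard: path A holds userspace_sem and would take policy->lock, while path B holds policy->lock and takes userspace_sem. A generic user-space illustration of that AB-BA ordering, using pthread mutexes rather than the kernel primitives (none of this is kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t userspace_sem = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t policy_lock   = PTHREAD_MUTEX_INITIALIZER;

/* Path A, roughly cpufreq_set(): its own lock first, then the policy lock
 * that cpufreq_driver_target() would have taken. */
static void path_a(void)
{
    pthread_mutex_lock(&userspace_sem);
    pthread_mutex_lock(&policy_lock);
    /* ... issue the frequency transition ... */
    pthread_mutex_unlock(&policy_lock);
    pthread_mutex_unlock(&userspace_sem);
}

/* Path B, roughly a policy change calling back into the governor:
 * policy lock first, then the governor's own lock. */
static void path_b(void)
{
    pthread_mutex_lock(&policy_lock);
    pthread_mutex_lock(&userspace_sem);
    /* ... update governor state ... */
    pthread_mutex_unlock(&userspace_sem);
    pthread_mutex_unlock(&policy_lock);
}

int main(void)
{
    /* Run sequentially here; with two threads the opposite acquisition
     * orders could deadlock, which is why the patch avoids taking the
     * policy lock (it calls __cpufreq_driver_target) while userspace_sem is held. */
    path_a();
    path_b();
    puts("both paths completed");
    return 0;
}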
......
@@ -82,9 +82,14 @@ validate_fields(
     va.va_mask = XFS_AT_NLINK|XFS_AT_SIZE|XFS_AT_NBLOCKS;
     VOP_GETATTR(vp, &va, ATTR_LAZY, NULL, error);
-    ip->i_nlink = va.va_nlink;
-    ip->i_size = va.va_size;
-    ip->i_blocks = va.va_nblocks;
+    if (likely(!error)) {
+        ip->i_nlink = va.va_nlink;
+        ip->i_blocks = va.va_nblocks;
+
+        /* we're under i_sem so i_size can't change under us */
+        if (i_size_read(ip) != va.va_size)
+            i_size_write(ip, va.va_size);
+    }
 }

 /*
@@ -536,6 +541,7 @@ linvfs_setattr(
     if (error)
         return(-error);    /* Positive error up from XFS */
     if (ia_valid & ATTR_SIZE) {
+        i_size_write(inode, vattr.va_size);
         error = vmtruncate(inode, attr->ia_size);
     }
......
@@ -213,7 +213,6 @@ vn_revalidate(
         inode->i_mtime = va.va_mtime;
         inode->i_ctime = va.va_ctime;
         inode->i_atime = va.va_atime;
-        i_size_write(inode, va.va_size);
         if (va.va_xflags & XFS_XFLAG_IMMUTABLE)
             inode->i_flags |= S_IMMUTABLE;
         else
......