Commit ea1c9de4 authored Aug 25, 2008 by Ingo Molnar
Merge branch 'x86/urgent' into x86/cleanups
parents 4e1d112c a2bd7274
Showing 29 changed files with 307 additions and 118 deletions (+307, -118)
arch/x86/kernel/amd_iommu.c                   +1  -1
arch/x86/kernel/cpu/addon_cpuid_features.c    +15 -2
arch/x86/kernel/cpu/cyrix.c                   +0  -18
arch/x86/kernel/cpu/mcheck/mce_64.c           +5  -0
arch/x86/kernel/cpu/mcheck/mce_amd_64.c       +5  -13
arch/x86/kernel/cpu/mtrr/generic.c            +18 -2
arch/x86/kernel/cpu/mtrr/main.c               +1  -4
arch/x86/kernel/genx2apic_uv_x.c              +5  -3
arch/x86/kernel/numaq_32.c                    +1  -1
arch/x86/kernel/paravirt.c                    +1  -1
arch/x86/kernel/pci-calgary_64.c              +7  -7
arch/x86/kernel/setup.c                       +8  -8
arch/x86/kernel/smpboot.c                     +3  -0
arch/x86/kernel/tlb_uv.c                      +2  -1
arch/x86/kernel/tsc.c                         +4  -0
arch/x86/kernel/tsc_sync.c                    +2  -4
arch/x86/mm/init_64.c                         +20 -16
arch/x86/mm/ioremap.c                         +3  -5
arch/x86/mm/mmio-mod.c                        +3  -1
arch/x86/mm/pageattr.c                        +3  -3
arch/x86/mm/pat.c                             +36 -14
arch/x86/oprofile/nmi_int.c                   +36 -3
arch/x86/pci/amd_bus.c                        +46 -6
arch/x86/pci/i386.c                           +78 -0
include/asm-x86/genapic_32.h                  +1  -0
include/asm-x86/genapic_64.h                  +1  -0
include/asm-x86/irq_vectors.h                 +1  -0
include/asm-x86/mce.h                         +1  -0
include/asm-x86/uv/uv_bau.h                   +0  -5
arch/x86/kernel/amd_iommu.c

@@ -65,7 +65,7 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 	u8 *target;

 	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
-	target = (iommu->cmd_buf + tail);
+	target = iommu->cmd_buf + tail;
 	memcpy_toio(target, cmd, sizeof(*cmd));
 	tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size;
 	head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
arch/x86/kernel/cpu/addon_cpuid_features.c

@@ -56,9 +56,22 @@ void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
 	switch (c->x86_vendor) {
 	case X86_VENDOR_INTEL:
-		if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15))
+		/*
+		 * There is a known erratum on Pentium III and Core Solo
+		 * and Core Duo CPUs.
+		 * " Page with PAT set to WC while associated MTRR is UC
+		 * may consolidate to UC "
+		 * Because of this erratum, it is better to stick with
+		 * setting WC in MTRR rather than using PAT on these CPUs.
+		 *
+		 * Enable PAT WC only on P4, Core 2 or later CPUs.
+		 */
+		if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
 			return;
-		break;
+
+		pat_disable("PAT WC disabled due to known CPU erratum.");
+		return;
+
 	case X86_VENDOR_AMD:
 	case X86_VENDOR_CENTAUR:
 	case X86_VENDOR_TRANSMETA:
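
The rewritten Intel gate keeps PAT on P4 (family 0xF, caught by family > 0x6) and on Core 2 or later (family 6, model >= 15); older family-6 parts are treated as carrying the WC/UC consolidation erratum. A minimal userspace sketch of just that predicate, with illustrative family/model pairs that are not part of the commit:

#include <stdio.h>

/* Mirrors the new gate: keep PAT on family > 6 (e.g. P4, family 0xF)
 * or on family 6 with model >= 15 (Core 2 and later); earlier family-6
 * parts hit the WC/UC consolidation erratum and get PAT disabled. */
static int pat_allowed(int family, int model)
{
	return family > 0x6 || (family == 6 && model >= 15);
}

int main(void)
{
	printf("family 6,  model 11 (P3-era): %d\n", pat_allowed(6, 11));   /* 0 */
	printf("family 6,  model 15 (Core 2): %d\n", pat_allowed(6, 15));   /* 1 */
	printf("family 15, model 2  (P4):     %d\n", pat_allowed(0xF, 2));  /* 1 */
	return 0;
}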
arch/x86/kernel/cpu/cyrix.c

@@ -134,23 +134,6 @@ static void __cpuinit set_cx86_memwb(void)
 	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
 }

-static void __cpuinit set_cx86_inc(void)
-{
-	unsigned char ccr3;
-
-	printk(KERN_INFO "Enable Incrementor on Cyrix/NSC processor.\n");
-
-	ccr3 = getCx86(CX86_CCR3);
-	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
-	/* PCR1 -- Performance Control */
-	/* Incrementor on, whatever that is */
-	setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02);
-	/* PCR0 -- Performance Control */
-	/* Incrementor Margin 10 */
-	setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04);
-	setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */
-}
-
 /*
  * Configure later MediaGX and/or Geode processor.
  */

@@ -174,7 +157,6 @@ static void __cpuinit geode_configure(void)
 	set_cx86_memwb();
 	set_cx86_reorder();
-	set_cx86_inc();

 	local_irq_restore(flags);
 }
arch/x86/kernel/cpu/mcheck/mce_64.c

@@ -759,6 +759,7 @@ static struct sysdev_class mce_sysclass = {
 };

 DEFINE_PER_CPU(struct sys_device, device_mce);
+void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinitdata;

 /* Why are there no generic functions for this? */
 #define ACCESSOR(name, var, start) \

@@ -883,9 +884,13 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		mce_create_device(cpu);
+		if (threshold_cpu_callback)
+			threshold_cpu_callback(action, cpu);
 		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
+		if (threshold_cpu_callback)
+			threshold_cpu_callback(action, cpu);
 		mce_remove_device(cpu);
 		break;
 	}
arch/x86/kernel/cpu/mcheck/mce_amd_64.c

@@ -628,6 +628,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 	deallocate_threshold_block(cpu, bank);

 free_out:
+	kobject_del(b->kobj);
 	kobject_put(b->kobj);
 	kfree(b);
 	per_cpu(threshold_banks, cpu)[bank] = NULL;

@@ -645,14 +646,11 @@ static void threshold_remove_device(unsigned int cpu)
 }

 /* get notified when a cpu comes on/off */
-static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb,
-					    unsigned long action, void *hcpu)
+static void __cpuinit amd_64_threshold_cpu_callback(unsigned long action,
+						    unsigned int cpu)
 {
-	/* cpu was unsigned int to begin with */
-	unsigned int cpu = (unsigned long)hcpu;
-
 	if (cpu >= NR_CPUS)
-		goto out;
+		return;

 	switch (action) {
 	case CPU_ONLINE:

@@ -666,14 +664,8 @@ static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb,
 	default:
 		break;
 	}
-out:
-	return NOTIFY_OK;
 }

-static struct notifier_block threshold_cpu_notifier __cpuinitdata = {
-	.notifier_call = threshold_cpu_callback,
-};
-
 static __init int threshold_init_device(void)
 {
 	unsigned lcpu = 0;

@@ -684,7 +676,7 @@ static __init int threshold_init_device(void)
 		if (err)
 			return err;
 	}
-	register_hotcpu_notifier(&threshold_cpu_notifier);
+	threshold_cpu_callback = amd_64_threshold_cpu_callback;
 	return 0;
 }
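
Taken together, the mce_64.c and mce_amd_64.c hunks replace a private hotplug notifier with a function pointer (threshold_cpu_callback) that the generic MCE notifier invokes. A minimal userspace sketch of that hook pattern, with illustrative names rather than the kernel's:

#include <stdio.h>

/* Sketch of the hook pattern: the core notifier owns one function
 * pointer; an optional sub-module fills it in at init time instead of
 * registering its own notifier. Names here are stand-ins. */
typedef void (*cpu_hook_t)(unsigned long action, unsigned int cpu);

static cpu_hook_t threshold_hook;	/* core-owned, NULL until set */

static void core_cpu_event(unsigned long action, unsigned int cpu)
{
	printf("core: action %lu on cpu %u\n", action, cpu);
	if (threshold_hook)		/* forward only if someone registered */
		threshold_hook(action, cpu);
}

static void amd_threshold_hook(unsigned long action, unsigned int cpu)
{
	printf("amd: handling action %lu on cpu %u\n", action, cpu);
}

int main(void)
{
	core_cpu_event(1, 0);			/* before registration: core only */
	threshold_hook = amd_threshold_hook;	/* what threshold_init_device() now does */
	core_cpu_event(1, 0);			/* after: both run, in order */
	return 0;
}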
arch/x86/kernel/cpu/mtrr/generic.c

@@ -379,6 +379,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
 			     unsigned long *size, mtrr_type *type)
 {
 	unsigned int mask_lo, mask_hi, base_lo, base_hi;
+	unsigned int tmp, hi;

 	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
 	if ((mask_lo & 0x800) == 0) {

@@ -392,8 +393,23 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
 	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

 	/* Work out the shifted address mask. */
-	mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
-		| mask_lo >> PAGE_SHIFT;
+	tmp = mask_hi << (32 - PAGE_SHIFT)
+		| mask_lo >> PAGE_SHIFT;
+	mask_lo = size_or_mask | tmp;
+	/* Expand tmp with high bits to all 1s*/
+	hi = fls(tmp);
+	if (hi > 0) {
+		tmp |= ~((1 << (hi - 1)) - 1);
+
+		if (tmp != mask_lo) {
+			static int once = 1;
+
+			if (once) {
+				printk(KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n");
+				once = 0;
+			}
+			mask_lo = tmp;
+		}
+	}

 	/* This works correctly if size is a power of two, i.e. a
 	   contiguous range. */
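
The fixup detects a BIOS-programmed MTRR mask whose top bits were left clear: fls() finds the highest set bit of the shifted mask, and that bit plus everything above it is forced to 1, making the mask contiguous. A small userspace sketch of the bit manipulation, with a local fls() since fls() is a kernel helper rather than standard C (the sample value is illustrative):

#include <stdio.h>

/* Local stand-in for the kernel's fls(): 1-based index of the highest
 * set bit, or 0 if no bit is set. */
static int fls_u32(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/* Mirror the mask fixup: set the highest set bit and every bit above
 * it to 1, so a mask the BIOS truncated becomes contiguous. */
static unsigned int fix_mask(unsigned int tmp)
{
	int hi = fls_u32(tmp);

	if (hi > 0)
		tmp |= ~((1u << (hi - 1)) - 1);
	return tmp;
}

int main(void)
{
	/* illustrative: mask covers bits 20..27, bits 28..31 wrongly clear */
	unsigned int bad = 0x0ff00000;

	printf("0x%08x -> 0x%08x\n", bad, fix_mask(bad));	/* 0xfff00000 */
	return 0;
}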
arch/x86/kernel/cpu/mtrr/main.c

@@ -1496,11 +1496,8 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 	/* kvm/qemu doesn't have mtrr set right, don't trim them all */
 	if (!highest_pfn) {
-		if (!kvm_para_available()) {
-			printk(KERN_WARNING
-				"WARNING: strange, CPU MTRRs all blank?\n");
-			WARN_ON(1);
-		}
+		WARN(!kvm_para_available(), KERN_WARNING
+			"WARNING: strange, CPU MTRRs all blank?\n");
 		return 0;
 	}
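
This hunk folds the open-coded "if (cond) { printk(...); WARN_ON(1); }" sequence into a single WARN(cond, ...) call, a consolidation repeated in the pci-calgary_64.c, tsc_sync.c and ioremap.c hunks below. A minimal userspace stand-in showing the shape of such a macro (GCC statement-expression syntax; a sketch, not the kernel's implementation):

#include <stdio.h>

/* Userspace stand-in for WARN(condition, fmt, ...): evaluate the
 * condition once, print only when it holds, and yield the condition
 * so callers can branch on it. */
#define WARN(condition, ...) ({			\
	int __ret_warn = !!(condition);		\
	if (__ret_warn)				\
		fprintf(stderr, __VA_ARGS__);	\
	__ret_warn;				\
})

int main(void)
{
	int highest_pfn = 0;

	if (!highest_pfn) {
		/* one call replaces the old printk + WARN_ON(1) pair */
		WARN(1, "WARNING: strange, CPU MTRRs all blank?\n");
		return 0;
	}
	return 1;
}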
arch/x86/kernel/genx2apic_uv_x.c

@@ -293,7 +293,9 @@ static __init void uv_rtc_init(void)
 	sn_rtc_cycles_per_second = ticks_per_sec;
 }

-static __init void uv_system_init(void)
+static bool uv_system_inited;
+
+void __init uv_system_init(void)
 {
 	union uvh_si_addr_map_config_u m_n_config;
 	union uvh_node_id_u node_id;

@@ -383,6 +385,7 @@ static __init void uv_system_init(void)
 	map_mmr_high(max_pnode);
 	map_config_high(max_pnode);
 	map_mmioh_high(max_pnode);
+	uv_system_inited = true;
 }

 /*

@@ -391,8 +394,7 @@ static __init void uv_system_init(void)
  */
 void __cpuinit uv_cpu_init(void)
 {
-	if (!uv_node_to_blade)
-		uv_system_init();
+	BUG_ON(!uv_system_inited);

 	uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;
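
The UV hunks change initialization from lazy (the first uv_cpu_init() call did the setup) to explicit (boot code calls uv_system_init() once, and uv_cpu_init() merely asserts it ran; the boot-path call is the smpboot.c hunk below). A small sketch of that ordering contract, with illustrative names:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Sketch of the init-ordering change: one explicit system_init() at
 * boot, and later per-CPU callers only assert it already happened. */
static bool system_inited;

static void system_init(void)
{
	system_inited = true;
	puts("system tables set up once, at boot");
}

static void cpu_init(void)
{
	assert(system_inited);	/* was: if (!inited) system_init(); */
	puts("per-cpu init");
}

int main(void)
{
	system_init();	/* boot path (native_smp_prepare_cpus in the diff) */
	cpu_init();
	return 0;
}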
arch/x86/kernel/numaq_32.c

@@ -73,7 +73,7 @@ static void __init smp_dump_qct(void)
 }

-void __init numaq_tsc_disable(void)
+void __cpuinit numaq_tsc_disable(void)
 {
 	if (!found_numaq)
 		return;
arch/x86/kernel/paravirt.c

@@ -471,7 +471,7 @@ struct pv_lock_ops pv_lock_ops = {
 	.spin_unlock = __ticket_spin_unlock,
 #endif
 };
-EXPORT_SYMBOL_GPL(pv_lock_ops);
+EXPORT_SYMBOL(pv_lock_ops);

 EXPORT_SYMBOL_GPL(pv_time_ops);
 EXPORT_SYMBOL(pv_cpu_ops);
arch/x86/kernel/pci-calgary_64.c

@@ -343,9 +343,8 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	/* were we called with bad_dma_address? */
 	badend = bad_dma_address + (EMERGENCY_PAGES * PAGE_SIZE);
 	if (unlikely((dma_addr >= bad_dma_address) && (dma_addr < badend))) {
-		printk(KERN_ERR "Calgary: driver tried unmapping bad DMA "
+		WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
 		       "address 0x%Lx\n", dma_addr);
-		WARN_ON(1);
 		return;
 	}

@@ -1269,13 +1268,15 @@ static inline int __init determine_tce_table_size(u64 ram)
 static int __init build_detail_arrays(void)
 {
 	unsigned long ptr;
-	int i, scal_detail_size, rio_detail_size;
+	unsigned numnodes, i;
+	int scal_detail_size, rio_detail_size;

-	if (rio_table_hdr->num_scal_dev > MAX_NUMNODES){
+	numnodes = rio_table_hdr->num_scal_dev;
+	if (numnodes > MAX_NUMNODES){
 		printk(KERN_WARNING
 			"Calgary: MAX_NUMNODES too low! Defined as %d, "
 			"but system has %d nodes.\n",
-			MAX_NUMNODES, rio_table_hdr->num_scal_dev);
+			MAX_NUMNODES, numnodes);
 		return -ENODEV;
 	}

@@ -1296,8 +1297,7 @@ static int __init build_detail_arrays(void)
 	}

 	ptr = ((unsigned long)rio_table_hdr) + 3;
-	for (i = 0; i < rio_table_hdr->num_scal_dev;
-	     i++, ptr += scal_detail_size)
+	for (i = 0; i < numnodes; i++, ptr += scal_detail_size)
 		scal_devs[i] = (struct scal_detail *)ptr;

 	for (i = 0; i < rio_table_hdr->num_rio_dev;
arch/x86/kernel/setup.c

@@ -604,14 +604,6 @@ void __init setup_arch(char **cmdline_p)
 	early_cpu_init();
 	early_ioremap_init();

-#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
-	/*
-	 * Must be before kernel pagetables are setup
-	 * or fixmap area is touched.
-	 */
-	vmi_init();
-#endif
-
 	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
 	screen_info = boot_params.screen_info;
 	edid_info = boot_params.edid_info;

@@ -678,6 +670,14 @@ void __init setup_arch(char **cmdline_p)
 	parse_early_param();

+#if defined(CONFIG_VMI) && defined(CONFIG_X86_32)
+	/*
+	 * Must be before kernel pagetables are setup
+	 * or fixmap area is touched.
+	 */
+	vmi_init();
+#endif
+
 	/* after early param, so could get panic from serial */
 	reserve_early_setup_data();
arch/x86/kernel/smpboot.c

@@ -1221,6 +1221,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 	printk(KERN_INFO "CPU%d: ", 0);
 	print_cpu_info(&cpu_data(0));
 	setup_boot_clock();
+
+	if (is_uv_system())
+		uv_system_init();
 out:
 	preempt_enable();
 }
arch/x86/kernel/tlb_uv.c

@@ -17,6 +17,7 @@
 #include <asm/genapic.h>
 #include <asm/idle.h>
 #include <asm/tsc.h>
+#include <asm/irq_vectors.h>

 #include <mach_apic.h>

@@ -783,7 +784,7 @@ static int __init uv_bau_init(void)
 		uv_init_blade(blade, node, cur_cpu);
 		cur_cpu += uv_blade_nr_possible_cpus(blade);
 	}
-	set_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
+	alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
 	uv_enable_timeouts();

 	return 0;
arch/x86/kernel/tsc.c

@@ -325,6 +325,10 @@ static struct notifier_block time_cpufreq_notifier_block = {

 static int __init cpufreq_tsc(void)
 {
+	if (!cpu_has_tsc)
+		return 0;
+	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+		return 0;
 	cpufreq_register_notifier(&time_cpufreq_notifier_block,
 				CPUFREQ_TRANSITION_NOTIFIER);
 	return 0;
arch/x86/kernel/tsc_sync.c

@@ -88,11 +88,9 @@ static __cpuinit void check_tsc_warp(void)
 			__raw_spin_unlock(&sync_lock);
 		}
 	}
-	if (!(now-start)) {
-		printk("Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
-			now-start, end-start);
-		WARN_ON(1);
-	}
+	WARN(!(now-start),
+		"Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
+			now-start, end-start);
 }

 /*
arch/x86/mm/init_64.c

@@ -241,7 +241,7 @@ static unsigned long __initdata table_start;
 static unsigned long __meminitdata table_end;
 static unsigned long __meminitdata table_top;

-static __meminit void *alloc_low_page(unsigned long *phys)
+static __ref void *alloc_low_page(unsigned long *phys)
 {
 	unsigned long pfn = table_end++;
 	void *adr;

@@ -262,7 +262,7 @@ static __meminit void *alloc_low_page(unsigned long *phys)
 	return adr;
 }

-static __meminit void unmap_low_page(void *adr)
+static __ref void unmap_low_page(void *adr)
 {
 	if (after_bootmem)
 		return;

@@ -336,9 +336,12 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 		}

 		if (pmd_val(*pmd)) {
-			if (!pmd_large(*pmd))
+			if (!pmd_large(*pmd)) {
+				spin_lock(&init_mm.page_table_lock);
 				last_map_addr = phys_pte_update(pmd, address,
-								 end);
+								end);
+				spin_unlock(&init_mm.page_table_lock);
+			}
 			/* Count entries we're using from level2_ident_pgt */
 			if (start == 0)
 				pages++;

@@ -347,8 +350,10 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 		if (page_size_mask & (1<<PG_LEVEL_2M)) {
 			pages++;
+			spin_lock(&init_mm.page_table_lock);
 			set_pte((pte_t *)pmd,
 				pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+			spin_unlock(&init_mm.page_table_lock);
 			last_map_addr = (address & PMD_MASK) + PMD_SIZE;
 			continue;
 		}

@@ -357,7 +362,9 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 		last_map_addr = phys_pte_init(pte, address, end);
 		unmap_low_page(pte);

+		spin_lock(&init_mm.page_table_lock);
 		pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
+		spin_unlock(&init_mm.page_table_lock);
 	}
 	update_page_count(PG_LEVEL_2M, pages);
 	return last_map_addr;

@@ -370,9 +377,7 @@ phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
 	pmd_t *pmd = pmd_offset(pud, 0);
 	unsigned long last_map_addr;

-	spin_lock(&init_mm.page_table_lock);
 	last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
-	spin_unlock(&init_mm.page_table_lock);
 	__flush_tlb_all();
 	return last_map_addr;
 }

@@ -408,20 +413,21 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 		if (page_size_mask & (1<<PG_LEVEL_1G)) {
 			pages++;
+			spin_lock(&init_mm.page_table_lock);
 			set_pte((pte_t *)pud,
 				pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+			spin_unlock(&init_mm.page_table_lock);
 			last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
 			continue;
 		}

 		pmd = alloc_low_page(&pmd_phys);
-
-		spin_lock(&init_mm.page_table_lock);
 		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
 		unmap_low_page(pmd);
+
+		spin_lock(&init_mm.page_table_lock);
 		pud_populate(&init_mm, pud, __va(pmd_phys));
 		spin_unlock(&init_mm.page_table_lock);
-
 	}
 	__flush_tlb_all();

 	update_page_count(PG_LEVEL_1G, pages);

@@ -513,16 +519,14 @@ static unsigned long __init kernel_physical_mapping_init(unsigned long start,
 			continue;
 		}

-		if (after_bootmem)
-			pud = pud_offset(pgd, start & PGDIR_MASK);
-		else
-			pud = alloc_low_page(&pud_phys);
-
+		pud = alloc_low_page(&pud_phys);
 		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
 						 page_size_mask);
 		unmap_low_page(pud);
-		pgd_populate(&init_mm, pgd_offset_k(start), __va(pud_phys));
+
+		spin_lock(&init_mm.page_table_lock);
+		pgd_populate(&init_mm, pgd, __va(pud_phys));
+		spin_unlock(&init_mm.page_table_lock);
 	}

 	return last_map_addr;
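
The init_64.c series narrows init_mm.page_table_lock from around the whole phys_pmd_init()/phys_pud_init() calls down to the individual set_pte()/p*_populate() stores, so the page-table builders do their private work unlocked. A toy pthreads sketch of that lock-narrowing pattern (illustrative only, not the kernel's locking rules):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static long table[16];

/* Before: callers held table_lock around all of build_entry(),
 * serializing even the part that touches only private scratch data.
 * After: only the shared-table store runs under the lock. */
static void build_entry(int i)
{
	long scratch = (long)i * i;	/* private work: no lock needed */

	pthread_mutex_lock(&table_lock);
	table[i] = scratch;		/* shared publish: lock held briefly */
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	for (int i = 0; i < 16; i++)
		build_entry(i);
	printf("table[3] = %ld\n", table[3]);
	return 0;
}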
arch/x86/mm/ioremap.c

@@ -553,13 +553,11 @@ static int __init check_early_ioremap_leak(void)
 {
 	if (!early_ioremap_nested)
 		return 0;

-	printk(KERN_WARNING
+	WARN(1, KERN_WARNING
 	       "Debug warning: early ioremap leak of %d areas detected.\n",
-		early_ioremap_nested);
+	       early_ioremap_nested);
 	printk(KERN_WARNING
-		"please boot with early_ioremap_debug and report the dmesg.\n");
-	WARN_ON(1);
+	       "please boot with early_ioremap_debug and report the dmesg.\n");

 	return 1;
 }
arch/x86/mm/mmio-mod.c

@@ -430,7 +430,9 @@ static void enter_uniprocessor(void)
 		    "may miss events.\n");
 }

-static void leave_uniprocessor(void)
+/* __ref because leave_uniprocessor calls cpu_up which is __cpuinit,
+   but this whole function is ifdefed CONFIG_HOTPLUG_CPU */
+static void __ref leave_uniprocessor(void)
 {
 	int cpu;
 	int err;
arch/x86/mm/pageattr.c

@@ -849,7 +849,7 @@ int set_memory_uc(unsigned long addr, int numpages)
 	/*
 	 * for now UC MINUS. see comments in ioremap_nocache()
 	 */
-	if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
+	if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
 			    _PAGE_CACHE_UC_MINUS, NULL))
 		return -EINVAL;

@@ -868,7 +868,7 @@ int set_memory_wc(unsigned long addr, int numpages)
 	if (!pat_enabled)
 		return set_memory_uc(addr, numpages);

-	if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
+	if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
 		_PAGE_CACHE_WC, NULL))
 		return -EINVAL;

@@ -884,7 +884,7 @@ int _set_memory_wb(unsigned long addr, int numpages)
 int set_memory_wb(unsigned long addr, int numpages)
 {
-	free_memtype(addr, addr + numpages * PAGE_SIZE);
+	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);

 	return _set_memory_wb(addr, numpages);
 }
arch/x86/mm/pat.c

@@ -207,6 +207,9 @@ static int chk_conflict(struct memtype *new, struct memtype *entry,
 	return -EBUSY;
 }

+static struct memtype *cached_entry;
+static u64 cached_start;
+
 /*
  * req_type typically has one of the:
  * - _PAGE_CACHE_WB

@@ -280,11 +283,17 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	spin_lock(&memtype_lock);

+	if (cached_entry && start >= cached_start)
+		entry = cached_entry;
+	else
+		entry = list_entry(&memtype_list, struct memtype, nd);
+
 	/* Search for existing mapping that overlaps the current range */
 	where = NULL;
-	list_for_each_entry(entry, &memtype_list, nd) {
+	list_for_each_entry_continue(entry, &memtype_list, nd) {
 		if (end <= entry->start) {
 			where = entry->nd.prev;
+			cached_entry = list_entry(where, struct memtype, nd);
 			break;
 		} else if (start <= entry->start) { /* end > entry->start */
 			err = chk_conflict(new, entry, new_type);

@@ -292,6 +301,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 				dprintk("Overlap at 0x%Lx-0x%Lx\n",
 					entry->start, entry->end);
 				where = entry->nd.prev;
+				cached_entry = list_entry(where,
+							struct memtype, nd);
 			}
 			break;
 		} else if (start < entry->end) { /* start > entry->start */

@@ -299,7 +310,20 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 			if (!err) {
 				dprintk("Overlap at 0x%Lx-0x%Lx\n",
 					entry->start, entry->end);
-				where = &entry->nd;
+				cached_entry = list_entry(entry->nd.prev,
+							struct memtype, nd);
+
+				/*
+				 * Move to right position in the linked
+				 * list to add this new entry
+				 */
+				list_for_each_entry_continue(entry,
+							&memtype_list, nd) {
+					if (start <= entry->start) {
+						where = entry->nd.prev;
+						break;
+					}
+				}
 			}
 			break;
 		}

@@ -314,6 +338,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		return err;
 	}

+	cached_start = start;
+
 	if (where)
 		list_add(&new->nd, where);
 	else

@@ -343,6 +369,9 @@ int free_memtype(u64 start, u64 end)
 	spin_lock(&memtype_lock);
 	list_for_each_entry(entry, &memtype_list, nd) {
 		if (entry->start == start && entry->end == end) {
+			if (cached_entry == entry || cached_start == start)
+				cached_entry = NULL;
+
 			list_del(&entry->nd);
 			kfree(entry);
 			err = 0;

@@ -361,14 +390,6 @@ int free_memtype(u64 start, u64 end)
 }

-/*
- * /dev/mem mmap interface. The memtype used for mapping varies:
- * - Use UC for mappings with O_SYNC flag
- * - Without O_SYNC flag, if there is any conflict in reserve_memtype,
- *   inherit the memtype from existing mapping.
- * - Else use UC_MINUS memtype (for backward compatibility with existing
- *   X drivers.
- */
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t vma_prot)
 {

@@ -406,14 +427,14 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t *vma_prot)
 {
 	u64 offset = ((u64) pfn) << PAGE_SHIFT;
-	unsigned long flags = _PAGE_CACHE_UC_MINUS;
+	unsigned long flags = -1;
 	int retval;

 	if (!range_is_allowed(pfn, size))
 		return 0;

 	if (file->f_flags & O_SYNC) {
-		flags = _PAGE_CACHE_UC;
+		flags = _PAGE_CACHE_UC_MINUS;
 	}

 #ifdef CONFIG_X86_32

@@ -436,13 +457,14 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 #endif

 	/*
-	 * With O_SYNC, we can only take UC mapping. Fail if we cannot.
+	 * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
+	 *
 	 * Without O_SYNC, we want to get
 	 * - WB for WB-able memory and no other conflicting mappings
 	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
 	 * - Inherit from confliting mappings otherwise
 	 */
-	if (flags != _PAGE_CACHE_UC_MINUS) {
+	if (flags != -1) {
 		retval = reserve_memtype(offset, offset + size, flags, NULL);
 	} else {
 		retval = reserve_memtype(offset, offset + size, -1, &flags);
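
The pat.c change speeds up reserve_memtype() by remembering where the last search of the sorted memtype list ended: when the next request starts at or above cached_start, the scan resumes from cached_entry instead of the list head. A self-contained sketch of the cached-cursor idea on a plain singly linked list (illustrative code, not the kernel's list.h API):

#include <stdio.h>
#include <stdlib.h>

/* Sorted-list insertion with a cached cursor: insertions arriving in
 * roughly ascending order skip the already-scanned prefix. */
struct node {
	unsigned long start;
	struct node *next;
};

static struct node head = { 0, NULL };	/* sentinel */
static struct node *cached;		/* last insertion point */
static unsigned long cached_start;

static void insert_sorted(unsigned long start)
{
	/* resume from the cache only when it cannot overshoot */
	struct node *prev = (cached && start >= cached_start) ? cached : &head;
	struct node *n = malloc(sizeof(*n));

	while (prev->next && prev->next->start < start)
		prev = prev->next;

	n->start = start;
	n->next = prev->next;
	prev->next = n;

	cached = n;		/* the next ascending insert starts here */
	cached_start = start;
}

int main(void)
{
	insert_sorted(10);
	insert_sorted(20);	/* scan starts at the 10 node, not the head */
	insert_sorted(30);
	for (struct node *n = head.next; n; n = n->next)
		printf("%lu\n", n->start);
	return 0;
}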
arch/x86/oprofile/nmi_int.c

@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/moduleparam.h>
 #include <linux/kdebug.h>
+#include <linux/cpu.h>
 #include <asm/nmi.h>
 #include <asm/msr.h>
 #include <asm/apic.h>

@@ -28,23 +29,48 @@ static DEFINE_PER_CPU(unsigned long, saved_lvtpc);
 static int nmi_start(void);
 static void nmi_stop(void);
+static void nmi_cpu_start(void *dummy);
+static void nmi_cpu_stop(void *dummy);

 /* 0 == registered but off, 1 == registered and on */
 static int nmi_enabled = 0;

+#ifdef CONFIG_SMP
+static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
+				 void *data)
+{
+	int cpu = (unsigned long)data;
+	switch (action) {
+	case CPU_DOWN_FAILED:
+	case CPU_ONLINE:
+		smp_call_function_single(cpu, nmi_cpu_start, NULL, 0);
+		break;
+	case CPU_DOWN_PREPARE:
+		smp_call_function_single(cpu, nmi_cpu_stop, NULL, 1);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block oprofile_cpu_nb = {
+	.notifier_call = oprofile_cpu_notifier
+};
+#endif
+
 #ifdef CONFIG_PM

 static int nmi_suspend(struct sys_device *dev, pm_message_t state)
 {
+	/* Only one CPU left, just stop that one */
 	if (nmi_enabled == 1)
-		nmi_stop();
+		nmi_cpu_stop(NULL);
 	return 0;
 }

 static int nmi_resume(struct sys_device *dev)
 {
 	if (nmi_enabled == 1)
-		nmi_start();
+		nmi_cpu_start(NULL);
 	return 0;
 }

@@ -463,6 +489,9 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 	}

 	init_sysfs();
+#ifdef CONFIG_SMP
+	register_cpu_notifier(&oprofile_cpu_nb);
+#endif
 	using_nmi = 1;
 	ops->create_files = nmi_create_files;
 	ops->setup = nmi_setup;

@@ -476,6 +505,10 @@ int __init op_nmi_init(struct oprofile_operations *ops)
 void op_nmi_exit(void)
 {
-	if (using_nmi)
+	if (using_nmi) {
 		exit_sysfs();
+#ifdef CONFIG_SMP
+		unregister_cpu_notifier(&oprofile_cpu_nb);
+#endif
+	}
 }
arch/x86/pci/amd_bus.c

 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/topology.h>
+#include <linux/cpu.h>
 #include "pci.h"

 #ifdef CONFIG_X86_64

@@ -555,15 +556,17 @@ static int __init early_fill_mp_bus_info(void)
 	return 0;
 }

-postcore_initcall(early_fill_mp_bus_info);
+#else  /* !CONFIG_X86_64 */

-#endif
+static int __init early_fill_mp_bus_info(void)
+{
+	return 0;
+}
+
+#endif /* !CONFIG_X86_64 */

 /* common 32/64 bit code */

 #define ENABLE_CF8_EXT_CFG	(1ULL << 46)

-static void enable_pci_io_ecs_per_cpu(void *unused)
+static void enable_pci_io_ecs(void *unused)
 {
 	u64 reg;

 	rdmsrl(MSR_AMD64_NB_CFG, reg);

@@ -573,14 +576,51 @@ static void enable_pci_io_ecs_per_cpu(void *unused)
 	}
 }

-static int __init enable_pci_io_ecs(void)
+static int __cpuinit amd_cpu_notify(struct notifier_block *self,
+				    unsigned long action, void *hcpu)
 {
+	int cpu = (long)hcpu;
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata amd_cpu_notifier = {
+	.notifier_call	= amd_cpu_notify,
+};
+
+static int __init pci_io_ecs_init(void)
+{
+	int cpu;
+
 	/* assume all cpus from fam10h have IO ECS */
 	if (boot_cpu_data.x86 < 0x10)
 		return 0;
-	on_each_cpu(enable_pci_io_ecs_per_cpu, NULL, 1);
+
+	register_cpu_notifier(&amd_cpu_notifier);
+	for_each_online_cpu(cpu)
+		amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE,
+			       (void *)(long)cpu);
 	pci_probe |= PCI_HAS_IO_ECS;
+
+	return 0;
+}
+
+static int __init amd_postcore_init(void)
+{
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
+		return 0;
+
+	early_fill_mp_bus_info();
+	pci_io_ecs_init();
+
 	return 0;
 }

-postcore_initcall(enable_pci_io_ecs);
+postcore_initcall(amd_postcore_init);
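
pci_io_ecs_init() shows a common hotplug idiom: register a notifier so CPUs that appear later get handled, then invoke the same callback by hand for CPUs already online, so one code path covers both cases. A minimal userspace sketch of the replay half, with illustrative stand-ins for the notifier API:

#include <stdio.h>

#define CPU_ONLINE 1

/* Stand-in for the hotplug callback: in the diff this is amd_cpu_notify,
 * which forwards to enable_pci_io_ecs on the target CPU. */
static void cpu_notify(unsigned long action, int cpu)
{
	if (action == CPU_ONLINE)
		printf("enable IO ECS on cpu %d\n", cpu);
}

int main(void)
{
	int online_cpus = 4;	/* illustrative count */

	/* 1. a real kernel would register cpu_notify for future hotplug here */
	/* 2. replay the ONLINE event for CPUs that came up before us */
	for (int cpu = 0; cpu < online_cpus; cpu++)
		cpu_notify(CPU_ONLINE, cpu);
	return 0;
}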
arch/x86/pci/i386.c

@@ -31,8 +31,11 @@
 #include <linux/ioport.h>
 #include <linux/errno.h>
 #include <linux/bootmem.h>
+#include <linux/acpi.h>

 #include <asm/pat.h>
+#include <asm/hpet.h>
+#include <asm/io_apic.h>
 #include "pci.h"

@@ -77,6 +80,77 @@ pcibios_align_resource(void *data, struct resource *res,
 }
 EXPORT_SYMBOL(pcibios_align_resource);

+static int check_res_with_valid(struct pci_dev *dev, struct resource *res)
+{
+	unsigned long base;
+	unsigned long size;
+	int i;
+
+	base = res->start;
+	size = (res->start == 0 && res->end == res->start) ? 0 :
+		(res->end - res->start + 1);
+
+	if (!base || !size)
+		return 0;
+
+#ifdef CONFIG_HPET_TIMER
+	/* for hpet */
+	if (base == hpet_address && (res->flags & IORESOURCE_MEM)) {
+		dev_info(&dev->dev, "BAR has HPET at %08lx-%08lx\n",
+			 base, base + size - 1);
+		return 1;
+	}
+#endif
+
+#ifdef CONFIG_X86_IO_APIC
+	for (i = 0; i < nr_ioapics; i++) {
+		unsigned long ioapic_phys = mp_ioapics[i].mp_apicaddr;
+
+		if (base == ioapic_phys && (res->flags & IORESOURCE_MEM)) {
+			dev_info(&dev->dev, "BAR has ioapic at %08lx-%08lx\n",
+				 base, base + size - 1);
+			return 1;
+		}
+	}
+#endif
+
+#ifdef CONFIG_PCI_MMCONFIG
+	for (i = 0; i < pci_mmcfg_config_num; i++) {
+		unsigned long addr;
+
+		addr = pci_mmcfg_config[i].address;
+		if (base == addr && (res->flags & IORESOURCE_MEM)) {
+			dev_info(&dev->dev, "BAR has MMCONFIG at %08lx-%08lx\n",
+				 base, base + size - 1);
+			return 1;
+		}
+	}
+#endif
+
+	return 0;
+}
+
+static int check_platform(struct pci_dev *dev, struct resource *res)
+{
+	struct resource *root = NULL;
+
+	/*
+	 * forcibly insert it into the
+	 * resource tree
+	 */
+	if (res->flags & IORESOURCE_MEM)
+		root = &iomem_resource;
+	else if (res->flags & IORESOURCE_IO)
+		root = &ioport_resource;
+
+	if (root && check_res_with_valid(dev, res)) {
+		insert_resource(root, res);
+		return 1;
+	}
+
+	return 0;
+}
+
 /*
  * Handle resources of PCI devices. If the world were perfect, we could
  * just allocate all the resource regions and do nothing more. It isn't.

@@ -128,6 +202,8 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
 				pr = pci_find_parent_resource(dev, r);
 				if (!r->start || !pr ||
 				    request_resource(pr, r) < 0) {
+					if (check_platform(dev, r))
+						continue;
 					dev_err(&dev->dev, "BAR %d: can't "
 						"allocate resource\n", idx);
 					/*

@@ -171,6 +247,8 @@ static void __init pcibios_allocate_resources(int pass)
 					r->flags, disabled, pass);
 				pr = pci_find_parent_resource(dev, r);
 				if (!pr || request_resource(pr, r) < 0) {
+					if (check_platform(dev, r))
+						continue;
 					dev_err(&dev->dev, "BAR %d: can't "
 						"allocate resource\n", idx);
 					/* We'll assign a new address later */
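
check_res_with_valid() treats a BAR as empty when start and end are both 0, and otherwise sizes the inclusive range [start, end] as end - start + 1. A quick check of that computation with illustrative addresses:

#include <stdio.h>

/* Mirrors the size expression in check_res_with_valid(): a resource
 * with start == end == 0 is unset, otherwise the range is inclusive. */
static unsigned long res_size(unsigned long start, unsigned long end)
{
	return (start == 0 && end == start) ? 0 : (end - start + 1);
}

int main(void)
{
	printf("%lu\n", res_size(0, 0));			/* 0: unset BAR */
	printf("%lu\n", res_size(0xfed00000, 0xfed003ff));	/* 1024 bytes */
	return 0;
}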
include/asm-x86/genapic_32.h

@@ -118,6 +118,7 @@ enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
 #define get_uv_system_type()		UV_NONE
 #define is_uv_system()			0
 #define uv_wakeup_secondary(a, b)	1
+#define uv_system_init()		do {} while (0)

 #endif
include/asm-x86/genapic_64.h

@@ -42,6 +42,7 @@ extern int is_uv_system(void);
 extern struct genapic apic_x2apic_uv_x;
 DECLARE_PER_CPU(int, x2apic_extra_bits);
 extern void uv_cpu_init(void);
+extern void uv_system_init(void);
 extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);

 extern void setup_apic_routing(void);
include/asm-x86/irq_vectors.h

@@ -76,6 +76,7 @@
 #define CALL_FUNCTION_SINGLE_VECTOR	0xfb
 #define THERMAL_APIC_VECTOR		0xfa
 #define THRESHOLD_APIC_VECTOR		0xf9
+#define UV_BAU_MESSAGE			0xf8
 #define INVALIDATE_TLB_VECTOR_END	0xf7
 #define INVALIDATE_TLB_VECTOR_START	0xf0	/* f0-f7 used for TLB flush */
include/asm-x86/mce.h

@@ -92,6 +92,7 @@ extern int mce_disabled;

 void mce_log(struct mce *m);
 DECLARE_PER_CPU(struct sys_device, device_mce);
+extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);

 #ifdef CONFIG_X86_MCE_INTEL
 void mce_intel_feature_init(struct cpuinfo_x86 *c);
include/asm-x86/uv/uv_bau.h

@@ -40,11 +40,6 @@
 #define UV_ACTIVATION_DESCRIPTOR_SIZE	32
 #define UV_DISTRIBUTION_SIZE		256
 #define UV_SW_ACK_NPENDING		8
-#define UV_BAU_MESSAGE			200
-/*
- * Messaging irq; see irq_64.h and include/asm-x86/hw_irq_64.h
- * To be dynamically allocated in the future
- */
 #define UV_NET_ENDPOINT_INTD		0x38
 #define UV_DESC_BASE_PNODE_SHIFT	49
 #define UV_PAYLOADQ_PNODE_SHIFT		49