Commit 74c75f52 authored by Linus Torvalds

Merge branch 'x86-fixes-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: cpu_index build fix
  x86/voyager: fix missing cpu_index initialisation
  x86/voyager: fix compile breakage caused by dc1e35c6
  x86: fix /dev/mem mmap breakage when PAT is disabled
  x86/voyager: fix compile breakage caused by x86: move prefill_possible_map calling early
  x86: use CONFIG_X86_SMP instead of CONFIG_SMP
  x86/voyager: fix boot breakage caused by x86: boot secondary cpus through initial_code
  x86, uv: fix compile error in uv_hub.h
  i386/PAE: fix pud_page()
  x86: remove debug code from arch_add_memory()
  x86: start annotating early ioremap pointers with __iomem
  x86: two trivial sparse annotations
  x86: fix init_memory_mapping for [dc000000 - e0000000) - v2
parents 0b23e30b 1c4acdb4
@@ -231,6 +231,10 @@ config SMP
 	  If you don't know what to do here, say N.
 
+config X86_HAS_BOOT_CPU_ID
+	def_bool y
+	depends on X86_VOYAGER
+
 config X86_FIND_SMP_CONFIG
 	def_bool y
 	depends on X86_MPPARSE || X86_VOYAGER
 
@@ -82,9 +82,9 @@ extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
 extern void early_ioremap_init(void);
 extern void early_ioremap_clear(void);
 extern void early_ioremap_reset(void);
-extern void *early_ioremap(unsigned long offset, unsigned long size);
-extern void *early_memremap(unsigned long offset, unsigned long size);
-extern void early_iounmap(void *addr, unsigned long size);
+extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
+extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
+extern void early_iounmap(void __iomem *addr, unsigned long size);
 extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
 
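The __iomem churn above (and in the early_ioremap hunks further down) is about sparse, not code generation: a normal compile expands the annotation to nothing. A reduced, stand-alone model of the mechanism; the kernel's real definitions live in include/linux/compiler.h, and bad_read()/ok_read() are illustrative names:

#ifdef __CHECKER__
# define __iomem __attribute__((noderef, address_space(2)))
# define __force __attribute__((force))
#else
# define __iomem
# define __force
#endif

/* matches the fixed declaration above */
extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);

/* sparse (make C=1) warns here: the cast strips the __iomem address space */
static unsigned char bad_read(void __iomem *p)
{
        return *(unsigned char *)p;
}

/* accessor-style escape hatch, the way the kernel's readb() does it */
static unsigned char ok_read(void __iomem *p)
{
        return *(volatile unsigned char __force *)p;
}
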
@@ -120,13 +120,13 @@ static inline void pud_clear(pud_t *pudp)
 	write_cr3(pgd);
 }
 
-#define pud_page(pud) ((struct page *) __va(pud_val(pud) & PTE_PFN_MASK))
+#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
 
 #define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))
 
 /* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *)pud_page(*(pud)) + \
+#define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) + \
 				  pmd_index(address))
 
 #ifdef CONFIG_SMP
 
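Why the pud_page() change is a correctness fix: __va() turns a physical address into its direct-mapped kernel virtual address, so the old macro handed back the address of the pmd page's contents cast to struct page *, not the page's struct page entry. A toy user-space model of the distinction; the flat mem_map/direct_map layout here is assumed purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
struct page { unsigned long flags; };

static struct page mem_map[16];            /* one struct page per pfn */
static char direct_map[16 << PAGE_SHIFT];  /* stand-in for the __va() space */

#define __va(phys)        ((void *)(direct_map + (phys)))
#define pfn_to_page(pfn)  (&mem_map[pfn])

int main(void)
{
        uint64_t pud_val = (uint64_t)5 << PAGE_SHIFT;  /* entry points at pfn 5 */

        void *vaddr = __va(pud_val);                   /* what the old macro computed */
        struct page *pg = pfn_to_page(pud_val >> PAGE_SHIFT);  /* the fix */

        printf("mapped address : %p\n", vaddr);
        printf("struct page    : %p\n", (void *)pg);   /* a different object */
        return 0;
}
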
@@ -225,5 +225,11 @@ static inline int hard_smp_processor_id(void)
 #endif /* CONFIG_X86_LOCAL_APIC */
 
+#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
+extern unsigned char boot_cpu_id;
+#else
+#define boot_cpu_id	0
+#endif
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_SMP_H */
 
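The #else arm is what keeps generic code building: on everything but Voyager, boot_cpu_id folds to the constant 0, so a test like the cpu_init() change below costs no more than the old !smp_processor_id(). A minimal user-space sketch of the pattern; the smp_processor_id() stub is a stand-in:

#include <stdio.h>

/* build with -DCONFIG_X86_HAS_BOOT_CPU_ID to model a Voyager kernel */
#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
unsigned char boot_cpu_id = 4;     /* Voyager: the boot CPU need not be 0 */
#else
#define boot_cpu_id 0              /* everyone else boots on CPU 0 */
#endif

static int smp_processor_id(void) { return 0; }  /* stand-in */

int main(void)
{
        if (smp_processor_id() == boot_cpu_id)
                puts("boot CPU: initialise FP/extended state once");
        else
                puts("secondary CPU: skip");
        return 0;
}
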
@@ -13,6 +13,7 @@
 #include <linux/numa.h>
 #include <linux/percpu.h>
 #include <linux/timer.h>
+#include <asm/types.h>
 #include <asm/percpu.h>
 
@@ -69,7 +69,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
  */
 void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_SMP
 	unsigned int eax, ebx, ecx, edx, sub_index;
 	unsigned int ht_mask_width, core_plus_mask_width;
 	unsigned int core_select_mask, core_level_siblings;
 
@@ -549,6 +549,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 		this_cpu->c_early_init(c);
 
 	validate_pat_support(c);
+
+#ifdef CONFIG_SMP
+	c->cpu_index = boot_cpu_id;
+#endif
 }
 
 void __init early_cpu_init(void)

@@ -1134,7 +1138,7 @@ void __cpuinit cpu_init(void)
 	/*
 	 * Boot processor to setup the FP and extended state context info.
 	 */
-	if (!smp_processor_id())
+	if (smp_processor_id() == boot_cpu_id)
 		init_thread_xstate();
 
 	xsave_init();
 
@@ -759,7 +759,7 @@ __cpuinit int unsynchronized_tsc(void)
 	if (!cpu_has_tsc || tsc_unstable)
 		return 1;
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_SMP
 	if (apic_is_clustered_box())
 		return 1;
 #endif
 
@@ -78,7 +78,7 @@ static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
 static void __init set_vsmp_pv_ops(void)
 {
-	void *address;
+	void __iomem *address;
 	unsigned int cap, ctl, cfg;
 
 	/* set vSMP magic bits to indicate vSMP capable kernel */
 
@@ -90,6 +90,7 @@ static void ack_vic_irq(unsigned int irq);
 static void vic_enable_cpi(void);
 static void do_boot_cpu(__u8 cpuid);
 static void do_quad_bootstrap(void);
+static void initialize_secondary(void);
 
 int hard_smp_processor_id(void);
 int safe_smp_processor_id(void);
@@ -344,6 +345,12 @@ static void do_quad_bootstrap(void)
 	}
 }
 
+void prefill_possible_map(void)
+{
+	/* This is empty on voyager because we need a much
+	 * earlier detection which is done in find_smp_config */
+}
+
 /* Set up all the basic stuff: read the SMP config and make all the
  * SMP information reflect only the boot cpu.  All others will be
  * brought on-line later. */
@@ -413,6 +420,7 @@ void __init smp_store_cpu_info(int id)
 	struct cpuinfo_x86 *c = &cpu_data(id);
 
 	*c = boot_cpu_data;
+	c->cpu_index = id;
 	identify_secondary_cpu(c);
 }
@@ -650,6 +658,8 @@ void __init smp_boot_cpus(void)
 	   smp_tune_scheduling();
 	 */
 	smp_store_cpu_info(boot_cpu_id);
+	/* setup the jump vector */
+	initial_code = (unsigned long)initialize_secondary;
 	printk("CPU%d: ", boot_cpu_id);
 	print_cpu_info(&cpu_data(boot_cpu_id));
@@ -702,7 +712,7 @@
 /* Reload the secondary CPUs task structure (this function does not
  * return ) */
-void __init initialize_secondary(void)
+static void __init initialize_secondary(void)
 {
 #if 0
 	// AC kernels only
 
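These two Voyager changes work as a pair: initialize_secondary() goes static, and the boot CPU publishes it through initial_code, the variable the common startup assembly jumps through to enter C code on a secondary CPU. In miniature, as a user-space sketch; casting a function pointer through unsigned long mirrors the kernel's usage but is implementation-defined in portable C:

#include <stdio.h>

static void initialize_secondary(void)
{
        puts("secondary CPU entered platform-specific C code");
}

/* in the kernel, the startup assembly jumps through this variable */
static unsigned long initial_code;

int main(void)
{
        initial_code = (unsigned long)initialize_secondary;  /* "setup the jump vector" */
        ((void (*)(void))initial_code)();                    /* what the boot path does */
        return 0;
}
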
@@ -233,7 +233,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	len = (unsigned long) nr_pages << PAGE_SHIFT;
 	end = start + len;
 	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
-					start, len)))
+					(void __user *)start, len)))
 		goto slow_irqon;
 
 	/*
 
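The (void __user *) cast feeds the same sparse machinery as __iomem, just in address space 1: access_ok() wants a __user pointer, while get_user_pages_fast() receives the address as a plain unsigned long. A reduced model; this access_ok() stub is illustrative, not the kernel macro, which here also takes a VERIFY_* type argument:

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

/* stand-in for the kernel's access_ok() */
static int access_ok_stub(const void __user *addr, unsigned long len)
{
        (void)addr;
        (void)len;
        return 1;
}

int check_range(unsigned long start, unsigned long len)
{
        /* the cast makes the integer-to-__user-pointer conversion
         * explicit, which is exactly what the hunk above adds */
        return access_ok_stub((const void __user *)start, len);
}
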
@@ -671,12 +671,13 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	unsigned long last_map_addr = 0;
 	unsigned long page_size_mask = 0;
 	unsigned long start_pfn, end_pfn;
+	unsigned long pos;
 
 	struct map_range mr[NR_RANGE_MR];
 	int nr_range, i;
 	int use_pse, use_gbpages;
 
-	printk(KERN_INFO "init_memory_mapping\n");
+	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
 
 	/*
 	 * Find space for the kernel direct mapping tables.

@@ -710,35 +711,50 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	/* head if not big page alignment ?*/
 	start_pfn = start >> PAGE_SHIFT;
-	end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
+	pos = start_pfn << PAGE_SHIFT;
+	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
 			<< (PMD_SHIFT - PAGE_SHIFT);
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+		pos = end_pfn << PAGE_SHIFT;
+	}
 
 	/* big page (2M) range*/
-	start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
+	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
 			 << (PMD_SHIFT - PAGE_SHIFT);
-	end_pfn = ((start + (PUD_SIZE - 1))>>PUD_SHIFT)
+	end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
 			 << (PUD_SHIFT - PAGE_SHIFT);
-	if (end_pfn > ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT)))
-		end_pfn = ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT));
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-			page_size_mask & (1<<PG_LEVEL_2M));
+	if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
+		end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+				page_size_mask & (1<<PG_LEVEL_2M));
+		pos = end_pfn << PAGE_SHIFT;
+	}
 
 	/* big page (1G) range */
-	start_pfn = end_pfn;
-	end_pfn = (end>>PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+	start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
+			 << (PUD_SHIFT - PAGE_SHIFT);
+	end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
 				page_size_mask &
 				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
+		pos = end_pfn << PAGE_SHIFT;
+	}
 
 	/* tail is not big page (1G) alignment */
-	start_pfn = end_pfn;
-	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-			page_size_mask & (1<<PG_LEVEL_2M));
+	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
+			 << (PMD_SHIFT - PAGE_SHIFT);
+	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+				page_size_mask & (1<<PG_LEVEL_2M));
+		pos = end_pfn << PAGE_SHIFT;
+	}
 
 	/* tail is not big page (2M) alignment */
-	start_pfn = end_pfn;
+	start_pfn = pos>>PAGE_SHIFT;
 	end_pfn = end>>PAGE_SHIFT;
 	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
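The effect of threading pos through the stages can be checked by hand with the range from the commit title, [dc000000, e0000000): dc000000 is 2M-aligned but not 1G-aligned, and the old code clamped the 2M stage to a 1G boundary of end (c0000000), which lies below start, yielding a reversed range. A stand-alone sketch of the fixed logic; user-space, save_mr() here just prints, and the page-level numbers are stand-ins for page_size_mask bits:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21                      /* 2 MiB pages */
#define PUD_SHIFT  30                      /* 1 GiB pages */
#define PMD_SIZE   (1ULL << PMD_SHIFT)
#define PUD_SIZE   (1ULL << PUD_SHIFT)

typedef unsigned long long u64;

static void save_mr(u64 spfn, u64 epfn, int level)
{
        if (spfn < epfn)                   /* empty ranges are dropped */
                printf("%010llx - %010llx  page level %d\n",
                       spfn << PAGE_SHIFT, epfn << PAGE_SHIFT, level);
}

int main(void)
{
        u64 start = 0xdc000000ULL, end = 0xe0000000ULL;
        u64 start_pfn, end_pfn, pos;

        /* head: 4k pages up to the first 2M boundary */
        start_pfn = start >> PAGE_SHIFT;
        pos = start_pfn << PAGE_SHIFT;
        end_pfn = ((pos + PMD_SIZE - 1) >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        if (start_pfn < end_pfn) {
                save_mr(start_pfn, end_pfn, 0);
                pos = end_pfn << PAGE_SHIFT;
        }

        /* 2M range: the fix clamps to a 2M boundary of end, not a 1G one */
        start_pfn = ((pos + PMD_SIZE - 1) >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        end_pfn = ((pos + PUD_SIZE - 1) >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
        if (end_pfn > ((end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT)))
                end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        if (start_pfn < end_pfn) {
                save_mr(start_pfn, end_pfn, 2);
                pos = end_pfn << PAGE_SHIFT;
        }

        /* 1G range */
        start_pfn = ((pos + PUD_SIZE - 1) >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
        end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
        if (start_pfn < end_pfn) {
                save_mr(start_pfn, end_pfn, 3);
                pos = end_pfn << PAGE_SHIFT;
        }

        /* 2M tail, then 4k tail */
        start_pfn = ((pos + PMD_SIZE - 1) >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        if (start_pfn < end_pfn) {
                save_mr(start_pfn, end_pfn, 2);
                pos = end_pfn << PAGE_SHIFT;
        }
        save_mr(pos >> PAGE_SHIFT, end >> PAGE_SHIFT, 0);

        return 0;
}

Run as-is this prints a single well-formed 2M-mapped range, 00dc000000 - 00e0000000; stepping the same inputs through the pre-fix code instead clamps end_pfn below start_pfn at the 2M stage.
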
@@ -842,7 +858,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
 	max_pfn_mapped = last_mapped_pfn;
 
 	ret = __add_pages(zone, start_pfn, nr_pages);
-	WARN_ON(1);
+	WARN_ON_ONCE(ret);
 
 	return ret;
 }
 
@@ -387,7 +387,7 @@ static void __iomem *ioremap_default(resource_size_t phys_addr,
 					unsigned long size)
 {
 	unsigned long flags;
-	void *ret;
+	void __iomem *ret;
 	int err;
 
 	/*

@@ -399,11 +399,11 @@ static void __iomem *ioremap_default(resource_size_t phys_addr,
 	if (err < 0)
 		return NULL;
 
-	ret = (void *) __ioremap_caller(phys_addr, size, flags,
-				 __builtin_return_address(0));
+	ret = __ioremap_caller(phys_addr, size, flags,
+			       __builtin_return_address(0));
 
 	free_memtype(phys_addr, phys_addr + size);
-	return (void __iomem *)ret;
+	return ret;
 }
 
 void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
@@ -622,7 +622,7 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx)
 	__early_set_fixmap(idx, 0, __pgprot(0));
 }
 
-static void *prev_map[FIX_BTMAPS_SLOTS] __initdata;
+static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
 static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
 static int __init check_early_ioremap_leak(void)
 {

@@ -645,7 +645,7 @@ static int __init check_early_ioremap_leak(void)
 }
 late_initcall(check_early_ioremap_leak);
 
-static void __init *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
+static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
 {
 	unsigned long offset, last_addr;
 	unsigned int nrpages;

@@ -713,23 +713,23 @@ static void __init *__early_ioremap(unsigned long phys_addr, unsigned long size,
 	if (early_ioremap_debug)
 		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
 
-	prev_map[slot] = (void *) (offset + fix_to_virt(idx0));
+	prev_map[slot] = (void __iomem *)(offset + fix_to_virt(idx0));
 	return prev_map[slot];
 }
 
 /* Remap an IO device */
-void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
+void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
 {
 	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
 }
 
 /* Remap memory */
-void __init *early_memremap(unsigned long phys_addr, unsigned long size)
+void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
 {
 	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
 }
 
-void __init early_iounmap(void *addr, unsigned long size)
+void __init early_iounmap(void __iomem *addr, unsigned long size)
 {
 	unsigned long virt_addr;
 	unsigned long offset;

@@ -779,7 +779,7 @@ void __init early_iounmap(void *addr, unsigned long size)
 		--idx;
 		--nrpages;
 	}
-	prev_map[slot] = 0;
+	prev_map[slot] = NULL;
 }
 
 void __this_fixmap_does_not_exist(void)
 
@@ -481,12 +481,16 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 	return 1;
 }
 #else
+
+/* This check is needed to avoid cache aliasing when PAT is enabled */
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
 	u64 from = ((u64)pfn) << PAGE_SHIFT;
 	u64 to = from + size;
 	u64 cursor = from;
 
+	if (!pat_enabled)
+		return 1;
+
 	while (cursor < to) {
 		if (!devmem_is_allowed(pfn)) {
 			printk(KERN_INFO
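What the pat_enabled bail-out restores is visible from user space: with PAT disabled (for example, booting with the nopat parameter), an mmap of /dev/mem is no longer rejected by this check. A minimal probe, assuming root and a kernel whose devmem_is_allowed()/CONFIG_STRICT_DEVMEM policy permits the page at all:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/mem", O_RDONLY);
        if (fd < 0) {
                perror("open /dev/mem");
                return 1;
        }

        /* map one page of low physical memory read-only */
        void *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
                perror("mmap");  /* this is the call the fix un-breaks */
                close(fd);
                return 1;
        }

        printf("mapped phys 0x0 at %p\n", p);
        munmap(p, 4096);
        close(fd);
        return 0;
}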