Commit 669c0f76 authored by Linus Torvalds

Merge branch 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 platform updates from Thomas Gleixner:
 "The platform support for x86 contains the following updates:

   - A set of updates for the UV platform to support new CPUs and to fix
     some of the UV4A BAU MMRs

   - The initial platform support for the Jailhouse hypervisor to allow
     native Linux guests (inmates) in non-root cells.

   - A fix for the PCI initialization on Intel MID platforms"

* 'x86-platform-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  x86/jailhouse: Respect pci=lastbus command line settings
  x86/jailhouse: Set X86_FEATURE_TSC_KNOWN_FREQ
  x86/platform/intel-mid: Move PCI initialization to arch_init()
  x86/platform/uv/BAU: Replace hard-coded values with MMR definitions
  x86/platform/UV: Fix UV4A BAU MMRs
  x86/platform/UV: Fix GAM MMR references in the UV x2apic code
  x86/platform/UV: Fix GAM MMR changes in UV4A
  x86/platform/UV: Add references to access fixed UV4A HUB MMRs
  x86/platform/UV: Fix UV4A support on new Intel Processors
  x86/platform/UV: Update uv_mmrs.h to prepare for UV4A fixes
  x86/jailhouse: Add PCI dependency
  x86/jailhouse: Hide x2apic code when CONFIG_X86_X2APIC=n
  x86/jailhouse: Initialize PCI support
  x86/jailhouse: Wire up IOAPIC for legacy UART ports
  x86/jailhouse: Halt instead of failing to restart
  x86/jailhouse: Silence ACPI warning
  x86/jailhouse: Avoid access of unsupported platform resources
  x86/jailhouse: Set up timekeeping
  x86/jailhouse: Enable PMTIMER
  x86/jailhouse: Enable APIC and SMP support
  ...
parents f0b13428 3b42349d
......@@ -810,6 +810,15 @@ config PARAVIRT_TIME_ACCOUNTING
config PARAVIRT_CLOCK
bool
config JAILHOUSE_GUEST
bool "Jailhouse non-root cell support"
depends on X86_64 && PCI
select X86_PM_TIMER
---help---
This option allows running Linux as a guest in a Jailhouse non-root
cell. You can leave this option disabled if you only want to start
Jailhouse and run Linux afterwards in the root cell.
endif #HYPERVISOR_GUEST
config NO_BOOTMEM
......
......@@ -28,6 +28,7 @@ enum x86_hypervisor_type {
X86_HYPER_XEN_PV,
X86_HYPER_XEN_HVM,
X86_HYPER_KVM,
X86_HYPER_JAILHOUSE,
};
#ifdef CONFIG_HYPERVISOR_GUEST
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Jailhouse paravirt_ops implementation
*
* Copyright (c) Siemens AG, 2015-2017
*
* Authors:
* Jan Kiszka <jan.kiszka@siemens.com>
*/
#ifndef _ASM_X86_JAILHOUSE_PARA_H
#define _ASM_X86_JAILHOUSE_PARA_H
#include <linux/types.h>
#ifdef CONFIG_JAILHOUSE_GUEST
bool jailhouse_paravirt(void);
#else
static inline bool jailhouse_paravirt(void)
{
return false;
}
#endif
#endif /* _ASM_X86_JAILHOUSE_PARA_H */
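For illustration, a minimal sketch (hypothetical caller and helper, not part of this series) of how platform code can use this predicate; the physflat_probe() change further down in this merge uses it in exactly this way:

#include <asm/jailhouse_para.h>

/* Hypothetical helper standing in for real legacy-hardware setup. */
static int example_probe_legacy_hardware(void)
{
	return 0;
}

/*
 * Hypothetical caller: skip hardware the non-root cell does not own
 * when the kernel is running as a Jailhouse inmate.
 */
static int example_platform_probe(void)
{
	if (jailhouse_paravirt())
		return 0;	/* inmate: no legacy PIC, no ACPI tables */

	return example_probe_legacy_hardware();
}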
......@@ -128,9 +128,17 @@ enum mp_irq_source_types {
mp_ExtINT = 3
};
#define MP_IRQDIR_DEFAULT 0
#define MP_IRQDIR_HIGH 1
#define MP_IRQDIR_LOW 3
#define MP_IRQPOL_DEFAULT 0x0
#define MP_IRQPOL_ACTIVE_HIGH 0x1
#define MP_IRQPOL_RESERVED 0x2
#define MP_IRQPOL_ACTIVE_LOW 0x3
#define MP_IRQPOL_MASK 0x3
#define MP_IRQTRIG_DEFAULT 0x0
#define MP_IRQTRIG_EDGE 0x4
#define MP_IRQTRIG_RESERVED 0x8
#define MP_IRQTRIG_LEVEL 0xc
#define MP_IRQTRIG_MASK 0xc
#define MP_APIC_ALL 0xFF
......
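The new polarity and trigger macros carry their bit positions within mpc_intsrc.irqflag (polarity in bits 0-1, trigger mode in bits 2-3), so values combine with a plain OR and decode with the masks. A small illustrative sketch with hypothetical helper names, assuming only the definitions above:

/* Edge triggered, active high: 0x05, the value the Jailhouse code below
 * uses for the legacy UART IRQs (and the old SFI magic number 5). */
static unsigned short example_edge_high_irqflag(void)
{
	return MP_IRQTRIG_EDGE | MP_IRQPOL_ACTIVE_HIGH;
}

/* 0x0f, i.e. MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW, is the classic
 * PCI-style encoding that the literal 0x0f checks below stand for. */
static int example_is_level_low(unsigned short irqflag)
{
	return (irqflag & MP_IRQTRIG_MASK) == MP_IRQTRIG_LEVEL &&
	       (irqflag & MP_IRQPOL_MASK) == MP_IRQPOL_ACTIVE_LOW;
}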
......@@ -48,7 +48,6 @@
#define UV2_NET_ENDPOINT_INTD 0x28
#define UV_NET_ENDPOINT_INTD (is_uv1_hub() ? \
UV1_NET_ENDPOINT_INTD : UV2_NET_ENDPOINT_INTD)
#define UV_DESC_PSHIFT 49
#define UV_PAYLOADQ_GNODE_SHIFT 49
#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
#define UV_BAU_BASENAME "sgi_uv/bau_tunables"
......
......@@ -241,6 +241,7 @@ static inline int uv_hub_info_check(int version)
#define UV2_HUB_REVISION_BASE 3
#define UV3_HUB_REVISION_BASE 5
#define UV4_HUB_REVISION_BASE 7
#define UV4A_HUB_REVISION_BASE 8 /* UV4 (fixed) rev 2 */
#ifdef UV1_HUB_IS_SUPPORTED
static inline int is_uv1_hub(void)
......@@ -280,6 +281,19 @@ static inline int is_uv3_hub(void)
}
#endif
/* First test "is UV4A", then "is UV4" */
#ifdef UV4A_HUB_IS_SUPPORTED
static inline int is_uv4a_hub(void)
{
return (uv_hub_info->hub_revision >= UV4A_HUB_REVISION_BASE);
}
#else
static inline int is_uv4a_hub(void)
{
return 0;
}
#endif
#ifdef UV4_HUB_IS_SUPPORTED
static inline int is_uv4_hub(void)
{
......
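Since UV4A_HUB_REVISION_BASE (8) is above UV4_HUB_REVISION_BASE (7), every UV4A hub also satisfies is_uv4_hub(); hence the comment above that UV4A must be tested first. A minimal sketch (hypothetical function) of that ordering:

/* Hypothetical dispatch honouring the "is UV4A before is UV4" ordering;
 * is_uv4a_hub() implies is_uv4_hub(), never the reverse. */
static void example_select_mmr_layout(void)
{
	if (is_uv4a_hub())
		pr_info("UV: using UV4A (fixed UV4) MMR layout\n");
	else if (is_uv4_hub())
		pr_info("UV: using original UV4 MMR layout\n");
	else if (is_uv3_hub())
		pr_info("UV: using UV3 MMR layout\n");
}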
......@@ -212,6 +212,7 @@ enum x86_legacy_i8042_state {
struct x86_legacy_features {
enum x86_legacy_i8042_state i8042;
int rtc;
int warm_reset;
int no_vga;
int reserve_bios_regions;
struct x86_legacy_devices devices;
......
......@@ -9,6 +9,7 @@
#define SETUP_PCI 3
#define SETUP_EFI 4
#define SETUP_APPLE_PROPERTIES 5
#define SETUP_JAILHOUSE 6
/* ram_size flags */
#define RAMDISK_IMAGE_START_MASK 0x07FF
......@@ -126,6 +127,27 @@ struct boot_e820_entry {
__u32 type;
} __attribute__((packed));
/*
* Smallest compatible version of jailhouse_setup_data required by this kernel.
*/
#define JAILHOUSE_SETUP_REQUIRED_VERSION 1
/*
* The boot loader is passing platform information via this Jailhouse-specific
* setup data structure.
*/
struct jailhouse_setup_data {
u16 version;
u16 compatible_version;
u16 pm_timer_address;
u16 num_cpus;
u64 pci_mmconfig_base;
u32 tsc_khz;
u32 apic_khz;
u8 standard_ioapic;
u8 cpu_ids[255];
} __attribute__((packed));
/* The so-called "zeropage" */
struct boot_params {
struct screen_info screen_info; /* 0x000 */
......
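For context: whatever acts as the boot loader (typically the Jailhouse root-cell tooling) wraps this structure in a standard struct setup_data node of type SETUP_JAILHOUSE and links it into boot_params.hdr.setup_data, which the parsing loop in jailhouse_init_platform() further down walks. A loader-side sketch with hypothetical names, assuming the node already sits at a known physical address:

/* Hypothetical loader-side sketch, not part of this series. */
struct example_jailhouse_setup_node {
	struct setup_data header;		/* next / type / len */
	struct jailhouse_setup_data platform;
} __attribute__((packed));

static void example_link_setup_data(struct boot_params *bp,
				    struct example_jailhouse_setup_node *node,
				    __u64 node_phys_addr)
{
	node->header.next = bp->hdr.setup_data;	/* prepend to the chain */
	node->header.type = SETUP_JAILHOUSE;
	node->header.len  = sizeof(node->platform);
	node->platform.version = 1;
	node->platform.compatible_version = JAILHOUSE_SETUP_REQUIRED_VERSION;
	/* ... fill in pm_timer_address, num_cpus, cpu_ids[], tsc_khz ... */
	bp->hdr.setup_data = node_phys_addr;
}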
......@@ -115,6 +115,8 @@ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o
obj-$(CONFIG_X86_PMEM_LEGACY_DEVICE) += pmem.o
obj-$(CONFIG_JAILHOUSE_GUEST) += jailhouse.o
obj-$(CONFIG_EISA) += eisa.o
obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o
......
......@@ -19,6 +19,7 @@
#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>
#include <asm/jailhouse_para.h>
#include <linux/acpi.h>
......@@ -218,6 +219,15 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
return 0;
}
static void physflat_init_apic_ldr(void)
{
/*
* LDR and DFR are not involved in physflat mode, rather:
* "In physical destination mode, the destination processor is
* specified by its local APIC ID [...]." (Intel SDM, 10.6.2.1)
*/
}
static void physflat_send_IPI_allbutself(int vector)
{
default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
......@@ -230,7 +240,8 @@ static void physflat_send_IPI_all(int vector)
static int physflat_probe(void)
{
if (apic == &apic_physflat || num_possible_cpus() > 8)
if (apic == &apic_physflat || num_possible_cpus() > 8 ||
jailhouse_paravirt())
return 1;
return 0;
......@@ -251,8 +262,7 @@ static struct apic apic_physflat __ro_after_init = {
.dest_logical = 0,
.check_apicid_used = NULL,
/* not needed, but shouldn't hurt: */
.init_apic_ldr = flat_init_apic_ldr,
.init_apic_ldr = physflat_init_apic_ldr,
.ioapic_phys_id_map = NULL,
.setup_apic_routing = NULL,
......
......@@ -800,18 +800,18 @@ static int irq_polarity(int idx)
/*
* Determine IRQ line polarity (high active or low active):
*/
switch (mp_irqs[idx].irqflag & 0x03) {
case 0:
switch (mp_irqs[idx].irqflag & MP_IRQPOL_MASK) {
case MP_IRQPOL_DEFAULT:
/* conforms to spec, ie. bus-type dependent polarity */
if (test_bit(bus, mp_bus_not_pci))
return default_ISA_polarity(idx);
else
return default_PCI_polarity(idx);
case 1:
case MP_IRQPOL_ACTIVE_HIGH:
return IOAPIC_POL_HIGH;
case 2:
case MP_IRQPOL_RESERVED:
pr_warn("IOAPIC: Invalid polarity: 2, defaulting to low\n");
case 3:
case MP_IRQPOL_ACTIVE_LOW:
default: /* Pointless default required due to gcc stupidity */
return IOAPIC_POL_LOW;
}
......@@ -845,8 +845,8 @@ static int irq_trigger(int idx)
/*
* Determine IRQ trigger mode (edge or level sensitive):
*/
switch ((mp_irqs[idx].irqflag >> 2) & 0x03) {
case 0:
switch (mp_irqs[idx].irqflag & MP_IRQTRIG_MASK) {
case MP_IRQTRIG_DEFAULT:
/* conforms to spec, ie. bus-type dependent trigger mode */
if (test_bit(bus, mp_bus_not_pci))
trigger = default_ISA_trigger(idx);
......@@ -854,11 +854,11 @@ static int irq_trigger(int idx)
trigger = default_PCI_trigger(idx);
/* Take EISA into account */
return eisa_irq_trigger(idx, bus, trigger);
case 1:
case MP_IRQTRIG_EDGE:
return IOAPIC_EDGE;
case 2:
case MP_IRQTRIG_RESERVED:
pr_warn("IOAPIC: Invalid trigger mode 2 defaulting to level\n");
case 3:
case MP_IRQTRIG_LEVEL:
default: /* Pointless default required due to gcc stupidity */
return IOAPIC_LEVEL;
}
......
......@@ -137,6 +137,8 @@ static int __init early_get_pnodeid(void)
case UV3_HUB_PART_NUMBER_X:
uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
break;
/* Update: UV4A has only a modified revision to indicate HUB fixes */
case UV4_HUB_PART_NUMBER:
uv_min_hub_revision_id += UV4_HUB_REVISION_BASE - 1;
uv_cpuid.gnode_shift = 2; /* min partition is 4 sockets */
......@@ -316,6 +318,7 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
} else if (!strcmp(oem_table_id, "UVH")) {
/* Only UV1 systems: */
uv_system_type = UV_NON_UNIQUE_APIC;
x86_platform.legacy.warm_reset = 0;
__this_cpu_write(x2apic_extra_bits, pnodeid << uvh_apicid.s.pnode_shift);
uv_set_apicid_hibit();
uv_apic = 1;
......@@ -767,6 +770,7 @@ static __init void map_gru_high(int max_pnode)
return;
}
/* Only UV3 has distributed GRU mode */
if (is_uv3_hub() && gru.s3.mode) {
map_gru_distributed(gru.v);
return;
......@@ -790,63 +794,61 @@ static __init void map_mmr_high(int max_pnode)
pr_info("UV: MMR disabled\n");
}
/*
* This commonality works because both 0 & 1 versions of the MMIOH OVERLAY
* and REDIRECT MMR regs are exactly the same on UV3.
*/
struct mmioh_config {
unsigned long overlay;
unsigned long redirect;
char *id;
};
static __initdata struct mmioh_config mmiohs[] = {
{
UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR,
UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR,
"MMIOH0"
},
{
UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR,
UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR,
"MMIOH1"
},
};
/* UV3 & UV4 have identical MMIOH overlay configs */
static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
/* UV3/4 have identical MMIOH overlay configs, UV4A is slightly different */
static __init void map_mmioh_high_uv34(int index, int min_pnode, int max_pnode)
{
union uv3h_rh_gam_mmioh_overlay_config0_mmr_u overlay;
unsigned long overlay;
unsigned long mmr;
unsigned long base;
unsigned long nasid_mask;
unsigned long m_overlay;
int i, n, shift, m_io, max_io;
int nasid, lnasid, fi, li;
char *id;
id = mmiohs[index].id;
overlay.v = uv_read_local_mmr(mmiohs[index].overlay);
pr_info("UV: %s overlay 0x%lx base:0x%x m_io:%d\n", id, overlay.v, overlay.s3.base, overlay.s3.m_io);
if (!overlay.s3.enable) {
if (index == 0) {
id = "MMIOH0";
m_overlay = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR;
overlay = uv_read_local_mmr(m_overlay);
base = overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK;
mmr = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR;
m_io = (overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK)
>> UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT;
shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT;
n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH;
nasid_mask = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK;
} else {
id = "MMIOH1";
m_overlay = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR;
overlay = uv_read_local_mmr(m_overlay);
base = overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK;
mmr = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR;
m_io = (overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK)
>> UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT;
shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT;
n = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH;
nasid_mask = UVH_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK;
}
pr_info("UV: %s overlay 0x%lx base:0x%lx m_io:%d\n", id, overlay, base, m_io);
if (!(overlay & UVH_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK)) {
pr_info("UV: %s disabled\n", id);
return;
}
shift = UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT;
base = (unsigned long)overlay.s3.base;
m_io = overlay.s3.m_io;
mmr = mmiohs[index].redirect;
n = UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH;
/* Convert to NASID: */
min_pnode *= 2;
max_pnode *= 2;
max_io = lnasid = fi = li = -1;
for (i = 0; i < n; i++) {
union uv3h_rh_gam_mmioh_redirect_config0_mmr_u redirect;
unsigned long m_redirect = mmr + i * 8;
unsigned long redirect = uv_read_local_mmr(m_redirect);
nasid = redirect & nasid_mask;
if (i == 0)
pr_info("UV: %s redirect base 0x%lx(@0x%lx) 0x%04x\n",
id, redirect, m_redirect, nasid);
redirect.v = uv_read_local_mmr(mmr + i * 8);
nasid = redirect.s3.nasid;
/* Invalid NASID: */
if (nasid < min_pnode || max_pnode < nasid)
nasid = -1;
......@@ -894,8 +896,8 @@ static __init void map_mmioh_high(int min_pnode, int max_pnode)
if (is_uv3_hub() || is_uv4_hub()) {
/* Map both MMIOH regions: */
map_mmioh_high_uv3(0, min_pnode, max_pnode);
map_mmioh_high_uv3(1, min_pnode, max_pnode);
map_mmioh_high_uv34(0, min_pnode, max_pnode);
map_mmioh_high_uv34(1, min_pnode, max_pnode);
return;
}
......
......@@ -31,6 +31,7 @@ extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
extern const struct hypervisor_x86 x86_hyper_xen_pv;
extern const struct hypervisor_x86 x86_hyper_xen_hvm;
extern const struct hypervisor_x86 x86_hyper_kvm;
extern const struct hypervisor_x86 x86_hyper_jailhouse;
static const __initconst struct hypervisor_x86 * const hypervisors[] =
{
......@@ -45,6 +46,9 @@ static const __initconst struct hypervisor_x86 * const hypervisors[] =
#ifdef CONFIG_KVM_GUEST
&x86_hyper_kvm,
#endif
#ifdef CONFIG_JAILHOUSE_GUEST
&x86_hyper_jailhouse,
#endif
};
enum x86_hypervisor_type x86_hyper_type;
......
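For context, a simplified sketch of how the detection core chooses among these entries (illustrative only, not the exact hypervisor.c code): each ->detect() callback is called and the hypervisor reporting the highest value wins, so jailhouse_detect() returning the CPUID signature base (0x40000000 or above) is enough to beat the bare-metal default of zero:

/* Simplified, illustrative selection logic. */
static const struct hypervisor_x86 *example_pick_hypervisor(void)
{
	const struct hypervisor_x86 *found = NULL;
	uint32_t best = 0, pri;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(hypervisors); i++) {
		pri = hypervisors[i]->detect();
		if (pri > best) {
			best = pri;
			found = hypervisors[i];
		}
	}
	return found;	/* NULL means bare metal */
}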
// SPDX-License-Identifier: GPL-2.0
/*
* Jailhouse paravirt_ops implementation
*
* Copyright (c) Siemens AG, 2015-2017
*
* Authors:
* Jan Kiszka <jan.kiszka@siemens.com>
*/
#include <linux/acpi_pmtmr.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/hypervisor.h>
#include <asm/i8259.h>
#include <asm/irqdomain.h>
#include <asm/pci_x86.h>
#include <asm/reboot.h>
#include <asm/setup.h>
static __initdata struct jailhouse_setup_data setup_data;
static unsigned int precalibrated_tsc_khz;
static uint32_t jailhouse_cpuid_base(void)
{
if (boot_cpu_data.cpuid_level < 0 ||
!boot_cpu_has(X86_FEATURE_HYPERVISOR))
return 0;
return hypervisor_cpuid_base("Jailhouse\0\0\0", 0);
}
static uint32_t __init jailhouse_detect(void)
{
return jailhouse_cpuid_base();
}
static void jailhouse_get_wallclock(struct timespec *now)
{
memset(now, 0, sizeof(*now));
}
static void __init jailhouse_timer_init(void)
{
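	/*
	 * apic_khz is the APIC timer clock in kHz as reported via the setup
	 * data, so apic_khz * 1000 is ticks per second and dividing by HZ
	 * gives ticks per jiffy; e.g. (hypothetical numbers) apic_khz =
	 * 1000000 with HZ = 250 yields lapic_timer_frequency = 4,000,000.
	 */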
lapic_timer_frequency = setup_data.apic_khz * (1000 / HZ);
}
static unsigned long jailhouse_get_tsc(void)
{
return precalibrated_tsc_khz;
}
static void __init jailhouse_x2apic_init(void)
{
#ifdef CONFIG_X86_X2APIC
if (!x2apic_enabled())
return;
/*
* We do not have access to IR inside Jailhouse non-root cells. So
* we have to run in physical mode.
*/
x2apic_phys = 1;
/*
* This will trigger the switch to apic_x2apic_phys. Empty OEM IDs
* ensure that only this APIC driver picks up the call.
*/
default_acpi_madt_oem_check("", "");
#endif
}
static void __init jailhouse_get_smp_config(unsigned int early)
{
struct ioapic_domain_cfg ioapic_cfg = {
.type = IOAPIC_DOMAIN_STRICT,
.ops = &mp_ioapic_irqdomain_ops,
};
struct mpc_intsrc mp_irq = {
.type = MP_INTSRC,
.irqtype = mp_INT,
.irqflag = MP_IRQPOL_ACTIVE_HIGH | MP_IRQTRIG_EDGE,
};
unsigned int cpu;
jailhouse_x2apic_init();
register_lapic_address(0xfee00000);
for (cpu = 0; cpu < setup_data.num_cpus; cpu++) {
generic_processor_info(setup_data.cpu_ids[cpu],
boot_cpu_apic_version);
}
smp_found_config = 1;
if (setup_data.standard_ioapic) {
mp_register_ioapic(0, 0xfec00000, gsi_top, &ioapic_cfg);
/* Register 1:1 mapping for legacy UART IRQs 3 and 4 */
mp_irq.srcbusirq = mp_irq.dstirq = 3;
mp_save_irq(&mp_irq);
mp_irq.srcbusirq = mp_irq.dstirq = 4;
mp_save_irq(&mp_irq);
}
}
static void jailhouse_no_restart(void)
{
pr_notice("Jailhouse: Restart not supported, halting\n");
machine_halt();
}
static int __init jailhouse_pci_arch_init(void)
{
pci_direct_init(1);
/*
* There are no bridges on the virtual PCI root bus under Jailhouse,
* thus no other way to discover all devices than a full scan.
* Respect any overrides via the command line, though.
*/
if (pcibios_last_bus < 0)
pcibios_last_bus = 0xff;
return 0;
}
static void __init jailhouse_init_platform(void)
{
u64 pa_data = boot_params.hdr.setup_data;
struct setup_data header;
void *mapping;
x86_init.irqs.pre_vector_init = x86_init_noop;
x86_init.timers.timer_init = jailhouse_timer_init;
x86_init.mpparse.get_smp_config = jailhouse_get_smp_config;
x86_init.pci.arch_init = jailhouse_pci_arch_init;
x86_platform.calibrate_cpu = jailhouse_get_tsc;
x86_platform.calibrate_tsc = jailhouse_get_tsc;
x86_platform.get_wallclock = jailhouse_get_wallclock;
x86_platform.legacy.rtc = 0;
x86_platform.legacy.warm_reset = 0;
x86_platform.legacy.i8042 = X86_LEGACY_I8042_PLATFORM_ABSENT;
legacy_pic = &null_legacy_pic;
machine_ops.emergency_restart = jailhouse_no_restart;
while (pa_data) {
mapping = early_memremap(pa_data, sizeof(header));
memcpy(&header, mapping, sizeof(header));
early_memunmap(mapping, sizeof(header));
if (header.type == SETUP_JAILHOUSE &&
header.len >= sizeof(setup_data)) {
pa_data += offsetof(struct setup_data, data);
mapping = early_memremap(pa_data, sizeof(setup_data));
memcpy(&setup_data, mapping, sizeof(setup_data));
early_memunmap(mapping, sizeof(setup_data));
break;
}
pa_data = header.next;
}
if (!pa_data)
panic("Jailhouse: No valid setup data found");
if (setup_data.compatible_version > JAILHOUSE_SETUP_REQUIRED_VERSION)
panic("Jailhouse: Unsupported setup data structure");
pmtmr_ioport = setup_data.pm_timer_address;
pr_debug("Jailhouse: PM-Timer IO Port: %#x\n", pmtmr_ioport);
precalibrated_tsc_khz = setup_data.tsc_khz;
setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
pci_probe = 0;
/*
* Avoid the kernel complaining about missing ACPI tables - there
* are none in a non-root cell.
*/
disable_acpi();
}
bool jailhouse_paravirt(void)
{
return jailhouse_cpuid_base() != 0;
}
static bool jailhouse_x2apic_available(void)
{
/*
* The x2APIC is only available if the root cell enabled it. Jailhouse
* does not support switching between xAPIC and x2APIC.
*/
return x2apic_enabled();
}
const struct hypervisor_x86 x86_hyper_jailhouse __refconst = {
.name = "Jailhouse",
.detect = jailhouse_detect,
.init.init_platform = jailhouse_init_platform,
.init.x2apic_available = jailhouse_x2apic_available,
};
......@@ -281,7 +281,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
int ELCR_fallback = 0;
intsrc.type = MP_INTSRC;
intsrc.irqflag = 0; /* conforming */
intsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
intsrc.srcbus = 0;
intsrc.dstapic = mpc_ioapic_id(0);
......@@ -324,10 +324,13 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
* copy that information over to the MP table in the
* irqflag field (level sensitive, active high polarity).
*/
if (ELCR_trigger(i))
intsrc.irqflag = 13;
else
intsrc.irqflag = 0;
if (ELCR_trigger(i)) {
intsrc.irqflag = MP_IRQTRIG_LEVEL |
MP_IRQPOL_ACTIVE_HIGH;
} else {
intsrc.irqflag = MP_IRQTRIG_DEFAULT |
MP_IRQPOL_DEFAULT;
}
}
intsrc.srcbusirq = i;
......@@ -419,7 +422,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
construct_ioapic_table(mpc_default_type);
lintsrc.type = MP_LINTSRC;
lintsrc.irqflag = 0; /* conforming */
lintsrc.irqflag = MP_IRQTRIG_DEFAULT | MP_IRQPOL_DEFAULT;
lintsrc.srcbusid = 0;
lintsrc.srcbusirq = 0;
lintsrc.destapic = MP_APIC_ALL;
......@@ -664,7 +667,7 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
if (m->irqtype != mp_INT)
return 0;
if (m->irqflag != 0x0f)
if (m->irqflag != (MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW))
return 0;
/* not legacy */
......@@ -673,7 +676,8 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
if (mp_irqs[i].irqtype != mp_INT)
continue;
if (mp_irqs[i].irqflag != 0x0f)
if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
MP_IRQPOL_ACTIVE_LOW))
continue;
if (mp_irqs[i].srcbus != m->srcbus)
......@@ -784,7 +788,8 @@ static int __init replace_intsrc_all(struct mpc_table *mpc,
if (mp_irqs[i].irqtype != mp_INT)
continue;
if (mp_irqs[i].irqflag != 0x0f)
if (mp_irqs[i].irqflag != (MP_IRQTRIG_LEVEL |
MP_IRQPOL_ACTIVE_LOW))
continue;
if (nr_m_spare > 0) {
......
......@@ -9,6 +9,7 @@ void __init x86_early_init_platform_quirks(void)
{
x86_platform.legacy.i8042 = X86_LEGACY_I8042_EXPECTED_PRESENT;
x86_platform.legacy.rtc = 1;
x86_platform.legacy.warm_reset = 1;
x86_platform.legacy.reserve_bios_regions = 0;
x86_platform.legacy.devices.pnpbios = 1;
......
......@@ -934,7 +934,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
* the targeted processor.
*/
if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
if (x86_platform.legacy.warm_reset) {
pr_debug("Setting warm reset code and vector.\n");
......@@ -1006,7 +1006,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
/* mark "stuck" area as not stuck */
*trampoline_status = 0;
if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
if (x86_platform.legacy.warm_reset) {
/*
* Cleanup possible dangling ends...
*/
......
......@@ -300,6 +300,7 @@ int __init intel_mid_pci_init(void)
pci_root_ops = intel_mid_pci_ops;
pci_soc_mode = 1;
/* Continue with standard init */
acpi_noirq_set();
return 1;
}
......
......@@ -194,7 +194,7 @@ void __init x86_intel_mid_early_setup(void)
x86_platform.calibrate_tsc = intel_mid_calibrate_tsc;
x86_platform.get_nmi_reason = intel_mid_get_nmi_reason;
x86_init.pci.init = intel_mid_pci_init;
x86_init.pci.arch_init = intel_mid_pci_init;
x86_init.pci.fixup_irqs = x86_init_noop;
legacy_pic = &null_legacy_pic;
......
......@@ -96,8 +96,7 @@ int __init sfi_parse_mtmr(struct sfi_table_header *table)
pentry->freq_hz, pentry->irq);
mp_irq.type = MP_INTSRC;
mp_irq.irqtype = mp_INT;
/* triggering mode edge bit 2-3, active high polarity bit 0-1 */
mp_irq.irqflag = 5;
mp_irq.irqflag = MP_IRQTRIG_EDGE | MP_IRQPOL_ACTIVE_HIGH;
mp_irq.srcbus = MP_BUS_ISA;
mp_irq.srcbusirq = pentry->irq; /* IRQ */
mp_irq.dstapic = MP_APIC_ALL;
......@@ -168,7 +167,7 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table)
totallen, (u32)pentry->phys_addr, pentry->irq);
mp_irq.type = MP_INTSRC;
mp_irq.irqtype = mp_INT;
mp_irq.irqflag = 0xf; /* level trigger and active low */
mp_irq.irqflag = MP_IRQTRIG_LEVEL | MP_IRQPOL_ACTIVE_LOW;
mp_irq.srcbus = MP_BUS_ISA;
mp_irq.srcbusirq = pentry->irq; /* IRQ */
mp_irq.dstapic = MP_APIC_ALL;
......
......@@ -1751,7 +1751,8 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
uv1 = 1;
/* the 14-bit pnode */
write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
write_mmr_descriptor_base(pnode,
(n << UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT | m));
/*
* Initializing all 8 (ITEMS_PER_DESC) descriptors for each
* cpu even though we only use the first one; one descriptor can
......
......@@ -361,22 +361,6 @@ config ACPI_PCI_SLOT
i.e., segment/bus/device/function tuples, with physical slots in
the system. If you are unsure, say N.
config X86_PM_TIMER
bool "Power Management Timer Support" if EXPERT
depends on X86
default y
help
The Power Management Timer is available on all ACPI-capable systems,
in most cases even if ACPI is unusable or blacklisted.
This timing source is not affected by power management features
like aggressive processor idling, throttling, frequency and/or
voltage scaling, unlike the commonly used Time Stamp Counter
(TSC) timing source.
You should nearly always say Y here because many modern
systems require this timer.
config ACPI_CONTAINER
bool "Container and Module Devices"
default (ACPI_HOTPLUG_MEMORY || ACPI_HOTPLUG_CPU)
......@@ -564,3 +548,19 @@ config TPS68470_PMIC_OPREGION
using this, are probed.
endif # ACPI
config X86_PM_TIMER
bool "Power Management Timer Support" if EXPERT
depends on X86 && (ACPI || JAILHOUSE_GUEST)
default y
help
The Power Management Timer is available on all ACPI-capable systems,
in most cases even if ACPI is unusable or blacklisted.
This timing source is not affected by power management features
like aggressive processor idling, throttling, frequency and/or
voltage scaling, unlike the commonly used Time Stamp Counter
(TSC) timing source.
You should nearly always say Y here because many modern
systems require this timer.