Commit 5183a617 authored by Linus Torvalds

Merge tag 'x86-platform-2020-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 platform updates from Ingo Molnar:
 "The biggest change is the removal of SGI UV1 support, which allowed
  the removal of the legacy EFI old_mmap code as well.

  This removes quite a bunch of old code & quirks"

* tag 'x86-platform-2020-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/efi: Remove unused EFI_UV1_MEMMAP code
  x86/platform/uv: Remove uv bios and efi code related to EFI_UV1_MEMMAP
  x86/efi: Remove references to no-longer-used efi_have_uv1_memmap()
  x86/efi: Delete SGI UV1 detection.
  x86/platform/uv: Remove efi=old_map command line option
  x86/platform/uv: Remove vestigial mention of UV1 platform from bios header
  x86/platform/uv: Remove support for UV1 platform from uv
  x86/platform/uv: Remove support for uv1 platform from uv_hub
  x86/platform/uv: Remove support for UV1 platform from uv_bau
  x86/platform/uv: Remove support for UV1 platform from uv_mmrs
  x86/platform/uv: Remove support for UV1 platform from x2apic_uv_x
  x86/platform/uv: Remove support for UV1 platform from uv_tlb
  x86/platform/uv: Remove support for UV1 platform from uv_time
parents e96ec8cf 3bcf25a4
......@@ -22,17 +22,7 @@ extern unsigned long efi_fw_vendor, efi_config_table;
*
* This is the main reason why we're doing stable VA mappings for RT
* services.
*
* SGI UV1 machines are known to be incompatible with this scheme, so we
* provide an opt-out for these machines via a DMI quirk that sets the
* attribute below.
*/
#define EFI_UV1_MEMMAP EFI_ARCH_1
static inline bool efi_have_uv1_memmap(void)
{
return IS_ENABLED(CONFIG_X86_UV) && efi_enabled(EFI_UV1_MEMMAP);
}
#define EFI32_LOADER_SIGNATURE "EL32"
#define EFI64_LOADER_SIGNATURE "EL64"
......@@ -122,8 +112,6 @@ struct efi_scratch {
efi_sync_low_kernel_mappings(); \
kernel_fpu_begin(); \
firmware_restrict_branch_speculation_start(); \
\
if (!efi_have_uv1_memmap()) \
efi_switch_mm(&efi_mm); \
})
......@@ -132,9 +120,7 @@ struct efi_scratch {
#define arch_efi_call_virt_teardown() \
({ \
if (!efi_have_uv1_memmap()) \
efi_switch_mm(efi_scratch.prev_mm); \
\
firmware_restrict_branch_speculation_end(); \
kernel_fpu_end(); \
})
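For reference, with the UV1 opt-out gone the call wrappers reduce to an unconditional page-table switch around the firmware call. A sketch of the resulting macro pair, reconstructed from the two hunks above (indentation and line continuations approximated, not quoted verbatim from the tree):

	#define arch_efi_call_virt_setup()				\
	({								\
		efi_sync_low_kernel_mappings();				\
		kernel_fpu_begin();					\
		firmware_restrict_branch_speculation_start();		\
		efi_switch_mm(&efi_mm);					\
	})

	#define arch_efi_call_virt_teardown()				\
	({								\
		efi_switch_mm(efi_scratch.prev_mm);			\
		firmware_restrict_branch_speculation_end();		\
		kernel_fpu_end();					\
	})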
......@@ -176,8 +162,6 @@ extern void efi_delete_dummy_variable(void);
extern void efi_switch_mm(struct mm_struct *mm);
extern void efi_recover_from_page_fault(unsigned long phys_addr);
extern void efi_free_boot_services(void);
extern pgd_t * __init efi_uv1_memmap_phys_prolog(void);
extern void __init efi_uv1_memmap_phys_epilog(pgd_t *save_pgd);
/* kexec external ABI */
struct efi_setup_data {
......
......@@ -72,7 +72,7 @@ struct uv_gam_range_entry {
};
#define UV_SYSTAB_SIG "UVST"
#define UV_SYSTAB_VERSION_1 1 /* UV1/2/3 BIOS version */
#define UV_SYSTAB_VERSION_1 1 /* UV2/3 BIOS version */
#define UV_SYSTAB_VERSION_UV4 0x400 /* UV4 BIOS base version */
#define UV_SYSTAB_VERSION_UV4_1 0x401 /* + gpa_shift */
#define UV_SYSTAB_VERSION_UV4_2 0x402 /* + TYPE_NVRAM/WINDOW/MBOX */
......
......@@ -4,7 +4,7 @@
#include <asm/tlbflush.h>
enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC};
struct cpumask;
struct mm_struct;
......
......@@ -46,10 +46,7 @@
#define UV_ACT_STATUS_SIZE 2
#define UV_DISTRIBUTION_SIZE 256
#define UV_SW_ACK_NPENDING 8
#define UV1_NET_ENDPOINT_INTD 0x38
#define UV2_NET_ENDPOINT_INTD 0x28
#define UV_NET_ENDPOINT_INTD (is_uv1_hub() ? \
UV1_NET_ENDPOINT_INTD : UV2_NET_ENDPOINT_INTD)
#define UV_NET_ENDPOINT_INTD 0x28
#define UV_PAYLOADQ_GNODE_SHIFT 49
#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
#define UV_BAU_BASENAME "sgi_uv/bau_tunables"
......@@ -64,14 +61,9 @@
* UV2: Bit 19 selects between
* (0): 10 microsecond timebase and
* (1): 80 microseconds
* we're using 560us, similar to UV1: 65 units of 10us
* we're using 560us
*/
#define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL)
#define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL)
#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD (is_uv1_hub() ? \
UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD : \
UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD)
#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL)
/* assuming UV3 is the same */
#define BAU_MISC_CONTROL_MULT_MASK 3
......@@ -148,7 +140,6 @@
#define UV_LB_SUBNODEID 0x10
/* these two are the same for UV1 and UV2: */
#define UV_SA_SHFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
#define UV_SA_MASK UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK
/* 4 bits of software ack period */
......@@ -189,8 +180,7 @@
#define BAU_DESC_QUALIFIER 0x534749
enum uv_bau_version {
UV_BAU_V1 = 1,
UV_BAU_V2,
UV_BAU_V2 = 2,
UV_BAU_V3,
UV_BAU_V4,
};
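The explicit UV_BAU_V2 = 2 keeps the surviving enumerators at their previous numeric values after UV_BAU_V1 is deleted. A hypothetical compile-time check, not part of the patch, that would pin this invariant down:

	_Static_assert(UV_BAU_V2 == 2, "BAU version values must not shift");
	_Static_assert(UV_BAU_V4 == 4, "BAU version values must not shift");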
......@@ -233,12 +223,12 @@ struct bau_local_cpumask {
*/
/**
* struct uv1_2_3_bau_msg_payload - defines payload for INTD transactions
* struct uv2_3_bau_msg_payload - defines payload for INTD transactions
* @address: Signifies a page or all TLB's of the cpu
* @sending_cpu: CPU from which the message originates
* @acknowledge_count: CPUs on the destination Hub that received the interrupt
*/
struct uv1_2_3_bau_msg_payload {
struct uv2_3_bau_msg_payload {
u64 address;
u16 sending_cpu;
u16 acknowledge_count;
......@@ -259,89 +249,6 @@ struct uv4_bau_msg_payload {
u32 qualifier:24;
};
/*
* UV1 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
* see table 4.2.3.0.1 in broadcast_assist spec.
*/
struct uv1_bau_msg_header {
unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
/* bits 5:0 */
unsigned int base_dest_nasid:15; /* nasid of the first bit */
/* bits 20:6 */ /* in uvhub map */
unsigned int command:8; /* message type */
/* bits 28:21 */
/* 0x38: SN3net EndPoint Message */
unsigned int rsvd_1:3; /* must be zero */
/* bits 31:29 */
/* int will align on 32 bits */
unsigned int rsvd_2:9; /* must be zero */
/* bits 40:32 */
/* Suppl_A is 56-41 */
unsigned int sequence:16; /* message sequence number */
/* bits 56:41 */ /* becomes bytes 16-17 of msg */
/* Address field (96:57) is
never used as an address
(these are address bits
42:3) */
unsigned int rsvd_3:1; /* must be zero */
/* bit 57 */
/* address bits 27:4 are payload */
/* these next 24 (58-81) bits become bytes 12-14 of msg */
/* bits 65:58 land in byte 12 */
unsigned int replied_to:1; /* sent as 0 by the source to
byte 12 */
/* bit 58 */
unsigned int msg_type:3; /* software type of the
message */
/* bits 61:59 */
unsigned int canceled:1; /* message canceled, resource
is to be freed*/
/* bit 62 */
unsigned int payload_1a:1; /* not currently used */
/* bit 63 */
unsigned int payload_1b:2; /* not currently used */
/* bits 65:64 */
/* bits 73:66 land in byte 13 */
unsigned int payload_1ca:6; /* not currently used */
/* bits 71:66 */
unsigned int payload_1c:2; /* not currently used */
/* bits 73:72 */
/* bits 81:74 land in byte 14 */
unsigned int payload_1d:6; /* not currently used */
/* bits 79:74 */
unsigned int payload_1e:2; /* not currently used */
/* bits 81:80 */
unsigned int rsvd_4:7; /* must be zero */
/* bits 88:82 */
unsigned int swack_flag:1; /* software acknowledge flag */
/* bit 89 */
/* INTD transactions at
destination are to wait for
software acknowledge */
unsigned int rsvd_5:6; /* must be zero */
/* bits 95:90 */
unsigned int rsvd_6:5; /* must be zero */
/* bits 100:96 */
unsigned int int_both:1; /* if 1, interrupt both sockets
on the uvhub */
/* bit 101*/
unsigned int fairness:3; /* usually zero */
/* bits 104:102 */
unsigned int multilevel:1; /* multi-level multicast
format */
/* bit 105 */
/* 0 for TLB: endpoint multi-unicast messages */
unsigned int chaining:1; /* next descriptor is part of
this activation*/
/* bit 106 */
unsigned int rsvd_7:21; /* must be zero */
/* bits 127:107 */
};
/*
* UV2 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
* see figure 9-2 of harp_sys.pdf
......@@ -418,25 +325,14 @@ struct bau_desc {
* message template, consisting of header and payload:
*/
union bau_msg_header {
struct uv1_bau_msg_header uv1_hdr;
struct uv2_3_bau_msg_header uv2_3_hdr;
} header;
union bau_payload_header {
struct uv1_2_3_bau_msg_payload uv1_2_3;
struct uv2_3_bau_msg_payload uv2_3;
struct uv4_bau_msg_payload uv4;
} payload;
};
/* UV1:
* -payload-- ---------header------
* bytes 0-11 bits 41-56 bits 58-81
* A B (2) C (3)
*
* A/B/C are moved to:
* A C B
* bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
* ------------payload queue-----------
*/
/* UV2:
* -payload-- ---------header------
* bytes 0-11 bits 70-78 bits 21-44
......
......@@ -224,17 +224,11 @@ static inline struct uv_hub_info_s *uv_cpu_hub_info(int cpu)
* This is a software convention - NOT the hardware revision numbers in
* the hub chip.
*/
#define UV1_HUB_REVISION_BASE 1
#define UV2_HUB_REVISION_BASE 3
#define UV3_HUB_REVISION_BASE 5
#define UV4_HUB_REVISION_BASE 7
#define UV4A_HUB_REVISION_BASE 8 /* UV4 (fixed) rev 2 */
static inline int is_uv1_hub(void)
{
return is_uv_hubbed(uv(1));
}
static inline int is_uv2_hub(void)
{
return is_uv_hubbed(uv(2));
......@@ -265,7 +259,7 @@ static inline int is_uvx_hub(void)
static inline int is_uv_hub(void)
{
return is_uv1_hub() || is_uvx_hub();
return is_uvx_hub();
}
union uvh_apicid {
......@@ -292,11 +286,6 @@ union uvh_apicid {
#define UV_PNODE_TO_GNODE(p) ((p) |uv_hub_info->gnode_extra)
#define UV_PNODE_TO_NASID(p) (UV_PNODE_TO_GNODE(p) << 1)
#define UV1_LOCAL_MMR_BASE 0xf4000000UL
#define UV1_GLOBAL_MMR32_BASE 0xf8000000UL
#define UV1_LOCAL_MMR_SIZE (64UL * 1024 * 1024)
#define UV1_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024)
#define UV2_LOCAL_MMR_BASE 0xfa000000UL
#define UV2_GLOBAL_MMR32_BASE 0xfc000000UL
#define UV2_LOCAL_MMR_SIZE (32UL * 1024 * 1024)
......@@ -313,25 +302,21 @@ union uvh_apicid {
#define UV4_GLOBAL_MMR32_SIZE (16UL * 1024 * 1024)
#define UV_LOCAL_MMR_BASE ( \
is_uv1_hub() ? UV1_LOCAL_MMR_BASE : \
is_uv2_hub() ? UV2_LOCAL_MMR_BASE : \
is_uv3_hub() ? UV3_LOCAL_MMR_BASE : \
/*is_uv4_hub*/ UV4_LOCAL_MMR_BASE)
#define UV_GLOBAL_MMR32_BASE ( \
is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE : \
is_uv2_hub() ? UV2_GLOBAL_MMR32_BASE : \
is_uv3_hub() ? UV3_GLOBAL_MMR32_BASE : \
/*is_uv4_hub*/ UV4_GLOBAL_MMR32_BASE)
#define UV_LOCAL_MMR_SIZE ( \
is_uv1_hub() ? UV1_LOCAL_MMR_SIZE : \
is_uv2_hub() ? UV2_LOCAL_MMR_SIZE : \
is_uv3_hub() ? UV3_LOCAL_MMR_SIZE : \
/*is_uv4_hub*/ UV4_LOCAL_MMR_SIZE)
#define UV_GLOBAL_MMR32_SIZE ( \
is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE : \
is_uv2_hub() ? UV2_GLOBAL_MMR32_SIZE : \
is_uv3_hub() ? UV3_GLOBAL_MMR32_SIZE : \
/*is_uv4_hub*/ UV4_GLOBAL_MMR32_SIZE)
......@@ -352,8 +337,6 @@ union uvh_apicid {
#define UVH_APICID 0x002D0E00L
#define UV_APIC_PNODE_SHIFT 6
#define UV_APICID_HIBIT_MASK 0xffff0000
/* Local Bus from cpu's perspective */
#define LOCAL_BUS_BASE 0x1c00000
#define LOCAL_BUS_SIZE (4 * 1024 * 1024)
......@@ -560,15 +543,6 @@ static inline int uv_apicid_to_pnode(int apicid)
return s2pn ? s2pn[pnode - uv_hub_info->min_socket] : pnode;
}
/* Convert an apicid to the socket number on the blade */
static inline int uv_apicid_to_socket(int apicid)
{
if (is_uv1_hub())
return (apicid >> (uv_hub_info->apic_pnode_shift - 1)) & 1;
else
return 0;
}
/*
* Access global MMRs using the low memory MMR32 space. This region supports
* faster MMR access but not all MMRs are accessible in this space.
......@@ -660,7 +634,7 @@ static inline int uv_cpu_blade_processor_id(int cpu)
return uv_cpu_info_per(cpu)->blade_cpu_id;
}
/* Blade number to Node number (UV1..UV4 is 1:1) */
/* Blade number to Node number (UV2..UV4 is 1:1) */
static inline int uv_blade_to_node(int blade)
{
return blade;
......@@ -674,7 +648,7 @@ static inline int uv_numa_blade_id(void)
/*
* Convert linux node number to the UV blade number.
* .. Currently for UV1 thru UV4 the node and the blade are identical.
* .. Currently for UV2 thru UV4 the node and the blade are identical.
* .. If this changes then you MUST check references to this function!
*/
static inline int uv_node_to_blade_id(int nid)
......@@ -821,8 +795,6 @@ static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
}
}
extern unsigned int uv_apicid_hibits;
/*
* Get the minimum revision number of the hub chips within the partition.
* (See UVx_HUB_REVISION_BASE above for specific values.)
......
......@@ -24,8 +24,6 @@
#include <asm/uv/uv.h>
#include <asm/apic.h>
static DEFINE_PER_CPU(int, x2apic_extra_bits);
static enum uv_system_type uv_system_type;
static int uv_hubbed_system;
static int uv_hubless_system;
......@@ -40,7 +38,7 @@ static u8 oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
static struct {
unsigned int apicid_shift;
unsigned int apicid_mask;
unsigned int socketid_shift; /* aka pnode_shift for UV1/2/3 */
unsigned int socketid_shift; /* aka pnode_shift for UV2/3 */
unsigned int pnode_mask;
unsigned int gpa_shift;
unsigned int gnode_shift;
......@@ -48,8 +46,6 @@ static struct {
static int uv_min_hub_revision_id;
unsigned int uv_apicid_hibits;
static struct apic apic_x2apic_uv_x;
static struct uv_hub_info_s uv_hub_info_node0;
......@@ -139,12 +135,8 @@ static void __init uv_tsc_check_sync(void)
/* Accommodate different UV arch BIOSes */
mmr = uv_early_read_mmr(UVH_TSC_SYNC_MMR);
mmr_shift =
is_uv1_hub() ? 0 :
is_uv2_hub() ? UVH_TSC_SYNC_SHIFT_UV2K : UVH_TSC_SYNC_SHIFT;
if (mmr_shift)
sync_state = (mmr >> mmr_shift) & UVH_TSC_SYNC_MASK;
else
sync_state = 0;
switch (sync_state) {
case UVH_TSC_SYNC_VALID:
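With the UV1 case gone, mmr_shift can no longer be zero, so the guarded read of sync_state collapses. The code that results from this hunk is roughly:

	mmr_shift =
		is_uv2_hub() ? UVH_TSC_SYNC_SHIFT_UV2K : UVH_TSC_SYNC_SHIFT;

	sync_state = (mmr >> mmr_shift) & UVH_TSC_SYNC_MASK;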
......@@ -223,21 +215,6 @@ static void __init early_get_apic_socketid_shift(void)
pr_info("UV: socketid_shift:%d pnode_mask:0x%x\n", uv_cpuid.socketid_shift, uv_cpuid.pnode_mask);
}
/*
* Add an extra bit as dictated by bios to the destination apicid of
* interrupts potentially passing through the UV HUB. This prevents
* a deadlock between interrupts and IO port operations.
*/
static void __init uv_set_apicid_hibit(void)
{
union uv1h_lb_target_physical_apic_id_mask_u apicid_mask;
if (is_uv1_hub()) {
apicid_mask.v = uv_early_read_mmr(UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK);
uv_apicid_hibits = apicid_mask.s1.bit_enables & UV_APICID_HIBIT_MASK;
}
}
static void __init uv_stringify(int len, char *to, char *from)
{
/* Relies on 'to' being NULL chars so result will be NULL terminated */
......@@ -280,36 +257,25 @@ static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id)
/*
* Determine UV arch type.
* SGI: UV100/1000
* SGI2: UV2000/3000
* SGI3: UV300 (truncated to 4 chars because of different varieties)
* SGI4: UV400 (truncated to 4 chars because of different varieties)
*/
uv_hub_info->hub_revision =
!strncmp(oem_id, "SGI4", 4) ? UV4_HUB_REVISION_BASE :
!strncmp(oem_id, "SGI3", 4) ? UV3_HUB_REVISION_BASE :
!strcmp(oem_id, "SGI2") ? UV2_HUB_REVISION_BASE :
!strcmp(oem_id, "SGI") ? UV1_HUB_REVISION_BASE : 0;
if (uv_hub_info->hub_revision == 0)
goto badbios;
switch (uv_hub_info->hub_revision) {
case UV4_HUB_REVISION_BASE:
if (!strncmp(oem_id, "SGI4", 4)) {
uv_hub_info->hub_revision = UV4_HUB_REVISION_BASE;
uv_hubbed_system = 0x11;
break;
case UV3_HUB_REVISION_BASE:
} else if (!strncmp(oem_id, "SGI3", 4)) {
uv_hub_info->hub_revision = UV3_HUB_REVISION_BASE;
uv_hubbed_system = 0x9;
break;
case UV2_HUB_REVISION_BASE:
} else if (!strcmp(oem_id, "SGI2")) {
uv_hub_info->hub_revision = UV2_HUB_REVISION_BASE;
uv_hubbed_system = 0x5;
break;
case UV1_HUB_REVISION_BASE:
uv_hubbed_system = 0x3;
break;
} else {
uv_hub_info->hub_revision = 0;
goto badbios;
}
pnodeid = early_get_pnodeid();
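The uv_hubbed_system values assigned above appear to be bit masks: 0x11 is bits 0 and 4 (UV4), 0x9 is bits 0 and 3 (UV3), 0x5 is bits 0 and 2 (UV2), and the removed UV1 value 0x3 was bits 0 and 1. Assuming uv(n) expands to (1 << n) and bit 0 means "any hubbed UV system", the is_uv*_hub() predicates decode them as in this illustrative sketch (the example_* names are hypothetical, not from the tree):

	/* Illustrative only: decode of the uv_hubbed_system mask set above. */
	static inline int example_is_uv4_hubbed(int hubbed_mask)
	{
		return hubbed_mask & (1 << 4);	/* matches 0x11 for UV4 */
	}

	static inline int example_is_any_uv_hubbed(int hubbed_mask)
	{
		return hubbed_mask & (1 << 0);	/* common bit in all values */
	}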
......@@ -323,14 +289,6 @@ static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id)
uv_system_type = UV_X2APIC;
uv_apic = 0;
} else if (!strcmp(oem_table_id, "UVH")) {
/* Only UV1 systems: */
uv_system_type = UV_NON_UNIQUE_APIC;
x86_platform.legacy.warm_reset = 0;
__this_cpu_write(x2apic_extra_bits, pnodeid << uvh_apicid.s.pnode_shift);
uv_set_apicid_hibit();
uv_apic = 1;
} else if (!strcmp(oem_table_id, "UVL")) {
/* Only used for very small systems: */
uv_system_type = UV_LEGACY_APIC;
......@@ -347,7 +305,7 @@ static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id)
badbios:
pr_err("UV: OEM_ID:%s OEM_TABLE_ID:%s\n", oem_id, oem_table_id);
pr_err("Current BIOS not supported, update kernel and/or BIOS\n");
pr_err("Current UV Type or BIOS not supported\n");
BUG();
}
......@@ -545,7 +503,6 @@ static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
int pnode;
pnode = uv_apicid_to_pnode(phys_apicid);
phys_apicid |= uv_apicid_hibits;
val = (1UL << UVH_IPI_INT_SEND_SHFT) |
(phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
......@@ -576,7 +533,7 @@ static void uv_send_IPI_one(int cpu, int vector)
dmode = dest_Fixed;
val = (1UL << UVH_IPI_INT_SEND_SHFT) |
((apicid | uv_apicid_hibits) << UVH_IPI_INT_APIC_ID_SHFT) |
(apicid << UVH_IPI_INT_APIC_ID_SHFT) |
(dmode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
(vector << UVH_IPI_INT_VECTOR_SHFT);
......@@ -634,22 +591,16 @@ static void uv_init_apic_ldr(void)
static u32 apic_uv_calc_apicid(unsigned int cpu)
{
return apic_default_calc_apicid(cpu) | uv_apicid_hibits;
return apic_default_calc_apicid(cpu);
}
static unsigned int x2apic_get_apic_id(unsigned long x)
static unsigned int x2apic_get_apic_id(unsigned long id)
{
unsigned int id;
WARN_ON(preemptible() && num_online_cpus() > 1);
id = x | __this_cpu_read(x2apic_extra_bits);
return id;
}
static u32 set_apic_id(unsigned int id)
{
/* CHECKME: Do we need to mask out the xapic extra bits? */
return id;
}
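Since the extra-bits fixup existed only for UV1, the APIC id accessors collapse to identity functions; per the hunk above the result is:

	static unsigned int x2apic_get_apic_id(unsigned long id)
	{
		return id;
	}

	static u32 set_apic_id(unsigned int id)
	{
		return id;
	}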
......@@ -721,11 +672,6 @@ static struct apic apic_x2apic_uv_x __ro_after_init = {
.safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
};
static void set_x2apic_extra_bits(int pnode)
{
__this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
}
#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_LENGTH 3
#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
......@@ -920,15 +866,7 @@ static __init void map_mmioh_high(int min_pnode, int max_pnode)
return;
}
if (is_uv1_hub()) {
mmr = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
mmioh.v = uv_read_local_mmr(mmr);
enable = !!mmioh.s1.enable;
base = mmioh.s1.base;
m_io = mmioh.s1.m_io;
n_io = mmioh.s1.n_io;
} else if (is_uv2_hub()) {
if (is_uv2_hub()) {
mmr = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
mmioh.v = uv_read_local_mmr(mmr);
......@@ -936,17 +874,16 @@ static __init void map_mmioh_high(int min_pnode, int max_pnode)
base = mmioh.s2.base;
m_io = mmioh.s2.m_io;
n_io = mmioh.s2.n_io;
} else {
return;
}
if (enable) {
max_pnode &= (1 << n_io) - 1;
pr_info("UV: base:0x%lx shift:%d N_IO:%d M_IO:%d max_pnode:0x%x\n", base, shift, m_io, n_io, max_pnode);
pr_info("UV: base:0x%lx shift:%d N_IO:%d M_IO:%d max_pnode:0x%x\n",
base, shift, m_io, n_io, max_pnode);
map_high("MMIOH", base, shift, m_io, max_pnode, map_uc);
} else {
pr_info("UV: MMIOH disabled\n");
}
}
}
static __init void map_low_mmrs(void)
......@@ -1081,9 +1018,6 @@ void uv_cpu_init(void)
return;
uv_hub_info->nr_online_cpus++;
if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
set_x2apic_extra_bits(uv_hub_info->pnode);
}
struct mn {
......@@ -1114,9 +1048,6 @@ static void get_mn(struct mn *mnp)
} else if (is_uv2_hub()) {
mnp->m_val = m_n_config.s2.m_skt;
mnp->n_lshift = mnp->m_val == 40 ? 40 : 39;
} else if (is_uv1_hub()) {
mnp->m_val = m_n_config.s1.m_skt;
mnp->n_lshift = mnp->m_val;
}
mnp->m_shift = mnp->m_val ? 64 - mnp->m_val : 0;
}
......@@ -1318,7 +1249,7 @@ static void __init build_socket_tables(void)
size_t bytes;
if (!gre) {
if (is_uv1_hub() || is_uv2_hub() || is_uv3_hub()) {
if (is_uv2_hub() || is_uv3_hub()) {
pr_info("UV: No UVsystab socket table, ignoring\n");
return;
}
......@@ -1500,8 +1431,7 @@ static void __init uv_system_init_hub(void)
unsigned short min_pnode = 9999, max_pnode = 0;
char *hub = is_uv4_hub() ? "UV400" :
is_uv3_hub() ? "UV300" :
is_uv2_hub() ? "UV2000/3000" :
is_uv1_hub() ? "UV100/1000" : NULL;
is_uv2_hub() ? "UV2000/3000" : NULL;
if (!hub) {
pr_err("UV: Unknown/unsupported UV hub\n");
......
......@@ -170,15 +170,6 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
if (!current_ei->efi_memmap_size)
return 0;
/*
* If 1:1 mapping is not enabled, second kernel can not setup EFI
* and use EFI run time services. User space will have to pass
* acpi_rsdp=<addr> on kernel command line to make second kernel boot
* without efi.
*/
if (efi_have_uv1_memmap())
return 0;
params->secure_boot = boot_params.secure_boot;
ei->efi_loader_signature = current_ei->efi_loader_signature;
ei->efi_systab = current_ei->efi_systab;
......
......@@ -496,7 +496,7 @@ void __init efi_init(void)
efi_print_memmap();
}
#if defined(CONFIG_X86_32) || defined(CONFIG_X86_UV)
#if defined(CONFIG_X86_32)
void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
{
......@@ -648,7 +648,7 @@ static inline void *efi_map_next_entry_reverse(void *entry)
*/
static void *efi_map_next_entry(void *entry)
{
if (!efi_have_uv1_memmap() && efi_enabled(EFI_64BIT)) {
if (efi_enabled(EFI_64BIT)) {
/*
* Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
* config table feature requires us to map all entries
......@@ -777,11 +777,9 @@ static void __init kexec_enter_virtual_mode(void)
/*
* We don't do virtual mode, since we don't do runtime services, on
* non-native EFI. With the UV1 memmap, we don't do runtime services in
* kexec kernel because in the initial boot something else might
* have been mapped at these virtual addresses.
* non-native EFI.
*/
if (efi_is_mixed() || efi_have_uv1_memmap()) {
if (efi_is_mixed()) {
efi_memmap_unmap();
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
return;
......@@ -832,12 +830,6 @@ static void __init kexec_enter_virtual_mode(void)
* has the runtime attribute bit set in its memory descriptor into the
* efi_pgd page table.
*
* The old method which used to update that memory descriptor with the
* virtual address obtained from ioremap() is still supported when the
* kernel is booted on SGI UV1 hardware. Same old method enabled the
* runtime services to be called without having to thunk back into
* physical mode for every invocation.
*
* The new method does a pagetable switch in a preemption-safe manner
* so that we're in a different address space when calling a runtime
* function. For function arguments passing we do copy the PUDs of the
......
......@@ -74,9 +74,6 @@ int __init efi_alloc_page_tables(void)
pud_t *pud;
gfp_t gfp_mask;
if (efi_have_uv1_memmap())
return 0;
gfp_mask = GFP_KERNEL | __GFP_ZERO;
efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
if (!efi_pgd)
......@@ -115,9 +112,6 @@ void efi_sync_low_kernel_mappings(void)
pud_t *pud_k, *pud_efi;
pgd_t *efi_pgd = efi_mm.pgd;
if (efi_have_uv1_memmap())
return;
/*
* We can share all PGD entries apart from the one entry that
* covers the EFI runtime mapping space.
......@@ -206,9 +200,6 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
unsigned npages;
pgd_t *pgd = efi_mm.pgd;
if (efi_have_uv1_memmap())
return 0;
/*
* It can happen that the physical address of new_memmap lands in memory
* which is not mapped in the EFI page table. Therefore we need to go
......@@ -315,9 +306,6 @@ void __init efi_map_region(efi_memory_desc_t *md)
unsigned long size = md->num_pages << PAGE_SHIFT;
u64 pa = md->phys_addr;
if (efi_have_uv1_memmap())
return old_map_region(md);
/*
* Make sure the 1:1 mappings are present as a catch-all for b0rked
* firmware which doesn't update all internal pointers after switching
......@@ -420,12 +408,6 @@ void __init efi_runtime_update_mappings(void)
{
efi_memory_desc_t *md;
if (efi_have_uv1_memmap()) {
if (__supported_pte_mask & _PAGE_NX)
runtime_code_page_mkexec();
return;
}
/*
* Use the EFI Memory Attribute Table for mapping permissions if it
* exists, since it is intended to supersede EFI_PROPERTIES_TABLE.
......@@ -474,9 +456,6 @@ void __init efi_runtime_update_mappings(void)
void __init efi_dump_pagetable(void)
{
#ifdef CONFIG_EFI_PGT_DUMP
if (efi_have_uv1_memmap())
ptdump_walk_pgd_level(NULL, &init_mm);
else
ptdump_walk_pgd_level(NULL, &efi_mm);
#endif
}
......@@ -849,21 +828,13 @@ efi_set_virtual_address_map(unsigned long memory_map_size,
const efi_system_table_t *systab = (efi_system_table_t *)systab_phys;
efi_status_t status;
unsigned long flags;
pgd_t *save_pgd = NULL;
if (efi_is_mixed())
return efi_thunk_set_virtual_address_map(memory_map_size,
descriptor_size,
descriptor_version,
virtual_map);
if (efi_have_uv1_memmap()) {
save_pgd = efi_uv1_memmap_phys_prolog();
if (!save_pgd)
return EFI_ABORTED;
} else {
efi_switch_mm(&efi_mm);
}
kernel_fpu_begin();
......@@ -879,9 +850,6 @@ efi_set_virtual_address_map(unsigned long memory_map_size,
/* grab the virtually remapped EFI runtime services table pointer */
efi.runtime = READ_ONCE(systab->runtime);
if (save_pgd)
efi_uv1_memmap_phys_epilog(save_pgd);
else
efi_switch_mm(efi_scratch.prev_mm);
return status;
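With the UV1 physical-mode path removed, SetVirtualAddressMap() is always issued from the dedicated EFI page tables. The control flow around the firmware call now reduces to roughly this (sketch; the interrupt-disable and status handling elided by the hunk are summarized in the comment):

	efi_switch_mm(&efi_mm);
	kernel_fpu_begin();
	/* ... call SetVirtualAddressMap() with interrupts disabled ... */
	kernel_fpu_end();
	efi_switch_mm(efi_scratch.prev_mm);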
......
......@@ -380,14 +380,6 @@ static void __init efi_unmap_pages(efi_memory_desc_t *md)
u64 pa = md->phys_addr;
u64 va = md->virt_addr;
/*
* To Do: Remove this check after adding functionality to unmap EFI boot
* services code/data regions from direct mapping area because the UV1
* memory map maps EFI regions in swapper_pg_dir.
*/
if (efi_have_uv1_memmap())
return;
/*
* EFI mixed mode has all RAM mapped to access arguments while making
* EFI runtime calls, hence don't unmap EFI boot services code/data
......@@ -558,16 +550,6 @@ int __init efi_reuse_config(u64 tables, int nr_tables)
return ret;
}
static const struct dmi_system_id sgi_uv1_dmi[] __initconst = {
{ NULL, "SGI UV1",
{ DMI_MATCH(DMI_PRODUCT_NAME, "Stoutland Platform"),
DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
DMI_MATCH(DMI_BIOS_VENDOR, "SGI.COM"),
}
},
{ } /* NULL entry stops DMI scanning */
};
void __init efi_apply_memmap_quirks(void)
{
/*
......@@ -579,17 +561,6 @@ void __init efi_apply_memmap_quirks(void)
pr_info("Setup done, disabling due to 32/64-bit mismatch\n");
efi_memmap_unmap();
}
/* UV2+ BIOS has a fix for this issue. UV1 still needs the quirk. */
if (dmi_check_system(sgi_uv1_dmi)) {
if (IS_ENABLED(CONFIG_X86_UV)) {
set_bit(EFI_UV1_MEMMAP, &efi.flags);
} else {
pr_warn("EFI runtime disabled, needs CONFIG_X86_UV=y on UV1\n");
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
efi_memmap_unmap();
}
}
}
/*
......@@ -723,8 +694,6 @@ void efi_recover_from_page_fault(unsigned long phys_addr)
/*
* Make sure that an efi runtime service caused the page fault.
* "efi_mm" cannot be used to check if the page fault had occurred
* in the firmware context because the UV1 memmap doesn't use efi_pgd.
*/
if (efi_rts_work.efi_rts_id == EFI_NONE)
return;
......
......@@ -30,17 +30,7 @@ static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
*/
return BIOS_STATUS_UNIMPLEMENTED;
/*
* If EFI_UV1_MEMMAP is set, we need to fall back to using our old EFI
* callback method, which uses efi_call() directly, with the kernel page tables:
*/
if (unlikely(efi_enabled(EFI_UV1_MEMMAP))) {
kernel_fpu_begin();
ret = efi_call((void *)__va(tab->function), (u64)which, a1, a2, a3, a4, a5);
kernel_fpu_end();
} else {
ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
}
return ret;
}
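Every UV BIOS call now funnels through efi_call_virt_pointer(), which brackets the firmware call with the arch hooks shown earlier in this series. A simplified sketch of that macro, based on include/linux/efi.h (the flag save/check steps are omitted here):

	#define efi_call_virt_pointer(p, f, args...)			\
	({								\
		efi_status_t __s;					\
		arch_efi_call_virt_setup();				\
		__s = arch_efi_call_virt(p, f, args);			\
		arch_efi_call_virt_teardown();				\
		__s;							\
	})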
......@@ -209,164 +199,3 @@ int uv_bios_init(void)
pr_info("UV: UVsystab: Revision:%x\n", uv_systab->revision);
return 0;
}
static void __init early_code_mapping_set_exec(int executable)
{
efi_memory_desc_t *md;
if (!(__supported_pte_mask & _PAGE_NX))
return;
/* Make EFI service code area executable */
for_each_efi_memory_desc(md) {
if (md->type == EFI_RUNTIME_SERVICES_CODE ||
md->type == EFI_BOOT_SERVICES_CODE)
efi_set_executable(md, executable);
}
}
void __init efi_uv1_memmap_phys_epilog(pgd_t *save_pgd)
{
/*
* After the lock is released, the original page table is restored.
*/
int pgd_idx, i;
int nr_pgds;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
if (!pgd_present(*pgd))
continue;
for (i = 0; i < PTRS_PER_P4D; i++) {
p4d = p4d_offset(pgd,
pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
if (!p4d_present(*p4d))
continue;
pud = (pud_t *)p4d_page_vaddr(*p4d);
pud_free(&init_mm, pud);
}
p4d = (p4d_t *)pgd_page_vaddr(*pgd);
p4d_free(&init_mm, p4d);
}
kfree(save_pgd);
__flush_tlb_all();
early_code_mapping_set_exec(0);
}
pgd_t * __init efi_uv1_memmap_phys_prolog(void)
{
unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
pgd_t *save_pgd, *pgd_k, *pgd_efi;
p4d_t *p4d, *p4d_k, *p4d_efi;
pud_t *pud;
int pgd;
int n_pgds, i, j;
early_code_mapping_set_exec(1);
n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
if (!save_pgd)
return NULL;
/*
* Build 1:1 identity mapping for UV1 memmap usage. Note that
* PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
* it is PUD_SIZE aligned with KASLR enabled. So for a given physical
* address X, pud_index(X) != pud_index(__va(X)); we can only copy the
* PUD entry of __va(X) to fill in the pud entry of X to build the 1:1 mapping.
* This means here we can only reuse the PMD tables of the direct mapping.
*/
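	/*
	 * Worked example (editorial note, not in the original source): with
	 * 4-level paging, pud_index(a) = (a >> 30) & 511.  If KASLR picks a
	 * PAGE_OFFSET that is PUD- but not PGDIR-aligned, say base + 13 GiB
	 * with base PGDIR-aligned, then for X = 2 GiB: pud_index(X) = 2 but
	 * pud_index(__va(X)) = 15, so the two addresses land in different
	 * PUD slots and only the PMD level of the direct mapping is shared.
	 */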
for (pgd = 0; pgd < n_pgds; pgd++) {
addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
pgd_efi = pgd_offset_k(addr_pgd);
save_pgd[pgd] = *pgd_efi;
p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
if (!p4d) {
pr_err("Failed to allocate p4d table!\n");
goto out;
}
for (i = 0; i < PTRS_PER_P4D; i++) {
addr_p4d = addr_pgd + i * P4D_SIZE;
p4d_efi = p4d + p4d_index(addr_p4d);
pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
if (!pud) {
pr_err("Failed to allocate pud table!\n");
goto out;
}
for (j = 0; j < PTRS_PER_PUD; j++) {
addr_pud = addr_p4d + j * PUD_SIZE;
if (addr_pud > (max_pfn << PAGE_SHIFT))
break;
vaddr = (unsigned long)__va(addr_pud);
pgd_k = pgd_offset_k(vaddr);
p4d_k = p4d_offset(pgd_k, vaddr);
pud[j] = *pud_offset(p4d_k, vaddr);
}
}
pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX;
}
__flush_tlb_all();
return save_pgd;
out:
efi_uv1_memmap_phys_epilog(save_pgd);
return NULL;
}
void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
u32 type, u64 attribute)
{
unsigned long last_map_pfn;
if (type == EFI_MEMORY_MAPPED_IO)
return ioremap(phys_addr, size);
last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size,
PAGE_KERNEL);
if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
unsigned long top = last_map_pfn << PAGE_SHIFT;
efi_ioremap(top, size - (top - phys_addr), type, attribute);
}
if (!(attribute & EFI_MEMORY_WB))
efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);
return (void __iomem *)__va(phys_addr);
}
static int __init arch_parse_efi_cmdline(char *str)
{
if (!str) {
pr_warn("need at least one option\n");
return -EINVAL;
}
if (!efi_is_mixed() && parse_option_str(str, "old_map"))
set_bit(EFI_UV1_MEMMAP, &efi.flags);
return 0;
}
early_param("efi", arch_parse_efi_cmdline);
......@@ -74,7 +74,6 @@ static void uv_rtc_send_IPI(int cpu)
apicid = cpu_physical_id(cpu);
pnode = uv_apicid_to_pnode(apicid);
apicid |= uv_apicid_hibits;
val = (1UL << UVH_IPI_INT_SEND_SHFT) |
(apicid << UVH_IPI_INT_APIC_ID_SHFT) |
(X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT);
......@@ -85,10 +84,7 @@ static void uv_rtc_send_IPI(int cpu)
/* Check for an RTC interrupt pending */
static int uv_intr_pending(int pnode)
{
if (is_uv1_hub())
return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
UV1H_EVENT_OCCURRED0_RTC1_MASK;
else if (is_uvx_hub())
if (is_uvx_hub())
return uv_read_global_mmr64(pnode, UVXH_EVENT_OCCURRED2) &
UVXH_EVENT_OCCURRED2_RTC_1_MASK;
return 0;
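After dropping the UV1 event register, the pending check reads only the UVX register; the function that results from this hunk is:

	/* Check for an RTC interrupt pending */
	static int uv_intr_pending(int pnode)
	{
		if (is_uvx_hub())
			return uv_read_global_mmr64(pnode, UVXH_EVENT_OCCURRED2) &
				UVXH_EVENT_OCCURRED2_RTC_1_MASK;
		return 0;
	}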
......@@ -98,17 +94,13 @@ static int uv_intr_pending(int pnode)
static int uv_setup_intr(int cpu, u64 expires)
{
u64 val;
unsigned long apicid = cpu_physical_id(cpu) | uv_apicid_hibits;
unsigned long apicid = cpu_physical_id(cpu);
int pnode = uv_cpu_to_pnode(cpu);
uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
UVH_RTC1_INT_CONFIG_M_MASK);
uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);
if (is_uv1_hub())
uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
UV1H_EVENT_OCCURRED0_RTC1_MASK);
else
uv_write_global_mmr64(pnode, UVXH_EVENT_OCCURRED2_ALIAS,
UVXH_EVENT_OCCURRED2_RTC_1_MASK);
......