Commit 95328de5 authored by steve.wahl@hpe.com, committed by Thomas Gleixner

x86/platform/uv: Remove support for UV1 platform from uv_tlb

UV1 is no longer supported.
Signed-off-by: Steve Wahl <steve.wahl@hpe.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20200713212954.728022415@hpe.com
parent 8b3c9b16
@@ -23,18 +23,6 @@
 static struct bau_operations ops __ro_after_init;
 
-/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
-static const int timeout_base_ns[] = {
-	20,
-	160,
-	1280,
-	10240,
-	81920,
-	655360,
-	5242880,
-	167772160
-};
-
 static int timeout_us;
 static bool nobau = true;
 static int nobau_perm;
@@ -510,70 +498,6 @@ static inline void end_uvhub_quiesce(struct bau_control *hmaster)
 	atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
 }
 
-static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
-{
-	unsigned long descriptor_status;
-
-	descriptor_status = uv_read_local_mmr(mmr_offset);
-	descriptor_status >>= right_shift;
-	descriptor_status &= UV_ACT_STATUS_MASK;
-	return descriptor_status;
-}
-
-/*
- * Wait for completion of a broadcast software ack message
- * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
- */
-static int uv1_wait_completion(struct bau_desc *bau_desc,
-				struct bau_control *bcp, long try)
-{
-	unsigned long descriptor_status;
-	cycles_t ttm;
-	u64 mmr_offset = bcp->status_mmr;
-	int right_shift = bcp->status_index;
-	struct ptc_stats *stat = bcp->statp;
-
-	descriptor_status = uv1_read_status(mmr_offset, right_shift);
-	/* spin on the status MMR, waiting for it to go idle */
-	while ((descriptor_status != DS_IDLE)) {
-		/*
-		 * Our software ack messages may be blocked because
-		 * there are no swack resources available.  As long
-		 * as none of them has timed out hardware will NACK
-		 * our message and its state will stay IDLE.
-		 */
-		if (descriptor_status == DS_SOURCE_TIMEOUT) {
-			stat->s_stimeout++;
-			return FLUSH_GIVEUP;
-		} else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
-			stat->s_dtimeout++;
-			ttm = get_cycles();
-			/*
-			 * Our retries may be blocked by all destination
-			 * swack resources being consumed, and a timeout
-			 * pending.  In that case hardware returns the
-			 * ERROR that looks like a destination timeout.
-			 */
-			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
-				bcp->conseccompletes = 0;
-				return FLUSH_RETRY_PLUGGED;
-			}
-			bcp->conseccompletes = 0;
-			return FLUSH_RETRY_TIMEOUT;
-		} else {
-			/*
-			 * descriptor_status is still BUSY
-			 */
-			cpu_relax();
-		}
-		descriptor_status = uv1_read_status(mmr_offset, right_shift);
-	}
-	bcp->conseccompletes++;
-	return FLUSH_COMPLETE;
-}
-
 /*
  * UV2 could have an extra bit of status in the ACTIVATION_STATUS_2 register.
  * But not currently used.
@@ -852,24 +776,6 @@ static void record_send_stats(cycles_t time1, cycles_t time2,
 	}
 }
 
-/*
- * Because of a uv1 hardware bug only a limited number of concurrent
- * requests can be made.
- */
-static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
-{
-	spinlock_t *lock = &hmaster->uvhub_lock;
-	atomic_t *v;
-
-	v = &hmaster->active_descriptor_count;
-	if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {
-		stat->s_throttles++;
-		do {
-			cpu_relax();
-		} while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
-	}
-}
-
 /*
  * Handle the completion status of a message send.
  */
@@ -899,50 +805,30 @@ static int uv_flush_send_and_wait(struct cpumask *flush_mask,
 {
 	int seq_number = 0;
 	int completion_stat = 0;
-	int uv1 = 0;
 	long try = 0;
 	unsigned long index;
 	cycles_t time1;
 	cycles_t time2;
 	struct ptc_stats *stat = bcp->statp;
 	struct bau_control *hmaster = bcp->uvhub_master;
-	struct uv1_bau_msg_header *uv1_hdr = NULL;
 	struct uv2_3_bau_msg_header *uv2_3_hdr = NULL;
 
-	if (bcp->uvhub_version == UV_BAU_V1) {
-		uv1 = 1;
-		uv1_throttle(hmaster, stat);
-	}
-
 	while (hmaster->uvhub_quiesce)
 		cpu_relax();
 
 	time1 = get_cycles();
-	if (uv1)
-		uv1_hdr = &bau_desc->header.uv1_hdr;
-	else
-		/* uv2 and uv3 */
-		uv2_3_hdr = &bau_desc->header.uv2_3_hdr;
+	uv2_3_hdr = &bau_desc->header.uv2_3_hdr;
 
 	do {
 		if (try == 0) {
-			if (uv1)
-				uv1_hdr->msg_type = MSG_REGULAR;
-			else
-				uv2_3_hdr->msg_type = MSG_REGULAR;
+			uv2_3_hdr->msg_type = MSG_REGULAR;
 			seq_number = bcp->message_number++;
 		} else {
-			if (uv1)
-				uv1_hdr->msg_type = MSG_RETRY;
-			else
-				uv2_3_hdr->msg_type = MSG_RETRY;
+			uv2_3_hdr->msg_type = MSG_RETRY;
 			stat->s_retry_messages++;
 		}
 
-		if (uv1)
-			uv1_hdr->sequence = seq_number;
-		else
-			uv2_3_hdr->sequence = seq_number;
+		uv2_3_hdr->sequence = seq_number;
 		index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
 		bcp->send_message = get_cycles();
@@ -1162,7 +1048,6 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 		address = TLB_FLUSH_ALL;
 
 	switch (bcp->uvhub_version) {
-	case UV_BAU_V1:
 	case UV_BAU_V2:
 	case UV_BAU_V3:
 		bau_desc->payload.uv1_2_3.address = address;
@@ -1300,7 +1185,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_uv_bau_message)
 		if (bcp->uvhub_version == UV_BAU_V2)
 			process_uv2_message(&msgdesc, bcp);
 		else
-			/* no error workaround for uv1 or uv3 */
+			/* no error workaround for uv3 */
 			bau_process_message(&msgdesc, bcp, 1);
 
 		msg++;
@@ -1350,12 +1235,7 @@ static void __init enable_timeouts(void)
 		mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
 		mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
 		write_mmr_misc_control(pnode, mmr_image);
-		/*
-		 * UV1:
-		 * Subsequent reversals of the timebase bit (3) cause an
-		 * immediate timeout of one or all INTD resources as
-		 * indicated in bits 2:0 (7 causes all of them to timeout).
-		 */
 		mmr_image |= (1L << SOFTACK_MSHIFT);
 		if (is_uv2_hub()) {
 			/* do not touch the legacy mode bit */
@@ -1711,14 +1591,12 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
 {
 	int i;
 	int cpu;
-	int uv1 = 0;
 	unsigned long gpa;
 	unsigned long m;
 	unsigned long n;
 	size_t dsize;
 	struct bau_desc *bau_desc;
 	struct bau_desc *bd2;
-	struct uv1_bau_msg_header *uv1_hdr;
 	struct uv2_3_bau_msg_header *uv2_3_hdr;
 	struct bau_control *bcp;
@@ -1733,8 +1611,6 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
 	gpa = uv_gpa(bau_desc);
 	n = uv_gpa_to_gnode(gpa);
 	m = ops.bau_gpa_to_offset(gpa);
-	if (is_uv1_hub())
-		uv1 = 1;
 
 	/* the 14-bit pnode */
 	write_mmr_descriptor_base(pnode,
@@ -1746,37 +1622,15 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
 	 */
 	for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
 		memset(bd2, 0, sizeof(struct bau_desc));
-		if (uv1) {
-			uv1_hdr = &bd2->header.uv1_hdr;
-			uv1_hdr->swack_flag = 1;
-			/*
-			 * The base_dest_nasid set in the message header
-			 * is the nasid of the first uvhub in the partition.
-			 * The bit map will indicate destination pnode numbers
-			 * relative to that base. They may not be consecutive
-			 * if nasid striding is being used.
-			 */
-			uv1_hdr->base_dest_nasid =
-						UV_PNODE_TO_NASID(base_pnode);
-			uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
-			uv1_hdr->command = UV_NET_ENDPOINT_INTD;
-			uv1_hdr->int_both = 1;
-			/*
-			 * all others need to be set to zero:
-			 * fairness chaining multilevel count replied_to
-			 */
-		} else {
-			/*
-			 * BIOS uses legacy mode, but uv2 and uv3 hardware always
-			 * uses native mode for selective broadcasts.
-			 */
-			uv2_3_hdr = &bd2->header.uv2_3_hdr;
-			uv2_3_hdr->swack_flag = 1;
-			uv2_3_hdr->base_dest_nasid =
-						UV_PNODE_TO_NASID(base_pnode);
-			uv2_3_hdr->dest_subnodeid = UV_LB_SUBNODEID;
-			uv2_3_hdr->command = UV_NET_ENDPOINT_INTD;
-		}
+		/*
+		 * BIOS uses legacy mode, but uv2 and uv3 hardware always
+		 * uses native mode for selective broadcasts.
+		 */
+		uv2_3_hdr = &bd2->header.uv2_3_hdr;
+		uv2_3_hdr->swack_flag = 1;
+		uv2_3_hdr->base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
+		uv2_3_hdr->dest_subnodeid = UV_LB_SUBNODEID;
+		uv2_3_hdr->command = UV_NET_ENDPOINT_INTD;
 	}
 	for_each_present_cpu(cpu) {
 		if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
@@ -1861,7 +1715,7 @@ static void __init init_uvhub(int uvhub, int vector, int base_pnode)
 	 * The below initialization can't be in firmware because the
 	 * messaging IRQ will be determined by the OS.
 	 */
-	apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
+	apicid = uvhub_to_first_apicid(uvhub);
 	write_mmr_data_config(pnode, ((apicid << 32) | vector));
 }
@@ -1874,33 +1728,20 @@ static int calculate_destination_timeout(void)
 {
 	unsigned long mmr_image;
 	int mult1;
-	int mult2;
-	int index;
 	int base;
 	int ret;
-	unsigned long ts_ns;
 
-	if (is_uv1_hub()) {
-		mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
-		mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
-		index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
-		mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
-		mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
-		ts_ns = timeout_base_ns[index];
-		ts_ns *= (mult1 * mult2);
-		ret = ts_ns / 1000;
-	} else {
-		/* same destination timeout for uv2 and uv3 */
-		/* 4 bits  0/1 for 10/80us base, 3 bits of multiplier */
-		mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
-		mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
-		if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
-			base = 80;
-		else
-			base = 10;
-		mult1 = mmr_image & UV2_ACK_MASK;
-		ret = mult1 * base;
-	}
+	/* same destination timeout for uv2 and uv3 */
+	/* 4 bits  0/1 for 10/80us base, 3 bits of multiplier */
+	mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
+	mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
+	if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
+		base = 80;
+	else
+		base = 10;
+	mult1 = mmr_image & UV2_ACK_MASK;
+	ret = mult1 * base;
+
 	return ret;
 }
@@ -2039,9 +1880,7 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
 		bcp->cpus_in_socket = sdp->num_cpus;
 		bcp->socket_master = *smasterp;
 		bcp->uvhub = bdp->uvhub;
-		if (is_uv1_hub())
-			bcp->uvhub_version = UV_BAU_V1;
-		else if (is_uv2_hub())
+		if (is_uv2_hub())
 			bcp->uvhub_version = UV_BAU_V2;
 		else if (is_uv3_hub())
 			bcp->uvhub_version = UV_BAU_V3;
@@ -2123,7 +1962,7 @@ static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
 	struct uvhub_desc *uvhub_descs;
 	unsigned char *uvhub_mask = NULL;
 
-	if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
+	if (is_uv3_hub() || is_uv2_hub())
 		timeout_us = calculate_destination_timeout();
 
 	uvhub_descs = kcalloc(nuvhubs, sizeof(struct uvhub_desc), GFP_KERNEL);
@@ -2151,17 +1990,6 @@ static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
 	return 1;
 }
 
-static const struct bau_operations uv1_bau_ops __initconst = {
-	.bau_gpa_to_offset       = uv_gpa_to_offset,
-	.read_l_sw_ack           = read_mmr_sw_ack,
-	.read_g_sw_ack           = read_gmmr_sw_ack,
-	.write_l_sw_ack          = write_mmr_sw_ack,
-	.write_g_sw_ack          = write_gmmr_sw_ack,
-	.write_payload_first     = write_mmr_payload_first,
-	.write_payload_last      = write_mmr_payload_last,
-	.wait_completion         = uv1_wait_completion,
-};
-
 static const struct bau_operations uv2_3_bau_ops __initconst = {
 	.bau_gpa_to_offset       = uv_gpa_to_offset,
 	.read_l_sw_ack           = read_mmr_sw_ack,
@@ -2206,8 +2034,6 @@ static int __init uv_bau_init(void)
 		ops = uv2_3_bau_ops;
 	else if (is_uv2_hub())
 		ops = uv2_3_bau_ops;
-	else if (is_uv1_hub())
-		ops = uv1_bau_ops;
 
 	nuvhubs = uv_num_possible_blades();
 	if (nuvhubs < 2) {
@@ -2228,7 +2054,7 @@ static int __init uv_bau_init(void)
 	}
 
 	/* software timeouts are not supported on UV4 */
-	if (is_uv3_hub() || is_uv2_hub() || is_uv1_hub())
+	if (is_uv3_hub() || is_uv2_hub())
 		enable_timeouts();
 
 	if (init_per_cpu(nuvhubs, uv_base_pnode)) {
@@ -2251,8 +2077,7 @@ static int __init uv_bau_init(void)
 			val = 1L << 63;
 			write_gmmr_activation(pnode, val);
 			mmr = 1; /* should be 1 to broadcast to both sockets */
-			if (!is_uv1_hub())
-				write_mmr_data_broadcast(pnode, mmr);
+			write_mmr_data_broadcast(pnode, mmr);
 		}
 	}
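
For reference, the destination-timeout arithmetic that survives this change (the old UV2/UV3 branch of calculate_destination_timeout(), now unconditional) can be read as a small standalone sketch. The shift and mask values below are illustrative assumptions chosen only to make the snippet self-contained; only the arithmetic mirrors the kernel code, which reads the field from UVH_LB_BAU_MISC_CONTROL via uv_read_local_mmr().

#include <stdio.h>

/*
 * Assumed stand-ins for the soft-ack timeout field layout described by the
 * in-tree comment: "4 bits 0/1 for 10/80us base, 3 bits of multiplier".
 * The real UV_SA_SHFT/UV_SA_MASK/UV2_ACK_* values live in the UV headers.
 */
#define UV_SA_SHFT          16                      /* assumed field position */
#define UV_SA_MASK          (0xfUL << UV_SA_SHFT)   /* 4-bit field */
#define UV2_ACK_UNITS_SHFT  3                       /* units bit: 0 = 10us, 1 = 80us base */
#define UV2_ACK_MASK        0x7UL                   /* 3-bit multiplier */

/* Same arithmetic as the UV2/UV3 path kept by the commit. */
static int destination_timeout_us(unsigned long misc_control)
{
	unsigned long field = (misc_control & UV_SA_MASK) >> UV_SA_SHFT;
	int base = (field & (1UL << UV2_ACK_UNITS_SHFT)) ? 80 : 10;
	int mult1 = field & UV2_ACK_MASK;

	return mult1 * base;
}

int main(void)
{
	/* Units bit set (80us base) with a multiplier of 3 gives 240us. */
	unsigned long image = ((1UL << UV2_ACK_UNITS_SHFT) | 3UL) << UV_SA_SHFT;

	printf("destination timeout: %d us\n", destination_timeout_us(image));
	return 0;
}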