Commit dd25cfab authored by Jakub Kicinski

Merge branch 'net-ipa-remaining-ipa-v5-0-support'

Alex Elder says:

====================
net: ipa: remaining IPA v5.0 support

This series includes almost all remaining IPA code changes required
to support IPA v5.0.  IPA register definitions and configuration
data for IPA v5.0 will be sent later (soon).  Note that the GSI
register definitions still require work.  GSI for IPA v5.0 supports
up to 256 (rather than 32) channels, and this changes the way GSI
register offsets are calculated.  A few GSI register fields also
change.

The first patch in this series increases the number of IPA endpoints
supported by the driver from 32 to 36.  The second widens the
destination field of the IP_PACKET_INIT immediate command so it can
represent up to 256 endpoints rather than just 32 (see the first
sketch below).  The third adds definitions for a few IPA registers
and fields that first appear in IPA v5.0.

The next two patches update the code that handles the router and
filter table caches.  Previously these were referred to as "hashed"
tables; with IPA v5.0 the IPv4 and IPv6 tables are combined into one
"unified" table.  The sixth and seventh patches add support for a new
pulse generator, which allows time periods to be specified with a
wider range of clock resolutions (see the second sketch below).  The
last patch defines two new memory regions that were not previously
used.
====================

Link: https://lore.kernel.org/r/20230130210158.4126129-1-elder@linaro.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 71af6a2d 5157d6bf
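
Two illustrative sketches follow; they are not part of the series or
of the kernel sources, just standalone C examples of the ideas the
cover letter describes, with locally defined stand-ins for kernel
helpers.

The first sketch shows why the IP_PACKET_INIT destination field had
to be widened: a 5-bit field can only name endpoints 0-31, while the
full byte covers 0-255.  GENMASK() is redefined here so the example
builds in user space; the function and macro names are hypothetical.

/* Sketch: 5-bit destination-endpoint field (pre-v5.0) vs. full byte (v5.0+) */
#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)           (((1u << ((h) - (l) + 1)) - 1) << (l))
#define DEST_ENDPOINT_FMASK     GENMASK(4, 0)   /* 5 bits: endpoint IDs 0..31 */

static unsigned int encode_dest_endpoint(unsigned int ipa_version,
                                         uint8_t endpoint_id)
{
        if (ipa_version < 5)
                return endpoint_id & DEST_ENDPOINT_FMASK;  /* masked to 5 bits */

        return endpoint_id;     /* v5.0+: the whole byte holds the endpoint ID */
}

int main(void)
{
        /* Endpoint 35 is only representable once the full byte is used */
        printf("pre-v5.0: %u\n", encode_dest_endpoint(4, 35));  /* truncated to 3 */
        printf("v5.0+:    %u\n", encode_dest_endpoint(5, 35));  /* stays 35 */

        return 0;
}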
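
The second sketch illustrates the Qtime encoding the pulse-generator
patches build on: a time limit becomes a tick count plus a generator
selector, and the finest granularity whose tick count still fits in
the register field is chosen.  The 100 usec / 1 msec / 10 msec tick
periods match the ones the series configures; the 5-bit field width
and the helper name are made up for the example.

/* Sketch: pick a pulse generator and tick count for a microsecond period */
#include <stdio.h>

static unsigned int qtime_ticks(unsigned int microseconds, unsigned int max,
                                unsigned int *select)
{
        /* Tick periods of pulse generators 0..2, in microseconds */
        static const unsigned int gran_us[] = { 100, 1000, 10000 };
        unsigned int which;
        unsigned int ticks = 0;

        for (which = 0; which < 3; which++) {
                /* Round to the nearest tick at this granularity */
                ticks = (microseconds + gran_us[which] / 2) / gran_us[which];
                if (ticks <= max)
                        break;
        }
        *select = which < 3 ? which : 2;  /* fall back to the coarsest generator */

        return ticks;
}

int main(void)
{
        unsigned int select;
        unsigned int ticks;

        /* A 25 millisecond limit with a 5-bit (max 31) tick field */
        ticks = qtime_ticks(25 * 1000, 31, &select);
        printf("pulse generator %u, %u ticks\n", select, ticks);  /* 1, 25 */

        return 0;
}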
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2019-2022 Linaro Ltd.
* Copyright (C) 2019-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -94,11 +94,11 @@ struct ipa_cmd_register_write {
/* IPA_CMD_IP_PACKET_INIT */
struct ipa_cmd_ip_packet_init {
u8 dest_endpoint;
u8 dest_endpoint; /* Full 8 bits used for IPA v5.0+ */
u8 reserved[7];
};
/* Field masks for ipa_cmd_ip_packet_init dest_endpoint field */
/* Field mask for ipa_cmd_ip_packet_init dest_endpoint field (unused v5.0+) */
#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK GENMASK(4, 0)
/* IPA_CMD_DMA_SHARED_MEM */
@@ -157,9 +157,14 @@ static void ipa_cmd_validate_build(void)
BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) !=
field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK));
/* Valid endpoint numbers must fit in the IP packet init command */
BUILD_BUG_ON(field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK) <
IPA_ENDPOINT_MAX - 1);
/* Prior to IPA v5.0, we supported no more than 32 endpoints,
* and this was reflected in some 5-bit fields that held
* endpoint numbers. Starting with IPA v5.0, the widths of
* these fields were extended to 8 bits, meaning up to 256
* endpoints. If the driver claims to support more than
* that it's an error.
*/
BUILD_BUG_ON(IPA_ENDPOINT_MAX - 1 > U8_MAX);
}
/* Validate a memory region holding a table */
@@ -290,7 +295,11 @@ static bool ipa_cmd_register_write_valid(struct ipa *ipa)
* offset will fit in a register write IPA immediate command.
*/
if (ipa_table_hash_support(ipa)) {
reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
if (ipa->version < IPA_VERSION_5_0)
reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
else
reg = ipa_reg(ipa, FILT_ROUT_CACHE_FLUSH);
offset = ipa_reg_offset(reg);
name = "filter/route hash flush";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
@@ -486,8 +495,13 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
payload = &cmd_payload->ip_packet_init;
payload->dest_endpoint = u8_encode_bits(endpoint_id,
IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);
if (ipa->version < IPA_VERSION_5_0) {
payload->dest_endpoint =
u8_encode_bits(endpoint_id,
IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);
} else {
payload->dest_endpoint = endpoint_id;
}
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
opcode);
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2019-2022 Linaro Ltd.
* Copyright (C) 2019-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -922,64 +922,72 @@ static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
iowrite32(val, ipa->reg_virt + offset);
}
/* For IPA v4.5+, times are expressed using Qtime. The AP uses one of two
* pulse generators (0 and 1) to measure elapsed time. In ipa_qtime_config()
* they're configured to have granularity 100 usec and 1 msec, respectively.
*
* The return value is the positive or negative Qtime value to use to
* express the (microsecond) time provided. A positive return value
* means pulse generator 0 can be used; otherwise use pulse generator 1.
/* For IPA v4.5+, times are expressed using Qtime. A time is represented
* at one of several available granularities, which are configured in
* ipa_qtime_config(). Three (or, starting with IPA v5.0, four) pulse
* generators are set up with different "tick" periods. A Qtime value
* encodes a tick count along with an indication of a pulse generator
* (which has a fixed tick period). Two pulse generators are always
* available to the AP; a third is available starting with IPA v5.0.
* This function determines which pulse generator most accurately
* represents the time period provided, and returns the tick count to
* use to represent that time.
*/
static int ipa_qtime_val(u32 microseconds, u32 max)
{
u32 val;
/* Use 100 microsecond granularity if possible */
val = DIV_ROUND_CLOSEST(microseconds, 100);
if (val <= max)
return (int)val;
/* Have to use pulse generator 1 (millisecond granularity) */
val = DIV_ROUND_CLOSEST(microseconds, 1000);
WARN_ON(val > max);
static u32
ipa_qtime_val(struct ipa *ipa, u32 microseconds, u32 max, u32 *select)
{
u32 which = 0;
u32 ticks;
/* Pulse generator 0 has 100 microsecond granularity */
ticks = DIV_ROUND_CLOSEST(microseconds, 100);
if (ticks <= max)
goto out;
/* Pulse generator 1 has millisecond granularity */
which = 1;
ticks = DIV_ROUND_CLOSEST(microseconds, 1000);
if (ticks <= max)
goto out;
if (ipa->version >= IPA_VERSION_5_0) {
/* Pulse generator 2 has 10 millisecond granularity */
which = 2;
ticks = DIV_ROUND_CLOSEST(microseconds, 10000);
}
WARN_ON(ticks > max);
out:
*select = which;
return (int)-val;
return ticks;
}
/* Encode the aggregation timer limit (microseconds) based on IPA version */
static u32 aggr_time_limit_encode(struct ipa *ipa, const struct ipa_reg *reg,
u32 microseconds)
{
u32 ticks;
u32 max;
u32 val;
if (!microseconds)
return 0; /* Nothing to compute if time limit is 0 */
max = ipa_reg_field_max(reg, TIME_LIMIT);
if (ipa->version >= IPA_VERSION_4_5) {
u32 gran_sel;
int ret;
/* Compute the Qtime limit value to use */
ret = ipa_qtime_val(microseconds, max);
if (ret < 0) {
val = -ret;
gran_sel = ipa_reg_bit(reg, AGGR_GRAN_SEL);
} else {
val = ret;
gran_sel = 0;
}
u32 select;
ticks = ipa_qtime_val(ipa, microseconds, max, &select);
return gran_sel | ipa_reg_encode(reg, TIME_LIMIT, val);
return ipa_reg_encode(reg, AGGR_GRAN_SEL, select) |
ipa_reg_encode(reg, TIME_LIMIT, ticks);
}
/* We program aggregation granularity in ipa_hardware_config() */
val = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
WARN(val > max, "aggr_time_limit too large (%u > %u usec)\n",
ticks = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
WARN(ticks > max, "aggr_time_limit too large (%u > %u usec)\n",
microseconds, max * IPA_AGGR_GRANULARITY);
return ipa_reg_encode(reg, TIME_LIMIT, val);
return ipa_reg_encode(reg, TIME_LIMIT, ticks);
}
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
@@ -1050,20 +1058,13 @@ static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
if (ipa->version >= IPA_VERSION_4_5) {
u32 max = ipa_reg_field_max(reg, TIMER_LIMIT);
u32 gran_sel;
int ret;
/* Compute the Qtime limit value to use */
ret = ipa_qtime_val(microseconds, max);
if (ret < 0) {
val = -ret;
gran_sel = ipa_reg_bit(reg, TIMER_GRAN_SEL);
} else {
val = ret;
gran_sel = 0;
}
u32 select;
u32 ticks;
return gran_sel | ipa_reg_encode(reg, TIMER_LIMIT, val);
ticks = ipa_qtime_val(ipa, microseconds, max, &select);
return ipa_reg_encode(reg, TIMER_GRAN_SEL, select) |
ipa_reg_encode(reg, TIMER_LIMIT, ticks);
}
/* Use 64 bit arithmetic to avoid overflow */
@@ -1986,6 +1987,7 @@ int ipa_endpoint_config(struct ipa *ipa)
struct device *dev = &ipa->pdev->dev;
const struct ipa_reg *reg;
u32 endpoint_id;
u32 hw_limit;
u32 tx_count;
u32 rx_count;
u32 rx_base;
@@ -2031,6 +2033,14 @@ int ipa_endpoint_config(struct ipa *ipa)
return -EINVAL;
}
/* Until IPA v5.0, the max endpoint ID was 32 */
hw_limit = ipa->version < IPA_VERSION_5_0 ? 32 : U8_MAX + 1;
if (limit > hw_limit) {
dev_err(dev, "unexpected endpoint count, %u > %u\n",
limit, hw_limit);
return -EINVAL;
}
/* Allocate and initialize the available endpoint bitmap */
ipa->available = bitmap_zalloc(limit, GFP_KERNEL);
if (!ipa->available)
......
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2019-2022 Linaro Ltd.
* Copyright (C) 2019-2023 Linaro Ltd.
*/
#ifndef _IPA_ENDPOINT_H_
#define _IPA_ENDPOINT_H_
@@ -38,7 +38,7 @@ enum ipa_endpoint_name {
IPA_ENDPOINT_COUNT, /* Number of names (not an index) */
};
#define IPA_ENDPOINT_MAX 32 /* Max supported by driver */
#define IPA_ENDPOINT_MAX 36 /* Max supported by driver */
/**
* struct ipa_endpoint_tx - Endpoint configuration for TX endpoints
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2018-2022 Linaro Ltd.
* Copyright (C) 2018-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -390,7 +390,12 @@ static void ipa_qtime_config(struct ipa *ipa)
reg = ipa_reg(ipa, TIMERS_PULSE_GRAN_CFG);
val = ipa_reg_encode(reg, PULSE_GRAN_0, IPA_GRAN_100_US);
val |= ipa_reg_encode(reg, PULSE_GRAN_1, IPA_GRAN_1_MS);
val |= ipa_reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_1_MS);
if (ipa->version >= IPA_VERSION_5_0) {
val |= ipa_reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_10_MS);
val |= ipa_reg_encode(reg, PULSE_GRAN_3, IPA_GRAN_10_MS);
} else {
val |= ipa_reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_1_MS);
}
iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
@@ -432,6 +437,11 @@ static void ipa_hardware_config_hashing(struct ipa *ipa)
{
const struct ipa_reg *reg;
/* Other than IPA v4.2, all versions enable "hashing". Starting
* with IPA v5.0, the filter and router tables are implemented
* differently, but the default configuration enables this feature
* (now referred to as "caching"), so there's nothing to do here.
*/
if (ipa->version != IPA_VERSION_4_2)
return;
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2019-2022 Linaro Ltd.
* Copyright (C) 2019-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -163,6 +163,12 @@ static bool ipa_mem_id_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
return false;
break;
case IPA_MEM_AP_V4_FILTER:
case IPA_MEM_AP_V6_FILTER:
if (version != IPA_VERSION_5_0)
return false;
break;
case IPA_MEM_NAT_TABLE:
case IPA_MEM_STATS_FILTER_ROUTE:
if (version < IPA_VERSION_4_5)
......
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2019-2021 Linaro Ltd.
* Copyright (C) 2019-2023 Linaro Ltd.
*/
#ifndef _IPA_MEM_H_
#define _IPA_MEM_H_
@@ -62,13 +62,15 @@ enum ipa_mem_id {
IPA_MEM_PDN_CONFIG, /* 0/2 canaries (IPA v4.0+) */
IPA_MEM_STATS_QUOTA_MODEM, /* 2/4 canaries (IPA v4.0+) */
IPA_MEM_STATS_QUOTA_AP, /* 0 canaries, optional (IPA v4.0+) */
IPA_MEM_STATS_TETHERING, /* 0 canaries (IPA v4.0+) */
IPA_MEM_STATS_TETHERING, /* 0 canaries, optional (IPA v4.0+) */
IPA_MEM_STATS_DROP, /* 0 canaries, optional (IPA v4.0+) */
/* The next 5 filter and route statistics regions are optional */
/* The next 7 filter and route statistics regions are optional */
IPA_MEM_STATS_V4_FILTER, /* 0 canaries (IPA v4.0-v4.2) */
IPA_MEM_STATS_V6_FILTER, /* 0 canaries (IPA v4.0-v4.2) */
IPA_MEM_STATS_V4_ROUTE, /* 0 canaries (IPA v4.0-v4.2) */
IPA_MEM_STATS_V6_ROUTE, /* 0 canaries (IPA v4.0-v4.2) */
IPA_MEM_AP_V4_FILTER, /* 2 canaries (IPA v5.0) */
IPA_MEM_AP_V6_FILTER, /* 0 canaries (IPA v5.0) */
IPA_MEM_STATS_FILTER_ROUTE, /* 0 canaries (IPA v4.5+) */
IPA_MEM_NAT_TABLE, /* 4 canaries, optional (IPA v4.5+) */
IPA_MEM_END_MARKER, /* 1 canary (not a real region) */
......
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2018-2022 Linaro Ltd.
* Copyright (C) 2018-2023 Linaro Ltd.
*/
#ifndef _IPA_REG_H_
#define _IPA_REG_H_
@@ -59,8 +59,10 @@ enum ipa_reg_id {
SHARED_MEM_SIZE,
QSB_MAX_WRITES,
QSB_MAX_READS,
FILT_ROUT_HASH_EN,
FILT_ROUT_HASH_FLUSH,
FILT_ROUT_HASH_EN, /* Not IPA v5.0+ */
FILT_ROUT_CACHE_CFG, /* IPA v5.0+ */
FILT_ROUT_HASH_FLUSH, /* Not IPA v5.0+ */
FILT_ROUT_CACHE_FLUSH, /* IPA v5.0+ */
STATE_AGGR_ACTIVE,
IPA_BCR, /* Not IPA v4.5+ */
LOCAL_PKT_PROC_CNTXT,
@@ -95,7 +97,9 @@ enum ipa_reg_id {
ENDP_INIT_SEQ, /* TX only */
ENDP_STATUS,
ENDP_FILTER_ROUTER_HSH_CFG, /* Not IPA v4.2 */
/* The IRQ registers are only used for GSI_EE_AP */
ENDP_FILTER_CACHE_CFG, /* IPA v5.0+ */
ENDP_ROUTER_CACHE_CFG, /* IPA v5.0+ */
/* The IRQ registers that follow are only used for GSI_EE_AP */
IPA_IRQ_STTS,
IPA_IRQ_EN,
IPA_IRQ_CLR,
@@ -251,14 +255,28 @@ enum ipa_reg_qsb_max_reads_field_id {
GEN_QMB_1_MAX_READS_BEATS, /* IPA v4.0+ */
};
/* FILT_ROUT_CACHE_CFG register */
enum ipa_reg_filt_rout_cache_cfg_field_id {
ROUTER_CACHE_EN,
FILTER_CACHE_EN,
LOW_PRI_HASH_HIT_DISABLE,
LRU_EVICTION_THRESHOLD,
};
/* FILT_ROUT_HASH_EN and FILT_ROUT_HASH_FLUSH registers */
enum ipa_reg_rout_hash_field_id {
enum ipa_reg_filt_rout_hash_field_id {
IPV6_ROUTER_HASH,
IPV6_FILTER_HASH,
IPV4_ROUTER_HASH,
IPV4_FILTER_HASH,
};
/* FILT_ROUT_CACHE_FLUSH register */
enum ipa_reg_filt_rout_cache_field_id {
ROUTER_CACHE,
FILTER_CACHE,
};
/* BCR register */
enum ipa_bcr_compat {
BCR_CMDQ_L_LACK_ONE_ENTRY = 0x0, /* Not IPA v4.2+ */
@@ -298,6 +316,7 @@ enum ipa_reg_ipa_tx_cfg_field_id {
DUAL_TX_ENABLE, /* v4.5+ */
SSPND_PA_NO_START_STATE, /* v4,2+, not v4.5 */
SSPND_PA_NO_BQ_STATE, /* v4.2 only */
HOLB_STICKY_DROP_EN, /* v5.0+ */
};
/* FLAVOR_0 register */
@@ -333,6 +352,7 @@ enum ipa_reg_timers_pulse_gran_cfg_field_id {
PULSE_GRAN_0,
PULSE_GRAN_1,
PULSE_GRAN_2,
PULSE_GRAN_3,
};
/* Values for IPA_GRAN_x fields of TIMERS_PULSE_GRAN_CFG */
@@ -415,6 +435,8 @@ enum ipa_reg_endp_init_hdr_ext_field_id {
HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB, /* v4.5+ */
HDR_OFST_PKT_SIZE_MSB, /* v4.5+ */
HDR_ADDITIONAL_CONST_LEN_MSB, /* v4.5+ */
HDR_BYTES_TO_REMOVE_VALID, /* v5.0+ */
HDR_BYTES_TO_REMOVE, /* v5.0+ */
};
/* ENDP_INIT_MODE register */
@@ -573,6 +595,17 @@ enum ipa_reg_endp_filter_router_hsh_cfg_field_id {
ROUTER_HASH_MSK_ALL, /* Bitwise OR of the above 6 fields */
};
/* ENDP_FILTER_CACHE_CFG and ENDP_ROUTER_CACHE_CFG registers */
enum ipa_reg_endp_cache_cfg_field_id {
CACHE_MSK_SRC_ID,
CACHE_MSK_SRC_IP,
CACHE_MSK_DST_IP,
CACHE_MSK_SRC_PORT,
CACHE_MSK_DST_PORT,
CACHE_MSK_PROTOCOL,
CACHE_MSK_METADATA,
};
/* IPA_IRQ_STTS, IPA_IRQ_EN, and IPA_IRQ_CLR registers */
/**
* enum ipa_irq_id - Bit positions representing type of IPA IRQ
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2018-2022 Linaro Ltd.
* Copyright (C) 2018-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -359,13 +359,22 @@ int ipa_table_hash_flush(struct ipa *ipa)
return -EBUSY;
}
reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
offset = ipa_reg_offset(reg);
if (ipa->version < IPA_VERSION_5_0) {
reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
offset = ipa_reg_offset(reg);
val = ipa_reg_bit(reg, IPV6_ROUTER_HASH);
val |= ipa_reg_bit(reg, IPV6_FILTER_HASH);
val |= ipa_reg_bit(reg, IPV4_ROUTER_HASH);
val |= ipa_reg_bit(reg, IPV4_FILTER_HASH);
val = ipa_reg_bit(reg, IPV6_ROUTER_HASH);
val |= ipa_reg_bit(reg, IPV6_FILTER_HASH);
val |= ipa_reg_bit(reg, IPV4_ROUTER_HASH);
val |= ipa_reg_bit(reg, IPV4_FILTER_HASH);
} else {
reg = ipa_reg(ipa, FILT_ROUT_CACHE_FLUSH);
offset = ipa_reg_offset(reg);
/* IPA v5.0+ uses a unified cache (both IPv4 and IPv6) */
val = ipa_reg_bit(reg, ROUTER_CACHE);
val |= ipa_reg_bit(reg, FILTER_CACHE);
}
ipa_cmd_register_write_add(trans, offset, val, val, false);
@@ -490,13 +499,22 @@ static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
u32 offset;
u32 val;
reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
if (ipa->version < IPA_VERSION_5_0) {
reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
offset = ipa_reg_n_offset(reg, endpoint_id);
val = ioread32(endpoint->ipa->reg_virt + offset);
offset = ipa_reg_n_offset(reg, endpoint_id);
val = ioread32(endpoint->ipa->reg_virt + offset);
/* Zero all filter-related fields, preserving the rest */
val &= ~ipa_reg_fmask(reg, FILTER_HASH_MSK_ALL);
/* Zero all filter-related fields, preserving the rest */
val &= ~ipa_reg_fmask(reg, FILTER_HASH_MSK_ALL);
} else {
/* IPA v5.0 separates filter and router cache configuration */
reg = ipa_reg(ipa, ENDP_FILTER_CACHE_CFG);
offset = ipa_reg_n_offset(reg, endpoint_id);
/* Zero all filter-related fields */
val = 0;
}
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
@@ -540,13 +558,22 @@ static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
u32 offset;
u32 val;
reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
offset = ipa_reg_n_offset(reg, route_id);
if (ipa->version < IPA_VERSION_5_0) {
reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
offset = ipa_reg_n_offset(reg, route_id);
val = ioread32(ipa->reg_virt + offset);
val = ioread32(ipa->reg_virt + offset);
/* Zero all route-related fields, preserving the rest */
val &= ~ipa_reg_fmask(reg, ROUTER_HASH_MSK_ALL);
} else {
/* IPA v5.0 separates filter and router cache configuration */
reg = ipa_reg(ipa, ENDP_ROUTER_CACHE_CFG);
offset = ipa_reg_n_offset(reg, route_id);
/* Zero all route-related fields, preserving the rest */
val &= ~ipa_reg_fmask(reg, ROUTER_HASH_MSK_ALL);
/* Zero all route-related fields */
val = 0;
}
iowrite32(val, ipa->reg_virt + offset);
}
......