Commit 0598cec9 authored by David S. Miller

Merge branch 'ipa-next'

Alex Elder says:

====================
net: ipa: a few more small items

This series consists of three small sets of changes.  Version 2 adds
a patch that avoids a warning that occurs when handling a modem
crash (I unfortunately didn't notice it earlier).  All other patches
are the same--just rebased.

The first three patches allow a few endpoint features to be
specified.  Currently-defined endpoints retain the same
configuration, but these options will be required when the monitor
functionality is added in the next cycle.

The fourth patch simply removes an unused function, and explains
why it would likely never be used.

The fifth patch is new.  It counts the number of modem TX endpoints
and uses that count to determine how many TREs a transaction needs
when handling a modem crash.  It is needed to avoid exceeding the
limit on the number of commands imposed by the last four patches.

And the last four patches refactor code related to IPA immediate
commands, eliminating an unused field and then simplifying and
removing some unneeded code.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 60f243ad a224bd4b
......@@ -84,7 +84,6 @@ struct gsi_trans_info {
struct gsi_trans_pool pool; /* transaction pool */
struct gsi_trans_pool sg_pool; /* scatterlist pool */
struct gsi_trans_pool cmd_pool; /* command payload DMA pool */
struct gsi_trans_pool info_pool;/* command information pool */
struct gsi_trans **map; /* TRE -> transaction map */
spinlock_t spinlock; /* protects updates to the lists */
......
......@@ -410,10 +410,8 @@ void gsi_trans_free(struct gsi_trans *trans)
/* Add an immediate command to a transaction */
void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
dma_addr_t addr, enum dma_data_direction direction,
enum ipa_cmd_opcode opcode)
dma_addr_t addr, enum ipa_cmd_opcode opcode)
{
struct ipa_cmd_info *info;
u32 which = trans->used++;
struct scatterlist *sg;
......@@ -438,9 +436,7 @@ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
sg_dma_address(sg) = addr;
sg_dma_len(sg) = size;
info = &trans->info[which];
info->opcode = opcode;
info->direction = direction;
trans->cmd_opcode[which] = opcode;
}
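
The pattern above is easy to see in isolation.  Below is a minimal
userspace sketch of it: each command added to a transaction records just
its opcode in a small per-transaction array, indexed by the TRE slot it
consumes.  The names here (trans_stub, cmd_add, and the opcode values)
are illustrative stand-ins, not the driver's; the direction argument
disappears because every immediate command payload is DMA_TO_DEVICE.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TRE_MAX 8	/* mirrors IPA_COMMAND_TRANS_TRE_MAX */

enum cmd_opcode { CMD_NONE = 0, CMD_HDR_INIT_LOCAL = 9 };

struct trans_stub {
	uint8_t used;			/* TREs consumed so far */
	uint8_t cmd_opcode[TRE_MAX];	/* one opcode per used TRE */
};

/* Record a command's opcode in the slot for the TRE it will occupy */
static void cmd_add(struct trans_stub *trans, enum cmd_opcode opcode)
{
	uint8_t which = trans->used++;

	assert(which < TRE_MAX);
	trans->cmd_opcode[which] = opcode;
}

int main(void)
{
	struct trans_stub trans = { 0 };

	cmd_add(&trans, CMD_HDR_INIT_LOCAL);
	printf("TRE 0 opcode: %u\n", trans.cmd_opcode[0]);

	return 0;
}
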
/* Add a page transfer to a transaction. It will fill the only TRE. */
......@@ -556,10 +552,10 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
struct gsi_ring *ring = &channel->tre_ring;
enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
bool bei = channel->toward_ipa;
struct ipa_cmd_info *info;
struct gsi_tre *dest_tre;
struct scatterlist *sg;
u32 byte_count = 0;
u8 *cmd_opcode;
u32 avail;
u32 i;
......@@ -570,7 +566,7 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
* If there is no info array we're doing a simple data
* transfer request, whose opcode is IPA_CMD_NONE.
*/
info = trans->info ? &trans->info[0] : NULL;
cmd_opcode = channel->command ? &trans->cmd_opcode[0] : NULL;
avail = ring->count - ring->index % ring->count;
dest_tre = gsi_ring_virt(ring, ring->index);
for_each_sg(trans->sgl, sg, trans->used, i) {
......@@ -581,8 +577,8 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
byte_count += len;
if (!avail--)
dest_tre = gsi_ring_virt(ring, 0);
if (info)
opcode = info++->opcode;
if (cmd_opcode)
opcode = *cmd_opcode++;
gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode);
dest_tre++;
......@@ -637,28 +633,6 @@ void gsi_trans_commit_wait(struct gsi_trans *trans)
gsi_trans_free(trans);
}
/* Commit a GSI transaction and wait for it to complete, with timeout */
int gsi_trans_commit_wait_timeout(struct gsi_trans *trans,
unsigned long timeout)
{
unsigned long timeout_jiffies = msecs_to_jiffies(timeout);
unsigned long remaining = 1; /* In case of empty transaction */
if (!trans->used)
goto out_trans_free;
refcount_inc(&trans->refcount);
__gsi_trans_commit(trans, true);
remaining = wait_for_completion_timeout(&trans->completion,
timeout_jiffies);
out_trans_free:
gsi_trans_free(trans);
return remaining ? 0 : -ETIMEDOUT;
}
/* Process the completion of a transaction; called while polling */
void gsi_trans_complete(struct gsi_trans *trans)
{
......
......@@ -22,6 +22,9 @@ struct gsi;
struct gsi_trans;
struct gsi_trans_pool;
/* Maximum number of TREs in an IPA immediate command transaction */
#define IPA_COMMAND_TRANS_TRE_MAX 8
/**
* struct gsi_trans - a GSI transaction
*
......@@ -34,8 +37,8 @@ struct gsi_trans_pool;
* @used: Number of TREs *used* (could be less than tre_count)
* @len: Total # of transfer bytes represented in sgl[] (set by core)
* @data: Preserved but not touched by the core transaction code
* @cmd_opcode: Array of command opcodes (command channel only)
* @sgl: An array of scatter/gather entries managed by core code
* @info: Array of command information structures (command channel)
* @direction: DMA transfer direction (DMA_NONE for commands)
* @refcount: Reference count used for destruction
* @completion: Completed when the transaction completes
......@@ -57,9 +60,11 @@ struct gsi_trans {
u8 used; /* # entries used in sgl[] */
u32 len; /* total # bytes across sgl[] */
void *data;
union {
void *data;
u8 cmd_opcode[IPA_COMMAND_TRANS_TRE_MAX];
};
struct scatterlist *sgl;
struct ipa_cmd_info *info; /* array of entries, or null */
enum dma_data_direction direction;
refcount_t refcount;
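
The union above is safe because its two members never coexist: @data is
meaningful only for ordinary transfer transactions, while the opcode
array is used only on the command channel.  A small standalone sketch of
the idea (the field names mirror the struct; everything else is
illustrative):

#include <stdint.h>
#include <stdio.h>

#define IPA_COMMAND_TRANS_TRE_MAX 8

struct trans_stub {
	union {			/* never both: */
		void *data;	/* ordinary transfer transactions */
		uint8_t cmd_opcode[IPA_COMMAND_TRANS_TRE_MAX]; /* commands */
	};
};

int main(void)
{
	/* The union costs max(sizeof(void *), 8) bytes, not the sum */
	printf("sizeof(struct trans_stub) = %zu\n", sizeof(struct trans_stub));

	return 0;
}
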
......@@ -165,12 +170,10 @@ void gsi_trans_free(struct gsi_trans *trans);
* @buf: Buffer pointer for command payload
* @size: Number of bytes in buffer
* @addr: DMA address for payload
* @direction: Direction of DMA transfer (or DMA_NONE if none required)
* @opcode: IPA immediate command opcode
*/
void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
dma_addr_t addr, enum dma_data_direction direction,
enum ipa_cmd_opcode opcode);
dma_addr_t addr, enum ipa_cmd_opcode opcode);
/**
* gsi_trans_page_add() - Add a page transfer to a transaction
......@@ -205,15 +208,6 @@ void gsi_trans_commit(struct gsi_trans *trans, bool ring_db);
*/
void gsi_trans_commit_wait(struct gsi_trans *trans);
/**
* gsi_trans_commit_wait_timeout() - Commit a GSI transaction and wait for
* it to complete, with timeout
* @trans: Transaction to commit
* @timeout: Timeout period (in milliseconds)
*/
int gsi_trans_commit_wait_timeout(struct gsi_trans *trans,
unsigned long timeout);
/**
* gsi_trans_read_byte() - Issue a single byte read TRE on a channel
* @gsi: GSI pointer
......
......@@ -62,6 +62,7 @@ struct ipa_interrupt;
* @initialized: Bit mask indicating endpoints initialized
* @set_up: Bit mask indicating endpoints set up
* @enabled: Bit mask indicating endpoints enabled
* @modem_tx_count: Number of defined modem TX endpoints
* @endpoint: Array of endpoint information
* @channel_map: Mapping of GSI channel to IPA endpoint
* @name_map: Mapping of IPA endpoint name to IPA endpoint
......@@ -114,6 +115,7 @@ struct ipa {
u32 set_up;
u32 enabled;
u32 modem_tx_count;
struct ipa_endpoint endpoint[IPA_ENDPOINT_MAX];
struct ipa_endpoint *channel_map[GSI_CHANNEL_COUNT_MAX];
struct ipa_endpoint *name_map[IPA_ENDPOINT_COUNT];
......
......@@ -26,14 +26,13 @@
* other than data transfer to another endpoint.
*
* Immediate commands are represented by GSI transactions just like other
* transfer requests, represented by a single GSI TRE. Each immediate
* command has a well-defined format, having a payload of a known length.
* This allows the transfer element's length field to be used to hold an
* immediate command's opcode. The payload for a command resides in DRAM
* and is described by a single scatterlist entry in its transaction.
* Commands do not require a transaction completion callback. To commit
* an immediate command transaction, either gsi_trans_commit_wait() or
* gsi_trans_commit_wait_timeout() is used.
* transfer requests, and use a single GSI TRE. Each immediate command
* has a well-defined format, having a payload of a known length. This
* allows the transfer element's length field to be used to hold an
* immediate command's opcode. The payload for a command resides in AP
* memory and is described by a single scatterlist entry in its transaction.
* Commands do not require a transaction completion callback, and are
* (currently) always issued using gsi_trans_commit_wait().
*/
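
A rough illustration of the "length field holds the opcode" convention
described in the comment above.  The TRE layout and field names below
are assumptions made for the sketch, not the hardware's actual
definition (which gsi_trans_tre_fill() encodes):

#include <stdint.h>
#include <stdio.h>

/* Illustrative TRE; the real layout is hardware-defined */
struct tre_stub {
	uint64_t addr;		/* DMA address of the payload */
	uint16_t len_opcode;	/* transfer length, or command opcode */
	uint32_t flags;
};

enum opcode { OP_NONE = 0, OP_DMA_SHARED_MEM = 19 };

static void tre_fill(struct tre_stub *tre, uint64_t addr, uint16_t len,
		     enum opcode opcode)
{
	tre->addr = addr;
	/* Immediate commands have fixed-size payloads, so the length
	 * field is free to carry the opcode instead.
	 */
	tre->len_opcode = opcode == OP_NONE ? len : (uint16_t)opcode;
	tre->flags = 0;
}

int main(void)
{
	struct tre_stub tre;

	tre_fill(&tre, 0x1000, 0, OP_DMA_SHARED_MEM);
	printf("len_opcode field: %u\n", tre.len_opcode);

	return 0;
}
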
/* Some commands can wait until indicated pipeline stages are clear */
......@@ -350,7 +349,6 @@ int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
struct device *dev = channel->gsi->dev;
int ret;
/* This is as good a place as any to validate build constants */
ipa_cmd_validate_build();
......@@ -359,20 +357,9 @@ int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
* a single transaction can require up to tlv_count of them,
* so we treat them as if that many can be allocated at once.
*/
ret = gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
sizeof(union ipa_cmd_payload),
tre_max, channel->tlv_count);
if (ret)
return ret;
/* Each TRE needs a command info structure */
ret = gsi_trans_pool_init(&trans_info->info_pool,
sizeof(struct ipa_cmd_info),
tre_max, channel->tlv_count);
if (ret)
gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
return ret;
return gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
sizeof(union ipa_cmd_payload),
tre_max, channel->tlv_count);
}
void ipa_cmd_pool_exit(struct gsi_channel *channel)
......@@ -380,7 +367,6 @@ void ipa_cmd_pool_exit(struct gsi_channel *channel)
struct gsi_trans_info *trans_info = &channel->trans_info;
struct device *dev = channel->gsi->dev;
gsi_trans_pool_exit(&trans_info->info_pool);
gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
}
......@@ -403,7 +389,6 @@ void ipa_cmd_table_init_add(struct gsi_trans *trans,
dma_addr_t hash_addr)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum dma_data_direction direction = DMA_TO_DEVICE;
struct ipa_cmd_hw_ip_fltrt_init *payload;
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
......@@ -434,7 +419,7 @@ void ipa_cmd_table_init_add(struct gsi_trans *trans,
payload->nhash_rules_addr = cpu_to_le64(addr);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
direction, opcode);
opcode);
}
/* Initialize header space in IPA-local memory */
......@@ -443,7 +428,6 @@ void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
enum dma_data_direction direction = DMA_TO_DEVICE;
struct ipa_cmd_hw_hdr_init_local *payload;
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
......@@ -465,7 +449,7 @@ void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
payload->flags = cpu_to_le32(flags);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
direction, opcode);
opcode);
}
void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
......@@ -522,7 +506,7 @@ void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
payload->clear_options = cpu_to_le32(options);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
DMA_NONE, opcode);
opcode);
}
/* Skip IP packet processing on the next data transfer on a TX channel */
......@@ -530,7 +514,6 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT;
enum dma_data_direction direction = DMA_TO_DEVICE;
struct ipa_cmd_ip_packet_init *payload;
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
......@@ -542,7 +525,7 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
direction, opcode);
opcode);
}
/* Use a DMA command to read or write a block of IPA-resident memory */
......@@ -553,7 +536,6 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
struct ipa_cmd_hw_dma_mem_mem *payload;
union ipa_cmd_payload *cmd_payload;
enum dma_data_direction direction;
dma_addr_t payload_addr;
u16 flags;
......@@ -584,17 +566,14 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
payload->flags = cpu_to_le16(flags);
payload->system_addr = cpu_to_le64(addr);
direction = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
direction, opcode);
opcode);
}
static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
enum dma_data_direction direction = DMA_TO_DEVICE;
struct ipa_cmd_ip_packet_tag_status *payload;
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
......@@ -605,14 +584,13 @@ static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
direction, opcode);
opcode);
}
/* Issue a small command TX data transfer */
static void ipa_cmd_transfer_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum dma_data_direction direction = DMA_TO_DEVICE;
enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
union ipa_cmd_payload *payload;
dma_addr_t payload_addr;
......@@ -621,7 +599,7 @@ static void ipa_cmd_transfer_add(struct gsi_trans *trans)
payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
direction, opcode);
opcode);
}
/* Add immediate commands to a transaction to clear the hardware pipeline */
......@@ -661,28 +639,16 @@ void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
wait_for_completion(&ipa->completion);
}
static struct ipa_cmd_info *
ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
{
struct gsi_channel *channel;
channel = &endpoint->ipa->gsi.channel[endpoint->channel_id];
return gsi_trans_pool_alloc(&channel->trans_info.info_pool, tre_count);
}
/* Allocate a transaction for the command TX endpoint */
struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
{
struct ipa_endpoint *endpoint;
struct gsi_trans *trans;
endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
if (WARN_ON(tre_count > IPA_COMMAND_TRANS_TRE_MAX))
return NULL;
trans = gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
tre_count, DMA_NONE);
if (trans)
trans->info = ipa_cmd_info_alloc(endpoint, tre_count);
endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
return trans;
return gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
tre_count, DMA_NONE);
}
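
The WARN_ON() above is what the new modem_tx_count accounting protects.
A hypothetical caller, loosely modeled on
ipa_endpoint_modem_exception_reset_all() with stand-in counts, would
size its transaction as below; a count above IPA_COMMAND_TRANS_TRE_MAX
now fails allocation with a warning rather than overrunning the
per-transaction opcode array:

#include <stdio.h>

#define IPA_COMMAND_TRANS_TRE_MAX 8

/* Stand-in values; the real counts come from configuration data */
static const unsigned int modem_tx_count = 5;

static unsigned int pipeline_clear_count(void)
{
	return 2;	/* e.g. a register write plus a tag-status command */
}

int main(void)
{
	unsigned int count = modem_tx_count + pipeline_clear_count();

	if (count > IPA_COMMAND_TRANS_TRE_MAX) {
		fprintf(stderr, "transaction too large (%u TREs)\n", count);
		return 1;
	}
	printf("allocating command transaction with %u TREs\n", count);

	return 0;
}
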
......@@ -46,17 +46,6 @@ enum ipa_cmd_opcode {
IPA_CMD_IP_PACKET_TAG_STATUS = 0x14,
};
/**
* struct ipa_cmd_info - information needed for an IPA immediate command
*
* @opcode: The command opcode.
* @direction: Direction of data transfer for DMA commands
*/
struct ipa_cmd_info {
enum ipa_cmd_opcode opcode;
enum dma_data_direction direction;
};
/**
* ipa_cmd_table_valid() - Validate a memory region holding a table
* @ipa: - IPA pointer
......
......@@ -103,6 +103,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
.aggr_time_limit = 500,
},
},
},
......@@ -150,6 +151,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.rx = {
.buffer_size = 8192,
.aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
......
......@@ -94,6 +94,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
.aggr_time_limit = 500,
},
},
},
......@@ -142,6 +143,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.rx = {
.buffer_size = 8192,
.aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
......
......@@ -88,6 +88,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
.aggr_time_limit = 500,
},
},
},
......@@ -135,6 +136,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.rx = {
.buffer_size = 32768,
.aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
......
......@@ -84,6 +84,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
.aggr_time_limit = 500,
},
},
},
......@@ -132,6 +133,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.rx = {
.buffer_size = 8192,
.aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
......
......@@ -97,6 +97,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
.aggr_time_limit = 500,
},
},
},
......@@ -144,6 +145,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.rx = {
.buffer_size = 8192,
.aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
......
......@@ -89,6 +89,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.rx = {
.buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
.aggr_time_limit = 500,
},
},
},
......@@ -136,6 +137,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.rx = {
.buffer_size = 8192,
.aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
......
......@@ -35,7 +35,6 @@
#define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */
#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
#define IPA_AGGR_TIME_LIMIT 500 /* microseconds */
/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
......@@ -81,6 +80,24 @@ static u32 aggr_byte_limit_max(enum ipa_version version)
return field_max(aggr_byte_limit_fmask(false));
}
/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
{
/* A hard aggregation limit will not be crossed; aggregation closes
* if saving incoming data would cross the hard byte limit boundary.
*
* With a soft limit, aggregation closes *after* the size boundary
* has been crossed. In that case the limit must leave enough space
* after that limit to receive a full MTU of data plus overhead.
*/
if (!aggr_hard_limit)
rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
/* The byte limit is encoded as a number of kilobytes */
return rx_buffer_size / SZ_1K;
}
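
For concreteness, here is the same computation as a standalone program,
using the 8192-byte receive buffers from the configuration files in this
series.  The IPA_MTU and IPA_RX_BUFFER_OVERHEAD values below are
assumptions for illustration; the driver defines its own.

#include <stdbool.h>
#include <stdio.h>

#define SZ_1K 1024

/* Assumed values for illustration only */
#define IPA_MTU			1500
#define IPA_RX_BUFFER_OVERHEAD	320

/* Mirrors ipa_aggr_size_kb() above */
static unsigned int aggr_size_kb(unsigned int rx_buffer_size, bool hard_limit)
{
	/* A soft limit must leave room for a full MTU plus overhead
	 * *after* the boundary; a hard limit is never crossed.
	 */
	if (!hard_limit)
		rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	return rx_buffer_size / SZ_1K;
}

int main(void)
{
	printf("8192-byte buffer, soft limit: %u KB\n",
	       aggr_size_kb(8192, false));	/* (8192 - 1820) / 1024 = 6 */
	printf("8192-byte buffer, hard limit: %u KB\n",
	       aggr_size_kb(8192, true));	/* 8192 / 1024 = 8 */

	return 0;
}
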
static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *all_data,
const struct ipa_gsi_endpoint_data *data)
......@@ -93,7 +110,9 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return true;
if (!data->toward_ipa) {
const struct ipa_endpoint_rx *rx_config;
u32 buffer_size;
u32 aggr_size;
u32 limit;
if (data->endpoint.filter_support) {
......@@ -107,8 +126,10 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
if (data->ee_id != GSI_EE_AP)
return true;
buffer_size = data->endpoint.config.rx.buffer_size;
rx_config = &data->endpoint.config.rx;
/* The buffer size must hold an MTU plus overhead */
buffer_size = rx_config->buffer_size;
limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
if (buffer_size < limit) {
dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
......@@ -116,27 +137,46 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return false;
}
/* For an endpoint supporting receive aggregation, the
* aggregation byte limit defines the point at which an
* aggregation window will close. It is programmed into the
* IPA hardware as a number of KB. We don't use "hard byte
* limit" aggregation, so we need to supply enough space in
* a receive buffer to hold a complete MTU plus normal skb
* overhead *after* that aggregation byte limit has been
* crossed.
*
* This check just ensures the receive buffer size doesn't
* exceed what's representable in the aggregation limit field.
*/
if (data->endpoint.config.aggregation) {
limit += SZ_1K * aggr_byte_limit_max(ipa->version);
if (buffer_size - NET_SKB_PAD > limit) {
dev_err(dev, "RX buffer size too large for aggregated RX endpoint %u (%u > %u)\n",
data->endpoint_id,
buffer_size - NET_SKB_PAD, limit);
if (!data->endpoint.config.aggregation) {
bool result = true;
return false;
/* No aggregation; check for bogus aggregation data */
if (rx_config->aggr_time_limit) {
dev_err(dev,
"time limit with no aggregation for RX endpoint %u\n",
data->endpoint_id);
result = false;
}
if (rx_config->aggr_hard_limit) {
dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
data->endpoint_id);
result = false;
}
if (rx_config->aggr_close_eof) {
dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
data->endpoint_id);
result = false;
}
return result; /* Nothing more to check */
}
/* For an endpoint supporting receive aggregation, the byte
* limit defines the point at which aggregation closes. This
* check ensures the receive buffer size doesn't result in a
* limit that exceeds what's representable in the aggregation
* byte limit field.
*/
aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
rx_config->aggr_hard_limit);
limit = aggr_byte_limit_max(ipa->version);
if (aggr_size > limit) {
dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
data->endpoint_id, aggr_size, limit);
return false;
}
return true; /* Nothing more to check for RX */
......@@ -402,12 +442,10 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
struct gsi_trans *trans;
u32 count;
/* We need one command per modem TX endpoint. We can get an upper
* bound on that by assuming all initialized endpoints are modem->IPA.
* That won't happen, and we could be more precise, but this is fine
* for now. End the transaction with commands to clear the pipeline.
/* We need one command per modem TX endpoint, plus the commands
* that clear the pipeline.
*/
count = hweight32(initialized) + ipa_cmd_pipeline_clear_count();
count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
trans = ipa_cmd_trans_alloc(ipa, count);
if (!trans) {
dev_err(&ipa->pdev->dev,
......@@ -438,7 +476,6 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
ipa_cmd_pipeline_clear_add(trans);
/* XXX This should have a 1 second timeout */
gsi_trans_commit_wait(trans);
ipa_cmd_pipeline_clear_wait(ipa);
......@@ -670,18 +707,6 @@ static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
{
/* We don't use "hard byte limit" aggregation, so we define the
* aggregation limit such that our buffer has enough space *after*
* that limit to receive a full MTU of data, plus overhead.
*/
rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
return rx_buffer_size / SZ_1K;
}
/* Encoded values for AGGR endpoint register fields */
static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
{
......@@ -700,9 +725,13 @@ static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
if (version < IPA_VERSION_4_5) {
/* We set aggregation granularity in ipa_hardware_config() */
limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
fmask = aggr_time_limit_fmask(true);
val = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
WARN(val > field_max(fmask),
"aggr_time_limit too large (%u > %u usec)\n",
val, field_max(fmask) * IPA_AGGR_GRANULARITY);
return u32_encode_bits(limit, aggr_time_limit_fmask(true));
return u32_encode_bits(val, fmask);
}
/* IPA v4.5 expresses the time limit using Qtime. The AP has
......@@ -717,6 +746,9 @@ static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
/* Have to use pulse generator 1 (millisecond granularity) */
gran_sel = AGGR_GRAN_SEL_FMASK;
val = DIV_ROUND_CLOSEST(limit, 1000);
WARN(val > field_max(fmask),
"aggr_time_limit too large (%u > %u usec)\n",
limit, field_max(fmask) * 1000);
} else {
/* We can use pulse generator 0 (100 usec granularity) */
gran_sel = 0;
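
A sketch of the granularity selection this hunk hardens, under stated
assumptions: the field width below is a placeholder (the real limit
comes from the register field mask), and only the two IPA v4.5 pulse
generators are modeled, ticking every 100 microseconds and every
millisecond respectively.

#include <stdio.h>

#define TIME_LIMIT_FIELD_MAX	31U	/* placeholder field width */

static unsigned int div_round_closest(unsigned int n, unsigned int d)
{
	return (n + d / 2) / d;
}

/* Pick the coarsest-necessary tick: pulse generator 0 counts 100 usec
 * units; pulse generator 1 counts milliseconds.
 */
static unsigned int encode_time_limit(unsigned int limit_usec, int *gran_sel)
{
	unsigned int val;

	if (limit_usec > TIME_LIMIT_FIELD_MAX * 100) {
		*gran_sel = 1;
		val = div_round_closest(limit_usec, 1000);
	} else {
		*gran_sel = 0;
		val = div_round_closest(limit_usec, 100);
	}
	if (val > TIME_LIMIT_FIELD_MAX)
		fprintf(stderr, "time limit too large (%u usec)\n",
			limit_usec);

	return val;
}

int main(void)
{
	int gran_sel;
	unsigned int val = encode_time_limit(500, &gran_sel);

	printf("500 usec -> value %u, pulse generator %d\n", val, gran_sel);

	return 0;
}
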
......@@ -753,10 +785,11 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
buffer_size = rx_config->buffer_size;
limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD);
limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
rx_config->aggr_hard_limit);
val |= aggr_byte_limit_encoded(version, limit);
limit = IPA_AGGR_TIME_LIMIT;
limit = rx_config->aggr_time_limit;
val |= aggr_time_limit_encoded(version, limit);
/* AGGR_PKT_LIMIT is 0 (unlimited) */
......@@ -1554,8 +1587,12 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
ipa_endpoint_init_hdr_metadata_mask(endpoint);
ipa_endpoint_init_mode(endpoint);
ipa_endpoint_init_aggr(endpoint);
if (!endpoint->toward_ipa)
ipa_endpoint_init_hol_block_disable(endpoint);
if (!endpoint->toward_ipa) {
if (endpoint->config.rx.holb_drop)
ipa_endpoint_init_hol_block_enable(endpoint, 0);
else
ipa_endpoint_init_hol_block_disable(endpoint);
}
ipa_endpoint_init_deaggr(endpoint);
ipa_endpoint_init_rsrc_grp(endpoint);
ipa_endpoint_init_seq(endpoint);
......@@ -1885,6 +1922,8 @@ u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
if (data->endpoint.filter_support)
filter_map |= BIT(data->endpoint_id);
if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
ipa->modem_tx_count++;
}
if (!ipa_filter_map_valid(ipa, filter_map))
......
......@@ -59,7 +59,13 @@ struct ipa_endpoint_tx {
* struct ipa_endpoint_rx - Endpoint configuration for RX endpoints
* @buffer_size: requested receive buffer size (bytes)
* @pad_align: power-of-2 boundary to which packet payload is aligned
* @aggr_time_limit: time before aggregation closes (microseconds)
* @aggr_hard_limit: whether aggregation closes before or after boundary
* @aggr_close_eof: whether aggregation closes on end-of-frame
* @holb_drop: whether to drop packets to avoid head-of-line blocking
*
* The actual size of the receive buffer is rounded up if necessary
* to be a power-of-2 number of pages.
*
* With each packet it transfers, the IPA hardware can perform certain
* transformations of its packet data. One of these is adding pad bytes
......@@ -67,13 +73,26 @@ struct ipa_endpoint_tx {
*
* It is also able to aggregate multiple packets into a single receive buffer.
* Aggregation is "open" while a buffer is being filled, and "closes" when
* certain criteria are met. One of those criteria is the sender indicating
* a "frame" consisting of several transfers has ended.
* certain criteria are met.
*
* A time limit can be specified to close aggregation. Aggregation will be
* closed if this period passes after data is first written into a receive
* buffer. If not specified, no time limit is imposed.
*
* Insufficient space available in the receive buffer can close aggregation.
* The aggregation byte limit defines the point (in units of 1024 bytes) in
* the buffer where aggregation closes. With a "soft" aggregation limit,
* aggregation closes when a packet written to the buffer *crosses* that
* aggregation limit. With a "hard" aggregation limit, aggregation will
* close *before* writing a packet that would cross that boundary.
*/
struct ipa_endpoint_rx {
u32 buffer_size;
u32 pad_align;
u32 aggr_time_limit;
bool aggr_hard_limit;
bool aggr_close_eof;
bool holb_drop;
};
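
Putting the new fields together, an RX endpoint configuration modeled on
the entries updated earlier in this series might look as follows (a
standalone sketch; the struct is copied from above, and the values are
examples):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors struct ipa_endpoint_rx above */
struct ipa_endpoint_rx {
	uint32_t buffer_size;
	uint32_t pad_align;
	uint32_t aggr_time_limit;
	bool aggr_hard_limit;
	bool aggr_close_eof;
	bool holb_drop;
};

int main(void)
{
	struct ipa_endpoint_rx rx = {
		.buffer_size = 8192,
		.aggr_time_limit = 500,		/* usec; 0 means no limit */
		.aggr_hard_limit = false,	/* soft: close after crossing */
		.aggr_close_eof = true,		/* also close on end-of-frame */
	};

	printf("aggregation closes after %u usec or at end-of-frame\n",
	       rx.aggr_time_limit);

	return 0;
}
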
/**
......