Commit b6d26144 authored by David S. Miller

Merge branch 'net-ipa-next'

Alex Elder says:

====================
net: ipa: a mix of patches

This series includes a mix of things that are generally
minor.  The first four are sort of unrelated fixes, and summarizing
them here wouldn't be that helpful.

The last three together make it so only the "configuration data" we
need after initialization is saved for later use.  Most such data is
used only during driver initialization.  But endpoint configuration
is needed later, so the last patch saves a copy of that.  Eventually
we'll want to support reconfiguring endpoints at runtime as well,
and this will facilitate that.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 805cb5aa 660e52d6
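
In miniature, the change the last three patches build toward looks like this (a sketch with hypothetical type names; the real structures carry many more fields):

struct endpoint_config {
        bool checksum;
        bool aggregation;
};

/* Before: the endpoint held a pointer into the init-time configuration
 * arrays, so that memory had to stay around for the life of the driver.
 */
struct endpoint_before {
        const struct endpoint_config *data;
};

/* After: the endpoint embeds its own copy.  The init-time data is no
 * longer referenced after probe, and the copy can later be modified to
 * support runtime reconfiguration.
 */
struct endpoint_after {
        struct endpoint_config config;
};

static void endpoint_init_one(struct endpoint_after *endpoint,
                              const struct endpoint_config *config)
{
        endpoint->config = *config;     /* struct assignment copies it */
}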
@@ -1179,15 +1179,15 @@ static void gsi_isr_gp_int1(struct gsi *gsi)
* Similarly, we could get an error back when updating flow control
* on a channel because it's not in the proper state.
*
* In either case, we silently ignore a CHANNEL_NOT_RUNNING error
* if we receive it.
* In either case, we silently ignore an INCORRECT_CHANNEL_STATE
* error if we receive it.
*/
val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
switch (result) {
case GENERIC_EE_SUCCESS:
case GENERIC_EE_CHANNEL_NOT_RUNNING:
case GENERIC_EE_INCORRECT_CHANNEL_STATE:
gsi->result = 0;
break;
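
For context, GENERIC_EE_RESULT is a bit field within the SCRATCH_0 register; a minimal sketch of how such a field is extracted and handled (the mask value here is illustrative, not the real GENERIC_EE_RESULT_FMASK):

#include <linux/bitfield.h>     /* u32_get_bits() */
#include <linux/bits.h>         /* GENMASK() */
#include <linux/errno.h>

#define EXAMPLE_RESULT_FMASK    GENMASK(7, 0)   /* illustrative mask */

static int example_decode_result(u32 scratch0)
{
        u32 result = u32_get_bits(scratch0, EXAMPLE_RESULT_FMASK);

        switch (result) {
        case 0x1:               /* GENERIC_EE_SUCCESS */
        case 0x2:               /* GENERIC_EE_INCORRECT_CHANNEL_STATE */
                return 0;       /* both are treated as success above */
        default:
                return -EIO;
        }
}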
@@ -1492,12 +1492,8 @@ static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
if (index == ring->index % ring->count)
return NULL;
/* Get the transaction for the latest completed event. Take a
* reference to keep it from completing before we give the events
* for this and previous transactions back to the hardware.
*/
/* Get the transaction for the latest completed event. */
trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
refcount_inc(&trans->refcount);
/* For RX channels, update each completed transaction with the number
* of bytes that were actually received. For TX channels, report
@@ -1512,9 +1508,7 @@ static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
gsi_trans_move_complete(trans);
/* Tell the hardware we've handled these events */
gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);
gsi_trans_free(trans);
gsi_evt_ring_doorbell(gsi, evt_ring_id, index);
return gsi_channel_trans_complete(channel);
}
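
The dropped refcount_inc()/gsi_trans_free() pair followed the common refcount keep-alive pattern: pin an object across an operation that might otherwise free it, then release the pin. The patch concludes the pin is unnecessary on this path; a generic sketch of the pattern being removed, with a hypothetical type for illustration only:

#include <linux/refcount.h>
#include <linux/slab.h>

struct example_trans {
        refcount_t refcount;
        /* ... */
};

static void example_pin_across_use(struct example_trans *trans)
{
        refcount_inc(&trans->refcount);         /* pin: can't be freed yet */

        /* ... operations that may drop other references to trans ... */

        if (refcount_dec_and_test(&trans->refcount))
                kfree(trans);                   /* ours was the last reference */
}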
@@ -515,7 +515,7 @@ enum gsi_err_type {
/** enum gsi_generic_ee_result - GENERIC_EE_RESULT field values in SCRATCH_0 */
enum gsi_generic_ee_result {
GENERIC_EE_SUCCESS = 0x1,
GENERIC_EE_CHANNEL_NOT_RUNNING = 0x2,
GENERIC_EE_INCORRECT_CHANNEL_STATE = 0x2,
GENERIC_EE_INCORRECT_DIRECTION = 0x3,
GENERIC_EE_INCORRECT_CHANNEL_TYPE = 0x4,
GENERIC_EE_INCORRECT_CHANNEL = 0x5,
@@ -95,72 +95,10 @@ struct gsi_channel_data {
u8 tlv_count;
};
/**
* struct ipa_endpoint_tx_data - configuration data for TX endpoints
* @seq_type: primary packet processing sequencer type
* @seq_rep_type: sequencer type for replication processing
* @status_endpoint: endpoint to which status elements are sent
*
* The @status_endpoint is only valid if the endpoint's @status_enable
* flag is set.
*/
struct ipa_endpoint_tx_data {
enum ipa_seq_type seq_type;
enum ipa_seq_rep_type seq_rep_type;
enum ipa_endpoint_name status_endpoint;
};
/**
* struct ipa_endpoint_rx_data - configuration data for RX endpoints
* @buffer_size: requested receive buffer size (bytes)
* @pad_align: power-of-2 boundary to which packet payload is aligned
* @aggr_close_eof: whether aggregation closes on end-of-frame
*
* With each packet it transfers, the IPA hardware can perform certain
* transformations of its packet data. One of these is adding pad bytes
* to the end of the packet data so the result ends on a power-of-2 boundary.
*
* It is also able to aggregate multiple packets into a single receive buffer.
* Aggregation is "open" while a buffer is being filled, and "closes" when
* certain criteria are met. One of those criteria is the sender indicating
* a "frame" consisting of several transfers has ended.
*/
struct ipa_endpoint_rx_data {
u32 buffer_size;
u32 pad_align;
bool aggr_close_eof;
};
/**
* struct ipa_endpoint_config_data - IPA endpoint hardware configuration
* @resource_group: resource group to assign endpoint to
* @checksum: whether checksum offload is enabled
* @qmap: whether endpoint uses QMAP protocol
* @aggregation: whether endpoint supports aggregation
* @status_enable: whether endpoint uses status elements
* @dma_mode: whether endpoint operates in DMA mode
* @dma_endpoint: peer endpoint, if operating in DMA mode
* @tx: TX-specific endpoint information (see above)
* @rx: RX-specific endpoint information (see above)
*/
struct ipa_endpoint_config_data {
u32 resource_group;
bool checksum;
bool qmap;
bool aggregation;
bool status_enable;
bool dma_mode;
enum ipa_endpoint_name dma_endpoint;
union {
struct ipa_endpoint_tx_data tx;
struct ipa_endpoint_rx_data rx;
};
};
/**
* struct ipa_endpoint_data - IPA endpoint configuration data
* @filter_support: whether endpoint supports filtering
* @config: hardware configuration (see above)
* @config: hardware configuration
*
* Not all endpoints support the IPA filtering capability. A filter table
* defines the filters to apply for those endpoints that support it. The
@@ -168,12 +106,12 @@ struct ipa_endpoint_config_data {
* for non-AP endpoints. For this reason we define *all* endpoints used
* in the system, and indicate whether they support filtering.
*
* The remaining endpoint configuration data applies only to AP endpoints.
* The remaining endpoint configuration data specifies default hardware
* configuration values that apply only to AP endpoints.
*/
struct ipa_endpoint_data {
bool filter_support;
/* Everything else is specified only for AP endpoints */
struct ipa_endpoint_config_data config;
struct ipa_endpoint_config config;
};
/**
@@ -333,7 +333,7 @@ static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
struct ipa *ipa = endpoint->ipa;
if (!endpoint->data->aggregation)
if (!endpoint->config.aggregation)
return;
/* Nothing to do if the endpoint doesn't have aggregation open */
@@ -453,7 +453,7 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
u32 val = 0;
/* FRAG_OFFLOAD_EN is 0 */
if (endpoint->data->checksum) {
if (endpoint->config.checksum) {
enum ipa_version version = endpoint->ipa->version;
if (endpoint->toward_ipa) {
@@ -502,7 +502,7 @@ ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
u32 header_size = sizeof(struct rmnet_map_header);
/* Without checksum offload, we just have the MAP header */
if (!endpoint->data->checksum)
if (!endpoint->config.checksum)
return header_size;
if (version < IPA_VERSION_4_5) {
@@ -544,7 +544,7 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
struct ipa *ipa = endpoint->ipa;
u32 val = 0;
if (endpoint->data->qmap) {
if (endpoint->config.qmap) {
enum ipa_version version = ipa->version;
size_t header_size;
@@ -583,23 +583,27 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
u32 pad_align = endpoint->data->rx.pad_align;
u32 pad_align = endpoint->config.rx.pad_align;
struct ipa *ipa = endpoint->ipa;
u32 val = 0;
val |= HDR_ENDIANNESS_FMASK; /* big endian */
if (endpoint->config.qmap) {
/* We have a header, so we must specify its endianness */
val |= HDR_ENDIANNESS_FMASK; /* big endian */
/* A QMAP header contains a 6 bit pad field at offset 0. The RMNet
* driver assumes this field is meaningful in packets it receives,
* and assumes the header's payload length includes that padding.
* The RMNet driver does *not* pad packets it sends, however, so
* the pad field (although 0) should be ignored.
*/
if (endpoint->data->qmap && !endpoint->toward_ipa) {
val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
/* A QMAP header contains a 6 bit pad field at offset 0.
* The RMNet driver assumes this field is meaningful in
* packets it receives, and assumes the header's payload
* length includes that padding. The RMNet driver does
* *not* pad packets it sends, however, so the pad field
* (although 0) should be ignored.
*/
if (!endpoint->toward_ipa) {
val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
}
}
/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
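
A sketch of how a receiver accounts for the QMAP pad field described above, assuming the struct rmnet_map_header layout and MAP_PAD_LEN_MASK from <linux/if_rmnet.h> (treat the details as illustrative):

#include <linux/bitfield.h>     /* u8_get_bits() */
#include <linux/if_rmnet.h>     /* struct rmnet_map_header, MAP_PAD_LEN_MASK */

/* pkt_len in the MAP header includes trailing pad bytes; the 6-bit pad
 * field in the flags byte says how many to subtract to recover the
 * real payload length.
 */
static u32 example_qmap_payload_len(const struct rmnet_map_header *hdr)
{
        u32 pad_len = u8_get_bits(hdr->flags, MAP_PAD_LEN_MASK);

        return be16_to_cpu(hdr->pkt_len) - pad_len;
}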
@@ -611,7 +615,7 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
*/
if (ipa->version >= IPA_VERSION_4_5) {
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
if (endpoint->data->qmap && !endpoint->toward_ipa) {
if (endpoint->config.qmap && !endpoint->toward_ipa) {
u32 offset;
offset = offsetof(struct rmnet_map_header, pkt_len);
@@ -636,7 +640,7 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
/* Note that HDR_ENDIANNESS indicates big endian header fields */
if (endpoint->data->qmap)
if (endpoint->config.qmap)
val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
iowrite32(val, endpoint->ipa->reg_virt + offset);
@@ -650,8 +654,8 @@ static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
if (endpoint->data->dma_mode) {
enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
if (endpoint->config.dma_mode) {
enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
u32 dma_endpoint_id;
dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;
@@ -737,18 +741,18 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
enum ipa_version version = endpoint->ipa->version;
u32 val = 0;
if (endpoint->data->aggregation) {
if (endpoint->config.aggregation) {
if (!endpoint->toward_ipa) {
const struct ipa_endpoint_rx_data *rx_data;
const struct ipa_endpoint_rx *rx_config;
u32 buffer_size;
bool close_eof;
u32 limit;
rx_data = &endpoint->data->rx;
rx_config = &endpoint->config.rx;
val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
buffer_size = rx_data->buffer_size;
buffer_size = rx_config->buffer_size;
limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD);
val |= aggr_byte_limit_encoded(version, limit);
@@ -757,10 +761,8 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
/* AGGR_PKT_LIMIT is 0 (unlimited) */
close_eof = rx_data->aggr_close_eof;
close_eof = rx_config->aggr_close_eof;
val |= aggr_sw_eof_active_encoded(version, close_eof);
/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
} else {
val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
AGGR_EN_FMASK);
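
As the ipa_aggr_size_kb() name suggests, the byte limit written to the AGGR field is expressed in kilobyte units. A minimal sketch of that kind of conversion (the reserved headroom is an assumption here, parameterized rather than taken from the driver):

#include <linux/sizes.h>        /* SZ_1K */

/* Convert an RX buffer size in bytes to an aggregation byte limit in
 * kilobytes, reserving space so one more full-sized packet can land
 * after the limit has been crossed.
 */
static u32 example_aggr_size_kb(u32 rx_buffer_size, u32 reserved)
{
        return (rx_buffer_size - reserved) / SZ_1K;
}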
@@ -945,7 +947,7 @@ static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
struct ipa *ipa = endpoint->ipa;
u32 val;
val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group);
val = rsrc_grp_encoded(ipa->version, endpoint->config.resource_group);
iowrite32(val, ipa->reg_virt + offset);
}
@@ -958,10 +960,10 @@ static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
return; /* Register not valid for RX endpoints */
/* Low-order byte configures primary packet processing */
val |= u32_encode_bits(endpoint->data->tx.seq_type, SEQ_TYPE_FMASK);
val |= u32_encode_bits(endpoint->config.tx.seq_type, SEQ_TYPE_FMASK);
/* Second byte configures replicated packet processing */
val |= u32_encode_bits(endpoint->data->tx.seq_rep_type,
val |= u32_encode_bits(endpoint->config.tx.seq_rep_type,
SEQ_REP_TYPE_FMASK);
iowrite32(val, endpoint->ipa->reg_virt + offset);
@@ -1019,13 +1021,13 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
if (endpoint->data->status_enable) {
if (endpoint->config.status_enable) {
val |= STATUS_EN_FMASK;
if (endpoint->toward_ipa) {
enum ipa_endpoint_name name;
u32 status_endpoint_id;
name = endpoint->data->tx.status_endpoint;
name = endpoint->config.tx.status_endpoint;
status_endpoint_id = ipa->name_map[name]->endpoint_id;
val |= u32_encode_bits(status_endpoint_id,
@@ -1049,7 +1051,7 @@ static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
u32 len;
int ret;
buffer_size = endpoint->data->rx.buffer_size;
buffer_size = endpoint->config.rx.buffer_size;
page = dev_alloc_pages(get_order(buffer_size));
if (!page)
return -ENOMEM;
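
Note that dev_alloc_pages() takes an allocation order, not a byte count; get_order() rounds the buffer size up to a power-of-2 number of pages, and the free path must use the same order. A small sketch of the pairing (helper names are made up):

#include <linux/mm.h>           /* get_order(), __free_pages() */
#include <linux/skbuff.h>       /* dev_alloc_pages() */

static struct page *example_rx_buffer_alloc(u32 buffer_size)
{
        /* May be a multi-page (compound) allocation */
        return dev_alloc_pages(get_order(buffer_size));
}

static void example_rx_buffer_free(struct page *page, u32 buffer_size)
{
        __free_pages(page, get_order(buffer_size));
}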
@@ -1166,7 +1168,7 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
struct page *page, u32 len)
{
u32 buffer_size = endpoint->data->rx.buffer_size;
u32 buffer_size = endpoint->config.rx.buffer_size;
struct sk_buff *skb;
/* Nothing to do if there's no netdev */
@@ -1273,7 +1275,7 @@ static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
struct page *page, u32 total_len)
{
u32 buffer_size = endpoint->data->rx.buffer_size;
u32 buffer_size = endpoint->config.rx.buffer_size;
void *data = page_address(page) + NET_SKB_PAD;
u32 unused = buffer_size - total_len;
u32 resid = total_len;
@@ -1303,10 +1305,10 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
* And if checksum offload is enabled a trailer containing
* computed checksum information will be appended.
*/
align = endpoint->data->rx.pad_align ? : 1;
align = endpoint->config.rx.pad_align ? : 1;
len = le16_to_cpu(status->pkt_len);
len = sizeof(*status) + ALIGN(len, align);
if (endpoint->data->checksum)
if (endpoint->config.checksum)
len += sizeof(struct rmnet_map_dl_csum_trailer);
if (!ipa_endpoint_status_drop(endpoint, status)) {
@@ -1350,7 +1352,7 @@ static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
/* Parse or build a socket buffer using the actual received length */
page = trans->data;
if (endpoint->data->status_enable)
if (endpoint->config.status_enable)
ipa_endpoint_status_parse(endpoint, page, trans->len);
else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
trans->data = NULL; /* Pages have been consumed */
@@ -1384,7 +1386,7 @@ void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
struct page *page = trans->data;
if (page) {
u32 buffer_size = endpoint->data->rx.buffer_size;
u32 buffer_size = endpoint->config.rx.buffer_size;
__free_pages(page, get_order(buffer_size));
}
@@ -1518,7 +1520,7 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
* All other cases just need to reset the underlying GSI channel.
*/
special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
endpoint->data->aggregation;
endpoint->config.aggregation;
if (special && ipa_endpoint_aggr_active(endpoint))
ret = ipa_endpoint_reset_rx_aggr(endpoint);
else
@@ -1833,7 +1835,7 @@ static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
endpoint->channel_id = data->channel_id;
endpoint->endpoint_id = data->endpoint_id;
endpoint->toward_ipa = data->toward_ipa;
endpoint->data = &data->endpoint.config;
endpoint->config = data->endpoint.config;
ipa->initialized |= BIT(endpoint->endpoint_id);
}
@@ -40,6 +40,68 @@ enum ipa_endpoint_name {
#define IPA_ENDPOINT_MAX 32 /* Max supported by driver */
/**
* struct ipa_endpoint_tx - Endpoint configuration for TX endpoints
* @seq_type: primary packet processing sequencer type
* @seq_rep_type: sequencer type for replication processing
* @status_endpoint: endpoint to which status elements are sent
*
* The @status_endpoint is only valid if the endpoint's @status_enable
* flag is set.
*/
struct ipa_endpoint_tx {
enum ipa_seq_type seq_type;
enum ipa_seq_rep_type seq_rep_type;
enum ipa_endpoint_name status_endpoint;
};
/**
* struct ipa_endpoint_rx - Endpoint configuration for RX endpoints
* @buffer_size: requested receive buffer size (bytes)
* @pad_align: power-of-2 boundary to which packet payload is aligned
* @aggr_close_eof: whether aggregation closes on end-of-frame
*
* With each packet it transfers, the IPA hardware can perform certain
* transformations of its packet data. One of these is adding pad bytes
* to the end of the packet data so the result ends on a power-of-2 boundary.
*
* It is also able to aggregate multiple packets into a single receive buffer.
* Aggregation is "open" while a buffer is being filled, and "closes" when
* certain criteria are met. One of those criteria is the sender indicating
* a "frame" consisting of several transfers has ended.
*/
struct ipa_endpoint_rx {
u32 buffer_size;
u32 pad_align;
bool aggr_close_eof;
};
/**
* struct ipa_endpoint_config - IPA endpoint hardware configuration
* @resource_group: resource group to assign endpoint to
* @checksum: whether checksum offload is enabled
* @qmap: whether endpoint uses QMAP protocol
* @aggregation: whether endpoint supports aggregation
* @status_enable: whether endpoint uses status elements
* @dma_mode: whether endpoint operates in DMA mode
* @dma_endpoint: peer endpoint, if operating in DMA mode
* @tx: TX-specific endpoint information (see above)
* @rx: RX-specific endpoint information (see above)
*/
struct ipa_endpoint_config {
u32 resource_group;
bool checksum;
bool qmap;
bool aggregation;
bool status_enable;
bool dma_mode;
enum ipa_endpoint_name dma_endpoint;
union {
struct ipa_endpoint_tx tx;
struct ipa_endpoint_rx rx;
};
};
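
As a usage sketch, a platform's configuration data might populate these structures for a TX endpoint as follows (field values chosen for illustration, not taken from any real platform's data):

static const struct ipa_endpoint_config example_tx_config = {
        .resource_group         = 0,
        .checksum               = true,
        .qmap                   = true,
        .status_enable          = true,
        .tx = {
                .seq_type        = IPA_SEQ_2_PASS_SKIP_LAST_UC,
                .status_endpoint = IPA_ENDPOINT_MODEM_AP_RX,
        },
};

Because tx and rx share an anonymous union, only the member matching the endpoint's direction is meaningful.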
/**
* enum ipa_replenish_flag: RX buffer replenish flags
*
@@ -60,7 +122,7 @@ enum ipa_replenish_flag {
* @channel_id: GSI channel used by the endpoint
* @endpoint_id: IPA endpoint number
* @toward_ipa: Endpoint direction (true = TX, false = RX)
* @data: Endpoint configuration data
* @config: Default endpoint configuration
* @trans_tre_max: Maximum number of TRE descriptors per transaction
* @evt_ring_id: GSI event ring used by the endpoint
* @netdev: Network device pointer, if endpoint uses one
@@ -74,7 +136,7 @@ struct ipa_endpoint {
u32 channel_id;
u32 endpoint_id;
bool toward_ipa;
const struct ipa_endpoint_config_data *data;
struct ipa_endpoint_config config;
u32 trans_tre_max;
u32 evt_ring_id;
@@ -9,6 +9,8 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_rmnet.h>
#include <linux/etherdevice.h>
#include <net/pkt_sched.h>
#include <linux/pm_runtime.h>
#include <linux/remoteproc/qcom_rproc.h>
@@ -127,7 +129,7 @@ ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
goto err_drop_skb;
endpoint = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX];
if (endpoint->data->qmap && skb->protocol != htons(ETH_P_MAP))
if (endpoint->config.qmap && skb->protocol != htons(ETH_P_MAP))
goto err_drop_skb;
/* The hardware must be powered for us to transmit */
@@ -203,15 +205,20 @@ static const struct net_device_ops ipa_modem_ops = {
static void ipa_modem_netdev_setup(struct net_device *netdev)
{
netdev->netdev_ops = &ipa_modem_ops;
ether_setup(netdev);
/* No header ops (override value set by ether_setup()) */
netdev->header_ops = NULL;
netdev->type = ARPHRD_RAWIP;
netdev->hard_header_len = 0;
netdev->min_header_len = ETH_HLEN;
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = IPA_MTU;
netdev->mtu = netdev->max_mtu;
netdev->addr_len = 0;
netdev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
netdev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
netdev->priv_flags |= IFF_TX_SKB_SHARING;
eth_broadcast_addr(netdev->broadcast);
/* The endpoint is configured for QMAP */
netdev->needed_headroom = sizeof(struct rmnet_map_header);
netdev->needed_tailroom = IPA_NETDEV_TAILROOM;
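
This is the setup callback handed to alloc_netdev(); a sketch of that call (the name format string and zero private-data size are assumptions for illustration):

#include <linux/netdevice.h>

static struct net_device *example_netdev_create(void)
{
        /* "%d" lets the core pick a unit number; the setup callback
         * runs before the device is registered.
         */
        return alloc_netdev(0, "rmnet_ipa%d", NET_NAME_UNKNOWN,
                            ipa_modem_netdev_setup);
}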