Commit cbd31253 authored by Jakub Kicinski

Merge branch 'net-ipa-a-few-small-fixes'

Alex Elder says:

====================
net: ipa: a few small fixes

This series contains a few minor bug fixes and improvements.

The first patch removes an apparently unnecessary restriction that
results in an error on a 32-bit ARM build.
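
The restriction in question is the "size % 8" check visible in the
gsi_trans_pool_init() hunks further down.  As a rough stand-alone
illustration (the structure below is made up, not taken from the
driver), an entry size obtained from sizeof() can be a multiple of 8
on a 64-bit build yet fail that check on 32-bit ARM, where pointers
are only 4 bytes wide:

	/* Hypothetical example; "pool_entry" is not a driver structure. */
	#include <stdio.h>

	struct pool_entry {
		void *owner;	/* 8 bytes on 64-bit builds, 4 on 32-bit ARM */
	};

	int main(void)
	{
		size_t size = sizeof(struct pool_entry);

		/* Prints "8 ... yes" on 64-bit and "4 ... no" on 32-bit;
		 * the latter is what the old "if (!size || size % 8)"
		 * test rejects.
		 */
		printf("%zu bytes, multiple of 8: %s\n",
		       size, size % 8 ? "no" : "yes");

		return 0;
	}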

The second makes a definition used for SDM845 match what is used in
the downstream code.

The third just ensures two netdev pointers are only non-null when
valid.
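
The pointers are the modem TX/RX endpoint netdev back-pointers managed
in ipa_modem_start() and ipa_modem_stop().  A condensed sketch of the
ordering the patch enforces (simplified from the hunks further down,
not a complete excerpt):

	ret = register_netdev(netdev);
	if (!ret) {
		/* Publish the pointers only once the netdev is registered */
		ipa->modem_netdev = netdev;
		ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
		ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
	} else {
		free_netdev(netdev);	/* never exposed via those pointers */
	}

and on the stop path the pointers are cleared before the netdev is
unregistered and freed:

	ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL;
	ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL;
	ipa->modem_netdev = NULL;
	unregister_netdev(netdev);
	free_netdev(netdev);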

The fourth simplifies a little code, knowing that a called function
never returns an error.

The fifth and sixth just remove some empty placeholder functions.

And the last patch fixes a comment, makes a function private, and
removes an unnecessary double-negation of a Boolean variable.  This
patch produces a warning from checkpatch, indicating that a pair of
parentheses is unnecessary.  I agree with that advice, but it
conflicts with a suggestion from the compiler.  I left the "problem"
in place to avoid the compiler warning.
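
For reference, the comparison involved is the direction check in
ipa_endpoint_config() shown in the hunks below.  A stand-alone sketch
of the equivalence and of the checkpatch/compiler tension (the values
here are arbitrary):

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int endpoint_id = 3, rx_base = 8;
		bool toward_ipa = true;

		/* Kept form: checkpatch flags the parentheses as
		 * unnecessary, but dropping them draws the compiler's
		 * "suggest parentheses" advice mentioned above.
		 */
		if ((endpoint_id < rx_base) != toward_ipa)
			puts("wrong direction");

		/* Equivalent to the old form: !! of a bool is a no-op. */
		if ((endpoint_id < rx_base) != !!toward_ipa)
			puts("wrong direction");

		return 0;
	}
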
====================

Link: https://lore.kernel.org/r/20210409180722.1176868-1-elder@linaro.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 8859a44e 602a1c76
......@@ -198,7 +198,7 @@ static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
}
/* Turn off all GSI interrupts initially */
/* Turn off all GSI interrupts initially; there is no gsi_irq_teardown() */
static void gsi_irq_setup(struct gsi *gsi)
{
/* Disable all interrupt types */
......@@ -217,12 +217,6 @@ static void gsi_irq_setup(struct gsi *gsi)
iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}
/* Turn off all GSI interrupts when we're all done */
static void gsi_irq_teardown(struct gsi *gsi)
{
/* Nothing to do */
}
/* Event ring commands are performed one at a time. Their completion
* is signaled by the event ring control GSI interrupt type, which is
* only enabled when we issue an event ring command. Only the event
......@@ -786,7 +780,7 @@ static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
}
}
/* Program a channel for use */
/* Program a channel for use; there is no gsi_channel_deprogram() */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
......@@ -874,11 +868,6 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
/* All done! */
}
static void gsi_channel_deprogram(struct gsi_channel *channel)
{
/* Nothing to do */
}
static int __gsi_channel_start(struct gsi_channel *channel, bool start)
{
struct gsi *gsi = channel->gsi;
......@@ -1623,18 +1612,6 @@ static u32 gsi_event_bitmap_init(u32 evt_ring_max)
return event_bitmap;
}
/* Setup function for event rings */
static void gsi_evt_ring_setup(struct gsi *gsi)
{
/* Nothing to do */
}
/* Inverse of gsi_evt_ring_setup() */
static void gsi_evt_ring_teardown(struct gsi *gsi)
{
/* Nothing to do */
}
/* Setup function for a single channel */
static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
{
......@@ -1684,7 +1661,6 @@ static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
netif_napi_del(&channel->napi);
gsi_channel_deprogram(channel);
gsi_channel_de_alloc_command(gsi, channel_id);
gsi_evt_ring_reset_command(gsi, evt_ring_id);
gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
......@@ -1759,7 +1735,6 @@ static int gsi_channel_setup(struct gsi *gsi)
u32 mask;
int ret;
gsi_evt_ring_setup(gsi);
gsi_irq_enable(gsi);
mutex_lock(&gsi->mutex);
......@@ -1819,7 +1794,6 @@ static int gsi_channel_setup(struct gsi *gsi)
mutex_unlock(&gsi->mutex);
gsi_irq_disable(gsi);
gsi_evt_ring_teardown(gsi);
return ret;
}
......@@ -1848,7 +1822,6 @@ static void gsi_channel_teardown(struct gsi *gsi)
mutex_unlock(&gsi->mutex);
gsi_irq_disable(gsi);
gsi_evt_ring_teardown(gsi);
}
/* Setup function for GSI. GSI firmware must be loaded and initialized */
......@@ -1856,7 +1829,6 @@ int gsi_setup(struct gsi *gsi)
{
struct device *dev = gsi->dev;
u32 val;
int ret;
/* Here is where we first touch the GSI hardware */
val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
......@@ -1865,7 +1837,7 @@ int gsi_setup(struct gsi *gsi)
return -EIO;
}
gsi_irq_setup(gsi);
gsi_irq_setup(gsi); /* No matching teardown required */
val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
......@@ -1899,18 +1871,13 @@ int gsi_setup(struct gsi *gsi)
/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
ret = gsi_channel_setup(gsi);
if (ret)
gsi_irq_teardown(gsi);
return ret;
return gsi_channel_setup(gsi);
}
/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
gsi_channel_teardown(gsi);
gsi_irq_teardown(gsi);
}
/* Initialize a channel's event ring */
......@@ -1952,7 +1919,7 @@ static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
gsi_evt_ring_id_free(gsi, evt_ring_id);
}
/* Init function for event rings */
/* Init function for event rings; there is no gsi_evt_ring_exit() */
static void gsi_evt_ring_init(struct gsi *gsi)
{
u32 evt_ring_id = 0;
......@@ -1964,12 +1931,6 @@ static void gsi_evt_ring_init(struct gsi *gsi)
while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
}
/* Inverse of gsi_evt_ring_init() */
static void gsi_evt_ring_exit(struct gsi *gsi)
{
/* Nothing to do */
}
static bool gsi_channel_data_valid(struct gsi *gsi,
const struct ipa_gsi_endpoint_data *data)
{
......@@ -2114,7 +2075,7 @@ static int gsi_channel_init(struct gsi *gsi, u32 count,
/* IPA v4.2 requires the AP to allocate channels for the modem */
modem_alloc = gsi->version == IPA_VERSION_4_2;
gsi_evt_ring_init(gsi);
gsi_evt_ring_init(gsi); /* No matching exit required */
/* The endpoint data array is indexed by endpoint name */
for (i = 0; i < count; i++) {
......@@ -2148,7 +2109,6 @@ static int gsi_channel_init(struct gsi *gsi, u32 count,
}
gsi_channel_exit_one(&gsi->channel[data->channel_id]);
}
gsi_evt_ring_exit(gsi);
return ret;
}
......@@ -2162,8 +2122,6 @@ static void gsi_channel_exit(struct gsi *gsi)
gsi_channel_exit_one(&gsi->channel[channel_id]);
while (channel_id--);
gsi->modem_channel_bitmap = 0;
gsi_evt_ring_exit(gsi);
}
/* Init function for GSI. GSI hardware does not need to be "ready" */
......
......@@ -91,7 +91,7 @@ int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
void *virt;
#ifdef IPA_VALIDATE
if (!size || size % 8)
if (!size)
return -EINVAL;
if (count < max_alloc)
return -EINVAL;
......@@ -141,7 +141,7 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
void *virt;
#ifdef IPA_VALIDATE
if (!size || size % 8)
if (!size)
return -EINVAL;
if (count < max_alloc)
return -EINVAL;
......
......@@ -116,6 +116,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.status_enable = true,
.tx = {
.seq_type = IPA_SEQ_2_PASS_SKIP_LAST_UC,
.seq_rep_type = IPA_SEQ_REP_DMA_PARSER,
.status_endpoint =
IPA_ENDPOINT_MODEM_AP_RX,
},
......
......@@ -397,7 +397,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
/* We need one command per modem TX endpoint. We can get an upper
* bound on that by assuming all initialized endpoints are modem->IPA.
* That won't happen, and we could be more precise, but this is fine
* for now. We need to end the transaction with a "tag process."
* for now. End the transaction with commands to clear the pipeline.
*/
count = hweight32(initialized) + ipa_cmd_pipeline_clear_count();
trans = ipa_cmd_trans_alloc(ipa, count);
......@@ -1755,7 +1755,7 @@ int ipa_endpoint_config(struct ipa *ipa)
/* Make sure it's pointing in the right direction */
endpoint = &ipa->endpoint[endpoint_id];
if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
if ((endpoint_id < rx_base) != endpoint->toward_ipa) {
dev_err(dev, "endpoint id %u wrong direction\n",
endpoint_id);
ret = -EINVAL;
......@@ -1791,7 +1791,7 @@ static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
ipa->initialized |= BIT(endpoint->endpoint_id);
}
void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);
......
......@@ -87,8 +87,6 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa);
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb);
void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint);
int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint);
void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint);
......
......@@ -147,13 +147,13 @@ int ipa_setup(struct ipa *ipa)
if (ret)
goto err_endpoint_teardown;
ret = ipa_mem_setup(ipa);
ret = ipa_mem_setup(ipa); /* No matching teardown required */
if (ret)
goto err_command_disable;
ret = ipa_table_setup(ipa);
ret = ipa_table_setup(ipa); /* No matching teardown required */
if (ret)
goto err_mem_teardown;
goto err_command_disable;
/* Enable the exception handling endpoint, and tell the hardware
* to use it by default.
......@@ -161,7 +161,7 @@ int ipa_setup(struct ipa *ipa)
exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
ret = ipa_endpoint_enable_one(exception_endpoint);
if (ret)
goto err_table_teardown;
goto err_command_disable;
ipa_endpoint_default_route_set(ipa, exception_endpoint->endpoint_id);
......@@ -179,10 +179,6 @@ int ipa_setup(struct ipa *ipa)
err_default_route_clear:
ipa_endpoint_default_route_clear(ipa);
ipa_endpoint_disable_one(exception_endpoint);
err_table_teardown:
ipa_table_teardown(ipa);
err_mem_teardown:
ipa_mem_teardown(ipa);
err_command_disable:
ipa_endpoint_disable_one(command_endpoint);
err_endpoint_teardown:
......@@ -211,8 +207,6 @@ static void ipa_teardown(struct ipa *ipa)
ipa_endpoint_default_route_clear(ipa);
exception_endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
ipa_endpoint_disable_one(exception_endpoint);
ipa_table_teardown(ipa);
ipa_mem_teardown(ipa);
command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
ipa_endpoint_disable_one(command_endpoint);
ipa_endpoint_teardown(ipa);
......@@ -480,23 +474,20 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
if (ret)
goto err_endpoint_deconfig;
ipa_table_config(ipa);
ipa_table_config(ipa); /* No deconfig required */
/* Assign resource limitation to each group */
/* Assign resource limitation to each group; no deconfig required */
ret = ipa_resource_config(ipa, data->resource_data);
if (ret)
goto err_table_deconfig;
goto err_mem_deconfig;
ret = ipa_modem_config(ipa);
if (ret)
goto err_resource_deconfig;
goto err_mem_deconfig;
return 0;
err_resource_deconfig:
ipa_resource_deconfig(ipa);
err_table_deconfig:
ipa_table_deconfig(ipa);
err_mem_deconfig:
ipa_mem_deconfig(ipa);
err_endpoint_deconfig:
ipa_endpoint_deconfig(ipa);
......@@ -514,8 +505,6 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
static void ipa_deconfig(struct ipa *ipa)
{
ipa_modem_deconfig(ipa);
ipa_resource_deconfig(ipa);
ipa_table_deconfig(ipa);
ipa_mem_deconfig(ipa);
ipa_endpoint_deconfig(ipa);
ipa_hardware_deconfig(ipa);
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2019-2020 Linaro Ltd.
* Copyright (C) 2019-2021 Linaro Ltd.
*/
#include <linux/types.h>
......@@ -53,6 +53,8 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
* The AP informs the modem where its portions of memory are located
* in a QMI exchange that occurs at modem startup.
*
* There is no need for a matching ipa_mem_teardown() function.
*
* Return: 0 if successful, or a negative error code
*/
int ipa_mem_setup(struct ipa *ipa)
......@@ -97,11 +99,6 @@ int ipa_mem_setup(struct ipa *ipa)
return 0;
}
void ipa_mem_teardown(struct ipa *ipa)
{
/* Nothing to do */
}
#ifdef IPA_VALIDATE
static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
......
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2019-2020 Linaro Ltd.
* Copyright (C) 2019-2021 Linaro Ltd.
*/
#ifndef _IPA_MEM_H_
#define _IPA_MEM_H_
......@@ -88,8 +88,7 @@ struct ipa_mem {
int ipa_mem_config(struct ipa *ipa);
void ipa_mem_deconfig(struct ipa *ipa);
int ipa_mem_setup(struct ipa *ipa);
void ipa_mem_teardown(struct ipa *ipa);
int ipa_mem_setup(struct ipa *ipa); /* No ipa_mem_teardown() needed */
int ipa_mem_zero_modem(struct ipa *ipa);
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2018-2020 Linaro Ltd.
* Copyright (C) 2018-2021 Linaro Ltd.
*/
#include <linux/errno.h>
......@@ -213,18 +213,18 @@ int ipa_modem_start(struct ipa *ipa)
goto out_set_state;
}
ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
SET_NETDEV_DEV(netdev, &ipa->pdev->dev);
priv = netdev_priv(netdev);
priv->ipa = ipa;
ret = register_netdev(netdev);
if (ret)
free_netdev(netdev);
else
if (!ret) {
ipa->modem_netdev = netdev;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
} else {
free_netdev(netdev);
}
out_set_state:
if (ret)
......@@ -240,7 +240,6 @@ int ipa_modem_stop(struct ipa *ipa)
{
struct net_device *netdev = ipa->modem_netdev;
enum ipa_modem_state state;
int ret;
/* Only attempt to stop the modem if it's running */
state = atomic_cmpxchg(&ipa->modem_state, IPA_MODEM_STATE_RUNNING,
......@@ -257,27 +256,20 @@ int ipa_modem_stop(struct ipa *ipa)
/* Prevent the modem from triggering a call to ipa_setup() */
ipa_smp2p_disable(ipa);
if (netdev) {
/* Stop the queue and disable the endpoints if it's open */
ret = ipa_stop(netdev);
if (ret)
goto out_set_state;
if (netdev) {
(void)ipa_stop(netdev);
ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = NULL;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = NULL;
ipa->modem_netdev = NULL;
unregister_netdev(netdev);
free_netdev(netdev);
} else {
ret = 0;
}
out_set_state:
if (ret)
atomic_set(&ipa->modem_state, IPA_MODEM_STATE_RUNNING);
else
atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED);
smp_mb__after_atomic();
return ret;
return 0;
}
/* Treat a "clean" modem stop the same as a crash */
......
......@@ -158,7 +158,7 @@ static void ipa_resource_config_dst(struct ipa *ipa, u32 resource_type,
ipa_resource_config_common(ipa, offset, &resource->limits[6], ylimits);
}
/* Configure resources */
/* Configure resources; there is no ipa_resource_deconfig() */
int ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data)
{
u32 i;
......@@ -174,9 +174,3 @@ int ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data)
return 0;
}
/* Inverse of ipa_resource_config() */
void ipa_resource_deconfig(struct ipa *ipa)
{
/* Nothing to do */
}
......@@ -14,14 +14,10 @@ struct ipa_resource_data;
* @ipa: IPA pointer
* @data: IPA resource configuration data
*
* There is no need for a matching ipa_resource_deconfig() function.
*
* Return: true if all regions are valid, false otherwise
*/
int ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data);
/**
* ipa_resource_deconfig() - Inverse of ipa_resource_config()
* @ipa: IPA pointer
*/
void ipa_resource_deconfig(struct ipa *ipa);
#endif /* _IPA_RESOURCE_H_ */
......@@ -497,11 +497,6 @@ int ipa_table_setup(struct ipa *ipa)
return 0;
}
void ipa_table_teardown(struct ipa *ipa)
{
/* Nothing to do */ /* XXX Maybe reset the tables? */
}
/**
* ipa_filter_tuple_zero() - Zero an endpoint's hashed filter tuple
* @endpoint: Endpoint whose filter hash tuple should be zeroed
......@@ -525,6 +520,7 @@ static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
/* Configure a hashed filter table; there is no ipa_filter_deconfig() */
static void ipa_filter_config(struct ipa *ipa, bool modem)
{
enum gsi_ee_id ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
......@@ -545,11 +541,6 @@ static void ipa_filter_config(struct ipa *ipa, bool modem)
}
}
static void ipa_filter_deconfig(struct ipa *ipa, bool modem)
{
/* Nothing to do */
}
static bool ipa_route_id_modem(u32 route_id)
{
return route_id >= IPA_ROUTE_MODEM_MIN &&
......@@ -576,6 +567,7 @@ static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
iowrite32(val, ipa->reg_virt + offset);
}
/* Configure a hashed route table; there is no ipa_route_deconfig() */
static void ipa_route_config(struct ipa *ipa, bool modem)
{
u32 route_id;
......@@ -588,11 +580,7 @@ static void ipa_route_config(struct ipa *ipa, bool modem)
ipa_route_tuple_zero(ipa, route_id);
}
static void ipa_route_deconfig(struct ipa *ipa, bool modem)
{
/* Nothing to do */
}
/* Configure a filter and route tables; there is no ipa_table_deconfig() */
void ipa_table_config(struct ipa *ipa)
{
ipa_filter_config(ipa, false);
......@@ -601,14 +589,6 @@ void ipa_table_config(struct ipa *ipa)
ipa_route_config(ipa, true);
}
void ipa_table_deconfig(struct ipa *ipa)
{
ipa_route_deconfig(ipa, true);
ipa_route_deconfig(ipa, false);
ipa_filter_deconfig(ipa, true);
ipa_filter_deconfig(ipa, false);
}
/*
* Initialize a coherent DMA allocation containing initialized filter and
* route table data. This is used when initializing or resetting the IPA
......
......@@ -74,27 +74,19 @@ int ipa_table_hash_flush(struct ipa *ipa);
/**
* ipa_table_setup() - Set up filter and route tables
* @ipa: IPA pointer
*
* There is no need for a matching ipa_table_teardown() function.
*/
int ipa_table_setup(struct ipa *ipa);
/**
* ipa_table_teardown() - Inverse of ipa_table_setup()
* @ipa: IPA pointer
*/
void ipa_table_teardown(struct ipa *ipa);
/**
* ipa_table_config() - Configure filter and route tables
* @ipa: IPA pointer
*
* There is no need for a matching ipa_table_deconfig() function.
*/
void ipa_table_config(struct ipa *ipa);
/**
* ipa_table_deconfig() - Inverse of ipa_table_config()
* @ipa: IPA pointer
*/
void ipa_table_deconfig(struct ipa *ipa);
/**
* ipa_table_init() - Do early initialization of filter and route tables
* @ipa: IPA pointer
......