Commit dece303f authored by Arnd Bergmann, committed by David S. Miller

net: xgene: move xgene_cle_ptree_ewdn data off stack

The array for initializing the cle is set up on the stack with
almost entirely constant data and then passed to a function that
converts it into HW specific bit patterns. With the latest
addition, the size of this array has grown to the point that
we get a warning about potential stack overflow in allmodconfig
builds:

xgene_enet_cle.c: In function ‘xgene_enet_cle_init’:
xgene_enet_cle.c:836:1: error: the frame size of 1032 bytes is larger than 1024 bytes [-Werror=frame-larger-than=]

Looking a bit deeper at the usage, I noticed that the only modification
of the data is in dead code, as we don't even use the cle module
for phy_mode other than PHY_INTERFACE_MODE_XGMII. This means we
can simply mark the structure constant and access it directly rather
than passing the pointer down through another structure, making
the code more efficient at the same time as avoiding the
warning.

Fixes: a809701f ("drivers: net: xgene: fix: RSS for non-TCP/UDP")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9afd8952
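The pattern the patch applies, in miniature (hypothetical names and types, not this driver's code): a large, almost entirely constant initializer is moved out of the function's stack frame and into .rodata by making it a file-scope static const array that callers reference directly.

#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct entry {				/* hypothetical stand-in for the ewdn nodes */
	int node_type;
	int result_pointer;
};

static int write_table(const struct entry *tbl, size_t n)
{
	(void)tbl; (void)n;		/* stub for the HW conversion/write step */
	return 0;
}

/* Before: the initializer lands in the stack frame; once it grows past
 * about 1 KB, allmodconfig builds fail with -Werror=frame-larger-than=. */
int init_on_stack(void)
{
	struct entry table[64] = {
		{ .node_type = 1, .result_pointer = 0 },
		/* ... many more constant entries ... */
	};

	return write_table(table, ARRAY_SIZE(table));
}

/* After: the table lives in .rodata, the frame stays small, and no
 * run-time copy of the constant data is made. */
static const struct entry init_table[] = {
	{ .node_type = 1, .result_pointer = 0 },
	/* ... many more constant entries ... */
};

int init_from_rodata(void)
{
	return write_table(init_table, ARRAY_SIZE(init_table));
}

Beyond silencing the frame-size warning, this also drops the implicit run-time copy that building the array on the stack required, which is the efficiency gain the commit message mentions.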
@@ -79,10 +79,10 @@ static void xgene_cle_kn_to_hw(struct xgene_cle_ptree_kn *kn, u32 *buf)
}
}
-static void xgene_cle_dn_to_hw(struct xgene_cle_ptree_ewdn *dn,
+static void xgene_cle_dn_to_hw(const struct xgene_cle_ptree_ewdn *dn,
u32 *buf, u32 jb)
{
-struct xgene_cle_ptree_branch *br;
+const struct xgene_cle_ptree_branch *br;
u32 i, j = 0;
u32 npp;
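A side note on the hunk above: once the table becomes const, the qualifier has to propagate through every pointer the data flows through, or GCC warns about discarded qualifiers; that is why both the parameter and the local branch pointer gain const. A minimal sketch with made-up types:

struct node { int branch; };

static const struct node table[] = { { .branch = 0 } };

/* The parameter must be const-qualified to accept &table[i] ... */
static void consume(const struct node *n)
{
	/* ... and so must any local alias, hence the br change above. */
	const struct node *cursor = n;
	(void)cursor;
}

void walk(void)
{
	consume(&table[0]);	/* a non-const parameter here would draw
				 * -Wdiscarded-qualifiers from GCC */
}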
@@ -205,213 +205,7 @@ static int xgene_cle_setup_dbptr(struct xgene_enet_pdata *pdata,
return 0;
}
static int xgene_cle_setup_node(struct xgene_enet_pdata *pdata,
struct xgene_enet_cle *cle)
{
struct xgene_cle_ptree *ptree = &cle->ptree;
struct xgene_cle_ptree_ewdn *dn = ptree->dn;
struct xgene_cle_ptree_kn *kn = ptree->kn;
u32 buf[CLE_DRAM_REGS];
int i, j, ret;
memset(buf, 0, sizeof(buf));
for (i = 0; i < ptree->num_dn; i++) {
xgene_cle_dn_to_hw(&dn[i], buf, cle->jump_bytes);
ret = xgene_cle_dram_wr(cle, buf, 17, i + ptree->start_node,
PTREE_RAM, CLE_CMD_WR);
if (ret)
return ret;
}
/* continue node index for key node */
memset(buf, 0, sizeof(buf));
for (j = i; j < (ptree->num_kn + ptree->num_dn); j++) {
xgene_cle_kn_to_hw(&kn[j - ptree->num_dn], buf);
ret = xgene_cle_dram_wr(cle, buf, 17, j + ptree->start_node,
PTREE_RAM, CLE_CMD_WR);
if (ret)
return ret;
}
return 0;
}
static int xgene_cle_setup_ptree(struct xgene_enet_pdata *pdata,
struct xgene_enet_cle *cle)
{
int ret;
ret = xgene_cle_setup_node(pdata, cle);
if (ret)
return ret;
ret = xgene_cle_setup_dbptr(pdata, cle);
if (ret)
return ret;
xgene_cle_enable_ptree(pdata, cle);
return 0;
}
static void xgene_cle_setup_def_dbptr(struct xgene_enet_pdata *pdata,
struct xgene_enet_cle *enet_cle,
struct xgene_cle_dbptr *dbptr,
u32 index, u8 priority)
{
void __iomem *base = enet_cle->base;
void __iomem *base_addr;
u32 buf[CLE_DRAM_REGS];
u32 def_cls, offset;
u32 i, j;
memset(buf, 0, sizeof(buf));
xgene_cle_dbptr_to_hw(pdata, dbptr, buf);
for (i = 0; i < enet_cle->parsers; i++) {
if (enet_cle->active_parser != PARSER_ALL) {
offset = enet_cle->active_parser *
CLE_PORT_OFFSET;
} else {
offset = i * CLE_PORT_OFFSET;
}
base_addr = base + DFCLSRESDB00 + offset;
for (j = 0; j < 6; j++)
iowrite32(buf[j], base_addr + (j * 4));
def_cls = ((priority & 0x7) << 10) | (index & 0x3ff);
iowrite32(def_cls, base + DFCLSRESDBPTR0 + offset);
}
}
static int xgene_cle_set_rss_sband(struct xgene_enet_cle *cle)
{
u32 idx = CLE_PKTRAM_SIZE / sizeof(u32);
u32 mac_hdr_len = ETH_HLEN;
u32 sband, reg = 0;
u32 ipv4_ihl = 5;
u32 hdr_len;
int ret;
/* Sideband: IPV4/TCP packets */
hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_TCP, hdr_len, &reg);
sband = reg;
/* Sideband: IPv4/UDP packets */
hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
xgene_cle_sband_to_hw(1, XGENE_CLE_IPV4, XGENE_CLE_UDP, hdr_len, &reg);
sband |= (reg << 16);
ret = xgene_cle_dram_wr(cle, &sband, 1, idx, PKT_RAM, CLE_CMD_WR);
if (ret)
return ret;
/* Sideband: IPv4/RAW packets */
hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER,
hdr_len, &reg);
sband = reg;
/* Sideband: Ethernet II/RAW packets */
hdr_len = (mac_hdr_len << 5);
xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER,
hdr_len, &reg);
sband |= (reg << 16);
ret = xgene_cle_dram_wr(cle, &sband, 1, idx + 1, PKT_RAM, CLE_CMD_WR);
if (ret)
return ret;
return 0;
}
static int xgene_cle_set_rss_skeys(struct xgene_enet_cle *cle)
{
u32 secret_key_ipv4[4]; /* 16 Bytes*/
int ret = 0;
get_random_bytes(secret_key_ipv4, 16);
ret = xgene_cle_dram_wr(cle, secret_key_ipv4, 4, 0,
RSS_IPV4_HASH_SKEY, CLE_CMD_WR);
return ret;
}
static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata)
{
u32 fpsel, dstqid, nfpsel, idt_reg, idx;
int i, ret = 0;
u16 pool_id;
for (i = 0; i < XGENE_CLE_IDT_ENTRIES; i++) {
idx = i % pdata->rxq_cnt;
pool_id = pdata->rx_ring[idx]->buf_pool->id;
fpsel = xgene_enet_get_fpsel(pool_id);
dstqid = xgene_enet_dst_ring_num(pdata->rx_ring[idx]);
nfpsel = 0;
if (pdata->rx_ring[idx]->page_pool) {
pool_id = pdata->rx_ring[idx]->page_pool->id;
nfpsel = xgene_enet_get_fpsel(pool_id);
}
idt_reg = 0;
xgene_cle_idt_to_hw(pdata, dstqid, fpsel, nfpsel, &idt_reg);
ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i,
RSS_IDT, CLE_CMD_WR);
if (ret)
return ret;
}
ret = xgene_cle_set_rss_skeys(&pdata->cle);
if (ret)
return ret;
return 0;
}
static int xgene_cle_setup_rss(struct xgene_enet_pdata *pdata)
{
struct xgene_enet_cle *cle = &pdata->cle;
void __iomem *base = cle->base;
u32 offset, val = 0;
int i, ret = 0;
offset = CLE_PORT_OFFSET;
for (i = 0; i < cle->parsers; i++) {
if (cle->active_parser != PARSER_ALL)
offset = cle->active_parser * CLE_PORT_OFFSET;
else
offset = i * CLE_PORT_OFFSET;
/* enable RSS */
val = (RSS_IPV4_12B << 1) | 0x1;
writel(val, base + RSS_CTRL0 + offset);
}
/* setup sideband data */
ret = xgene_cle_set_rss_sband(cle);
if (ret)
return ret;
/* setup indirection table */
ret = xgene_cle_set_rss_idt(pdata);
if (ret)
return ret;
return 0;
}
static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
{
struct xgene_enet_cle *enet_cle = &pdata->cle;
u32 def_qid, def_fpsel, def_nxtfpsel, pool_id;
struct xgene_cle_dbptr dbptr[DB_MAX_PTRS];
struct xgene_cle_ptree_branch *br;
struct xgene_cle_ptree *ptree;
struct xgene_cle_ptree_kn kn;
int ret;
-struct xgene_cle_ptree_ewdn ptree_dn[] = {
+static const struct xgene_cle_ptree_ewdn xgene_init_ptree_dn[] = {
{
/* PKT_TYPE_NODE */
.node_type = EWDN,
@@ -776,24 +570,226 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
}
}
}
-};
+};
static int xgene_cle_setup_node(struct xgene_enet_pdata *pdata,
struct xgene_enet_cle *cle)
{
struct xgene_cle_ptree *ptree = &cle->ptree;
const struct xgene_cle_ptree_ewdn *dn = xgene_init_ptree_dn;
int num_dn = ARRAY_SIZE(xgene_init_ptree_dn);
struct xgene_cle_ptree_kn *kn = ptree->kn;
u32 buf[CLE_DRAM_REGS];
int i, j, ret;
memset(buf, 0, sizeof(buf));
for (i = 0; i < num_dn; i++) {
xgene_cle_dn_to_hw(&dn[i], buf, cle->jump_bytes);
ret = xgene_cle_dram_wr(cle, buf, 17, i + ptree->start_node,
PTREE_RAM, CLE_CMD_WR);
if (ret)
return ret;
}
/* continue node index for key node */
memset(buf, 0, sizeof(buf));
for (j = i; j < (ptree->num_kn + num_dn); j++) {
xgene_cle_kn_to_hw(&kn[j - num_dn], buf);
ret = xgene_cle_dram_wr(cle, buf, 17, j + ptree->start_node,
PTREE_RAM, CLE_CMD_WR);
if (ret)
return ret;
}
return 0;
}
static int xgene_cle_setup_ptree(struct xgene_enet_pdata *pdata,
struct xgene_enet_cle *cle)
{
int ret;
ret = xgene_cle_setup_node(pdata, cle);
if (ret)
return ret;
ret = xgene_cle_setup_dbptr(pdata, cle);
if (ret)
return ret;
xgene_cle_enable_ptree(pdata, cle);
return 0;
}
static void xgene_cle_setup_def_dbptr(struct xgene_enet_pdata *pdata,
struct xgene_enet_cle *enet_cle,
struct xgene_cle_dbptr *dbptr,
u32 index, u8 priority)
{
void __iomem *base = enet_cle->base;
void __iomem *base_addr;
u32 buf[CLE_DRAM_REGS];
u32 def_cls, offset;
u32 i, j;
memset(buf, 0, sizeof(buf));
xgene_cle_dbptr_to_hw(pdata, dbptr, buf);
for (i = 0; i < enet_cle->parsers; i++) {
if (enet_cle->active_parser != PARSER_ALL) {
offset = enet_cle->active_parser *
CLE_PORT_OFFSET;
} else {
offset = i * CLE_PORT_OFFSET;
}
base_addr = base + DFCLSRESDB00 + offset;
for (j = 0; j < 6; j++)
iowrite32(buf[j], base_addr + (j * 4));
def_cls = ((priority & 0x7) << 10) | (index & 0x3ff);
iowrite32(def_cls, base + DFCLSRESDBPTR0 + offset);
}
}
static int xgene_cle_set_rss_sband(struct xgene_enet_cle *cle)
{
u32 idx = CLE_PKTRAM_SIZE / sizeof(u32);
u32 mac_hdr_len = ETH_HLEN;
u32 sband, reg = 0;
u32 ipv4_ihl = 5;
u32 hdr_len;
int ret;
/* Sideband: IPV4/TCP packets */
hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_TCP, hdr_len, &reg);
sband = reg;
/* Sideband: IPv4/UDP packets */
hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
xgene_cle_sband_to_hw(1, XGENE_CLE_IPV4, XGENE_CLE_UDP, hdr_len, &reg);
sband |= (reg << 16);
ret = xgene_cle_dram_wr(cle, &sband, 1, idx, PKT_RAM, CLE_CMD_WR);
if (ret)
return ret;
/* Sideband: IPv4/RAW packets */
hdr_len = (mac_hdr_len << 5) | ipv4_ihl;
xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER,
hdr_len, &reg);
sband = reg;
/* Sideband: Ethernet II/RAW packets */
hdr_len = (mac_hdr_len << 5);
xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER,
hdr_len, &reg);
sband |= (reg << 16);
ret = xgene_cle_dram_wr(cle, &sband, 1, idx + 1, PKT_RAM, CLE_CMD_WR);
if (ret)
return ret;
return 0;
}
static int xgene_cle_set_rss_skeys(struct xgene_enet_cle *cle)
{
u32 secret_key_ipv4[4]; /* 16 Bytes*/
int ret = 0;
get_random_bytes(secret_key_ipv4, 16);
ret = xgene_cle_dram_wr(cle, secret_key_ipv4, 4, 0,
RSS_IPV4_HASH_SKEY, CLE_CMD_WR);
return ret;
}
static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata)
{
u32 fpsel, dstqid, nfpsel, idt_reg, idx;
int i, ret = 0;
u16 pool_id;
for (i = 0; i < XGENE_CLE_IDT_ENTRIES; i++) {
idx = i % pdata->rxq_cnt;
pool_id = pdata->rx_ring[idx]->buf_pool->id;
fpsel = xgene_enet_get_fpsel(pool_id);
dstqid = xgene_enet_dst_ring_num(pdata->rx_ring[idx]);
nfpsel = 0;
if (pdata->rx_ring[idx]->page_pool) {
pool_id = pdata->rx_ring[idx]->page_pool->id;
nfpsel = xgene_enet_get_fpsel(pool_id);
}
idt_reg = 0;
xgene_cle_idt_to_hw(pdata, dstqid, fpsel, nfpsel, &idt_reg);
ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i,
RSS_IDT, CLE_CMD_WR);
if (ret)
return ret;
}
ret = xgene_cle_set_rss_skeys(&pdata->cle);
if (ret)
return ret;
return 0;
}
static int xgene_cle_setup_rss(struct xgene_enet_pdata *pdata)
{
struct xgene_enet_cle *cle = &pdata->cle;
void __iomem *base = cle->base;
u32 offset, val = 0;
int i, ret = 0;
offset = CLE_PORT_OFFSET;
for (i = 0; i < cle->parsers; i++) {
if (cle->active_parser != PARSER_ALL)
offset = cle->active_parser * CLE_PORT_OFFSET;
else
offset = i * CLE_PORT_OFFSET;
/* enable RSS */
val = (RSS_IPV4_12B << 1) | 0x1;
writel(val, base + RSS_CTRL0 + offset);
}
/* setup sideband data */
ret = xgene_cle_set_rss_sband(cle);
if (ret)
return ret;
/* setup indirection table */
ret = xgene_cle_set_rss_idt(pdata);
if (ret)
return ret;
return 0;
}
static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
{
struct xgene_enet_cle *enet_cle = &pdata->cle;
u32 def_qid, def_fpsel, def_nxtfpsel, pool_id;
struct xgene_cle_dbptr dbptr[DB_MAX_PTRS];
struct xgene_cle_ptree *ptree;
struct xgene_cle_ptree_kn kn;
int ret;
+if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
+return -EINVAL;
ptree = &enet_cle->ptree;
ptree->start_pkt = 12; /* Ethertype */
-if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
ret = xgene_cle_setup_rss(pdata);
if (ret) {
netdev_err(pdata->ndev, "RSS initialization failed\n");
return ret;
}
-} else {
-br = &ptree_dn[PKT_PROT_NODE].branch[0];
-br->valid = 0;
-br->next_packet_pointer = 260;
-br->next_node = LAST_NODE;
-br->data = 0x0000;
-br->mask = 0xffff;
-}
def_qid = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
pool_id = pdata->rx_ring[0]->buf_pool->id;
@@ -825,10 +821,8 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
kn.key[0].priority = 0;
kn.key[0].result_pointer = DB_RES_ACCEPT;
-ptree->dn = ptree_dn;
ptree->kn = &kn;
ptree->dbptr = dbptr;
-ptree->num_dn = MAX_NODES;
ptree->num_kn = 1;
ptree->num_dbptr = DB_MAX_PTRS;
...
@@ -278,10 +278,8 @@ struct xgene_cle_dbptr {
};
struct xgene_cle_ptree {
-struct xgene_cle_ptree_ewdn *dn;
struct xgene_cle_ptree_kn *kn;
struct xgene_cle_dbptr *dbptr;
-u32 num_dn;
u32 num_kn;
u32 num_dbptr;
u32 start_node;
...
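The header change above follows directly from the move: with the node table now a file-scope const array, struct xgene_cle_ptree no longer needs to carry a pointer (dn) and element count (num_dn) for it. A sketch of the before/after shape, again with hypothetical names:

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct entry { int node_type; };

static const struct entry init_table[] = {
	{ .node_type = 1 },
	/* ... */
};

/* Before: the descriptor carried the table by reference. */
struct config_old {
	struct entry *dn;	/* pointed at the on-stack array */
	unsigned int num_dn;	/* element count passed alongside it */
	unsigned int start_node;
};

/* After: only the fields the table doesn't cover remain ... */
struct config_new {
	unsigned int start_node;
};

unsigned int table_entries(void)
{
	/* ... and the count is derived with ARRAY_SIZE at the use site. */
	return ARRAY_SIZE(init_table);
}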