Commit 115a731e authored by David S. Miller

Merge branch 'dpaa_eth-next' of git://git.freescale.com/ppc/upstream/linux

Madalin Bucur says:

====================
QorIQ DPAA 1 updates

This patch set introduces a series of fixes and features to the DPAA 1
drivers. Besides activating hardware Rx checksum offloading, four traffic
classes are added for Tx traffic prioritisation.

changes from v1: added patch to enable context-A stashing
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5425077d 7fe1e290
@@ -137,6 +137,13 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
/* L4 Type field: TCP */
#define FM_L4_PARSE_RESULT_TCP 0x20

+/* FD status field indicating whether the FM Parser has attempted to validate
+ * the L4 csum of the frame.
+ * Note that having this bit set doesn't necessarily imply that the checksum
+ * is valid. One would have to check the parse results to find that out.
+ */
+#define FM_FD_STAT_L4CV 0x00000004
+
#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
@@ -235,6 +242,7 @@ static int dpaa_netdev_init(struct net_device *net_dev,
	 * For conformity, we'll still declare GSO explicitly.
	 */
	net_dev->features |= NETIF_F_GSO;
+	net_dev->features |= NETIF_F_RXCSUM;

	net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	/* we do not want shared skbs on TX */
@@ -334,6 +342,41 @@ static void dpaa_get_stats64(struct net_device *net_dev,
	}
}

+static int dpaa_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
+			 struct tc_to_netdev *tc)
+{
+	struct dpaa_priv *priv = netdev_priv(net_dev);
+	int i;
+
+	if (tc->type != TC_SETUP_MQPRIO)
+		return -EINVAL;
+
+	if (tc->tc == priv->num_tc)
+		return 0;
+
+	if (!tc->tc) {
+		netdev_reset_tc(net_dev);
+		goto out;
+	}
+
+	if (tc->tc > DPAA_TC_NUM) {
+		netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
+			   DPAA_TC_NUM);
+		return -EINVAL;
+	}
+
+	netdev_set_num_tc(net_dev, tc->tc);
+
+	for (i = 0; i < tc->tc; i++)
+		netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
+				    i * DPAA_TC_TXQ_NUM);
+
+out:
+	priv->num_tc = tc->tc ? tc->tc : 1;
+	netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+	return 0;
+}
+
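dpaa_setup_tc() above becomes the driver's .ndo_setup_tc hook (see the dpaa_ops change further down), so the four hardware traffic classes can be exercised from userspace through the standard mqprio qdisc. A minimal sketch, assuming an interface named eth0 (illustrative only):

	tc qdisc add dev eth0 root handle 1: mqprio num_tc 4 map 0 1 2 3 0 0 0 0 0 0 0 0 0 0 0 0 hw 1

With hw 1 the queue layout is left to the driver, which gives each class DPAA_TC_TXQ_NUM (i.e. NR_CPUS) contiguous Tx queues via netdev_set_tc_queue().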
static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
{
	struct platform_device *of_dev;
@@ -557,16 +600,18 @@ static void dpaa_bps_free(struct dpaa_priv *priv)
/* Use multiple WQs for FQ assignment:
 * - Tx Confirmation queues go to WQ1.
- * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
- *   to be scheduled, in case there are many more FQs in WQ3).
- * - Rx Default and Tx queues go to WQ3 (no differentiation between
- *   Rx and Tx traffic).
+ * - Rx Error and Tx Error queues go to WQ5 (giving them a better chance
+ *   to be scheduled, in case there are many more FQs in WQ6).
+ * - Rx Default goes to WQ6.
+ * - Tx queues go to different WQs depending on their priority. Equal
+ *   chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and
+ *   WQ0 (highest priority).
 * This ensures that Tx-confirmed buffers are timely released. In particular,
 * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
 * are greatly outnumbered by other FQs in the system, while
 * dequeue scheduling is round-robin.
 */
-static inline void dpaa_assign_wq(struct dpaa_fq *fq)
+static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
{
	switch (fq->fq_type) {
	case FQ_TYPE_TX_CONFIRM:
@@ -575,11 +620,33 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq)
		break;
	case FQ_TYPE_RX_ERROR:
	case FQ_TYPE_TX_ERROR:
-		fq->wq = 2;
+		fq->wq = 5;
		break;
	case FQ_TYPE_RX_DEFAULT:
+		fq->wq = 6;
+		break;
	case FQ_TYPE_TX:
-		fq->wq = 3;
+		switch (idx / DPAA_TC_TXQ_NUM) {
+		case 0:
+			/* Low priority (best effort) */
+			fq->wq = 6;
+			break;
+		case 1:
+			/* Medium priority */
+			fq->wq = 2;
+			break;
+		case 2:
+			/* High priority */
+			fq->wq = 1;
+			break;
+		case 3:
+			/* Very high priority */
+			fq->wq = 0;
+			break;
+		default:
+			WARN(1, "Too many TX FQs: more than %d!\n",
+			     DPAA_ETH_TXQ_NUM);
+		}
		break;
	default:
		WARN(1, "Invalid FQ type %d for FQID %d!\n",
@@ -607,7 +674,7 @@ static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
	}

	for (i = 0; i < count; i++)
-		dpaa_assign_wq(dpaa_fq + i);
+		dpaa_assign_wq(dpaa_fq + i, i);

	return dpaa_fq;
}
@@ -985,7 +1052,8 @@ static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
	/* Initialization common to all ingress queues */
	if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
		initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
-		initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
+		initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE |
+						  QM_FQCTRL_CTXASTASHING);
		initfq.fqd.context_a.stashing.exclusive =
			QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
			QM_STASHING_EXCL_ANNOTATION;
@@ -1055,9 +1123,9 @@ static int dpaa_fq_free(struct device *dev, struct list_head *list)
	return err;
}

-static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
+static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
				 struct dpaa_fq *defq,
				 struct dpaa_buffer_layout *buf_layout)
{
	struct fman_buffer_prefix_content buf_prefix_content;
	struct fman_port_params params;
@@ -1076,23 +1144,29 @@ static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
	params.specific_params.non_rx_params.dflt_fqid = defq->fqid;

	err = fman_port_config(port, &params);
-	if (err)
+	if (err) {
		pr_err("%s: fman_port_config failed\n", __func__);
+		return err;
+	}

	err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
-	if (err)
+	if (err) {
		pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
		       __func__);
+		return err;
+	}

	err = fman_port_init(port);
	if (err)
		pr_err("%s: fm_port_init failed\n", __func__);
+
+	return err;
}
-static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
+static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
				 size_t count, struct dpaa_fq *errq,
				 struct dpaa_fq *defq,
				 struct dpaa_buffer_layout *buf_layout)
{
	struct fman_buffer_prefix_content buf_prefix_content;
	struct fman_port_rx_params *rx_p;
@@ -1120,32 +1194,44 @@ static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
	}

	err = fman_port_config(port, &params);
-	if (err)
+	if (err) {
		pr_err("%s: fman_port_config failed\n", __func__);
+		return err;
+	}

	err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
-	if (err)
+	if (err) {
		pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
		       __func__);
+		return err;
+	}

	err = fman_port_init(port);
	if (err)
		pr_err("%s: fm_port_init failed\n", __func__);
+
+	return err;
}
-static void dpaa_eth_init_ports(struct mac_device *mac_dev,
+static int dpaa_eth_init_ports(struct mac_device *mac_dev,
			       struct dpaa_bp **bps, size_t count,
			       struct fm_port_fqs *port_fqs,
			       struct dpaa_buffer_layout *buf_layout,
			       struct device *dev)
{
	struct fman_port *rxport = mac_dev->port[RX];
	struct fman_port *txport = mac_dev->port[TX];
+	int err;

-	dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
-			      port_fqs->tx_defq, &buf_layout[TX]);
-	dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
-			      port_fqs->rx_defq, &buf_layout[RX]);
+	err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
+				    port_fqs->tx_defq, &buf_layout[TX]);
+	if (err)
+		return err;
+
+	err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
+				    port_fqs->rx_defq, &buf_layout[RX]);
+
+	return err;
}
static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
@@ -1526,6 +1612,23 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
	return skb;
}

+static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
+{
+	/* The parser has run and performed L4 checksum validation.
+	 * We know there were no parser errors (and implicitly no
+	 * L4 csum error), otherwise we wouldn't be here.
+	 */
+	if ((priv->net_dev->features & NETIF_F_RXCSUM) &&
+	    (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV))
+		return CHECKSUM_UNNECESSARY;
+
+	/* We're here because either the parser didn't run or the L4 checksum
+	 * was not verified. This may include the case of a UDP frame with
+	 * checksum zero or an L4 proto other than TCP/UDP
+	 */
+	return CHECKSUM_NONE;
+}
+
/* Build a linear skb around the received buffer.
 * We are guaranteed there is enough room at the end of the data buffer to
 * accommodate the shared info area of the skb.
@@ -1556,7 +1659,7 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
	skb_reserve(skb, fd_off);
	skb_put(skb, qm_fd_get_length(fd));

-	skb->ip_summed = CHECKSUM_NONE;
+	skb->ip_summed = rx_csum_offload(priv, fd);

	return skb;
@@ -1616,7 +1719,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
		if (WARN_ON(unlikely(!skb)))
			goto free_buffers;

-		skb->ip_summed = CHECKSUM_NONE;
+		skb->ip_summed = rx_csum_offload(priv, fd);

		/* Make sure forwarded skbs will have enough space
		 * on Tx, if extra headers are added.
@@ -2093,7 +2196,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
	dma_addr_t addr = qm_fd_addr(fd);
	enum qm_fd_format fd_format;
	struct net_device *net_dev;
-	u32 fd_status = fd->status;
+	u32 fd_status;
	struct dpaa_bp *dpaa_bp;
	struct dpaa_priv *priv;
	unsigned int skb_len;
@@ -2350,6 +2453,7 @@ static const struct net_device_ops dpaa_ops = {
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = dpaa_set_rx_mode,
	.ndo_do_ioctl = dpaa_ioctl,
+	.ndo_setup_tc = dpaa_setup_tc,
};
static int dpaa_napi_add(struct net_device *net_dev)
@@ -2624,8 +2728,10 @@ static int dpaa_eth_probe(struct platform_device *pdev)
	priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);

	/* All real interfaces need their ports initialized */
-	dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
-			    &priv->buf_layout[0], dev);
+	err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
+				  &priv->buf_layout[0], dev);
+	if (err)
+		goto init_ports_failed;
	priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
	if (!priv->percpu_priv) {
@@ -2638,6 +2744,9 @@ static int dpaa_eth_probe(struct platform_device *pdev)
		memset(percpu_priv, 0, sizeof(*percpu_priv));
	}

+	priv->num_tc = 1;
+	netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+
	/* Initialize NAPI */
	err = dpaa_napi_add(net_dev);
	if (err < 0)
@@ -2658,6 +2767,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
napi_add_failed:
	dpaa_napi_del(net_dev);
alloc_percpu_failed:
+init_ports_failed:
	dpaa_fq_free(dev, &priv->dpaa_fq_list);
fq_alloc_failed:
	qman_delete_cgr_safe(&priv->ingress_cgr);
...
@@ -39,7 +39,12 @@
#include "mac.h"
#include "dpaa_eth_trace.h"

-#define DPAA_ETH_TXQ_NUM NR_CPUS
+/* Number of prioritised traffic classes */
+#define DPAA_TC_NUM 4
+/* Number of Tx queues per traffic class */
+#define DPAA_TC_TXQ_NUM NR_CPUS
+/* Total number of Tx queues */
+#define DPAA_ETH_TXQ_NUM (DPAA_TC_NUM * DPAA_TC_TXQ_NUM)

#define DPAA_BPS_NUM 3 /* number of bpools per interface */
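To make the sizing concrete (a worked example, not from the patch): on a hypothetical 8-core SoC, DPAA_TC_TXQ_NUM evaluates to 8 and DPAA_ETH_TXQ_NUM to 4 * 8 = 32 Tx frame queues. In dpaa_assign_wq() above, a Tx FQ with index idx then selects its class as idx / 8, so queues 0-7 land on WQ6 (best effort), 8-15 on WQ2, 16-23 on WQ1 and 24-31 on WQ0.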
@@ -152,6 +157,7 @@ struct dpaa_priv {
	u16 channel;
	struct list_head dpaa_fq_list;

+	u8 num_tc;
	u32 msg_enable; /* net_device message level */

	struct {
...
@@ -59,6 +59,7 @@
#define DMA_OFFSET 0x000C2000
#define FPM_OFFSET 0x000C3000
#define IMEM_OFFSET 0x000C4000
+#define HWP_OFFSET 0x000C7000
#define CGP_OFFSET 0x000DB000

/* Exceptions bit map */
@@ -218,6 +219,9 @@
#define QMI_GS_HALT_NOT_BUSY 0x00000002

+/* HWP defines */
+#define HWP_RPIMAC_PEN 0x00000001
+
/* IRAM defines */
#define IRAM_IADD_AIE 0x80000000
#define IRAM_READY 0x80000000
@@ -475,6 +479,12 @@ struct fman_dma_regs {
	u32 res00e0[0x400 - 56];
};

+struct fman_hwp_regs {
+	u32 res0000[0x844 / 4];	/* 0x000..0x843 */
+	u32 fmprrpimac;	/* FM Parser Internal memory access control */
+	u32 res[(0x1000 - 0x848) / 4];	/* 0x848..0xFFF */
+};
+
/* Structure that holds current FMan state.
 * Used for saving run time information.
 */
@@ -606,6 +616,7 @@ struct fman {
	struct fman_bmi_regs __iomem *bmi_regs;
	struct fman_qmi_regs __iomem *qmi_regs;
	struct fman_dma_regs __iomem *dma_regs;
+	struct fman_hwp_regs __iomem *hwp_regs;
	fman_exceptions_cb *exception_cb;
	fman_bus_error_cb *bus_error_cb;
	/* Spinlock for FMan use */
@@ -999,6 +1010,12 @@ static void qmi_init(struct fman_qmi_regs __iomem *qmi_rg,
	iowrite32be(tmp_reg, &qmi_rg->fmqm_ien);
}

+static void hwp_init(struct fman_hwp_regs __iomem *hwp_rg)
+{
+	/* enable HW Parser */
+	iowrite32be(HWP_RPIMAC_PEN, &hwp_rg->fmprrpimac);
+}
+
static int enable(struct fman *fman, struct fman_cfg *cfg)
{
	u32 cfg_reg = 0;
@@ -1195,7 +1212,7 @@ static int fill_soc_specific_params(struct fman_state_struct *state)
		state->max_num_of_open_dmas = 32;
		state->fm_port_num_of_cg = 256;
		state->num_of_rx_ports = 6;
-		state->total_fifo_size = 122 * 1024;
+		state->total_fifo_size = 136 * 1024;
		break;

	case 2:
@@ -1793,6 +1810,7 @@ static int fman_config(struct fman *fman)
	fman->bmi_regs = base_addr + BMI_OFFSET;
	fman->qmi_regs = base_addr + QMI_OFFSET;
	fman->dma_regs = base_addr + DMA_OFFSET;
+	fman->hwp_regs = base_addr + HWP_OFFSET;
	fman->base_addr = base_addr;

	spin_lock_init(&fman->spinlock);
@@ -2062,6 +2080,9 @@ static int fman_init(struct fman *fman)
	/* Init QMI Registers */
	qmi_init(fman->qmi_regs, fman->cfg);

+	/* Init HW Parser */
+	hwp_init(fman->hwp_regs);
+
	err = enable(fman, cfg);
	if (err != 0)
		return err;
...
@@ -134,14 +134,14 @@ enum fman_exceptions {
struct fman_prs_result {
	u8 lpid; /* Logical port id */
	u8 shimr; /* Shim header result */
-	u16 l2r; /* Layer 2 result */
-	u16 l3r; /* Layer 3 result */
+	__be16 l2r; /* Layer 2 result */
+	__be16 l3r; /* Layer 3 result */
	u8 l4r; /* Layer 4 result */
	u8 cplan; /* Classification plan id */
-	u16 nxthdr; /* Next Header */
-	u16 cksum; /* Running-sum */
+	__be16 nxthdr; /* Next Header */
+	__be16 cksum; /* Running-sum */
	/* Flags&fragment-offset field of the last IP-header */
-	u16 flags_frag_off;
+	__be16 flags_frag_off;
	/* Routing type field of a IPV6 routing extension header */
	u8 route_type;
	/* Routing Extension Header Present; last bit is IP valid */
...
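The switch to __be16 documents that the FMan writes its parse results in big-endian byte order regardless of the core's endianness, so consumers must convert on access. A minimal sketch under that assumption (the helper name is illustrative, not part of the patch):

static inline u16 dpaa_prs_nxthdr(const struct fman_prs_result *prs)
{
	/* parse results are big-endian in memory; convert for the CPU */
	return be16_to_cpu(prs->nxthdr);
}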
@@ -62,6 +62,7 @@
#define BMI_PORT_REGS_OFFSET 0
#define QMI_PORT_REGS_OFFSET 0x400
+#define HWP_PORT_REGS_OFFSET 0x800

/* Default values */
#define DFLT_PORT_BUFFER_PREFIX_CONTEXT_DATA_ALIGN \
@@ -182,7 +183,7 @@
#define NIA_ENG_BMI 0x00500000
#define NIA_ENG_QMI_ENQ 0x00540000
#define NIA_ENG_QMI_DEQ 0x00580000
+#define NIA_ENG_HWP 0x00440000
#define NIA_BMI_AC_ENQ_FRAME 0x00000002
#define NIA_BMI_AC_TX_RELEASE 0x000002C0
#define NIA_BMI_AC_RELEASE 0x000000C0
@@ -317,6 +318,19 @@ struct fman_port_qmi_regs {
	u32 fmqm_pndcc; /* PortID n Dequeue Confirm Counter */
};

+#define HWP_HXS_COUNT 16
+#define HWP_HXS_PHE_REPORT 0x00000800
+#define HWP_HXS_PCAC_PSTAT 0x00000100
+#define HWP_HXS_PCAC_PSTOP 0x00000001
+struct fman_port_hwp_regs {
+	struct {
+		u32 ssa; /* Soft Sequence Attachment */
+		u32 lcv; /* Line-up Enable Confirmation Mask */
+	} pmda[HWP_HXS_COUNT]; /* Parse Memory Direct Access Registers */
+	u32 reserved080[(0x3f8 - 0x080) / 4]; /* (0x080-0x3f7) */
+	u32 fmpr_pcac; /* Configuration Access Control */
+};
+
/* QMI dequeue prefetch modes */
enum fman_port_deq_prefetch {
	FMAN_PORT_DEQ_NO_PREFETCH, /* No prefetch mode */
@@ -436,6 +450,7 @@ struct fman_port {
	union fman_port_bmi_regs __iomem *bmi_regs;
	struct fman_port_qmi_regs __iomem *qmi_regs;
+	struct fman_port_hwp_regs __iomem *hwp_regs;

	struct fman_sp_buffer_offsets buffer_offsets;
@@ -521,9 +536,12 @@ static int init_bmi_rx(struct fman_port *port)

	/* NIA */
	tmp = (u32)cfg->rx_fd_bits << BMI_NEXT_ENG_FD_BITS_SHIFT;
-	tmp |= NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME;
+	tmp |= NIA_ENG_HWP;
	iowrite32be(tmp, &regs->fmbm_rfne);

+	/* Parser Next Engine NIA */
+	iowrite32be(NIA_ENG_BMI | NIA_BMI_AC_ENQ_FRAME, &regs->fmbm_rfpne);
+
	/* Enqueue NIA */
	iowrite32be(NIA_ENG_QMI_ENQ | NIA_ORDER_RESTOR, &regs->fmbm_rfene);
@@ -665,6 +683,50 @@ static int init_qmi(struct fman_port *port)
	return 0;
}

+static void stop_port_hwp(struct fman_port *port)
+{
+	struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
+	int cnt = 100;
+
+	iowrite32be(HWP_HXS_PCAC_PSTOP, &regs->fmpr_pcac);
+	while (cnt-- > 0 &&
+	       (ioread32be(&regs->fmpr_pcac) & HWP_HXS_PCAC_PSTAT))
+		udelay(10);
+	/* the post-decrement leaves cnt at -1 on timeout */
+	if (cnt < 0)
+		pr_err("Timeout stopping HW Parser\n");
+}
+
+static void start_port_hwp(struct fman_port *port)
+{
+	struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
+	int cnt = 100;
+
+	iowrite32be(0, &regs->fmpr_pcac);
+	while (cnt-- > 0 &&
+	       !(ioread32be(&regs->fmpr_pcac) & HWP_HXS_PCAC_PSTAT))
+		udelay(10);
+	if (cnt < 0)
+		pr_err("Timeout starting HW Parser\n");
+}
+
+static void init_hwp(struct fman_port *port)
+{
+	struct fman_port_hwp_regs __iomem *regs = port->hwp_regs;
+	int i;
+
+	stop_port_hwp(port);
+
+	for (i = 0; i < HWP_HXS_COUNT; i++) {
+		/* enable HXS error reporting into FD[STATUS] PHE */
+		iowrite32be(0x00000000, &regs->pmda[i].ssa);
+		iowrite32be(0xffffffff, &regs->pmda[i].lcv);
+	}
+
+	start_port_hwp(port);
+}
+
static int init(struct fman_port *port)
{
	int err;

@@ -673,6 +735,8 @@ static int init(struct fman_port *port)
	switch (port->port_type) {
	case FMAN_PORT_TYPE_RX:
		err = init_bmi_rx(port);
+		if (!err)
+			init_hwp(port);
		break;
	case FMAN_PORT_TYPE_TX:
		err = init_bmi_tx(port);
@@ -686,7 +750,8 @@ static int init(struct fman_port *port)
	/* Init QMI registers */
	err = init_qmi(port);
-	return err;
+	if (err)
+		return err;

	return 0;
}
@@ -1247,7 +1312,7 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
	/* Allocate the FM driver's parameters structure */
	port->cfg = kzalloc(sizeof(*port->cfg), GFP_KERNEL);
	if (!port->cfg)
-		goto err_params;
+		return -EINVAL;
	/* Initialize FM port parameters which will be kept by the driver */
	port->port_type = port->dts_params.type;

@@ -1276,6 +1341,7 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
	/* set memory map pointers */
	port->bmi_regs = base_addr + BMI_PORT_REGS_OFFSET;
	port->qmi_regs = base_addr + QMI_PORT_REGS_OFFSET;
+	port->hwp_regs = base_addr + HWP_PORT_REGS_OFFSET;

	port->max_frame_length = DFLT_PORT_MAX_FRAME_LENGTH;
	/* resource distribution. */
@@ -1327,8 +1393,6 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params)
err_port_cfg:
	kfree(port->cfg);
-err_params:
-	kfree(port);
	return -EINVAL;
}
EXPORT_SYMBOL(fman_port_config);
...