Commit 70098289 authored by David S. Miller

Merge branch 'DPAA-fixes'

Madalin Bucur says:

====================
DPAA fixes

A couple of fixes for the DPAA drivers: one addresses an issue where short
UDP or TCP frames (padded up to the minimum frame size) were marked by the
FMan hardware as having a bad checksum and dropped, the other a problem
with the buffer used for the scatter-gather table being smaller than the
hardware requires.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 484c016d 595e802e
@@ -125,6 +125,9 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
 /* Default alignment for start of data in an Rx FD */
 #define DPAA_FD_DATA_ALIGNMENT  16
 
+/* The DPAA requires 256 bytes reserved and mapped for the SGT */
+#define DPAA_SGT_SIZE 256
+
 /* Values for the L3R field of the FM Parse Results
  */
 /* L3 Type field: First IP Present IPv4 */
@@ -1617,8 +1620,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 
 	if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
 		nr_frags = skb_shinfo(skb)->nr_frags;
-		dma_unmap_single(dev, addr, qm_fd_get_offset(fd) +
-				 sizeof(struct qm_sg_entry) * (1 + nr_frags),
+		dma_unmap_single(dev, addr,
+				 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
 				 dma_dir);
 
 		/* The sgt buffer has been allocated with netdev_alloc_frag(),
@@ -1903,8 +1906,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	void *sgt_buf;
 
 	/* get a page frag to store the SGTable */
-	sz = SKB_DATA_ALIGN(priv->tx_headroom +
-			    sizeof(struct qm_sg_entry) * (1 + nr_frags));
+	sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
 	sgt_buf = netdev_alloc_frag(sz);
 	if (unlikely(!sgt_buf)) {
 		netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
@@ -1972,9 +1974,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	skbh = (struct sk_buff **)buffer_start;
 	*skbh = skb;
 
-	addr = dma_map_single(dev, buffer_start, priv->tx_headroom +
-			      sizeof(struct qm_sg_entry) * (1 + nr_frags),
-			      dma_dir);
+	addr = dma_map_single(dev, buffer_start,
+			      priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
 	if (unlikely(dma_mapping_error(dev, addr))) {
 		dev_err(dev, "DMA mapping failed");
 		err = -EINVAL;
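The hunks above are from the DPAA Ethernet driver (dpaa_eth.c): instead of sizing the
scatter-gather table buffer per fragment, the driver now reserves, maps and unmaps the
fixed 256 bytes the FMan expects, which also keeps the netdev_alloc_frag(),
dma_map_single() and dma_unmap_single() lengths consistent. A minimal stand-alone
sketch of the size arithmetic (not kernel code; the 16-byte scatter-gather entry size
and the 0-17 fragment range are assumptions used only for illustration):

/* Stand-alone sketch: compares the old per-fragment SGT sizing with the
 * fixed 256-byte reservation the hardware requires.  QM_SG_ENTRY_SIZE is
 * an assumed stand-in for sizeof(struct qm_sg_entry).
 */
#include <stdio.h>

#define DPAA_SGT_SIZE     256  /* reservation required by the hardware */
#define QM_SG_ENTRY_SIZE   16  /* assumed scatter-gather entry size     */

int main(void)
{
	for (int nr_frags = 0; nr_frags <= 17; nr_frags++) {
		unsigned int old_sz = QM_SG_ENTRY_SIZE * (1 + nr_frags);

		printf("nr_frags=%2d  old=%3u  new=%3d%s\n",
		       nr_frags, old_sz, DPAA_SGT_SIZE,
		       old_sz < DPAA_SGT_SIZE ?
		       "  (old reservation smaller than required)" : "");
	}
	return 0;
}

For small fragment counts the old computation reserves far less than 256 bytes, which
is the shortfall the commit message refers to.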
@@ -324,6 +324,10 @@ struct fman_port_qmi_regs {
 #define HWP_HXS_PHE_REPORT 0x00000800
 #define HWP_HXS_PCAC_PSTAT 0x00000100
 #define HWP_HXS_PCAC_PSTOP 0x00000001
+#define HWP_HXS_TCP_OFFSET 0xA
+#define HWP_HXS_UDP_OFFSET 0xB
+#define HWP_HXS_SH_PAD_REM 0x80000000
+
 struct fman_port_hwp_regs {
 	struct {
 		u32 ssa;	/* Soft Sequence Attachment */
@@ -728,6 +732,10 @@ static void init_hwp(struct fman_port *port)
 		iowrite32be(0xffffffff, &regs->pmda[i].lcv);
 	}
 
+	/* Short packet padding removal from checksum calculation */
+	iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_TCP_OFFSET].ssa);
+	iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_UDP_OFFSET].ssa);
+
 	start_port_hwp(port);
 }
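These fman_port.c hunks set the padding-removal bit in the soft sequence attachment
register of the TCP and UDP hard parser stations, so the minimum-frame-size padding on
short frames is no longer fed into the checksum calculation. As a rough illustration of
the failure mode, here is a stand-alone one's complement (RFC 1071 style) checksum
sketch, the kind of sum TCP and UDP use: if the verifier sums a longer range than the
sender checksummed and any of the extra bytes are non-zero, verification fails on a
perfectly valid frame. The payload bytes, lengths and the 0xA5 pad pattern below are
made up for the example and are not a claim about the exact FMan behaviour:

/* Stand-alone sketch: Internet checksum over a short payload, with and
 * without trailing pad bytes included in the summed range.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t csum16(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;

	for (size_t i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)buf[i] << 8 | buf[i + 1];
	if (len & 1)
		sum += (uint32_t)buf[len - 1] << 8;	/* pad odd length */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold carries */
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t frame[32] = { 0x45, 0x00, 0x11, 0x22, 0x33, 0x44 };
	size_t real_len = 6, padded_len = 18;	/* padded to a minimum size */
	uint16_t sent = csum16(frame, real_len);

	/* pretend the padding is not all zeroes */
	memset(frame + real_len, 0xA5, padded_len - real_len);

	printf("checksum over real payload   : 0x%04x\n", csum16(frame, real_len));
	printf("checksum over padded payload : 0x%04x (sender computed 0x%04x)\n",
	       csum16(frame, padded_len), sent);
	return 0;
}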