Commit 5cbf20c7 authored by Sergei Shtylyov, committed by David S. Miller

sh_eth: fix 16-bit descriptor field access endianness too

Commit 1299653a ("sh_eth: fix descriptor access endianness") only
addressed the 32-bit buffer address field byte-swapping, but the driver
still accesses 16-bit frame/buffer length descriptor fields without the
necessary byte-swapping -- which affects big-endian kernels.
In order to be able to use {cpu|edmac}_to_{edmac|cpu}(), we need to declare
the RX/TX descriptor word 1 as a 32-bit field and use shifts/masking to
access the 16-bit subfields (which gets rid of the ugly #ifdef'ery too)...
Signed-off-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ce8c839b
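
The core of the change is easier to see outside the driver: descriptor word 1 becomes a single 32-bit value that is byte-swapped as a whole (which is what cpu_to_edmac()/edmac_to_cpu() do when CPU and EDMAC byte order differ), and the two 16-bit length subfields are packed and extracted with shifts and masks instead of an endian-dependent pair of u16 struct members. Below is a minimal stand-alone sketch of that idea, not driver code: the swap32() helper, the main() harness and the sample lengths (1536, 60) are hypothetical stand-ins; only the RD_RFL/RD_RBL mask values mirror the patch.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative masks matching the new RD_LEN_BIT values */
#define RD_RFL 0x0000ffffu	/* receive frame  length (low  16 bits) */
#define RD_RBL 0xffff0000u	/* receive buffer length (high 16 bits) */

/* Hypothetical stand-in for cpu_to_edmac()/edmac_to_cpu(): swap a whole
 * 32-bit descriptor word.  The driver only swaps when CPU and EDMAC byte
 * order differ; here we always swap to model the mismatched case.
 */
static uint32_t swap32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
	       ((x & 0x0000ff00u) << 8) | ((x & 0x000000ffu) << 24);
}

int main(void)
{
	uint32_t buf_len = 1536;
	uint32_t word1;

	/* CPU side: put the buffer length in the upper 16 bits, then
	 * convert the whole 32-bit word to EDMAC byte order in one go.
	 */
	word1 = swap32(buf_len << 16);

	/* Simulate the EDMAC writing a 60-byte frame length into the
	 * low 16 bits of the word, in its own byte order.
	 */
	word1 |= swap32(60);

	/* CPU side again: one swap back, then plain shifts/masks; no
	 * endianness-dependent u16 struct layout is needed anymore.
	 */
	printf("buffer length: %" PRIu32 "\n", (swap32(word1) & RD_RBL) >> 16);
	printf("frame  length: %" PRIu32 "\n", swap32(word1) & RD_RFL);
	return 0;
}

When byte orders mismatch, reading these lengths through unswapped 16-bit struct members (the old layout) returns byte-swapped values; treating word 1 as 32 bits lets the existing per-word conversion helpers cover the length fields too.
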
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1167,6 +1167,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
 	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
 	dma_addr_t dma_addr;
+	u32 buf_len;
 
 	mdp->cur_rx = 0;
 	mdp->cur_tx = 0;
@@ -1187,9 +1188,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		/* RX descriptor */
 		rxdesc = &mdp->rx_ring[i];
 		/* The size of the buffer is a multiple of 32 bytes. */
-		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
-		dma_addr = dma_map_single(&ndev->dev, skb->data,
-					  rxdesc->buffer_length,
+		buf_len = ALIGN(mdp->rx_buf_sz, 32);
+		rxdesc->len = cpu_to_edmac(mdp, buf_len << 16);
+		dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
 					  DMA_FROM_DEVICE);
 		if (dma_mapping_error(&ndev->dev, dma_addr)) {
 			kfree_skb(skb);
@@ -1220,7 +1221,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		mdp->tx_skbuff[i] = NULL;
 		txdesc = &mdp->tx_ring[i];
 		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
-		txdesc->buffer_length = 0;
+		txdesc->len = cpu_to_edmac(mdp, 0);
 		if (i == 0) {
 			/* Tx descriptor address set */
 			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
@@ -1429,7 +1430,8 @@ static int sh_eth_txfree(struct net_device *ndev)
 		if (mdp->tx_skbuff[entry]) {
 			dma_unmap_single(&ndev->dev,
 					 edmac_to_cpu(mdp, txdesc->addr),
-					 txdesc->buffer_length, DMA_TO_DEVICE);
+					 edmac_to_cpu(mdp, txdesc->len) >> 16,
+					 DMA_TO_DEVICE);
 			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
 			mdp->tx_skbuff[entry] = NULL;
 			free_num++;
@@ -1439,7 +1441,7 @@ static int sh_eth_txfree(struct net_device *ndev)
 			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
 
 		ndev->stats.tx_packets++;
-		ndev->stats.tx_bytes += txdesc->buffer_length;
+		ndev->stats.tx_bytes += edmac_to_cpu(mdp, txdesc->len) >> 16;
 	}
 	return free_num;
 }
@@ -1458,6 +1460,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 	u32 desc_status;
 	int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
 	dma_addr_t dma_addr;
+	u32 buf_len;
 
 	boguscnt = min(boguscnt, *quota);
 	limit = boguscnt;
@@ -1466,7 +1469,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 		/* RACT bit must be checked before all the following reads */
 		dma_rmb();
 		desc_status = edmac_to_cpu(mdp, rxdesc->status);
-		pkt_len = rxdesc->frame_length;
+		pkt_len = edmac_to_cpu(mdp, rxdesc->len) & RD_RFL;
 
 		if (--boguscnt < 0)
 			break;
@@ -1532,7 +1535,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 		entry = mdp->dirty_rx % mdp->num_rx_ring;
 		rxdesc = &mdp->rx_ring[entry];
 		/* The size of the buffer is 32 byte boundary. */
-		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
+		buf_len = ALIGN(mdp->rx_buf_sz, 32);
+		rxdesc->len = cpu_to_edmac(mdp, buf_len << 16);
 
 		if (mdp->rx_skbuff[entry] == NULL) {
 			skb = netdev_alloc_skb(ndev, skbuff_size);
@@ -1540,8 +1544,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 				break;	/* Better luck next round. */
 			sh_eth_set_receive_align(skb);
 			dma_addr = dma_map_single(&ndev->dev, skb->data,
-						  rxdesc->buffer_length,
-						  DMA_FROM_DEVICE);
+						  buf_len, DMA_FROM_DEVICE);
 			if (dma_mapping_error(&ndev->dev, dma_addr)) {
 				kfree_skb(skb);
 				break;
@@ -2407,7 +2410,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		return NETDEV_TX_OK;
 	}
 	txdesc->addr = cpu_to_edmac(mdp, dma_addr);
-	txdesc->buffer_length = skb->len;
+	txdesc->len = cpu_to_edmac(mdp, skb->len << 16);
 
 	dma_wmb(); /* TACT bit must be set after all the above writes */
 	if (entry >= mdp->num_tx_ring - 1)
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -283,7 +283,7 @@ enum DMAC_IM_BIT {
 	DMAC_M_RINT1 = 0x00000001,
 };
 
-/* Receive descriptor bit */
+/* Receive descriptor 0 bits */
 enum RD_STS_BIT {
 	RD_RACT = 0x80000000, RD_RDLE = 0x40000000,
 	RD_RFP1 = 0x20000000, RD_RFP0 = 0x10000000,
@@ -298,6 +298,12 @@ enum RD_STS_BIT {
 #define RDFEND	RD_RFP0
 #define RD_RFP	(RD_RFP1|RD_RFP0)
 
+/* Receive descriptor 1 bits */
+enum RD_LEN_BIT {
+	RD_RFL = 0x0000ffff,	/* receive frame length */
+	RD_RBL = 0xffff0000,	/* receive buffer length */
+};
+
 /* FCFTR */
 enum FCFTR_BIT {
 	FCFTR_RFF2 = 0x00040000, FCFTR_RFF1 = 0x00020000,
@@ -307,7 +313,7 @@ enum FCFTR_BIT {
 #define DEFAULT_FIFO_F_D_RFF	(FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0)
 #define DEFAULT_FIFO_F_D_RFD	(FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0)
 
-/* Transmit descriptor bit */
+/* Transmit descriptor 0 bits */
 enum TD_STS_BIT {
 	TD_TACT = 0x80000000, TD_TDLE = 0x40000000,
 	TD_TFP1 = 0x20000000, TD_TFP0 = 0x10000000,
@@ -317,6 +323,11 @@ enum TD_STS_BIT {
 #define TDFEND	TD_TFP0
 #define TD_TFP	(TD_TFP1|TD_TFP0)
 
+/* Transmit descriptor 1 bits */
+enum TD_LEN_BIT {
+	TD_TBL = 0xffff0000,	/* transmit buffer length */
+};
+
 /* RMCR */
 enum RMCR_BIT {
 	RMCR_RNC = 0x00000001,
@@ -425,15 +436,9 @@ enum TSU_FWSLC_BIT {
  */
 struct sh_eth_txdesc {
 	u32 status;		/* TD0 */
-#if defined(__LITTLE_ENDIAN)
-	u16 pad0;		/* TD1 */
-	u16 buffer_length;	/* TD1 */
-#else
-	u16 buffer_length;	/* TD1 */
-	u16 pad0;		/* TD1 */
-#endif
+	u32 len;		/* TD1 */
 	u32 addr;		/* TD2 */
-	u32 pad1;		/* padding data */
+	u32 pad0;		/* padding data */
 } __aligned(2) __packed;
 
 /* The sh ether Rx buffer descriptors.
@@ -441,13 +446,7 @@ struct sh_eth_txdesc {
  */
struct sh_eth_rxdesc {
 	u32 status;		/* RD0 */
-#if defined(__LITTLE_ENDIAN)
-	u16 frame_length;	/* RD1 */
-	u16 buffer_length;	/* RD1 */
-#else
-	u16 buffer_length;	/* RD1 */
-	u16 frame_length;	/* RD1 */
-#endif
+	u32 len;		/* RD1 */
 	u32 addr;		/* RD2 */
 	u32 pad0;		/* padding data */
 } __aligned(2) __packed;