Commit 970efef4 authored by David S. Miller

Merge branch 'fec-next'

Lothar Waßmann says:

====================
net: fec: assorted cleanup patches

This patch series is a followup to:
<1415350967-2238-1-git-send-email-LW@KARO-electronics.de>
[PATCHv4 1/1] net: fec: fix regression on i.MX28 introduced by rx_copybreak support
to apply the cleanup patches that were originally sent along with the
bugfix patch.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9a9f9dd7 c20e599b
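The recurring change in the diff below is that the driver stops calling platform_get_device_id() in every function that needs to test a hardware quirk; fec_probe() now copies the per-SoC driver_data bits once into the new fep->quirks field, and all other paths test that cached word. The stand-alone sketch that follows only illustrates this caching pattern; the QUIRK_* values, struct example_priv, and example_probe() are invented for the example and are not the driver's real definitions.

    #include <stdio.h>
    #include <stdint.h>

    /* illustrative quirk bits, loosely mirroring the FEC_QUIRK_* flags in the diff */
    #define QUIRK_ENET_MAC   (1u << 0)
    #define QUIRK_HAS_AVB    (1u << 1)
    #define QUIRK_SWAP_FRAME (1u << 2)

    struct example_priv {
        uint32_t quirks;    /* cached once at probe time */
    };

    /* probe-time: copy the per-SoC driver data once */
    static void example_probe(struct example_priv *priv, uint32_t driver_data)
    {
        priv->quirks = driver_data;
    }

    int main(void)
    {
        struct example_priv priv;

        example_probe(&priv, QUIRK_ENET_MAC | QUIRK_HAS_AVB);

        /* hot-path code now tests the cached copy directly */
        if (priv.quirks & QUIRK_HAS_AVB)
            printf("AVB-capable: tag descriptors with the queue's frame type\n");
        if (!(priv.quirks & QUIRK_SWAP_FRAME))
            printf("no RX/TX byte swapping required\n");
        return 0;
    }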
@@ -38,9 +38,9 @@
 #define FEC_ADDR_LOW 0x0e4 /* Low 32bits MAC address */
 #define FEC_ADDR_HIGH 0x0e8 /* High 16bits MAC address */
 #define FEC_OPD 0x0ec /* Opcode + Pause duration */
-#define FEC_TXIC0 0xF0 /* Tx Interrupt Coalescing for ring 0 */
-#define FEC_TXIC1 0xF4 /* Tx Interrupt Coalescing for ring 1 */
-#define FEC_TXIC2 0xF8 /* Tx Interrupt Coalescing for ring 2 */
+#define FEC_TXIC0 0x0f0 /* Tx Interrupt Coalescing for ring 0 */
+#define FEC_TXIC1 0x0f4 /* Tx Interrupt Coalescing for ring 1 */
+#define FEC_TXIC2 0x0f8 /* Tx Interrupt Coalescing for ring 2 */
 #define FEC_RXIC0 0x100 /* Rx Interrupt Coalescing for ring 0 */
 #define FEC_RXIC1 0x104 /* Rx Interrupt Coalescing for ring 1 */
 #define FEC_RXIC2 0x108 /* Rx Interrupt Coalescing for ring 2 */
@@ -62,7 +62,7 @@
 #define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */
 #define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */
 #define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */
-#define FEC_RACC 0x1C4 /* Receive Accelerator function */
+#define FEC_RACC 0x1c4 /* Receive Accelerator function */
 #define FEC_RCMR_1 0x1c8 /* Receive classification match ring 1 */
 #define FEC_RCMR_2 0x1cc /* Receive classification match ring 2 */
 #define FEC_DMA_CFG_1 0x1d8 /* DMA class configuration for ring 1 */
@@ -82,57 +82,57 @@
 #define RMON_T_DROP 0x200 /* Count of frames not cntd correctly */
 #define RMON_T_PACKETS 0x204 /* RMON TX packet count */
 #define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */
-#define RMON_T_MC_PKT 0x20C /* RMON TX multicast pkts */
+#define RMON_T_MC_PKT 0x20c /* RMON TX multicast pkts */
 #define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */
 #define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */
 #define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */
-#define RMON_T_FRAG 0x21C /* RMON TX pkts < 64 bytes, bad CRC */
+#define RMON_T_FRAG 0x21c /* RMON TX pkts < 64 bytes, bad CRC */
 #define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */
 #define RMON_T_COL 0x224 /* RMON TX collision count */
 #define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */
-#define RMON_T_P65TO127 0x22C /* RMON TX 65 to 127 byte pkts */
+#define RMON_T_P65TO127 0x22c /* RMON TX 65 to 127 byte pkts */
 #define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */
 #define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */
 #define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */
-#define RMON_T_P1024TO2047 0x23C /* RMON TX 1024 to 2047 byte pkts */
+#define RMON_T_P1024TO2047 0x23c /* RMON TX 1024 to 2047 byte pkts */
 #define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */
 #define RMON_T_OCTETS 0x244 /* RMON TX octets */
 #define IEEE_T_DROP 0x248 /* Count of frames not counted crtly */
-#define IEEE_T_FRAME_OK 0x24C /* Frames tx'd OK */
+#define IEEE_T_FRAME_OK 0x24c /* Frames tx'd OK */
 #define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */
 #define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
 #define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
-#define IEEE_T_LCOL 0x25C /* Frames tx'd with late collision */
+#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */
 #define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */
 #define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
 #define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
-#define IEEE_T_SQE 0x26C /* Frames tx'd with SQE err */
+#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */
 #define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */
 #define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */
 #define RMON_R_PACKETS 0x284 /* RMON RX packet count */
 #define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */
-#define RMON_R_MC_PKT 0x28C /* RMON RX multicast pkts */
+#define RMON_R_MC_PKT 0x28c /* RMON RX multicast pkts */
 #define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */
 #define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */
 #define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */
-#define RMON_R_FRAG 0x29C /* RMON RX pkts < 64 bytes, bad CRC */
-#define RMON_R_JAB 0x2A0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
-#define RMON_R_RESVD_O 0x2A4 /* Reserved */
-#define RMON_R_P64 0x2A8 /* RMON RX 64 byte pkts */
-#define RMON_R_P65TO127 0x2AC /* RMON RX 65 to 127 byte pkts */
-#define RMON_R_P128TO255 0x2B0 /* RMON RX 128 to 255 byte pkts */
-#define RMON_R_P256TO511 0x2B4 /* RMON RX 256 to 511 byte pkts */
-#define RMON_R_P512TO1023 0x2B8 /* RMON RX 512 to 1023 byte pkts */
-#define RMON_R_P1024TO2047 0x2BC /* RMON RX 1024 to 2047 byte pkts */
-#define RMON_R_P_GTE2048 0x2C0 /* RMON RX pkts > 2048 bytes */
-#define RMON_R_OCTETS 0x2C4 /* RMON RX octets */
-#define IEEE_R_DROP 0x2C8 /* Count frames not counted correctly */
-#define IEEE_R_FRAME_OK 0x2CC /* Frames rx'd OK */
-#define IEEE_R_CRC 0x2D0 /* Frames rx'd with CRC err */
-#define IEEE_R_ALIGN 0x2D4 /* Frames rx'd with alignment err */
-#define IEEE_R_MACERR 0x2D8 /* Receive FIFO overflow count */
-#define IEEE_R_FDXFC 0x2DC /* Flow control pause frames rx'd */
-#define IEEE_R_OCTETS_OK 0x2E0 /* Octet cnt for frames rx'd w/o err */
+#define RMON_R_FRAG 0x29c /* RMON RX pkts < 64 bytes, bad CRC */
+#define RMON_R_JAB 0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
+#define RMON_R_RESVD_O 0x2a4 /* Reserved */
+#define RMON_R_P64 0x2a8 /* RMON RX 64 byte pkts */
+#define RMON_R_P65TO127 0x2ac /* RMON RX 65 to 127 byte pkts */
+#define RMON_R_P128TO255 0x2b0 /* RMON RX 128 to 255 byte pkts */
+#define RMON_R_P256TO511 0x2b4 /* RMON RX 256 to 511 byte pkts */
+#define RMON_R_P512TO1023 0x2b8 /* RMON RX 512 to 1023 byte pkts */
+#define RMON_R_P1024TO2047 0x2bc /* RMON RX 1024 to 2047 byte pkts */
+#define RMON_R_P_GTE2048 0x2c0 /* RMON RX pkts > 2048 bytes */
+#define RMON_R_OCTETS 0x2c4 /* RMON RX octets */
+#define IEEE_R_DROP 0x2c8 /* Count frames not counted correctly */
+#define IEEE_R_FRAME_OK 0x2cc /* Frames rx'd OK */
+#define IEEE_R_CRC 0x2d0 /* Frames rx'd with CRC err */
+#define IEEE_R_ALIGN 0x2d4 /* Frames rx'd with alignment err */
+#define IEEE_R_MACERR 0x2d8 /* Receive FIFO overflow count */
+#define IEEE_R_FDXFC 0x2dc /* Flow control pause frames rx'd */
+#define IEEE_R_OCTETS_OK 0x2e0 /* Octet cnt for frames rx'd w/o err */
 #else
@@ -170,16 +170,16 @@
 /* Not existed in real chip
  * Just for pass build.
  */
-#define FEC_RCMR_1 0xFFF
-#define FEC_RCMR_2 0xFFF
-#define FEC_DMA_CFG_1 0xFFF
-#define FEC_DMA_CFG_2 0xFFF
-#define FEC_TXIC0 0xFFF
-#define FEC_TXIC1 0xFFF
-#define FEC_TXIC2 0xFFF
-#define FEC_RXIC0 0xFFF
-#define FEC_RXIC1 0xFFF
-#define FEC_RXIC2 0xFFF
+#define FEC_RCMR_1 0xfff
+#define FEC_RCMR_2 0xfff
+#define FEC_DMA_CFG_1 0xfff
+#define FEC_DMA_CFG_2 0xfff
+#define FEC_TXIC0 0xfff
+#define FEC_TXIC1 0xfff
+#define FEC_TXIC2 0xfff
+#define FEC_RXIC0 0xfff
+#define FEC_RXIC1 0xfff
+#define FEC_RXIC2 0xfff
 #endif /* CONFIG_M5272 */
@@ -227,7 +227,7 @@ struct bufdesc_ex {
 #define BD_SC_CD ((ushort)0x0001) /* ?? */
 /* Buffer descriptor control/status used by Ethernet receive.
  */
 #define BD_ENET_RX_EMPTY ((ushort)0x8000)
 #define BD_ENET_RX_WRAP ((ushort)0x2000)
 #define BD_ENET_RX_INTR ((ushort)0x1000)
@@ -246,7 +246,7 @@ struct bufdesc_ex {
 #define BD_ENET_RX_VLAN 0x00000004
 /* Buffer descriptor control/status used by Ethernet transmit.
  */
 #define BD_ENET_TX_READY ((ushort)0x8000)
 #define BD_ENET_TX_PAD ((ushort)0x4000)
 #define BD_ENET_TX_WRAP ((ushort)0x2000)
@@ -262,7 +262,7 @@ struct bufdesc_ex {
 #define BD_ENET_TX_CSL ((ushort)0x0001)
 #define BD_ENET_TX_STATS ((ushort)0x0fff) /* All status bits */
-/*enhanced buffer descriptor control/status used by Ethernet transmit*/
+/* enhanced buffer descriptor control/status used by Ethernet transmit */
 #define BD_ENET_TX_INT 0x40000000
 #define BD_ENET_TX_TS 0x20000000
 #define BD_ENET_TX_PINS 0x10000000
@@ -279,36 +279,37 @@ struct bufdesc_ex {
 #define FEC_ENET_MAX_TX_QS 3
 #define FEC_ENET_MAX_RX_QS 3
-#define FEC_R_DES_START(X) ((X == 1) ? FEC_R_DES_START_1 : \
-        ((X == 2) ? \
+#define FEC_R_DES_START(X) (((X) == 1) ? FEC_R_DES_START_1 : \
+        (((X) == 2) ? \
         FEC_R_DES_START_2 : FEC_R_DES_START_0))
-#define FEC_X_DES_START(X) ((X == 1) ? FEC_X_DES_START_1 : \
-        ((X == 2) ? \
+#define FEC_X_DES_START(X) (((X) == 1) ? FEC_X_DES_START_1 : \
+        (((X) == 2) ? \
         FEC_X_DES_START_2 : FEC_X_DES_START_0))
-#define FEC_R_DES_ACTIVE(X) ((X == 1) ? FEC_R_DES_ACTIVE_1 : \
-        ((X == 2) ? \
+#define FEC_R_DES_ACTIVE(X) (((X) == 1) ? FEC_R_DES_ACTIVE_1 : \
+        (((X) == 2) ? \
         FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0))
-#define FEC_X_DES_ACTIVE(X) ((X == 1) ? FEC_X_DES_ACTIVE_1 : \
-        ((X == 2) ? \
+#define FEC_X_DES_ACTIVE(X) (((X) == 1) ? FEC_X_DES_ACTIVE_1 : \
+        (((X) == 2) ? \
         FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0))
-#define FEC_DMA_CFG(X) ((X == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
+#define FEC_DMA_CFG(X) (((X) == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
 #define DMA_CLASS_EN (1 << 16)
-#define FEC_RCMR(X) ((X == 2) ? FEC_RCMR_2 : FEC_RCMR_1)
-#define IDLE_SLOPE_MASK 0xFFFF
+#define FEC_RCMR(X) (((X) == 2) ? FEC_RCMR_2 : FEC_RCMR_1)
+#define IDLE_SLOPE_MASK 0xffff
 #define IDLE_SLOPE_1 0x200 /* BW fraction: 0.5 */
 #define IDLE_SLOPE_2 0x200 /* BW fraction: 0.5 */
-#define IDLE_SLOPE(X) ((X == 1) ? (IDLE_SLOPE_1 & IDLE_SLOPE_MASK) : \
+#define IDLE_SLOPE(X) (((X) == 1) ? \
+        (IDLE_SLOPE_1 & IDLE_SLOPE_MASK) : \
         (IDLE_SLOPE_2 & IDLE_SLOPE_MASK))
 #define RCMR_MATCHEN (0x1 << 16)
-#define RCMR_CMP_CFG(v, n) ((v & 0x7) << (n << 2))
+#define RCMR_CMP_CFG(v, n) (((v) & 0x7) << (n << 2))
 #define RCMR_CMP_1 (RCMR_CMP_CFG(0, 0) | RCMR_CMP_CFG(1, 1) | \
         RCMR_CMP_CFG(2, 2) | RCMR_CMP_CFG(3, 3))
 #define RCMR_CMP_2 (RCMR_CMP_CFG(4, 0) | RCMR_CMP_CFG(5, 1) | \
         RCMR_CMP_CFG(6, 2) | RCMR_CMP_CFG(7, 3))
-#define RCMR_CMP(X) ((X == 1) ? RCMR_CMP_1 : RCMR_CMP_2)
-#define FEC_TX_BD_FTYPE(X) ((X & 0xF) << 20)
+#define RCMR_CMP(X) (((X) == 1) ? RCMR_CMP_1 : RCMR_CMP_2)
+#define FEC_TX_BD_FTYPE(X) (((X) & 0xf) << 20)
 /* The number of Tx and Rx buffers. These are allocated from the page
  * pool. The code may assume these are power of two, so it it best
@@ -359,8 +360,8 @@ struct bufdesc_ex {
 /* ENET interrupt coalescing macro define */
 #define FEC_ITR_CLK_SEL (0x1 << 30)
 #define FEC_ITR_EN (0x1 << 31)
-#define FEC_ITR_ICFT(X) ((X & 0xFF) << 20)
-#define FEC_ITR_ICTT(X) ((X) & 0xFFFF)
+#define FEC_ITR_ICFT(X) (((X) & 0xff) << 20)
+#define FEC_ITR_ICTT(X) ((X) & 0xffff)
 #define FEC_ITR_ICFT_DEFAULT 200 /* Set 200 frame count threshold */
 #define FEC_ITR_ICTT_DEFAULT 1000 /* Set 1000us timer threshold */
@@ -501,8 +502,9 @@ struct fec_enet_private {
     int speed;
     struct completion mdio_done;
     int irq[FEC_IRQ_NUM];
-    int bufdesc_ex;
+    bool bufdesc_ex;
     int pause_flag;
+    u32 quirks;
     struct napi_struct napi;
     int csum_flags;
...
@@ -287,15 +287,13 @@ static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep,
     return entries > 0 ? entries : entries + txq->tx_ring_size;
 }
-static void *swap_buffer(void *bufaddr, int len)
+static void swap_buffer(void *bufaddr, int len)
 {
     int i;
     unsigned int *buf = bufaddr;
-    for (i = 0; i < DIV_ROUND_UP(len, 4); i++, buf++)
-        *buf = cpu_to_be32(*buf);
-    return bufaddr;
+    for (i = 0; i < len; i += 4, buf++)
+        swab32s(buf);
 }
 static void swap_buffer2(void *dst_buf, void *src_buf, int len)
@@ -361,8 +359,6 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
                              struct net_device *ndev)
 {
     struct fec_enet_private *fep = netdev_priv(ndev);
-    const struct platform_device_id *id_entry =
-        platform_get_device_id(fep->pdev);
     struct bufdesc *bdp = txq->cur_tx;
     struct bufdesc_ex *ebdp;
     int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -398,7 +394,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
     }
     if (fep->bufdesc_ex) {
-        if (id_entry->driver_data & FEC_QUIRK_HAS_AVB)
+        if (fep->quirks & FEC_QUIRK_HAS_AVB)
             estatus |= FEC_TX_BD_FTYPE(queue);
         if (skb->ip_summed == CHECKSUM_PARTIAL)
             estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
@@ -410,11 +406,11 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
     index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
     if (((unsigned long) bufaddr) & fep->tx_align ||
-        id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+        fep->quirks & FEC_QUIRK_SWAP_FRAME) {
         memcpy(txq->tx_bounce[index], bufaddr, frag_len);
         bufaddr = txq->tx_bounce[index];
-        if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+        if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
             swap_buffer(bufaddr, frag_len);
     }
@@ -450,8 +446,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
                                    struct sk_buff *skb, struct net_device *ndev)
 {
     struct fec_enet_private *fep = netdev_priv(ndev);
-    const struct platform_device_id *id_entry =
-        platform_get_device_id(fep->pdev);
     int nr_frags = skb_shinfo(skb)->nr_frags;
     struct bufdesc *bdp, *last_bdp;
     void *bufaddr;
@@ -490,11 +484,11 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
     queue = skb_get_queue_mapping(skb);
     index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
     if (((unsigned long) bufaddr) & fep->tx_align ||
-        id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+        fep->quirks & FEC_QUIRK_SWAP_FRAME) {
         memcpy(txq->tx_bounce[index], skb->data, buflen);
         bufaddr = txq->tx_bounce[index];
-        if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+        if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
             swap_buffer(bufaddr, buflen);
     }
@@ -529,7 +523,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
                  fep->hwts_tx_en))
         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-    if (id_entry->driver_data & FEC_QUIRK_HAS_AVB)
+    if (fep->quirks & FEC_QUIRK_HAS_AVB)
         estatus |= FEC_TX_BD_FTYPE(queue);
     if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -573,8 +567,6 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
                           int size, bool last_tcp, bool is_last)
 {
     struct fec_enet_private *fep = netdev_priv(ndev);
-    const struct platform_device_id *id_entry =
-        platform_get_device_id(fep->pdev);
     struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
     unsigned short queue = skb_get_queue_mapping(skb);
     unsigned short status;
@@ -587,11 +579,11 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
     status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
     if (((unsigned long) data) & fep->tx_align ||
-        id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+        fep->quirks & FEC_QUIRK_SWAP_FRAME) {
         memcpy(txq->tx_bounce[index], data, size);
         data = txq->tx_bounce[index];
-        if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+        if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
             swap_buffer(data, size);
     }
@@ -607,7 +599,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
     bdp->cbd_bufaddr = addr;
     if (fep->bufdesc_ex) {
-        if (id_entry->driver_data & FEC_QUIRK_HAS_AVB)
+        if (fep->quirks & FEC_QUIRK_HAS_AVB)
             estatus |= FEC_TX_BD_FTYPE(queue);
         if (skb->ip_summed == CHECKSUM_PARTIAL)
             estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
@@ -635,8 +627,6 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
                          struct bufdesc *bdp, int index)
 {
     struct fec_enet_private *fep = netdev_priv(ndev);
-    const struct platform_device_id *id_entry =
-        platform_get_device_id(fep->pdev);
     int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
     struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
     unsigned short queue = skb_get_queue_mapping(skb);
@@ -652,11 +642,11 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
     bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
     dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
     if (((unsigned long)bufaddr) & fep->tx_align ||
-        id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+        fep->quirks & FEC_QUIRK_SWAP_FRAME) {
         memcpy(txq->tx_bounce[index], skb->data, hdr_len);
         bufaddr = txq->tx_bounce[index];
-        if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+        if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
             swap_buffer(bufaddr, hdr_len);
         dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
@@ -673,7 +663,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
     bdp->cbd_datlen = hdr_len;
     if (fep->bufdesc_ex) {
-        if (id_entry->driver_data & FEC_QUIRK_HAS_AVB)
+        if (fep->quirks & FEC_QUIRK_HAS_AVB)
             estatus |= FEC_TX_BD_FTYPE(queue);
         if (skb->ip_summed == CHECKSUM_PARTIAL)
             estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
@@ -698,8 +688,6 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
     struct tso_t tso;
     unsigned int index = 0;
     int ret;
-    const struct platform_device_id *id_entry =
-        platform_get_device_id(fep->pdev);
     if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) {
         dev_kfree_skb_any(skb);
@@ -761,7 +749,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
     txq->cur_tx = bdp;
     /* Trigger transmission start */
-    if (!(id_entry->driver_data & FEC_QUIRK_ERR007885) ||
+    if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
         !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
         !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
         !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
@@ -924,8 +912,6 @@ static void
 fec_restart(struct net_device *ndev)
 {
     struct fec_enet_private *fep = netdev_priv(ndev);
-    const struct platform_device_id *id_entry =
-        platform_get_device_id(fep->pdev);
     u32 val;
     u32 temp_mac[2];
     u32 rcntl = OPT_FRAME_SIZE | 0x04;
@@ -935,7 +921,7 @@ fec_restart(struct net_device *ndev)
      * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
      * instead of reset MAC itself.
      */
-    if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) {
+    if (fep->quirks & FEC_QUIRK_HAS_AVB) {
         writel(0, fep->hwp + FEC_ECNTRL);
     } else {
         writel(1, fep->hwp + FEC_ECNTRL);
@@ -946,7 +932,7 @@ fec_restart(struct net_device *ndev)
      * enet-mac reset will reset mac address registers too,
      * so need to reconfigure it.
      */
-    if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+    if (fep->quirks & FEC_QUIRK_ENET_MAC) {
         memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
         writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
         writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
@@ -992,7 +978,7 @@ fec_restart(struct net_device *ndev)
      * The phy interface and speed need to get configured
      * differently on enet-mac.
      */
-    if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+    if (fep->quirks & FEC_QUIRK_ENET_MAC) {
         /* Enable flow control and length check */
         rcntl |= 0x40000000 | 0x00000020;
@@ -1015,7 +1001,7 @@ fec_restart(struct net_device *ndev)
         }
     } else {
 #ifdef FEC_MIIGSK_ENR
-        if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
+        if (fep->quirks & FEC_QUIRK_USE_GASKET) {
             u32 cfgr;
             /* disable the gasket and wait */
             writel(0, fep->hwp + FEC_MIIGSK_ENR);
@@ -1068,7 +1054,7 @@ fec_restart(struct net_device *ndev)
     writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
 #endif
-    if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+    if (fep->quirks & FEC_QUIRK_ENET_MAC) {
         /* enable ENET endian swap */
         ecntl |= (1 << 8);
         /* enable ENET store and forward mode */
@@ -1102,8 +1088,6 @@ static void
 fec_stop(struct net_device *ndev)
 {
     struct fec_enet_private *fep = netdev_priv(ndev);
-    const struct platform_device_id *id_entry =
-        platform_get_device_id(fep->pdev);
     u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
     /* We cannot expect a graceful transmit stop without link !!! */
@@ -1118,7 +1102,7 @@ fec_stop(struct net_device *ndev)
      * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
      * instead of reset MAC itself.
      */
-    if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) {
+    if (fep->quirks & FEC_QUIRK_HAS_AVB) {
         writel(0, fep->hwp + FEC_ECNTRL);
     } else {
         writel(1, fep->hwp + FEC_ECNTRL);
@@ -1128,7 +1112,7 @@ fec_stop(struct net_device *ndev)
     writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
     /* We have to keep ENET enabled to have MII interrupt stay working */
-    if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+    if (fep->quirks & FEC_QUIRK_ENET_MAC) {
         writel(2, fep->hwp + FEC_ECNTRL);
         writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
     }
@@ -1350,8 +1334,6 @@ static int
 fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 {
     struct fec_enet_private *fep = netdev_priv(ndev);
-    const struct platform_device_id *id_entry =
-        platform_get_device_id(fep->pdev);
     struct fec_enet_priv_rx_q *rxq;
     struct bufdesc *bdp;
     unsigned short status;
@@ -1365,7 +1347,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
     u16 vlan_tag;
     int index = 0;
     bool is_copybreak;
-    bool need_swap = id_entry->driver_data & FEC_QUIRK_SWAP_FRAME;
+    bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
 #ifdef CONFIG_M532x
     flush_cache_all();
@@ -1880,8 +1862,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
 static int fec_enet_mii_probe(struct net_device *ndev)
 {
     struct fec_enet_private *fep = netdev_priv(ndev);
-    const struct platform_device_id *id_entry =
-        platform_get_device_id(fep->pdev);
     struct phy_device *phy_dev = NULL;
     char mdio_bus_id[MII_BUS_ID_SIZE];
     char phy_name[MII_BUS_ID_SIZE + 3];
@@ -1927,7 +1907,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
     }
     /* mask with MAC supported features */
-    if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
+    if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
         phy_dev->supported &= PHY_GBIT_FEATURES;
         phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
 #if !defined(CONFIG_M5272)
@@ -1955,8 +1935,6 @@ static int fec_enet_mii_init(struct platform_device *pdev)
     static struct mii_bus *fec0_mii_bus;
     struct net_device *ndev = platform_get_drvdata(pdev);
     struct fec_enet_private *fep = netdev_priv(ndev);
-    const struct platform_device_id *id_entry =
-        platform_get_device_id(fep->pdev);
     struct device_node *node;
     int err = -ENXIO, i;
@@ -1976,7 +1954,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
      * mdio interface in board design, and need to be configured by
      * fec0 mii_bus.
      */
-    if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
+    if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
         /* fec1 uses fec0 mii_bus */
         if (mii_cnt && fec0_mii_bus) {
             fep->mii_bus = fec0_mii_bus;
@@ -1997,7 +1975,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
      * document.
      */
     fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
-    if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+    if (fep->quirks & FEC_QUIRK_ENET_MAC)
         fep->phy_speed--;
     fep->phy_speed <<= 1;
     writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
@@ -2039,7 +2017,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
     mii_cnt++;
     /* save fec0 mii_bus */
-    if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+    if (fep->quirks & FEC_QUIRK_ENET_MAC)
         fec0_mii_bus = fep->mii_bus;
     return 0;
@@ -2308,11 +2286,9 @@ static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
 static void fec_enet_itr_coal_set(struct net_device *ndev)
 {
     struct fec_enet_private *fep = netdev_priv(ndev);
-    const struct platform_device_id *id_entry =
-        platform_get_device_id(fep->pdev);
     int rx_itr, tx_itr;
-    if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB))
+    if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
         return;
     /* Must be greater than zero to avoid unpredictable behavior */
@@ -2347,10 +2323,8 @@ static int
 fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
 {
     struct fec_enet_private *fep = netdev_priv(ndev);
-    const struct platform_device_id *id_entry =
-        platform_get_device_id(fep->pdev);
-    if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB))
+    if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
         return -EOPNOTSUPP;
     ec->rx_coalesce_usecs = fep->rx_time_itr;
@@ -2366,12 +2340,9 @@ static int
 fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
 {
     struct fec_enet_private *fep = netdev_priv(ndev);
-    const struct platform_device_id *id_entry =
-        platform_get_device_id(fep->pdev);
     unsigned int cycle;
-    if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB))
+    if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
         return -EOPNOTSUPP;
     if (ec->rx_max_coalesced_frames > 255) {
@@ -2951,8 +2922,6 @@ static const struct net_device_ops fec_netdev_ops = {
 static int fec_enet_init(struct net_device *ndev)
 {
     struct fec_enet_private *fep = netdev_priv(ndev);
-    const struct platform_device_id *id_entry =
-        platform_get_device_id(fep->pdev);
     struct fec_enet_priv_tx_q *txq;
     struct fec_enet_priv_rx_q *rxq;
     struct bufdesc *cbd_base;
@@ -3031,11 +3000,11 @@ static int fec_enet_init(struct net_device *ndev)
     writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
     netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
-    if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN)
+    if (fep->quirks & FEC_QUIRK_HAS_VLAN)
         /* enable hw VLAN support */
         ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
-    if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
+    if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
         ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
         /* enable hw accelerator */
@@ -3044,7 +3013,7 @@ static int fec_enet_init(struct net_device *ndev)
         fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
     }
-    if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) {
+    if (fep->quirks & FEC_QUIRK_HAS_AVB) {
         fep->tx_align = 0;
         fep->rx_align = 0x3f;
     }
@@ -3144,10 +3113,6 @@ fec_probe(struct platform_device *pdev)
     int num_tx_qs;
     int num_rx_qs;
-    of_id = of_match_device(fec_dt_ids, &pdev->dev);
-    if (of_id)
-        pdev->id_entry = of_id->data;
     fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
     /* Init network device */
@@ -3161,13 +3126,17 @@ fec_probe(struct platform_device *pdev)
     /* setup board info structure */
     fep = netdev_priv(ndev);
+    of_id = of_match_device(fec_dt_ids, &pdev->dev);
+    if (of_id)
+        pdev->id_entry = of_id->data;
+    fep->quirks = pdev->id_entry->driver_data;
     fep->num_rx_queues = num_rx_qs;
     fep->num_tx_queues = num_tx_qs;
 #if !defined(CONFIG_M5272)
     /* default enable pause frame auto negotiation */
-    if (pdev->id_entry &&
-        (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
+    if (fep->quirks & FEC_QUIRK_HAS_GBIT)
         fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
 #endif
@@ -3184,8 +3153,6 @@ fec_probe(struct platform_device *pdev)
     fep->pdev = pdev;
     fep->dev_id = dev_id++;
-    fep->bufdesc_ex = 0;
     platform_set_drvdata(pdev, ndev);
     phy_node = of_parse_phandle(np, "phy-handle", 0);
@@ -3238,12 +3205,11 @@ fec_probe(struct platform_device *pdev)
     if (IS_ERR(fep->clk_ref))
         fep->clk_ref = NULL;
+    fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
     fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
-    fep->bufdesc_ex =
-        pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
     if (IS_ERR(fep->clk_ptp)) {
         fep->clk_ptp = NULL;
-        fep->bufdesc_ex = 0;
+        fep->bufdesc_ex = false;
     }
     ret = fec_enet_clk_enable(ndev, true);
...