Commit 94b2f9ba authored by Dmitry Kravkov, committed by David S. Miller

bnx2x: remove gro workaround

Removes GRO workaround, as issue is fixed in FW 7.2.51.
Signed-off-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: Barak Witkowski <barak@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a3348722
...@@ -349,7 +349,6 @@ union db_prod { ...@@ -349,7 +349,6 @@ union db_prod {
#define SGE_PAGE_SIZE PAGE_SIZE #define SGE_PAGE_SIZE PAGE_SIZE
#define SGE_PAGE_SHIFT PAGE_SHIFT #define SGE_PAGE_SHIFT PAGE_SHIFT
#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr)) #define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
#define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE)
/* SGE ring related macros */ /* SGE ring related macros */
#define NUM_RX_SGE_PAGES 2 #define NUM_RX_SGE_PAGES 2
...@@ -1240,7 +1239,6 @@ struct bnx2x { ...@@ -1240,7 +1239,6 @@ struct bnx2x {
#define ETH_MAX_JUMBO_PACKET_SIZE 9600 #define ETH_MAX_JUMBO_PACKET_SIZE 9600
/* TCP with Timestamp Option (32) + IPv6 (40) */ /* TCP with Timestamp Option (32) + IPv6 (40) */
#define ETH_MAX_TPA_HEADER_SIZE 72 #define ETH_MAX_TPA_HEADER_SIZE 72
#define ETH_MIN_TPA_HEADER_SIZE 40
/* Max supported alignment is 256 (8 shift) */ /* Max supported alignment is 256 (8 shift) */
#define BNX2X_RX_ALIGN_SHIFT min(8, L1_CACHE_SHIFT) #define BNX2X_RX_ALIGN_SHIFT min(8, L1_CACHE_SHIFT)
...@@ -1362,8 +1360,6 @@ struct bnx2x { ...@@ -1362,8 +1360,6 @@ struct bnx2x {
u8 wol; u8 wol;
bool gro_check;
int rx_ring_size; int rx_ring_size;
u16 tx_quick_cons_trip_int; u16 tx_quick_cons_trip_int;
......
...@@ -328,16 +328,6 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, ...@@ -328,16 +328,6 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len); u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
tpa_info->full_page = tpa_info->full_page =
SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size; SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
/*
* FW 7.2.16 BUG workaround:
* if SGE size is (exactly) multiple gro_size
* fw will place one less frag on SGE.
* the calculation is done only for potentially
* dangerous MTUs.
*/
if (unlikely(bp->gro_check))
if (!(SGE_PAGE_SIZE * PAGES_PER_SGE % gro_size))
tpa_info->full_page -= gro_size;
tpa_info->gro_size = gro_size; tpa_info->gro_size = gro_size;
} }
...@@ -3525,8 +3515,6 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu) ...@@ -3525,8 +3515,6 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
*/ */
dev->mtu = new_mtu; dev->mtu = new_mtu;
bp->gro_check = bnx2x_need_gro_check(new_mtu);
return bnx2x_reload_if_running(dev); return bnx2x_reload_if_running(dev);
} }
......
...@@ -1544,13 +1544,6 @@ static inline bool bnx2x_mtu_allows_gro(int mtu) ...@@ -1544,13 +1544,6 @@ static inline bool bnx2x_mtu_allows_gro(int mtu)
*/ */
return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS; return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
} }
static inline bool bnx2x_need_gro_check(int mtu)
{
return (SGE_PAGES / (mtu - ETH_MAX_TPA_HEADER_SIZE - 1)) !=
(SGE_PAGES / (mtu - ETH_MIN_TPA_HEADER_SIZE + 1));
}
/** /**
* bnx2x_bz_fp - zero content of the fastpath structure. * bnx2x_bz_fp - zero content of the fastpath structure.
* *
......
...@@ -10714,8 +10714,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp) ...@@ -10714,8 +10714,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
if (CHIP_IS_E3B0(bp)) if (CHIP_IS_E3B0(bp))
bp->max_cos = BNX2X_MULTI_TX_COS_E3B0; bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
bp->gro_check = bnx2x_need_gro_check(bp->dev->mtu);
return rc; return rc;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment