Commit 293e4365 authored by Giuseppe Cavallaro, committed by David S. Miller

stmmac: change descriptor layout

This patch completely changes the descriptor layout to improve overall
performance: in the critical paths, each descriptor word is now read a
single time instead of being accessed repeatedly through bitfields.
Signed-off-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Signed-off-by: Alexandre TORGUE <alexandre.torgue@st.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent afea0365
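The motivation is easiest to see side by side: the old layout exposed each descriptor word as C bitfields (p->des01.tx.underflow_error and friends), so every status check in a hot path compiled to its own load from DMA-coherent memory, while the new layout keeps the descriptor as plain 32-bit words (des0, des1, ...) plus mask/shift macros, so a word can be read once into a local variable and tested in registers. Below is a minimal standalone sketch of the new-style access pattern; it is illustrative only, not the driver's code. The struct and SK_* names are hypothetical, and of the bit positions only OWN at bit 31 is confirmed by the ">> 31" in the diff below; the others are assumptions.

    #include <stdint.h>

    /* Flat descriptor layout: four plain 32-bit words, as in the new scheme. */
    struct dma_desc_sketch {
            uint32_t des0;
            uint32_t des1;
            uint32_t des2;
            uint32_t des3;
    };

    #define SK_TDES0_OWN            (1u << 31)  /* matches the ">> 31" in the diff */
    #define SK_TDES0_ERR_SUMMARY    (1u << 15)  /* assumed position */
    #define SK_TDES0_UNDERFLOW      (1u << 1)   /* assumed position */

    /* Hot path: a single read of des0, then bit tests on the local copy. */
    static int sk_check_tx_status(const struct dma_desc_sketch *p)
    {
            uint32_t tdes0 = p->des0;   /* the "single read" the log refers to */

            if (tdes0 & SK_TDES0_OWN)
                    return 1;           /* still owned by the DMA engine */
            if (tdes0 & SK_TDES0_ERR_SUMMARY)
                    return (tdes0 & SK_TDES0_UNDERFLOW) ? -2 : -1;
            return 0;                   /* transmitted cleanly */
    }

With the old bitfield layout the equivalent checks (p->des01.tx.own, p->des01.tx.error_summary, ...) may each reload the descriptor word, which is the overhead this patch removes.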
drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -35,100 +35,91 @@
 /* Enhanced descriptors */
 static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
 {
-	p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
-	if (end)
-		p->des01.erx.end_ring = 1;
-}
+	p->des1 |= ((BUF_SIZE_8KiB - 1) << ERDES1_BUFFER2_SIZE_SHIFT)
+		   & ERDES1_BUFFER2_SIZE_MASK;
 
-static inline void ehn_desc_tx_set_on_ring(struct dma_desc *p, int end)
-{
 	if (end)
-		p->des01.etx.end_ring = 1;
+		p->des1 |= ERDES1_END_RING;
 }
 
-static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int ter)
+static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int end)
 {
-	p->des01.etx.end_ring = ter;
+	if (end)
+		p->des0 |= ETDES0_END_RING;
+	else
+		p->des0 &= ~ETDES0_END_RING;
 }
 
 static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
 {
 	if (unlikely(len > BUF_SIZE_4KiB)) {
-		p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
-		p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
+		p->des1 |= (((len - BUF_SIZE_4KiB) << ETDES1_BUFFER2_SIZE_SHIFT)
+			    & ETDES1_BUFFER2_SIZE_MASK) | (BUF_SIZE_4KiB
+			    & ETDES1_BUFFER1_SIZE_MASK);
 	} else
-		p->des01.etx.buffer1_size = len;
+		p->des1 |= (len & ETDES1_BUFFER1_SIZE_MASK);
 }
 
 /* Normal descriptors */
 static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
 {
-	p->des01.rx.buffer2_size = BUF_SIZE_2KiB - 1;
-	if (end)
-		p->des01.rx.end_ring = 1;
-}
+	p->des1 |= ((BUF_SIZE_2KiB - 1) << RDES1_BUFFER2_SIZE_SHIFT)
+		   & RDES1_BUFFER2_SIZE_MASK;
 
-static inline void ndesc_tx_set_on_ring(struct dma_desc *p, int end)
-{
 	if (end)
-		p->des01.tx.end_ring = 1;
+		p->des1 |= RDES1_END_RING;
 }
 
-static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int ter)
+static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int end)
 {
-	p->des01.tx.end_ring = ter;
+	if (end)
+		p->des1 |= TDES1_END_RING;
+	else
+		p->des1 &= ~TDES1_END_RING;
 }
 
 static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
 {
 	if (unlikely(len > BUF_SIZE_2KiB)) {
-		p->des01.etx.buffer1_size = BUF_SIZE_2KiB - 1;
-		p->des01.etx.buffer2_size = len - p->des01.etx.buffer1_size;
+		unsigned int buffer1 = (BUF_SIZE_2KiB - 1)
+					& TDES1_BUFFER1_SIZE_MASK;
+		p->des1 |= ((((len - buffer1) << TDES1_BUFFER2_SIZE_SHIFT)
+			    & TDES1_BUFFER2_SIZE_MASK) | buffer1);
 	} else
-		p->des01.tx.buffer1_size = len;
+		p->des1 |= (len & TDES1_BUFFER1_SIZE_MASK);
 }
 
 /* Specific functions used for Chain mode */
 
 /* Enhanced descriptors */
-static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p, int end)
-{
-	p->des01.erx.second_address_chained = 1;
-}
-
-static inline void ehn_desc_tx_set_on_chain(struct dma_desc *p, int end)
+static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p)
 {
-	p->des01.etx.second_address_chained = 1;
+	p->des1 |= ERDES1_SECOND_ADDRESS_CHAINED;
 }
 
-static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p, int ter)
+static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p)
 {
-	p->des01.etx.second_address_chained = 1;
+	p->des0 |= ETDES0_SECOND_ADDRESS_CHAINED;
 }
 
 static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
 {
-	p->des01.etx.buffer1_size = len;
+	p->des1 |= (len & ETDES1_BUFFER1_SIZE_MASK);
 }
 
 /* Normal descriptors */
 static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end)
 {
-	p->des01.rx.second_address_chained = 1;
-}
-
-static inline void ndesc_tx_set_on_chain(struct dma_desc *p, int ring_size)
-{
-	p->des01.tx.second_address_chained = 1;
+	p->des1 |= RDES1_SECOND_ADDRESS_CHAINED;
 }
 
-static inline void ndesc_end_tx_desc_on_chain(struct dma_desc *p, int ter)
+static inline void ndesc_tx_set_on_chain(struct dma_desc *p)
 {
-	p->des01.tx.second_address_chained = 1;
+	p->des1 |= TDES1_SECOND_ADDRESS_CHAINED;
 }
 
 static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
 {
-	p->des01.tx.buffer1_size = len;
+	p->des1 |= len & TDES1_BUFFER1_SIZE_MASK;
 }
 #endif /* __DESC_COM_H__ */
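To see how these ring-mode helpers combine, here is a hedged usage sketch (a hypothetical init loop, not taken from the driver): every descriptor gets its buffer size bits programmed, and only the last one is flagged with the END_RING bit so the DMA engine wraps back to the head.

    /* Hypothetical ring setup showing how the helpers above are meant to be
     * called: "end" is non-zero only for the last descriptor in the ring. */
    static void sketch_init_rx_ring(struct dma_desc *ring, int size)
    {
            int i;

            for (i = 0; i < size; i++)
                    ndesc_rx_set_on_ring(&ring[i], i == (size - 1));
    }

Note that the helpers only OR bits in, so the descriptors are assumed to be zeroed beforehand.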
drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -29,33 +29,38 @@
 static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
 			       struct dma_desc *p, void __iomem *ioaddr)
 {
-	int ret = 0;
 	struct net_device_stats *stats = (struct net_device_stats *)data;
+	unsigned int tdes0 = p->des0;
+	int ret = 0;
 
-	if (unlikely(p->des01.tx.error_summary)) {
-		if (unlikely(p->des01.tx.underflow_error)) {
+	if (unlikely(tdes0 & TDES0_ERROR_SUMMARY)) {
+		if (unlikely(tdes0 & TDES0_UNDERFLOW_ERROR)) {
 			x->tx_underflow++;
 			stats->tx_fifo_errors++;
 		}
-		if (unlikely(p->des01.tx.no_carrier)) {
+		if (unlikely(tdes0 & TDES0_NO_CARRIER)) {
 			x->tx_carrier++;
 			stats->tx_carrier_errors++;
 		}
-		if (unlikely(p->des01.tx.loss_carrier)) {
+		if (unlikely(tdes0 & TDES0_LOSS_CARRIER)) {
 			x->tx_losscarrier++;
 			stats->tx_carrier_errors++;
 		}
-		if (unlikely((p->des01.tx.excessive_deferral) ||
-			     (p->des01.tx.excessive_collisions) ||
-			     (p->des01.tx.late_collision)))
-			stats->collisions += p->des01.tx.collision_count;
+		if (unlikely((tdes0 & TDES0_EXCESSIVE_DEFERRAL) ||
+			     (tdes0 & TDES0_EXCESSIVE_COLLISIONS) ||
+			     (tdes0 & TDES0_LATE_COLLISION))) {
+			unsigned int collisions;
+
+			collisions = (tdes0 & TDES0_COLLISION_COUNT_MASK) >> 3;
+			stats->collisions += collisions;
+		}
 		ret = -1;
 	}
 
-	if (p->des01.etx.vlan_frame)
+	if (tdes0 & TDES0_VLAN_FRAME)
 		x->tx_vlan++;
 
-	if (unlikely(p->des01.tx.deferred))
+	if (unlikely(tdes0 & TDES0_DEFERRED))
 		x->tx_deferred++;
 
 	return ret;
@@ -63,7 +68,7 @@ static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
 
 static int ndesc_get_tx_len(struct dma_desc *p)
 {
-	return p->des01.tx.buffer1_size;
+	return (p->des1 & RDES1_BUFFER1_SIZE_MASK);
 }
 
 /* This function verifies if each incoming frame has some errors
@@ -74,47 +79,48 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 			       struct dma_desc *p)
 {
 	int ret = good_frame;
+	unsigned int rdes0 = p->des0;
 	struct net_device_stats *stats = (struct net_device_stats *)data;
 
-	if (unlikely(p->des01.rx.last_descriptor == 0)) {
+	if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
 		pr_warn("%s: Oversized frame spanned multiple buffers\n",
 			__func__);
 		stats->rx_length_errors++;
 		return discard_frame;
 	}
 
-	if (unlikely(p->des01.rx.error_summary)) {
-		if (unlikely(p->des01.rx.descriptor_error))
+	if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
+		if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR))
 			x->rx_desc++;
-		if (unlikely(p->des01.rx.sa_filter_fail))
+		if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL))
 			x->sa_filter_fail++;
-		if (unlikely(p->des01.rx.overflow_error))
+		if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
 			x->overflow_error++;
-		if (unlikely(p->des01.rx.ipc_csum_error))
+		if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR))
 			x->ipc_csum_error++;
-		if (unlikely(p->des01.rx.collision)) {
+		if (unlikely(rdes0 & RDES0_COLLISION)) {
 			x->rx_collision++;
 			stats->collisions++;
 		}
-		if (unlikely(p->des01.rx.crc_error)) {
+		if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
 			x->rx_crc++;
 			stats->rx_crc_errors++;
 		}
 		ret = discard_frame;
 	}
-	if (unlikely(p->des01.rx.dribbling))
+	if (unlikely(rdes0 & RDES0_DRIBBLING))
 		x->dribbling_bit++;
 
-	if (unlikely(p->des01.rx.length_error)) {
+	if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) {
 		x->rx_length++;
 		ret = discard_frame;
 	}
-	if (unlikely(p->des01.rx.mii_error)) {
+	if (unlikely(rdes0 & RDES0_MII_ERROR)) {
 		x->rx_mii++;
 		ret = discard_frame;
 	}
 #ifdef STMMAC_VLAN_TAG_USED
-	if (p->des01.rx.vlan_tag)
+	if (rdes0 & RDES0_VLAN_TAG)
 		x->vlan_tag++;
 #endif
 	return ret;
@@ -123,9 +129,8 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
 static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
 			       int end)
 {
-	p->des01.all_flags = 0;
-	p->des01.rx.own = 1;
-	p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
+	p->des0 |= RDES0_OWN;
+	p->des1 |= (BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK;
 
 	if (mode == STMMAC_CHAIN_MODE)
 		ndesc_rx_set_on_chain(p, end);
@@ -133,50 +138,50 @@ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
 	else
 		ndesc_rx_set_on_ring(p, end);
 
 	if (disable_rx_ic)
-		p->des01.rx.disable_ic = 1;
+		p->des1 |= RDES1_DISABLE_IC;
 }
 
 static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
 {
-	p->des01.all_flags = 0;
+	p->des0 &= ~TDES0_OWN;
+
 	if (mode == STMMAC_CHAIN_MODE)
-		ndesc_tx_set_on_chain(p, end);
+		ndesc_tx_set_on_chain(p);
 	else
-		ndesc_tx_set_on_ring(p, end);
+		ndesc_end_tx_desc_on_ring(p, end);
 }
 
 static int ndesc_get_tx_owner(struct dma_desc *p)
 {
-	return p->des01.tx.own;
+	return (p->des0 & TDES0_OWN) >> 31;
 }
 
 static int ndesc_get_rx_owner(struct dma_desc *p)
 {
-	return p->des01.rx.own;
+	return (p->des0 & RDES0_OWN) >> 31;
 }
 
 static void ndesc_set_tx_owner(struct dma_desc *p)
 {
-	p->des01.tx.own = 1;
+	p->des0 |= TDES0_OWN;
 }
 
 static void ndesc_set_rx_owner(struct dma_desc *p)
 {
-	p->des01.rx.own = 1;
+	p->des0 |= RDES0_OWN;
 }
 
 static int ndesc_get_tx_ls(struct dma_desc *p)
 {
-	return p->des01.tx.last_segment;
+	return (p->des1 & TDES1_LAST_SEGMENT) >> 30;
 }
 
 static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
 {
-	int ter = p->des01.tx.end_ring;
+	int ter = (p->des1 & TDES1_END_RING) >> 25;
 
 	memset(p, 0, offsetof(struct dma_desc, des2));
 	if (mode == STMMAC_CHAIN_MODE)
-		ndesc_end_tx_desc_on_chain(p, ter);
+		ndesc_tx_set_on_chain(p);
 	else
 		ndesc_end_tx_desc_on_ring(p, ter);
 }
@@ -184,48 +189,62 @@ static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
 
 static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
 				  int csum_flag, int mode)
 {
-	p->des01.tx.first_segment = is_fs;
+	unsigned int tdes1 = p->des1;
+
+	if (is_fs)
+		tdes1 |= TDES1_FIRST_SEGMENT;
+	else
+		tdes1 &= ~TDES1_FIRST_SEGMENT;
+	if (likely(csum_flag))
+		tdes1 |= (TX_CIC_FULL) << TDES1_CHECKSUM_INSERTION_SHIFT;
+	else
+		tdes1 &= ~(TX_CIC_FULL << TDES1_CHECKSUM_INSERTION_SHIFT);
+
+	p->des1 = tdes1;
+
 	if (mode == STMMAC_CHAIN_MODE)
 		norm_set_tx_desc_len_on_chain(p, len);
 	else
 		norm_set_tx_desc_len_on_ring(p, len);
-
-	if (likely(csum_flag))
-		p->des01.tx.checksum_insertion = cic_full;
 }
 
 static void ndesc_clear_tx_ic(struct dma_desc *p)
 {
-	p->des01.tx.interrupt = 0;
+	p->des1 &= ~TDES1_INTERRUPT;
 }
 
 static void ndesc_close_tx_desc(struct dma_desc *p)
 {
-	p->des01.tx.last_segment = 1;
-	p->des01.tx.interrupt = 1;
+	p->des1 |= TDES1_LAST_SEGMENT | TDES1_INTERRUPT;
 }
 
 static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
 {
+	unsigned int csum = 0;
+
 	/* The type-1 checksum offload engines append the checksum at
 	 * the end of frame and the two bytes of checksum are added in
 	 * the length.
 	 * Adjust for that in the framelen for type-1 checksum offload
-	 * engines. */
+	 * engines
+	 */
 	if (rx_coe_type == STMMAC_RX_COE_TYPE1)
-		return p->des01.rx.frame_length - 2;
-	else
-		return p->des01.rx.frame_length;
+		csum = 2;
+
+	return (((p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT) -
+		csum);
 }
 
 static void ndesc_enable_tx_timestamp(struct dma_desc *p)
 {
-	p->des01.tx.time_stamp_enable = 1;
+	p->des1 |= TDES1_TIME_STAMP_ENABLE;
 }
 
 static int ndesc_get_tx_timestamp_status(struct dma_desc *p)
 {
-	return p->des01.tx.time_stamp_status;
+	return (p->des0 & TDES0_TIME_STAMP_STATUS) >> 17;
 }
 
 static u64 ndesc_get_timestamp(void *desc, u32 ats)
......
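For reference, the TDES*/RDES* names used throughout are plain bit masks defined in descs.h, which this commit view does not show. The open-coded shifts above pin down several positions; a reconstruction of that subset (assumed, not copied from the header) might look like:

    /* Bit positions implied by the shifts in this diff; treat these as a
     * reconstruction, not the real descs.h contents. */
    #define TDES0_COLLISION_COUNT_MASK  (0xfu << 3)  /* read back with ">> 3"; 4-bit width assumed */
    #define TDES0_TIME_STAMP_STATUS     (1u << 17)   /* ">> 17" */
    #define TDES0_OWN                   (1u << 31)   /* ">> 31" */
    #define RDES0_OWN                   (1u << 31)   /* ">> 31" */
    #define TDES1_END_RING              (1u << 25)   /* ">> 25" */
    #define TDES1_LAST_SEGMENT          (1u << 30)   /* ">> 30" */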