Commit 4badc385 authored by Scott Feldman's avatar Scott Feldman Committed by David S. Miller

enic: workaround A0 erratum

A0 revision ASIC has an erratum on the RQ desc cache on chip where the
cache can become corrupted causing pkt buf writes to wrong locations.  The s/w
workaround is to post a dummy RQ desc in the ring every 32 descs, causing a
flush of the cache.  A0 parts are not production, but there are enough of
these parts in the wild in test setups to warrant including the workaround.  A1
revision ASIC parts fix the erratum.
Signed-off-by: Scott Feldman <scofeldm@cisco.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 27e6c7d3
...@@ -851,6 +851,50 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq) ...@@ -851,6 +851,50 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
return 0; return 0;
} }
/* RQ buffer allocator used as the SW workaround for the A0 HW erratum:
 * when the next write would land on posted_index (see
 * vnic_rq_posting_soon()), post a dummy descriptor of reserved type
 * instead of a real buffer, which forces the chip to flush its RQ desc
 * cache.  Otherwise fall through to the normal allocator.
 *
 * Returns 0 on success or the error from enic_rq_alloc_buf().
 */
static int enic_rq_alloc_buf_a1(struct vnic_rq *rq)
{
	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);

	if (!vnic_rq_posting_soon(rq))
		return enic_rq_alloc_buf(rq);

	/* Just about to write posted_index: insert a dummy desc
	 * of type resvd to flush the on-chip desc cache.
	 */
	rq_enet_desc_enc(desc, 0, RQ_ENET_TYPE_RESV2, 0);
	vnic_rq_post(rq, 0, 0, 0, 0);

	return 0;
}
/* Select the RQ buffer-allocation routine based on the ASIC hardware
 * revision reported by firmware.  The A1 revision gets the erratum
 * workaround allocator; A2 and unknown revisions use the plain one.
 *
 * Returns 0 on success, the error from vnic_dev_hw_version() on a
 * failed firmware query, or -ENODEV for an unsupported revision.
 */
static int enic_set_rq_alloc_buf(struct enic *enic)
{
	enum vnic_dev_hw_version hw_ver;
	int err;

	err = vnic_dev_hw_version(enic->vdev, &hw_ver);
	if (err)
		return err;

	if (hw_ver == VNIC_DEV_HW_VER_A1)
		enic->rq_alloc_buf = enic_rq_alloc_buf_a1;
	else if (hw_ver == VNIC_DEV_HW_VER_A2 ||
		 hw_ver == VNIC_DEV_HW_VER_UNKNOWN)
		enic->rq_alloc_buf = enic_rq_alloc_buf;
	else
		return -ENODEV;

	return 0;
}
static int enic_get_skb_header(struct sk_buff *skb, void **iphdr, static int enic_get_skb_header(struct sk_buff *skb, void **iphdr,
void **tcph, u64 *hdr_flags, void *priv) void **tcph, u64 *hdr_flags, void *priv)
{ {
...@@ -1058,7 +1102,7 @@ static int enic_poll(struct napi_struct *napi, int budget) ...@@ -1058,7 +1102,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
/* Replenish RQ /* Replenish RQ
*/ */
vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
} else { } else {
...@@ -1093,7 +1137,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget) ...@@ -1093,7 +1137,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
/* Replenish RQ /* Replenish RQ
*/ */
vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
/* Return intr event credits for this polling /* Return intr event credits for this polling
* cycle. An intr event is the completion of a * cycle. An intr event is the completion of a
...@@ -1269,7 +1313,7 @@ static int enic_open(struct net_device *netdev) ...@@ -1269,7 +1313,7 @@ static int enic_open(struct net_device *netdev)
} }
for (i = 0; i < enic->rq_count; i++) { for (i = 0; i < enic->rq_count; i++) {
err = vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf); err = vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
if (err) { if (err) {
printk(KERN_ERR PFX printk(KERN_ERR PFX
"%s: Unable to alloc receive buffers.\n", "%s: Unable to alloc receive buffers.\n",
......
...@@ -349,6 +349,25 @@ int vnic_dev_fw_info(struct vnic_dev *vdev, ...@@ -349,6 +349,25 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
return err; return err;
} }
/* Query the ASIC hardware revision from the firmware info block.
 *
 * On success, stores the decoded revision in *hw_ver (UNKNOWN when the
 * firmware string matches neither "A1" nor "A2") and returns 0.
 * Returns the error from vnic_dev_fw_info() if the firmware query fails.
 */
int vnic_dev_hw_version(struct vnic_dev *vdev, enum vnic_dev_hw_version *hw_ver)
{
	struct vnic_devcmd_fw_info *fw_info;
	int err;

	err = vnic_dev_fw_info(vdev, &fw_info);
	if (err)
		return err;

	/* sizeof("A1") covers the NUL, so this is an exact-string match */
	if (!strcmp(fw_info->hw_version, "A1"))
		*hw_ver = VNIC_DEV_HW_VER_A1;
	else if (!strcmp(fw_info->hw_version, "A2"))
		*hw_ver = VNIC_DEV_HW_VER_A2;
	else
		*hw_ver = VNIC_DEV_HW_VER_UNKNOWN;

	return 0;
}
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
void *value) void *value)
{ {
......
...@@ -41,6 +41,12 @@ static inline void writeq(u64 val, void __iomem *reg) ...@@ -41,6 +41,12 @@ static inline void writeq(u64 val, void __iomem *reg)
} }
#endif #endif
/* ASIC hardware revision as decoded from the firmware hw_version
 * string; used to select per-revision behavior (e.g. the A0 erratum
 * RQ-allocator workaround).
 */
enum vnic_dev_hw_version {
	VNIC_DEV_HW_VER_UNKNOWN,
	VNIC_DEV_HW_VER_A1,
	VNIC_DEV_HW_VER_A2,
};
enum vnic_dev_intr_mode { enum vnic_dev_intr_mode {
VNIC_DEV_INTR_MODE_UNKNOWN, VNIC_DEV_INTR_MODE_UNKNOWN,
VNIC_DEV_INTR_MODE_INTX, VNIC_DEV_INTR_MODE_INTX,
...@@ -88,6 +94,8 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, ...@@ -88,6 +94,8 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait); u64 *a0, u64 *a1, int wait);
int vnic_dev_fw_info(struct vnic_dev *vdev, int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info); struct vnic_devcmd_fw_info **fw_info);
int vnic_dev_hw_version(struct vnic_dev *vdev,
enum vnic_dev_hw_version *hw_ver);
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
void *value); void *value);
int vnic_dev_stats_clear(struct vnic_dev *vdev); int vnic_dev_stats_clear(struct vnic_dev *vdev);
......
...@@ -143,6 +143,11 @@ static inline void vnic_rq_post(struct vnic_rq *rq, ...@@ -143,6 +143,11 @@ static inline void vnic_rq_post(struct vnic_rq *rq,
} }
} }
/* Nonzero when the next descriptor to use sits on a
 * VNIC_RQ_RETURN_RATE boundary, i.e. the caller is about to write
 * posted_index.  Used by the A0-erratum workaround to decide when to
 * insert a dummy descriptor.
 */
static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
{
	return !(rq->to_use->index & VNIC_RQ_RETURN_RATE);
}
static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count) static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{ {
rq->ring.desc_avail += count; rq->ring.desc_avail += count;
...@@ -186,7 +191,7 @@ static inline int vnic_rq_fill(struct vnic_rq *rq, ...@@ -186,7 +191,7 @@ static inline int vnic_rq_fill(struct vnic_rq *rq,
{ {
int err; int err;
while (vnic_rq_desc_avail(rq) > 1) { while (vnic_rq_desc_avail(rq) > 0) {
err = (*buf_fill)(rq); err = (*buf_fill)(rq);
if (err) if (err)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment