Commit 147a1c06 authored by David S. Miller

Merge branch 'ionic-diet'

Shannon Nelson says:

====================
ionic: putting ionic on a diet

Building on the performance work done in the previous patchset
    [Link] https://lore.kernel.org/netdev/20240229193935.14197-1-shannon.nelson@amd.com/
this patchset puts the ionic driver on a diet, decreasing the memory
requirements per queue, and simplifies a few more bits of logic.

We trimmed the queue management structs and gained some ground, but
the most savings came from trimming the individual buffer descriptors.
The original design used a single generic buffer descriptor for Tx, Rx and
Adminq needs, but the Rx and Adminq descriptors really don't need all the
info that the Tx descriptors track.  By splitting up the descriptor types
we can significantly reduce the descriptor sizes for Rx and Adminq use.

There is a small reduction in the queue management structs, saving about
3 cachelines per queuepair:

    ionic_qcq:
	Before:	/* size: 2176, cachelines: 34, members: 23 */
	After:	/* size: 2048, cachelines: 32, members: 23 */

We also remove an array of completion descriptor pointers, or about
8 Kbytes per queue.

But the biggest savings came from splitting the desc_info struct into
queue specific structs and trimming out what was unnecessary.

    Before:
	ionic_desc_info:
		/* size: 496, cachelines: 8, members: 10 */
    After:
	ionic_tx_desc_info:
		/* size: 496, cachelines: 8, members: 6 */
	ionic_rx_desc_info:
		/* size: 224, cachelines: 4, members: 2 */
	ionic_admin_desc_info:
		/* size: 8, cachelines: 1, members: 1 */

In a 64 core host the ionic driver will default to 64 queuepairs of
1024 descriptors for Rx, 1024 for Tx, and 80 for Adminq and Notifyq.

The total memory usage for 64 queues:
    Before:
	  65 * sizeof(ionic_qcq)			   141,440
	+ 64 * 1024 * sizeof(ionic_desc_info)		32,505,856
	+ 64 * 1024 * sizeof(ionic_desc_info)		32,505,856
	+ 64 * 1024 * 2 * sizeof(ionic_qc_info)		    16,384
	+  1 *   80 * sizeof(ionic_desc_info)		    39,680
							----------
							65,209,216

    After:
	  65 * sizeof(ionic_qcq)			   133,120
	+ 64 * 1024 * sizeof(ionic_tx_desc_info)	32,505,856
	+ 64 * 1024 * sizeof(ionic_rx_desc_info)	14,680,064
	+                           (removed)		         0
	+  1 *   80 * sizeof(ionic_admin_desc_info)	       640
							----------
							47,319,680

This saves us approximately 18 Mbytes per port in a 64 core machine,
a 28% savings in our memory needs.

In addition, this improves our simple single thread / single queue
iperf case on a 9100 MTU connection from 86.7 to 95 Gbits/sec.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e3eec349 2854242d
...@@ -76,6 +76,8 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); ...@@ -76,6 +76,8 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx); int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode, void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
u8 status, int err); u8 status, int err);
bool ionic_notifyq_service(struct ionic_cq *cq);
bool ionic_adminq_service(struct ionic_cq *cq);
int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait); int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait);
int ionic_dev_cmd_wait_nomsg(struct ionic *ionic, unsigned long max_wait); int ionic_dev_cmd_wait_nomsg(struct ionic *ionic, unsigned long max_wait);
......
...@@ -629,43 +629,25 @@ int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq, ...@@ -629,43 +629,25 @@ int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
cq->desc_size = desc_size; cq->desc_size = desc_size;
cq->tail_idx = 0; cq->tail_idx = 0;
cq->done_color = 1; cq->done_color = 1;
cq->idev = &lif->ionic->idev;
return 0; return 0;
} }
void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa)
{
struct ionic_cq_info *cur;
unsigned int i;
cq->base = base;
cq->base_pa = base_pa;
for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++)
cur->cq_desc = base + (i * cq->desc_size);
}
void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q)
{
cq->bound_q = q;
}
unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do, unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
ionic_cq_cb cb, ionic_cq_done_cb done_cb, ionic_cq_cb cb, ionic_cq_done_cb done_cb,
void *done_arg) void *done_arg)
{ {
struct ionic_cq_info *cq_info;
unsigned int work_done = 0; unsigned int work_done = 0;
if (work_to_do == 0) if (work_to_do == 0)
return 0; return 0;
cq_info = &cq->info[cq->tail_idx]; while (cb(cq)) {
while (cb(cq, cq_info)) {
if (cq->tail_idx == cq->num_descs - 1) if (cq->tail_idx == cq->num_descs - 1)
cq->done_color = !cq->done_color; cq->done_color = !cq->done_color;
cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1); cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
cq_info = &cq->info[cq->tail_idx];
if (++work_done >= work_to_do) if (++work_done >= work_to_do)
break; break;
...@@ -692,7 +674,6 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev, ...@@ -692,7 +674,6 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
return -EINVAL; return -EINVAL;
q->lif = lif; q->lif = lif;
q->idev = idev;
q->index = index; q->index = index;
q->num_descs = num_descs; q->num_descs = num_descs;
q->desc_size = desc_size; q->desc_size = desc_size;
...@@ -706,53 +687,11 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev, ...@@ -706,53 +687,11 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
return 0; return 0;
} }
void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa) void ionic_q_post(struct ionic_queue *q, bool ring_doorbell)
{ {
struct ionic_desc_info *cur;
unsigned int i;
q->base = base;
q->base_pa = base_pa;
for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
cur->desc = base + (i * q->desc_size);
}
void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa)
{
struct ionic_desc_info *cur;
unsigned int i;
q->cmb_base = base;
q->cmb_base_pa = base_pa;
for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
cur->cmb_desc = base + (i * q->desc_size);
}
void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
{
struct ionic_desc_info *cur;
unsigned int i;
q->sg_base = base;
q->sg_base_pa = base_pa;
for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
cur->sg_desc = base + (i * q->sg_desc_size);
}
void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
void *cb_arg)
{
struct ionic_desc_info *desc_info;
struct ionic_lif *lif = q->lif; struct ionic_lif *lif = q->lif;
struct device *dev = q->dev; struct device *dev = q->dev;
desc_info = &q->info[q->head_idx];
desc_info->cb = cb;
desc_info->cb_arg = cb_arg;
q->head_idx = (q->head_idx + 1) & (q->num_descs - 1); q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);
dev_dbg(dev, "lif=%d qname=%s qid=%d qtype=%d p_index=%d ringdb=%d\n", dev_dbg(dev, "lif=%d qname=%s qid=%d qtype=%d p_index=%d ringdb=%d\n",
...@@ -771,7 +710,7 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb, ...@@ -771,7 +710,7 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
} }
} }
static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos) bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
{ {
unsigned int mask, tail, head; unsigned int mask, tail, head;
...@@ -781,37 +720,3 @@ static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos) ...@@ -781,37 +720,3 @@ static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
return ((pos - tail) & mask) < ((head - tail) & mask); return ((pos - tail) & mask) < ((head - tail) & mask);
} }
void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
unsigned int stop_index)
{
struct ionic_desc_info *desc_info;
ionic_desc_cb cb;
void *cb_arg;
u16 index;
/* check for empty queue */
if (q->tail_idx == q->head_idx)
return;
/* stop index must be for a descriptor that is not yet completed */
if (unlikely(!ionic_q_is_posted(q, stop_index)))
dev_err(q->dev,
"ionic stop is not posted %s stop %u tail %u head %u\n",
q->name, stop_index, q->tail_idx, q->head_idx);
do {
desc_info = &q->info[q->tail_idx];
index = q->tail_idx;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
cb = desc_info->cb;
cb_arg = desc_info->cb_arg;
desc_info->cb = NULL;
desc_info->cb_arg = NULL;
if (cb)
cb(q, desc_info, cq_info, cb_arg);
} while (index != stop_index);
}
...@@ -122,11 +122,13 @@ static_assert(sizeof(struct ionic_log_event) == 64); ...@@ -122,11 +122,13 @@ static_assert(sizeof(struct ionic_log_event) == 64);
/* I/O */ /* I/O */
static_assert(sizeof(struct ionic_txq_desc) == 16); static_assert(sizeof(struct ionic_txq_desc) == 16);
static_assert(sizeof(struct ionic_txq_sg_desc) == 128); static_assert(sizeof(struct ionic_txq_sg_desc) == 128);
static_assert(sizeof(struct ionic_txq_sg_desc_v1) == 256);
static_assert(sizeof(struct ionic_txq_comp) == 16); static_assert(sizeof(struct ionic_txq_comp) == 16);
static_assert(sizeof(struct ionic_rxq_desc) == 16); static_assert(sizeof(struct ionic_rxq_desc) == 16);
static_assert(sizeof(struct ionic_rxq_sg_desc) == 128); static_assert(sizeof(struct ionic_rxq_sg_desc) == 128);
static_assert(sizeof(struct ionic_rxq_comp) == 16); static_assert(sizeof(struct ionic_rxq_comp) == 16);
static_assert(sizeof(struct ionic_rxq_comp) == sizeof(struct ionic_txq_comp));
/* SR/IOV */ /* SR/IOV */
static_assert(sizeof(struct ionic_vf_setattr_cmd) == 64); static_assert(sizeof(struct ionic_vf_setattr_cmd) == 64);
...@@ -175,21 +177,8 @@ struct ionic_dev { ...@@ -175,21 +177,8 @@ struct ionic_dev {
struct ionic_devinfo dev_info; struct ionic_devinfo dev_info;
}; };
struct ionic_cq_info {
union {
void *cq_desc;
struct ionic_admin_comp *admincq;
struct ionic_notifyq_event *notifyq;
};
};
struct ionic_queue; struct ionic_queue;
struct ionic_qcq; struct ionic_qcq;
struct ionic_desc_info;
typedef void (*ionic_desc_cb)(struct ionic_queue *q,
struct ionic_desc_info *desc_info,
struct ionic_cq_info *cq_info, void *cb_arg);
#define IONIC_MAX_BUF_LEN ((u16)-1) #define IONIC_MAX_BUF_LEN ((u16)-1)
#define IONIC_PAGE_SIZE PAGE_SIZE #define IONIC_PAGE_SIZE PAGE_SIZE
...@@ -209,28 +198,25 @@ struct ionic_buf_info { ...@@ -209,28 +198,25 @@ struct ionic_buf_info {
u32 len; u32 len;
}; };
#define IONIC_MAX_FRAGS (1 + IONIC_TX_MAX_SG_ELEMS_V1) #define IONIC_TX_MAX_FRAGS (1 + IONIC_TX_MAX_SG_ELEMS_V1)
#define IONIC_RX_MAX_FRAGS (1 + IONIC_RX_MAX_SG_ELEMS)
struct ionic_desc_info { struct ionic_tx_desc_info {
union {
void *desc;
struct ionic_txq_desc *txq_desc;
struct ionic_rxq_desc *rxq_desc;
struct ionic_admin_cmd *adminq_desc;
};
void __iomem *cmb_desc;
union {
void *sg_desc;
struct ionic_txq_sg_desc *txq_sg_desc;
struct ionic_rxq_sg_desc *rxq_sgl_desc;
};
unsigned int bytes; unsigned int bytes;
unsigned int nbufs; unsigned int nbufs;
struct ionic_buf_info bufs[MAX_SKB_FRAGS + 1]; struct sk_buff *skb;
ionic_desc_cb cb;
void *cb_arg;
struct xdp_frame *xdpf; struct xdp_frame *xdpf;
enum xdp_action act; enum xdp_action act;
struct ionic_buf_info bufs[MAX_SKB_FRAGS + 1];
};
struct ionic_rx_desc_info {
unsigned int nbufs;
struct ionic_buf_info bufs[IONIC_RX_MAX_FRAGS];
};
struct ionic_admin_desc_info {
void *ctx;
}; };
#define IONIC_QUEUE_NAME_MAX_SZ 16 #define IONIC_QUEUE_NAME_MAX_SZ 16
...@@ -238,7 +224,12 @@ struct ionic_desc_info { ...@@ -238,7 +224,12 @@ struct ionic_desc_info {
struct ionic_queue { struct ionic_queue {
struct device *dev; struct device *dev;
struct ionic_lif *lif; struct ionic_lif *lif;
struct ionic_desc_info *info; union {
void *info;
struct ionic_tx_desc_info *tx_info;
struct ionic_rx_desc_info *rx_info;
struct ionic_admin_desc_info *admin_info;
};
u64 dbval; u64 dbval;
unsigned long dbell_deadline; unsigned long dbell_deadline;
unsigned long dbell_jiffies; unsigned long dbell_jiffies;
...@@ -248,29 +239,33 @@ struct ionic_queue { ...@@ -248,29 +239,33 @@ struct ionic_queue {
unsigned int num_descs; unsigned int num_descs;
unsigned int max_sg_elems; unsigned int max_sg_elems;
u64 features; u64 features;
u64 drop;
struct ionic_dev *idev;
unsigned int type; unsigned int type;
unsigned int hw_index; unsigned int hw_index;
unsigned int hw_type; unsigned int hw_type;
bool xdp_flush;
union { union {
void *base; void *base;
struct ionic_txq_desc *txq; struct ionic_txq_desc *txq;
struct ionic_rxq_desc *rxq; struct ionic_rxq_desc *rxq;
struct ionic_admin_cmd *adminq; struct ionic_admin_cmd *adminq;
}; };
void __iomem *cmb_base; union {
void __iomem *cmb_base;
struct ionic_txq_desc __iomem *cmb_txq;
struct ionic_rxq_desc __iomem *cmb_rxq;
};
union { union {
void *sg_base; void *sg_base;
struct ionic_txq_sg_desc *txq_sgl; struct ionic_txq_sg_desc *txq_sgl;
struct ionic_txq_sg_desc_v1 *txq_sgl_v1;
struct ionic_rxq_sg_desc *rxq_sgl; struct ionic_rxq_sg_desc *rxq_sgl;
}; };
struct xdp_rxq_info *xdp_rxq_info; struct xdp_rxq_info *xdp_rxq_info;
struct ionic_queue *partner; struct ionic_queue *partner;
bool xdp_flush;
dma_addr_t base_pa; dma_addr_t base_pa;
dma_addr_t cmb_base_pa; dma_addr_t cmb_base_pa;
dma_addr_t sg_base_pa; dma_addr_t sg_base_pa;
u64 drop;
unsigned int desc_size; unsigned int desc_size;
unsigned int sg_desc_size; unsigned int sg_desc_size;
unsigned int pid; unsigned int pid;
...@@ -292,7 +287,6 @@ struct ionic_intr_info { ...@@ -292,7 +287,6 @@ struct ionic_intr_info {
struct ionic_cq { struct ionic_cq {
struct ionic_lif *lif; struct ionic_lif *lif;
struct ionic_cq_info *info;
struct ionic_queue *bound_q; struct ionic_queue *bound_q;
struct ionic_intr_info *bound_intr; struct ionic_intr_info *bound_intr;
u16 tail_idx; u16 tail_idx;
...@@ -301,6 +295,7 @@ struct ionic_cq { ...@@ -301,6 +295,7 @@ struct ionic_cq {
unsigned int desc_size; unsigned int desc_size;
void *base; void *base;
dma_addr_t base_pa; dma_addr_t base_pa;
struct ionic_dev *idev;
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
struct ionic; struct ionic;
...@@ -375,7 +370,7 @@ int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq, ...@@ -375,7 +370,7 @@ int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
unsigned int num_descs, size_t desc_size); unsigned int num_descs, size_t desc_size);
void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa); void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa);
void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q); void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q);
typedef bool (*ionic_cq_cb)(struct ionic_cq *cq, struct ionic_cq_info *cq_info); typedef bool (*ionic_cq_cb)(struct ionic_cq *cq);
typedef void (*ionic_cq_done_cb)(void *done_arg); typedef void (*ionic_cq_done_cb)(void *done_arg);
unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do, unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
ionic_cq_cb cb, ionic_cq_done_cb done_cb, ionic_cq_cb cb, ionic_cq_done_cb done_cb,
...@@ -386,13 +381,9 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev, ...@@ -386,13 +381,9 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
struct ionic_queue *q, unsigned int index, const char *name, struct ionic_queue *q, unsigned int index, const char *name,
unsigned int num_descs, size_t desc_size, unsigned int num_descs, size_t desc_size,
size_t sg_desc_size, unsigned int pid); size_t sg_desc_size, unsigned int pid);
void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa); void ionic_q_post(struct ionic_queue *q, bool ring_doorbell);
void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa); bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos);
void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
void *cb_arg);
void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
unsigned int stop_index);
int ionic_heartbeat_check(struct ionic *ionic); int ionic_heartbeat_check(struct ionic *ionic);
bool ionic_is_fw_running(struct ionic_dev *idev); bool ionic_is_fw_running(struct ionic_dev *idev);
......
...@@ -433,8 +433,6 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq) ...@@ -433,8 +433,6 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
ionic_xdp_unregister_rxq_info(&qcq->q); ionic_xdp_unregister_rxq_info(&qcq->q);
ionic_qcq_intr_free(lif, qcq); ionic_qcq_intr_free(lif, qcq);
vfree(qcq->cq.info);
qcq->cq.info = NULL;
vfree(qcq->q.info); vfree(qcq->q.info);
qcq->q.info = NULL; qcq->q.info = NULL;
} }
...@@ -538,14 +536,11 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, ...@@ -538,14 +536,11 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
unsigned int num_descs, unsigned int desc_size, unsigned int num_descs, unsigned int desc_size,
unsigned int cq_desc_size, unsigned int cq_desc_size,
unsigned int sg_desc_size, unsigned int sg_desc_size,
unsigned int desc_info_size,
unsigned int pid, struct ionic_qcq **qcq) unsigned int pid, struct ionic_qcq **qcq)
{ {
struct ionic_dev *idev = &lif->ionic->idev; struct ionic_dev *idev = &lif->ionic->idev;
struct device *dev = lif->ionic->dev; struct device *dev = lif->ionic->dev;
void *q_base, *cq_base, *sg_base;
dma_addr_t cq_base_pa = 0;
dma_addr_t sg_base_pa = 0;
dma_addr_t q_base_pa = 0;
struct ionic_qcq *new; struct ionic_qcq *new;
int err; int err;
...@@ -561,7 +556,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, ...@@ -561,7 +556,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
new->q.dev = dev; new->q.dev = dev;
new->flags = flags; new->flags = flags;
new->q.info = vcalloc(num_descs, sizeof(*new->q.info)); new->q.info = vcalloc(num_descs, desc_info_size);
if (!new->q.info) { if (!new->q.info) {
netdev_err(lif->netdev, "Cannot allocate queue info\n"); netdev_err(lif->netdev, "Cannot allocate queue info\n");
err = -ENOMEM; err = -ENOMEM;
...@@ -580,19 +575,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, ...@@ -580,19 +575,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
err = ionic_alloc_qcq_interrupt(lif, new); err = ionic_alloc_qcq_interrupt(lif, new);
if (err) if (err)
goto err_out; goto err_out_free_q_info;
new->cq.info = vcalloc(num_descs, sizeof(*new->cq.info));
if (!new->cq.info) {
netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
err = -ENOMEM;
goto err_out_free_irq;
}
err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size); err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
if (err) { if (err) {
netdev_err(lif->netdev, "Cannot initialize completion queue\n"); netdev_err(lif->netdev, "Cannot initialize completion queue\n");
goto err_out_free_cq_info; goto err_out_free_irq;
} }
if (flags & IONIC_QCQ_F_NOTIFYQ) { if (flags & IONIC_QCQ_F_NOTIFYQ) {
...@@ -610,16 +598,15 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, ...@@ -610,16 +598,15 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
if (!new->q_base) { if (!new->q_base) {
netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n"); netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
err = -ENOMEM; err = -ENOMEM;
goto err_out_free_cq_info; goto err_out_free_irq;
} }
q_base = PTR_ALIGN(new->q_base, PAGE_SIZE); new->q.base = PTR_ALIGN(new->q_base, PAGE_SIZE);
q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE); new->q.base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
ionic_q_map(&new->q, q_base, q_base_pa);
/* Base the NotifyQ cq.base off of the ALIGNed q.base */
cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE); new->cq.base = PTR_ALIGN(new->q.base + q_size, PAGE_SIZE);
cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE); new->cq.base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
ionic_cq_map(&new->cq, cq_base, cq_base_pa); new->cq.bound_q = &new->q;
ionic_cq_bind(&new->cq, &new->q);
} else { } else {
/* regular DMA q descriptors */ /* regular DMA q descriptors */
new->q_size = PAGE_SIZE + (num_descs * desc_size); new->q_size = PAGE_SIZE + (num_descs * desc_size);
...@@ -628,11 +615,10 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, ...@@ -628,11 +615,10 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
if (!new->q_base) { if (!new->q_base) {
netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n"); netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
err = -ENOMEM; err = -ENOMEM;
goto err_out_free_cq_info; goto err_out_free_irq;
} }
q_base = PTR_ALIGN(new->q_base, PAGE_SIZE); new->q.base = PTR_ALIGN(new->q_base, PAGE_SIZE);
q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE); new->q.base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
ionic_q_map(&new->q, q_base, q_base_pa);
if (flags & IONIC_QCQ_F_CMB_RINGS) { if (flags & IONIC_QCQ_F_CMB_RINGS) {
/* on-chip CMB q descriptors */ /* on-chip CMB q descriptors */
...@@ -657,7 +643,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, ...@@ -657,7 +643,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
} }
new->cmb_q_base_pa -= idev->phy_cmb_pages; new->cmb_q_base_pa -= idev->phy_cmb_pages;
ionic_q_cmb_map(&new->q, new->cmb_q_base, new->cmb_q_base_pa); new->q.cmb_base = new->cmb_q_base;
new->q.cmb_base_pa = new->cmb_q_base_pa;
} }
/* cq DMA descriptors */ /* cq DMA descriptors */
...@@ -669,10 +656,9 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, ...@@ -669,10 +656,9 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
err = -ENOMEM; err = -ENOMEM;
goto err_out_free_q; goto err_out_free_q;
} }
cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE); new->cq.base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE); new->cq.base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
ionic_cq_map(&new->cq, cq_base, cq_base_pa); new->cq.bound_q = &new->q;
ionic_cq_bind(&new->cq, &new->q);
} }
if (flags & IONIC_QCQ_F_SG) { if (flags & IONIC_QCQ_F_SG) {
...@@ -684,9 +670,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, ...@@ -684,9 +670,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
err = -ENOMEM; err = -ENOMEM;
goto err_out_free_cq; goto err_out_free_cq;
} }
sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE); new->q.sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE); new->q.sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
} }
INIT_WORK(&new->dim.work, ionic_dim_work); INIT_WORK(&new->dim.work, ionic_dim_work);
...@@ -704,8 +689,6 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, ...@@ -704,8 +689,6 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order); ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
} }
dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa); dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
err_out_free_cq_info:
vfree(new->cq.info);
err_out_free_irq: err_out_free_irq:
if (flags & IONIC_QCQ_F_INTR) { if (flags & IONIC_QCQ_F_INTR) {
devm_free_irq(dev, new->intr.vector, &new->napi); devm_free_irq(dev, new->intr.vector, &new->napi);
...@@ -731,7 +714,9 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif) ...@@ -731,7 +714,9 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
IONIC_ADMINQ_LENGTH, IONIC_ADMINQ_LENGTH,
sizeof(struct ionic_admin_cmd), sizeof(struct ionic_admin_cmd),
sizeof(struct ionic_admin_comp), sizeof(struct ionic_admin_comp),
0, lif->kern_pid, &lif->adminqcq); 0,
sizeof(struct ionic_admin_desc_info),
lif->kern_pid, &lif->adminqcq);
if (err) if (err)
return err; return err;
ionic_debugfs_add_qcq(lif, lif->adminqcq); ionic_debugfs_add_qcq(lif, lif->adminqcq);
...@@ -742,7 +727,9 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif) ...@@ -742,7 +727,9 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
flags, IONIC_NOTIFYQ_LENGTH, flags, IONIC_NOTIFYQ_LENGTH,
sizeof(struct ionic_notifyq_cmd), sizeof(struct ionic_notifyq_cmd),
sizeof(union ionic_notifyq_comp), sizeof(union ionic_notifyq_comp),
0, lif->kern_pid, &lif->notifyqcq); 0,
sizeof(struct ionic_admin_desc_info),
lif->kern_pid, &lif->notifyqcq);
if (err) if (err)
goto err_out; goto err_out;
ionic_debugfs_add_qcq(lif, lif->notifyqcq); ionic_debugfs_add_qcq(lif, lif->notifyqcq);
...@@ -960,6 +947,7 @@ int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif) ...@@ -960,6 +947,7 @@ int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags, err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, txq_i, "hwstamp_tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz, num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_tx_desc_info),
lif->kern_pid, &txq); lif->kern_pid, &txq);
if (err) if (err)
goto err_qcq_alloc; goto err_qcq_alloc;
...@@ -1019,6 +1007,7 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif) ...@@ -1019,6 +1007,7 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif)
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags, err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, rxq_i, "hwstamp_rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz, num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_rx_desc_info),
lif->kern_pid, &rxq); lif->kern_pid, &rxq);
if (err) if (err)
goto err_qcq_alloc; goto err_qcq_alloc;
...@@ -1172,71 +1161,6 @@ int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class) ...@@ -1172,71 +1161,6 @@ int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class)
return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class); return ionic_lif_add_hwstamp_rxfilt(lif, pkt_class);
} }
static bool ionic_notifyq_service(struct ionic_cq *cq,
struct ionic_cq_info *cq_info)
{
union ionic_notifyq_comp *comp = cq_info->cq_desc;
struct ionic_deferred_work *work;
struct net_device *netdev;
struct ionic_queue *q;
struct ionic_lif *lif;
u64 eid;
q = cq->bound_q;
lif = q->info[0].cb_arg;
netdev = lif->netdev;
eid = le64_to_cpu(comp->event.eid);
/* Have we run out of new completions to process? */
if ((s64)(eid - lif->last_eid) <= 0)
return false;
lif->last_eid = eid;
dev_dbg(lif->ionic->dev, "notifyq event:\n");
dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
comp, sizeof(*comp), true);
switch (le16_to_cpu(comp->event.ecode)) {
case IONIC_EVENT_LINK_CHANGE:
ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
break;
case IONIC_EVENT_RESET:
if (lif->ionic->idev.fw_status_ready &&
!test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
!test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work) {
netdev_err(lif->netdev, "Reset event dropped\n");
clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
} else {
work->type = IONIC_DW_TYPE_LIF_RESET;
ionic_lif_deferred_enqueue(&lif->deferred, work);
}
}
break;
default:
netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
comp->event.ecode, eid);
break;
}
return true;
}
static bool ionic_adminq_service(struct ionic_cq *cq,
struct ionic_cq_info *cq_info)
{
struct ionic_admin_comp *comp = cq_info->cq_desc;
if (!color_match(comp->color, cq->done_color))
return false;
ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
return true;
}
static int ionic_adminq_napi(struct napi_struct *napi, int budget) static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{ {
struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr; struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
...@@ -2110,6 +2034,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif) ...@@ -2110,6 +2034,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
for (i = 0; i < lif->nxqs; i++) { for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz, num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_tx_desc_info),
lif->kern_pid, &lif->txqcqs[i]); lif->kern_pid, &lif->txqcqs[i]);
if (err) if (err)
goto err_out; goto err_out;
...@@ -2141,6 +2066,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif) ...@@ -2141,6 +2066,7 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
for (i = 0; i < lif->nxqs; i++) { for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz, num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_rx_desc_info),
lif->kern_pid, &lif->rxqcqs[i]); lif->kern_pid, &lif->rxqcqs[i]);
if (err) if (err)
goto err_out; goto err_out;
...@@ -2958,7 +2884,6 @@ static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b) ...@@ -2958,7 +2884,6 @@ static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
swap(a->cq.desc_size, b->cq.desc_size); swap(a->cq.desc_size, b->cq.desc_size);
swap(a->cq.base, b->cq.base); swap(a->cq.base, b->cq.base);
swap(a->cq.base_pa, b->cq.base_pa); swap(a->cq.base_pa, b->cq.base_pa);
swap(a->cq.info, b->cq.info);
swap(a->cq_base, b->cq_base); swap(a->cq_base, b->cq_base);
swap(a->cq_base_pa, b->cq_base_pa); swap(a->cq_base_pa, b->cq_base_pa);
swap(a->cq_size, b->cq_size); swap(a->cq_size, b->cq_size);
...@@ -3022,6 +2947,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, ...@@ -3022,6 +2947,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
4, desc_sz, comp_sz, sg_desc_sz, 4, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_tx_desc_info),
lif->kern_pid, &lif->txqcqs[i]); lif->kern_pid, &lif->txqcqs[i]);
if (err) if (err)
goto err_out; goto err_out;
...@@ -3030,6 +2956,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, ...@@ -3030,6 +2956,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR; flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz, num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_tx_desc_info),
lif->kern_pid, &tx_qcqs[i]); lif->kern_pid, &tx_qcqs[i]);
if (err) if (err)
goto err_out; goto err_out;
...@@ -3051,6 +2978,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, ...@@ -3051,6 +2978,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG; flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
4, desc_sz, comp_sz, sg_desc_sz, 4, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_rx_desc_info),
lif->kern_pid, &lif->rxqcqs[i]); lif->kern_pid, &lif->rxqcqs[i]);
if (err) if (err)
goto err_out; goto err_out;
...@@ -3059,6 +2987,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, ...@@ -3059,6 +2987,7 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR; flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags, err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz, num_desc, desc_sz, comp_sz, sg_desc_sz,
sizeof(struct ionic_rx_desc_info),
lif->kern_pid, &rx_qcqs[i]); lif->kern_pid, &rx_qcqs[i]);
if (err) if (err)
goto err_out; goto err_out;
...@@ -3633,7 +3562,7 @@ static int ionic_lif_notifyq_init(struct ionic_lif *lif) ...@@ -3633,7 +3562,7 @@ static int ionic_lif_notifyq_init(struct ionic_lif *lif)
dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index); dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
/* preset the callback info */ /* preset the callback info */
q->info[0].cb_arg = lif; q->admin_info[0].ctx = lif;
qcq->flags |= IONIC_QCQ_F_INITED; qcq->flags |= IONIC_QCQ_F_INITED;
...@@ -3885,6 +3814,7 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif) ...@@ -3885,6 +3814,7 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif)
union ionic_q_identity __iomem *q_ident; union ionic_q_identity __iomem *q_ident;
struct ionic *ionic = lif->ionic; struct ionic *ionic = lif->ionic;
struct ionic_dev *idev; struct ionic_dev *idev;
u16 max_frags;
int qtype; int qtype;
int err; int err;
...@@ -3952,17 +3882,16 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif) ...@@ -3952,17 +3882,16 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif)
dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n", dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
qtype, qti->sg_desc_stride); qtype, qti->sg_desc_stride);
if (qti->max_sg_elems >= IONIC_MAX_FRAGS) { if (qtype == IONIC_QTYPE_TXQ)
qti->max_sg_elems = IONIC_MAX_FRAGS - 1; max_frags = IONIC_TX_MAX_FRAGS;
dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to IONIC_MAX_FRAGS-1 %d\n", else if (qtype == IONIC_QTYPE_RXQ)
qtype, qti->max_sg_elems); max_frags = IONIC_RX_MAX_FRAGS;
} else
max_frags = 1;
if (qti->max_sg_elems > MAX_SKB_FRAGS) { qti->max_sg_elems = min_t(u16, max_frags - 1, MAX_SKB_FRAGS);
qti->max_sg_elems = MAX_SKB_FRAGS; dev_dbg(ionic->dev, "qtype %d max_sg_elems %d\n",
dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to MAX_SKB_FRAGS %d\n", qtype, qti->max_sg_elems);
qtype, qti->max_sg_elems);
}
} }
} }
......
...@@ -71,25 +71,25 @@ struct ionic_qcq { ...@@ -71,25 +71,25 @@ struct ionic_qcq {
void *q_base; void *q_base;
dma_addr_t q_base_pa; dma_addr_t q_base_pa;
u32 q_size; u32 q_size;
u32 cq_size;
void *cq_base; void *cq_base;
dma_addr_t cq_base_pa; dma_addr_t cq_base_pa;
u32 cq_size;
void *sg_base; void *sg_base;
dma_addr_t sg_base_pa; dma_addr_t sg_base_pa;
u32 sg_size; u32 sg_size;
unsigned int flags;
void __iomem *cmb_q_base; void __iomem *cmb_q_base;
phys_addr_t cmb_q_base_pa; phys_addr_t cmb_q_base_pa;
u32 cmb_q_size; u32 cmb_q_size;
u32 cmb_pgid; u32 cmb_pgid;
u32 cmb_order; u32 cmb_order;
struct dim dim; struct dim dim;
struct timer_list napi_deadline;
struct ionic_queue q; struct ionic_queue q;
struct ionic_cq cq; struct ionic_cq cq;
struct ionic_intr_info intr;
struct timer_list napi_deadline;
struct napi_struct napi; struct napi_struct napi;
unsigned int flags;
struct ionic_qcq *napi_qcq; struct ionic_qcq *napi_qcq;
struct ionic_intr_info intr;
struct dentry *dentry; struct dentry *dentry;
}; };
......
...@@ -190,7 +190,8 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode) ...@@ -190,7 +190,8 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
static void ionic_adminq_flush(struct ionic_lif *lif) static void ionic_adminq_flush(struct ionic_lif *lif)
{ {
struct ionic_desc_info *desc_info; struct ionic_admin_desc_info *desc_info;
struct ionic_admin_cmd *desc;
unsigned long irqflags; unsigned long irqflags;
struct ionic_queue *q; struct ionic_queue *q;
...@@ -203,10 +204,10 @@ static void ionic_adminq_flush(struct ionic_lif *lif) ...@@ -203,10 +204,10 @@ static void ionic_adminq_flush(struct ionic_lif *lif)
q = &lif->adminqcq->q; q = &lif->adminqcq->q;
while (q->tail_idx != q->head_idx) { while (q->tail_idx != q->head_idx) {
desc_info = &q->info[q->tail_idx]; desc = &q->adminq[q->tail_idx];
memset(desc_info->desc, 0, sizeof(union ionic_adminq_cmd)); desc_info = &q->admin_info[q->tail_idx];
desc_info->cb = NULL; memset(desc, 0, sizeof(union ionic_adminq_cmd));
desc_info->cb_arg = NULL; desc_info->ctx = NULL;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
} }
spin_unlock_irqrestore(&lif->adminq_lock, irqflags); spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
...@@ -246,25 +247,93 @@ static int ionic_adminq_check_err(struct ionic_lif *lif, ...@@ -246,25 +247,93 @@ static int ionic_adminq_check_err(struct ionic_lif *lif,
return err; return err;
} }
static void ionic_adminq_cb(struct ionic_queue *q, bool ionic_notifyq_service(struct ionic_cq *cq)
struct ionic_desc_info *desc_info,
struct ionic_cq_info *cq_info, void *cb_arg)
{ {
struct ionic_admin_ctx *ctx = cb_arg; struct ionic_deferred_work *work;
union ionic_notifyq_comp *comp;
struct net_device *netdev;
struct ionic_queue *q;
struct ionic_lif *lif;
u64 eid;
comp = &((union ionic_notifyq_comp *)cq->base)[cq->tail_idx];
q = cq->bound_q;
lif = q->admin_info[0].ctx;
netdev = lif->netdev;
eid = le64_to_cpu(comp->event.eid);
/* Have we run out of new completions to process? */
if ((s64)(eid - lif->last_eid) <= 0)
return false;
lif->last_eid = eid;
dev_dbg(lif->ionic->dev, "notifyq event:\n");
dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
comp, sizeof(*comp), true);
switch (le16_to_cpu(comp->event.ecode)) {
case IONIC_EVENT_LINK_CHANGE:
ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
break;
case IONIC_EVENT_RESET:
if (lif->ionic->idev.fw_status_ready &&
!test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
!test_and_set_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work) {
netdev_err(lif->netdev, "Reset event dropped\n");
clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
} else {
work->type = IONIC_DW_TYPE_LIF_RESET;
ionic_lif_deferred_enqueue(&lif->deferred, work);
}
}
break;
default:
netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
comp->event.ecode, eid);
break;
}
return true;
}
bool ionic_adminq_service(struct ionic_cq *cq)
{
struct ionic_admin_desc_info *desc_info;
struct ionic_queue *q = cq->bound_q;
struct ionic_admin_comp *comp; struct ionic_admin_comp *comp;
u16 index;
if (!ctx) comp = &((struct ionic_admin_comp *)cq->base)[cq->tail_idx];
return;
if (!color_match(comp->color, cq->done_color))
return false;
/* check for empty queue */
if (q->tail_idx == q->head_idx)
return false;
comp = cq_info->cq_desc; do {
desc_info = &q->admin_info[q->tail_idx];
index = q->tail_idx;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
if (likely(desc_info->ctx)) {
struct ionic_admin_ctx *ctx = desc_info->ctx;
memcpy(&ctx->comp, comp, sizeof(*comp)); memcpy(&ctx->comp, comp, sizeof(*comp));
dev_dbg(q->dev, "comp admin queue command:\n"); dev_dbg(q->dev, "comp admin queue command:\n");
dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1, dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
&ctx->comp, sizeof(ctx->comp), true); &ctx->comp, sizeof(ctx->comp), true);
complete_all(&ctx->work);
desc_info->ctx = NULL;
}
} while (index != le16_to_cpu(comp->comp_index));
complete_all(&ctx->work); return true;
} }
bool ionic_adminq_poke_doorbell(struct ionic_queue *q) bool ionic_adminq_poke_doorbell(struct ionic_queue *q)
...@@ -298,7 +367,8 @@ bool ionic_adminq_poke_doorbell(struct ionic_queue *q) ...@@ -298,7 +367,8 @@ bool ionic_adminq_poke_doorbell(struct ionic_queue *q)
int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{ {
struct ionic_desc_info *desc_info; struct ionic_admin_desc_info *desc_info;
struct ionic_admin_cmd *desc;
unsigned long irqflags; unsigned long irqflags;
struct ionic_queue *q; struct ionic_queue *q;
int err = 0; int err = 0;
...@@ -320,14 +390,17 @@ int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx) ...@@ -320,14 +390,17 @@ int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
if (err) if (err)
goto err_out; goto err_out;
desc_info = &q->info[q->head_idx]; desc_info = &q->admin_info[q->head_idx];
memcpy(desc_info->desc, &ctx->cmd, sizeof(ctx->cmd)); desc_info->ctx = ctx;
desc = &q->adminq[q->head_idx];
memcpy(desc, &ctx->cmd, sizeof(ctx->cmd));
dev_dbg(&lif->netdev->dev, "post admin queue command:\n"); dev_dbg(&lif->netdev->dev, "post admin queue command:\n");
dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1, dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
&ctx->cmd, sizeof(ctx->cmd), true); &ctx->cmd, sizeof(ctx->cmd), true);
ionic_q_post(q, true, ionic_adminq_cb, ctx); ionic_q_post(q, true);
err_out: err_out:
spin_unlock_irqrestore(&lif->adminq_lock, irqflags); spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
......
...@@ -19,23 +19,20 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, ...@@ -19,23 +19,20 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
size_t offset, size_t len); size_t offset, size_t len);
static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q, static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
struct ionic_desc_info *desc_info); struct ionic_tx_desc_info *desc_info);
static void ionic_tx_clean(struct ionic_queue *q, static void ionic_tx_clean(struct ionic_queue *q,
struct ionic_desc_info *desc_info, struct ionic_tx_desc_info *desc_info,
struct ionic_cq_info *cq_info, struct ionic_txq_comp *comp);
void *cb_arg);
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell, static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell)
ionic_desc_cb cb_func, void *cb_arg)
{ {
ionic_q_post(q, ring_dbell, cb_func, cb_arg); ionic_q_post(q, ring_dbell);
} }
static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell, static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell)
ionic_desc_cb cb_func, void *cb_arg)
{ {
ionic_q_post(q, ring_dbell, cb_func, cb_arg); ionic_q_post(q, ring_dbell);
} }
bool ionic_txq_poke_doorbell(struct ionic_queue *q) bool ionic_txq_poke_doorbell(struct ionic_queue *q)
...@@ -99,6 +96,14 @@ bool ionic_rxq_poke_doorbell(struct ionic_queue *q) ...@@ -99,6 +96,14 @@ bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
return true; return true;
} }
static inline struct ionic_txq_sg_elem *ionic_tx_sg_elems(struct ionic_queue *q)
{
if (likely(q->sg_desc_size == sizeof(struct ionic_txq_sg_desc_v1)))
return q->txq_sgl_v1[q->head_idx].elems;
else
return q->txq_sgl[q->head_idx].elems;
}
static inline struct netdev_queue *q_to_ndq(struct net_device *netdev, static inline struct netdev_queue *q_to_ndq(struct net_device *netdev,
struct ionic_queue *q) struct ionic_queue *q)
{ {
...@@ -123,37 +128,29 @@ static unsigned int ionic_rx_buf_size(struct ionic_buf_info *buf_info) ...@@ -123,37 +128,29 @@ static unsigned int ionic_rx_buf_size(struct ionic_buf_info *buf_info)
static int ionic_rx_page_alloc(struct ionic_queue *q, static int ionic_rx_page_alloc(struct ionic_queue *q,
struct ionic_buf_info *buf_info) struct ionic_buf_info *buf_info)
{ {
struct ionic_rx_stats *stats; struct device *dev = q->dev;
struct device *dev; dma_addr_t dma_addr;
struct page *page; struct page *page;
dev = q->dev;
stats = q_to_rx_stats(q);
if (unlikely(!buf_info)) {
net_err_ratelimited("%s: %s invalid buf_info in alloc\n",
dev_name(dev), q->name);
return -EINVAL;
}
page = alloc_pages(IONIC_PAGE_GFP_MASK, 0); page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
if (unlikely(!page)) { if (unlikely(!page)) {
net_err_ratelimited("%s: %s page alloc failed\n", net_err_ratelimited("%s: %s page alloc failed\n",
dev_name(dev), q->name); dev_name(dev), q->name);
stats->alloc_err++; q_to_rx_stats(q)->alloc_err++;
return -ENOMEM; return -ENOMEM;
} }
buf_info->dma_addr = dma_map_page(dev, page, 0, dma_addr = dma_map_page(dev, page, 0,
IONIC_PAGE_SIZE, DMA_FROM_DEVICE); IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) { if (unlikely(dma_mapping_error(dev, dma_addr))) {
__free_pages(page, 0); __free_pages(page, 0);
net_err_ratelimited("%s: %s dma map failed\n", net_err_ratelimited("%s: %s dma map failed\n",
dev_name(dev), q->name); dev_name(dev), q->name);
stats->dma_map_err++; q_to_rx_stats(q)->dma_map_err++;
return -EIO; return -EIO;
} }
buf_info->dma_addr = dma_addr;
buf_info->page = page; buf_info->page = page;
buf_info->page_offset = 0; buf_info->page_offset = 0;
...@@ -180,7 +177,7 @@ static void ionic_rx_page_free(struct ionic_queue *q, ...@@ -180,7 +177,7 @@ static void ionic_rx_page_free(struct ionic_queue *q,
} }
static bool ionic_rx_buf_recycle(struct ionic_queue *q, static bool ionic_rx_buf_recycle(struct ionic_queue *q,
struct ionic_buf_info *buf_info, u32 used) struct ionic_buf_info *buf_info, u32 len)
{ {
u32 size; u32 size;
...@@ -192,7 +189,7 @@ static bool ionic_rx_buf_recycle(struct ionic_queue *q, ...@@ -192,7 +189,7 @@ static bool ionic_rx_buf_recycle(struct ionic_queue *q,
if (page_to_nid(buf_info->page) != numa_mem_id()) if (page_to_nid(buf_info->page) != numa_mem_id())
return false; return false;
size = ALIGN(used, q->xdp_rxq_info ? IONIC_PAGE_SIZE : IONIC_PAGE_SPLIT_SZ); size = ALIGN(len, q->xdp_rxq_info ? IONIC_PAGE_SIZE : IONIC_PAGE_SPLIT_SZ);
buf_info->page_offset += size; buf_info->page_offset += size;
if (buf_info->page_offset >= IONIC_PAGE_SIZE) if (buf_info->page_offset >= IONIC_PAGE_SIZE)
return false; return false;
...@@ -202,95 +199,96 @@ static bool ionic_rx_buf_recycle(struct ionic_queue *q, ...@@ -202,95 +199,96 @@ static bool ionic_rx_buf_recycle(struct ionic_queue *q,
return true; return true;
} }
static struct sk_buff *ionic_rx_frags(struct net_device *netdev, static void ionic_rx_add_skb_frag(struct ionic_queue *q,
struct ionic_queue *q, struct sk_buff *skb,
struct ionic_desc_info *desc_info, struct ionic_buf_info *buf_info,
unsigned int headroom, u32 off, u32 len,
unsigned int len, bool synced)
unsigned int num_sg_elems, {
bool synced) if (!synced)
dma_sync_single_range_for_cpu(q->dev, ionic_rx_buf_pa(buf_info),
off, len, DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf_info->page, buf_info->page_offset + off,
len,
IONIC_PAGE_SIZE);
if (!ionic_rx_buf_recycle(q, buf_info, len)) {
dma_unmap_page(q->dev, buf_info->dma_addr,
IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
buf_info->page = NULL;
}
}
static struct sk_buff *ionic_rx_build_skb(struct ionic_queue *q,
struct ionic_rx_desc_info *desc_info,
unsigned int headroom,
unsigned int len,
unsigned int num_sg_elems,
bool synced)
{ {
struct ionic_buf_info *buf_info; struct ionic_buf_info *buf_info;
struct ionic_rx_stats *stats;
struct device *dev = q->dev;
struct sk_buff *skb; struct sk_buff *skb;
unsigned int i; unsigned int i;
u16 frag_len; u16 frag_len;
stats = q_to_rx_stats(q);
buf_info = &desc_info->bufs[0]; buf_info = &desc_info->bufs[0];
prefetchw(buf_info->page); prefetchw(buf_info->page);
skb = napi_get_frags(&q_to_qcq(q)->napi); skb = napi_get_frags(&q_to_qcq(q)->napi);
if (unlikely(!skb)) { if (unlikely(!skb)) {
net_warn_ratelimited("%s: SKB alloc failed on %s!\n", net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
dev_name(dev), q->name); dev_name(q->dev), q->name);
stats->alloc_err++; q_to_rx_stats(q)->alloc_err++;
return NULL; return NULL;
} }
i = num_sg_elems + 1; if (headroom)
do { frag_len = min_t(u16, len,
if (unlikely(!buf_info->page)) { IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
dev_kfree_skb(skb); else
return NULL; frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
}
if (headroom)
frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
else
frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
len -= frag_len;
if (!synced)
dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info),
headroom, frag_len, DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf_info->page, buf_info->page_offset + headroom,
frag_len, IONIC_PAGE_SIZE);
if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
dma_unmap_page(dev, buf_info->dma_addr,
IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
buf_info->page = NULL;
}
/* only needed on the first buffer */
if (headroom)
headroom = 0;
buf_info++; if (unlikely(!buf_info->page))
goto err_bad_buf_page;
ionic_rx_add_skb_frag(q, skb, buf_info, headroom, frag_len, synced);
len -= frag_len;
buf_info++;
i--; for (i = 0; i < num_sg_elems; i++, buf_info++) {
} while (i > 0); if (unlikely(!buf_info->page))
goto err_bad_buf_page;
frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info));
ionic_rx_add_skb_frag(q, skb, buf_info, 0, frag_len, synced);
len -= frag_len;
}
return skb; return skb;
err_bad_buf_page:
dev_kfree_skb(skb);
return NULL;
} }
static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev, static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev,
struct ionic_queue *q, struct ionic_queue *q,
struct ionic_desc_info *desc_info, struct ionic_rx_desc_info *desc_info,
unsigned int headroom, unsigned int headroom,
unsigned int len, unsigned int len,
bool synced) bool synced)
{ {
struct ionic_buf_info *buf_info; struct ionic_buf_info *buf_info;
struct ionic_rx_stats *stats;
struct device *dev = q->dev; struct device *dev = q->dev;
struct sk_buff *skb; struct sk_buff *skb;
stats = q_to_rx_stats(q);
buf_info = &desc_info->bufs[0]; buf_info = &desc_info->bufs[0];
skb = napi_alloc_skb(&q_to_qcq(q)->napi, len); skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
if (unlikely(!skb)) { if (unlikely(!skb)) {
net_warn_ratelimited("%s: SKB alloc failed on %s!\n", net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
dev_name(dev), q->name); dev_name(dev), q->name);
stats->alloc_err++; q_to_rx_stats(q)->alloc_err++;
return NULL; return NULL;
} }
...@@ -313,7 +311,7 @@ static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev, ...@@ -313,7 +311,7 @@ static struct sk_buff *ionic_rx_copybreak(struct net_device *netdev,
} }
static void ionic_xdp_tx_desc_clean(struct ionic_queue *q, static void ionic_xdp_tx_desc_clean(struct ionic_queue *q,
struct ionic_desc_info *desc_info) struct ionic_tx_desc_info *desc_info)
{ {
unsigned int nbufs = desc_info->nbufs; unsigned int nbufs = desc_info->nbufs;
struct ionic_buf_info *buf_info; struct ionic_buf_info *buf_info;
...@@ -351,7 +349,7 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame, ...@@ -351,7 +349,7 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
enum xdp_action act, struct page *page, int off, enum xdp_action act, struct page *page, int off,
bool ring_doorbell) bool ring_doorbell)
{ {
struct ionic_desc_info *desc_info; struct ionic_tx_desc_info *desc_info;
struct ionic_buf_info *buf_info; struct ionic_buf_info *buf_info;
struct ionic_tx_stats *stats; struct ionic_tx_stats *stats;
struct ionic_txq_desc *desc; struct ionic_txq_desc *desc;
...@@ -359,16 +357,14 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame, ...@@ -359,16 +357,14 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
dma_addr_t dma_addr; dma_addr_t dma_addr;
u64 cmd; u64 cmd;
desc_info = &q->info[q->head_idx]; desc_info = &q->tx_info[q->head_idx];
desc = desc_info->txq_desc; desc = &q->txq[q->head_idx];
buf_info = desc_info->bufs; buf_info = desc_info->bufs;
stats = q_to_tx_stats(q); stats = q_to_tx_stats(q);
dma_addr = ionic_tx_map_single(q, frame->data, len); dma_addr = ionic_tx_map_single(q, frame->data, len);
if (dma_mapping_error(q->dev, dma_addr)) { if (!dma_addr)
stats->dma_map_err++;
return -EIO; return -EIO;
}
buf_info->dma_addr = dma_addr; buf_info->dma_addr = dma_addr;
buf_info->len = len; buf_info->len = len;
buf_info->page = page; buf_info->page = page;
...@@ -388,11 +384,10 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame, ...@@ -388,11 +384,10 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
bi = &buf_info[1]; bi = &buf_info[1];
sinfo = xdp_get_shared_info_from_frame(frame); sinfo = xdp_get_shared_info_from_frame(frame);
frag = sinfo->frags; frag = sinfo->frags;
elem = desc_info->txq_sg_desc->elems; elem = ionic_tx_sg_elems(q);
for (i = 0; i < sinfo->nr_frags; i++, frag++, bi++) { for (i = 0; i < sinfo->nr_frags; i++, frag++, bi++) {
dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag)); dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
if (dma_mapping_error(q->dev, dma_addr)) { if (!dma_addr) {
stats->dma_map_err++;
ionic_tx_desc_unmap_bufs(q, desc_info); ionic_tx_desc_unmap_bufs(q, desc_info);
return -EIO; return -EIO;
} }
...@@ -419,7 +414,7 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame, ...@@ -419,7 +414,7 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
stats->pkts++; stats->pkts++;
stats->bytes += len; stats->bytes += len;
ionic_txq_post(q, ring_doorbell, ionic_tx_clean, NULL); ionic_txq_post(q, ring_doorbell);
return 0; return 0;
} }
...@@ -627,21 +622,19 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, ...@@ -627,21 +622,19 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
} }
static void ionic_rx_clean(struct ionic_queue *q, static void ionic_rx_clean(struct ionic_queue *q,
struct ionic_desc_info *desc_info, struct ionic_rx_desc_info *desc_info,
struct ionic_cq_info *cq_info, struct ionic_rxq_comp *comp)
void *cb_arg)
{ {
struct net_device *netdev = q->lif->netdev; struct net_device *netdev = q->lif->netdev;
struct ionic_qcq *qcq = q_to_qcq(q); struct ionic_qcq *qcq = q_to_qcq(q);
struct ionic_rx_stats *stats; struct ionic_rx_stats *stats;
struct ionic_rxq_comp *comp;
struct bpf_prog *xdp_prog; struct bpf_prog *xdp_prog;
unsigned int headroom; unsigned int headroom;
struct sk_buff *skb; struct sk_buff *skb;
bool synced = false;
bool use_copybreak;
u16 len; u16 len;
comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp);
stats = q_to_rx_stats(q); stats = q_to_rx_stats(q);
if (comp->status) { if (comp->status) {
...@@ -654,17 +647,20 @@ static void ionic_rx_clean(struct ionic_queue *q, ...@@ -654,17 +647,20 @@ static void ionic_rx_clean(struct ionic_queue *q,
stats->bytes += len; stats->bytes += len;
xdp_prog = READ_ONCE(q->lif->xdp_prog); xdp_prog = READ_ONCE(q->lif->xdp_prog);
if (xdp_prog && if (xdp_prog) {
ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len)) if (ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len))
return; return;
synced = true;
}
headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0; headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0;
if (len <= q->lif->rx_copybreak) use_copybreak = len <= q->lif->rx_copybreak;
if (use_copybreak)
skb = ionic_rx_copybreak(netdev, q, desc_info, skb = ionic_rx_copybreak(netdev, q, desc_info,
headroom, len, !!xdp_prog); headroom, len, synced);
else else
skb = ionic_rx_frags(netdev, q, desc_info, headroom, len, skb = ionic_rx_build_skb(q, desc_info, headroom, len,
comp->num_sg_elems, !!xdp_prog); comp->num_sg_elems, synced);
if (unlikely(!skb)) { if (unlikely(!skb)) {
stats->dropped++; stats->dropped++;
...@@ -716,7 +712,7 @@ static void ionic_rx_clean(struct ionic_queue *q, ...@@ -716,7 +712,7 @@ static void ionic_rx_clean(struct ionic_queue *q,
u64 hwstamp; u64 hwstamp;
cq_desc_hwstamp = cq_desc_hwstamp =
cq_info->cq_desc + (void *)comp +
qcq->cq.desc_size - qcq->cq.desc_size -
sizeof(struct ionic_rxq_comp) - sizeof(struct ionic_rxq_comp) -
IONIC_HWSTAMP_CQ_NEGOFFSET; IONIC_HWSTAMP_CQ_NEGOFFSET;
...@@ -731,19 +727,19 @@ static void ionic_rx_clean(struct ionic_queue *q, ...@@ -731,19 +727,19 @@ static void ionic_rx_clean(struct ionic_queue *q,
} }
} }
if (len <= q->lif->rx_copybreak) if (use_copybreak)
napi_gro_receive(&qcq->napi, skb); napi_gro_receive(&qcq->napi, skb);
else else
napi_gro_frags(&qcq->napi); napi_gro_frags(&qcq->napi);
} }
bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) bool ionic_rx_service(struct ionic_cq *cq)
{ {
struct ionic_rx_desc_info *desc_info;
struct ionic_queue *q = cq->bound_q; struct ionic_queue *q = cq->bound_q;
struct ionic_desc_info *desc_info;
struct ionic_rxq_comp *comp; struct ionic_rxq_comp *comp;
comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp); comp = &((struct ionic_rxq_comp *)cq->base)[cq->tail_idx];
if (!color_match(comp->pkt_type_color, cq->done_color)) if (!color_match(comp->pkt_type_color, cq->done_color))
return false; return false;
...@@ -755,31 +751,29 @@ bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) ...@@ -755,31 +751,29 @@ bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
if (q->tail_idx != le16_to_cpu(comp->comp_index)) if (q->tail_idx != le16_to_cpu(comp->comp_index))
return false; return false;
desc_info = &q->info[q->tail_idx]; desc_info = &q->rx_info[q->tail_idx];
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
/* clean the related q entry, only one per qc completion */ /* clean the related q entry, only one per qc completion */
ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg); ionic_rx_clean(q, desc_info, comp);
desc_info->cb = NULL;
desc_info->cb_arg = NULL;
return true; return true;
} }
static inline void ionic_write_cmb_desc(struct ionic_queue *q, static inline void ionic_write_cmb_desc(struct ionic_queue *q,
void __iomem *cmb_desc,
void *desc) void *desc)
{ {
if (q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS) /* Since Rx and Tx descriptors are the same size, we can
memcpy_toio(cmb_desc, desc, q->desc_size); * save an instruction or two and skip the qtype check.
*/
if (unlikely(q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS))
memcpy_toio(&q->cmb_txq[q->head_idx], desc, sizeof(q->cmb_txq[0]));
} }
void ionic_rx_fill(struct ionic_queue *q) void ionic_rx_fill(struct ionic_queue *q)
{ {
struct net_device *netdev = q->lif->netdev; struct net_device *netdev = q->lif->netdev;
struct ionic_desc_info *desc_info; struct ionic_rx_desc_info *desc_info;
struct ionic_rxq_sg_desc *sg_desc;
struct ionic_rxq_sg_elem *sg_elem; struct ionic_rxq_sg_elem *sg_elem;
struct ionic_buf_info *buf_info; struct ionic_buf_info *buf_info;
unsigned int fill_threshold; unsigned int fill_threshold;
...@@ -807,8 +801,8 @@ void ionic_rx_fill(struct ionic_queue *q) ...@@ -807,8 +801,8 @@ void ionic_rx_fill(struct ionic_queue *q)
nfrags = 0; nfrags = 0;
remain_len = len; remain_len = len;
desc_info = &q->info[q->head_idx]; desc = &q->rxq[q->head_idx];
desc = desc_info->desc; desc_info = &q->rx_info[q->head_idx];
buf_info = &desc_info->bufs[0]; buf_info = &desc_info->bufs[0];
if (!buf_info->page) { /* alloc a new buffer? */ if (!buf_info->page) { /* alloc a new buffer? */
...@@ -837,9 +831,8 @@ void ionic_rx_fill(struct ionic_queue *q) ...@@ -837,9 +831,8 @@ void ionic_rx_fill(struct ionic_queue *q)
nfrags++; nfrags++;
/* fill sg descriptors - buf[1..n] */ /* fill sg descriptors - buf[1..n] */
sg_desc = desc_info->sg_desc; sg_elem = q->rxq_sgl[q->head_idx].elems;
for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) { for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++, sg_elem++) {
sg_elem = &sg_desc->elems[j];
if (!buf_info->page) { /* alloc a new sg buffer? */ if (!buf_info->page) { /* alloc a new sg buffer? */
if (unlikely(ionic_rx_page_alloc(q, buf_info))) { if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
sg_elem->addr = 0; sg_elem->addr = 0;
...@@ -857,18 +850,16 @@ void ionic_rx_fill(struct ionic_queue *q) ...@@ -857,18 +850,16 @@ void ionic_rx_fill(struct ionic_queue *q)
} }
/* clear end sg element as a sentinel */ /* clear end sg element as a sentinel */
if (j < q->max_sg_elems) { if (j < q->max_sg_elems)
sg_elem = &sg_desc->elems[j];
memset(sg_elem, 0, sizeof(*sg_elem)); memset(sg_elem, 0, sizeof(*sg_elem));
}
desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG : desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
IONIC_RXQ_DESC_OPCODE_SIMPLE; IONIC_RXQ_DESC_OPCODE_SIMPLE;
desc_info->nbufs = nfrags; desc_info->nbufs = nfrags;
ionic_write_cmb_desc(q, desc_info->cmb_desc, desc); ionic_write_cmb_desc(q, desc);
ionic_rxq_post(q, false, ionic_rx_clean, NULL); ionic_rxq_post(q, false);
} }
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type, ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
...@@ -883,21 +874,19 @@ void ionic_rx_fill(struct ionic_queue *q) ...@@ -883,21 +874,19 @@ void ionic_rx_fill(struct ionic_queue *q)
void ionic_rx_empty(struct ionic_queue *q) void ionic_rx_empty(struct ionic_queue *q)
{ {
struct ionic_desc_info *desc_info; struct ionic_rx_desc_info *desc_info;
struct ionic_buf_info *buf_info; struct ionic_buf_info *buf_info;
unsigned int i, j; unsigned int i, j;
for (i = 0; i < q->num_descs; i++) { for (i = 0; i < q->num_descs; i++) {
desc_info = &q->info[i]; desc_info = &q->rx_info[i];
for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) { for (j = 0; j < ARRAY_SIZE(desc_info->bufs); j++) {
buf_info = &desc_info->bufs[j]; buf_info = &desc_info->bufs[j];
if (buf_info->page) if (buf_info->page)
ionic_rx_page_free(q, buf_info); ionic_rx_page_free(q, buf_info);
} }
desc_info->nbufs = 0; desc_info->nbufs = 0;
desc_info->cb = NULL;
desc_info->cb_arg = NULL;
} }
q->head_idx = 0; q->head_idx = 0;
...@@ -942,14 +931,9 @@ int ionic_tx_napi(struct napi_struct *napi, int budget) ...@@ -942,14 +931,9 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
{ {
struct ionic_qcq *qcq = napi_to_qcq(napi); struct ionic_qcq *qcq = napi_to_qcq(napi);
struct ionic_cq *cq = napi_to_cq(napi); struct ionic_cq *cq = napi_to_cq(napi);
struct ionic_dev *idev;
struct ionic_lif *lif;
u32 work_done = 0; u32 work_done = 0;
u32 flags = 0; u32 flags = 0;
lif = cq->bound_q->lif;
idev = &lif->ionic->idev;
work_done = ionic_tx_cq_service(cq, budget); work_done = ionic_tx_cq_service(cq, budget);
if (unlikely(!budget)) if (unlikely(!budget))
...@@ -963,7 +947,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget) ...@@ -963,7 +947,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
if (work_done || flags) { if (work_done || flags) {
flags |= IONIC_INTR_CRED_RESET_COALESCE; flags |= IONIC_INTR_CRED_RESET_COALESCE;
ionic_intr_credits(idev->intr_ctrl, ionic_intr_credits(cq->idev->intr_ctrl,
cq->bound_intr->index, cq->bound_intr->index,
work_done, flags); work_done, flags);
} }
...@@ -986,17 +970,12 @@ int ionic_rx_napi(struct napi_struct *napi, int budget) ...@@ -986,17 +970,12 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
{ {
struct ionic_qcq *qcq = napi_to_qcq(napi); struct ionic_qcq *qcq = napi_to_qcq(napi);
struct ionic_cq *cq = napi_to_cq(napi); struct ionic_cq *cq = napi_to_cq(napi);
struct ionic_dev *idev;
struct ionic_lif *lif;
u32 work_done = 0; u32 work_done = 0;
u32 flags = 0; u32 flags = 0;
if (unlikely(!budget)) if (unlikely(!budget))
return budget; return budget;
lif = cq->bound_q->lif;
idev = &lif->ionic->idev;
work_done = ionic_cq_service(cq, budget, work_done = ionic_cq_service(cq, budget,
ionic_rx_service, NULL, NULL); ionic_rx_service, NULL, NULL);
...@@ -1011,7 +990,7 @@ int ionic_rx_napi(struct napi_struct *napi, int budget) ...@@ -1011,7 +990,7 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
if (work_done || flags) { if (work_done || flags) {
flags |= IONIC_INTR_CRED_RESET_COALESCE; flags |= IONIC_INTR_CRED_RESET_COALESCE;
ionic_intr_credits(idev->intr_ctrl, ionic_intr_credits(cq->idev->intr_ctrl,
cq->bound_intr->index, cq->bound_intr->index,
work_done, flags); work_done, flags);
} }
...@@ -1028,7 +1007,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) ...@@ -1028,7 +1007,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
struct ionic_cq *rxcq = napi_to_cq(napi); struct ionic_cq *rxcq = napi_to_cq(napi);
unsigned int qi = rxcq->bound_q->index; unsigned int qi = rxcq->bound_q->index;
struct ionic_qcq *txqcq; struct ionic_qcq *txqcq;
struct ionic_dev *idev;
struct ionic_lif *lif; struct ionic_lif *lif;
struct ionic_cq *txcq; struct ionic_cq *txcq;
bool resched = false; bool resched = false;
...@@ -1037,7 +1015,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) ...@@ -1037,7 +1015,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
u32 flags = 0; u32 flags = 0;
lif = rxcq->bound_q->lif; lif = rxcq->bound_q->lif;
idev = &lif->ionic->idev;
txqcq = lif->txqcqs[qi]; txqcq = lif->txqcqs[qi];
txcq = &lif->txqcqs[qi]->cq; txcq = &lif->txqcqs[qi]->cq;
...@@ -1060,7 +1037,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) ...@@ -1060,7 +1037,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
if (rx_work_done || flags) { if (rx_work_done || flags) {
flags |= IONIC_INTR_CRED_RESET_COALESCE; flags |= IONIC_INTR_CRED_RESET_COALESCE;
ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index, ionic_intr_credits(rxcq->idev->intr_ctrl, rxcq->bound_intr->index,
tx_work_done + rx_work_done, flags); tx_work_done + rx_work_done, flags);
} }
...@@ -1077,7 +1054,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) ...@@ -1077,7 +1054,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
void *data, size_t len) void *data, size_t len)
{ {
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct device *dev = q->dev; struct device *dev = q->dev;
dma_addr_t dma_addr; dma_addr_t dma_addr;
...@@ -1085,7 +1061,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, ...@@ -1085,7 +1061,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
if (dma_mapping_error(dev, dma_addr)) { if (dma_mapping_error(dev, dma_addr)) {
net_warn_ratelimited("%s: DMA single map failed on %s!\n", net_warn_ratelimited("%s: DMA single map failed on %s!\n",
dev_name(dev), q->name); dev_name(dev), q->name);
stats->dma_map_err++; q_to_tx_stats(q)->dma_map_err++;
return 0; return 0;
} }
return dma_addr; return dma_addr;
...@@ -1095,7 +1071,6 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, ...@@ -1095,7 +1071,6 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
const skb_frag_t *frag, const skb_frag_t *frag,
size_t offset, size_t len) size_t offset, size_t len)
{ {
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct device *dev = q->dev; struct device *dev = q->dev;
dma_addr_t dma_addr; dma_addr_t dma_addr;
...@@ -1103,16 +1078,16 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, ...@@ -1103,16 +1078,16 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
if (dma_mapping_error(dev, dma_addr)) { if (dma_mapping_error(dev, dma_addr)) {
net_warn_ratelimited("%s: DMA frag map failed on %s!\n", net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
dev_name(dev), q->name); dev_name(dev), q->name);
stats->dma_map_err++; q_to_tx_stats(q)->dma_map_err++;
return 0;
} }
return dma_addr; return dma_addr;
} }
static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb, static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
struct ionic_desc_info *desc_info) struct ionic_tx_desc_info *desc_info)
{ {
struct ionic_buf_info *buf_info = desc_info->bufs; struct ionic_buf_info *buf_info = desc_info->bufs;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct device *dev = q->dev; struct device *dev = q->dev;
dma_addr_t dma_addr; dma_addr_t dma_addr;
unsigned int nfrags; unsigned int nfrags;
...@@ -1120,10 +1095,8 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb, ...@@ -1120,10 +1095,8 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
int frag_idx; int frag_idx;
dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb)); dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
if (dma_mapping_error(dev, dma_addr)) { if (!dma_addr)
stats->dma_map_err++;
return -EIO; return -EIO;
}
buf_info->dma_addr = dma_addr; buf_info->dma_addr = dma_addr;
buf_info->len = skb_headlen(skb); buf_info->len = skb_headlen(skb);
buf_info++; buf_info++;
...@@ -1132,10 +1105,8 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb, ...@@ -1132,10 +1105,8 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
nfrags = skb_shinfo(skb)->nr_frags; nfrags = skb_shinfo(skb)->nr_frags;
for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) { for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag)); dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
if (dma_mapping_error(dev, dma_addr)) { if (!dma_addr)
stats->dma_map_err++;
goto dma_fail; goto dma_fail;
}
buf_info->dma_addr = dma_addr; buf_info->dma_addr = dma_addr;
buf_info->len = skb_frag_size(frag); buf_info->len = skb_frag_size(frag);
buf_info++; buf_info++;
...@@ -1153,12 +1124,13 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb, ...@@ -1153,12 +1124,13 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
dma_unmap_page(dev, buf_info->dma_addr, dma_unmap_page(dev, buf_info->dma_addr,
buf_info->len, DMA_TO_DEVICE); buf_info->len, DMA_TO_DEVICE);
} }
dma_unmap_single(dev, buf_info->dma_addr, buf_info->len, DMA_TO_DEVICE); dma_unmap_single(dev, desc_info->bufs[0].dma_addr,
desc_info->bufs[0].len, DMA_TO_DEVICE);
return -EIO; return -EIO;
} }
static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q, static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
struct ionic_desc_info *desc_info) struct ionic_tx_desc_info *desc_info)
{ {
struct ionic_buf_info *buf_info = desc_info->bufs; struct ionic_buf_info *buf_info = desc_info->bufs;
struct device *dev = q->dev; struct device *dev = q->dev;
...@@ -1167,24 +1139,23 @@ static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q, ...@@ -1167,24 +1139,23 @@ static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
if (!desc_info->nbufs) if (!desc_info->nbufs)
return; return;
dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr, dma_unmap_single(dev, buf_info->dma_addr,
buf_info->len, DMA_TO_DEVICE); buf_info->len, DMA_TO_DEVICE);
buf_info++; buf_info++;
for (i = 1; i < desc_info->nbufs; i++, buf_info++) for (i = 1; i < desc_info->nbufs; i++, buf_info++)
dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr, dma_unmap_page(dev, buf_info->dma_addr,
buf_info->len, DMA_TO_DEVICE); buf_info->len, DMA_TO_DEVICE);
desc_info->nbufs = 0; desc_info->nbufs = 0;
} }
static void ionic_tx_clean(struct ionic_queue *q, static void ionic_tx_clean(struct ionic_queue *q,
struct ionic_desc_info *desc_info, struct ionic_tx_desc_info *desc_info,
struct ionic_cq_info *cq_info, struct ionic_txq_comp *comp)
void *cb_arg)
{ {
struct ionic_tx_stats *stats = q_to_tx_stats(q); struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_qcq *qcq = q_to_qcq(q); struct ionic_qcq *qcq = q_to_qcq(q);
struct sk_buff *skb = cb_arg; struct sk_buff *skb;
if (desc_info->xdpf) { if (desc_info->xdpf) {
ionic_xdp_tx_desc_clean(q->partner, desc_info); ionic_xdp_tx_desc_clean(q->partner, desc_info);
...@@ -1198,17 +1169,18 @@ static void ionic_tx_clean(struct ionic_queue *q, ...@@ -1198,17 +1169,18 @@ static void ionic_tx_clean(struct ionic_queue *q,
ionic_tx_desc_unmap_bufs(q, desc_info); ionic_tx_desc_unmap_bufs(q, desc_info);
skb = desc_info->skb;
if (!skb) if (!skb)
return; return;
if (unlikely(ionic_txq_hwstamp_enabled(q))) { if (unlikely(ionic_txq_hwstamp_enabled(q))) {
if (cq_info) { if (comp) {
struct skb_shared_hwtstamps hwts = {}; struct skb_shared_hwtstamps hwts = {};
__le64 *cq_desc_hwstamp; __le64 *cq_desc_hwstamp;
u64 hwstamp; u64 hwstamp;
cq_desc_hwstamp = cq_desc_hwstamp =
cq_info->cq_desc + (void *)comp +
qcq->cq.desc_size - qcq->cq.desc_size -
sizeof(struct ionic_txq_comp) - sizeof(struct ionic_txq_comp) -
IONIC_HWSTAMP_CQ_NEGOFFSET; IONIC_HWSTAMP_CQ_NEGOFFSET;
...@@ -1234,17 +1206,17 @@ static void ionic_tx_clean(struct ionic_queue *q, ...@@ -1234,17 +1206,17 @@ static void ionic_tx_clean(struct ionic_queue *q,
napi_consume_skb(skb, 1); napi_consume_skb(skb, 1);
} }
static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info, static bool ionic_tx_service(struct ionic_cq *cq,
unsigned int *total_pkts, unsigned int *total_bytes) unsigned int *total_pkts, unsigned int *total_bytes)
{ {
struct ionic_tx_desc_info *desc_info;
struct ionic_queue *q = cq->bound_q; struct ionic_queue *q = cq->bound_q;
struct ionic_desc_info *desc_info;
struct ionic_txq_comp *comp; struct ionic_txq_comp *comp;
unsigned int bytes = 0; unsigned int bytes = 0;
unsigned int pkts = 0; unsigned int pkts = 0;
u16 index; u16 index;
comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp); comp = &((struct ionic_txq_comp *)cq->base)[cq->tail_idx];
if (!color_match(comp->color, cq->done_color)) if (!color_match(comp->color, cq->done_color))
return false; return false;
...@@ -1253,17 +1225,16 @@ static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info, ...@@ -1253,17 +1225,16 @@ static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info,
* several q entries completed for each cq completion * several q entries completed for each cq completion
*/ */
do { do {
desc_info = &q->info[q->tail_idx]; desc_info = &q->tx_info[q->tail_idx];
desc_info->bytes = 0; desc_info->bytes = 0;
index = q->tail_idx; index = q->tail_idx;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg); ionic_tx_clean(q, desc_info, comp);
if (desc_info->cb_arg) { if (desc_info->skb) {
pkts++; pkts++;
bytes += desc_info->bytes; bytes += desc_info->bytes;
desc_info->skb = NULL;
} }
desc_info->cb = NULL;
desc_info->cb_arg = NULL;
} while (index != le16_to_cpu(comp->comp_index)); } while (index != le16_to_cpu(comp->comp_index));
(*total_pkts) += pkts; (*total_pkts) += pkts;
...@@ -1274,7 +1245,6 @@ static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info, ...@@ -1274,7 +1245,6 @@ static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info,
unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do) unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
{ {
struct ionic_cq_info *cq_info;
unsigned int work_done = 0; unsigned int work_done = 0;
unsigned int bytes = 0; unsigned int bytes = 0;
unsigned int pkts = 0; unsigned int pkts = 0;
...@@ -1282,12 +1252,10 @@ unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do) ...@@ -1282,12 +1252,10 @@ unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
if (work_to_do == 0) if (work_to_do == 0)
return 0; return 0;
cq_info = &cq->info[cq->tail_idx]; while (ionic_tx_service(cq, &pkts, &bytes)) {
while (ionic_tx_service(cq, cq_info, &pkts, &bytes)) {
if (cq->tail_idx == cq->num_descs - 1) if (cq->tail_idx == cq->num_descs - 1)
cq->done_color = !cq->done_color; cq->done_color = !cq->done_color;
cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1); cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
cq_info = &cq->info[cq->tail_idx];
if (++work_done >= work_to_do) if (++work_done >= work_to_do)
break; break;
...@@ -1308,33 +1276,31 @@ unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do) ...@@ -1308,33 +1276,31 @@ unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
void ionic_tx_flush(struct ionic_cq *cq) void ionic_tx_flush(struct ionic_cq *cq)
{ {
struct ionic_dev *idev = &cq->lif->ionic->idev;
u32 work_done; u32 work_done;
work_done = ionic_tx_cq_service(cq, cq->num_descs); work_done = ionic_tx_cq_service(cq, cq->num_descs);
if (work_done) if (work_done)
ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index, ionic_intr_credits(cq->idev->intr_ctrl, cq->bound_intr->index,
work_done, IONIC_INTR_CRED_RESET_COALESCE); work_done, IONIC_INTR_CRED_RESET_COALESCE);
} }
void ionic_tx_empty(struct ionic_queue *q) void ionic_tx_empty(struct ionic_queue *q)
{ {
struct ionic_desc_info *desc_info; struct ionic_tx_desc_info *desc_info;
int bytes = 0; int bytes = 0;
int pkts = 0; int pkts = 0;
/* walk the not completed tx entries, if any */ /* walk the not completed tx entries, if any */
while (q->head_idx != q->tail_idx) { while (q->head_idx != q->tail_idx) {
desc_info = &q->info[q->tail_idx]; desc_info = &q->tx_info[q->tail_idx];
desc_info->bytes = 0; desc_info->bytes = 0;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1); q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg); ionic_tx_clean(q, desc_info, NULL);
if (desc_info->cb_arg) { if (desc_info->skb) {
pkts++; pkts++;
bytes += desc_info->bytes; bytes += desc_info->bytes;
desc_info->skb = NULL;
} }
desc_info->cb = NULL;
desc_info->cb_arg = NULL;
} }
if (likely(!ionic_txq_hwstamp_enabled(q))) { if (likely(!ionic_txq_hwstamp_enabled(q))) {
...@@ -1391,7 +1357,7 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb) ...@@ -1391,7 +1357,7 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
} }
static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q, static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q,
struct ionic_desc_info *desc_info, struct ionic_tx_desc_info *desc_info,
struct sk_buff *skb, struct sk_buff *skb,
dma_addr_t addr, u8 nsge, u16 len, dma_addr_t addr, u8 nsge, u16 len,
unsigned int hdrlen, unsigned int mss, unsigned int hdrlen, unsigned int mss,
...@@ -1399,7 +1365,7 @@ static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q, ...@@ -1399,7 +1365,7 @@ static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q,
u16 vlan_tci, bool has_vlan, u16 vlan_tci, bool has_vlan,
bool start, bool done) bool start, bool done)
{ {
struct ionic_txq_desc *desc = desc_info->desc; struct ionic_txq_desc *desc = &q->txq[q->head_idx];
u8 flags = 0; u8 flags = 0;
u64 cmd; u64 cmd;
...@@ -1415,15 +1381,15 @@ static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q, ...@@ -1415,15 +1381,15 @@ static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q,
desc->hdr_len = cpu_to_le16(hdrlen); desc->hdr_len = cpu_to_le16(hdrlen);
desc->mss = cpu_to_le16(mss); desc->mss = cpu_to_le16(mss);
ionic_write_cmb_desc(q, desc_info->cmb_desc, desc); ionic_write_cmb_desc(q, desc);
if (start) { if (start) {
skb_tx_timestamp(skb); skb_tx_timestamp(skb);
if (likely(!ionic_txq_hwstamp_enabled(q))) if (likely(!ionic_txq_hwstamp_enabled(q)))
netdev_tx_sent_queue(q_to_ndq(netdev, q), skb->len); netdev_tx_sent_queue(q_to_ndq(netdev, q), skb->len);
ionic_txq_post(q, false, ionic_tx_clean, skb); ionic_txq_post(q, false);
} else { } else {
ionic_txq_post(q, done, NULL, NULL); ionic_txq_post(q, done);
} }
} }
...@@ -1431,7 +1397,7 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q, ...@@ -1431,7 +1397,7 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
struct sk_buff *skb) struct sk_buff *skb)
{ {
struct ionic_tx_stats *stats = q_to_tx_stats(q); struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_desc_info *desc_info; struct ionic_tx_desc_info *desc_info;
struct ionic_buf_info *buf_info; struct ionic_buf_info *buf_info;
struct ionic_txq_sg_elem *elem; struct ionic_txq_sg_elem *elem;
struct ionic_txq_desc *desc; struct ionic_txq_desc *desc;
...@@ -1453,8 +1419,7 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q, ...@@ -1453,8 +1419,7 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
bool encap; bool encap;
int err; int err;
desc_info = &q->info[q->head_idx]; desc_info = &q->tx_info[q->head_idx];
buf_info = desc_info->bufs;
if (unlikely(ionic_tx_map_skb(q, skb, desc_info))) if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
return -EIO; return -EIO;
...@@ -1491,6 +1456,8 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q, ...@@ -1491,6 +1456,8 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
else else
hdrlen = skb_tcp_all_headers(skb); hdrlen = skb_tcp_all_headers(skb);
desc_info->skb = skb;
buf_info = desc_info->bufs;
tso_rem = len; tso_rem = len;
seg_rem = min(tso_rem, hdrlen + mss); seg_rem = min(tso_rem, hdrlen + mss);
...@@ -1517,8 +1484,8 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q, ...@@ -1517,8 +1484,8 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
chunk_len = min(frag_rem, seg_rem); chunk_len = min(frag_rem, seg_rem);
if (!desc) { if (!desc) {
/* fill main descriptor */ /* fill main descriptor */
desc = desc_info->txq_desc; desc = &q->txq[q->head_idx];
elem = desc_info->txq_sg_desc->elems; elem = ionic_tx_sg_elems(q);
desc_addr = frag_addr; desc_addr = frag_addr;
desc_len = chunk_len; desc_len = chunk_len;
} else { } else {
...@@ -1542,7 +1509,7 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q, ...@@ -1542,7 +1509,7 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
start, done); start, done);
start = false; start = false;
/* Buffer information is stored with the first tso descriptor */ /* Buffer information is stored with the first tso descriptor */
desc_info = &q->info[q->head_idx]; desc_info = &q->tx_info[q->head_idx];
desc_info->nbufs = 0; desc_info->nbufs = 0;
} }
...@@ -1555,9 +1522,9 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q, ...@@ -1555,9 +1522,9 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
} }
static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb, static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
struct ionic_desc_info *desc_info) struct ionic_tx_desc_info *desc_info)
{ {
struct ionic_txq_desc *desc = desc_info->txq_desc; struct ionic_txq_desc *desc = &q->txq[q->head_idx];
struct ionic_buf_info *buf_info = desc_info->bufs; struct ionic_buf_info *buf_info = desc_info->bufs;
struct ionic_tx_stats *stats = q_to_tx_stats(q); struct ionic_tx_stats *stats = q_to_tx_stats(q);
bool has_vlan; bool has_vlan;
...@@ -1585,7 +1552,7 @@ static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb, ...@@ -1585,7 +1552,7 @@ static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb)); desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
desc->csum_offset = cpu_to_le16(skb->csum_offset); desc->csum_offset = cpu_to_le16(skb->csum_offset);
ionic_write_cmb_desc(q, desc_info->cmb_desc, desc); ionic_write_cmb_desc(q, desc);
if (skb_csum_is_sctp(skb)) if (skb_csum_is_sctp(skb))
stats->crc32_csum++; stats->crc32_csum++;
...@@ -1594,9 +1561,9 @@ static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb, ...@@ -1594,9 +1561,9 @@ static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
} }
static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb, static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
struct ionic_desc_info *desc_info) struct ionic_tx_desc_info *desc_info)
{ {
struct ionic_txq_desc *desc = desc_info->txq_desc; struct ionic_txq_desc *desc = &q->txq[q->head_idx];
struct ionic_buf_info *buf_info = desc_info->bufs; struct ionic_buf_info *buf_info = desc_info->bufs;
struct ionic_tx_stats *stats = q_to_tx_stats(q); struct ionic_tx_stats *stats = q_to_tx_stats(q);
bool has_vlan; bool has_vlan;
...@@ -1624,20 +1591,20 @@ static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb, ...@@ -1624,20 +1591,20 @@ static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
desc->csum_start = 0; desc->csum_start = 0;
desc->csum_offset = 0; desc->csum_offset = 0;
ionic_write_cmb_desc(q, desc_info->cmb_desc, desc); ionic_write_cmb_desc(q, desc);
stats->csum_none++; stats->csum_none++;
} }
static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb, static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
struct ionic_desc_info *desc_info) struct ionic_tx_desc_info *desc_info)
{ {
struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
struct ionic_buf_info *buf_info = &desc_info->bufs[1]; struct ionic_buf_info *buf_info = &desc_info->bufs[1];
struct ionic_txq_sg_elem *elem = sg_desc->elems;
struct ionic_tx_stats *stats = q_to_tx_stats(q); struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_txq_sg_elem *elem;
unsigned int i; unsigned int i;
elem = ionic_tx_sg_elems(q);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) { for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) {
elem->addr = cpu_to_le64(buf_info->dma_addr); elem->addr = cpu_to_le64(buf_info->dma_addr);
elem->len = cpu_to_le16(buf_info->len); elem->len = cpu_to_le16(buf_info->len);
...@@ -1649,13 +1616,15 @@ static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb, ...@@ -1649,13 +1616,15 @@ static void ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
static int ionic_tx(struct net_device *netdev, struct ionic_queue *q, static int ionic_tx(struct net_device *netdev, struct ionic_queue *q,
struct sk_buff *skb) struct sk_buff *skb)
{ {
struct ionic_desc_info *desc_info = &q->info[q->head_idx]; struct ionic_tx_desc_info *desc_info = &q->tx_info[q->head_idx];
struct ionic_tx_stats *stats = q_to_tx_stats(q); struct ionic_tx_stats *stats = q_to_tx_stats(q);
bool ring_dbell = true; bool ring_dbell = true;
if (unlikely(ionic_tx_map_skb(q, skb, desc_info))) if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
return -EIO; return -EIO;
desc_info->skb = skb;
/* set up the initial descriptor */ /* set up the initial descriptor */
if (skb->ip_summed == CHECKSUM_PARTIAL) if (skb->ip_summed == CHECKSUM_PARTIAL)
ionic_tx_calc_csum(q, skb, desc_info); ionic_tx_calc_csum(q, skb, desc_info);
...@@ -1677,7 +1646,7 @@ static int ionic_tx(struct net_device *netdev, struct ionic_queue *q, ...@@ -1677,7 +1646,7 @@ static int ionic_tx(struct net_device *netdev, struct ionic_queue *q,
ring_dbell = __netdev_tx_sent_queue(ndq, skb->len, ring_dbell = __netdev_tx_sent_queue(ndq, skb->len,
netdev_xmit_more()); netdev_xmit_more());
} }
ionic_txq_post(q, ring_dbell, ionic_tx_clean, skb); ionic_txq_post(q, ring_dbell);
return 0; return 0;
} }
...@@ -1761,12 +1730,10 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb) ...@@ -1761,12 +1730,10 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
linearize: linearize:
if (too_many_frags) { if (too_many_frags) {
struct ionic_tx_stats *stats = q_to_tx_stats(q);
err = skb_linearize(skb); err = skb_linearize(skb);
if (err) if (err)
return err; return err;
stats->linearize++; q_to_tx_stats(q)->linearize++;
} }
return ndescs; return ndescs;
......
...@@ -14,7 +14,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget); ...@@ -14,7 +14,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget);
int ionic_txrx_napi(struct napi_struct *napi, int budget); int ionic_txrx_napi(struct napi_struct *napi, int budget);
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev); netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev);
bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info); bool ionic_rx_service(struct ionic_cq *cq);
int ionic_xdp_xmit(struct net_device *netdev, int n, struct xdp_frame **xdp, u32 flags); int ionic_xdp_xmit(struct net_device *netdev, int n, struct xdp_frame **xdp, u32 flags);
#endif /* _IONIC_TXRX_H_ */ #endif /* _IONIC_TXRX_H_ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment