Commit a754a8fc authored by Dave Jiang, committed by Jon Mason

NTB: allocate the number of transport entries based on the ring size

Currently we allocate only a fixed default number of descriptors for the tx
and rx sides. We should instead size the allocation to the number of
descriptors that the transport rings hold. The number of transmit descriptors
is known at initialization, so the tx side can be sized right away. For the
receive side, we allocate the default number of descriptors up front and
allocate additional ones once the actual maximum number of rx entries is
known.
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Allen Hubbe <allen.hubbe@emc.com>
Signed-off-by: Jon Mason <jdmason@kudzu.us>
parent 625f0802
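The change below implements a simple top-up pattern: track how many receive
descriptors have actually been allocated (the new rx_alloc_entry counter) and
grow the pool toward the ring capacity (rx_max_entry) once that capacity is
known. A minimal standalone sketch of the same accounting, written as a
userspace C program purely for illustration (none of these names are the
driver's):

#include <stdio.h>
#include <stdlib.h>

#define DEF_NUM_ENTRIES 100	/* stand-in for NTB_QP_DEF_NUM_ENTRIES */

struct pool {
	unsigned int alloc_entry;	/* entries allocated so far, cf. rx_alloc_entry */
	unsigned int max_entry;		/* ring capacity, cf. rx_max_entry */
};

/* Grow the pool to the ring capacity; a no-op if it is already big enough. */
static int pool_top_up(struct pool *p, unsigned int ring_entries)
{
	p->max_entry = ring_entries;
	while (p->alloc_entry < p->max_entry) {
		void *entry = calloc(1, 64);	/* placeholder descriptor */
		if (!entry)
			return -1;	/* the driver returns -ENOMEM here */
		free(entry);	/* sketch only: the driver keeps it on rx_free_q */
		p->alloc_entry++;
	}
	return 0;
}

int main(void)
{
	/* Start with the fixed default; learn the real ring size later. */
	struct pool p = { .alloc_entry = DEF_NUM_ENTRIES, .max_entry = 0 };

	if (pool_top_up(&p, 400))
		return 1;
	printf("pool holds %u of %u entries\n", p.alloc_entry, p.max_entry);
	return 0;
}

Because the loop starts at alloc_entry rather than zero, running it again
after a ring resize only allocates the difference, which is exactly how the
kernel loop in the diff below behaves.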
@@ -153,6 +153,7 @@ struct ntb_transport_qp {
 	unsigned int rx_index;
 	unsigned int rx_max_entry;
 	unsigned int rx_max_frame;
+	unsigned int rx_alloc_entry;
 	dma_cookie_t last_cookie;
 
 	struct tasklet_struct rxc_db_work;
@@ -480,7 +481,9 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "rx_index - \t%u\n", qp->rx_index);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
-			       "rx_max_entry - \t%u\n\n", qp->rx_max_entry);
+			       "rx_max_entry - \t%u\n", qp->rx_max_entry);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_alloc_entry - \t%u\n\n", qp->rx_alloc_entry);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "tx_bytes - \t%llu\n", qp->tx_bytes);
@@ -597,9 +600,12 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
 {
 	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
 	struct ntb_transport_mw *mw;
+	struct ntb_dev *ndev = nt->ndev;
+	struct ntb_queue_entry *entry;
 	unsigned int rx_size, num_qps_mw;
 	unsigned int mw_num, mw_count, qp_count;
 	unsigned int i;
+	int node;
 
 	mw_count = nt->mw_count;
 	qp_count = nt->qp_count;
@@ -626,6 +632,23 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
 	qp->rx_max_entry = rx_size / qp->rx_max_frame;
 	qp->rx_index = 0;
 
+	/*
+	 * Checking to see if we have more entries than the default.
+	 * We should add additional entries if that is the case so we
+	 * can be in sync with the transport frames.
+	 */
+	node = dev_to_node(&ndev->dev);
+	for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) {
+		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
+		if (!entry)
+			return -ENOMEM;
+
+		entry->qp = qp;
+		ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
+			     &qp->rx_free_q);
+		qp->rx_alloc_entry++;
+	}
+
 	qp->remote_rx_info->entry = qp->rx_max_entry - 1;
 
 	/* setup the hdr offsets with 0's */
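For context, the ntb_list_add() helper used by the new loop is the driver's
small lock-protected list append, defined earlier in the same file;
approximately:

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

Note that the kzalloc_node() call in the loop happens before the lock is
taken, so only the list append itself runs under the spinlock.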
@@ -1722,8 +1745,9 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
 		ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
 			     &qp->rx_free_q);
 	}
+	qp->rx_alloc_entry = NTB_QP_DEF_NUM_ENTRIES;
 
-	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
+	for (i = 0; i < qp->tx_max_entry; i++) {
 		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
 		if (!entry)
 			goto err2;
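The new loop bound works because the transmit ring geometry is already known
at queue-creation time. In ntb_transport_init_queue() the driver derives it
from the transmit ring size, roughly as follows (quoted from memory, so treat
as approximate):

	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

Allocating qp->tx_max_entry descriptors therefore provides one
ntb_queue_entry per transmit frame slot, instead of the fixed
NTB_QP_DEF_NUM_ENTRIES.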
@@ -1744,6 +1768,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
 	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
 		kfree(entry);
 err1:
+	qp->rx_alloc_entry = 0;
 	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
 		kfree(entry);
 	if (qp->tx_dma_chan)