Commit b0e32e20 authored by Doug Ledford

Merge branch 'k.o/for-4.13-rc' into k.o/for-next

Merging our (hopefully) final -rc pull branch into our for-next branch
because some of our pending patches won't apply cleanly without having
the -rc patches in our tree.
Signed-off-by: Doug Ledford <dledford@redhat.com>
parents d3cf4d99 870201f9
@@ -61,6 +61,7 @@ struct addr_req {
 	void (*callback)(int status, struct sockaddr *src_addr,
 			 struct rdma_dev_addr *addr, void *context);
 	unsigned long timeout;
+	struct delayed_work work;
 	int status;
 	u32 seq;
 };
@@ -293,7 +294,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
 }
 EXPORT_SYMBOL(rdma_translate_ip);

-static void set_timeout(unsigned long time)
+static void set_timeout(struct delayed_work *delayed_work, unsigned long time)
 {
 	unsigned long delay;
@@ -301,7 +302,7 @@ static void set_timeout(unsigned long time)
 	if ((long)delay < 0)
 		delay = 0;

-	mod_delayed_work(addr_wq, &work, delay);
+	mod_delayed_work(addr_wq, delayed_work, delay);
 }

 static void queue_req(struct addr_req *req)
@@ -316,8 +317,7 @@ static void queue_req(struct addr_req *req)
 	list_add(&req->list, &temp_req->list);

-	if (req_list.next == &req->list)
-		set_timeout(req->timeout);
+	set_timeout(&req->work, req->timeout);
 	mutex_unlock(&lock);
 }
@@ -572,6 +572,37 @@ static int addr_resolve(struct sockaddr *src_in,
 	return ret;
 }

+static void process_one_req(struct work_struct *_work)
+{
+	struct addr_req *req;
+	struct sockaddr *src_in, *dst_in;
+
+	mutex_lock(&lock);
+	req = container_of(_work, struct addr_req, work.work);
+
+	if (req->status == -ENODATA) {
+		src_in = (struct sockaddr *)&req->src_addr;
+		dst_in = (struct sockaddr *)&req->dst_addr;
+		req->status = addr_resolve(src_in, dst_in, req->addr,
+					   true, req->seq);
+		if (req->status && time_after_eq(jiffies, req->timeout)) {
+			req->status = -ETIMEDOUT;
+		} else if (req->status == -ENODATA) {
+			/* requeue the work for retrying again */
+			set_timeout(&req->work, req->timeout);
+			mutex_unlock(&lock);
+			return;
+		}
+	}
+	list_del(&req->list);
+	mutex_unlock(&lock);
+
+	req->callback(req->status, (struct sockaddr *)&req->src_addr,
+		req->addr, req->context);
+	put_client(req->client);
+	kfree(req);
+}
+
 static void process_req(struct work_struct *work)
 {
 	struct addr_req *req, *temp_req;
@@ -589,20 +620,23 @@ static void process_req(struct work_struct *work)
 						   true, req->seq);
 			if (req->status && time_after_eq(jiffies, req->timeout))
 				req->status = -ETIMEDOUT;
-			else if (req->status == -ENODATA)
+			else if (req->status == -ENODATA) {
+				set_timeout(&req->work, req->timeout);
 				continue;
+			}
 		}
 		list_move_tail(&req->list, &done_list);
 	}

-	if (!list_empty(&req_list)) {
-		req = list_entry(req_list.next, struct addr_req, list);
-		set_timeout(req->timeout);
-	}
 	mutex_unlock(&lock);

 	list_for_each_entry_safe(req, temp_req, &done_list, list) {
 		list_del(&req->list);
+		/* It is safe to cancel other work items from this work item
+		 * because at a time there can be only one work item running
+		 * with this single threaded work queue.
+		 */
+		cancel_delayed_work(&req->work);
 		req->callback(req->status, (struct sockaddr *) &req->src_addr,
 			req->addr, req->context);
 		put_client(req->client);
@@ -645,6 +679,7 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
 	req->context = context;
 	req->client = client;
 	atomic_inc(&client->refcount);
+	INIT_DELAYED_WORK(&req->work, process_one_req);
 	req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);

 	req->status = addr_resolve(src_in, dst_in, addr, true, req->seq);
@@ -699,7 +734,7 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
 			req->status = -ECANCELED;
 			req->timeout = jiffies;
 			list_move(&req->list, &req_list);
-			set_timeout(req->timeout);
+			set_timeout(&req->work, req->timeout);
 			break;
 		}
 	}
@@ -805,9 +840,8 @@ static int netevent_callback(struct notifier_block *self, unsigned long event,
 	if (event == NETEVENT_NEIGH_UPDATE) {
 		struct neighbour *neigh = ctx;

-		if (neigh->nud_state & NUD_VALID) {
-			set_timeout(jiffies);
-		}
+		if (neigh->nud_state & NUD_VALID)
+			set_timeout(&work, jiffies);
 	}
 	return 0;
 }
@@ -818,7 +852,7 @@ static struct notifier_block nb = {
 int addr_init(void)
 {
-	addr_wq = alloc_workqueue("ib_addr", WQ_MEM_RECLAIM, 0);
+	addr_wq = alloc_ordered_workqueue("ib_addr", WQ_MEM_RECLAIM);
 	if (!addr_wq)
 		return -ENOMEM;
......
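A note on the addr.c hunks above: the fix replaces a single shared delayed work item with a struct delayed_work embedded in each request, serviced by an ordered (single-threaded) workqueue, so every request can be armed, re-armed, and cancelled independently. A minimal sketch of that pattern follows; the struct and function names are made up for illustration, not taken from addr.c:

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct my_req {
        struct delayed_work work;
        /* ... per-request state ... */
    };

    static struct workqueue_struct *my_wq; /* from alloc_ordered_workqueue() */

    static void my_req_fn(struct work_struct *_work)
    {
        /* Note the double ".work": delayed_work embeds a work_struct,
         * and container_of() walks back from it to the request. */
        struct my_req *req = container_of(_work, struct my_req, work.work);

        /* ... process req, re-arm with mod_delayed_work(), or free ... */
        kfree(req);
    }

    static int my_submit(unsigned long delay)
    {
        struct my_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

        if (!req)
            return -ENOMEM;
        INIT_DELAYED_WORK(&req->work, my_req_fn);
        /* mod_delayed_work() (re)arms the timer whether or not the item
         * is already pending, which is what set_timeout() relies on. */
        mod_delayed_work(my_wq, &req->work, delay);
        return 0;
    }

The switch to alloc_ordered_workqueue() is what makes the cancel_delayed_work() call in process_req() safe: an ordered queue runs at most one item at a time, so no other work item can be executing while one is cancelled, as the added comment in the hunk notes.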
@@ -572,10 +572,11 @@ void ib_unregister_device(struct ib_device *device)
 	}
 	up_read(&lists_rwsem);

-	mutex_unlock(&device_mutex);
-
 	ib_device_unregister_rdmacg(device);
 	ib_device_unregister_sysfs(device);

+	mutex_unlock(&device_mutex);
+
 	ib_cache_cleanup_one(device);
 	ib_security_destroy_port_pkey_list(device);
......
@@ -1158,7 +1158,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
 			    int out_len)
 {
 	struct ib_uverbs_resize_cq	cmd;
-	struct ib_uverbs_resize_cq_resp	resp;
+	struct ib_uverbs_resize_cq_resp	resp = {};
 	struct ib_udata			udata;
 	struct ib_cq			*cq;
 	int				ret = -EINVAL;
......
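The one-line resize_cq change above is an instance of a common uverbs hardening pattern: a response struct that is copied back to user space is zero-initialized at declaration, so any field the handler never writes cannot leak kernel stack contents. An illustrative handler (not the uverbs code; the struct layout and names here are hypothetical):

    struct example_resp {
        __u32 value;
        __u32 reserved;    /* never written by the handler */
    };

    static int example_handler(void __user *ubuf)
    {
        struct example_resp resp = {};    /* all members start zeroed */

        resp.value = 42;                  /* reserved stays 0 */
        if (copy_to_user(ubuf, &resp, sizeof(resp)))
            return -EFAULT;
        return 0;
    }

An explicit memset() is the stricter form, since "= {}" is only guaranteed to zero the members, not necessarily padding bytes.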
@@ -250,6 +250,7 @@ void ib_uverbs_release_file(struct kref *ref)
 	if (atomic_dec_and_test(&file->device->refcount))
 		ib_uverbs_comp_dev(file->device);

+	kobject_put(&file->device->kobj);
 	kfree(file);
 }
@@ -917,7 +918,6 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
 static int ib_uverbs_close(struct inode *inode, struct file *filp)
 {
 	struct ib_uverbs_file *file = filp->private_data;
-	struct ib_uverbs_device *dev = file->device;

 	mutex_lock(&file->cleanup_mutex);
 	if (file->ucontext) {
@@ -939,7 +939,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
 					ib_uverbs_release_async_event_file);

 	kref_put(&file->ref, ib_uverbs_release_file);
-	kobject_put(&dev->kobj);

 	return 0;
 }
@@ -1154,7 +1153,6 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
 		kref_get(&file->ref);
 		mutex_unlock(&uverbs_dev->lists_mutex);
-		ib_uverbs_event_handler(&file->event_handler, &event);

 		mutex_lock(&file->cleanup_mutex);
 		ucontext = file->ucontext;
* for example due to freeing the resources * for example due to freeing the resources
* (e.g mmput). * (e.g mmput).
*/ */
ib_uverbs_event_handler(&file->event_handler, &event);
ib_dev->disassociate_ucontext(ucontext); ib_dev->disassociate_ucontext(ucontext);
mutex_lock(&file->cleanup_mutex); mutex_lock(&file->cleanup_mutex);
ib_uverbs_cleanup_ucontext(file, ucontext, true); ib_uverbs_cleanup_ucontext(file, ucontext, true);
......
@@ -895,7 +895,6 @@ static const struct {
 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
 	[IB_QPS_RESET] = {
 		[IB_QPS_RESET] = { .valid = 1 },
-		[IB_QPS_ERR] =   { .valid = 1 },
 		[IB_QPS_INIT]  = {
 			.valid = 1,
 			.req_param = {
......
@@ -661,7 +661,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
 	rhp = php->rhp;

 	if (mr_type != IB_MR_TYPE_MEM_REG ||
-	    max_num_sg > t4_max_fr_depth(&rhp->rdev.lldi.ulptx_memwrite_dsgl &&
+	    max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl &&
 					 use_dsgl))
 		return ERR_PTR(-EINVAL);
......
@@ -64,8 +64,10 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
 	} else {
 		u8 *dmac = rdma_ah_retrieve_dmac(ah_attr);

-		if (!dmac)
+		if (!dmac) {
+			kfree(ah);
 			return ERR_PTR(-EINVAL);
+		}

 		memcpy(ah->av.mac, dmac, ETH_ALEN);
 	}
......
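The hns_roce_create_ah() fix above follows the standard error-path rule: once an object has been allocated, every early return must unwind it before handing back ERR_PTR(). A generic sketch of the rule, with invented names throughout:

    struct thing { int x; };

    static int thing_setup(struct thing *t)   /* hypothetical init step */
    {
        return 0;
    }

    static struct thing *thing_create(void)
    {
        struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (!t)
            return ERR_PTR(-ENOMEM);

        if (thing_setup(t)) {    /* any failure after the allocation ... */
            kfree(t);            /* ... must free t before returning */
            return ERR_PTR(-EINVAL);
        }
        return t;
    }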
@@ -733,7 +733,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 			continue;

 		free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
-		if (IS_ERR(free_mr->mr_free_qp[i])) {
+		if (!free_mr->mr_free_qp[i]) {
 			dev_err(dev, "Create loop qp failed!\n");
 			goto create_lp_qp_failed;
 		}
......
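The hns_roce_v1_rsv_lp_qp() fix above corrects a mismatched failure convention: judging by the change, hns_roce_v1_create_lp_qp() reports failure by returning NULL, so the old IS_ERR() test could never fire. The two kernel conventions must not be mixed; a hypothetical illustration:

    #include <linux/err.h>

    struct qp;
    struct qp *create_err_ptr(void);   /* returns ERR_PTR(-E...) on failure */
    struct qp *create_null(void);      /* returns NULL on failure */

    static int caller(void)
    {
        struct qp *a, *b;

        a = create_err_ptr();
        if (IS_ERR(a))          /* the right test for ERR_PTR() callees */
            return PTR_ERR(a);

        b = create_null();
        if (!b)                 /* the right test for NULL callees */
            return -ENOMEM;
        return 0;
    }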
@@ -130,20 +130,32 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
 	u64 base = 0;
 	u32 i, j;
 	u32 k = 0;
-	u32 low;

 	/* copy base values in obj_info */
-	for (i = I40IW_HMC_IW_QP, j = 0;
-	     i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+	for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+		if ((i == I40IW_HMC_IW_SRQ) ||
+		    (i == I40IW_HMC_IW_FSIMC) ||
+		    (i == I40IW_HMC_IW_FSIAV)) {
+			info[i].base = 0;
+			info[i].cnt = 0;
+			continue;
+		}
 		get_64bit_val(buf, j, &temp);
 		info[i].base = RS_64_1(temp, 32) * 512;
 		if (info[i].base > base) {
 			base = info[i].base;
 			k = i;
 		}
-		low = (u32)(temp);
-		if (low)
-			info[i].cnt = low;
+		if (i == I40IW_HMC_IW_APBVT_ENTRY) {
+			info[i].cnt = 1;
+			continue;
+		}
+		if (i == I40IW_HMC_IW_QP)
+			info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
+		else if (i == I40IW_HMC_IW_CQ)
+			info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
+		else
+			info[i].cnt = (u32)(temp);
 	}
 	size = info[k].cnt * info[k].size + info[k].base;
 	if (size & 0x1FFFFF)
@@ -154,6 +166,31 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
 	return 0;
 }

+/**
+ * i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
+ * @buf: ptr to fpm query buffer
+ * @buf_idx: index into buf
+ * @info: ptr to i40iw_hmc_obj_info struct
+ * @rsrc_idx: resource index into info
+ *
+ * Decode a 64 bit value from fpm query buffer into max count and size
+ */
+static u64 i40iw_sc_decode_fpm_query(u64 *buf,
+				     u32 buf_idx,
+				     struct i40iw_hmc_obj_info *obj_info,
+				     u32 rsrc_idx)
+{
+	u64 temp;
+	u32 size;
+
+	get_64bit_val(buf, buf_idx, &temp);
+	obj_info[rsrc_idx].max_cnt = (u32)temp;
+	size = (u32)RS_64_1(temp, 32);
+	obj_info[rsrc_idx].size = LS_64_1(1, size);
+	return temp;
+}
+
 /**
  * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
  * @buf: ptr to fpm query buffer
@@ -168,9 +205,9 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
 				struct i40iw_hmc_info *hmc_info,
 				struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
 {
-	u64 temp;
 	struct i40iw_hmc_obj_info *obj_info;
-	u32 i, j, size;
+	u64 temp;
+	u32 size;
 	u16 max_pe_sds;

 	obj_info = hmc_info->hmc_obj;
@@ -185,41 +222,52 @@ static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
 	hmc_fpm_misc->max_sds = max_pe_sds;
 	hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;

-	for (i = I40IW_HMC_IW_QP, j = 8;
-	     i <= I40IW_HMC_IW_ARP; i++, j += 8) {
-		get_64bit_val(buf, j, &temp);
-		if (i == I40IW_HMC_IW_QP)
-			obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
-		else if (i == I40IW_HMC_IW_CQ)
-			obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
-		else
-			obj_info[i].max_cnt = (u32)temp;
-		size = (u32)RS_64_1(temp, 32);
-		obj_info[i].size = ((u64)1 << size);
-	}
-	for (i = I40IW_HMC_IW_MR, j = 48;
-	     i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
-		get_64bit_val(buf, j, &temp);
-		obj_info[i].max_cnt = (u32)temp;
-		size = (u32)RS_64_1(temp, 32);
-		obj_info[i].size = LS_64_1(1, size);
-	}
+	get_64bit_val(buf, 8, &temp);
+	obj_info[I40IW_HMC_IW_QP].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
+	size = (u32)RS_64_1(temp, 32);
+	obj_info[I40IW_HMC_IW_QP].size = LS_64_1(1, size);

-	get_64bit_val(buf, 120, &temp);
-	hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
-	get_64bit_val(buf, 120, &temp);
-	hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
-	get_64bit_val(buf, 120, &temp);
-	hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
+	get_64bit_val(buf, 16, &temp);
+	obj_info[I40IW_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
+	size = (u32)RS_64_1(temp, 32);
+	obj_info[I40IW_HMC_IW_CQ].size = LS_64_1(1, size);
+
+	i40iw_sc_decode_fpm_query(buf, 32, obj_info, I40IW_HMC_IW_HTE);
+	i40iw_sc_decode_fpm_query(buf, 40, obj_info, I40IW_HMC_IW_ARP);
+
+	obj_info[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
+	obj_info[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
+
+	i40iw_sc_decode_fpm_query(buf, 48, obj_info, I40IW_HMC_IW_MR);
+	i40iw_sc_decode_fpm_query(buf, 56, obj_info, I40IW_HMC_IW_XF);

 	get_64bit_val(buf, 64, &temp);
+	obj_info[I40IW_HMC_IW_XFFL].max_cnt = (u32)temp;
+	obj_info[I40IW_HMC_IW_XFFL].size = 4;
 	hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
 	if (!hmc_fpm_misc->xf_block_size)
 		return I40IW_ERR_INVALID_SIZE;
+
+	i40iw_sc_decode_fpm_query(buf, 72, obj_info, I40IW_HMC_IW_Q1);
+
 	get_64bit_val(buf, 80, &temp);
+	obj_info[I40IW_HMC_IW_Q1FL].max_cnt = (u32)temp;
+	obj_info[I40IW_HMC_IW_Q1FL].size = 4;
 	hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
 	if (!hmc_fpm_misc->q1_block_size)
 		return I40IW_ERR_INVALID_SIZE;
+
+	i40iw_sc_decode_fpm_query(buf, 88, obj_info, I40IW_HMC_IW_TIMER);
+
+	get_64bit_val(buf, 112, &temp);
+	obj_info[I40IW_HMC_IW_PBLE].max_cnt = (u32)temp;
+	obj_info[I40IW_HMC_IW_PBLE].size = 8;
+
+	get_64bit_val(buf, 120, &temp);
+	hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
+	hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
+	hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);

 	return 0;
 }
@@ -3392,13 +3440,6 @@ enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_
 		hmc_info->sd_table.sd_entry = virt_mem.va;
 	}

-	/* fill size of objects which are fixed */
-	hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4;
-	hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4;
-	hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8;
-	hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
-	hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
-
 	return ret_code;
 }
@@ -4840,7 +4881,7 @@ void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
 {
 	u8 fcn_id = vsi->fcn_id;

-	if ((vsi->stats_fcn_id_alloc) && (fcn_id != I40IW_INVALID_FCN_ID))
+	if (vsi->stats_fcn_id_alloc && fcn_id < I40IW_MAX_STATS_COUNT)
 		vsi->dev->fcn_id_array[fcn_id] = false;
 	i40iw_hw_stats_stop_timer(vsi);
 }
......
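For reference, the repeated pattern that the new i40iw_sc_decode_fpm_query() helper factors out above: as its body shows, each 64-bit word of the FPM query buffer packs a maximum object count in the low 32 bits and a log2 object size in the high 32 bits. A standalone sketch of that decoding, using plain shifts in place of the driver's RS_64_1/LS_64_1 macros:

    #include <stdint.h>

    static void decode_fpm_word(uint64_t word, uint32_t *max_cnt,
                                uint64_t *size)
    {
        *max_cnt = (uint32_t)word;                 /* low 32 bits: max count */
        *size = 1ULL << (uint32_t)(word >> 32);    /* high 32 bits: log2 of size */
    }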
@@ -1507,8 +1507,8 @@ enum {
 	I40IW_CQ0_ALIGNMENT_MASK =		(256 - 1),
 	I40IW_HOST_CTX_ALIGNMENT_MASK =		(4 - 1),
 	I40IW_SHADOWAREA_MASK =			(128 - 1),
-	I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK =	0,
-	I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK =	0
+	I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK =	(4 - 1),
+	I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK =	(4 - 1)
 };

 enum i40iw_alignment {
......
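The i40iw_d.h change above fixes the mask convention visible in the neighboring constants: an alignment mask is (alignment - 1), so (4 - 1) requests 4-byte alignment, whereas the old value of 0 requested no alignment at all. A one-function illustration of the convention:

    #include <stdbool.h>
    #include <stdint.h>

    /* An address is aligned iff the low bits selected by the mask are zero. */
    static bool is_aligned(uintptr_t addr, uintptr_t mask)
    {
        return (addr & mask) == 0;    /* mask == alignment - 1 */
    }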
@@ -685,7 +685,7 @@ static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
 	cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
 	tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
 	ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
-				     I40IW_CQ0_ALIGNMENT_MASK);
+				     I40IW_CQ0_ALIGNMENT);
 	if (ret)
 		return ret;
......
@@ -62,7 +62,7 @@ enum i40iw_status_code {
 	I40IW_ERR_INVALID_ALIGNMENT = -23,
 	I40IW_ERR_FLUSHED_QUEUE = -24,
 	I40IW_ERR_INVALID_PUSH_PAGE_INDEX = -25,
-	I40IW_ERR_INVALID_IMM_DATA_SIZE = -26,
+	I40IW_ERR_INVALID_INLINE_DATA_SIZE = -26,
 	I40IW_ERR_TIMEOUT = -27,
 	I40IW_ERR_OPCODE_MISMATCH = -28,
 	I40IW_ERR_CQP_COMPL_ERROR = -29,
......
@@ -435,7 +435,7 @@ static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
 	op_info = &info->op.inline_rdma_write;
 	if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
-		return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+		return I40IW_ERR_INVALID_INLINE_DATA_SIZE;

 	ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
 	if (ret_code)
@@ -511,7 +511,7 @@ static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
 	op_info = &info->op.inline_send;
 	if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
-		return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+		return I40IW_ERR_INVALID_INLINE_DATA_SIZE;

 	ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
 	if (ret_code)
@@ -784,7 +784,7 @@ static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
 	get_64bit_val(cqe, 0, &qword0);
 	get_64bit_val(cqe, 16, &qword2);

-	info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM);
+	info->tcp_seq_num = (u32)RS_64(qword0, I40IWCQ_TCPSEQNUM);

 	info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);
@@ -1187,7 +1187,7 @@ enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
 						u8 *wqe_size)
 {
 	if (data_size > I40IW_MAX_INLINE_DATA_SIZE)
-		return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+		return I40IW_ERR_INVALID_INLINE_DATA_SIZE;

 	if (data_size <= 16)
 		*wqe_size = I40IW_QP_WQE_MIN_SIZE;
......
@@ -65,13 +65,28 @@ int pvrdma_req_notify_cq(struct ib_cq *ibcq,
 	struct pvrdma_dev *dev = to_vdev(ibcq->device);
 	struct pvrdma_cq *cq = to_vcq(ibcq);
 	u32 val = cq->cq_handle;
+	unsigned long flags;
+	int has_data = 0;

 	val |= (notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
 		PVRDMA_UAR_CQ_ARM_SOL : PVRDMA_UAR_CQ_ARM;

+	spin_lock_irqsave(&cq->cq_lock, flags);
+
 	pvrdma_write_uar_cq(dev, val);

-	return 0;
+	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
+		unsigned int head;
+
+		has_data = pvrdma_idx_ring_has_data(&cq->ring_state->rx,
+						    cq->ibcq.cqe, &head);
+		if (unlikely(has_data == PVRDMA_INVALID_IDX))
+			dev_err(&dev->pdev->dev, "CQ ring state invalid\n");
+	}
+
+	spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+	return has_data;
 }

 /**
......
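The pvrdma_req_notify_cq() change above implements the IB_CQ_REPORT_MISSED_EVENTS contract: when that flag is set, req_notify_cq may return a positive value to signal that completions may have arrived between the last poll and arming the CQ, in which case the consumer must poll again rather than sleep. The standard consumer-side idiom, sketched with an illustrative function name:

    static void drain_cq(struct ib_cq *cq)
    {
        struct ib_wc wc;

        do {
            while (ib_poll_cq(cq, 1, &wc) > 0)
                ; /* ... handle the completion in wc ... */
        } while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
                                  IB_CQ_REPORT_MISSED_EVENTS) > 0);
    }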
@@ -336,6 +336,7 @@ struct ipoib_dev_priv {
 	unsigned long flags;

 	struct rw_semaphore vlan_rwsem;
+	struct mutex mcast_mutex;

 	struct rb_root  path_tree;
 	struct list_head path_list;
......
@@ -511,7 +511,6 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
 	case IB_CM_REQ_RECEIVED:
 		return ipoib_cm_req_handler(cm_id, event);
 	case IB_CM_DREQ_RECEIVED:
-		p = cm_id->context;
 		ib_send_cm_drep(cm_id, NULL, 0);
 		/* Fall through */
 	case IB_CM_REJ_RECEIVED:
......
@@ -52,7 +52,8 @@ static const struct ipoib_stats ipoib_gstrings_stats[] = {
 	IPOIB_NETDEV_STAT(tx_bytes),
 	IPOIB_NETDEV_STAT(tx_errors),
 	IPOIB_NETDEV_STAT(rx_dropped),
-	IPOIB_NETDEV_STAT(tx_dropped)
+	IPOIB_NETDEV_STAT(tx_dropped),
+	IPOIB_NETDEV_STAT(multicast),
 };

 #define IPOIB_GLOBAL_STATS_LEN	ARRAY_SIZE(ipoib_gstrings_stats)
......
@@ -256,6 +256,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	++dev->stats.rx_packets;
 	dev->stats.rx_bytes += skb->len;
+	if (skb->pkt_type == PACKET_MULTICAST)
+		dev->stats.multicast++;

 	skb->dev = dev;
 	if ((dev->features & NETIF_F_RXCSUM) &&
@@ -709,6 +711,27 @@ static int recvs_pending(struct net_device *dev)
 	return pending;
 }

+static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
+					struct ib_qp *qp,
+					enum ib_qp_state new_state)
+{
+	struct ib_qp_attr qp_attr;
+	struct ib_qp_init_attr query_init_attr;
+	int ret;
+
+	ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);
+	if (ret) {
+		ipoib_warn(priv, "%s: Failed to query QP\n", __func__);
+		return;
+	}
+	/* print according to the new-state and the previous state.*/
+	if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET)
+		ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n");
+	else
+		ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n",
+			   new_state, qp_attr.qp_state);
+}
+
 int ipoib_ib_dev_stop_default(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -728,7 +751,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
 	 */
 	qp_attr.qp_state = IB_QPS_ERR;
 	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
-		ipoib_warn(priv, "Failed to modify QP to ERROR state\n");
+		check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);

 	/* Wait for all sends and receives to complete */
 	begin = jiffies;
......
@@ -1559,6 +1559,7 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
 	int i, wait_flushed = 0;

 	init_completion(&priv->ntbl.flushed);
+	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);

 	spin_lock_irqsave(&priv->lock, flags);
@@ -1603,7 +1604,6 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
 	ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
 	init_completion(&priv->ntbl.deleted);
-	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);

 	/* Stop GC if called at init fail need to cancel work */
 	stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
@@ -1846,6 +1846,7 @@ static const struct net_device_ops ipoib_netdev_ops_vf = {
 	.ndo_tx_timeout		 = ipoib_timeout,
 	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
 	.ndo_get_iflink		 = ipoib_get_iflink,
+	.ndo_get_stats64	 = ipoib_get_stats,
 };

 void ipoib_setup_common(struct net_device *dev)
@@ -1876,6 +1877,7 @@ static void ipoib_build_priv(struct net_device *dev)
 	priv->dev = dev;
 	spin_lock_init(&priv->lock);
 	init_rwsem(&priv->vlan_rwsem);
+	mutex_init(&priv->mcast_mutex);

 	INIT_LIST_HEAD(&priv->path_list);
 	INIT_LIST_HEAD(&priv->child_intfs);
@@ -2172,14 +2174,14 @@ static struct net_device *ipoib_add_port(const char *format,
 	priv->dev->dev_id = port - 1;

 	result = ib_query_port(hca, port, &attr);
-	if (!result)
-		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
-	else {
+	if (result) {
 		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
 		       hca->name, port);
 		goto device_init_failed;
 	}

+	priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+
 	/* MTU will be reset when mcast join happens */
 	priv->dev->mtu  = IPOIB_UD_MTU(priv->max_ib_mtu);
 	priv->mcast_mtu  = priv->admin_mtu = priv->dev->mtu;
@@ -2210,12 +2212,14 @@ static struct net_device *ipoib_add_port(const char *format,
 		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
 		       hca->name, port, result);
 		goto device_init_failed;
-	} else
-		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
+	}
+
+	memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
+	       sizeof(union ib_gid));
 	set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);

 	result = ipoib_dev_init(priv->dev, hca, port);
-	if (result < 0) {
+	if (result) {
 		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
 		       hca->name, port, result);
 		goto device_init_failed;
@@ -2364,6 +2368,7 @@ static int __init ipoib_init_module(void)
 	ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
 	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
+	ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
 #endif

 	/*
......
@@ -684,15 +684,10 @@ void ipoib_mcast_start_thread(struct net_device *dev)
 int ipoib_mcast_stop_thread(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
-	unsigned long flags;

 	ipoib_dbg_mcast(priv, "stopping multicast thread\n");

-	spin_lock_irqsave(&priv->lock, flags);
-	cancel_delayed_work(&priv->mcast_task);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	flush_workqueue(priv->wq);
+	cancel_delayed_work_sync(&priv->mcast_task);

 	return 0;
 }
@@ -748,6 +743,14 @@ void ipoib_mcast_remove_list(struct list_head *remove_list)
 {
 	struct ipoib_mcast *mcast, *tmcast;

+	/*
+	 * make sure the in-flight joins have finished before we attempt
+	 * to leave
+	 */
+	list_for_each_entry_safe(mcast, tmcast, remove_list, list)
+		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+			wait_for_completion(&mcast->done);
+
 	list_for_each_entry_safe(mcast, tmcast, remove_list, list) {
 		ipoib_mcast_leave(mcast->dev, mcast);
 		ipoib_mcast_free(mcast);
@@ -838,6 +841,7 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
 	struct ipoib_mcast *mcast, *tmcast;
 	unsigned long flags;

+	mutex_lock(&priv->mcast_mutex);
 	ipoib_dbg_mcast(priv, "flushing multicast list\n");

 	spin_lock_irqsave(&priv->lock, flags);
@@ -856,15 +860,8 @@ void ipoib_mcast_dev_flush(struct net_device *dev)

 	spin_unlock_irqrestore(&priv->lock, flags);

-	/*
-	 * make sure the in-flight joins have finished before we attempt
-	 * to leave
-	 */
-	list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
-		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
-			wait_for_completion(&mcast->done);
-
 	ipoib_mcast_remove_list(&remove_list);
+	mutex_unlock(&priv->mcast_mutex);
 }

 static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
@@ -982,14 +979,6 @@ void ipoib_mcast_restart_task(struct work_struct *work)
 	netif_addr_unlock(dev);
 	local_irq_restore(flags);

-	/*
-	 * make sure the in-flight joins have finished before we attempt
-	 * to leave
-	 */
-	list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
-		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
-			wait_for_completion(&mcast->done);
-
 	ipoib_mcast_remove_list(&remove_list);

 	/*
......
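The ipoib_mcast_stop_thread() simplification above trades a lock/cancel/unlock/flush sequence for a single cancel_delayed_work_sync(), which both removes a pending timer and waits for an already-running instance of the work item to finish. A minimal sketch of the shutdown pattern, with illustrative names:

    #include <linux/workqueue.h>

    static struct delayed_work my_task;

    static void my_stop(void)
    {
        /* Cancels the pending timer and waits for a running instance.
         * Must not be called while holding a lock that my_task's handler
         * also takes, or the wait can deadlock. */
        cancel_delayed_work_sync(&my_task);
    }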