Commit 0b1ce1a8 authored by Linus Torvalds

Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

Pull one more infiniband revert from Roland Dreier:
 "One more last-second RDMA change for 3.19: Yann realized that the
  previous revert of new userspace ABI did not go far enough, and we're
  still exposing a change that we don't want.  Revert even closer to
  3.18 interface to make sure we get things right in the long run"

Yann Droneaud pipes up:
 "I hope this could go in v3.19 as, at this stage, we don't want to
  expose any bits of this ABI in a released kernel"

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  Revert "IB/core: Add support for extended query device caps"
parents bdfeb5a1 43c61165
@@ -258,6 +258,5 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
 
 IB_UVERBS_DECLARE_EX_CMD(create_flow);
 IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
-IB_UVERBS_DECLARE_EX_CMD(query_device);
 
 #endif /* UVERBS_H */
@@ -400,52 +400,6 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
         return ret;
 }
 
-static void copy_query_dev_fields(struct ib_uverbs_file *file,
-                                  struct ib_uverbs_query_device_resp *resp,
-                                  struct ib_device_attr *attr)
-{
-        resp->fw_ver = attr->fw_ver;
-        resp->node_guid = file->device->ib_dev->node_guid;
-        resp->sys_image_guid = attr->sys_image_guid;
-        resp->max_mr_size = attr->max_mr_size;
-        resp->page_size_cap = attr->page_size_cap;
-        resp->vendor_id = attr->vendor_id;
-        resp->vendor_part_id = attr->vendor_part_id;
-        resp->hw_ver = attr->hw_ver;
-        resp->max_qp = attr->max_qp;
-        resp->max_qp_wr = attr->max_qp_wr;
-        resp->device_cap_flags = attr->device_cap_flags;
-        resp->max_sge = attr->max_sge;
-        resp->max_sge_rd = attr->max_sge_rd;
-        resp->max_cq = attr->max_cq;
-        resp->max_cqe = attr->max_cqe;
-        resp->max_mr = attr->max_mr;
-        resp->max_pd = attr->max_pd;
-        resp->max_qp_rd_atom = attr->max_qp_rd_atom;
-        resp->max_ee_rd_atom = attr->max_ee_rd_atom;
-        resp->max_res_rd_atom = attr->max_res_rd_atom;
-        resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
-        resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
-        resp->atomic_cap = attr->atomic_cap;
-        resp->max_ee = attr->max_ee;
-        resp->max_rdd = attr->max_rdd;
-        resp->max_mw = attr->max_mw;
-        resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
-        resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
-        resp->max_mcast_grp = attr->max_mcast_grp;
-        resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
-        resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
-        resp->max_ah = attr->max_ah;
-        resp->max_fmr = attr->max_fmr;
-        resp->max_map_per_fmr = attr->max_map_per_fmr;
-        resp->max_srq = attr->max_srq;
-        resp->max_srq_wr = attr->max_srq_wr;
-        resp->max_srq_sge = attr->max_srq_sge;
-        resp->max_pkeys = attr->max_pkeys;
-        resp->local_ca_ack_delay = attr->local_ca_ack_delay;
-        resp->phys_port_cnt = file->device->ib_dev->phys_port_cnt;
-}
-
 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                                const char __user *buf,
                                int in_len, int out_len)
@@ -466,7 +420,47 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                 return ret;
 
         memset(&resp, 0, sizeof resp);
-        copy_query_dev_fields(file, &resp, &attr);
+
+        resp.fw_ver = attr.fw_ver;
+        resp.node_guid = file->device->ib_dev->node_guid;
+        resp.sys_image_guid = attr.sys_image_guid;
+        resp.max_mr_size = attr.max_mr_size;
+        resp.page_size_cap = attr.page_size_cap;
+        resp.vendor_id = attr.vendor_id;
+        resp.vendor_part_id = attr.vendor_part_id;
+        resp.hw_ver = attr.hw_ver;
+        resp.max_qp = attr.max_qp;
+        resp.max_qp_wr = attr.max_qp_wr;
+        resp.device_cap_flags = attr.device_cap_flags;
+        resp.max_sge = attr.max_sge;
+        resp.max_sge_rd = attr.max_sge_rd;
+        resp.max_cq = attr.max_cq;
+        resp.max_cqe = attr.max_cqe;
+        resp.max_mr = attr.max_mr;
+        resp.max_pd = attr.max_pd;
+        resp.max_qp_rd_atom = attr.max_qp_rd_atom;
+        resp.max_ee_rd_atom = attr.max_ee_rd_atom;
+        resp.max_res_rd_atom = attr.max_res_rd_atom;
+        resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
+        resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
+        resp.atomic_cap = attr.atomic_cap;
+        resp.max_ee = attr.max_ee;
+        resp.max_rdd = attr.max_rdd;
+        resp.max_mw = attr.max_mw;
+        resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
+        resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
+        resp.max_mcast_grp = attr.max_mcast_grp;
+        resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
+        resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
+        resp.max_ah = attr.max_ah;
+        resp.max_fmr = attr.max_fmr;
+        resp.max_map_per_fmr = attr.max_map_per_fmr;
+        resp.max_srq = attr.max_srq;
+        resp.max_srq_wr = attr.max_srq_wr;
+        resp.max_srq_sge = attr.max_srq_sge;
+        resp.max_pkeys = attr.max_pkeys;
+        resp.local_ca_ack_delay = attr.local_ca_ack_delay;
+        resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;
 
         if (copy_to_user((void __user *) (unsigned long) cmd.response,
                          &resp, sizeof resp))
@@ -3293,52 +3287,3 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 
         return ret ? ret : in_len;
 }
-
-int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
-                              struct ib_udata *ucore,
-                              struct ib_udata *uhw)
-{
-        struct ib_uverbs_ex_query_device_resp resp;
-        struct ib_uverbs_ex_query_device cmd;
-        struct ib_device_attr attr;
-        struct ib_device *device;
-        int err;
-
-        device = file->device->ib_dev;
-
-        if (ucore->inlen < sizeof(cmd))
-                return -EINVAL;
-
-        err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
-        if (err)
-                return err;
-
-        if (cmd.reserved)
-                return -EINVAL;
-
-        err = device->query_device(device, &attr);
-        if (err)
-                return err;
-
-        memset(&resp, 0, sizeof(resp));
-        copy_query_dev_fields(file, &resp.base, &attr);
-        resp.comp_mask = 0;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-        if (cmd.comp_mask & IB_USER_VERBS_EX_QUERY_DEVICE_ODP) {
-                resp.odp_caps.general_caps = attr.odp_caps.general_caps;
-                resp.odp_caps.per_transport_caps.rc_odp_caps =
-                        attr.odp_caps.per_transport_caps.rc_odp_caps;
-                resp.odp_caps.per_transport_caps.uc_odp_caps =
-                        attr.odp_caps.per_transport_caps.uc_odp_caps;
-                resp.odp_caps.per_transport_caps.ud_odp_caps =
-                        attr.odp_caps.per_transport_caps.ud_odp_caps;
-                resp.comp_mask |= IB_USER_VERBS_EX_QUERY_DEVICE_ODP;
-        }
-#endif
-
-        err = ib_copy_to_udata(ucore, &resp, sizeof(resp));
-        if (err)
-                return err;
-
-        return 0;
-}
@@ -1331,8 +1331,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
                 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
                 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
                 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
-        dev->ib_dev.uverbs_ex_cmd_mask =
-                (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
 
         dev->ib_dev.query_device = mlx5_ib_query_device;
         dev->ib_dev.query_port = mlx5_ib_query_port;
......
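
Note: the hunk above removes the driver's advertisement of the extended QUERY_DEVICE command. uverbs drivers declare the commands they support as bits in 64-bit masks (uverbs_cmd_mask and uverbs_ex_cmd_mask), and the uverbs core only dispatches commands whose bit is set. The standalone C sketch below illustrates just that bitmask pattern with made-up command IDs; it is not the kernel's actual dispatch code.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical command IDs, not the real IB_USER_VERBS_CMD_* values. */
enum { CMD_QUERY_DEVICE = 1, CMD_CREATE_QP = 24 };

static int cmd_supported(uint64_t cmd_mask, unsigned int cmd)
{
        return (cmd_mask & (1ull << cmd)) != 0;
}

int main(void)
{
        /* A device advertising two commands, analogous to the mask built above. */
        uint64_t mask = (1ull << CMD_QUERY_DEVICE) | (1ull << CMD_CREATE_QP);

        printf("QUERY_DEVICE supported: %d\n", cmd_supported(mask, CMD_QUERY_DEVICE));
        printf("command 63 supported:   %d\n", cmd_supported(mask, 63));
        return 0;
}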
@@ -1707,10 +1707,7 @@ static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t
 
 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
 {
-        size_t copy_sz;
-
-        copy_sz = min_t(size_t, len, udata->outlen);
-        return copy_to_user(udata->outbuf, src, copy_sz) ? -EFAULT : 0;
+        return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
 }
 
 /**
......
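
The ib_copy_to_udata() hunk restores the plain 3.18 helper, which copies the full len bytes to the user's output buffer. The removed variant clamped the copy to udata->outlen with min_t, apparently so that a response structure that had grown in the kernel would not be copied past an older, smaller userspace buffer. The sketch below reproduces only that clamping idea in standalone userspace C, with memcpy standing in for copy_to_user and illustrative struct names.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct resp_v1 { int a, b; };       /* what an old consumer allocates */
struct resp_v2 { int a, b, c; };    /* what a newer producer fills in */

static size_t copy_clamped(void *dst, size_t dst_len,
                           const void *src, size_t src_len)
{
        /* Equivalent of min_t(size_t, src_len, dst_len). */
        size_t n = dst_len < src_len ? dst_len : src_len;

        memcpy(dst, src, n);
        return n;
}

int main(void)
{
        struct resp_v2 full = { 1, 2, 3 };
        struct resp_v1 old;

        /* Only sizeof(old) bytes are written, so the smaller buffer is not overrun. */
        size_t n = copy_clamped(&old, sizeof(old), &full, sizeof(full));

        printf("copied %zu bytes: a=%d b=%d\n", n, old.a, old.b);
        return 0;
}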
@@ -90,7 +90,6 @@ enum {
 };
 
 enum {
-        IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
         IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
         IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
 };
@@ -202,32 +201,6 @@ struct ib_uverbs_query_device_resp {
         __u8  reserved[4];
 };
 
-enum {
-        IB_USER_VERBS_EX_QUERY_DEVICE_ODP = 1ULL << 0,
-};
-
-struct ib_uverbs_ex_query_device {
-        __u32 comp_mask;
-        __u32 reserved;
-};
-
-struct ib_uverbs_odp_caps {
-        __u64 general_caps;
-        struct {
-                __u32 rc_odp_caps;
-                __u32 uc_odp_caps;
-                __u32 ud_odp_caps;
-        } per_transport_caps;
-        __u32 reserved;
-};
-
-struct ib_uverbs_ex_query_device_resp {
-        struct ib_uverbs_query_device_resp base;
-        __u32 comp_mask;
-        __u32 reserved;
-        struct ib_uverbs_odp_caps odp_caps;
-};
-
 struct ib_uverbs_query_port {
         __u64 response;
         __u8  port_num;
......
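
The removed uapi definitions follow the usual comp_mask extension convention: the extended response carries the existing ib_uverbs_query_device_resp as its base plus a comp_mask whose bits declare which optional trailing fields (here the ODP capabilities) were actually filled in. The standalone sketch below shows how a consumer of such a response would test the bit before reading the optional data; stdint types stand in for __u32/__u64 and the values are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define QUERY_DEVICE_ODP (1ULL << 0)    /* mirrors IB_USER_VERBS_EX_QUERY_DEVICE_ODP */

struct odp_caps {
        uint64_t general_caps;
        struct {
                uint32_t rc_odp_caps;
                uint32_t uc_odp_caps;
                uint32_t ud_odp_caps;
        } per_transport_caps;
        uint32_t reserved;
};

struct ex_query_device_resp {
        /* base ib_uverbs_query_device_resp omitted for brevity */
        uint32_t comp_mask;
        uint32_t reserved;
        struct odp_caps odp_caps;
};

int main(void)
{
        struct ex_query_device_resp resp = {
                .comp_mask = QUERY_DEVICE_ODP,          /* made-up example value */
                .odp_caps.general_caps = 0x1,
        };

        /* Only look at odp_caps if the producer said it filled them in. */
        if (resp.comp_mask & QUERY_DEVICE_ODP)
                printf("ODP general caps: 0x%llx\n",
                       (unsigned long long)resp.odp_caps.general_caps);
        else
                printf("ODP caps not reported\n");
        return 0;
}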