Commit 1fde76f1 authored by Linus Torvalds

Merge tag 'for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull more rdma updates from Doug Ledford:
 "Minor updates for rxe driver"

[ Starting to do merge window pulls again - the current -git tree does
  appear to have some netfilter use-after-free issues, but I've sent
  off the report to the proper channels, and I don't want to delay merge
  window activity any more ]

* tag 'for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
  IB/rxe: improved debug prints & code cleanup
  rdma_rxe: Ensure rdma_rxe init occurs at correct time
  IB/rxe: Properly honor max IRD value for rd/atomic.
  IB/{rxe,core,rdmavt}: Fix kernel crash for reg MR
  IB/rxe: Fix sending out loopback packet on netdev interface.
  IB/rxe: Avoid scheduling tasklet for userspace QP
parents b66484cd e404f945
@@ -138,6 +138,21 @@ static void rvt_unmap_sg(struct ib_device *dev,
 	/* This is a stub, nothing to be done here */
 }
 
+static int rvt_map_sg_attrs(struct ib_device *dev, struct scatterlist *sgl,
+			    int nents, enum dma_data_direction direction,
+			    unsigned long attrs)
+{
+	return rvt_map_sg(dev, sgl, nents, direction);
+}
+
+static void rvt_unmap_sg_attrs(struct ib_device *dev,
+			       struct scatterlist *sg, int nents,
+			       enum dma_data_direction direction,
+			       unsigned long attrs)
+{
+	return rvt_unmap_sg(dev, sg, nents, direction);
+}
+
 static void rvt_sync_single_for_cpu(struct ib_device *dev, u64 addr,
 				    size_t size, enum dma_data_direction dir)
 {
@@ -177,6 +192,8 @@ struct ib_dma_mapping_ops rvt_default_dma_mapping_ops = {
 	.unmap_page = rvt_dma_unmap_page,
 	.map_sg = rvt_map_sg,
 	.unmap_sg = rvt_unmap_sg,
+	.map_sg_attrs = rvt_map_sg_attrs,
+	.unmap_sg_attrs = rvt_unmap_sg_attrs,
 	.sync_single_for_cpu = rvt_sync_single_for_cpu,
 	.sync_single_for_device = rvt_sync_single_for_device,
 	.alloc_coherent = rvt_dma_alloc_coherent,
...
@@ -358,38 +358,16 @@ static int __init rxe_module_init(void)
 	/* initialize slab caches for managed objects */
 	err = rxe_cache_init();
 	if (err) {
-		pr_err("rxe: unable to init object pools\n");
+		pr_err("unable to init object pools\n");
 		return err;
 	}
 
-	err = rxe_net_ipv4_init();
-	if (err) {
-		pr_err("rxe: unable to init ipv4 tunnel\n");
-		rxe_cache_exit();
-		goto exit;
-	}
-
-	err = rxe_net_ipv6_init();
-	if (err) {
-		pr_err("rxe: unable to init ipv6 tunnel\n");
-		rxe_cache_exit();
-		goto exit;
-	}
-
-	err = register_netdevice_notifier(&rxe_net_notifier);
-	if (err) {
-		pr_err("rxe: Failed to rigister netdev notifier\n");
-		goto exit;
-	}
-
-	pr_info("rxe: loaded\n");
+	err = rxe_net_init();
+	if (err)
+		return err;
 
+	pr_info("loaded\n");
 	return 0;
-
-exit:
-	rxe_release_udp_tunnel(recv_sockets.sk4);
-	rxe_release_udp_tunnel(recv_sockets.sk6);
-	return err;
 }
 
 static void __exit rxe_module_exit(void)
@@ -398,8 +376,8 @@ static void __exit rxe_module_exit(void)
 	rxe_net_exit();
 	rxe_cache_exit();
 
-	pr_info("rxe: unloaded\n");
+	pr_info("unloaded\n");
 }
 
-module_init(rxe_module_init);
+late_initcall(rxe_module_init);
 module_exit(rxe_module_exit);
@@ -34,6 +34,11 @@
 #ifndef RXE_H
 #define RXE_H
 
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/crc32.h>
...
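The pr_fmt override above is what lets the rest of this series drop the hand-written "rxe: " prefixes: every pr_*() call in a file that includes rxe.h gets the module name prepended by the preprocessor. A minimal userspace sketch of the same mechanism, with fprintf standing in for printk and "rdma_rxe" assumed as the KBUILD_MODNAME:

#include <stdio.h>

/* same trick as rxe.h: prefix every message with the module name */
#define pr_fmt(fmt) "rdma_rxe: " fmt
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_err("unable to init object pools\n");
	/* prints: rdma_rxe: unable to init object pools */
	return 0;
}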
@@ -39,7 +39,7 @@ int rxe_av_chk_attr(struct rxe_dev *rxe, struct ib_ah_attr *attr)
 	struct rxe_port *port;
 
 	if (attr->port_num != 1) {
-		pr_info("rxe: invalid port_num = %d\n", attr->port_num);
+		pr_info("invalid port_num = %d\n", attr->port_num);
 		return -EINVAL;
 	}
 
@@ -47,7 +47,7 @@ int rxe_av_chk_attr(struct rxe_dev *rxe, struct ib_ah_attr *attr)
 	if (attr->ah_flags & IB_AH_GRH) {
 		if (attr->grh.sgid_index > port->attr.gid_tbl_len) {
-			pr_info("rxe: invalid sgid index = %d\n",
+			pr_info("invalid sgid index = %d\n",
 				attr->grh.sgid_index);
 			return -EINVAL;
 		}
...
@@ -567,7 +567,8 @@ int rxe_completer(void *arg)
 	state = COMPST_GET_ACK;
 
 	while (1) {
-		pr_debug("state = %s\n", comp_state_name[state]);
+		pr_debug("qp#%d state = %s\n", qp_num(qp),
+			 comp_state_name[state]);
 		switch (state) {
 		case COMPST_GET_ACK:
 			skb = skb_dequeue(&qp->resp_pkts);
@@ -709,7 +710,8 @@ int rxe_completer(void *arg)
 				qp->comp.rnr_retry--;
 				qp->req.need_retry = 1;
-				pr_debug("set rnr nak timer\n");
+				pr_debug("qp#%d set rnr nak timer\n",
+					 qp_num(qp));
 				mod_timer(&qp->rnr_nak_timer,
 					  jiffies + rnrnak_jiffies(aeth_syn(pkt)
 						& ~AETH_TYPE_MASK));
...
@@ -117,6 +117,21 @@ static void rxe_unmap_sg(struct ib_device *dev,
 	WARN_ON(!valid_dma_direction(direction));
 }
 
+static int rxe_map_sg_attrs(struct ib_device *dev, struct scatterlist *sgl,
+			    int nents, enum dma_data_direction direction,
+			    unsigned long attrs)
+{
+	return rxe_map_sg(dev, sgl, nents, direction);
+}
+
+static void rxe_unmap_sg_attrs(struct ib_device *dev,
+			       struct scatterlist *sg, int nents,
+			       enum dma_data_direction direction,
+			       unsigned long attrs)
+{
+	rxe_unmap_sg(dev, sg, nents, direction);
+}
+
 static void rxe_sync_single_for_cpu(struct ib_device *dev,
 				    u64 addr,
 				    size_t size, enum dma_data_direction dir)
@@ -159,6 +174,8 @@ struct ib_dma_mapping_ops rxe_dma_mapping_ops = {
 	.unmap_page = rxe_dma_unmap_page,
 	.map_sg = rxe_map_sg,
 	.unmap_sg = rxe_unmap_sg,
+	.map_sg_attrs = rxe_map_sg_attrs,
+	.unmap_sg_attrs = rxe_unmap_sg_attrs,
 	.sync_single_for_cpu = rxe_sync_single_for_cpu,
 	.sync_single_for_device = rxe_sync_single_for_device,
 	.alloc_coherent = rxe_dma_alloc_coherent,
...
@@ -198,7 +198,7 @@ void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);
 static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
 {
 	qp->resp.res_head++;
-	if (unlikely(qp->resp.res_head == qp->attr.max_rd_atomic))
+	if (unlikely(qp->resp.res_head == qp->attr.max_dest_rd_atomic))
 		qp->resp.res_head = 0;
 }
...
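This one-word change is the heart of the "honor max IRD" fix: max_rd_atomic is the ORD (reads/atomics the QP may keep outstanding as an initiator), while max_dest_rd_atomic is the IRD (requests it must absorb as a responder), so the responder resource ring has to be sized and wrapped by the latter. A standalone sketch of that pattern, with illustrative names rather than the driver's own:

/* Sketch only: a responder resource ring bounded by the IRD
 * (max_dest_rd_atomic), mirroring alloc_rd_atomic_resources() and
 * rxe_advance_resp_resource() in this series. Names are illustrative. */
struct resp_slot {
	int in_use;
};

struct resp_ring {
	struct resp_slot *slots;	/* max_dest_rd_atomic entries */
	unsigned int head;
	unsigned int ird;		/* negotiated max_dest_rd_atomic */
};

static struct resp_slot *resp_ring_next(struct resp_ring *r)
{
	struct resp_slot *slot = &r->slots[r->head];

	if (++r->head == r->ird)	/* wrap at the IRD, not the ORD */
		r->head = 0;
	return slot;
}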
@@ -126,7 +126,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 
 	ret = remap_vmalloc_range(vma, ip->obj, 0);
 	if (ret) {
-		pr_err("rxe: err %d from remap_vmalloc_range\n", ret);
+		pr_err("err %d from remap_vmalloc_range\n", ret);
 		goto done;
 	}
...
@@ -39,7 +39,7 @@
  */
 static u8 rxe_get_key(void)
 {
-	static unsigned key = 1;
+	static u32 key = 1;
 
 	key = key << 1;
...
@@ -65,7 +65,7 @@ struct rxe_dev *net_to_rxe(struct net_device *ndev)
 	return found;
 }
 
-struct rxe_dev *get_rxe_by_name(const char* name)
+struct rxe_dev *get_rxe_by_name(const char *name)
 {
 	struct rxe_dev *rxe;
 	struct rxe_dev *found = NULL;
@@ -350,14 +350,14 @@ static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
 	ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
 }
 
-static int prepare4(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av)
+static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+		    struct sk_buff *skb, struct rxe_av *av)
 {
 	struct dst_entry *dst;
 	bool xnet = false;
 	__be16 df = htons(IP_DF);
 	struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
 	struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;
-	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
 
 	dst = rxe_find_route4(rxe->ndev, saddr, daddr);
 	if (!dst) {
@@ -376,12 +376,12 @@ static int prepare4(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av)
 	return 0;
 }
 
-static int prepare6(struct rxe_dev *rxe, struct sk_buff *skb, struct rxe_av *av)
+static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+		    struct sk_buff *skb, struct rxe_av *av)
 {
 	struct dst_entry *dst;
 	struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
 	struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;
-	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
 
 	dst = rxe_find_route6(rxe->ndev, saddr, daddr);
 	if (!dst) {
@@ -408,9 +408,9 @@ static int prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
 	struct rxe_av *av = rxe_get_av(pkt);
 
 	if (av->network_type == RDMA_NETWORK_IPV4)
-		err = prepare4(rxe, skb, av);
+		err = prepare4(rxe, pkt, skb, av);
 	else if (av->network_type == RDMA_NETWORK_IPV6)
-		err = prepare6(rxe, skb, av);
+		err = prepare6(rxe, pkt, skb, av);
 
 	*crc = rxe_icrc_hdr(pkt, skb);
@@ -601,8 +601,7 @@ void rxe_port_up(struct rxe_dev *rxe)
 	port->attr.phys_state = IB_PHYS_STATE_LINK_UP;
 	rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
 
-	pr_info("rxe: set %s active\n", rxe->ib_dev.name);
-	return;
+	pr_info("set %s active\n", rxe->ib_dev.name);
 }
 
 /* Caller must hold net_info_lock */
@@ -615,8 +614,7 @@ void rxe_port_down(struct rxe_dev *rxe)
 	port->attr.phys_state = IB_PHYS_STATE_LINK_DOWN;
 	rxe_port_event(rxe, IB_EVENT_PORT_ERR);
 
-	pr_info("rxe: set %s down\n", rxe->ib_dev.name);
-	return;
+	pr_info("set %s down\n", rxe->ib_dev.name);
 }
 
 static int rxe_notify(struct notifier_block *not_blk,
@@ -641,7 +639,7 @@ static int rxe_notify(struct notifier_block *not_blk,
 		rxe_port_down(rxe);
 		break;
 	case NETDEV_CHANGEMTU:
-		pr_info("rxe: %s changed mtu to %d\n", ndev->name, ndev->mtu);
+		pr_info("%s changed mtu to %d\n", ndev->name, ndev->mtu);
 		rxe_set_mtu(rxe, ndev->mtu);
 		break;
 	case NETDEV_REBOOT:
@@ -651,7 +649,7 @@ static int rxe_notify(struct notifier_block *not_blk,
 	case NETDEV_CHANGENAME:
 	case NETDEV_FEAT_CHANGE:
 	default:
-		pr_info("rxe: ignoring netdev event = %ld for %s\n",
+		pr_info("ignoring netdev event = %ld for %s\n",
 			event, ndev->name);
 		break;
 	}
@@ -671,7 +669,7 @@ int rxe_net_ipv4_init(void)
 				htons(ROCE_V2_UDP_DPORT), false);
 	if (IS_ERR(recv_sockets.sk4)) {
 		recv_sockets.sk4 = NULL;
-		pr_err("rxe: Failed to create IPv4 UDP tunnel\n");
+		pr_err("Failed to create IPv4 UDP tunnel\n");
 		return -1;
 	}
@@ -688,7 +686,7 @@ int rxe_net_ipv6_init(void)
 				htons(ROCE_V2_UDP_DPORT), true);
 	if (IS_ERR(recv_sockets.sk6)) {
 		recv_sockets.sk6 = NULL;
-		pr_err("rxe: Failed to create IPv6 UDP tunnel\n");
+		pr_err("Failed to create IPv6 UDP tunnel\n");
 		return -1;
 	}
 #endif
@@ -701,3 +699,26 @@ void rxe_net_exit(void)
 	rxe_release_udp_tunnel(recv_sockets.sk4);
 	unregister_netdevice_notifier(&rxe_net_notifier);
 }
+
+int rxe_net_init(void)
+{
+	int err;
+
+	recv_sockets.sk6 = NULL;
+
+	err = rxe_net_ipv4_init();
+	if (err)
+		return err;
+	err = rxe_net_ipv6_init();
+	if (err)
+		goto err_out;
+	err = register_netdevice_notifier(&rxe_net_notifier);
+	if (err) {
+		pr_err("Failed to register netdev notifier\n");
+		goto err_out;
+	}
+	return 0;
+err_out:
+	rxe_net_exit();
+	return err;
+}
@@ -49,8 +49,7 @@ void rxe_release_udp_tunnel(struct socket *sk);
 
 struct rxe_dev *rxe_net_add(struct net_device *ndev);
 
-int rxe_net_ipv4_init(void);
-int rxe_net_ipv6_init(void);
+int rxe_net_init(void);
 void rxe_net_exit(void);
 
 #endif /* RXE_NET_H */
@@ -146,7 +146,7 @@ static void free_rd_atomic_resources(struct rxe_qp *qp)
 	if (qp->resp.resources) {
 		int i;
 
-		for (i = 0; i < qp->attr.max_rd_atomic; i++) {
+		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
 			struct resp_res *res = &qp->resp.resources[i];
 
 			free_rd_atomic_resource(qp, res);
@@ -174,7 +174,7 @@ static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
 	struct resp_res *res;
 
 	if (qp->resp.resources) {
-		for (i = 0; i < qp->attr.max_rd_atomic; i++) {
+		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
 			res = &qp->resp.resources[i];
 			free_rd_atomic_resource(qp, res);
 		}
@@ -298,8 +298,8 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
 		wqe_size = rcv_wqe_size(qp->rq.max_sge);
 
-		pr_debug("max_wr = %d, max_sge = %d, wqe_size = %d\n",
-			 qp->rq.max_wr, qp->rq.max_sge, wqe_size);
+		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
+			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);
 
 		qp->rq.queue = rxe_queue_init(rxe,
 					      &qp->rq.max_wr,
@@ -596,14 +596,21 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
 		int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);
 
+		qp->attr.max_rd_atomic = max_rd_atomic;
+		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
+	}
+
+	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+		int max_dest_rd_atomic =
+			__roundup_pow_of_two(attr->max_dest_rd_atomic);
+
+		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;
+
 		free_rd_atomic_resources(qp);
 
-		err = alloc_rd_atomic_resources(qp, max_rd_atomic);
+		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
 		if (err)
 			return err;
-
-		qp->attr.max_rd_atomic = max_rd_atomic;
-		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
 	}
 
 	if (mask & IB_QP_CUR_STATE)
@@ -673,24 +680,27 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 	if (mask & IB_QP_RETRY_CNT) {
 		qp->attr.retry_cnt = attr->retry_cnt;
 		qp->comp.retry_cnt = attr->retry_cnt;
-		pr_debug("set retry count = %d\n", attr->retry_cnt);
+		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
+			 attr->retry_cnt);
 	}
 
 	if (mask & IB_QP_RNR_RETRY) {
 		qp->attr.rnr_retry = attr->rnr_retry;
 		qp->comp.rnr_retry = attr->rnr_retry;
-		pr_debug("set rnr retry count = %d\n", attr->rnr_retry);
+		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
+			 attr->rnr_retry);
 	}
 
 	if (mask & IB_QP_RQ_PSN) {
 		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
 		qp->resp.psn = qp->attr.rq_psn;
-		pr_debug("set resp psn = 0x%x\n", qp->resp.psn);
+		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
+			 qp->resp.psn);
 	}
 
 	if (mask & IB_QP_MIN_RNR_TIMER) {
 		qp->attr.min_rnr_timer = attr->min_rnr_timer;
-		pr_debug("set min rnr timer = 0x%x\n",
+		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
 			 attr->min_rnr_timer);
 	}
@@ -698,12 +708,7 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
 		qp->req.psn = qp->attr.sq_psn;
 		qp->comp.psn = qp->attr.sq_psn;
-		pr_debug("set req psn = 0x%x\n", qp->req.psn);
-	}
-
-	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
-		qp->attr.max_dest_rd_atomic =
-			__roundup_pow_of_two(attr->max_dest_rd_atomic);
+		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
 	}
 
 	if (mask & IB_QP_PATH_MIG_STATE)
@@ -717,38 +722,38 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 		switch (attr->qp_state) {
 		case IB_QPS_RESET:
-			pr_debug("qp state -> RESET\n");
+			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
 			rxe_qp_reset(qp);
 			break;
 
 		case IB_QPS_INIT:
-			pr_debug("qp state -> INIT\n");
+			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
 			qp->req.state = QP_STATE_INIT;
 			qp->resp.state = QP_STATE_INIT;
 			break;
 
 		case IB_QPS_RTR:
-			pr_debug("qp state -> RTR\n");
+			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
 			qp->resp.state = QP_STATE_READY;
 			break;
 
 		case IB_QPS_RTS:
-			pr_debug("qp state -> RTS\n");
+			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
 			qp->req.state = QP_STATE_READY;
 			break;
 
 		case IB_QPS_SQD:
-			pr_debug("qp state -> SQD\n");
+			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
 			rxe_qp_drain(qp);
 			break;
 
 		case IB_QPS_SQE:
-			pr_warn("qp state -> SQE !!?\n");
+			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
 			/* Not possible from modify_qp. */
 			break;
 
 		case IB_QPS_ERR:
-			pr_debug("qp state -> ERR\n");
+			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
 			rxe_qp_error(qp);
 			break;
 		}
...
@@ -387,7 +387,8 @@ int rxe_rcv(struct sk_buff *skb)
 	pack_icrc = be32_to_cpu(*icrcp);
 
 	calc_icrc = rxe_icrc_hdr(pkt, skb);
-	calc_icrc = crc32_le(calc_icrc, (u8 *)payload_addr(pkt), payload_size(pkt));
+	calc_icrc = crc32_le(calc_icrc, (u8 *)payload_addr(pkt),
+			     payload_size(pkt));
 	calc_icrc = cpu_to_be32(~calc_icrc);
 	if (unlikely(calc_icrc != pack_icrc)) {
 		char saddr[sizeof(struct in6_addr)];
...
@@ -38,7 +38,7 @@
 #include "rxe_queue.h"
 
 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
-		       unsigned opcode);
+		       u32 opcode);
 
 static inline void retry_first_write_send(struct rxe_qp *qp,
 					  struct rxe_send_wqe *wqe,
@@ -121,7 +121,7 @@ void rnr_nak_timer(unsigned long data)
 {
 	struct rxe_qp *qp = (struct rxe_qp *)data;
 
-	pr_debug("rnr nak timer fired\n");
+	pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
 	rxe_run_task(&qp->req.task, 1);
 }
 
@@ -187,7 +187,7 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 	return wqe;
 }
 
-static int next_opcode_rc(struct rxe_qp *qp, unsigned opcode, int fits)
+static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
 {
 	switch (opcode) {
 	case IB_WR_RDMA_WRITE:
@@ -259,7 +259,7 @@ static int next_opcode_rc(struct rxe_qp *qp, unsigned opcode, int fits)
 	return -EINVAL;
 }
 
-static int next_opcode_uc(struct rxe_qp *qp, unsigned opcode, int fits)
+static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
 {
 	switch (opcode) {
 	case IB_WR_RDMA_WRITE:
@@ -311,7 +311,7 @@ static int next_opcode_uc(struct rxe_qp *qp, unsigned opcode, int fits)
 }
 
 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
-		       unsigned opcode)
+		       u32 opcode)
 {
 	int fits = (wqe->dma.resid <= qp->mtu);
 
@@ -588,7 +588,7 @@ int rxe_requester(void *arg)
 	struct rxe_pkt_info pkt;
 	struct sk_buff *skb;
 	struct rxe_send_wqe *wqe;
-	unsigned mask;
+	enum rxe_hdr_mask mask;
 	int payload;
 	int mtu;
 	int opcode;
@@ -626,7 +626,8 @@ int rxe_requester(void *arg)
 			rmr = rxe_pool_get_index(&rxe->mr_pool,
 						 wqe->wr.ex.invalidate_rkey >> 8);
 			if (!rmr) {
-				pr_err("No mr for key %#x\n", wqe->wr.ex.invalidate_rkey);
+				pr_err("No mr for key %#x\n",
+				       wqe->wr.ex.invalidate_rkey);
 				wqe->state = wqe_state_error;
 				wqe->status = IB_WC_MW_BIND_ERR;
 				goto exit;
@@ -702,12 +703,12 @@ int rxe_requester(void *arg)
 	skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
 	if (unlikely(!skb)) {
-		pr_err("Failed allocating skb\n");
+		pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
 		goto err;
 	}
 
 	if (fill_packet(qp, wqe, &pkt, skb, payload)) {
-		pr_debug("Error during fill packet\n");
+		pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
 		goto err;
 	}
...
@@ -383,7 +383,7 @@ static enum resp_states check_resource(struct rxe_qp *qp,
 		 * too many read/atomic ops, we just
 		 * recycle the responder resource queue
 		 */
-		if (likely(qp->attr.max_rd_atomic > 0))
+		if (likely(qp->attr.max_dest_rd_atomic > 0))
 			return RESPST_CHK_LENGTH;
 		else
 			return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
@@ -749,6 +749,18 @@ static enum resp_states read_reply(struct rxe_qp *qp,
 	return state;
 }
 
+static void build_rdma_network_hdr(union rdma_network_hdr *hdr,
+				   struct rxe_pkt_info *pkt)
+{
+	struct sk_buff *skb = PKT_TO_SKB(pkt);
+
+	memset(hdr, 0, sizeof(*hdr));
+	if (skb->protocol == htons(ETH_P_IP))
+		memcpy(&hdr->roce4grh, ip_hdr(skb), sizeof(hdr->roce4grh));
+	else if (skb->protocol == htons(ETH_P_IPV6))
+		memcpy(&hdr->ibgrh, ipv6_hdr(skb), sizeof(hdr->ibgrh));
+}
+
 /* Executes a new request. A retried request never reach that function (send
  * and writes are discarded, and reads and atomics are retried elsewhere.
  */
@@ -761,13 +773,8 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
 		    qp_type(qp) == IB_QPT_SMI ||
 		    qp_type(qp) == IB_QPT_GSI) {
 			union rdma_network_hdr hdr;
-			struct sk_buff *skb = PKT_TO_SKB(pkt);
 
-			memset(&hdr, 0, sizeof(hdr));
-			if (skb->protocol == htons(ETH_P_IP))
-				memcpy(&hdr.roce4grh, ip_hdr(skb), sizeof(hdr.roce4grh));
-			else if (skb->protocol == htons(ETH_P_IPV6))
-				memcpy(&hdr.ibgrh, ipv6_hdr(skb), sizeof(hdr.ibgrh));
+			build_rdma_network_hdr(&hdr, pkt);
 
 			err = send_data_in(qp, &hdr, sizeof(hdr));
 			if (err)
@@ -881,7 +888,8 @@ static enum resp_states do_complete(struct rxe_qp *qp,
 			rmr = rxe_pool_get_index(&rxe->mr_pool,
 						 wc->ex.invalidate_rkey >> 8);
 			if (unlikely(!rmr)) {
-				pr_err("Bad rkey %#x invalidation\n", wc->ex.invalidate_rkey);
+				pr_err("Bad rkey %#x invalidation\n",
+				       wc->ex.invalidate_rkey);
 				return RESPST_ERROR;
 			}
 			rmr->state = RXE_MEM_STATE_FREE;
@@ -1208,7 +1216,8 @@ int rxe_responder(void *arg)
 	}
 
 	while (1) {
-		pr_debug("state = %s\n", resp_state_name[state]);
+		pr_debug("qp#%d state = %s\n", qp_num(qp),
+			 resp_state_name[state]);
 		switch (state) {
 		case RESPST_GET_REQ:
 			state = get_req(qp, &pkt);
...
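The new build_rdma_network_hdr() helper packages the IPv4 or IPv6 header of the incoming packet into the rdma_network_hdr that is copied into the receive buffer for UD, SMI and GSI QPs. On the consumer side this follows the usual UD convention: when the completion carries IB_WC_GRH, the first sizeof(union rdma_network_hdr) bytes of the posted buffer hold that header and the payload follows. A hedged sketch of such a consumer, with handle_ud_recv() and recv_buf as hypothetical names, not part of the driver:

/* Sketch of how a UD consumer sees the header synthesized by
 * build_rdma_network_hdr(); names here are illustrative only. */
static void handle_ud_recv(struct ib_wc *wc, void *recv_buf)
{
	if (wc->wc_flags & IB_WC_GRH) {
		union rdma_network_hdr *hdr = recv_buf;

		/* hdr->roce4grh holds an IPv4 header, hdr->ibgrh an
		 * IPv6/IB GRH, depending on how the packet arrived */
		(void)hdr;
	}

	/* the actual payload starts after the network header area */
}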
@@ -79,7 +79,7 @@ static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
 
 	len = sanitize_arg(val, intf, sizeof(intf));
 	if (!len) {
-		pr_err("rxe: add: invalid interface name\n");
+		pr_err("add: invalid interface name\n");
 		err = -EINVAL;
 		goto err;
 	}
@@ -92,20 +92,20 @@ static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
 	}
 
 	if (net_to_rxe(ndev)) {
-		pr_err("rxe: already configured on %s\n", intf);
+		pr_err("already configured on %s\n", intf);
 		err = -EINVAL;
 		goto err;
 	}
 
 	rxe = rxe_net_add(ndev);
 	if (!rxe) {
-		pr_err("rxe: failed to add %s\n", intf);
+		pr_err("failed to add %s\n", intf);
 		err = -EINVAL;
 		goto err;
 	}
 
 	rxe_set_port_state(ndev);
-	pr_info("rxe: added %s to %s\n", rxe->ib_dev.name, intf);
+	pr_info("added %s to %s\n", rxe->ib_dev.name, intf);
 err:
 	if (ndev)
 		dev_put(ndev);
@@ -120,7 +120,7 @@ static int rxe_param_set_remove(const char *val, const struct kernel_param *kp)
 
 	len = sanitize_arg(val, intf, sizeof(intf));
 	if (!len) {
-		pr_err("rxe: add: invalid interface name\n");
+		pr_err("add: invalid interface name\n");
 		return -EINVAL;
 	}
 
@@ -133,7 +133,7 @@ static int rxe_param_set_remove(const char *val, const struct kernel_param *kp)
 	rxe = get_rxe_by_name(intf);
 
 	if (!rxe) {
-		pr_err("rxe: not configured on %s\n", intf);
+		pr_err("not configured on %s\n", intf);
 		return -EINVAL;
 	}
...
@@ -100,10 +100,12 @@ static int rxe_query_port(struct ib_device *dev,
 		rxe->ndev->ethtool_ops->get_settings(rxe->ndev, &cmd);
 		speed = cmd.speed;
 	} else {
-		pr_warn("%s speed is unknown, defaulting to 1000\n", rxe->ndev->name);
+		pr_warn("%s speed is unknown, defaulting to 1000\n",
+			rxe->ndev->name);
 		speed = 1000;
 	}
-	rxe_eth_speed_to_ib_speed(speed, &attr->active_speed, &attr->active_width);
+	rxe_eth_speed_to_ib_speed(speed, &attr->active_speed,
+				  &attr->active_width);
 	mutex_unlock(&rxe->usdev_lock);
 
 	return 0;
@@ -761,7 +763,7 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
 }
 
 static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
-			 unsigned mask, u32 length)
+			 unsigned int mask, u32 length)
 {
 	int err;
 	struct rxe_sq *sq = &qp->sq;
@@ -801,26 +803,15 @@ static int post_one_send(struct rxe_qp *qp, struct ib_send_wr *ibwr,
 	return err;
 }
 
-static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-			 struct ib_send_wr **bad_wr)
+static int rxe_post_send_kernel(struct rxe_qp *qp, struct ib_send_wr *wr,
+				struct ib_send_wr **bad_wr)
 {
 	int err = 0;
-	struct rxe_qp *qp = to_rqp(ibqp);
 	unsigned int mask;
 	unsigned int length = 0;
 	int i;
 	int must_sched;
 
-	if (unlikely(!qp->valid)) {
-		*bad_wr = wr;
-		return -EINVAL;
-	}
-
-	if (unlikely(qp->req.state < QP_STATE_READY)) {
-		*bad_wr = wr;
-		return -EINVAL;
-	}
-
 	while (wr) {
 		mask = wr_opcode_mask(wr->opcode, qp);
 		if (unlikely(!mask)) {
@@ -861,6 +852,29 @@ static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	return err;
 }
 
+static int rxe_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+			 struct ib_send_wr **bad_wr)
+{
+	struct rxe_qp *qp = to_rqp(ibqp);
+
+	if (unlikely(!qp->valid)) {
+		*bad_wr = wr;
+		return -EINVAL;
+	}
+
+	if (unlikely(qp->req.state < QP_STATE_READY)) {
+		*bad_wr = wr;
+		return -EINVAL;
+	}
+
+	if (qp->is_user) {
+		/* Utilize process context to do protocol processing */
+		rxe_run_task(&qp->req.task, 0);
+		return 0;
+	} else
+		return rxe_post_send_kernel(qp, wr, bad_wr);
+}
+
 static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 			 struct ib_recv_wr **bad_wr)
 {
@@ -1133,8 +1147,8 @@ static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
 	return 0;
 }
 
-static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
-			 unsigned int *sg_offset)
+static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+			 int sg_nents, unsigned int *sg_offset)
 {
 	struct rxe_mem *mr = to_rmr(ibmr);
 	int n;
...
@@ -1739,6 +1739,14 @@ struct ib_dma_mapping_ops {
 	void		(*unmap_sg)(struct ib_device *dev,
 				    struct scatterlist *sg, int nents,
 				    enum dma_data_direction direction);
+	int		(*map_sg_attrs)(struct ib_device *dev,
+					struct scatterlist *sg, int nents,
+					enum dma_data_direction direction,
+					unsigned long attrs);
+	void		(*unmap_sg_attrs)(struct ib_device *dev,
+					  struct scatterlist *sg, int nents,
+					  enum dma_data_direction direction,
+					  unsigned long attrs);
 	void		(*sync_single_for_cpu)(struct ib_device *dev,
 					       u64 dma_handle,
 					       size_t size,
@@ -3000,8 +3008,12 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
 				      enum dma_data_direction direction,
 				      unsigned long dma_attrs)
 {
-	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
-				dma_attrs);
+	if (dev->dma_ops)
+		return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction,
+						  dma_attrs);
+	else
+		return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
+					dma_attrs);
 }
 
 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
@@ -3009,7 +3021,12 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
 					 enum dma_data_direction direction,
 					 unsigned long dma_attrs)
 {
-	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
+	if (dev->dma_ops)
+		return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction,
+						    dma_attrs);
+	else
+		dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
+				   dma_attrs);
 }
 
 /**
  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
...
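With the two new ops wired into ib_dma_mapping_ops, ib_dma_map_sg_attrs() and ib_dma_unmap_sg_attrs() now dispatch to a device's dma_ops when one is registered (the software devices rxe and rdmavt above) and fall back to the regular DMA API otherwise; callers do not change. A minimal sketch of a caller, with ulp_map_buffer() as a hypothetical ULP helper and no special attributes passed:

/* Sketch: mapping and unmapping an S/G list through the attrs helpers;
 * the dispatch to dev->dma_ops happens inside ib_dma_map_sg_attrs(). */
static int ulp_map_buffer(struct ib_device *dev, struct scatterlist *sgl,
			  int nents)
{
	int mapped;

	mapped = ib_dma_map_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, 0);
	if (!mapped)
		return -ENOMEM;

	/* ... post work requests that reference the mapped addresses ... */

	ib_dma_unmap_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, 0);
	return 0;
}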