Commit 3b561130 authored by Erez Shitrit, committed by Doug Ledford

IB/ipoib: Support SendOnlyFullMember MCG for SendOnly join

Check (via an SA query) whether the SM supports the new join option for SendOnly
multicast groups.
If the SM supports that option, the driver uses the new join state when creating
such multicast groups.
In other words, when SendOnlyFullMember is supported we no longer fake a FullMember
join for SendOnly MCGs; we use the correct join state instead.

This check is performed at every invocation of the mcast_restart task, so that
the driver stays in sync with the current state of the SM.
Signed-off-by: Erez Shitrit <erezsh@mellanox.com>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent cd6e9b7e
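
Editor's note for context (not part of the patch): the join state in an SA MCMemberRecord is a small bitmask, and the "new option" described above is its SendOnlyFullMember bit. A minimal sketch of the bit values follows, assuming the layout from the InfiniBand specification; the enum and its names are illustrative, and only the value 8 (SENDONLY_FULLMEMBER_JOIN) appears in the patch itself:

/* Illustrative only: MCMemberRecord JoinState bits; the names are hypothetical. */
enum mcast_join_state_bits {
	JOIN_STATE_FULL_MEMBER          = 1 << 0, /* ordinary full-member join */
	JOIN_STATE_NON_MEMBER           = 1 << 1,
	JOIN_STATE_SENDONLY_NON_MEMBER  = 1 << 2, /* the value 4 in the old, disabled code below */
	JOIN_STATE_SENDONLY_FULL_MEMBER = 1 << 3, /* 8, i.e. SENDONLY_FULLMEMBER_JOIN below */
};

Roughly speaking, a SendOnlyFullMember join lets a port that only transmits to a group also create that group, which is what IPoIB needs for its Ethernet-like sendonly multicast emulation.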
@@ -392,6 +392,7 @@ struct ipoib_dev_priv {
	struct ipoib_ethtool_st ethtool;
	struct timer_list poll_timer;
	unsigned max_send_sge;
+	bool sm_fullmember_sendonly_support;
};

struct ipoib_ah {
@@ -476,6 +477,7 @@ void ipoib_reap_ah(struct work_struct *work);
void ipoib_mark_paths_invalid(struct net_device *dev);
void ipoib_flush_paths(struct net_device *dev);
+int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv);
struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
...
@@ -117,6 +117,8 @@ int ipoib_open(struct net_device *dev)
	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

+	priv->sm_fullmember_sendonly_support = false;
+
	if (ipoib_ib_dev_open(dev)) {
		if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
			return 0;

@@ -629,6 +631,77 @@ void ipoib_mark_paths_invalid(struct net_device *dev)
	spin_unlock_irq(&priv->lock);
}
+
+struct classport_info_context {
+	struct ipoib_dev_priv	*priv;
+	struct completion	done;
+	struct ib_sa_query	*sa_query;
+};
+
+static void classport_info_query_cb(int status, struct ib_class_port_info *rec,
+				    void *context)
+{
+	struct classport_info_context *cb_ctx = context;
+	struct ipoib_dev_priv *priv;
+
+	WARN_ON(!context);
+
+	priv = cb_ctx->priv;
+
+	if (status || !rec) {
+		pr_debug("device: %s failed query classport_info status: %d\n",
+			 priv->dev->name, status);
+		/* keeps the default, will try next mcast_restart */
+		priv->sm_fullmember_sendonly_support = false;
+		goto out;
+	}
+
+	if (ib_get_cpi_capmask2(rec) &
+	    IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT) {
+		pr_debug("device: %s enabled fullmember-sendonly for sendonly MCG\n",
+			 priv->dev->name);
+		priv->sm_fullmember_sendonly_support = true;
+	} else {
+		pr_debug("device: %s disabled fullmember-sendonly for sendonly MCG\n",
+			 priv->dev->name);
+		priv->sm_fullmember_sendonly_support = false;
+	}
+
+out:
+	complete(&cb_ctx->done);
+}
+
+int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv)
+{
+	struct classport_info_context *callback_context;
+	int ret;
+
+	callback_context = kmalloc(sizeof(*callback_context), GFP_KERNEL);
+	if (!callback_context)
+		return -ENOMEM;
+
+	callback_context->priv = priv;
+	init_completion(&callback_context->done);
+
+	ret = ib_sa_classport_info_rec_query(&ipoib_sa_client,
+					     priv->ca, priv->port, 3000,
+					     GFP_KERNEL,
+					     classport_info_query_cb,
+					     callback_context,
+					     &callback_context->sa_query);
+	if (ret < 0) {
+		pr_info("%s failed to send ib_sa_classport_info query, ret: %d\n",
+			priv->dev->name, ret);
+		kfree(callback_context);
+		return ret;
+	}
+
+	/* waiting for the callback to finish before returning */
+	wait_for_completion(&callback_context->done);
+	kfree(callback_context);
+
+	return ret;
+}
+
void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
...
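
Editor's note on the helper added above: ib_sa_classport_info_rec_query() is asynchronous, and the completion turns it into a blocking call, so ipoib_check_sm_sendonly_fullmember_support() must run in process context (it also allocates with GFP_KERNEL). Annotated for readability, the query call reads roughly as follows; the comments are mine, not part of the patch:

	ret = ib_sa_classport_info_rec_query(&ipoib_sa_client,            /* SA client ipoib registered at module load */
					     priv->ca, priv->port,         /* device and port whose SA is queried */
					     3000,                         /* timeout, in milliseconds */
					     GFP_KERNEL,                   /* may sleep while allocating */
					     classport_info_query_cb,      /* called when the SA answers or the query fails */
					     callback_context,             /* opaque pointer handed back to the callback */
					     &callback_context->sa_query); /* handle to the in-flight query */

On success the SA layer returns a non-negative query id, which is why both the helper and its caller in the carrier-on task below treat only negative values as failure.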
@@ -64,6 +64,9 @@ struct ipoib_mcast_iter {
	unsigned int send_only;
};

+/* join state that allows creating mcg with sendonly member request */
+#define SENDONLY_FULLMEMBER_JOIN	8
+
/*
 * This should be called with the priv->lock held
 */
@@ -326,12 +329,23 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   carrier_on_task);
	struct ib_port_attr attr;
+	int ret;

	if (ib_query_port(priv->ca, priv->port, &attr) ||
	    attr.state != IB_PORT_ACTIVE) {
		ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
		return;
	}
+	/*
+	 * Check whether sendonly MCGs can be created with the sendonly-fullmember
+	 * join state. This is done here, after the successful join to the broadcast
+	 * group, because the broadcast group must always be joined first and is
+	 * always re-joined if the SM changes substantially.
+	 */
+	ret = ipoib_check_sm_sendonly_fullmember_support(priv);
+	if (ret < 0)
+		pr_debug("%s failed query sm support for sendonly-fullmember (ret: %d)\n",
+			 priv->dev->name, ret);

	/*
	 * Take rtnl_lock to avoid racing with ipoib_stop() and
@@ -515,22 +529,20 @@ static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
		rec.hop_limit = priv->broadcast->mcmember.hop_limit;

		/*
-		 * Send-only IB Multicast joins do not work at the core
-		 * IB layer yet, so we can't use them here. However,
-		 * we are emulating an Ethernet multicast send, which
-		 * does not require a multicast subscription and will
-		 * still send properly. The most appropriate thing to
+		 * Send-only IB Multicast joins work at the core IB layer but
+		 * require specific SM support.
+		 * We can use such joins here only if the current SM supports that feature.
+		 * However, if not, we emulate an Ethernet multicast send,
+		 * which does not require a multicast subscription and will
+		 * still send properly. The most appropriate thing to
		 * do is to create the group if it doesn't exist as that
		 * most closely emulates the behavior, from a user space
-		 * application perspecitive, of Ethernet multicast
-		 * operation. For now, we do a full join, maybe later
-		 * when the core IB layers support send only joins we
-		 * will use them.
+		 * application perspective, of Ethernet multicast operation.
		 */
-#if 0
-		if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
-			rec.join_state = 4;
-#endif
+		if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
+		    priv->sm_fullmember_sendonly_support)
+			/* SM supports sendonly-fullmember, otherwise fallback to full-member */
+			rec.join_state = SENDONLY_FULLMEMBER_JOIN;
	}

	spin_unlock_irq(&priv->lock);
...