Commit 256d0e9e authored by Dan Williams

cxl/port: Move 'cxl_ep' references to an xarray per port

In preparation for region provisioning that needs to walk the topology
by endpoints, use an xarray to record endpoint interest in a given port.
In addition to being more space and time efficient, it also reduces the
complexity of the implementation by moving the locking internal to the
xarray. It also allows for a single cxl_ep reference to
be recorded in multiple xarrays.
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Link: https://lore.kernel.org/r/20220624041950.559155-2-dan.j.williams@intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 1b58b4ca
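For context, the sketch below is not part of the patch (the example_* names are hypothetical); it only illustrates the XArray pattern the change adopts: entries are keyed by the endpoint's struct device pointer cast to unsigned long, xa_insert() detects duplicates, lookup is a direct xa_load(), and teardown iterates and erases without taking the port's device lock, since the xarray serializes its own accesses internally.

#include <linux/device.h>
#include <linux/xarray.h>

/* Hypothetical stand-ins mirroring the shapes used by the patch. */
struct example_ep {
        struct device *ep;
};

struct example_port {
        struct xarray endpoints;        /* (unsigned long)ep_dev -> struct example_ep */
};

static void example_port_init(struct example_port *port)
{
        xa_init(&port->endpoints);
}

static int example_add_ep(struct example_port *port, struct example_ep *new)
{
        /* xa_insert() fails with -EBUSY if the index is already occupied */
        return xa_insert(&port->endpoints, (unsigned long)new->ep, new,
                         GFP_KERNEL);
}

static struct example_ep *example_find_ep(struct example_port *port,
                                          struct device *ep_dev)
{
        /* direct keyed lookup replaces the old list walk */
        return xa_load(&port->endpoints, (unsigned long)ep_dev);
}

static void example_port_teardown(struct example_port *port)
{
        struct example_ep *ep;
        unsigned long index;

        /* no external lock needed; the xarray handles its own locking
         * (freeing of the entries themselves is elided here) */
        xa_for_each(&port->endpoints, index, ep)
                xa_erase(&port->endpoints, index);
        xa_destroy(&port->endpoints);
}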
@@ -431,22 +431,27 @@ static struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
 
 static void cxl_ep_release(struct cxl_ep *ep)
 {
-        if (!ep)
-                return;
-        list_del(&ep->list);
         put_device(ep->ep);
         kfree(ep);
 }
 
+static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep)
+{
+        if (!ep)
+                return;
+        xa_erase(&port->endpoints, (unsigned long) ep->ep);
+        cxl_ep_release(ep);
+}
+
 static void cxl_port_release(struct device *dev)
 {
         struct cxl_port *port = to_cxl_port(dev);
-        struct cxl_ep *ep, *_e;
+        unsigned long index;
+        struct cxl_ep *ep;
 
-        device_lock(dev);
-        list_for_each_entry_safe(ep, _e, &port->endpoints, list)
-                cxl_ep_release(ep);
-        device_unlock(dev);
+        xa_for_each(&port->endpoints, index, ep)
+                cxl_ep_remove(port, ep);
+        xa_destroy(&port->endpoints);
         ida_free(&cxl_port_ida, port->id);
         kfree(port);
 }
@@ -577,7 +582,7 @@ static struct cxl_port *cxl_port_alloc(struct device *uport,
         ida_init(&port->decoder_ida);
         port->hdm_end = -1;
         INIT_LIST_HEAD(&port->dports);
-        INIT_LIST_HEAD(&port->endpoints);
+        xa_init(&port->endpoints);
 
         device_initialize(dev);
         lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
@@ -873,33 +878,21 @@ struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
 }
 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL);
 
-static struct cxl_ep *find_ep(struct cxl_port *port, struct device *ep_dev)
-{
-        struct cxl_ep *ep;
-
-        device_lock_assert(&port->dev);
-        list_for_each_entry(ep, &port->endpoints, list)
-                if (ep->ep == ep_dev)
-                        return ep;
-        return NULL;
-}
-
 static int add_ep(struct cxl_ep *new)
 {
         struct cxl_port *port = new->dport->port;
-        struct cxl_ep *dup;
+        int rc;
 
         device_lock(&port->dev);
         if (port->dead) {
                 device_unlock(&port->dev);
                 return -ENXIO;
         }
-        dup = find_ep(port, new->ep);
-        if (!dup)
-                list_add_tail(&new->list, &port->endpoints);
+        rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new,
+                       GFP_KERNEL);
         device_unlock(&port->dev);
 
-        return dup ? -EEXIST : 0;
+        return rc;
 }
 
 /**
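Worth noting for the hunks below: xa_insert() reports an already-populated index as -EBUSY, whereas the old find_ep()/list_add_tail() path returned -EEXIST for a duplicate endpoint, so the error-code checks in add_port_attach_ep() and devm_cxl_enumerate_ports() are updated to match. A minimal caller-side sketch (abbreviated fragment, not verbatim from the driver):

        rc = cxl_add_ep(dport, &cxlmd->dev);
        if (rc == -EBUSY) {
                /*
                 * The endpoint is already recorded for this port; with the
                 * xarray backend a duplicate key surfaces as -EBUSY rather
                 * than -EEXIST.
                 */
        }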
@@ -920,7 +913,6 @@ static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev)
         if (!ep)
                 return -ENOMEM;
 
-        INIT_LIST_HEAD(&ep->list);
         ep->ep = get_device(ep_dev);
         ep->dport = dport;
@@ -1063,6 +1055,12 @@ static void delete_switch_port(struct cxl_port *port, struct list_head *dports)
         devm_release_action(port->dev.parent, unregister_port, port);
 }
 
+static struct cxl_ep *cxl_ep_load(struct cxl_port *port,
+                                  struct cxl_memdev *cxlmd)
+{
+        return xa_load(&port->endpoints, (unsigned long)&cxlmd->dev);
+}
+
 static void cxl_detach_ep(void *data)
 {
         struct cxl_memdev *cxlmd = data;
@@ -1101,11 +1099,11 @@ static void cxl_detach_ep(void *data)
                 }
 
                 device_lock(&port->dev);
-                ep = find_ep(port, &cxlmd->dev);
+                ep = cxl_ep_load(port, cxlmd);
                 dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
                         ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
-                cxl_ep_release(ep);
-                if (ep && !port->dead && list_empty(&port->endpoints) &&
+                cxl_ep_remove(port, ep);
+                if (ep && !port->dead && xa_empty(&port->endpoints) &&
                     !is_cxl_root(parent_port)) {
                         /*
                          * This was the last ep attached to a dynamically
@@ -1199,7 +1197,7 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
         dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
                 dev_name(&port->dev), dev_name(port->uport));
         rc = cxl_add_ep(dport, &cxlmd->dev);
-        if (rc == -EEXIST) {
+        if (rc == -EBUSY) {
                 /*
                  * "can't" happen, but this error code means
                  * something to the caller, so translate it.
@@ -1262,7 +1260,7 @@ int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
          * the parent_port lock as the current port may be being
          * reaped.
          */
-        if (rc && rc != -EEXIST) {
+        if (rc && rc != -EBUSY) {
                 put_device(&port->dev);
                 return rc;
         }
@@ -347,7 +347,7 @@ struct cxl_port {
         struct device *host_bridge;
         int id;
         struct list_head dports;
-        struct list_head endpoints;
+        struct xarray endpoints;
         struct cxl_dport *parent_dport;
         struct ida decoder_ida;
         int hdm_end;
@@ -381,12 +381,10 @@ struct cxl_dport {
  * struct cxl_ep - track an endpoint's interest in a port
  * @ep: device that hosts a generic CXL endpoint (expander or accelerator)
  * @dport: which dport routes to this endpoint on @port
- * @list: node on port->endpoints list
  */
 struct cxl_ep {
         struct device *ep;
         struct cxl_dport *dport;
-        struct list_head list;
 };
 
 /*
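Finally, the commit message's point about recording a single cxl_ep in multiple xarrays follows from the xarray storing bare pointers: the same allocation can be indexed from more than one collection without an embedded list_head per container. A hedged illustration (record_ep_in_both() and the per_region xarray are hypothetical, anticipating the region provisioning work this patch prepares for):

static int record_ep_in_both(struct xarray *per_port_eps,
                             struct xarray *per_region_eps,
                             struct cxl_ep *ep)
{
        int rc;

        /* key both collections by the same endpoint device pointer */
        rc = xa_insert(per_port_eps, (unsigned long)ep->ep, ep, GFP_KERNEL);
        if (rc)
                return rc;

        rc = xa_insert(per_region_eps, (unsigned long)ep->ep, ep, GFP_KERNEL);
        if (rc)
                /* keep the collections consistent on failure */
                xa_erase(per_port_eps, (unsigned long)ep->ep);
        return rc;
}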