Commit 6f42193f authored by Christoph Hellwig, committed by Jason Gunthorpe

memremap: don't use a separate devm action for devmap_managed_enable_get

Just clean up for early failures and then piggy back on
devm_memremap_pages_release.  This helps with a pending not device managed
version of devm_memremap_pages.

Link: https://lore.kernel.org/r/20190818090557.17853-4-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Tested-by: Bharata B Rao <bharata@linux.ibm.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent fdc029b1
@@ -21,13 +21,13 @@ DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
 EXPORT_SYMBOL(devmap_managed_key);
 static atomic_t devmap_managed_enable;
 
-static void devmap_managed_enable_put(void *data)
+static void devmap_managed_enable_put(void)
 {
 	if (atomic_dec_and_test(&devmap_managed_enable))
 		static_branch_disable(&devmap_managed_key);
 }
 
-static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
+static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
 {
 	if (!pgmap->ops || !pgmap->ops->page_free) {
 		WARN(1, "Missing page_free method\n");
@@ -36,13 +36,16 @@ static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgm
 	if (atomic_inc_return(&devmap_managed_enable) == 1)
 		static_branch_enable(&devmap_managed_key);
-	return devm_add_action_or_reset(dev, devmap_managed_enable_put, NULL);
+	return 0;
 }
 #else
-static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
+static int devmap_managed_enable_get(struct dev_pagemap *pgmap)
 {
 	return -EINVAL;
 }
+
+static void devmap_managed_enable_put(void)
+{
+}
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 
 static void pgmap_array_delete(struct resource *res)
@@ -123,6 +126,7 @@ static void devm_memremap_pages_release(void *data)
 	untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
 	pgmap_array_delete(res);
 	WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
+	devmap_managed_enable_put();
 }
 
 static void dev_pagemap_percpu_release(struct percpu_ref *ref)
@@ -212,7 +216,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 	}
 
 	if (need_devmap_managed) {
-		error = devmap_managed_enable_get(dev, pgmap);
+		error = devmap_managed_enable_get(pgmap);
 		if (error)
 			return ERR_PTR(error);
 	}
@@ -321,6 +325,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 err_array:
 	dev_pagemap_kill(pgmap);
 	dev_pagemap_cleanup(pgmap);
+	devmap_managed_enable_put();
 	return ERR_PTR(error);
 }
 EXPORT_SYMBOL_GPL(devm_memremap_pages);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment