Commit b9952b52 authored by Frank Rowand, committed by Rob Herring

of: overlay: update phandle cache on overlay apply and remove

A comment in the review of the patch adding the phandle cache said that
the cache would have to be updated when overlays are applied and removed.
This patch implements the cache updates.

Fixes: 0b3ce78e ("of: cache phandle nodes to reduce cost of of_find_node_by_phandle()")
Reported-by: Alan Tull <atull@kernel.org>
Suggested-by: Alan Tull <atull@kernel.org>
Signed-off-by: Frank Rowand <frank.rowand@sony.com>
Signed-off-by: Rob Herring <robh@kernel.org>
parent a47c9b39
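
A condensed sketch of the flow the diff below wires into the overlay remove path, for orientation only: the wrapper function name is made up, and locking, notifiers, and error handling are omitted. On apply, of_overlay_apply() simply repopulates the cache after the changeset has been applied; on remove, of_overlay_remove() drops the cache before the changeset is reverted, so no cached entry can be left pointing at a node the revert deletes, then rebuilds it afterwards:

/*
 * Illustrative sketch only -- not part of the patch.  Condensed from the
 * of_overlay_remove() hunk below; the function name is invented and the
 * real function does much more (locking, notifiers, error handling).
 */
static int overlay_remove_cache_handling(struct overlay_changeset *ovcs)
{
	int ret, ret_apply = 0;

	/*
	 * Drop the phandle cache before reverting the changeset so a
	 * cache entry cannot be left pointing at a node that the revert
	 * is about to delete.
	 */
	of_free_phandle_cache();

	ret = __of_changeset_revert_entries(&ovcs->cset, &ret_apply);

	/* Rebuild the cache from the tree that remains after the revert. */
	of_populate_phandle_cache();

	return ret;
}
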
@@ -102,7 +102,7 @@ static u32 phandle_cache_mask;
  *   - the phandle lookup overhead reduction provided by the cache
  *     will likely be less
  */
-static void of_populate_phandle_cache(void)
+void of_populate_phandle_cache(void)
 {
 	unsigned long flags;
 	u32 cache_entries;
@@ -134,8 +134,7 @@ static void of_populate_phandle_cache(void)
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 }
 
-#ifndef CONFIG_MODULES
-static int __init of_free_phandle_cache(void)
+int of_free_phandle_cache(void)
 {
 	unsigned long flags;
@@ -148,6 +147,7 @@ static int __init of_free_phandle_cache(void)
 
 	return 0;
 }
+#if !defined(CONFIG_MODULES)
 late_initcall_sync(of_free_phandle_cache);
 #endif
...
@@ -79,6 +79,8 @@ int of_resolve_phandles(struct device_node *tree);
 #if defined(CONFIG_OF_OVERLAY)
 void of_overlay_mutex_lock(void);
 void of_overlay_mutex_unlock(void);
+int of_free_phandle_cache(void);
+void of_populate_phandle_cache(void);
 #else
 static inline void of_overlay_mutex_lock(void) {};
 static inline void of_overlay_mutex_unlock(void) {};
...
@@ -804,6 +804,8 @@ static int of_overlay_apply(const void *fdt, struct device_node *tree,
 		goto err_free_overlay_changeset;
 	}
 
+	of_populate_phandle_cache();
+
 	ret = __of_changeset_apply_notify(&ovcs->cset);
 	if (ret)
 		pr_err("overlay changeset entry notify error %d\n", ret);
@@ -1046,8 +1048,17 @@ int of_overlay_remove(int *ovcs_id)
 
 	list_del(&ovcs->ovcs_list);
 
+	/*
+	 * Disable phandle cache.  Avoids race condition that would arise
+	 * from removing cache entry when the associated node is deleted.
+	 */
+	of_free_phandle_cache();
+
 	ret_apply = 0;
 	ret = __of_changeset_revert_entries(&ovcs->cset, &ret_apply);
 
+	of_populate_phandle_cache();
+
 	if (ret) {
 		if (ret_apply)
 			devicetree_state_flags |= DTSF_REVERT_FAIL;
...