Commit 9190cc0c authored by Paulo Alcantara, committed by Steve French

smb: client: improve purging of cached referrals

Purge cached referrals that have a single target when reaching maximum
of cache size as the client won't need them to failover.  Otherwise
remove oldest cache entry.
Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
parent 242d23ef
...@@ -126,6 +126,7 @@ static inline void free_tgts(struct cache_entry *ce) ...@@ -126,6 +126,7 @@ static inline void free_tgts(struct cache_entry *ce)
static inline void flush_cache_ent(struct cache_entry *ce) static inline void flush_cache_ent(struct cache_entry *ce)
{ {
cifs_dbg(FYI, "%s: %s\n", __func__, ce->path);
hlist_del_init(&ce->hlist); hlist_del_init(&ce->hlist);
kfree(ce->path); kfree(ce->path);
free_tgts(ce); free_tgts(ce);
...@@ -441,34 +442,31 @@ static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int n ...@@ -441,34 +442,31 @@ static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int n
return ce; return ce;
} }
static void remove_oldest_entry_locked(void) /* Remove all referrals that have a single target or oldest entry */
static void purge_cache(void)
{ {
int i; int i;
struct cache_entry *ce; struct cache_entry *ce;
struct cache_entry *to_del = NULL; struct cache_entry *oldest = NULL;
WARN_ON(!rwsem_is_locked(&htable_rw_lock));
for (i = 0; i < CACHE_HTABLE_SIZE; i++) { for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
struct hlist_head *l = &cache_htable[i]; struct hlist_head *l = &cache_htable[i];
struct hlist_node *n;
hlist_for_each_entry(ce, l, hlist) { hlist_for_each_entry_safe(ce, n, l, hlist) {
if (hlist_unhashed(&ce->hlist)) if (hlist_unhashed(&ce->hlist))
continue; continue;
if (!to_del || timespec64_compare(&ce->etime, if (ce->numtgts == 1)
&to_del->etime) < 0) flush_cache_ent(ce);
to_del = ce; else if (!oldest ||
timespec64_compare(&ce->etime,
&oldest->etime) < 0)
oldest = ce;
} }
} }
if (!to_del) { if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES && oldest)
cifs_dbg(FYI, "%s: no entry to remove\n", __func__); flush_cache_ent(oldest);
return;
}
cifs_dbg(FYI, "%s: removing entry\n", __func__);
dump_ce(to_del);
flush_cache_ent(to_del);
} }
/* Add a new DFS cache entry */ /* Add a new DFS cache entry */
...@@ -484,7 +482,7 @@ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs, ...@@ -484,7 +482,7 @@ static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) { if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES); cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES);
remove_oldest_entry_locked(); purge_cache();
} }
rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash); rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment