Commit 1122f400 authored by Xin Yin, committed by David Howells

cachefiles: make on-demand request distribution fairer

Currently, enqueuing and dequeuing of on-demand requests both start from
index 0, which makes request distribution unfair. Under heavy concurrent
I/O, requests stored at higher indices can starve.

Search for requests cyclically in cachefiles_ondemand_daemon_read() to
make the distribution fairer.

Fixes: c8383054 ("cachefiles: notify the user daemon when looking up cookie")
Reported-by: Yongqing Li <liyongqing@bytedance.com>
Signed-off-by: Xin Yin <yinxin.x@bytedance.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeffle Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20220817065200.11543-1-yinxin.x@bytedance.com/ # v1
Link: https://lore.kernel.org/r/20220825020945.2293-1-yinxin.x@bytedance.com/ # v2
parent c93ccd63
fs/cachefiles/internal.h
@@ -111,6 +111,7 @@ struct cachefiles_cache {
 	char		*tag;		/* cache binding tag */
 	refcount_t	unbind_pincount;/* refcount to do daemon unbind */
 	struct xarray	reqs;		/* xarray of pending on-demand requests */
+	unsigned long	req_id_next;
 	struct xarray	ondemand_ids;	/* xarray for ondemand_id allocation */
 	u32		ondemand_id_next;
 };

fs/cachefiles/ondemand.c
@@ -242,14 +242,19 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 	unsigned long id = 0;
 	size_t n;
 	int ret = 0;
-	XA_STATE(xas, &cache->reqs, 0);
+	XA_STATE(xas, &cache->reqs, cache->req_id_next);

 	/*
-	 * Search for a request that has not ever been processed, to prevent
-	 * requests from being processed repeatedly.
+	 * Cyclically search for a request that has not ever been processed,
+	 * to prevent requests from being processed repeatedly, and make
+	 * request distribution fair.
 	 */
 	xa_lock(&cache->reqs);
 	req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
+	if (!req && cache->req_id_next > 0) {
+		xas_set(&xas, 0);
+		req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW);
+	}
 	if (!req) {
 		xa_unlock(&cache->reqs);
 		return 0;
@@ -264,6 +269,7 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 	}
 	xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
+	cache->req_id_next = xas.xa_index + 1;
 	xa_unlock(&cache->reqs);

 	id = xas.xa_index;
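The patch amounts to a standard wrap-around (cyclic) scan: look for a marked entry from the saved cursor to the end of the index space, and if nothing is found, wrap to index 0 and scan up to the cursor. Below is a minimal userspace C sketch of the same pattern over a plain flag array; the names (find_next_pending, REQ_SLOTS, pending) are illustrative only and not part of the kernel patch.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define REQ_SLOTS 8

/*
 * Cyclic search for the next slot still marked pending, starting at
 * *cursor. First scan [*cursor, REQ_SLOTS); if nothing is found there,
 * wrap around and scan [0, *cursor). On a hit, advance the cursor past
 * the slot so the next call starts behind it.
 */
static int find_next_pending(const bool pending[REQ_SLOTS], size_t *cursor)
{
	for (size_t i = *cursor; i < REQ_SLOTS; i++) {
		if (pending[i]) {
			*cursor = i + 1;
			return (int)i;
		}
	}
	for (size_t i = 0; i < *cursor; i++) {
		if (pending[i]) {
			*cursor = i + 1;
			return (int)i;
		}
	}
	return -1;	/* nothing pending */
}

int main(void)
{
	bool pending[REQ_SLOTS] = { [1] = true, [3] = true, [6] = true };
	size_t cursor = 4;
	int hit;

	/* Starting at 4: finds 6 first, then wraps and finds 1, then 3. */
	while ((hit = find_next_pending(pending, &cursor)) >= 0) {
		printf("servicing request %d\n", hit);
		pending[hit] = false;
	}
	return 0;
}

As in the patch, the cursor is advanced just past the slot that was handed out, so the next read starts behind it and low-numbered entries are only revisited after the rest of the index space has had a turn.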