Commit 366df726 authored by Adrian Hunter, committed by Arnaldo Carvalho de Melo

perf dso: Refactor dso_cache__read()

Refactor dso_cache__read() to separate populating the cache from copying
data from it.  This is preparation for adding a cache "write" that will
update the data in the cache.
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: x86@kernel.org
Link: http://lore.kernel.org/lkml/20191025130000.13032-3-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent fd62c109
@@ -768,7 +768,7 @@ dso_cache__free(struct dso *dso)
 	pthread_mutex_unlock(&dso->lock);
 }
 
-static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
+static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset)
 {
 	const struct rb_root *root = &dso->data.cache;
 	struct rb_node * const *p = &root->rb_node;
@@ -863,54 +863,64 @@ static ssize_t file_read(struct dso *dso, struct machine *machine,
 	return ret;
 }
 
-static ssize_t
-dso_cache__read(struct dso *dso, struct machine *machine,
-		u64 offset, u8 *data, ssize_t size)
+static struct dso_cache *dso_cache__populate(struct dso *dso,
+					     struct machine *machine,
+					     u64 offset, ssize_t *ret)
 {
 	u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
 	struct dso_cache *cache;
 	struct dso_cache *old;
-	ssize_t ret;
 
 	cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
-	if (!cache)
-		return -ENOMEM;
+	if (!cache) {
+		*ret = -ENOMEM;
+		return NULL;
+	}
 
 	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
-		ret = bpf_read(dso, cache_offset, cache->data);
+		*ret = bpf_read(dso, cache_offset, cache->data);
 	else
-		ret = file_read(dso, machine, cache_offset, cache->data);
+		*ret = file_read(dso, machine, cache_offset, cache->data);
 
-	if (ret > 0) {
-		cache->offset = cache_offset;
-		cache->size   = ret;
+	if (*ret <= 0) {
+		free(cache);
+		return NULL;
+	}
 
-		old = dso_cache__insert(dso, cache);
-		if (old) {
-			/* we lose the race */
-			free(cache);
-			cache = old;
-		}
+	cache->offset = cache_offset;
+	cache->size   = *ret;
 
-		ret = dso_cache__memcpy(cache, offset, data, size);
+	old = dso_cache__insert(dso, cache);
+	if (old) {
+		/* we lose the race */
+		free(cache);
+		cache = old;
 	}
 
-	if (ret <= 0)
-		free(cache);
+	return cache;
+}
 
-	return ret;
+static struct dso_cache *dso_cache__find(struct dso *dso,
+					 struct machine *machine,
+					 u64 offset,
+					 ssize_t *ret)
+{
+	struct dso_cache *cache = __dso_cache__find(dso, offset);
+
+	return cache ? cache : dso_cache__populate(dso, machine, offset, ret);
 }
 
 static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
 			      u64 offset, u8 *data, ssize_t size)
 {
 	struct dso_cache *cache;
+	ssize_t ret = 0;
 
-	cache = dso_cache__find(dso, offset);
-	if (cache)
-		return dso_cache__memcpy(cache, offset, data, size);
-	else
-		return dso_cache__read(dso, machine, offset, data, size);
+	cache = dso_cache__find(dso, machine, offset, &ret);
+	if (!cache)
+		return ret;
+
+	return dso_cache__memcpy(cache, offset, data, size);
 }
 
 /*
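
The refactor above leaves dso_cache_read() with a single job: look up (or populate) the cached block, then copy out of it. As a rough illustration of the cache "write" the commit message says this prepares for, here is a hypothetical sketch of a write-side helper built on the new dso_cache__find(). The name dso_cache_write() and its body are assumptions for illustration only, not part of this commit; the copy simply mirrors dso_cache__memcpy() in the opposite direction.

/*
 * Hypothetical sketch, not part of this commit: a write-side helper
 * reusing the new dso_cache__find().  The copy mirrors
 * dso_cache__memcpy() but moves data into the cached block.
 */
static ssize_t dso_cache_write(struct dso *dso, struct machine *machine,
			       u64 offset, u8 *data, ssize_t size)
{
	u64 cache_offset, cache_size;
	struct dso_cache *cache;
	ssize_t ret = 0;

	cache = dso_cache__find(dso, machine, offset, &ret);
	if (!cache)
		return ret;

	/* Clamp the copy to the bytes this cache block covers. */
	cache_offset = offset - cache->offset;
	cache_size = cache->size - cache_offset;
	if (cache_size > (u64)size)
		cache_size = size;

	memcpy(cache->data + cache_offset, data, cache_size);
	return cache_size;
}

The point of the split is visible here: because dso_cache__find() now hides population, a writer needs no duplicate of the read-and-insert logic and only has to copy into the block it gets back.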