Commit bacdbe07 authored by Stephen Warren, committed by Mark Brown

regmap: introduce fast_io busses, and use a spinlock for them

Some bus types have very fast IO. For these, acquiring a mutex for every
IO operation is a significant overhead. Allow busses to indicate their IO
is fast, and enhance regmap to use a spinlock for those busses.

[Currently limited to native endian registers -- broonie]
Signed-off-by: Stephen Warren <swarren@nvidia.com>
Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
parent 0135bbcc
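
To illustrate the mechanism described above: after this patch a bus driver only has to set fast_io in its struct regmap_bus, and regmap_init() (see the regmap.c hunk below) then installs spinlock-based lock/unlock helpers instead of the mutex-based ones. A minimal sketch follows; the bus name and callbacks are hypothetical, and the callback prototypes assume the bus_context style used by regmap at this point in the tree. Since the spinlock may be held across the bus callbacks, a fast_io bus's read/write implementations must not sleep.

#include <linux/regmap.h>

/* Hypothetical fast bus -- only .fast_io is the point of this sketch */
static int example_fast_write(void *context, const void *data, size_t count)
{
	/* e.g. a couple of register pokes; must not sleep, because regmap
	 * may hold map->spinlock around this call for fast_io busses */
	return 0;
}

static int example_fast_read(void *context, const void *reg, size_t reg_size,
			     void *val, size_t val_size)
{
	/* likewise non-sleeping */
	return 0;
}

static struct regmap_bus example_fast_bus = {
	.fast_io = true,	/* ask regmap for spinlock-based locking */
	.write	 = example_fast_write,
	.read	 = example_fast_read,
};
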
@@ -31,8 +31,14 @@ struct regmap_format {
unsigned int (*parse_val)(void *buf);
};
typedef void (*regmap_lock)(struct regmap *map);
typedef void (*regmap_unlock)(struct regmap *map);
struct regmap {
struct mutex lock;
struct mutex mutex;
spinlock_t spinlock;
regmap_lock lock;
regmap_unlock unlock;
struct device *dev; /* Device we do I/O on */
void *work_buf; /* Scratch buffer used to format I/O */
......
@@ -139,7 +139,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
int nodes = 0;
int registers = 0;
mutex_lock(&map->lock);
map->lock(map);
for (node = rb_first(&rbtree_ctx->root); node != NULL;
node = rb_next(node)) {
@@ -155,7 +155,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
seq_printf(s, "%d nodes, %d registers, average %d registers\n",
nodes, registers, registers / nodes);
mutex_unlock(&map->lock);
map->unlock(map);
return 0;
}
......
@@ -264,7 +264,7 @@ int regcache_sync(struct regmap *map)
BUG_ON(!map->cache_ops || !map->cache_ops->sync);
mutex_lock(&map->lock);
map->lock(map);
/* Remember the initial bypass state */
bypass = map->cache_bypass;
dev_dbg(map->dev, "Syncing %s cache\n",
@@ -296,7 +296,7 @@ int regcache_sync(struct regmap *map)
trace_regcache_sync(map->dev, name, "stop");
/* Restore the bypass state */
map->cache_bypass = bypass;
mutex_unlock(&map->lock);
map->unlock(map);
return ret;
}
@@ -323,7 +323,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
BUG_ON(!map->cache_ops || !map->cache_ops->sync);
mutex_lock(&map->lock);
map->lock(map);
/* Remember the initial bypass state */
bypass = map->cache_bypass;
@@ -342,7 +342,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
trace_regcache_sync(map->dev, name, "stop region");
/* Restore the bypass state */
map->cache_bypass = bypass;
mutex_unlock(&map->lock);
map->unlock(map);
return ret;
}
@@ -361,11 +361,11 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
*/
void regcache_cache_only(struct regmap *map, bool enable)
{
mutex_lock(&map->lock);
map->lock(map);
WARN_ON(map->cache_bypass && enable);
map->cache_only = enable;
trace_regmap_cache_only(map->dev, enable);
mutex_unlock(&map->lock);
map->unlock(map);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
@@ -380,9 +380,9 @@ EXPORT_SYMBOL_GPL(regcache_cache_only);
*/
void regcache_mark_dirty(struct regmap *map)
{
mutex_lock(&map->lock);
map->lock(map);
map->cache_dirty = true;
mutex_unlock(&map->lock);
map->unlock(map);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);
@@ -399,11 +399,11 @@ EXPORT_SYMBOL_GPL(regcache_mark_dirty);
*/
void regcache_cache_bypass(struct regmap *map, bool enable)
{
mutex_lock(&map->lock);
map->lock(map);
WARN_ON(map->cache_only && enable);
map->cache_bypass = enable;
trace_regmap_cache_bypass(map->dev, enable);
mutex_unlock(&map->lock);
map->unlock(map);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
......
@@ -158,6 +158,26 @@ static unsigned int regmap_parse_32(void *buf)
return b[0];
}
static void regmap_lock_mutex(struct regmap *map)
{
mutex_lock(&map->mutex);
}
static void regmap_unlock_mutex(struct regmap *map)
{
mutex_unlock(&map->mutex);
}
static void regmap_lock_spinlock(struct regmap *map)
{
spin_lock(&map->spinlock);
}
static void regmap_unlock_spinlock(struct regmap *map)
{
spin_unlock(&map->spinlock);
}
/**
* regmap_init(): Initialise register map
*
@@ -187,7 +207,15 @@ struct regmap *regmap_init(struct device *dev,
goto err;
}
mutex_init(&map->lock);
if (bus->fast_io) {
spin_lock_init(&map->spinlock);
map->lock = regmap_lock_spinlock;
map->unlock = regmap_unlock_spinlock;
} else {
mutex_init(&map->mutex);
map->lock = regmap_lock_mutex;
map->unlock = regmap_unlock_mutex;
}
map->format.buf_size = (config->reg_bits + config->val_bits) / 8;
map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
map->format.pad_bytes = config->pad_bits / 8;
@@ -365,7 +393,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
int ret;
mutex_lock(&map->lock);
map->lock(map);
regcache_exit(map);
regmap_debugfs_exit(map);
@@ -384,7 +412,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
ret = regcache_init(map, config);
mutex_unlock(&map->lock);
map->unlock(map);
return ret;
}
@@ -536,11 +564,11 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
int ret;
mutex_lock(&map->lock);
map->lock(map);
ret = _regmap_write(map, reg, val);
mutex_unlock(&map->lock);
map->unlock(map);
return ret;
}
@@ -567,11 +595,11 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
{
int ret;
mutex_lock(&map->lock);
map->lock(map);
ret = _regmap_raw_write(map, reg, val, val_len);
mutex_unlock(&map->lock);
map->unlock(map);
return ret;
}
@@ -601,7 +629,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
if (!map->format.parse_val)
return -EINVAL;
mutex_lock(&map->lock);
map->lock(map);
/* No formatting is required if val_bytes is 1 */
if (val_bytes == 1) {
@@ -622,7 +650,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
kfree(wval);
out:
mutex_unlock(&map->lock);
map->unlock(map);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
@@ -696,11 +724,11 @@ int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
int ret;
mutex_lock(&map->lock);
map->lock(map);
ret = _regmap_read(map, reg, val);
mutex_unlock(&map->lock);
map->unlock(map);
return ret;
}
@@ -725,7 +753,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
unsigned int v;
int ret, i;
mutex_lock(&map->lock);
map->lock(map);
if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
map->cache_type == REGCACHE_NONE) {
@@ -746,7 +774,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
}
out:
mutex_unlock(&map->lock);
map->unlock(map);
return ret;
}
@@ -799,7 +827,7 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
int ret;
unsigned int tmp, orig;
mutex_lock(&map->lock);
map->lock(map);
ret = _regmap_read(map, reg, &orig);
if (ret != 0)
@@ -816,7 +844,7 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
}
out:
mutex_unlock(&map->lock);
map->unlock(map);
return ret;
}
@@ -883,7 +911,7 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
if (map->patch)
return -EBUSY;
mutex_lock(&map->lock);
map->lock(map);
bypass = map->cache_bypass;
@@ -911,7 +939,7 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
out:
map->cache_bypass = bypass;
mutex_unlock(&map->lock);
map->unlock(map);
return ret;
}
......
@@ -110,6 +110,8 @@ typedef void (*regmap_hw_free_context)(void *context);
/**
* Description of a hardware bus for the register map infrastructure.
*
* @fast_io: Register IO is fast. Use a spinlock instead of a mutex
* to perform locking.
* @write: Write operation.
* @gather_write: Write operation with split register/value, return -ENOTSUPP
* if not implemented on a given device.
@@ -119,6 +121,7 @@ typedef void (*regmap_hw_free_context)(void *context);
* a read.
*/
struct regmap_bus {
bool fast_io;
regmap_hw_write write;
regmap_hw_gather_write gather_write;
regmap_hw_read read;
......
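
Nothing changes for regmap users: the same regmap_write()/regmap_read()/regmap_update_bits() calls are made, and only the lock implementation behind map->lock()/map->unlock() differs. A small illustrative sketch, continuing the hypothetical bus above (the register offsets, bit and helper are made up):

#define EXAMPLE_CTRL_REG	0x00
#define EXAMPLE_CTRL_ENABLE	BIT(0)
#define EXAMPLE_STATUS_REG	0x04

/* With a fast_io bus, each call below serialises on map->spinlock instead
 * of a mutex, so the per-access locking overhead is much smaller. */
static int example_enable_and_check(struct regmap *map, unsigned int *status)
{
	int ret;

	ret = regmap_update_bits(map, EXAMPLE_CTRL_REG,
				 EXAMPLE_CTRL_ENABLE, EXAMPLE_CTRL_ENABLE);
	if (ret)
		return ret;

	return regmap_read(map, EXAMPLE_STATUS_REG, status);
}
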