Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7b053842 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull regmap updates from Mark Brown:
 "In user visible terms just a couple of enhancements here, though there
  was a moderate amount of refactoring required in order to support the
  register cache sync performance improvements.

   - Support for block and asynchronous I/O during register cache
     syncing; this provides a use case dependent performance
     improvement.
   - Additional debugfs information on the memory consumption and
     register set"

* tag 'regmap-v3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap: (23 commits)
  regmap: don't corrupt work buffer in _regmap_raw_write()
  regmap: cache: Fix format specifier in dev_dbg
  regmap: cache: Make regcache_sync_block_raw static
  regmap: cache: Write consecutive registers in a single block write
  regmap: cache: Split raw and non-raw syncs
  regmap: cache: Factor out block sync
  regmap: cache: Factor out reg_present support from rbtree cache
  regmap: cache: Use raw I/O to sync rbtrees if we can
  regmap: core: Provide regmap_can_raw_write() operation
  regmap: cache: Provide a get address of value operation
  regmap: Cut down on the average # of nodes in the rbtree cache
  regmap: core: Make raw write available to regcache
  regmap: core: Warn on invalid operation combinations
  regmap: irq: Clarify error message when we fail to request primary IRQ
  regmap: rbtree Expose total memory consumption in the rbtree debugfs entry
  regmap: debugfs: Add a registers `range' file
  regmap: debugfs: Simplify calculation of `c->max_reg'
  regmap: cache: Store caches in native register format where possible
  regmap: core: Split out in place value parsing
  regmap: cache: Use regcache_get_value() to check if we updated
  ...
parents 5415ba99 38a81796
Loading
Loading
Loading
Loading
+34 −6
Original line number Diff line number Diff line
@@ -38,7 +38,8 @@ struct regmap_format {
			     unsigned int reg, unsigned int val);
	void (*format_reg)(void *buf, unsigned int reg, unsigned int shift);
	void (*format_val)(void *buf, unsigned int val, unsigned int shift);
	unsigned int (*parse_val)(void *buf);
	unsigned int (*parse_val)(const void *buf);
	void (*parse_inplace)(void *buf);
};

struct regmap_async {
@@ -76,6 +77,7 @@ struct regmap {
	unsigned int debugfs_tot_len;

	struct list_head debugfs_off_cache;
	struct mutex cache_lock;
#endif

	unsigned int max_register;
@@ -125,6 +127,9 @@ struct regmap {
	void *cache;
	u32 cache_dirty;

	unsigned long *cache_present;
	unsigned int cache_present_nbits;

	struct reg_default *patch;
	int patch_regs;

@@ -187,12 +192,35 @@ int regcache_read(struct regmap *map,
int regcache_write(struct regmap *map,
			unsigned int reg, unsigned int value);
int regcache_sync(struct regmap *map);

unsigned int regcache_get_val(const void *base, unsigned int idx,
			      unsigned int word_size);
bool regcache_set_val(void *base, unsigned int idx,
		      unsigned int val, unsigned int word_size);
int regcache_sync_block(struct regmap *map, void *block,
			unsigned int block_base, unsigned int start,
			unsigned int end);

static inline const void *regcache_get_val_addr(struct regmap *map,
						const void *base,
						unsigned int idx)
{
	return base + (map->cache_word_size * idx);
}

unsigned int regcache_get_val(struct regmap *map, const void *base,
			      unsigned int idx);
bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
		      unsigned int val);
int regcache_lookup_reg(struct regmap *map, unsigned int reg);
int regcache_set_reg_present(struct regmap *map, unsigned int reg);

/* Test whether @reg is marked present in the cache presence bitmap.
 * If no bitmap has been allocated every register is assumed present. */
static inline bool regcache_reg_present(struct regmap *map, unsigned int reg)
{
	if (!map->cache_present)
		return true;
	/*
	 * Valid bit indices are 0..cache_present_nbits - 1; the previous
	 * '>' comparison allowed reg == cache_present_nbits to read one
	 * bit past the initialised region of the bitmap (bits beyond
	 * nbits in the last word may be uninitialised after krealloc()).
	 */
	if (reg >= map->cache_present_nbits)
		return false;
	return map->cache_present[BIT_WORD(reg)] & BIT_MASK(reg);
}

int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len, bool async);

void regmap_async_complete_cb(struct regmap_async *async, int ret);

+2 −4
Original line number Diff line number Diff line
@@ -260,8 +260,7 @@ static int regcache_lzo_read(struct regmap *map,
	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
	if (ret >= 0)
		/* fetch the value from the cache */
		*value = regcache_get_val(lzo_block->dst, blkpos,
					  map->cache_word_size);
		*value = regcache_get_val(map, lzo_block->dst, blkpos);

	kfree(lzo_block->dst);
	/* restore the pointer and length of the compressed block */
@@ -304,8 +303,7 @@ static int regcache_lzo_write(struct regmap *map,
	}

	/* write the new value to the cache */
	if (regcache_set_val(lzo_block->dst, blkpos, value,
			     map->cache_word_size)) {
	if (regcache_set_val(map, lzo_block->dst, blkpos, value)) {
		kfree(lzo_block->dst);
		goto out;
	}
+46 −54
Original line number Diff line number Diff line
@@ -47,18 +47,17 @@ static inline void regcache_rbtree_get_base_top_reg(
	*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}

static unsigned int regcache_rbtree_get_register(
	struct regcache_rbtree_node *rbnode, unsigned int idx,
	unsigned int word_size)
static unsigned int regcache_rbtree_get_register(struct regmap *map,
	struct regcache_rbtree_node *rbnode, unsigned int idx)
{
	return regcache_get_val(rbnode->block, idx, word_size);
	return regcache_get_val(map, rbnode->block, idx);
}

static void regcache_rbtree_set_register(struct regcache_rbtree_node *rbnode,
					 unsigned int idx, unsigned int val,
					 unsigned int word_size)
static void regcache_rbtree_set_register(struct regmap *map,
					 struct regcache_rbtree_node *rbnode,
					 unsigned int idx, unsigned int val)
{
	regcache_set_val(rbnode->block, idx, val, word_size);
	regcache_set_val(map, rbnode->block, idx, val);
}

static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
@@ -139,15 +138,21 @@ static int rbtree_show(struct seq_file *s, void *ignored)
	struct regcache_rbtree_node *n;
	struct rb_node *node;
	unsigned int base, top;
	size_t mem_size;
	int nodes = 0;
	int registers = 0;
	int this_registers, average;

	map->lock(map);

	mem_size = sizeof(*rbtree_ctx);
	mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long);

	for (node = rb_first(&rbtree_ctx->root); node != NULL;
	     node = rb_next(node)) {
		n = container_of(node, struct regcache_rbtree_node, node);
		mem_size += sizeof(*n);
		mem_size += (n->blklen * map->cache_word_size);

		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
		this_registers = ((top - base) / map->reg_stride) + 1;
@@ -162,8 +167,8 @@ static int rbtree_show(struct seq_file *s, void *ignored)
	else
		average = 0;

	seq_printf(s, "%d nodes, %d registers, average %d registers\n",
		   nodes, registers, average);
	seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
		   nodes, registers, average, mem_size);

	map->unlock(map);

@@ -260,8 +265,9 @@ static int regcache_rbtree_read(struct regmap *map,
	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		*value = regcache_rbtree_get_register(rbnode, reg_tmp,
						      map->cache_word_size);
		if (!regcache_reg_present(map, reg))
			return -ENOENT;
		*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
	} else {
		return -ENOENT;
	}
@@ -270,21 +276,23 @@ static int regcache_rbtree_read(struct regmap *map,
}


static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode,
static int regcache_rbtree_insert_to_block(struct regmap *map,
					   struct regcache_rbtree_node *rbnode,
					   unsigned int pos, unsigned int reg,
					   unsigned int value, unsigned int word_size)
					   unsigned int value)
{
	u8 *blk;

	blk = krealloc(rbnode->block,
		       (rbnode->blklen + 1) * word_size, GFP_KERNEL);
		       (rbnode->blklen + 1) * map->cache_word_size,
		       GFP_KERNEL);
	if (!blk)
		return -ENOMEM;

	/* insert the register value in the correct place in the rbnode block */
	memmove(blk + (pos + 1) * word_size,
		blk + pos * word_size,
		(rbnode->blklen - pos) * word_size);
	memmove(blk + (pos + 1) * map->cache_word_size,
		blk + pos * map->cache_word_size,
		(rbnode->blklen - pos) * map->cache_word_size);

	/* update the rbnode block, its size and the base register */
	rbnode->block = blk;
@@ -292,7 +300,7 @@ static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode,
	if (!pos)
		rbnode->base_reg = reg;

	regcache_rbtree_set_register(rbnode, pos, value, word_size);
	regcache_rbtree_set_register(map, rbnode, pos, value);
	return 0;
}

@@ -302,25 +310,24 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int val;
	unsigned int reg_tmp;
	unsigned int pos;
	int i;
	int ret;

	rbtree_ctx = map->cache;
	/* update the reg_present bitmap, make space if necessary */
	ret = regcache_set_reg_present(map, reg);
	if (ret < 0)
		return ret;

	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		val = regcache_rbtree_get_register(rbnode, reg_tmp,
						   map->cache_word_size);
		if (val == value)
			return 0;
		regcache_rbtree_set_register(rbnode, reg_tmp, value,
					     map->cache_word_size);
		regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
	} else {
		/* look for an adjacent register to the one we are about to add */
		for (node = rb_first(&rbtree_ctx->root); node;
@@ -337,9 +344,10 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
					pos = i + 1;
				else
					pos = i;
				ret = regcache_rbtree_insert_to_block(rbnode_tmp, pos,
								      reg, value,
								      map->cache_word_size);
				ret = regcache_rbtree_insert_to_block(map,
								      rbnode_tmp,
								      pos, reg,
								      value);
				if (ret)
					return ret;
				rbtree_ctx->cached_rbnode = rbnode_tmp;
@@ -354,7 +362,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
		rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
		if (!rbnode)
			return -ENOMEM;
		rbnode->blklen = 1;
		rbnode->blklen = sizeof(*rbnode);
		rbnode->base_reg = reg;
		rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
					GFP_KERNEL);
@@ -362,7 +370,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
			kfree(rbnode);
			return -ENOMEM;
		}
		regcache_rbtree_set_register(rbnode, 0, value, map->cache_word_size);
		regcache_rbtree_set_register(map, rbnode, 0, value);
		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}
@@ -376,10 +384,8 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int regtmp;
	unsigned int val;
	int ret;
	int i, base, end;
	int base, end;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
@@ -402,27 +408,13 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
		else
			end = rbnode->blklen;

		for (i = base; i < end; i++) {
			regtmp = rbnode->base_reg + (i * map->reg_stride);
			val = regcache_rbtree_get_register(rbnode, i,
							   map->cache_word_size);

			/* Is this the hardware default?  If so skip. */
			ret = regcache_lookup_reg(map, regtmp);
			if (ret >= 0 && val == map->reg_defaults[ret].def)
				continue;

			map->cache_bypass = 1;
			ret = _regmap_write(map, regtmp, val);
			map->cache_bypass = 0;
			if (ret)
		ret = regcache_sync_block(map, rbnode->block, rbnode->base_reg,
					  base, end);
		if (ret != 0)
			return ret;
			dev_dbg(map->dev, "Synced register %#x, value %#x\n",
				regtmp, val);
		}
	}

	return 0;
	return regmap_async_complete(map);
}

struct regcache_ops regcache_rbtree_ops = {
+178 −18
Original line number Diff line number Diff line
@@ -45,7 +45,7 @@ static int regcache_hw_init(struct regmap *map)
		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
		if (!tmp_buf)
			return -EINVAL;
		ret = regmap_bulk_read(map, 0, tmp_buf,
		ret = regmap_raw_read(map, 0, tmp_buf,
				      map->num_reg_defaults_raw);
		map->cache_bypass = cache_bypass;
		if (ret < 0) {
@@ -58,8 +58,7 @@ static int regcache_hw_init(struct regmap *map)

	/* calculate the size of reg_defaults */
	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
		val = regcache_get_val(map->reg_defaults_raw,
				       i, map->cache_word_size);
		val = regcache_get_val(map, map->reg_defaults_raw, i);
		if (regmap_volatile(map, i * map->reg_stride))
			continue;
		count++;
@@ -75,8 +74,7 @@ static int regcache_hw_init(struct regmap *map)
	/* fill the reg_defaults */
	map->num_reg_defaults = count;
	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
		val = regcache_get_val(map->reg_defaults_raw,
				       i, map->cache_word_size);
		val = regcache_get_val(map, map->reg_defaults_raw, i);
		if (regmap_volatile(map, i * map->reg_stride))
			continue;
		map->reg_defaults[j].reg = i * map->reg_stride;
@@ -123,6 +121,8 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
	map->reg_defaults_raw = config->reg_defaults_raw;
	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
	map->cache_present = NULL;
	map->cache_present_nbits = 0;

	map->cache = NULL;
	map->cache_ops = cache_types[i];
@@ -181,6 +181,7 @@ void regcache_exit(struct regmap *map)

	BUG_ON(!map->cache_ops);

	kfree(map->cache_present);
	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);
@@ -417,28 +418,68 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);

bool regcache_set_val(void *base, unsigned int idx,
		      unsigned int val, unsigned int word_size)
int regcache_set_reg_present(struct regmap *map, unsigned int reg)
{
	switch (word_size) {
	unsigned long *cache_present;
	unsigned int cache_present_size;
	unsigned int nregs;
	int i;

	nregs = reg + 1;
	cache_present_size = BITS_TO_LONGS(nregs);
	cache_present_size *= sizeof(long);

	if (!map->cache_present) {
		cache_present = kmalloc(cache_present_size, GFP_KERNEL);
		if (!cache_present)
			return -ENOMEM;
		bitmap_zero(cache_present, nregs);
		map->cache_present = cache_present;
		map->cache_present_nbits = nregs;
	}

	if (nregs > map->cache_present_nbits) {
		cache_present = krealloc(map->cache_present,
					 cache_present_size, GFP_KERNEL);
		if (!cache_present)
			return -ENOMEM;
		for (i = 0; i < nregs; i++)
			if (i >= map->cache_present_nbits)
				clear_bit(i, cache_present);
		map->cache_present = cache_present;
		map->cache_present_nbits = nregs;
	}

	set_bit(reg, map->cache_present);
	return 0;
}

bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
		      unsigned int val)
{
	if (regcache_get_val(map, base, idx) == val)
		return true;

	/* Use device native format if possible */
	if (map->format.format_val) {
		map->format.format_val(base + (map->cache_word_size * idx),
				       val, 0);
		return false;
	}

	switch (map->cache_word_size) {
	case 1: {
		u8 *cache = base;
		if (cache[idx] == val)
			return true;
		cache[idx] = val;
		break;
	}
	case 2: {
		u16 *cache = base;
		if (cache[idx] == val)
			return true;
		cache[idx] = val;
		break;
	}
	case 4: {
		u32 *cache = base;
		if (cache[idx] == val)
			return true;
		cache[idx] = val;
		break;
	}
@@ -448,13 +489,18 @@ bool regcache_set_val(void *base, unsigned int idx,
	return false;
}

unsigned int regcache_get_val(const void *base, unsigned int idx,
			      unsigned int word_size)
unsigned int regcache_get_val(struct regmap *map, const void *base,
			      unsigned int idx)
{
	if (!base)
		return -EINVAL;

	switch (word_size) {
	/* Use device native format if possible */
	if (map->format.parse_val)
		return map->format.parse_val(regcache_get_val_addr(map, base,
								   idx));

	switch (map->cache_word_size) {
	case 1: {
		const u8 *cache = base;
		return cache[idx];
@@ -498,3 +544,117 @@ int regcache_lookup_reg(struct regmap *map, unsigned int reg)
	else
		return -ENOENT;
}

/*
 * Sync one cache block to the device one register at a time.
 *
 * Walks indices [start, end) of @block (whose first register is
 * @block_base), skipping registers that are not marked present in the
 * presence bitmap and registers whose cached value matches the hardware
 * default, and writes each remaining value with the cache bypassed so
 * the write goes straight to the device.
 *
 * Returns 0 on success or the first _regmap_write() error.
 */
static int regcache_sync_block_single(struct regmap *map, void *block,
				      unsigned int block_base,
				      unsigned int start, unsigned int end)
{
	unsigned int i, regtmp, val;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		/* Nothing was ever written to this register: skip it */
		if (!regcache_reg_present(map, regtmp))
			continue;

		val = regcache_get_val(map, block, i);

		/* Is this the hardware default?  If so skip. */
		ret = regcache_lookup_reg(map, regtmp);
		if (ret >= 0 && val == map->reg_defaults[ret].def)
			continue;

		/* Bypass the cache so the write reaches the hardware;
		 * restored unconditionally afterwards. */
		map->cache_bypass = 1;

		ret = _regmap_write(map, regtmp, val);

		map->cache_bypass = 0;
		if (ret != 0)
			return ret;
		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
			regtmp, val);
	}

	return 0;
}

/*
 * Flush a pending run of register values as one raw write.
 *
 * @data points at the start of the batched cache data (or NULL if no
 * run is pending, in which case this is a no-op).  The run covers
 * registers [base, cur), i.e. cur is exclusive.  *data is reset to
 * NULL once the write has been issued.
 */
static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
					 unsigned int base, unsigned int cur)
{
	size_t val_bytes;
	int nregs, ret;

	/* No run batched up yet */
	if (!*data)
		return 0;

	val_bytes = map->format.val_bytes;
	nregs = cur - base;

	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
		nregs * val_bytes, nregs, base, cur - 1);

	/* Bypass the cache so the data goes straight to the device */
	map->cache_bypass = 1;
	ret = _regmap_raw_write(map, base, *data, nregs * val_bytes, false);
	map->cache_bypass = 0;

	*data = NULL;

	return ret;
}

/*
 * Sync one cache block to the device using batched raw writes.
 *
 * Walks indices [start, end) of @block, accumulating consecutive
 * registers that need writing into a single run; a run is flushed
 * whenever a register is skipped (not present, or equal to its
 * hardware default) and once more after the loop for any run still
 * pending.
 *
 * Returns 0 on success or the first flush error.
 */
static int regcache_sync_block_raw(struct regmap *map, void *block,
			    unsigned int block_base, unsigned int start,
			    unsigned int end)
{
	unsigned int i, val;
	unsigned int regtmp = 0;
	unsigned int base = 0;
	const void *data = NULL;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(map, regtmp)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		val = regcache_get_val(map, block, i);

		/* Is this the hardware default?  If so skip. */
		ret = regcache_lookup_reg(map, regtmp);
		if (ret >= 0 && val == map->reg_defaults[ret].def) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		/* Start a new run at the first register needing a write */
		if (!data) {
			data = regcache_get_val_addr(map, block, i);
			base = regtmp;
		}
	}

	/*
	 * The flush 'cur' argument is exclusive (count = cur - base), so
	 * pass one stride past the last register scanned; passing plain
	 * regtmp left the final register of the block unsynced.
	 */
	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
					     map->reg_stride);
}

/*
 * Sync a block of cached register values to the device, choosing
 * batched raw block writes when the map supports raw writes and
 * falling back to register-at-a-time writes otherwise.
 */
int regcache_sync_block(struct regmap *map, void *block,
			unsigned int block_base, unsigned int start,
			unsigned int end)
{
	if (!regmap_can_raw_write(map))
		return regcache_sync_block_single(map, block, block_base,
						  start, end);

	return regcache_sync_block_raw(map, block, block_base, start, end);
}
+87 −7
Original line number Diff line number Diff line
@@ -88,16 +88,16 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
	 * If we don't have a cache build one so we don't have to do a
	 * linear scan each time.
	 */
	mutex_lock(&map->cache_lock);
	i = base;
	if (list_empty(&map->debugfs_off_cache)) {
		for (i = base; i <= map->max_register; i += map->reg_stride) {
		for (; i <= map->max_register; i += map->reg_stride) {
			/* Skip unprinted registers, closing off cache entry */
			if (!regmap_readable(map, i) ||
			    regmap_precious(map, i)) {
				if (c) {
					c->max = p - 1;
					fpos_offset = c->max - c->min;
					reg_offset = fpos_offset / map->debugfs_tot_len;
					c->max_reg = c->base_reg + reg_offset;
					c->max_reg = i - map->reg_stride;
					list_add_tail(&c->list,
						      &map->debugfs_off_cache);
					c = NULL;
@@ -111,6 +111,7 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
				c = kzalloc(sizeof(*c), GFP_KERNEL);
				if (!c) {
					regmap_debugfs_free_dump_cache(map);
					mutex_unlock(&map->cache_lock);
					return base;
				}
				c->min = p;
@@ -124,9 +125,7 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
	/* Close the last entry off if we didn't scan beyond it */
	if (c) {
		c->max = p - 1;
		fpos_offset = c->max - c->min;
		reg_offset = fpos_offset / map->debugfs_tot_len;
		c->max_reg = c->base_reg + reg_offset;
		c->max_reg = i - map->reg_stride;
		list_add_tail(&c->list,
			      &map->debugfs_off_cache);
	}
@@ -145,12 +144,14 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
			fpos_offset = from - c->min;
			reg_offset = fpos_offset / map->debugfs_tot_len;
			*pos = c->min + (reg_offset * map->debugfs_tot_len);
			mutex_unlock(&map->cache_lock);
			return c->base_reg + reg_offset;
		}

		*pos = c->max;
		ret = c->max_reg;
	}
	mutex_unlock(&map->cache_lock);

	return ret;
}
@@ -311,6 +312,79 @@ static const struct file_operations regmap_range_fops = {
	.llseek = default_llseek,
};

/*
 * read() handler for the debugfs `range' file: emits one "min-max\n"
 * line per cached register block, as recorded in the debugfs offset
 * cache built by regmap_debugfs_get_dump_start().
 *
 * NOTE(review): output is formatted into a caller-sized kernel buffer
 * and entries that would overflow it are dropped for this read; a
 * subsequent read resumes from *ppos counted in formatted bytes.
 */
static ssize_t regmap_reg_ranges_read_file(struct file *file,
					   char __user *user_buf, size_t count,
					   loff_t *ppos)
{
	struct regmap *map = file->private_data;
	struct regmap_debugfs_off_cache *c;
	loff_t p = 0;		/* running offset within the virtual file */
	size_t buf_pos = 0;	/* bytes formatted into buf so far */
	char *buf;
	char *entry;
	int ret;

	if (*ppos < 0 || !count)
		return -EINVAL;

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* scratch buffer for formatting a single "min-max" entry */
	entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!entry) {
		kfree(buf);
		return -ENOMEM;
	}

	/* While we are at it, build the register dump cache
	 * now so the read() operation on the `registers' file
	 * can benefit from using the cache.  We do not care
	 * about the file position information that is contained
	 * in the cache, just about the actual register blocks */
	regmap_calc_tot_len(map, buf, count);
	regmap_debugfs_get_dump_start(map, 0, *ppos, &p);

	/* Reset file pointer as the fixed-format of the `registers'
	 * file is not compatible with the `range' file */
	p = 0;
	mutex_lock(&map->cache_lock);
	list_for_each_entry(c, &map->debugfs_off_cache, list) {
		snprintf(entry, PAGE_SIZE, "%x-%x",
			 c->base_reg, c->max_reg);
		/* Only emit entries at or past the requested offset */
		if (p >= *ppos) {
			/* entry plus its newline must fit in buf */
			if (buf_pos + 1 + strlen(entry) > count)
				break;
			snprintf(buf + buf_pos, count - buf_pos,
				 "%s", entry);
			buf_pos += strlen(entry);
			buf[buf_pos] = '\n';
			buf_pos++;
		}
		/* advance virtual offset whether or not we emitted */
		p += strlen(entry) + 1;
	}
	mutex_unlock(&map->cache_lock);

	kfree(entry);
	ret = buf_pos;

	if (copy_to_user(user_buf, buf, buf_pos)) {
		ret = -EFAULT;
		goto out_buf;
	}

	*ppos += buf_pos;
out_buf:
	kfree(buf);
	return ret;
}

/* debugfs file operations for the read-only `range' file */
static const struct file_operations regmap_reg_ranges_fops = {
	.open = simple_open,
	.read = regmap_reg_ranges_read_file,
	.llseek = default_llseek,
};

static ssize_t regmap_access_read_file(struct file *file,
				       char __user *user_buf, size_t count,
				       loff_t *ppos)
@@ -385,6 +459,7 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
	struct regmap_range_node *range_node;

	INIT_LIST_HEAD(&map->debugfs_off_cache);
	mutex_init(&map->cache_lock);

	if (name) {
		map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
@@ -403,6 +478,9 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
	debugfs_create_file("name", 0400, map->debugfs,
			    map, &regmap_name_fops);

	debugfs_create_file("range", 0400, map->debugfs,
			    map, &regmap_reg_ranges_fops);

	if (map->max_register) {
		debugfs_create_file("registers", 0400, map->debugfs,
				    map, &regmap_map_fops);
@@ -435,7 +513,9 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
void regmap_debugfs_exit(struct regmap *map)
{
	debugfs_remove_recursive(map->debugfs);
	mutex_lock(&map->cache_lock);
	regmap_debugfs_free_dump_cache(map);
	mutex_unlock(&map->cache_lock);
	kfree(map->debugfs_name);
}

Loading