drivers/base/regmap/internal.h +2 −0

@@ -52,6 +52,7 @@ struct regmap_async {
 struct regmap {
 	struct mutex mutex;
 	spinlock_t spinlock;
+	unsigned long spinlock_flags;
 	regmap_lock lock;
 	regmap_unlock unlock;
 	void *lock_arg; /* This is passed to lock/unlock functions */
@@ -148,6 +149,7 @@ struct regcache_ops {
 	int (*read)(struct regmap *map, unsigned int reg, unsigned int *value);
 	int (*write)(struct regmap *map, unsigned int reg, unsigned int value);
 	int (*sync)(struct regmap *map, unsigned int min, unsigned int max);
+	int (*drop)(struct regmap *map, unsigned int min, unsigned int max);
 };
 
 bool regmap_writeable(struct regmap *map, unsigned int reg);

drivers/base/regmap/regcache-rbtree.c +48 −14

@@ -304,6 +304,48 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
 	return 0;
 }
 
+static struct regcache_rbtree_node *
+regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
+{
+	struct regcache_rbtree_node *rbnode;
+	const struct regmap_range *range;
+	int i;
+
+	rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
+	if (!rbnode)
+		return NULL;
+
+	/* If there is a read table then use it to guess at an allocation */
+	if (map->rd_table) {
+		for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
+			if (regmap_reg_in_range(reg,
+						&map->rd_table->yes_ranges[i]))
+				break;
+		}
+
+		if (i != map->rd_table->n_yes_ranges) {
+			range = &map->rd_table->yes_ranges[i];
+			rbnode->blklen = range->range_max - range->range_min
+				+ 1;
+			rbnode->base_reg = range->range_min;
+		}
+	}
+
+	if (!rbnode->blklen) {
+		rbnode->blklen = sizeof(*rbnode);
+		rbnode->base_reg = reg;
+	}
+
+	rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
+				GFP_KERNEL);
+	if (!rbnode->block) {
+		kfree(rbnode);
+		return NULL;
+	}
+
+	return rbnode;
+}
+
 static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
 				 unsigned int value)
 {
@@ -354,23 +396,15 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
 			return 0;
 		}
 	}
-	/* we did not manage to find a place to insert it in an existing
-	 * block so create a new rbnode with a single register in its block.
-	 * This block will get populated further if any other adjacent
-	 * registers get modified in the future.
+
+	/* We did not manage to find a place to insert it in
+	 * an existing block so create a new rbnode.
 	 */
-	rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
+	rbnode = regcache_rbtree_node_alloc(map, reg);
 	if (!rbnode)
 		return -ENOMEM;
-	rbnode->blklen = sizeof(*rbnode);
-	rbnode->base_reg = reg;
-	rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
-				GFP_KERNEL);
-	if (!rbnode->block) {
-		kfree(rbnode);
-		return -ENOMEM;
-	}
-	regcache_rbtree_set_register(map, rbnode, 0, value);
+	regcache_rbtree_set_register(map, rbnode,
				     reg - rbnode->base_reg, value);
 	regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
 	rbtree_ctx->cached_rbnode = rbnode;
 }
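To make the new allocation path concrete, here is a rough sketch of the kind of read table it keys off; this snippet is not part of the patch, and the foo_* names are invented for illustration only (regmap_reg_range, struct regmap_range and struct regmap_access_table are the existing regmap definitions):

	#include <linux/regmap.h>

	/* Invented tables, purely to illustrate regcache_rbtree_node_alloc() */
	static const struct regmap_range foo_rd_ranges[] = {
		regmap_reg_range(0x00, 0x1f),	/* control registers */
		regmap_reg_range(0x40, 0x4f),	/* DSP coefficients */
	};

	static const struct regmap_access_table foo_rd_table = {
		.yes_ranges = foo_rd_ranges,
		.n_yes_ranges = ARRAY_SIZE(foo_rd_ranges),
	};

With rd_table pointing at something like this, the first cached write to, say, register 0x42 creates a node covering the whole second range (base_reg = 0x40, blklen = 0x4f - 0x40 + 1 = 16) instead of a minimal block that has to be grown as neighbouring registers are written; a register that falls outside every range still gets the old fallback sizing.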
drivers/base/regmap/regcache.c +79 −4

@@ -250,6 +250,38 @@ int regcache_write(struct regmap *map,
 	return 0;
 }
 
+static int regcache_default_sync(struct regmap *map, unsigned int min,
+				 unsigned int max)
+{
+	unsigned int reg;
+
+	for (reg = min; reg <= max; reg++) {
+		unsigned int val;
+		int ret;
+
+		if (regmap_volatile(map, reg))
+			continue;
+
+		ret = regcache_read(map, reg, &val);
+		if (ret)
+			return ret;
+
+		/* Is this the hardware default? If so skip. */
+		ret = regcache_lookup_reg(map, reg);
+		if (ret >= 0 && val == map->reg_defaults[ret].def)
+			continue;
+
+		map->cache_bypass = 1;
+		ret = _regmap_write(map, reg, val);
+		map->cache_bypass = 0;
+		if (ret)
+			return ret;
+		dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
+	}
+
+	return 0;
+}
+
 /**
  * regcache_sync: Sync the register cache with the hardware.
  *
@@ -268,7 +300,7 @@ int regcache_sync(struct regmap *map)
 	const char *name;
 	unsigned int bypass;
 
-	BUG_ON(!map->cache_ops || !map->cache_ops->sync);
+	BUG_ON(!map->cache_ops);
 
 	map->lock(map->lock_arg);
 	/* Remember the initial bypass state */
@@ -297,7 +329,10 @@ int regcache_sync(struct regmap *map)
 	}
 	map->cache_bypass = 0;
 
-	ret = map->cache_ops->sync(map, 0, map->max_register);
+	if (map->cache_ops->sync)
+		ret = map->cache_ops->sync(map, 0, map->max_register);
+	else
+		ret = regcache_default_sync(map, 0, map->max_register);
 
 	if (ret == 0)
 		map->cache_dirty = false;
@@ -331,7 +366,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
 	const char *name;
 	unsigned int bypass;
 
-	BUG_ON(!map->cache_ops || !map->cache_ops->sync);
+	BUG_ON(!map->cache_ops);
 
 	map->lock(map->lock_arg);
 
@@ -346,7 +381,10 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
 	if (!map->cache_dirty)
 		goto out;
 
-	ret = map->cache_ops->sync(map, min, max);
+	if (map->cache_ops->sync)
+		ret = map->cache_ops->sync(map, min, max);
+	else
+		ret = regcache_default_sync(map, min, max);
 
 out:
 	trace_regcache_sync(map->dev, name, "stop region");
@@ -358,6 +396,43 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
 }
 EXPORT_SYMBOL_GPL(regcache_sync_region);
 
+/**
+ * regcache_drop_region: Discard part of the register cache
+ *
+ * @map: map to operate on
+ * @min: first register to discard
+ * @max: last register to discard
+ *
+ * Discard part of the register cache.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_drop_region(struct regmap *map, unsigned int min,
+			 unsigned int max)
+{
+	unsigned int reg;
+	int ret = 0;
+
+	if (!map->cache_present && !(map->cache_ops && map->cache_ops->drop))
+		return -EINVAL;
+
+	map->lock(map->lock_arg);
+
+	trace_regcache_drop_region(map->dev, min, max);
+
+	if (map->cache_present)
+		for (reg = min; reg < max + 1; reg++)
+			clear_bit(reg, map->cache_present);
+
+	if (map->cache_ops && map->cache_ops->drop)
+		ret = map->cache_ops->drop(map, min, max);
+
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regcache_drop_region);
+
 /**
  * regcache_cache_only: Put a register map into cache only mode
  *
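A rough usage sketch of the new regcache_drop_region() API follows; it is not part of the diff, and the foo_* structure, register macros and power-down hook are invented for the example:

	#include <linux/regmap.h>

	/* Hypothetical caller: the DSP block loses its coefficient registers
	 * when powered off, so the cached values for that range are stale and
	 * should not be written back by a later regcache_sync(). */
	static int foo_dsp_power_down(struct foo_priv *foo)
	{
		return regcache_drop_region(foo->regmap, FOO_DSP_COEF_BASE,
					    FOO_DSP_COEF_MAX);
	}

Callers like this are also why regcache_sync() no longer insists on a cache_ops->sync() implementation: a flat cache without its own sync op is now walked by regcache_default_sync(), which skips volatile registers and values that still match the hardware defaults.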
drivers/base/regmap/regmap-debugfs.c +4 −0

@@ -84,6 +84,10 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
 	unsigned int fpos_offset;
 	unsigned int reg_offset;
 
+	/* Suppress the cache if we're using a subrange */
+	if (from)
+		return from;
+
 	/*
 	 * If we don't have a cache build one so we don't have to do a
 	 * linear scan each time.
drivers/base/regmap/regmap.c +12 −9

@@ -65,8 +65,7 @@ bool regmap_reg_in_ranges(unsigned int reg,
 }
 EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
 
-static bool _regmap_check_range_table(struct regmap *map,
-				      unsigned int reg,
-				      const struct regmap_access_table *table)
+bool regmap_check_range_table(struct regmap *map, unsigned int reg,
+			      const struct regmap_access_table *table)
 {
 	/* Check "no ranges" first */
@@ -80,6 +79,7 @@ static bool _regmap_check_range_table(struct regmap *map,
 	return regmap_reg_in_ranges(reg, table->yes_ranges,
 				    table->n_yes_ranges);
 }
+EXPORT_SYMBOL_GPL(regmap_check_range_table);
 
 bool regmap_writeable(struct regmap *map, unsigned int reg)
 {
@@ -90,7 +90,7 @@ bool regmap_writeable(struct regmap *map, unsigned int reg)
 		return map->writeable_reg(map->dev, reg);
 
 	if (map->wr_table)
-		return _regmap_check_range_table(map, reg, map->wr_table);
+		return regmap_check_range_table(map, reg, map->wr_table);
 
 	return true;
 }
@@ -107,7 +107,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg)
 		return map->readable_reg(map->dev, reg);
 
 	if (map->rd_table)
-		return _regmap_check_range_table(map, reg, map->rd_table);
+		return regmap_check_range_table(map, reg, map->rd_table);
 
 	return true;
 }
@@ -121,7 +121,7 @@ bool regmap_volatile(struct regmap *map, unsigned int reg)
 		return map->volatile_reg(map->dev, reg);
 
 	if (map->volatile_table)
-		return _regmap_check_range_table(map, reg, map->volatile_table);
+		return regmap_check_range_table(map, reg, map->volatile_table);
 
 	return true;
 }
@@ -135,7 +135,7 @@ bool regmap_precious(struct regmap *map, unsigned int reg)
 		return map->precious_reg(map->dev, reg);
 
 	if (map->precious_table)
-		return _regmap_check_range_table(map, reg, map->precious_table);
+		return regmap_check_range_table(map, reg, map->precious_table);
 
 	return false;
 }
@@ -302,13 +302,16 @@ static void regmap_unlock_mutex(void *__map)
 static void regmap_lock_spinlock(void *__map)
 {
 	struct regmap *map = __map;
-	spin_lock(&map->spinlock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&map->spinlock, flags);
+	map->spinlock_flags = flags;
 }
 
 static void regmap_unlock_spinlock(void *__map)
 {
 	struct regmap *map = __map;
-	spin_unlock(&map->spinlock);
+	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
 }
 
 static void dev_get_regmap_release(struct device *dev, void *res)
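With regmap_check_range_table() exported, a driver callback can combine a static access table with an extra runtime condition instead of open-coding the range walk. A minimal sketch, assuming an invented foo_priv/foo_rd_table and lock bit; only the regmap API calls are real:

	#include <linux/regmap.h>

	/* Hypothetical .readable_reg() callback */
	static bool foo_readable_reg(struct device *dev, unsigned int reg)
	{
		struct foo_priv *foo = dev_get_drvdata(dev);

		/* Registers above FOO_SECURE_BASE vanish once the part is locked */
		if (foo->locked && reg >= FOO_SECURE_BASE)
			return false;

		return regmap_check_range_table(foo->regmap, reg, &foo_rd_table);
	}

The spinlock change in the same file switches regmap's internal lock to spin_lock_irqsave()/spin_unlock_irqrestore(), saving the flags in the new spinlock_flags field added to struct regmap, so fast_io maps can be used from contexts where interrupts must stay disabled.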