drivers/base/regmap/regmap.c | +119 −181

@@ -1438,7 +1438,7 @@ static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
         buf[i] |= (mask >> (8 * i)) & 0xff;
 }
 
-int _regmap_raw_write(struct regmap *map, unsigned int reg,
-              const void *val, size_t val_len)
+static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
+                                  const void *val, size_t val_len)
 {
         struct regmap_range_node *range;

@@ -1490,7 +1490,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
                 while (val_num > win_residue) {
                         dev_dbg(map->dev, "Writing window %d/%zu\n",
                                 win_residue, val_len / map->format.val_bytes);
-                        ret = _regmap_raw_write(map, reg, val, win_residue *
-                                                map->format.val_bytes);
+                        ret = _regmap_raw_write_impl(map, reg, val,
+                                                     win_residue *
+                                                     map->format.val_bytes);
                         if (ret != 0)
                                 return ret;

@@ -1707,7 +1708,7 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
         map->format.format_val(map->work_buf + map->format.reg_bytes
                                + map->format.pad_bytes, val, 0);
-        return _regmap_raw_write(map, reg,
-                                 map->work_buf +
-                                 map->format.reg_bytes +
-                                 map->format.pad_bytes,
-                                 map->format.val_bytes);
+        return _regmap_raw_write_impl(map, reg,
+                                      map->work_buf +
+                                      map->format.reg_bytes +
+                                      map->format.pad_bytes,
+                                      map->format.val_bytes);

@@ -1806,6 +1807,44 @@ int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
 }
 EXPORT_SYMBOL_GPL(regmap_write_async);
 
+int _regmap_raw_write(struct regmap *map, unsigned int reg,
+                      const void *val, size_t val_len)
+{
+        size_t val_bytes = map->format.val_bytes;
+        size_t val_count = val_len / val_bytes;
+        size_t chunk_count, chunk_bytes;
+        size_t chunk_regs = val_count;
+        int ret, i;
+
+        if (!val_count)
+                return -EINVAL;
+
+        if (map->use_single_write)
+                chunk_regs = 1;
+        else if (map->max_raw_write && val_len > map->max_raw_write)
+                chunk_regs = map->max_raw_write / val_bytes;
+
+        chunk_count = val_count / chunk_regs;
+        chunk_bytes = chunk_regs * val_bytes;
+
+        /* Write as many bytes as possible with chunk_size */
+        for (i = 0; i < chunk_count; i++) {
+                ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
+                if (ret)
+                        return ret;
+
+                reg += regmap_get_offset(map, chunk_regs);
+                val += chunk_bytes;
+                val_len -= chunk_bytes;
+        }
+
+        /* Write remaining bytes */
+        if (val_len)
+                ret = _regmap_raw_write_impl(map, reg, val, val_len);
+
+        return ret;
+}
+
 /**
  * regmap_raw_write() - Write raw values to one or more registers
  *

@@ -1831,8 +1870,6 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
                 return -EINVAL;
         if (val_len % map->format.val_bytes)
                 return -EINVAL;
-        if (map->max_raw_write && map->max_raw_write < val_len)
-                return -E2BIG;
 
         map->lock(map->lock_arg);
 
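For illustration, here is a minimal user-space sketch of the chunking arithmetic the new _regmap_raw_write() wrapper uses above. The register width, transfer length, base register and max_raw_write limit are made-up example values, and printf() stands in for the real bus transfer; this is not kernel code.

#include <stdio.h>
#include <stddef.h>

int main(void)
{
        size_t val_bytes = 2;           /* 16-bit registers */
        size_t val_len = 26;            /* 13 registers' worth of data */
        size_t max_raw_write = 8;       /* hypothetical bus limit, in bytes */
        size_t reg = 0x10;              /* hypothetical base register */

        size_t val_count = val_len / val_bytes;
        size_t chunk_regs = val_count;

        /* Same clamping decision the wrapper makes */
        if (max_raw_write && val_len > max_raw_write)
                chunk_regs = max_raw_write / val_bytes;

        size_t chunk_count = val_count / chunk_regs;
        size_t chunk_bytes = chunk_regs * val_bytes;

        /* Whole chunks first, exactly like the for loop in the patch */
        for (size_t i = 0; i < chunk_count; i++) {
                printf("raw write: %zu bytes at reg 0x%zx\n", chunk_bytes, reg);
                reg += chunk_regs;      /* regmap_get_offset() with stride 1 */
                val_len -= chunk_bytes;
        }

        /* Then whatever is left over */
        if (val_len)
                printf("raw write: %zu trailing bytes at reg 0x%zx\n",
                       val_len, reg);

        return 0;
}

With these example numbers the 26-byte buffer becomes three 8-byte writes at 0x10, 0x14 and 0x18 plus a 2-byte tail at 0x1c, which is why the -E2BIG check in regmap_raw_write() below is no longer needed.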
@@ -1923,23 +1960,15 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
 {
         int ret = 0, i;
         size_t val_bytes = map->format.val_bytes;
-        size_t total_size = val_bytes * val_count;
 
         if (!IS_ALIGNED(reg, map->reg_stride))
                 return -EINVAL;
 
         /*
-         * Some devices don't support bulk write, for
-         * them we have a series of single write operations in the first two if
-         * blocks.
-         *
-         * The first if block is used for memory mapped io. It does not allow
-         * val_bytes of 3 for example.
-         * The second one is for busses that do not provide raw I/O.
-         * The third one is used for busses which do not have these limitations
-         * and can write arbitrary value lengths.
+         * Some devices don't support bulk write, for them we have a series of
+         * single write operations.
          */
-        if (!map->bus) {
+        if (!map->bus || !map->format.parse_inplace) {
                 map->lock(map->lock_arg);
                 for (i = 0; i < val_count; i++) {
                         unsigned int ival;

@@ -1972,81 +2001,17 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
                 }
 out:
                 map->unlock(map->lock_arg);
-        } else if (map->bus && !map->format.parse_inplace) {
-                const u8 *u8 = val;
-                const u16 *u16 = val;
-                const u32 *u32 = val;
-                unsigned int ival;
-
-                for (i = 0; i < val_count; i++) {
-                        switch (map->format.val_bytes) {
-                        case 4:
-                                ival = u32[i];
-                                break;
-                        case 2:
-                                ival = u16[i];
-                                break;
-                        case 1:
-                                ival = u8[i];
-                                break;
-                        default:
-                                return -EINVAL;
-                        }
-
-                        ret = regmap_write(map, reg + (i * map->reg_stride),
-                                           ival);
-                        if (ret)
-                                return ret;
-                }
-        } else if (map->use_single_write ||
-                   (map->max_raw_write && map->max_raw_write < total_size)) {
-                int chunk_stride = map->reg_stride;
-                size_t chunk_size = val_bytes;
-                size_t chunk_count = val_count;
-
-                if (!map->use_single_write) {
-                        chunk_size = map->max_raw_write;
-                        if (chunk_size % val_bytes)
-                                chunk_size -= chunk_size % val_bytes;
-                        chunk_count = total_size / chunk_size;
-                        chunk_stride *= chunk_size / val_bytes;
-                }
-
-                map->lock(map->lock_arg);
-                /* Write as many bytes as possible with chunk_size */
-                for (i = 0; i < chunk_count; i++) {
-                        ret = _regmap_raw_write(map,
-                                                reg + (i * chunk_stride),
-                                                val + (i * chunk_size),
-                                                chunk_size);
-                        if (ret)
-                                break;
-                }
-
-                /* Write remaining bytes */
-                if (!ret && chunk_size * i < total_size) {
-                        ret = _regmap_raw_write(map, reg + (i * chunk_stride),
-                                                val + (i * chunk_size),
-                                                total_size - i * chunk_size);
-                }
-                map->unlock(map->lock_arg);
         } else {
                 void *wval;
 
-                if (!val_count)
-                        return -EINVAL;
-
                 wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
-                if (!wval) {
-                        dev_err(map->dev, "Error in memory allocation\n");
+                if (!wval)
                         return -ENOMEM;
-                }
+
                 for (i = 0; i < val_count * val_bytes; i += val_bytes)
                         map->format.parse_inplace(wval + i);
 
-                map->lock(map->lock_arg);
-                ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
-                map->unlock(map->lock_arg);
+                ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);
 
                 kfree(wval);
         }
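The surviving else branch of regmap_bulk_write() now just duplicates the caller's buffer, converts each value to device byte order with parse_inplace(), and hands the whole buffer to regmap_raw_write(), which does its own locking and chunking. A rough user-space sketch of that pattern, assuming a hypothetical big-endian 16-bit device; to_be16_inplace() and fake_raw_write() are illustrative stand-ins, not regmap APIs.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for map->format.parse_inplace on a big-endian 16-bit device */
static void to_be16_inplace(void *p)
{
        uint8_t *b = p;
        uint16_t v;

        memcpy(&v, p, sizeof(v));
        b[0] = v >> 8;
        b[1] = v & 0xff;
}

/* Stand-in for the single raw transfer regmap_raw_write() ends up doing */
static void fake_raw_write(unsigned int reg, const void *buf, size_t len)
{
        const uint8_t *b = buf;

        printf("raw write @0x%x:", reg);
        for (size_t i = 0; i < len; i++)
                printf(" %02x", b[i]);
        printf("\n");
}

int main(void)
{
        const uint16_t val[] = { 0x1234, 0xabcd, 0x00ff };
        size_t val_bytes = sizeof(val[0]);
        size_t val_count = sizeof(val) / val_bytes;

        /* kmemdup() equivalent: never byte-swap the caller's own buffer */
        uint8_t *wval = malloc(val_count * val_bytes);
        if (!wval)
                return 1;
        memcpy(wval, val, val_count * val_bytes);

        for (size_t i = 0; i < val_count * val_bytes; i += val_bytes)
                to_be16_inplace(wval + i);

        fake_raw_write(0x20, wval, val_count * val_bytes);
        free(wval);
        return 0;
}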
@@ -2542,18 +2507,39 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
         if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
             map->cache_type == REGCACHE_NONE) {
+                size_t chunk_count, chunk_bytes;
+                size_t chunk_regs = val_count;
+
                 if (!map->bus->read) {
                         ret = -ENOTSUPP;
                         goto out;
                 }
-                if (map->max_raw_read && map->max_raw_read < val_len) {
-                        ret = -E2BIG;
-                        goto out;
-                }
 
-                /* Physical block read if there's no cache involved */
-                ret = _regmap_raw_read(map, reg, val, val_len);
-
+                if (map->use_single_read)
+                        chunk_regs = 1;
+                else if (map->max_raw_read && val_len > map->max_raw_read)
+                        chunk_regs = map->max_raw_read / val_bytes;
+
+                chunk_count = val_count / chunk_regs;
+                chunk_bytes = chunk_regs * val_bytes;
+
+                /* Read bytes that fit into whole chunks */
+                for (i = 0; i < chunk_count; i++) {
+                        ret = _regmap_raw_read(map, reg, val, chunk_bytes);
+                        if (ret != 0)
+                                goto out;
+
+                        reg += regmap_get_offset(map, chunk_regs);
+                        val += chunk_bytes;
+                        val_len -= chunk_bytes;
+                }
+
+                /* Read remaining bytes */
+                if (val_len) {
+                        ret = _regmap_raw_read(map, reg, val, val_len);
+                        if (ret != 0)
+                                goto out;
+                }
         } else {
                 /* Otherwise go word by word for the cache; should be low
                  * cost as we expect to hit the cache.
                  */

@@ -2653,78 +2639,17 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
         if (!IS_ALIGNED(reg, map->reg_stride))
                 return -EINVAL;
 
+        if (val_count == 0)
+                return -EINVAL;
+
         if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
-                /*
-                 * Some devices does not support bulk read, for
-                 * them we have a series of single read operations.
-                 */
-                size_t total_size = val_bytes * val_count;
-
-                if (!map->use_single_read &&
-                    (!map->max_raw_read || map->max_raw_read > total_size)) {
-                        ret = regmap_raw_read(map, reg, val,
-                                              val_bytes * val_count);
-                        if (ret != 0)
-                                return ret;
-                } else {
-                        /*
-                         * Some devices do not support bulk read or do not
-                         * support large bulk reads, for them we have a series
-                         * of read operations.
-                         */
-                        int chunk_stride = map->reg_stride;
-                        size_t chunk_size = val_bytes;
-                        size_t chunk_count = val_count;
-
-                        if (!map->use_single_read) {
-                                chunk_size = map->max_raw_read;
-                                if (chunk_size % val_bytes)
-                                        chunk_size -= chunk_size % val_bytes;
-                                chunk_count = total_size / chunk_size;
-                                chunk_stride *= chunk_size / val_bytes;
-                        }
-
-                        /* Read bytes that fit into a multiple of chunk_size */
-                        for (i = 0; i < chunk_count; i++) {
-                                ret = regmap_raw_read(map,
-                                                      reg + (i * chunk_stride),
-                                                      val + (i * chunk_size),
-                                                      chunk_size);
-                                if (ret != 0)
-                                        return ret;
-                        }
-
-                        /* Read remaining bytes */
-                        if (chunk_size * i < total_size) {
-                                ret = regmap_raw_read(map,
-                                                      reg + (i * chunk_stride),
-                                                      val + (i * chunk_size),
-                                                      total_size - i * chunk_size);
-                                if (ret != 0)
-                                        return ret;
-                        }
-                }
+                ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
+                if (ret != 0)
+                        return ret;
 
                 for (i = 0; i < val_count * val_bytes; i += val_bytes)
                         map->format.parse_inplace(val + i);
         } else {
-                for (i = 0; i < val_count; i++) {
-                        unsigned int ival;
-
-                        ret = regmap_read(map, reg + regmap_get_offset(map, i),
-                                          &ival);
-                        if (ret != 0)
-                                return ret;
-
-                        if (map->format.format_val) {
-                                map->format.format_val(val + (i * val_bytes), ival, 0);
-                        } else {
-                                /* Devices providing read and write
-                                 * operations can use the bulk I/O
-                                 * functions if they define a val_bytes,
-                                 * we assume that the values are native
-                                 * endian.
-                                 */
 #ifdef CONFIG_64BIT
-                                u64 *u64 = val;
+                u64 *u64 = val;
 #endif
@@ -2732,6 +2657,16 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
-                                u16 *u16 = val;
-                                u8 *u8 = val;
+                u16 *u16 = val;
+                u8 *u8 = val;
 
-                                switch (map->format.val_bytes) {
+                map->lock(map->lock_arg);
+
+                for (i = 0; i < val_count; i++) {
+                        unsigned int ival;
+
+                        ret = _regmap_read(map, reg + regmap_get_offset(map, i),
+                                           &ival);
+                        if (ret != 0)
+                                goto out;
+
+                        switch (map->format.val_bytes) {
 #ifdef CONFIG_64BIT
-                                case 8:
+                        case 8:

@@ -2748,13 +2683,16 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
-                                        u8[i] = ival;
-                                        break;
-                                default:
-                                        return -EINVAL;
-                                }
-                        }
-                }
-        }
+                                u8[i] = ival;
+                                break;
+                        default:
+                                ret = -EINVAL;
+                                goto out;
+                        }
+                }
+
+out:
+                map->unlock(map->lock_arg);
+        }
 
-        return 0;
+        return ret;
 }
 EXPORT_SYMBOL_GPL(regmap_bulk_read);
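The word-by-word fallback at the end of regmap_bulk_read() now reads each register into an unsigned int with _regmap_read() under the map lock and scatters it into the caller's buffer at native endianness, switching on val_bytes. A small stand-alone sketch of that scatter step; store_native() is an illustrative helper, not part of the regmap API.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Mirrors the switch in the patch: store one value at its element width */
static int store_native(void *buf, size_t val_bytes, size_t i, unsigned int ival)
{
        uint64_t *u64 = buf;
        uint32_t *u32 = buf;
        uint16_t *u16 = buf;
        uint8_t *u8 = buf;

        switch (val_bytes) {
        case 8:
                u64[i] = ival;
                break;
        case 4:
                u32[i] = ival;
                break;
        case 2:
                u16[i] = ival;
                break;
        case 1:
                u8[i] = ival;
                break;
        default:
                return -1;      /* unsupported width, like the -EINVAL above */
        }
        return 0;
}

int main(void)
{
        uint16_t regs[3];
        unsigned int fake_reads[3] = { 0x1234, 0xbeef, 0x00aa };

        for (size_t i = 0; i < 3; i++)
                store_native(regs, sizeof(regs[0]), i, fake_reads[i]);

        for (size_t i = 0; i < 3; i++)
                printf("regs[%zu] = 0x%04x\n", i, regs[i]);

        return 0;
}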