drivers/base/regmap/regmap-irq.c  +99 −26

@@ -34,6 +34,7 @@ struct regmap_irq_chip_data {
 	int irq;
 	int wake_count;
 
+	void *status_reg_buf;
 	unsigned int *status_buf;
 	unsigned int *mask_buf;
 	unsigned int *mask_buf_def;

@@ -87,6 +88,23 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
 		if (ret != 0)
 			dev_err(d->map->dev, "Failed to sync masks in %x\n",
 				reg);
+
+		reg = d->chip->wake_base +
+			(i * map->reg_stride * d->irq_reg_stride);
+		if (d->wake_buf) {
+			if (d->chip->wake_invert)
+				ret = regmap_update_bits(d->map, reg,
+							 d->mask_buf_def[i],
+							 ~d->wake_buf[i]);
+			else
+				ret = regmap_update_bits(d->map, reg,
+							 d->mask_buf_def[i],
+							 d->wake_buf[i]);
+			if (ret != 0)
+				dev_err(d->map->dev,
+					"Failed to sync wakes in %x: %d\n",
+					reg, ret);
+		}
 	}
 
 	if (d->chip->runtime_pm)

@@ -129,14 +147,13 @@ static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
 	struct regmap *map = d->map;
 	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
 
-	if (!d->chip->wake_base)
-		return -EINVAL;
-
 	if (on) {
-		d->wake_buf[irq_data->reg_offset / map->reg_stride]
-			&= ~irq_data->mask;
+		if (d->wake_buf)
+			d->wake_buf[irq_data->reg_offset / map->reg_stride]
+				&= ~irq_data->mask;
 		d->wake_count++;
 	} else {
-		d->wake_buf[irq_data->reg_offset / map->reg_stride]
-			|= irq_data->mask;
+		if (d->wake_buf)
+			d->wake_buf[irq_data->reg_offset / map->reg_stride]
+				|= irq_data->mask;
 		d->wake_count--;

@@ -172,25 +189,69 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
 	}
 
-	/*
-	 * Ignore masked IRQs and ack if we need to; we ack early so
-	 * there is no race between handling and acknowleding the
-	 * interrupt. We assume that typically few of the interrupts
-	 * will fire simultaneously so don't worry about overhead from
-	 * doing a write per register.
-	 */
-	for (i = 0; i < data->chip->num_regs; i++) {
-		ret = regmap_read(map, chip->status_base + (i * map->reg_stride
-				   * data->irq_reg_stride),
-				   &data->status_buf[i]);
-
-		if (ret != 0) {
-			dev_err(map->dev, "Failed to read IRQ status: %d\n",
-				ret);
-			if (chip->runtime_pm)
-				pm_runtime_put(map->dev);
-			return IRQ_NONE;
-		}
-
+	/*
+	 * Read in the statuses, using a single bulk read if possible
+	 * in order to reduce the I/O overheads.
+	 */
+	if (!map->use_single_rw && map->reg_stride == 1 &&
+	    data->irq_reg_stride == 1) {
+		u8 *buf8 = data->status_reg_buf;
+		u16 *buf16 = data->status_reg_buf;
+		u32 *buf32 = data->status_reg_buf;
+
+		BUG_ON(!data->status_reg_buf);
+
+		ret = regmap_bulk_read(map, chip->status_base,
+				       data->status_reg_buf,
+				       chip->num_regs);
+		if (ret != 0) {
+			dev_err(map->dev, "Failed to read IRQ status: %d\n",
+				ret);
+			return IRQ_NONE;
+		}
+
+		for (i = 0; i < data->chip->num_regs; i++) {
+			switch (map->format.val_bytes) {
+			case 1:
+				data->status_buf[i] = buf8[i];
+				break;
+			case 2:
+				data->status_buf[i] = buf16[i];
+				break;
+			case 4:
+				data->status_buf[i] = buf32[i];
+				break;
+			default:
+				BUG();
+				return IRQ_NONE;
+			}
+		}
+
+	} else {
+		for (i = 0; i < data->chip->num_regs; i++) {
+			ret = regmap_read(map, chip->status_base +
+					  (i * map->reg_stride
+					   * data->irq_reg_stride),
+					  &data->status_buf[i]);
+
+			if (ret != 0) {
+				dev_err(map->dev,
+					"Failed to read IRQ status: %d\n",
+					ret);
+				if (chip->runtime_pm)
+					pm_runtime_put(map->dev);
+				return IRQ_NONE;
+			}
+		}
+	}
+
+	/*
+	 * Ignore masked IRQs and ack if we need to; we ack early so
+	 * there is no race between handling and acknowleding the
+	 * interrupt. We assume that typically few of the interrupts
+	 * will fire simultaneously so don't worry about overhead from
+	 * doing a write per register.
+	 */
+	for (i = 0; i < data->chip->num_regs; i++) {
 		data->status_buf[i] &= ~data->mask_buf[i];
 
 		if (data->status_buf[i] && chip->ack_base) {

@@ -316,11 +377,6 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
 	d->irq_chip = regmap_irq_chip;
 	d->irq_chip.name = chip->name;
 
-	if (!chip->wake_base) {
-		d->irq_chip.irq_set_wake = NULL;
-		d->irq_chip.flags |= IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE;
-	}
-
 	d->irq = irq;
 	d->map = map;
 	d->chip = chip;

@@ -331,6 +387,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
 	else
 		d->irq_reg_stride = 1;
 
+	if (!map->use_single_rw && map->reg_stride == 1 &&
+	    d->irq_reg_stride == 1) {
+		d->status_reg_buf = kmalloc(map->format.val_bytes *
+					    chip->num_regs, GFP_KERNEL);
+		if (!d->status_reg_buf)
+			goto err_alloc;
+	}
+
 	mutex_init(&d->lock);
 
 	for (i = 0; i < chip->num_irqs; i++)

@@ -361,7 +425,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
 			d->wake_buf[i] = d->mask_buf_def[i];
 			reg = chip->wake_base +
 				(i * map->reg_stride * d->irq_reg_stride);
-			ret = regmap_update_bits(map, reg, d->wake_buf[i],
-						 d->wake_buf[i]);
+
+			if (chip->wake_invert)
+				ret = regmap_update_bits(map, reg,
+							 d->mask_buf_def[i],
+							 0);
+			else
+				ret = regmap_update_bits(map, reg,
+							 d->mask_buf_def[i],
+							 d->wake_buf[i]);
 			if (ret != 0) {
 				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",

@@ -401,6 +472,7 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
 	kfree(d->mask_buf_def);
 	kfree(d->mask_buf);
 	kfree(d->status_buf);
+	kfree(d->status_reg_buf);
 	kfree(d);
 	return ret;
 }

@@ -422,6 +494,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
 	kfree(d->wake_buf);
 	kfree(d->mask_buf_def);
 	kfree(d->mask_buf);
+	kfree(d->status_reg_buf);
 	kfree(d->status_buf);
 	kfree(d);
 }
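Note on the fast path above: regmap_bulk_read() fills the raw status_reg_buf, and the values then have to be widened into the unsigned int status_buf[] slots according to the map's value width. The standalone sketch below illustrates only that widening step; widen_status() and the test values are hypothetical and not part of the patch.

/* Minimal userspace sketch of the width-dependent widening done after the
 * bulk read: one flat raw buffer is reinterpreted per val_bytes and copied
 * into per-register unsigned int slots.  Not the kernel code itself.
 */
#include <stdint.h>
#include <stdio.h>

static int widen_status(const void *raw, size_t val_bytes,
			unsigned int *status, size_t num_regs)
{
	const uint8_t *buf8 = raw;
	const uint16_t *buf16 = raw;
	const uint32_t *buf32 = raw;
	size_t i;

	for (i = 0; i < num_regs; i++) {
		switch (val_bytes) {
		case 1:
			status[i] = buf8[i];
			break;
		case 2:
			status[i] = buf16[i];
			break;
		case 4:
			status[i] = buf32[i];
			break;
		default:
			return -1;	/* unsupported register width */
		}
	}
	return 0;
}

int main(void)
{
	/* Pretend a bulk read returned two 16-bit status registers. */
	uint16_t raw[2] = { 0x0101, 0x8000 };
	unsigned int status[2];

	if (widen_status(raw, sizeof(raw[0]), status, 2) == 0)
		printf("status: 0x%x 0x%x\n", status[0], status[1]);
	return 0;
}

The slow path keeps the old one-register-at-a-time regmap_read() loop for maps with use_single_rw set or non-trivial register strides, where a single bulk read is not possible.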
drivers/mfd/wm5102-tables.c  +1 −0

@@ -96,6 +96,7 @@ const struct regmap_irq_chip wm5102_aod = {
 	.mask_base = ARIZONA_AOD_IRQ_MASK_IRQ1,
 	.ack_base = ARIZONA_AOD_IRQ1,
 	.wake_base = ARIZONA_WAKE_CONTROL,
+	.wake_invert = 1,
 	.num_regs = 1,
 	.irqs = wm5102_aod_irqs,
 	.num_irqs = ARRAY_SIZE(wm5102_aod_irqs),
drivers/mfd/wm5110-tables.c  +1 −0

@@ -255,6 +255,7 @@ const struct regmap_irq_chip wm5110_aod = {
 	.mask_base = ARIZONA_AOD_IRQ_MASK_IRQ1,
 	.ack_base = ARIZONA_AOD_IRQ1,
 	.wake_base = ARIZONA_WAKE_CONTROL,
+	.wake_invert = 1,
 	.num_regs = 1,
 	.irqs = wm5110_aod_irqs,
 	.num_irqs = ARRAY_SIZE(wm5110_aod_irqs),
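Both Arizona AOD descriptors mark their WAKE_CONTROL register with the new wake_invert flag: on these parts a set bit in wake_base enables the wake source rather than masking it, so the core writes the complement of its cached wake mask when syncing. The sketch below only illustrates that value selection; wake_reg_value() and the example values are invented and not part of the kernel code.

/* Sketch of how wake_invert changes the value that regmap_irq_sync_unlock()
 * ends up writing to the wake register.  regmap_update_bits(map, reg,
 * mask_def, val) only touches the bits in mask_def; wake_invert simply
 * complements the value.  Register layout and values are invented.
 */
#include <stdio.h>

static unsigned int wake_reg_value(unsigned int wake_buf,
				   unsigned int mask_def,
				   int wake_invert)
{
	unsigned int val = wake_invert ? ~wake_buf : wake_buf;

	return val & mask_def;
}

int main(void)
{
	/* One wake source (bit 0) enabled, i.e. cleared in wake_buf. */
	unsigned int wake_buf = 0xfe, mask_def = 0xff;

	printf("normal:   0x%02x\n", wake_reg_value(wake_buf, mask_def, 0));
	printf("inverted: 0x%02x\n", wake_reg_value(wake_buf, mask_def, 1));
	return 0;
}

For these example values the normal case writes 0xfe (mask-style register, only bit 0 unmasked) while the inverted case writes 0x01 (enable-style register, only bit 0 set), matching the ~d->wake_buf[i] write in regmap_irq_sync_unlock().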
include/linux/regmap.h  +1 −0

@@ -398,6 +398,7 @@ struct regmap_irq_chip {
 	unsigned int wake_base;
 	unsigned int irq_reg_stride;
 	unsigned int mask_invert;
+	unsigned int wake_invert;
 	bool runtime_pm;
 
 	int num_regs;
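For drivers, the new field is just one more line in the regmap_irq_chip descriptor; registration with regmap_add_irq_chip() is unchanged. Below is a hedged sketch modelled on the wm5102_aod table above, with hypothetical register addresses, IRQ masks and my_* names.

/* Hypothetical chip descriptor using the new wake_invert field.  Only the
 * descriptor and the registration call are real regmap APIs; the register
 * addresses and identifiers are made up for illustration.
 */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct regmap_irq my_irqs[] = {
	{ .reg_offset = 0, .mask = 0x01 },
	{ .reg_offset = 0, .mask = 0x02 },
};

static const struct regmap_irq_chip my_irq_chip = {
	.name		= "my-chip",
	.status_base	= 0x10,		/* hypothetical status register */
	.mask_base	= 0x11,		/* hypothetical mask register */
	.ack_base	= 0x10,
	.wake_base	= 0x12,		/* hypothetical wake-control register */
	.wake_invert	= 1,		/* set bits in wake_base *enable* wake */
	.num_regs	= 1,
	.irqs		= my_irqs,
	.num_irqs	= ARRAY_SIZE(my_irqs),
};

/* Registration is unchanged; wake_invert only affects how the core writes
 * wake_base when interrupts are flagged for wakeup.
 */
static int my_probe_irq(struct regmap *map, int irq,
			struct regmap_irq_chip_data **data)
{
	return regmap_add_irq_chip(map, irq, IRQF_ONESHOT, 0,
				   &my_irq_chip, data);
}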