Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e41166c5 authored by Michael Lyle, committed by Jens Axboe
Browse files

bcache: writeback rate shouldn't artificially clamp



The previous code artificially limited writeback rate to 1000000
blocks/second (NSEC_PER_MSEC), which is a rate that can be met on fast
hardware.  The rate limiting code works fine (though with decreased
precision) up to 3 orders of magnitude faster, so use NSEC_PER_SEC.

Additionally, ensure that uint32_t is used as a type for rate throughout
the rate management so that type checking/clamp_t can work properly.

bch_next_delay should be rewritten for increased precision and better
handling of high rates and long sleep periods, but this is adequate for
now.

Signed-off-by: Michael Lyle <mlyle@lyle.org>
Reported-by: Coly Li <colyli@suse.de>
Reviewed-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ae82ddbf
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -361,7 +361,7 @@ struct cached_dev {
	int64_t			writeback_rate_proportional;
	int64_t			writeback_rate_integral;
	int64_t			writeback_rate_integral_scaled;
	int64_t			writeback_rate_change;
	int32_t			writeback_rate_change;

	unsigned		writeback_rate_update_seconds;
	unsigned		writeback_rate_i_term_inverse;
+2 −2
Original line number Diff line number Diff line
@@ -441,10 +441,10 @@ struct bch_ratelimit {
	uint64_t		next;

	/*
	 * Rate at which we want to do work, in units per nanosecond
	 * Rate at which we want to do work, in units per second
	 * The units here correspond to the units passed to bch_next_delay()
	 */
	unsigned		rate;
	uint32_t		rate;
};

static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
+4 −3
Original line number Diff line number Diff line
@@ -52,7 +52,8 @@ static void __update_writeback_rate(struct cached_dev *dc)
	int64_t error = dirty - target;
	int64_t proportional_scaled =
		div_s64(error, dc->writeback_rate_p_term_inverse);
	int64_t integral_scaled, new_rate;
	int64_t integral_scaled;
	uint32_t new_rate;

	if ((error < 0 && dc->writeback_rate_integral > 0) ||
	    (error > 0 && time_before64(local_clock(),
@@ -74,8 +75,8 @@ static void __update_writeback_rate(struct cached_dev *dc)
	integral_scaled = div_s64(dc->writeback_rate_integral,
			dc->writeback_rate_i_term_inverse);

	new_rate = clamp_t(int64_t, (proportional_scaled + integral_scaled),
			dc->writeback_rate_minimum, NSEC_PER_MSEC);
	new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
			dc->writeback_rate_minimum, NSEC_PER_SEC);

	dc->writeback_rate_proportional = proportional_scaled;
	dc->writeback_rate_integral_scaled = integral_scaled;