
Commit 84c7afce authored by Omar Sandoval, committed by Jens Axboe

block: use ktime_get_ns() instead of sched_clock() for cfq and bfq



cfq and bfq have some internal fields that use sched_clock() which can
trivially use ktime_get_ns() instead. Their timestamp fields in struct
request can also use ktime_get_ns(), which resolves the 8-year-old
comment added by commit 28f4197e ("block: disable preemption before
using sched_clock()").

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 544ccc8d
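
For readers unfamiliar with the two clocks: sched_clock() is a fast, per-CPU clock whose readings are not guaranteed to be comparable across CPUs, which is why the old blkdev.h helpers wrapped it in preempt_disable()/preempt_enable(); ktime_get_ns() reads the global monotonic clock, so timestamps taken on different CPUs order correctly and a plain > comparison can replace time_after64(). A minimal sketch of the before/after pattern — the stamp_sched_clock()/stamp_ktime() helper names are illustrative, not from this patch:

#include <linux/types.h>	/* u64 */
#include <linux/preempt.h>	/* preempt_disable()/preempt_enable() */
#include <linux/sched/clock.h>	/* sched_clock() */
#include <linux/ktime.h>	/* ktime_get_ns() */

/* Old pattern: pin to one CPU so the sched_clock() reading is
 * self-consistent, as the comment removed from blkdev.h warned. */
static inline u64 stamp_sched_clock(void)
{
	u64 t;

	preempt_disable();
	t = sched_clock();
	preempt_enable();
	return t;
}

/* New pattern: monotonic nanoseconds, valid across CPUs, so later
 * readers can compare values with a plain '>' instead of time_after64(). */
static inline u64 stamp_ktime(void)
{
	return ktime_get_ns();
}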
block/bfq-cgroup.c  +20 −20
@@ -55,13 +55,13 @@ BFQG_FLAG_FNS(empty)
 /* This should be called with the scheduler lock held. */
 static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
 {
-	unsigned long long now;
+	u64 now;
 
 	if (!bfqg_stats_waiting(stats))
 		return;
 
-	now = sched_clock();
-	if (time_after64(now, stats->start_group_wait_time))
+	now = ktime_get_ns();
+	if (now > stats->start_group_wait_time)
 		blkg_stat_add(&stats->group_wait_time,
 			      now - stats->start_group_wait_time);
 	bfqg_stats_clear_waiting(stats);
@@ -77,20 +77,20 @@ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
 		return;
 	if (bfqg == curr_bfqg)
 		return;
-	stats->start_group_wait_time = sched_clock();
+	stats->start_group_wait_time = ktime_get_ns();
 	bfqg_stats_mark_waiting(stats);
 }
 
 /* This should be called with the scheduler lock held. */
 static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
 {
-	unsigned long long now;
+	u64 now;
 
 	if (!bfqg_stats_empty(stats))
 		return;
 
-	now = sched_clock();
-	if (time_after64(now, stats->start_empty_time))
+	now = ktime_get_ns();
+	if (now > stats->start_empty_time)
 		blkg_stat_add(&stats->empty_time,
 			      now - stats->start_empty_time);
 	bfqg_stats_clear_empty(stats);
@@ -116,7 +116,7 @@ void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
 	if (bfqg_stats_empty(stats))
 		return;
 
-	stats->start_empty_time = sched_clock();
+	stats->start_empty_time = ktime_get_ns();
 	bfqg_stats_mark_empty(stats);
 }
 
@@ -125,9 +125,9 @@ void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
 	struct bfqg_stats *stats = &bfqg->stats;
 
 	if (bfqg_stats_idling(stats)) {
-		unsigned long long now = sched_clock();
+		u64 now = ktime_get_ns();
 
-		if (time_after64(now, stats->start_idle_time))
+		if (now > stats->start_idle_time)
 			blkg_stat_add(&stats->idle_time,
 				      now - stats->start_idle_time);
 		bfqg_stats_clear_idling(stats);
@@ -138,7 +138,7 @@ void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
 {
 	struct bfqg_stats *stats = &bfqg->stats;
 
-	stats->start_idle_time = sched_clock();
+	stats->start_idle_time = ktime_get_ns();
 	bfqg_stats_mark_idling(stats);
 }
 
@@ -171,18 +171,18 @@ void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
 	blkg_rwstat_add(&bfqg->stats.merged, op, 1);
 }
 
-void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
-				  uint64_t io_start_time, unsigned int op)
+void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
+				  u64 io_start_time_ns, unsigned int op)
 {
 	struct bfqg_stats *stats = &bfqg->stats;
-	unsigned long long now = sched_clock();
+	u64 now = ktime_get_ns();
 
-	if (time_after64(now, io_start_time))
+	if (now > io_start_time_ns)
 		blkg_rwstat_add(&stats->service_time, op,
-				now - io_start_time);
-	if (time_after64(io_start_time, start_time))
+				now - io_start_time_ns);
+	if (io_start_time_ns > start_time_ns)
 		blkg_rwstat_add(&stats->wait_time, op,
-				io_start_time - start_time);
+				io_start_time_ns - start_time_ns);
 }
 
 #else /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
@@ -191,8 +191,8 @@ void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
 			      unsigned int op) { }
 void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
 void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
-void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
-				  uint64_t io_start_time, unsigned int op) { }
+void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
+				  u64 io_start_time_ns, unsigned int op) { }
 void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
 void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
 void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
block/bfq-iosched.h  +5 −5
@@ -732,9 +732,9 @@ struct bfqg_stats {
 	/* total time with empty current active q with other requests queued */
 	struct blkg_stat		empty_time;
 	/* fields after this shouldn't be cleared on stat reset */
-	uint64_t			start_group_wait_time;
-	uint64_t			start_idle_time;
-	uint64_t			start_empty_time;
+	u64				start_group_wait_time;
+	u64				start_idle_time;
+	u64				start_empty_time;
 	uint16_t			flags;
 #endif	/* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
 };
@@ -856,8 +856,8 @@ void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
 			      unsigned int op);
 void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op);
 void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op);
-void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
-				  uint64_t io_start_time, unsigned int op);
+void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
+				  u64 io_start_time_ns, unsigned int op);
 void bfqg_stats_update_dequeue(struct bfq_group *bfqg);
 void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg);
 void bfqg_stats_update_idle_time(struct bfq_group *bfqg);
block/cfq-iosched.c  +26 −23
@@ -210,9 +210,9 @@ struct cfqg_stats {
 	/* total time with empty current active q with other requests queued */
 	struct blkg_stat		empty_time;
 	/* fields after this shouldn't be cleared on stat reset */
-	uint64_t			start_group_wait_time;
-	uint64_t			start_idle_time;
-	uint64_t			start_empty_time;
+	u64				start_group_wait_time;
+	u64				start_idle_time;
+	u64				start_empty_time;
 	uint16_t			flags;
 #endif	/* CONFIG_DEBUG_BLK_CGROUP */
 #endif	/* CONFIG_CFQ_GROUP_IOSCHED */
@@ -491,13 +491,13 @@ CFQG_FLAG_FNS(empty)
 /* This should be called with the queue_lock held. */
 static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
 {
-	unsigned long long now;
+	u64 now;
 
 	if (!cfqg_stats_waiting(stats))
 		return;
 
-	now = sched_clock();
-	if (time_after64(now, stats->start_group_wait_time))
+	now = ktime_get_ns();
+	if (now > stats->start_group_wait_time)
 		blkg_stat_add(&stats->group_wait_time,
 			      now - stats->start_group_wait_time);
 	cfqg_stats_clear_waiting(stats);
@@ -513,20 +513,20 @@ static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
 		return;
 	if (cfqg == curr_cfqg)
 		return;
-	stats->start_group_wait_time = sched_clock();
+	stats->start_group_wait_time = ktime_get_ns();
 	cfqg_stats_mark_waiting(stats);
 }
 
 /* This should be called with the queue_lock held. */
 static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
 {
-	unsigned long long now;
+	u64 now;
 
 	if (!cfqg_stats_empty(stats))
 		return;
 
-	now = sched_clock();
-	if (time_after64(now, stats->start_empty_time))
+	now = ktime_get_ns();
+	if (now > stats->start_empty_time)
 		blkg_stat_add(&stats->empty_time,
 			      now - stats->start_empty_time);
 	cfqg_stats_clear_empty(stats);
@@ -552,7 +552,7 @@ static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
 	if (cfqg_stats_empty(stats))
 		return;
 
-	stats->start_empty_time = sched_clock();
+	stats->start_empty_time = ktime_get_ns();
 	cfqg_stats_mark_empty(stats);
 }
 
@@ -561,9 +561,9 @@ static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
 	struct cfqg_stats *stats = &cfqg->stats;
 
 	if (cfqg_stats_idling(stats)) {
-		unsigned long long now = sched_clock();
+		u64 now = ktime_get_ns();
 
-		if (time_after64(now, stats->start_idle_time))
+		if (now > stats->start_idle_time)
 			blkg_stat_add(&stats->idle_time,
 				      now - stats->start_idle_time);
 		cfqg_stats_clear_idling(stats);
@@ -576,7 +576,7 @@ static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
 
 	BUG_ON(cfqg_stats_idling(stats));
 
-	stats->start_idle_time = sched_clock();
+	stats->start_idle_time = ktime_get_ns();
 	cfqg_stats_mark_idling(stats);
 }
 
@@ -701,17 +701,19 @@ static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
 }
 
 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
-			uint64_t start_time, uint64_t io_start_time,
+						u64 start_time_ns,
+						u64 io_start_time_ns,
 						unsigned int op)
 {
 	struct cfqg_stats *stats = &cfqg->stats;
-	unsigned long long now = sched_clock();
+	u64 now = ktime_get_ns();
 
-	if (time_after64(now, io_start_time))
-		blkg_rwstat_add(&stats->service_time, op, now - io_start_time);
-	if (time_after64(io_start_time, start_time))
+	if (now > io_start_time_ns)
+		blkg_rwstat_add(&stats->service_time, op,
+				now - io_start_time_ns);
+	if (io_start_time_ns > start_time_ns)
 		blkg_rwstat_add(&stats->wait_time, op,
-				io_start_time - start_time);
+				io_start_time_ns - start_time_ns);
 }
 
 /* @stats = 0 */
@@ -797,7 +799,8 @@ static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
 static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
 			unsigned int op) { }
 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
-			uint64_t start_time, uint64_t io_start_time,
+						u64 start_time_ns,
+						u64 io_start_time_ns,
 						unsigned int op) { }
 
 #endif	/* CONFIG_CFQ_GROUP_IOSCHED */
include/linux/blkdev.h  +6 −15
@@ -1799,42 +1799,33 @@ int kblockd_schedule_work_on(int cpu, struct work_struct *work);
 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
 
 #ifdef CONFIG_BLK_CGROUP
-/*
- * This should not be using sched_clock(). A real patch is in progress
- * to fix this up, until that is in place we need to disable preemption
- * around sched_clock() in this function and set_io_start_time_ns().
- */
 static inline void set_start_time_ns(struct request *req)
 {
-	preempt_disable();
-	req->cgroup_start_time_ns = sched_clock();
-	preempt_enable();
+	req->cgroup_start_time_ns = ktime_get_ns();
 }
 
 static inline void set_io_start_time_ns(struct request *req)
 {
-	preempt_disable();
-	req->cgroup_io_start_time_ns = sched_clock();
-	preempt_enable();
+	req->cgroup_io_start_time_ns = ktime_get_ns();
 }
 
-static inline uint64_t rq_start_time_ns(struct request *req)
+static inline u64 rq_start_time_ns(struct request *req)
{
 	return req->cgroup_start_time_ns;
 }
 
-static inline uint64_t rq_io_start_time_ns(struct request *req)
+static inline u64 rq_io_start_time_ns(struct request *req)
 {
 	return req->cgroup_io_start_time_ns;
 }
 #else
 static inline void set_start_time_ns(struct request *req) {}
 static inline void set_io_start_time_ns(struct request *req) {}
-static inline uint64_t rq_start_time_ns(struct request *req)
+static inline u64 rq_start_time_ns(struct request *req)
 {
 	return 0;
 }
-static inline uint64_t rq_io_start_time_ns(struct request *req)
+static inline u64 rq_io_start_time_ns(struct request *req)
 {
 	return 0;
 }
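
As a usage note, the rq_start_time_ns()/rq_io_start_time_ns() accessors above are what the schedulers' completion paths feed into cfqg_stats_update_completion()/bfqg_stats_update_completion(). A hedged sketch of that call pattern, simplified from cfq_completed_request() in block/cfq-iosched.c (it would only build inside that file, where struct cfq_group and the stats helper are defined); after this patch all three values come from ktime_get_ns(), so queue wait time is io_start − start and device service time is now − io_start:

#include <linux/blkdev.h>	/* struct request, rq_*_time_ns() */

/* Sketch: on completion, report wait time (submit -> dispatch) and
 * service time (dispatch -> now) for the request's cgroup. */
static void report_completion(struct cfq_group *cfqg, struct request *rq)
{
	cfqg_stats_update_completion(cfqg,
				     rq_start_time_ns(rq),	/* set at submit */
				     rq_io_start_time_ns(rq),	/* set at dispatch */
				     rq->cmd_flags);
}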