
Commit 63a4cc24 authored by Mike Christie, committed by Jens Axboe

blkg_rwstat: separate op from flags



The bio and request operation and flags are going to be separate
definitions, so we cannot pass them in as a bitmap. This patch
converts the blkg_rwstat code and its caller, cfq, to pass in the
values separately.
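
For example, a caller that used to pass a single bitmap now passes the
REQ_OP value and the flags as separate arguments (illustrative before/after,
taken directly from the hunks below):

	blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1);               /* old */
	blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_rw, 1);  /* new */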

Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent ba568ea0
+29 −20
@@ -667,9 +667,10 @@ static inline void cfqg_put(struct cfq_group *cfqg)
 } while (0)
 
 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
-					    struct cfq_group *curr_cfqg, int rw)
+					    struct cfq_group *curr_cfqg, int op,
+					    int op_flags)
 {
-	blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
+	blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, 1);
 	cfqg_stats_end_empty_time(&cfqg->stats);
 	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
 }
@@ -683,26 +684,30 @@ static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
 #endif
 }
 
-static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int op,
+					       int op_flags)
 {
-	blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
+	blkg_rwstat_add(&cfqg->stats.queued, op, op_flags, -1);
 }
 
-static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int op,
+					       int op_flags)
 {
-	blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
+	blkg_rwstat_add(&cfqg->stats.merged, op, op_flags, 1);
 }
 
 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
-			uint64_t start_time, uint64_t io_start_time, int rw)
+			uint64_t start_time, uint64_t io_start_time, int op,
+			int op_flags)
 {
 	struct cfqg_stats *stats = &cfqg->stats;
 	unsigned long long now = sched_clock();
 
 	if (time_after64(now, io_start_time))
-		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
+		blkg_rwstat_add(&stats->service_time, op, op_flags,
+				now - io_start_time);
 	if (time_after64(io_start_time, start_time))
-		blkg_rwstat_add(&stats->wait_time, rw,
+		blkg_rwstat_add(&stats->wait_time, op, op_flags,
 				io_start_time - start_time);
 }

@@ -781,13 +786,16 @@ static inline void cfqg_put(struct cfq_group *cfqg) { }
 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)
 
 static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
-			struct cfq_group *curr_cfqg, int rw) { }
+			struct cfq_group *curr_cfqg, int op, int op_flags) { }
 static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
 			unsigned long time, unsigned long unaccounted_time) { }
-static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
-static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int op,
+			int op_flags) { }
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int op,
+			int op_flags) { }
 static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
-			uint64_t start_time, uint64_t io_start_time, int rw) { }
+			uint64_t start_time, uint64_t io_start_time, int op,
+			int op_flags) { }
 
 #endif	/* CONFIG_CFQ_GROUP_IOSCHED */

@@ -2461,10 +2469,10 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
 {
 	elv_rb_del(&cfqq->sort_list, rq);
 	cfqq->queued[rq_is_sync(rq)]--;
-	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
+	cfqg_stats_update_io_remove(RQ_CFQG(rq), req_op(rq), rq->cmd_flags);
 	cfq_add_rq_rb(rq);
 	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
-				 rq->cmd_flags);
+				 req_op(rq), rq->cmd_flags);
 }
 
 static struct request *
@@ -2517,7 +2525,7 @@ static void cfq_remove_request(struct request *rq)
 	cfq_del_rq_rb(rq);
 
 	cfqq->cfqd->rq_queued--;
-	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
+	cfqg_stats_update_io_remove(RQ_CFQG(rq), req_op(rq), rq->cmd_flags);
 	if (rq->cmd_flags & REQ_PRIO) {
 		WARN_ON(!cfqq->prio_pending);
 		cfqq->prio_pending--;
@@ -2552,7 +2560,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
 static void cfq_bio_merged(struct request_queue *q, struct request *req,
 				struct bio *bio)
 {
-	cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
+	cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_rw);
 }
 
 static void
@@ -2575,7 +2583,7 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
 	if (cfqq->next_rq == next)
 		cfqq->next_rq = rq;
 	cfq_remove_request(next);
-	cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
+	cfqg_stats_update_io_merged(RQ_CFQG(rq), req_op(next), next->cmd_flags);
 
 	cfqq = RQ_CFQQ(next);
 	/*
@@ -4108,7 +4116,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 	rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 	cfq_add_rq_rb(rq);
-	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
+	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group, req_op(rq),
 				 rq->cmd_flags);
 	cfq_rq_enqueued(cfqd, cfqq, rq);
 }
@@ -4206,7 +4214,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	cfqq->dispatched--;
 	(RQ_CFQG(rq))->dispatched--;
 	cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
-				     rq_io_start_time_ns(rq), rq->cmd_flags);
+				     rq_io_start_time_ns(rq), req_op(rq),
+				     rq->cmd_flags);
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 
+7 −6
@@ -590,25 +590,26 @@ static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
 /**
  * blkg_rwstat_add - add a value to a blkg_rwstat
  * @rwstat: target blkg_rwstat
- * @rw: mask of REQ_{WRITE|SYNC}
+ * @op: REQ_OP
+ * @op_flags: rq_flag_bits
  * @val: value to add
  *
  * Add @val to @rwstat.  The counters are chosen according to @rw.  The
  * caller is responsible for synchronizing calls to this function.
  */
 static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
-				   int rw, uint64_t val)
+				   int op, int op_flags, uint64_t val)
 {
 	struct percpu_counter *cnt;
 
-	if (op_is_write(rw))
+	if (op_is_write(op))
 		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
 	else
 		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
 
 	__percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
 
-	if (rw & REQ_SYNC)
+	if (op_flags & REQ_SYNC)
 		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
 	else
 		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
@@ -713,9 +714,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
 
 	if (!throtl) {
 		blkg = blkg ?: q->root_blkg;
-		blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw,
+		blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_rw,
 				bio->bi_iter.bi_size);
-		blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1);
+		blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_rw, 1);
 	}
 
 	rcu_read_unlock();