Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fa2e39cb authored by Omar Sandoval, committed by Jens Axboe
Browse files

blk-stat: use READ and WRITE instead of BLK_STAT_{READ,WRITE}



The stats buckets will become generic soon, so make the existing users
use the common READ and WRITE definitions instead of one internal to
blk-stat.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 0315b159
Loading
Loading
Loading
Loading
+6 −6
Original line number Diff line number Diff line
@@ -333,17 +333,17 @@ static int hctx_stats_show(struct seq_file *m, void *v)
	struct blk_mq_hw_ctx *hctx = m->private;
	struct blk_rq_stat stat[2];

	blk_stat_init(&stat[BLK_STAT_READ]);
	blk_stat_init(&stat[BLK_STAT_WRITE]);
	blk_stat_init(&stat[READ]);
	blk_stat_init(&stat[WRITE]);

	blk_hctx_stat_get(hctx, stat);

	seq_puts(m, "read: ");
	print_stat(m, &stat[BLK_STAT_READ]);
	print_stat(m, &stat[READ]);
	seq_puts(m, "\n");

	seq_puts(m, "write: ");
	print_stat(m, &stat[BLK_STAT_WRITE]);
	print_stat(m, &stat[WRITE]);
	seq_puts(m, "\n");
	return 0;
}
@@ -362,8 +362,8 @@ static ssize_t hctx_stats_write(struct file *file, const char __user *buf,
	int i;

	hctx_for_each_ctx(hctx, ctx, i) {
		blk_stat_init(&ctx->stat[BLK_STAT_READ]);
		blk_stat_init(&ctx->stat[BLK_STAT_WRITE]);
		blk_stat_init(&ctx->stat[READ]);
		blk_stat_init(&ctx->stat[WRITE]);
	}
	return count;
}
+6 −6
Original line number Diff line number Diff line
@@ -2040,8 +2040,8 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
		spin_lock_init(&__ctx->lock);
		INIT_LIST_HEAD(&__ctx->rq_list);
		__ctx->queue = q;
		blk_stat_init(&__ctx->stat[BLK_STAT_READ]);
		blk_stat_init(&__ctx->stat[BLK_STAT_WRITE]);
		blk_stat_init(&__ctx->stat[READ]);
		blk_stat_init(&__ctx->stat[WRITE]);

		/* If the cpu isn't online, the cpu is mapped to first hctx */
		if (!cpu_online(i))
@@ -2769,10 +2769,10 @@ static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
	 * important on devices where the completion latencies are longer
	 * than ~10 usec.
	 */
	if (req_op(rq) == REQ_OP_READ && stat[BLK_STAT_READ].nr_samples)
		ret = (stat[BLK_STAT_READ].mean + 1) / 2;
	else if (req_op(rq) == REQ_OP_WRITE && stat[BLK_STAT_WRITE].nr_samples)
		ret = (stat[BLK_STAT_WRITE].mean + 1) / 2;
	if (req_op(rq) == REQ_OP_READ && stat[READ].nr_samples)
		ret = (stat[READ].mean + 1) / 2;
	else if (req_op(rq) == REQ_OP_WRITE && stat[WRITE].nr_samples)
		ret = (stat[WRITE].mean + 1) / 2;

	return ret;
}
+39 −41
Original line number Diff line number Diff line
@@ -55,8 +55,8 @@ static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
	uint64_t latest = 0;
	int i, j, nr;

	blk_stat_init(&dst[BLK_STAT_READ]);
	blk_stat_init(&dst[BLK_STAT_WRITE]);
	blk_stat_init(&dst[READ]);
	blk_stat_init(&dst[WRITE]);

	nr = 0;
	do {
@@ -64,16 +64,16 @@ static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)

		queue_for_each_hw_ctx(q, hctx, i) {
			hctx_for_each_ctx(hctx, ctx, j) {
				blk_stat_flush_batch(&ctx->stat[BLK_STAT_READ]);
				blk_stat_flush_batch(&ctx->stat[BLK_STAT_WRITE]);
				blk_stat_flush_batch(&ctx->stat[READ]);
				blk_stat_flush_batch(&ctx->stat[WRITE]);

				if (!ctx->stat[BLK_STAT_READ].nr_samples &&
				    !ctx->stat[BLK_STAT_WRITE].nr_samples)
				if (!ctx->stat[READ].nr_samples &&
				    !ctx->stat[WRITE].nr_samples)
					continue;
				if (ctx->stat[BLK_STAT_READ].time > newest)
					newest = ctx->stat[BLK_STAT_READ].time;
				if (ctx->stat[BLK_STAT_WRITE].time > newest)
					newest = ctx->stat[BLK_STAT_WRITE].time;
				if (ctx->stat[READ].time > newest)
					newest = ctx->stat[READ].time;
				if (ctx->stat[WRITE].time > newest)
					newest = ctx->stat[WRITE].time;
			}
		}

@@ -88,14 +88,14 @@ static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)

		queue_for_each_hw_ctx(q, hctx, i) {
			hctx_for_each_ctx(hctx, ctx, j) {
				if (ctx->stat[BLK_STAT_READ].time == newest) {
					blk_stat_sum(&dst[BLK_STAT_READ],
						     &ctx->stat[BLK_STAT_READ]);
				if (ctx->stat[READ].time == newest) {
					blk_stat_sum(&dst[READ],
						     &ctx->stat[READ]);
					nr++;
				}
				if (ctx->stat[BLK_STAT_WRITE].time == newest) {
					blk_stat_sum(&dst[BLK_STAT_WRITE],
						     &ctx->stat[BLK_STAT_WRITE]);
				if (ctx->stat[WRITE].time == newest) {
					blk_stat_sum(&dst[WRITE],
						     &ctx->stat[WRITE]);
					nr++;
				}
			}
@@ -106,7 +106,7 @@ static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
		 */
	} while (!nr);

	dst[BLK_STAT_READ].time = dst[BLK_STAT_WRITE].time = latest;
	dst[READ].time = dst[WRITE].time = latest;
}

void blk_queue_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
@@ -114,11 +114,11 @@ void blk_queue_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
	if (q->mq_ops)
		blk_mq_stat_get(q, dst);
	else {
		blk_stat_flush_batch(&q->rq_stats[BLK_STAT_READ]);
		blk_stat_flush_batch(&q->rq_stats[BLK_STAT_WRITE]);
		memcpy(&dst[BLK_STAT_READ], &q->rq_stats[BLK_STAT_READ],
		blk_stat_flush_batch(&q->rq_stats[READ]);
		blk_stat_flush_batch(&q->rq_stats[WRITE]);
		memcpy(&dst[READ], &q->rq_stats[READ],
		       sizeof(struct blk_rq_stat));
		memcpy(&dst[BLK_STAT_WRITE], &q->rq_stats[BLK_STAT_WRITE],
		memcpy(&dst[WRITE], &q->rq_stats[WRITE],
		       sizeof(struct blk_rq_stat));
	}
}
@@ -133,31 +133,29 @@ void blk_hctx_stat_get(struct blk_mq_hw_ctx *hctx, struct blk_rq_stat *dst)
		uint64_t newest = 0;

		hctx_for_each_ctx(hctx, ctx, i) {
			blk_stat_flush_batch(&ctx->stat[BLK_STAT_READ]);
			blk_stat_flush_batch(&ctx->stat[BLK_STAT_WRITE]);
			blk_stat_flush_batch(&ctx->stat[READ]);
			blk_stat_flush_batch(&ctx->stat[WRITE]);

			if (!ctx->stat[BLK_STAT_READ].nr_samples &&
			    !ctx->stat[BLK_STAT_WRITE].nr_samples)
			if (!ctx->stat[READ].nr_samples &&
			    !ctx->stat[WRITE].nr_samples)
				continue;

			if (ctx->stat[BLK_STAT_READ].time > newest)
				newest = ctx->stat[BLK_STAT_READ].time;
			if (ctx->stat[BLK_STAT_WRITE].time > newest)
				newest = ctx->stat[BLK_STAT_WRITE].time;
			if (ctx->stat[READ].time > newest)
				newest = ctx->stat[READ].time;
			if (ctx->stat[WRITE].time > newest)
				newest = ctx->stat[WRITE].time;
		}

		if (!newest)
			break;

		hctx_for_each_ctx(hctx, ctx, i) {
			if (ctx->stat[BLK_STAT_READ].time == newest) {
				blk_stat_sum(&dst[BLK_STAT_READ],
						&ctx->stat[BLK_STAT_READ]);
			if (ctx->stat[READ].time == newest) {
				blk_stat_sum(&dst[READ], &ctx->stat[READ]);
				nr++;
			}
			if (ctx->stat[BLK_STAT_WRITE].time == newest) {
				blk_stat_sum(&dst[BLK_STAT_WRITE],
						&ctx->stat[BLK_STAT_WRITE]);
			if (ctx->stat[WRITE].time == newest) {
				blk_stat_sum(&dst[WRITE], &ctx->stat[WRITE]);
				nr++;
			}
		}
@@ -226,13 +224,13 @@ void blk_stat_clear(struct request_queue *q)

		queue_for_each_hw_ctx(q, hctx, i) {
			hctx_for_each_ctx(hctx, ctx, j) {
				blk_stat_init(&ctx->stat[BLK_STAT_READ]);
				blk_stat_init(&ctx->stat[BLK_STAT_WRITE]);
				blk_stat_init(&ctx->stat[READ]);
				blk_stat_init(&ctx->stat[WRITE]);
			}
		}
	} else {
		blk_stat_init(&q->rq_stats[BLK_STAT_READ]);
		blk_stat_init(&q->rq_stats[BLK_STAT_WRITE]);
		blk_stat_init(&q->rq_stats[READ]);
		blk_stat_init(&q->rq_stats[WRITE]);
	}
}

+0 −5
Original line number Diff line number Diff line
@@ -15,11 +15,6 @@
#define BLK_STAT_TIME_MASK	((1ULL << BLK_STAT_SHIFT) - 1)
#define BLK_STAT_MASK		~BLK_STAT_TIME_MASK

enum {
	BLK_STAT_READ	= 0,
	BLK_STAT_WRITE,
};

void blk_stat_add(struct blk_rq_stat *, struct request *);
void blk_hctx_stat_get(struct blk_mq_hw_ctx *, struct blk_rq_stat *);
void blk_queue_stat_get(struct request_queue *, struct blk_rq_stat *);
+2 −2
Original line number Diff line number Diff line
@@ -518,8 +518,8 @@ static ssize_t queue_stats_show(struct request_queue *q, char *page)

	blk_queue_stat_get(q, stat);

	ret = print_stat(page, &stat[BLK_STAT_READ], "read :");
	ret += print_stat(page + ret, &stat[BLK_STAT_WRITE], "write:");
	ret = print_stat(page, &stat[READ], "read :");
	ret += print_stat(page + ret, &stat[WRITE], "write:");
	return ret;
}

Loading