
Commit dc3b17cc authored by Jan Kara, committed by Jens Axboe

block: Use pointer to backing_dev_info from request_queue



We will want to have struct backing_dev_info allocated separately from
struct request_queue. As the first step, add a pointer to backing_dev_info
to request_queue and convert all users touching it. No functional
changes in this patch.
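
For orientation, a minimal sketch of the struct request_queue side of the change (the include/linux/blkdev.h hunk is not shown on this page, and the field placement here is illustrative only): the embedded bdi is kept for now as _backing_dev_info, and the new pointer initially just aliases it, as the blk_alloc_queue_node() hunk below shows.

	struct request_queue {
		/* ... */
		struct backing_dev_info	*backing_dev_info;	/* all users now go through this pointer */
		struct backing_dev_info	_backing_dev_info;	/* still embedded until the bdi is split out */
		/* ... */
	};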

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent f44f1ab5
block/blk-cgroup.c +3 −3
@@ -184,7 +184,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 		goto err_free_blkg;
 	}
 
-	wb_congested = wb_congested_get_create(&q->backing_dev_info,
+	wb_congested = wb_congested_get_create(q->backing_dev_info,
 					       blkcg->css.id,
 					       GFP_NOWAIT | __GFP_NOWARN);
 	if (!wb_congested) {
@@ -469,8 +469,8 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 const char *blkg_dev_name(struct blkcg_gq *blkg)
 {
 	/* some drivers (floppy) instantiate a queue w/o disk registered */
-	if (blkg->q->backing_dev_info.dev)
-		return dev_name(blkg->q->backing_dev_info.dev);
+	if (blkg->q->backing_dev_info->dev)
+		return dev_name(blkg->q->backing_dev_info->dev);
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(blkg_dev_name);
block/blk-core.c +14 −13
@@ -75,7 +75,7 @@ static void blk_clear_congested(struct request_list *rl, int sync)
 	 * flip its congestion state for events on other blkcgs.
 	 */
 	if (rl == &rl->q->root_rl)
-		clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+		clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
 #endif
 }

@@ -86,7 +86,7 @@ static void blk_set_congested(struct request_list *rl, int sync)
 #else
 	/* see blk_clear_congested() */
 	if (rl == &rl->q->root_rl)
-		set_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+		set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
 #endif
 }

@@ -117,7 +117,7 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
 
-	return &q->backing_dev_info;
+	return q->backing_dev_info;
 }
 EXPORT_SYMBOL(blk_get_backing_dev_info);

@@ -575,7 +575,7 @@ void blk_cleanup_queue(struct request_queue *q)
 	blk_flush_integrity();
 
 	/* @q won't process any more request, flush async actions */
-	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
 	blk_sync_queue(q);
 
 	if (q->mq_ops)
@@ -587,7 +587,7 @@ void blk_cleanup_queue(struct request_queue *q)
 		q->queue_lock = &q->__queue_lock;
 	spin_unlock_irq(lock);
 
-	bdi_unregister(&q->backing_dev_info);
+	bdi_unregister(q->backing_dev_info);
 
 	/* @q is and will stay empty, shutdown and put */
 	blk_put_queue(q);
@@ -728,17 +728,18 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	if (!q->bio_split)
 		goto fail_id;
 
-	q->backing_dev_info.ra_pages =
+	q->backing_dev_info = &q->_backing_dev_info;
+	q->backing_dev_info->ra_pages =
 			(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
-	q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
-	q->backing_dev_info.name = "block";
+	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
+	q->backing_dev_info->name = "block";
 	q->node = node_id;
 
-	err = bdi_init(&q->backing_dev_info);
+	err = bdi_init(q->backing_dev_info);
 	if (err)
 		goto fail_split;
 
-	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
+	setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 	INIT_LIST_HEAD(&q->queue_head);
@@ -788,7 +789,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 fail_ref:
 	percpu_ref_exit(&q->q_usage_counter);
 fail_bdi:
-	bdi_destroy(&q->backing_dev_info);
+	bdi_destroy(q->backing_dev_info);
 fail_split:
 	bioset_free(q->bio_split);
 fail_id:
@@ -1182,7 +1183,7 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
 	 * disturb iosched and blkcg but weird is bettern than dead.
 	 */
 	printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
-			   __func__, dev_name(q->backing_dev_info.dev));
+			   __func__, dev_name(q->backing_dev_info->dev));
 
 	rq->rq_flags &= ~RQF_ELVPRIV;
 	rq->elv.icq = NULL;
@@ -2659,7 +2660,7 @@ void blk_finish_request(struct request *req, int error)
 	BUG_ON(blk_queued_rq(req));
 
 	if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req))
-		laptop_io_completion(&req->q->backing_dev_info);
+		laptop_io_completion(req->q->backing_dev_info);
 
 	blk_delete_timer(req);

block/blk-integrity.c +2 −2
@@ -443,10 +443,10 @@ void blk_integrity_revalidate(struct gendisk *disk)
 		return;
 
 	if (bi->profile)
-		disk->queue->backing_dev_info.capabilities |=
+		disk->queue->backing_dev_info->capabilities |=
 			BDI_CAP_STABLE_WRITES;
 	else
-		disk->queue->backing_dev_info.capabilities &=
+		disk->queue->backing_dev_info->capabilities &=
 			~BDI_CAP_STABLE_WRITES;
 }

block/blk-settings.c +1 −1
@@ -253,7 +253,7 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
 	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
 	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
 	limits->max_sectors = max_sectors;
-	q->backing_dev_info.io_pages = max_sectors >> (PAGE_SHIFT - 9);
+	q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
 }
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
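
An aside on the shift arithmetic in this hunk and in the sysfs hunks below: a sector is 512 B (1 << 9) and a page is 1 << PAGE_SHIFT bytes, so on a 4 KiB-page system (PAGE_SHIFT == 12) the conversions reduce to small constant shifts. A worked sketch with example values (the numbers are illustrative, not taken from the patch):

	/* sectors -> pages: shift by PAGE_SHIFT - 9, i.e. divide by 8 on 4 KiB pages */
	unsigned long io_pages = 2560 >> (PAGE_SHIFT - 9);	/* 2560 sectors = 1280 KiB = 320 pages */
	/* pages <-> KiB: shift by PAGE_SHIFT - 10, i.e. multiply/divide by 4 on 4 KiB pages */
	unsigned long ra_kb = 32UL << (PAGE_SHIFT - 10);	/* 32 ra_pages = 128 KiB of read-ahead */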

block/blk-sysfs.c +4 −4
@@ -89,7 +89,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)

 static ssize_t queue_ra_show(struct request_queue *q, char *page)
 {
-	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
+	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
 					(PAGE_SHIFT - 10);
 
 	return queue_var_show(ra_kb, (page));
@@ -104,7 +104,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
 	if (ret < 0)
 		return ret;
 
-	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);
+	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
 
 	return ret;
 }
@@ -236,7 +236,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)

 	spin_lock_irq(q->queue_lock);
 	q->limits.max_sectors = max_sectors_kb << 1;
-	q->backing_dev_info.io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
+	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
 	spin_unlock_irq(q->queue_lock);
 
 	return ret;
@@ -799,7 +799,7 @@ static void blk_release_queue(struct kobject *kobj)
 		container_of(kobj, struct request_queue, kobj);
 
 	wbt_exit(q);
-	bdi_exit(&q->backing_dev_info);
+	bdi_exit(q->backing_dev_info);
 	blkcg_exit_queue(q);
 
 	if (q->elevator) {