Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 22b27b16 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "block: Initialize bd_bdi on inode initialization"

parents c24ca7f9 16d28918
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -184,7 +184,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
		goto err_free_blkg;
	}

	wb_congested = wb_congested_get_create(&q->backing_dev_info,
	wb_congested = wb_congested_get_create(q->backing_dev_info,
					       blkcg->css.id, GFP_NOWAIT);
	if (!wb_congested) {
		ret = -ENOMEM;
@@ -468,8 +468,8 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
/**
 * blkg_dev_name - return the device name backing a blkcg_gq
 * @blkg: blkcg_gq of interest
 *
 * Return: the dev_name() of @blkg's queue's backing device, or NULL when
 * no device has been registered for the queue.
 *
 * NOTE(review): the rendered diff showed both the pre-patch
 * (`backing_dev_info.dev`) and post-patch (`backing_dev_info->dev`) lines;
 * only the post-patch pointer form is kept, matching the dynamically
 * allocated backing_dev_info introduced by this merge.
 */
const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info->dev)
		return dev_name(blkg->q->backing_dev_info->dev);
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_dev_name);
+17 −20
Original line number Diff line number Diff line
@@ -87,7 +87,7 @@ static void blk_clear_congested(struct request_list *rl, int sync)
	 * flip its congestion state for events on other blkcgs.
	 */
	if (rl == &rl->q->root_rl)
		clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
		clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}

@@ -98,7 +98,7 @@ static void blk_set_congested(struct request_list *rl, int sync)
#else
	/* see blk_clear_congested() */
	if (rl == &rl->q->root_rl)
		set_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
		set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
#endif
}

@@ -122,14 +122,12 @@ void blk_queue_congestion_threshold(struct request_queue *q)
 * @bdev:	device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info. The return value is never NULL however we may return
 * &noop_backing_dev_info if the bdev is not currently open.
 */
/*
 * Post-patch body: the diff render kept both the old `&q->backing_dev_info`
 * return and the new `bdev->bd_bdi` return. With bd_bdi initialized on
 * inode initialization (this merge's purpose), the queue lookup is no
 * longer needed, so the now-unused `q` local is dropped as well.
 * Return value is never NULL; it may be &noop_backing_dev_info when the
 * bdev is not currently open.
 */
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	return bdev->bd_bdi;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);

@@ -597,7 +595,7 @@ void blk_cleanup_queue(struct request_queue *q)
	blk_flush_integrity();

	/* @q won't process any more request, flush async actions */
	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
	blk_sync_queue(q);

	if (q->mq_ops)
@@ -697,7 +695,6 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;
	int err;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
@@ -712,17 +709,17 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
	if (!q->bio_split)
		goto fail_id;

	q->backing_dev_info.ra_pages =
	q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
	if (!q->backing_dev_info)
		goto fail_split;

	q->backing_dev_info->ra_pages =
			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
	q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
	q->backing_dev_info.name = "block";
	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
	q->backing_dev_info->name = "block";
	q->node = node_id;

	err = bdi_init(&q->backing_dev_info);
	if (err)
		goto fail_split;

	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
	setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
		    laptop_mode_timer_fn, (unsigned long) q);
	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
	INIT_LIST_HEAD(&q->queue_head);
@@ -772,7 +769,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	bdi_destroy(&q->backing_dev_info);
	bdi_put(q->backing_dev_info);
fail_split:
	bioset_free(q->bio_split);
fail_id:
@@ -1195,7 +1192,7 @@ fail_elvpriv:
	 * disturb iosched and blkcg but weird is bettern than dead.
	 */
	printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
			   __func__, dev_name(q->backing_dev_info.dev));
			   __func__, dev_name(q->backing_dev_info->dev));

	rq->cmd_flags &= ~REQ_ELVPRIV;
	rq->elv.icq = NULL;
@@ -3251,7 +3248,7 @@ void blk_finish_request(struct request *req, int error)
	BUG_ON(blk_queued_rq(req));

	if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
		laptop_io_completion(&req->q->backing_dev_info);
		laptop_io_completion(req->q->backing_dev_info);

	blk_delete_timer(req);

+2 −2
Original line number Diff line number Diff line
@@ -418,7 +418,7 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template
	bi->tuple_size = template->tuple_size;
	bi->tag_size = template->tag_size;

	disk->queue->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
	disk->queue->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
}
EXPORT_SYMBOL(blk_integrity_register);

@@ -431,7 +431,7 @@ EXPORT_SYMBOL(blk_integrity_register);
 */
/**
 * blk_integrity_unregister - disable integrity support for @disk
 * @disk: gendisk whose integrity profile is being torn down
 *
 * Clears the stable-writes BDI capability set by blk_integrity_register()
 * and zeroes the queue's integrity profile. Only the post-patch
 * pointer-dereference form of backing_dev_info is kept; the stale
 * pre-patch `.capabilities` line from the rendered diff is removed.
 */
void blk_integrity_unregister(struct gendisk *disk)
{
	disk->queue->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
	memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
}
EXPORT_SYMBOL(blk_integrity_unregister);
+3 −3
Original line number Diff line number Diff line
@@ -75,7 +75,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
					(PAGE_CACHE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
@@ -90,7 +90,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
	if (ret < 0)
		return ret;

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	return ret;
}
@@ -578,7 +578,7 @@ static void blk_release_queue(struct kobject *kobj)
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	bdi_exit(&q->backing_dev_info);
	bdi_put(q->backing_dev_info);
	blkcg_exit_queue(q);

	if (q->elevator) {
+4 −2
Original line number Diff line number Diff line
@@ -611,7 +611,7 @@ void add_disk(struct gendisk *disk)
	disk_alloc_events(disk);

	/* Register BDI before referencing it from bdev */
	bdi = &disk->queue->backing_dev_info;
	bdi = disk->queue->backing_dev_info;
	bdi_register_owner(bdi, disk_to_dev(disk));

	blk_register_region(disk_devt(disk), disk->minors, NULL,
@@ -646,6 +646,8 @@ void del_gendisk(struct gendisk *disk)
	disk_part_iter_init(&piter, disk,
			     DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
	while ((part = disk_part_iter_next(&piter))) {
		bdev_unhash_inode(MKDEV(disk->major,
					disk->first_minor + part->partno));
		invalidate_partition(disk, part->partno);
		delete_partition(disk, part->partno);
	}
@@ -661,7 +663,7 @@ void del_gendisk(struct gendisk *disk)
		 * Unregister bdi before releasing device numbers (as they can
		 * get reused and we'd get clashes in sysfs).
		 */
		bdi_unregister(&disk->queue->backing_dev_info);
		bdi_unregister(disk->queue->backing_dev_info);
		blk_unregister_queue(disk);
	} else {
		WARN_ON(1);
Loading