
Commit cfc4ba53 authored by Jens Axboe

writeback: use RCU to protect bdi_list

Now that bdi_writeback_all() no longer handles integrity writeback,
it doesn't have to block anymore. This means that we can switch
bdi_list reader side protection to RCU.
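
As a rough sketch of the pattern this patch adopts (illustrative only, with hypothetical foo names, not code from the patch): readers walk the list inside an RCU read-side critical section and therefore must not block, while updaters still serialize against each other with a spinlock and use the _rcu list primitives so concurrent readers always see a consistent list.

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

static LIST_HEAD(foo_list);		/* read via RCU, updated under foo_lock */
static DEFINE_SPINLOCK(foo_lock);

struct foo {
	struct list_head list;
	int dirty;
};

/* Reader: lock-free, may run concurrently with an update */
static void foo_scan(void)
{
	struct foo *f;

	rcu_read_lock();
	list_for_each_entry_rcu(f, &foo_list, list) {
		if (!f->dirty)
			continue;
		/* hand the entry off elsewhere; must not block in here */
	}
	rcu_read_unlock();
}

/* Updater: still serialized by the spinlock, but uses the _rcu list ops */
static void foo_add(struct foo *f)
{
	spin_lock(&foo_lock);
	list_add_tail_rcu(&f->list, &foo_list);
	spin_unlock(&foo_lock);
}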

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent f11fcae8
fs/fs-writeback.c  +3 −3
@@ -868,16 +868,16 @@ static void bdi_writeback_all(struct writeback_control *wbc)
 
 	WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
 
-	spin_lock(&bdi_lock);
+	rcu_read_lock();
 
-	list_for_each_entry(bdi, &bdi_list, bdi_list) {
+	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
 		if (!bdi_has_dirty_io(bdi))
 			continue;
 
 		bdi_alloc_queue_work(bdi, wbc);
 	}
 
-	spin_unlock(&bdi_lock);
+	rcu_read_unlock();
 }
 
 /*
include/linux/backing-dev.h  +1 −0
@@ -59,6 +59,7 @@ struct bdi_writeback {
 
 struct backing_dev_info {
 	struct list_head bdi_list;
+	struct rcu_head rcu_head;
 	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
 	unsigned long state;	/* Always use atomic bitops on this */
 	unsigned int capabilities; /* Device capabilities */
mm/backing-dev.c  +55 −21
@@ -26,6 +26,12 @@ struct backing_dev_info default_backing_dev_info = {
 EXPORT_SYMBOL_GPL(default_backing_dev_info);
 
 static struct class *bdi_class;
+
+/*
+ * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
+ * reader side protection for bdi_pending_list. bdi_list has RCU reader side
+ * locking.
+ */
 DEFINE_SPINLOCK(bdi_lock);
 LIST_HEAD(bdi_list);
 LIST_HEAD(bdi_pending_list);
@@ -284,9 +290,9 @@ static int bdi_start_fn(void *ptr)
 	/*
 	 * Add us to the active bdi_list
 	 */
-	spin_lock(&bdi_lock);
-	list_add(&bdi->bdi_list, &bdi_list);
-	spin_unlock(&bdi_lock);
+	spin_lock_bh(&bdi_lock);
+	list_add_rcu(&bdi->bdi_list, &bdi_list);
+	spin_unlock_bh(&bdi_lock);
 
 	bdi_task_init(bdi, wb);
 
@@ -389,7 +395,7 @@ static int bdi_forker_task(void *ptr)
 		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
 			wb_do_writeback(me, 0);
 
-		spin_lock(&bdi_lock);
+		spin_lock_bh(&bdi_lock);
 
 		/*
 		 * Check if any existing bdi's have dirty data without
@@ -410,7 +416,7 @@ static int bdi_forker_task(void *ptr)
 		if (list_empty(&bdi_pending_list)) {
 			unsigned long wait;
 
-			spin_unlock(&bdi_lock);
+			spin_unlock_bh(&bdi_lock);
 			wait = msecs_to_jiffies(dirty_writeback_interval * 10);
 			schedule_timeout(wait);
 			try_to_freeze();
@@ -426,7 +432,7 @@ static int bdi_forker_task(void *ptr)
 		bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
 				 bdi_list);
 		list_del_init(&bdi->bdi_list);
-		spin_unlock(&bdi_lock);
+		spin_unlock_bh(&bdi_lock);
 
 		wb = &bdi->wb;
 		wb->task = kthread_run(bdi_start_fn, wb, "flush-%s",
@@ -445,9 +451,9 @@ static int bdi_forker_task(void *ptr)
 			 * a chance to flush other bdi's to free
 			 * memory.
 			 */
-			spin_lock(&bdi_lock);
+			spin_lock_bh(&bdi_lock);
 			list_add_tail(&bdi->bdi_list, &bdi_pending_list);
-			spin_unlock(&bdi_lock);
+			spin_unlock_bh(&bdi_lock);
 
 			bdi_flush_io(bdi);
 		}
@@ -456,6 +462,24 @@ static int bdi_forker_task(void *ptr)
 	return 0;
 }
 
+static void bdi_add_to_pending(struct rcu_head *head)
+{
+	struct backing_dev_info *bdi;
+
+	bdi = container_of(head, struct backing_dev_info, rcu_head);
+	INIT_LIST_HEAD(&bdi->bdi_list);
+
+	spin_lock(&bdi_lock);
+	list_add_tail(&bdi->bdi_list, &bdi_pending_list);
+	spin_unlock(&bdi_lock);
+
+	/*
+	 * We are now on the pending list, wake up bdi_forker_task()
+	 * to finish the job and add us back to the active bdi_list
+	 */
+	wake_up_process(default_backing_dev_info.wb.task);
+}
+
 /*
  * Add the default flusher task that gets created for any bdi
  * that has dirty data pending writeout
@@ -478,16 +502,29 @@ void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
 	 * waiting for previous additions to finish.
 	 */
 	if (!test_and_set_bit(BDI_pending, &bdi->state)) {
-		list_move_tail(&bdi->bdi_list, &bdi_pending_list);
+		list_del_rcu(&bdi->bdi_list);
 
 		/*
-		 * We are now on the pending list, wake up bdi_forker_task()
-		 * to finish the job and add us back to the active bdi_list
+		 * We must wait for the current RCU period to end before
+		 * moving to the pending list. So schedule that operation
+		 * from an RCU callback.
 		 */
-		wake_up_process(default_backing_dev_info.wb.task);
+		call_rcu(&bdi->rcu_head, bdi_add_to_pending);
 	}
 }
 
+/*
+ * Remove bdi from bdi_list, and ensure that it is no longer visible
+ */
+static void bdi_remove_from_list(struct backing_dev_info *bdi)
+{
+	spin_lock_bh(&bdi_lock);
+	list_del_rcu(&bdi->bdi_list);
+	spin_unlock_bh(&bdi_lock);
+
+	synchronize_rcu();
+}
+
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 		const char *fmt, ...)
 {
@@ -506,9 +543,9 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 		goto exit;
 	}
 
-	spin_lock(&bdi_lock);
-	list_add_tail(&bdi->bdi_list, &bdi_list);
-	spin_unlock(&bdi_lock);
+	spin_lock_bh(&bdi_lock);
+	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
+	spin_unlock_bh(&bdi_lock);
 
 	bdi->dev = dev;
 
@@ -526,9 +563,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 			wb->task = NULL;
 			ret = -ENOMEM;
 
-			spin_lock(&bdi_lock);
-			list_del(&bdi->bdi_list);
-			spin_unlock(&bdi_lock);
+			bdi_remove_from_list(bdi);
 			goto exit;
 		}
 	}
@@ -565,9 +600,7 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 	/*
 	 * Make sure nobody finds us on the bdi_list anymore
 	 */
-	spin_lock(&bdi_lock);
-	list_del(&bdi->bdi_list);
-	spin_unlock(&bdi_lock);
+	bdi_remove_from_list(bdi);
 
 	/*
 	 * Finally, kill the kernel threads. We don't need to be RCU
@@ -599,6 +632,7 @@ int bdi_init(struct backing_dev_info *bdi)
 	bdi->max_ratio = 100;
 	bdi->max_prop_frac = PROP_FRAC_BASE;
 	spin_lock_init(&bdi->wb_lock);
+	INIT_RCU_HEAD(&bdi->rcu_head);
 	INIT_LIST_HEAD(&bdi->bdi_list);
 	INIT_LIST_HEAD(&bdi->wb_list);
 	INIT_LIST_HEAD(&bdi->work_list);
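
The subtle point in the mm/backing-dev.c changes above is that an entry on an RCU-protected list can no longer be moved to another list with a single list_move_tail(): readers may still be traversing the old list through the entry, so its forward pointer must stay valid for a full grace period. The patch expresses this in two ways, sketched below with hypothetical foo names (illustrative, not the patch itself): a blocking removal that waits out the grace period with synchronize_rcu(), as bdi_remove_from_list() does, and a non-blocking move that deletes the entry now and re-links it from a call_rcu() callback once the grace period has passed, as bdi_add_default_flusher_task() and bdi_add_to_pending() do.

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct foo {
	struct list_head list;
	struct rcu_head rcu_head;	/* needed to defer work with call_rcu() */
};

static LIST_HEAD(foo_active_list);	/* RCU readers traverse this one */
static LIST_HEAD(foo_pending_list);	/* plain list, foo_lock only */
static DEFINE_SPINLOCK(foo_lock);

/* Blocking removal: after this returns, no reader can still see f */
static void foo_remove(struct foo *f)
{
	spin_lock_bh(&foo_lock);
	list_del_rcu(&f->list);
	spin_unlock_bh(&foo_lock);

	synchronize_rcu();		/* wait for current readers to drain */
}

/* Grace period over: safe to reuse f->list and link it elsewhere */
static void foo_move_cb(struct rcu_head *head)
{
	struct foo *f = container_of(head, struct foo, rcu_head);

	INIT_LIST_HEAD(&f->list);	/* re-init; list_del_rcu() poisons ->prev */

	spin_lock(&foo_lock);		/* softirq context: BHs already off */
	list_add_tail(&f->list, &foo_pending_list);
	spin_unlock(&foo_lock);
}

/* Non-blocking move: delete now, re-link once readers are done */
static void foo_move(struct foo *f)
{
	spin_lock_bh(&foo_lock);
	list_del_rcu(&f->list);
	spin_unlock_bh(&foo_lock);

	call_rcu(&f->rcu_head, foo_move_cb);
}

Note that the callback runs in softirq context, which is why it can take the lock with plain spin_lock() while process-context users need spin_lock_bh(); see the note at the end.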
mm/page-writeback.c  +4 −4
@@ -315,7 +315,7 @@ int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
 {
 	int ret = 0;
 
-	spin_lock(&bdi_lock);
+	spin_lock_bh(&bdi_lock);
 	if (min_ratio > bdi->max_ratio) {
 		ret = -EINVAL;
 	} else {
@@ -327,7 +327,7 @@ int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
 			ret = -EINVAL;
 		}
 	}
-	spin_unlock(&bdi_lock);
+	spin_unlock_bh(&bdi_lock);
 
 	return ret;
 }
@@ -339,14 +339,14 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
 	if (max_ratio > 100)
 		return -EINVAL;
 
-	spin_lock(&bdi_lock);
+	spin_lock_bh(&bdi_lock);
 	if (bdi->min_ratio > max_ratio) {
 		ret = -EINVAL;
 	} else {
 		bdi->max_ratio = max_ratio;
 		bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
 	}
-	spin_unlock(&bdi_lock);
+	spin_unlock_bh(&bdi_lock);
 
 	return ret;
 }
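
One detail the commit message doesn't spell out (this is a reading of the code, not something stated by the author): the wholesale conversion of bdi_lock users from spin_lock() to spin_lock_bh() is most plausibly forced by the new RCU callback. bdi_add_to_pending() runs from softirq context and takes bdi_lock; if a process-context holder on the same CPU could be interrupted by that softirq, the callback would spin forever on a lock its own CPU already holds:

	CPU0, process context		CPU0, RCU softirq
	spin_lock(&bdi_lock);
	   <softirq fires>
					bdi_add_to_pending()
					   spin_lock(&bdi_lock);  /* deadlock */

Taking the lock with spin_lock_bh() keeps bottom halves disabled for the duration, so the callback can only run after the lock has been released.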