Commit 3e8f399d authored by Nikolay Borisov, committed by Linus Torvalds

writeback: rework wb_[dec|inc]_stat family of functions

Currently the writeback statistics code uses percpu counters to hold
various statistics.  Furthermore, we have two families of functions -
those which disable local irqs and those which don't, whose names begin
with a double underscore.  However, both families end up calling
__add_wb_stat, which in turn calls percpu_counter_add_batch, which is
already irq-safe.
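
For reference, both families bottom out in this single helper; its
signature and body appear as context in the include/linux/backing-dev.h
hunk below, and it is reproduced here in full for readability:

static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}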

Exploiting this fact allows us to eliminate the __wb_* functions, since
they don't add any protection beyond what we already have.
Furthermore, refactor the wb_* functions to call __add_wb_stat
directly, without the irq-disabling dance.  This will likely make the
code that modifies the stat counters slightly faster.
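
Concretely, once the patch is applied the non-underscore helpers reduce
to thin wrappers around __add_wb_stat (this is exactly what the
include/linux/backing-dev.h hunk below ends up with):

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}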

While at it, also document why percpu_counter_add_batch is in fact
preempt- and irq-safe, since at least three people have been confused
by this.

Link: http://lkml.kernel.org/r/1498029937-27293-1-git-send-email-nborisov@suse.com


Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Acked-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Josef Bacik <jbacik@fb.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c945dccc

fs/fs-writeback.c +4 −4
@@ -380,8 +380,8 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
 		struct page *page = radix_tree_deref_slot_protected(slot,
 							&mapping->tree_lock);
 		if (likely(page) && PageDirty(page)) {
-			__dec_wb_stat(old_wb, WB_RECLAIMABLE);
-			__inc_wb_stat(new_wb, WB_RECLAIMABLE);
+			dec_wb_stat(old_wb, WB_RECLAIMABLE);
+			inc_wb_stat(new_wb, WB_RECLAIMABLE);
 		}
 	}

@@ -391,8 +391,8 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
 							&mapping->tree_lock);
 		if (likely(page)) {
 			WARN_ON_ONCE(!PageWriteback(page));
-			__dec_wb_stat(old_wb, WB_WRITEBACK);
-			__inc_wb_stat(new_wb, WB_WRITEBACK);
+			dec_wb_stat(old_wb, WB_WRITEBACK);
+			inc_wb_stat(new_wb, WB_WRITEBACK);
 		}
 	}

include/linux/backing-dev.h +2 −22
@@ -69,34 +69,14 @@ static inline void __add_wb_stat(struct bdi_writeback *wb,
 	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
 }
 
-static inline void __inc_wb_stat(struct bdi_writeback *wb,
-				 enum wb_stat_item item)
-{
-	__add_wb_stat(wb, item, 1);
-}
-
 static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__inc_wb_stat(wb, item);
-	local_irq_restore(flags);
-}
-
-static inline void __dec_wb_stat(struct bdi_writeback *wb,
-				 enum wb_stat_item item)
-{
-	__add_wb_stat(wb, item, -1);
+	__add_wb_stat(wb, item, 1);
 }
 
 static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__dec_wb_stat(wb, item);
-	local_irq_restore(flags);
+	__add_wb_stat(wb, item, -1);
 }
 
 static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)

lib/percpu_counter.c +7 −0
@@ -72,6 +72,13 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 }
 EXPORT_SYMBOL(percpu_counter_set);
 
+/**
+ * This function is both preempt and irq safe. The former is due to explicit
+ * preemption disable. The latter is guaranteed by the fact that the slow path
+ * is explicitly protected by an irq-safe spinlock whereas the fast path uses
+ * this_cpu_add which is irq-safe by definition. Hence there is no need to muck
+ * with irq state before calling this one
+ */
 void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
 	s64 count;
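
The new comment describes the body of percpu_counter_add_batch, which
this hunk does not show. For orientation, a rough sketch of that body,
following the upstream implementation at the time (the authoritative
version lives in lib/percpu_counter.c), is:

void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (count >= batch || count <= -batch) {
		unsigned long flags;

		/* Slow path: fold the local delta into the shared count
		 * under the irq-safe fbc->lock spinlock. */
		raw_spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		/* Fast path: this_cpu_add() is itself irq-safe, so no
		 * explicit irq fiddling is needed here either. */
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}
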
mm/page-writeback.c +5 −5
@@ -601,7 +601,7 @@ static inline void __wb_writeout_inc(struct bdi_writeback *wb)
 {
 	struct wb_domain *cgdom;
 
-	__inc_wb_stat(wb, WB_WRITTEN);
+	inc_wb_stat(wb, WB_WRITTEN);
 	wb_domain_writeout_inc(&global_wb_domain, &wb->completions,
 			       wb->bdi->max_prop_frac);
 
@@ -2435,8 +2435,8 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 		__inc_lruvec_page_state(page, NR_FILE_DIRTY);
 		__inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 		__inc_node_page_state(page, NR_DIRTIED);
-		__inc_wb_stat(wb, WB_RECLAIMABLE);
-		__inc_wb_stat(wb, WB_DIRTIED);
+		inc_wb_stat(wb, WB_RECLAIMABLE);
+		inc_wb_stat(wb, WB_DIRTIED);
 		task_io_account_write(PAGE_SIZE);
 		current->nr_dirtied++;
 		this_cpu_inc(bdp_ratelimits);
@@ -2741,7 +2741,7 @@ int test_clear_page_writeback(struct page *page)
 			if (bdi_cap_account_writeback(bdi)) {
 				struct bdi_writeback *wb = inode_to_wb(inode);
 
-				__dec_wb_stat(wb, WB_WRITEBACK);
+				dec_wb_stat(wb, WB_WRITEBACK);
 				__wb_writeout_inc(wb);
 			}
 		}
@@ -2786,7 +2786,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
 						page_index(page),
 						PAGECACHE_TAG_WRITEBACK);
 			if (bdi_cap_account_writeback(bdi))
-				__inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK);
+				inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK);
 
 			/*
 			 * We can come through here when swapping anonymous