Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f01ef569 authored by Linus Torvalds
Browse files
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/wfg/writeback: (27 commits)
  mm: properly reflect task dirty limits in dirty_exceeded logic
  writeback: don't busy retry writeback on new/freeing inodes
  writeback: scale IO chunk size up to half device bandwidth
  writeback: trace global_dirty_state
  writeback: introduce max-pause and pass-good dirty limits
  writeback: introduce smoothed global dirty limit
  writeback: consolidate variable names in balance_dirty_pages()
  writeback: show bdi write bandwidth in debugfs
  writeback: bdi write bandwidth estimation
  writeback: account per-bdi accumulated written pages
  writeback: make writeback_control.nr_to_write straight
  writeback: skip tmpfs early in balance_dirty_pages_ratelimited_nr()
  writeback: trace event writeback_queue_io
  writeback: trace event writeback_single_inode
  writeback: remove .nonblocking and .encountered_congestion
  writeback: remove writeback_control.more_io
  writeback: skip balance_dirty_pages() for in-memory fs
  writeback: add bdi_dirty_limit() kernel-doc
  writeback: avoid extra sync work at enqueue time
  writeback: elevate queue_io() into wb_writeback()
  ...

Fix up trivial conflicts in fs/fs-writeback.c and mm/filemap.c
parents a93a1329 bcff25fc
Loading
Loading
Loading
Loading
+10 −6
Original line number Diff line number Diff line
@@ -44,24 +44,28 @@ inline struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}

EXPORT_SYMBOL(I_BDEV);

/*
 * Move the inode from its current bdi to a new bdi. If the inode is dirty we
 * need to move it onto the dirty list of @dst so that the inode is always on
 * the right list.
 */
static void bdev_inode_switch_bdi(struct inode *inode,
			struct backing_dev_info *dst)
{
	struct backing_dev_info *old = inode->i_data.backing_dev_info;

	/*
	 * Nothing to do if the inode already lives on @dst; taking the
	 * same wb.list_lock twice below would deadlock.
	 */
	if (unlikely(dst == old))		/* deadlock avoidance */
		return;
	/* Acquire both per-bdi list locks in a stable order. */
	bdi_lock_two(&old->wb, &dst->wb);
	spin_lock(&inode->i_lock);
	inode->i_data.backing_dev_info = dst;
	/*
	 * A dirty inode must sit on the dirty list of the bdi that will
	 * write it back, so migrate it along with the bdi pointer.
	 */
	if (inode->i_state & I_DIRTY)
		list_move(&inode->i_wb_list, &dst->wb.b_dirty);
	spin_unlock(&inode->i_lock);
	spin_unlock(&old->wb.list_lock);
	spin_unlock(&dst->wb.list_lock);
}

static sector_t max_block(struct block_device *bdev)
+0 −2
Original line number Diff line number Diff line
@@ -2551,7 +2551,6 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
	};
	struct writeback_control wbc_writepages = {
		.sync_mode	= wbc->sync_mode,
		.older_than_this = NULL,
		.nr_to_write	= 64,
		.range_start	= page_offset(page) + PAGE_CACHE_SIZE,
		.range_end	= (loff_t)-1,
@@ -2584,7 +2583,6 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
	};
	struct writeback_control wbc_writepages = {
		.sync_mode	= mode,
		.older_than_this = NULL,
		.nr_to_write	= nr_pages * 2,
		.range_start	= start,
		.range_end	= end + 1,
+2 −2
Original line number Diff line number Diff line
@@ -2741,7 +2741,7 @@ static int write_cache_pages_da(struct address_space *mapping,
	index = wbc->range_start >> PAGE_CACHE_SHIFT;
	end = wbc->range_end >> PAGE_CACHE_SHIFT;

	if (wbc->sync_mode == WB_SYNC_ALL)
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
@@ -2973,7 +2973,7 @@ static int ext4_da_writepages(struct address_space *mapping,
	}

retry:
	if (wbc->sync_mode == WB_SYNC_ALL)
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);

	while (!ret && wbc->nr_to_write > 0) {
+208 −165

File changed.

Preview size limit exceeded, changes collapsed.

+2 −3
Original line number Diff line number Diff line
@@ -37,7 +37,7 @@
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode_sb_list_lock protects:
 *   sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
@@ -48,7 +48,7 @@
 *   inode->i_lock
 *     inode->i_sb->s_inode_lru_lock
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
@@ -65,7 +65,6 @@ static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
/*
 * NOTE(review): this writeback series replaces the global
 * inode_wb_list_lock with the per-bdi bdi->wb.list_lock; once no
 * caller takes this lock any more, the definition below should be
 * removed along with its declaration in the header.
 */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
Loading