
Commit a7c1120d authored by Linus Torvalds
Pull ext4 bug fixes from Ted Ts'o:
 "Various bug fixes for ext4.  The most important is a fix for the new
  extent cache's slab shrinker which can cause significant, user-visible
  pauses when the system is under memory pressure."

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: enable quotas before orphan cleanup
  ext4: don't allow quota mount options when quota feature enabled
  ext4: fix a warning from sparse check for ext4_dir_llseek
  ext4: convert number of blocks to clusters properly
  ext4: fix possible memory leak in ext4_remount()
  jbd2: fix ERR_PTR dereference in jbd2__journal_start
  ext4: use percpu counter for extent cache count
  ext4: optimize ext4_es_shrink()
parents 6ec40b42 9b2ff357
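
The pauses mentioned above came from the extent cache's shrinker answering the VM's count-only query (sc->nr_to_scan == 0) by walking every inode on the s_es_lru list under a spinlock. The fix keeps a percpu counter up to date as cached extents are created and destroyed, so the query becomes a single O(1) read. Below is a minimal sketch of that pattern, not the ext4 code itself; the my_* names are hypothetical, and percpu_counter_init() is the two-argument form used by kernels of this era:

#include <linux/percpu_counter.h>

struct my_cache {
	struct percpu_counter obj_cnt;	/* objects eligible for reclaim */
};

static int my_cache_init(struct my_cache *c)
{
	return percpu_counter_init(&c->obj_cnt, 0);
}

static void my_obj_add(struct my_cache *c)
{
	percpu_counter_inc(&c->obj_cnt);	/* cheap: usually per-cpu only */
}

static void my_obj_del(struct my_cache *c)
{
	percpu_counter_dec(&c->obj_cnt);
}

static s64 my_obj_count(struct my_cache *c)
{
	/* O(1) approximate sum, clamped at zero; no list walk, no lock */
	return percpu_counter_read_positive(&c->obj_cnt);
}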
fs/ext4/balloc.c  +1 −1
@@ -635,7 +635,7 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
 	brelse(bitmap_bh);
 	printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
 	       ", computed = %llu, %llu\n",
-	       EXT4_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
+	       EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
 	       desc_count, bitmap_count);
 	return bitmap_count;
 #else
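
The fix above swaps one cluster-conversion macro for another; the distinction matters only on bigalloc file systems with more than one block per cluster. EXT4_B2C() turns a block number into a cluster number (a plain shift), while EXT4_NUM_B2C() turns a count of blocks into the number of clusters covering them (it rounds up). Definitions paraphrased from fs/ext4/ext4.h, with a worked example:

/* Paraphrased from fs/ext4/ext4.h: */
#define EXT4_B2C(sbi, blk)	((blk) >> (sbi)->s_cluster_bits)
#define EXT4_NUM_B2C(sbi, blks)	(((blks) + (sbi)->s_cluster_ratio - 1) >> \
				 (sbi)->s_cluster_bits)

/*
 * With a cluster ratio of 16 (s_cluster_bits == 4) and 20 blocks:
 *   EXT4_B2C(sbi, 20)     == 1  -- block 20 lies in cluster 1
 *   EXT4_NUM_B2C(sbi, 20) == 2  -- 20 blocks span 2 clusters
 * Using EXT4_B2C() on a block count drops the final partial cluster,
 * which is the miscount this hunk (and the mballoc.c ones below) fix.
 */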
fs/ext4/dir.c  +1 −1
@@ -334,7 +334,7 @@ static inline loff_t ext4_get_htree_eof(struct file *filp)
  *
  * For non-htree, ext4_llseek already chooses the proper max offset.
  */
-loff_t ext4_dir_llseek(struct file *file, loff_t offset, int whence)
+static loff_t ext4_dir_llseek(struct file *file, loff_t offset, int whence)
 {
 	struct inode *inode = file->f_mapping->host;
 	int dx_dir = is_dx_dir(inode);
fs/ext4/ext4.h  +1 −0
@@ -1309,6 +1309,7 @@ struct ext4_sb_info {
 	/* Reclaim extents from extent status tree */
 	struct shrinker s_es_shrinker;
 	struct list_head s_es_lru;
+	struct percpu_counter s_extent_cache_cnt;
 	spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;
 };
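
A percpu counter is not usable until it has been initialized, and its per-cpu storage must be freed at teardown. Those hunks are not among the ones loaded on this page, so the sketch below only illustrates the required lifecycle under that assumption (placement and error handling are hypothetical):

	/* on the mount/registration path, before first use */
	err = percpu_counter_init(&sbi->s_extent_cache_cnt, 0);
	if (err)
		return err;	/* hypothetical error handling */

	/* ... */

	/* on the unmount path, once the shrinker can no longer run */
	percpu_counter_destroy(&sbi->s_extent_cache_cnt);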

fs/ext4/extents_status.c  +13 −26
@@ -147,11 +147,12 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
 			      ext4_lblk_t end);
 static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
 				       int nr_to_scan);
-static int ext4_es_reclaim_extents_count(struct super_block *sb);
 
 int __init ext4_init_es(void)
 {
-	ext4_es_cachep = KMEM_CACHE(extent_status, SLAB_RECLAIM_ACCOUNT);
+	ext4_es_cachep = kmem_cache_create("ext4_extent_status",
+					   sizeof(struct extent_status),
+					   0, (SLAB_RECLAIM_ACCOUNT), NULL);
 	if (ext4_es_cachep == NULL)
 		return -ENOMEM;
 	return 0;
@@ -302,8 +303,10 @@ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
 	/*
 	 * We don't count delayed extent because we never try to reclaim them
 	 */
-	if (!ext4_es_is_delayed(es))
+	if (!ext4_es_is_delayed(es)) {
 		EXT4_I(inode)->i_es_lru_nr++;
+		percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
+	}
 
 	return es;
 }
@@ -314,6 +317,7 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
 	if (!ext4_es_is_delayed(es)) {
 		BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0);
 		EXT4_I(inode)->i_es_lru_nr--;
+		percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
 	}
 
 	kmem_cache_free(ext4_es_cachep, es);
@@ -674,10 +678,11 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
 	int nr_to_scan = sc->nr_to_scan;
 	int ret, nr_shrunk = 0;
 
-	trace_ext4_es_shrink_enter(sbi->s_sb, nr_to_scan);
+	ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
+	trace_ext4_es_shrink_enter(sbi->s_sb, nr_to_scan, ret);
 
 	if (!nr_to_scan)
-		return ext4_es_reclaim_extents_count(sbi->s_sb);
+		return ret;
 
 	INIT_LIST_HEAD(&scanned);
 
@@ -705,9 +710,10 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
 	}
 	list_splice_tail(&scanned, &sbi->s_es_lru);
 	spin_unlock(&sbi->s_es_lru_lock);
-	trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk);
 
-	return ext4_es_reclaim_extents_count(sbi->s_sb);
+	ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
+	trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk, ret);
+	return ret;
 }
 
 void ext4_es_register_shrinker(struct super_block *sb)
@@ -751,25 +757,6 @@ void ext4_es_lru_del(struct inode *inode)
 	spin_unlock(&sbi->s_es_lru_lock);
 }
 
-static int ext4_es_reclaim_extents_count(struct super_block *sb)
-{
-	struct ext4_sb_info *sbi = EXT4_SB(sb);
-	struct ext4_inode_info *ei;
-	struct list_head *cur;
-	int nr_cached = 0;
-
-	spin_lock(&sbi->s_es_lru_lock);
-	list_for_each(cur, &sbi->s_es_lru) {
-		ei = list_entry(cur, struct ext4_inode_info, i_es_lru);
-		read_lock(&ei->i_es_lock);
-		nr_cached += ei->i_es_lru_nr;
-		read_unlock(&ei->i_es_lock);
-	}
-	spin_unlock(&sbi->s_es_lru_lock);
-	trace_ext4_es_reclaim_extents_count(sb, nr_cached);
-	return nr_cached;
-}
-
 static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
 				       int nr_to_scan)
 {
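
The deleted ext4_es_reclaim_extents_count() is where the stalls originated: a count-only probe from the VM (sc->nr_to_scan == 0) took s_es_lru_lock and then every inode's i_es_lock just to sum i_es_lru_nr. After this patch both the probe and the post-scan return value are a single percpu_counter_read_positive() call. For context, a minimal sketch of the single-callback shrinker interface this kernel generation uses (demo_* names are hypothetical):

#include <linux/shrinker.h>
#include <linux/percpu_counter.h>

static struct percpu_counter demo_cnt;	/* hypothetical object count */

static int demo_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	if (!sc->nr_to_scan)
		/* count-only probe: answer from the counter, O(1) */
		return percpu_counter_read_positive(&demo_cnt);

	/* ... scan the LRU and free up to sc->nr_to_scan objects ... */

	/* report how many reclaimable objects remain */
	return percpu_counter_read_positive(&demo_cnt);
}

static struct shrinker demo_shrinker = {
	.shrink	= demo_shrink,
	.seeks	= DEFAULT_SEEKS,
};
/* paired with register_shrinker(&demo_shrinker) at setup and
 * unregister_shrinker(&demo_shrinker) at teardown */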
fs/ext4/mballoc.c  +4 −4
@@ -3419,7 +3419,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
 			win = offs;
 
 		ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
-			EXT4_B2C(sbi, win);
+			EXT4_NUM_B2C(sbi, win);
 		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
 		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
 	}
@@ -4565,7 +4565,7 @@ do_more:
 			EXT4_BLOCKS_PER_GROUP(sb);
 		count -= overflow;
 	}
-	count_clusters = EXT4_B2C(sbi, count);
+	count_clusters = EXT4_NUM_B2C(sbi, count);
 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
 	if (!bitmap_bh) {
 		err = -EIO;
@@ -4807,11 +4807,11 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
 	ext4_group_desc_csum_set(sb, block_group, desc);
 	ext4_unlock_group(sb, block_group);
 	percpu_counter_add(&sbi->s_freeclusters_counter,
-			   EXT4_B2C(sbi, blocks_freed));
+			   EXT4_NUM_B2C(sbi, blocks_freed));
 
 	if (sbi->s_log_groups_per_flex) {
 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
-		atomic_add(EXT4_B2C(sbi, blocks_freed),
+		atomic_add(EXT4_NUM_B2C(sbi, blocks_freed),
 			   &sbi->s_flex_groups[flex_group].free_clusters);
 	}
