Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit faeb20ec authored by Linus Torvalds
Browse files
Pull ext4 updates from Ted Ts'o:
 "Performance improvements in SEEK_DATA and xattr scalability
  improvements, plus a lot of clean ups and bug fixes"

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (38 commits)
  ext4: clean up error handling in the MMP support
  jbd2: do not fail journal because of frozen_buffer allocation failure
  ext4: use __GFP_NOFAIL in ext4_free_blocks()
  ext4: fix compile error while opening the macro DOUBLE_CHECK
  ext4: print ext4 mount option data_err=abort correctly
  ext4: fix NULL pointer dereference in ext4_mark_inode_dirty()
  ext4: drop unneeded BUFFER_TRACE in ext4_delete_inline_entry()
  ext4: fix misspellings in comments.
  jbd2: fix FS corruption possibility in jbd2_journal_destroy() on umount path
  ext4: more efficient SEEK_DATA implementation
  ext4: cleanup handling of bh->b_state in DAX mmap
  ext4: return hole from ext4_map_blocks()
  ext4: factor out determining of hole size
  ext4: fix setting of referenced bit in ext4_es_lookup_extent()
  ext4: remove i_ioend_count
  ext4: simplify io_end handling for AIO DIO
  ext4: move trans handling and completion deferal out of _ext4_get_block
  ext4: rename and split get blocks functions
  ext4: use i_mutex to serialize unaligned AIO DIO
  ext4: pack ioend structure better
  ...
parents 364e8dd9 03046886
Loading
Loading
Loading
Loading
+3 −0
Original line number Original line Diff line number Diff line
@@ -61,6 +61,8 @@ struct ext2_block_alloc_info {
#define rsv_start rsv_window._rsv_start
#define rsv_start rsv_window._rsv_start
#define rsv_end rsv_window._rsv_end
#define rsv_end rsv_window._rsv_end


struct mb_cache;

/*
/*
 * second extended-fs super-block data in memory
 * second extended-fs super-block data in memory
 */
 */
@@ -111,6 +113,7 @@ struct ext2_sb_info {
	 * of the mount options.
	 * of the mount options.
	 */
	 */
	spinlock_t s_lock;
	spinlock_t s_lock;
	struct mb_cache *s_mb_cache;
};
};


static inline spinlock_t *
static inline spinlock_t *
+17 −8
Original line number Original line Diff line number Diff line
@@ -131,7 +131,10 @@ static void ext2_put_super (struct super_block * sb)


	dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
	dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);


	ext2_xattr_put_super(sb);
	if (sbi->s_mb_cache) {
		ext2_xattr_destroy_cache(sbi->s_mb_cache);
		sbi->s_mb_cache = NULL;
	}
	if (!(sb->s_flags & MS_RDONLY)) {
	if (!(sb->s_flags & MS_RDONLY)) {
		struct ext2_super_block *es = sbi->s_es;
		struct ext2_super_block *es = sbi->s_es;


@@ -1104,6 +1107,14 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
		ext2_msg(sb, KERN_ERR, "error: insufficient memory");
		ext2_msg(sb, KERN_ERR, "error: insufficient memory");
		goto failed_mount3;
		goto failed_mount3;
	}
	}

#ifdef CONFIG_EXT2_FS_XATTR
	sbi->s_mb_cache = ext2_xattr_create_cache();
	if (!sbi->s_mb_cache) {
		ext2_msg(sb, KERN_ERR, "Failed to create an mb_cache");
		goto failed_mount3;
	}
#endif
	/*
	/*
	 * set up enough so that it can read an inode
	 * set up enough so that it can read an inode
	 */
	 */
@@ -1149,6 +1160,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
			sb->s_id);
			sb->s_id);
	goto failed_mount;
	goto failed_mount;
failed_mount3:
failed_mount3:
	if (sbi->s_mb_cache)
		ext2_xattr_destroy_cache(sbi->s_mb_cache);
	percpu_counter_destroy(&sbi->s_freeblocks_counter);
	percpu_counter_destroy(&sbi->s_freeblocks_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
@@ -1555,20 +1568,17 @@ MODULE_ALIAS_FS("ext2");


/*
 * Module init: create the ext2 inode slab cache, then register the
 * filesystem type. The inode cache must exist before the type becomes
 * visible to mounts, so it is created first and torn down again if
 * register_filesystem() fails. The xattr mbcache is no longer a module
 * global; it is created per-superblock in ext2_fill_super().
 *
 * Returns 0 on success or a negative errno.
 */
static int __init init_ext2_fs(void)
{
	int err;

	err = init_inodecache();
	if (err)
		return err;
	err = register_filesystem(&ext2_fs_type);
	if (err)
		goto out;
	return 0;
out:
	destroy_inodecache();
	return err;
}


@@ -1576,7 +1586,6 @@ static void __exit exit_ext2_fs(void)
{
{
	unregister_filesystem(&ext2_fs_type);
	unregister_filesystem(&ext2_fs_type);
	destroy_inodecache();
	destroy_inodecache();
	exit_ext2_xattr();
}
}


MODULE_AUTHOR("Remy Card and others");
MODULE_AUTHOR("Remy Card and others");
+65 −74
Original line number Original line Diff line number Diff line
@@ -90,14 +90,12 @@
static int ext2_xattr_set2(struct inode *, struct buffer_head *,
static int ext2_xattr_set2(struct inode *, struct buffer_head *,
			   struct ext2_xattr_header *);
			   struct ext2_xattr_header *);


static int ext2_xattr_cache_insert(struct buffer_head *);
static int ext2_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
static struct buffer_head *ext2_xattr_cache_find(struct inode *,
static struct buffer_head *ext2_xattr_cache_find(struct inode *,
						 struct ext2_xattr_header *);
						 struct ext2_xattr_header *);
static void ext2_xattr_rehash(struct ext2_xattr_header *,
static void ext2_xattr_rehash(struct ext2_xattr_header *,
			      struct ext2_xattr_entry *);
			      struct ext2_xattr_entry *);


static struct mb_cache *ext2_xattr_cache;

static const struct xattr_handler *ext2_xattr_handler_map[] = {
static const struct xattr_handler *ext2_xattr_handler_map[] = {
	[EXT2_XATTR_INDEX_USER]		     = &ext2_xattr_user_handler,
	[EXT2_XATTR_INDEX_USER]		     = &ext2_xattr_user_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
#ifdef CONFIG_EXT2_FS_POSIX_ACL
@@ -152,6 +150,7 @@ ext2_xattr_get(struct inode *inode, int name_index, const char *name,
	size_t name_len, size;
	size_t name_len, size;
	char *end;
	char *end;
	int error;
	int error;
	struct mb_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache;


	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);
		  name_index, name, buffer, (long)buffer_size);
@@ -196,7 +195,7 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_get",
			goto found;
			goto found;
		entry = next;
		entry = next;
	}
	}
	if (ext2_xattr_cache_insert(bh))
	if (ext2_xattr_cache_insert(ext2_mb_cache, bh))
		ea_idebug(inode, "cache insert failed");
		ea_idebug(inode, "cache insert failed");
	error = -ENODATA;
	error = -ENODATA;
	goto cleanup;
	goto cleanup;
@@ -209,7 +208,7 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_get",
	    le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize)
	    le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize)
		goto bad_block;
		goto bad_block;


	if (ext2_xattr_cache_insert(bh))
	if (ext2_xattr_cache_insert(ext2_mb_cache, bh))
		ea_idebug(inode, "cache insert failed");
		ea_idebug(inode, "cache insert failed");
	if (buffer) {
	if (buffer) {
		error = -ERANGE;
		error = -ERANGE;
@@ -247,6 +246,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
	char *end;
	char *end;
	size_t rest = buffer_size;
	size_t rest = buffer_size;
	int error;
	int error;
	struct mb_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache;


	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);
		  buffer, (long)buffer_size);
@@ -281,7 +281,7 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
			goto bad_block;
			goto bad_block;
		entry = next;
		entry = next;
	}
	}
	if (ext2_xattr_cache_insert(bh))
	if (ext2_xattr_cache_insert(ext2_mb_cache, bh))
		ea_idebug(inode, "cache insert failed");
		ea_idebug(inode, "cache insert failed");


	/* list the attribute names */
	/* list the attribute names */
@@ -483,22 +483,23 @@ bad_block: ext2_error(sb, "ext2_xattr_set",
	/* Here we know that we can set the new attribute. */
	/* Here we know that we can set the new attribute. */


	if (header) {
	if (header) {
		struct mb_cache_entry *ce;

		/* assert(header == HDR(bh)); */
		/* assert(header == HDR(bh)); */
		ce = mb_cache_entry_get(ext2_xattr_cache, bh->b_bdev,
					bh->b_blocknr);
		lock_buffer(bh);
		lock_buffer(bh);
		if (header->h_refcount == cpu_to_le32(1)) {
		if (header->h_refcount == cpu_to_le32(1)) {
			__u32 hash = le32_to_cpu(header->h_hash);

			ea_bdebug(bh, "modifying in-place");
			ea_bdebug(bh, "modifying in-place");
			if (ce)
			/*
				mb_cache_entry_free(ce);
			 * This must happen under buffer lock for
			 * ext2_xattr_set2() to reliably detect modified block
			 */
			mb_cache_entry_delete_block(EXT2_SB(sb)->s_mb_cache,
						    hash, bh->b_blocknr);

			/* keep the buffer locked while modifying it. */
			/* keep the buffer locked while modifying it. */
		} else {
		} else {
			int offset;
			int offset;


			if (ce)
				mb_cache_entry_release(ce);
			unlock_buffer(bh);
			unlock_buffer(bh);
			ea_bdebug(bh, "cloning");
			ea_bdebug(bh, "cloning");
			header = kmalloc(bh->b_size, GFP_KERNEL);
			header = kmalloc(bh->b_size, GFP_KERNEL);
@@ -626,6 +627,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
	struct super_block *sb = inode->i_sb;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh = NULL;
	struct buffer_head *new_bh = NULL;
	int error;
	int error;
	struct mb_cache *ext2_mb_cache = EXT2_SB(sb)->s_mb_cache;


	if (header) {
	if (header) {
		new_bh = ext2_xattr_cache_find(inode, header);
		new_bh = ext2_xattr_cache_find(inode, header);
@@ -653,7 +655,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
			   don't need to change the reference count. */
			   don't need to change the reference count. */
			new_bh = old_bh;
			new_bh = old_bh;
			get_bh(new_bh);
			get_bh(new_bh);
			ext2_xattr_cache_insert(new_bh);
			ext2_xattr_cache_insert(ext2_mb_cache, new_bh);
		} else {
		} else {
			/* We need to allocate a new block */
			/* We need to allocate a new block */
			ext2_fsblk_t goal = ext2_group_first_block_no(sb,
			ext2_fsblk_t goal = ext2_group_first_block_no(sb,
@@ -674,7 +676,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
			memcpy(new_bh->b_data, header, new_bh->b_size);
			memcpy(new_bh->b_data, header, new_bh->b_size);
			set_buffer_uptodate(new_bh);
			set_buffer_uptodate(new_bh);
			unlock_buffer(new_bh);
			unlock_buffer(new_bh);
			ext2_xattr_cache_insert(new_bh);
			ext2_xattr_cache_insert(ext2_mb_cache, new_bh);
			
			
			ext2_xattr_update_super_block(sb);
			ext2_xattr_update_super_block(sb);
		}
		}
@@ -707,19 +709,21 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,


	error = 0;
	error = 0;
	if (old_bh && old_bh != new_bh) {
	if (old_bh && old_bh != new_bh) {
		struct mb_cache_entry *ce;

		/*
		/*
		 * If there was an old block and we are no longer using it,
		 * If there was an old block and we are no longer using it,
		 * release the old block.
		 * release the old block.
		 */
		 */
		ce = mb_cache_entry_get(ext2_xattr_cache, old_bh->b_bdev,
					old_bh->b_blocknr);
		lock_buffer(old_bh);
		lock_buffer(old_bh);
		if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
		if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
			__u32 hash = le32_to_cpu(HDR(old_bh)->h_hash);

			/*
			 * This must happen under buffer lock for
			 * ext2_xattr_set2() to reliably detect freed block
			 */
			mb_cache_entry_delete_block(ext2_mb_cache,
						    hash, old_bh->b_blocknr);
			/* Free the old block. */
			/* Free the old block. */
			if (ce)
				mb_cache_entry_free(ce);
			ea_bdebug(old_bh, "freeing");
			ea_bdebug(old_bh, "freeing");
			ext2_free_blocks(inode, old_bh->b_blocknr, 1);
			ext2_free_blocks(inode, old_bh->b_blocknr, 1);
			mark_inode_dirty(inode);
			mark_inode_dirty(inode);
@@ -730,8 +734,6 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
		} else {
		} else {
			/* Decrement the refcount only. */
			/* Decrement the refcount only. */
			le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
			le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
			if (ce)
				mb_cache_entry_release(ce);
			dquot_free_block_nodirty(inode, 1);
			dquot_free_block_nodirty(inode, 1);
			mark_inode_dirty(inode);
			mark_inode_dirty(inode);
			mark_buffer_dirty(old_bh);
			mark_buffer_dirty(old_bh);
@@ -757,7 +759,6 @@ void
ext2_xattr_delete_inode(struct inode *inode)
ext2_xattr_delete_inode(struct inode *inode)
{
{
	struct buffer_head *bh = NULL;
	struct buffer_head *bh = NULL;
	struct mb_cache_entry *ce;


	down_write(&EXT2_I(inode)->xattr_sem);
	down_write(&EXT2_I(inode)->xattr_sem);
	if (!EXT2_I(inode)->i_file_acl)
	if (!EXT2_I(inode)->i_file_acl)
@@ -777,19 +778,22 @@ ext2_xattr_delete_inode(struct inode *inode)
			EXT2_I(inode)->i_file_acl);
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
		goto cleanup;
	}
	}
	ce = mb_cache_entry_get(ext2_xattr_cache, bh->b_bdev, bh->b_blocknr);
	lock_buffer(bh);
	lock_buffer(bh);
	if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
	if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
		if (ce)
		__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
			mb_cache_entry_free(ce);

		/*
		 * This must happen under buffer lock for ext2_xattr_set2() to
		 * reliably detect freed block
		 */
		mb_cache_entry_delete_block(EXT2_SB(inode->i_sb)->s_mb_cache,
					    hash, bh->b_blocknr);
		ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
		ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
		get_bh(bh);
		get_bh(bh);
		bforget(bh);
		bforget(bh);
		unlock_buffer(bh);
		unlock_buffer(bh);
	} else {
	} else {
		le32_add_cpu(&HDR(bh)->h_refcount, -1);
		le32_add_cpu(&HDR(bh)->h_refcount, -1);
		if (ce)
			mb_cache_entry_release(ce);
		ea_bdebug(bh, "refcount now=%d",
		ea_bdebug(bh, "refcount now=%d",
			le32_to_cpu(HDR(bh)->h_refcount));
			le32_to_cpu(HDR(bh)->h_refcount));
		unlock_buffer(bh);
		unlock_buffer(bh);
@@ -805,18 +809,6 @@ ext2_xattr_delete_inode(struct inode *inode)
	up_write(&EXT2_I(inode)->xattr_sem);
	up_write(&EXT2_I(inode)->xattr_sem);
}
}


/*
 * ext2_xattr_put_super()
 *
 * This is called when a file system is unmounted.
 * It shrinks the xattr block cache for this superblock's block device
 * (NOTE(review): presumably mb_cache_shrink() discards every cache entry
 * keyed to sb->s_bdev so no stale entries outlive the mount — confirm
 * against fs/mbcache.c).
 */
void
ext2_xattr_put_super(struct super_block *sb)
{
	mb_cache_shrink(sb->s_bdev);
}


/*
/*
 * ext2_xattr_cache_insert()
 * ext2_xattr_cache_insert()
 *
 *
@@ -826,28 +818,20 @@ ext2_xattr_put_super(struct super_block *sb)
 * Returns 0, or a negative error number on failure.
 * Returns 0, or a negative error number on failure.
 */
 */
static int
static int
ext2_xattr_cache_insert(struct buffer_head *bh)
ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh)
{
{
	__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
	__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
	struct mb_cache_entry *ce;
	int error;
	int error;


	ce = mb_cache_entry_alloc(ext2_xattr_cache, GFP_NOFS);
	error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr, 1);
	if (!ce)
		return -ENOMEM;
	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash);
	if (error) {
	if (error) {
		mb_cache_entry_free(ce);
		if (error == -EBUSY) {
		if (error == -EBUSY) {
			ea_bdebug(bh, "already in cache (%d cache entries)",
			ea_bdebug(bh, "already in cache (%d cache entries)",
				atomic_read(&ext2_xattr_cache->c_entry_count));
				atomic_read(&ext2_xattr_cache->c_entry_count));
			error = 0;
			error = 0;
		}
		}
	} else {
	} else
		ea_bdebug(bh, "inserting [%x] (%d cache entries)", (int)hash,
		ea_bdebug(bh, "inserting [%x]", (int)hash);
			  atomic_read(&ext2_xattr_cache->c_entry_count));
		mb_cache_entry_release(ce);
	}
	return error;
	return error;
}
}


@@ -904,22 +888,16 @@ ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
{
{
	__u32 hash = le32_to_cpu(header->h_hash);
	__u32 hash = le32_to_cpu(header->h_hash);
	struct mb_cache_entry *ce;
	struct mb_cache_entry *ce;
	struct mb_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache;


	if (!header->h_hash)
	if (!header->h_hash)
		return NULL;  /* never share */
		return NULL;  /* never share */
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
again:
again:
	ce = mb_cache_entry_find_first(ext2_xattr_cache, inode->i_sb->s_bdev,
	ce = mb_cache_entry_find_first(ext2_mb_cache, hash);
				       hash);
	while (ce) {
	while (ce) {
		struct buffer_head *bh;
		struct buffer_head *bh;


		if (IS_ERR(ce)) {
			if (PTR_ERR(ce) == -EAGAIN)
				goto again;
			break;
		}

		bh = sb_bread(inode->i_sb, ce->e_block);
		bh = sb_bread(inode->i_sb, ce->e_block);
		if (!bh) {
		if (!bh) {
			ext2_error(inode->i_sb, "ext2_xattr_cache_find",
			ext2_error(inode->i_sb, "ext2_xattr_cache_find",
@@ -927,7 +905,21 @@ ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
				inode->i_ino, (unsigned long) ce->e_block);
				inode->i_ino, (unsigned long) ce->e_block);
		} else {
		} else {
			lock_buffer(bh);
			lock_buffer(bh);
			if (le32_to_cpu(HDR(bh)->h_refcount) >
			/*
			 * We have to be careful about races with freeing or
			 * rehashing of xattr block. Once we hold buffer lock
			 * xattr block's state is stable so we can check
			 * whether the block got freed / rehashed or not.
			 * Since we unhash mbcache entry under buffer lock when
			 * freeing / rehashing xattr block, checking whether
			 * entry is still hashed is reliable.
			 */
			if (hlist_bl_unhashed(&ce->e_hash_list)) {
				mb_cache_entry_put(ext2_mb_cache, ce);
				unlock_buffer(bh);
				brelse(bh);
				goto again;
			} else if (le32_to_cpu(HDR(bh)->h_refcount) >
				   EXT2_XATTR_REFCOUNT_MAX) {
				   EXT2_XATTR_REFCOUNT_MAX) {
				ea_idebug(inode, "block %ld refcount %d>%d",
				ea_idebug(inode, "block %ld refcount %d>%d",
					  (unsigned long) ce->e_block,
					  (unsigned long) ce->e_block,
@@ -936,13 +928,14 @@ ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
			} else if (!ext2_xattr_cmp(header, HDR(bh))) {
			} else if (!ext2_xattr_cmp(header, HDR(bh))) {
				ea_bdebug(bh, "b_count=%d",
				ea_bdebug(bh, "b_count=%d",
					  atomic_read(&(bh->b_count)));
					  atomic_read(&(bh->b_count)));
				mb_cache_entry_release(ce);
				mb_cache_entry_touch(ext2_mb_cache, ce);
				mb_cache_entry_put(ext2_mb_cache, ce);
				return bh;
				return bh;
			}
			}
			unlock_buffer(bh);
			unlock_buffer(bh);
			brelse(bh);
			brelse(bh);
		}
		}
		ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash);
		ce = mb_cache_entry_find_next(ext2_mb_cache, ce);
	}
	}
	return NULL;
	return NULL;
}
}
@@ -1015,17 +1008,15 @@ static void ext2_xattr_rehash(struct ext2_xattr_header *header,


#undef BLOCK_HASH_SHIFT
#undef BLOCK_HASH_SHIFT


/* Number of hash-table bucket bits for the per-superblock xattr cache. */
#define HASH_BUCKET_BITS 10

/*
 * Allocate the mbcache used to deduplicate identical xattr blocks.
 * Called from ext2_fill_super(); returns NULL on allocation failure.
 */
struct mb_cache *ext2_xattr_create_cache(void)
{
	return mb_cache_create(HASH_BUCKET_BITS);
}

/*
 * Destroy a cache obtained from ext2_xattr_create_cache().
 * Accepts NULL so callers can invoke it unconditionally on teardown paths.
 */
void ext2_xattr_destroy_cache(struct mb_cache *cache)
{
	if (cache)
		mb_cache_destroy(cache);
}
+5 −16
Original line number Original line Diff line number Diff line
@@ -53,6 +53,8 @@ struct ext2_xattr_entry {
#define EXT2_XATTR_SIZE(size) \
#define EXT2_XATTR_SIZE(size) \
	(((size) + EXT2_XATTR_ROUND) & ~EXT2_XATTR_ROUND)
	(((size) + EXT2_XATTR_ROUND) & ~EXT2_XATTR_ROUND)


struct mb_cache;

# ifdef CONFIG_EXT2_FS_XATTR
# ifdef CONFIG_EXT2_FS_XATTR


extern const struct xattr_handler ext2_xattr_user_handler;
extern const struct xattr_handler ext2_xattr_user_handler;
@@ -65,10 +67,9 @@ extern int ext2_xattr_get(struct inode *, int, const char *, void *, size_t);
extern int ext2_xattr_set(struct inode *, int, const char *, const void *, size_t, int);
extern int ext2_xattr_set(struct inode *, int, const char *, const void *, size_t, int);


extern void ext2_xattr_delete_inode(struct inode *);
extern void ext2_xattr_delete_inode(struct inode *);
extern void ext2_xattr_put_super(struct super_block *);


extern int init_ext2_xattr(void);
extern struct mb_cache *ext2_xattr_create_cache(void);
extern void exit_ext2_xattr(void);
extern void ext2_xattr_destroy_cache(struct mb_cache *cache);


extern const struct xattr_handler *ext2_xattr_handlers[];
extern const struct xattr_handler *ext2_xattr_handlers[];


@@ -93,19 +94,7 @@ ext2_xattr_delete_inode(struct inode *inode)
{
{
}
}


static inline void
static inline void ext2_xattr_destroy_cache(struct mb_cache *cache)
ext2_xattr_put_super(struct super_block *sb)
{
}

static inline int
init_ext2_xattr(void)
{
	return 0;
}

static inline void
exit_ext2_xattr(void)
{
{
}
}


+22 −23
Original line number Original line Diff line number Diff line
@@ -41,6 +41,18 @@
 * The fourth extended filesystem constants/structures
 * The fourth extended filesystem constants/structures
 */
 */


/*
 * with AGGRESSIVE_CHECK allocator runs consistency checks over
 * structures. these checks slow things down a lot
 */
#define AGGRESSIVE_CHECK__

/*
 * with DOUBLE_CHECK defined mballoc creates persistent in-core
 * bitmaps, maintains and uses them to check for double allocations
 */
#define DOUBLE_CHECK__

/*
/*
 * Define EXT4FS_DEBUG to produce debug messages
 * Define EXT4FS_DEBUG to produce debug messages
 */
 */
@@ -182,9 +194,9 @@ typedef struct ext4_io_end {
	struct bio		*bio;		/* Linked list of completed
	struct bio		*bio;		/* Linked list of completed
						 * bios covering the extent */
						 * bios covering the extent */
	unsigned int		flag;		/* unwritten or not */
	unsigned int		flag;		/* unwritten or not */
	atomic_t		count;		/* reference counter */
	loff_t			offset;		/* offset in the file */
	loff_t			offset;		/* offset in the file */
	ssize_t			size;		/* size of the extent */
	ssize_t			size;		/* size of the extent */
	atomic_t		count;		/* reference counter */
} ext4_io_end_t;
} ext4_io_end_t;


struct ext4_io_submit {
struct ext4_io_submit {
@@ -1024,13 +1036,8 @@ struct ext4_inode_info {
	 * transaction reserved
	 * transaction reserved
	 */
	 */
	struct list_head i_rsv_conversion_list;
	struct list_head i_rsv_conversion_list;
	/*
	 * Completed IOs that need unwritten extents handling and don't have
	 * transaction reserved
	 */
	atomic_t i_ioend_count;	/* Number of outstanding io_end structs */
	atomic_t i_unwritten; /* Nr. of inflight conversions pending */
	struct work_struct i_rsv_conversion_work;
	struct work_struct i_rsv_conversion_work;
	atomic_t i_unwritten; /* Nr. of inflight conversions pending */


	spinlock_t i_block_reservation_lock;
	spinlock_t i_block_reservation_lock;


@@ -1513,16 +1520,6 @@ static inline void ext4_set_io_unwritten_flag(struct inode *inode,
	}
	}
}
}


static inline ext4_io_end_t *ext4_inode_aio(struct inode *inode)
{
	return inode->i_private;
}

static inline void ext4_inode_aio_set(struct inode *inode, ext4_io_end_t *io)
{
	inode->i_private = io;
}

/*
/*
 * Inode dynamic state flags
 * Inode dynamic state flags
 */
 */
@@ -2506,12 +2503,14 @@ extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
int ext4_inode_is_fast_symlink(struct inode *inode);
int ext4_inode_is_fast_symlink(struct inode *inode);
struct buffer_head *ext4_getblk(handle_t *, struct inode *, ext4_lblk_t, int);
struct buffer_head *ext4_getblk(handle_t *, struct inode *, ext4_lblk_t, int);
struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int);
struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int);
int ext4_get_block_write(struct inode *inode, sector_t iblock,
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create);
			     struct buffer_head *bh_result, int create);
int ext4_dax_mmap_get_block(struct inode *inode, sector_t iblock,
int ext4_dax_mmap_get_block(struct inode *inode, sector_t iblock,
			    struct buffer_head *bh_result, int create);
			    struct buffer_head *bh_result, int create);
int ext4_get_block(struct inode *inode, sector_t iblock,
int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create);
		   struct buffer_head *bh_result, int create);
int ext4_dio_get_block(struct inode *inode, sector_t iblock,
		       struct buffer_head *bh_result, int create);
int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create);
			   struct buffer_head *bh, int create);
int ext4_walk_page_buffers(handle_t *handle,
int ext4_walk_page_buffers(handle_t *handle,
@@ -2559,6 +2558,9 @@ extern void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim);
					int used, int quota_claim);
extern int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk,
extern int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk,
			      ext4_fsblk_t pblk, ext4_lblk_t len);
			      ext4_fsblk_t pblk, ext4_lblk_t len);
extern int ext4_get_next_extent(struct inode *inode, ext4_lblk_t lblk,
				unsigned int map_len,
				struct extent_status *result);


/* indirect.c */
/* indirect.c */
extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
@@ -3285,10 +3287,7 @@ static inline void ext4_inode_resume_unlocked_dio(struct inode *inode)
#define EXT4_WQ_HASH_SZ		37
#define EXT4_WQ_HASH_SZ		37
#define ext4_ioend_wq(v)   (&ext4__ioend_wq[((unsigned long)(v)) %\
#define ext4_ioend_wq(v)   (&ext4__ioend_wq[((unsigned long)(v)) %\
					    EXT4_WQ_HASH_SZ])
					    EXT4_WQ_HASH_SZ])
#define ext4_aio_mutex(v)  (&ext4__aio_mutex[((unsigned long)(v)) %\
					     EXT4_WQ_HASH_SZ])
extern wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
extern wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
extern struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ];


#define EXT4_RESIZING	0
#define EXT4_RESIZING	0
extern int ext4_resize_begin(struct super_block *sb);
extern int ext4_resize_begin(struct super_block *sb);
Loading