
Commit d2a17637 authored by Mingming Cao, committed by Theodore Ts'o

ext4: delayed allocation ENOSPC handling



This patch does block reservation for delayed allocation, to avoid
ENOSPC later at page flush time.

Blocks (data and metadata) are reserved at da_write_begin() time, the
free blocks counter is updated at that point, and the number of
reserved blocks is stored in a per-inode counter.

At writepage time, the unused reserved metadata blocks are returned.
At unlink/truncate time, reserved blocks are properly released.

Updated fix from Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
to fix the old allocator's block reservation accounting with delalloc; it adds
a lock to guard the counters and also fixes the reservation for metadata blocks.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent e8ced39d
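In short, the scheme described above claims blocks from the filesystem free blocks counter at da_write_begin() time and remembers them in per-inode counters, so ENOSPC is reported at write time rather than at page flush. The stand-alone C program below is only a sketch of that bookkeeping under a per-inode lock; the names (fs_free_blocks, reserve_blocks, release_blocks) are illustrative and are not the kernel interfaces added by this patch.

#include <pthread.h>

/* Toy model of the per-inode delalloc reservation counters added by this
 * patch (i_reserved_data_blocks and friends); user-space C, not kernel code. */
struct inode_resv {
	unsigned long reserved_data_blocks;	/* data blocks reserved but not yet allocated */
	unsigned long reserved_meta_blocks;	/* worst-case metadata reserved with them */
	unsigned long allocated_meta_blocks;	/* metadata blocks actually allocated so far */
	pthread_mutex_t lock;			/* plays the role of i_block_reservation_lock */
};

static unsigned long fs_free_blocks = 1000;	/* stand-in for the free blocks counter */
static pthread_mutex_t fs_lock = PTHREAD_MUTEX_INITIALIZER;

/* write_begin time: claim data + metadata blocks now, or fail with "ENOSPC". */
static int reserve_blocks(struct inode_resv *ei, unsigned long data, unsigned long meta)
{
	pthread_mutex_lock(&fs_lock);
	if (fs_free_blocks < data + meta) {
		pthread_mutex_unlock(&fs_lock);
		return -1;			/* the kernel would return -ENOSPC here */
	}
	fs_free_blocks -= data + meta;		/* blocks are spoken for from now on */
	pthread_mutex_unlock(&fs_lock);

	pthread_mutex_lock(&ei->lock);
	ei->reserved_data_blocks += data;
	ei->reserved_meta_blocks += meta;
	pthread_mutex_unlock(&ei->lock);
	return 0;
}

/* writepage/truncate time: drop the reservation for 'used' blocks that were
 * actually allocated and return 'to_free' unused blocks to the free pool. */
static void release_blocks(struct inode_resv *ei, unsigned long used, unsigned long to_free)
{
	pthread_mutex_lock(&ei->lock);
	ei->reserved_data_blocks -= used + to_free;
	pthread_mutex_unlock(&ei->lock);

	pthread_mutex_lock(&fs_lock);
	fs_free_blocks += to_free;		/* only the unused part goes back */
	pthread_mutex_unlock(&fs_lock);
}

int main(void)
{
	struct inode_resv ei = { 0, 0, 0, PTHREAD_MUTEX_INITIALIZER };

	if (reserve_blocks(&ei, 10, 2) == 0)	/* like da_write_begin() */
		release_blocks(&ei, 0, 12);	/* like truncate: nothing allocated, give it all back */
	return 0;
}

The declaration of ext4_da_release_space() in the ext4.h hunk further down corresponds to the release half of this accounting; the reserve half and the writepage-time return of unused metadata blocks live in parts of the diff that are not rendered on this page.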
+32 −17
@@ -1701,7 +1701,12 @@ ext4_fsblk_t ext4_old_new_blocks(handle_t *handle, struct inode *inode,
 	}
 
 	sbi = EXT4_SB(sb);
-	*count = ext4_has_free_blocks(sbi, *count);
+	if (!EXT4_I(inode)->i_delalloc_reserved_flag) {
+		/*
+		 * With delalloc we already reserved the blocks
+		 */
+		*count = ext4_has_free_blocks(sbi, *count);
+	}
 	if (*count == 0) {
 		*errp = -ENOSPC;
 		return 0;	/*return with ENOSPC error */
@@ -1902,6 +1907,7 @@ allocated:
 	le16_add_cpu(&gdp->bg_free_blocks_count, -num);
 	gdp->bg_checksum = ext4_group_desc_csum(sbi, group_no, gdp);
 	spin_unlock(sb_bgl_lock(sbi, group_no));
-	percpu_counter_sub(&sbi->s_freeblocks_counter, num);
+	if (!EXT4_I(inode)->i_delalloc_reserved_flag)
+		percpu_counter_sub(&sbi->s_freeblocks_counter, num);
 
 	if (sbi->s_log_groups_per_flex) {
@@ -1976,40 +1982,49 @@ static ext4_fsblk_t do_blk_alloc(handle_t *handle, struct inode *inode,
 }
 
 /*
- * ext4_new_meta_block() -- allocate block for meta data (indexing) blocks
+ * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
  *
  * @handle:             handle to this transaction
  * @inode:              file inode
  * @goal:               given target block(filesystem wide)
+ * @count:		total number of blocks need
  * @errp:               error code
  *
- * Return allocated block number on success
+ * Return 1st allocated block numberon success, *count stores total account
+ * error stores in errp pointer
  */
-ext4_fsblk_t ext4_new_meta_block(handle_t *handle, struct inode *inode,
-		ext4_fsblk_t goal, int *errp)
+ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
+		ext4_fsblk_t goal, unsigned long *count, int *errp)
 {
-	unsigned long count = 1;
-	return do_blk_alloc(handle, inode, 0, goal,
-			&count, errp, EXT4_META_BLOCK);
+	ext4_fsblk_t ret;
+	ret = do_blk_alloc(handle, inode, 0, goal,
+				count, errp, EXT4_META_BLOCK);
+	/*
+	 * Account for the allocated meta blocks
+	 */
+	if (!(*errp)) {
+		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+		EXT4_I(inode)->i_allocated_meta_blocks += *count;
+		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+	}
+	return ret;
 }
 
 /*
- * ext4_new_meta_blocks() -- allocate block for meta data (indexing) blocks
+ * ext4_new_meta_block() -- allocate block for meta data (indexing) blocks
  *
  * @handle:             handle to this transaction
  * @inode:              file inode
  * @goal:               given target block(filesystem wide)
- * @count:		total number of blocks need
  * @errp:               error code
  *
- * Return 1st allocated block numberon success, *count stores total account
- * error stores in errp pointer
+ * Return allocated block number on success
  */
-ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
-		ext4_fsblk_t goal, unsigned long *count, int *errp)
+ext4_fsblk_t ext4_new_meta_block(handle_t *handle, struct inode *inode,
+		ext4_fsblk_t goal, int *errp)
 {
-	return do_blk_alloc(handle, inode, 0, goal,
-			count, errp, EXT4_META_BLOCK);
+	unsigned long count = 1;
+	return ext4_new_meta_blocks(handle, inode, goal, &count, errp);
 }
 
 /*
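The hunk above turns ext4_new_meta_block() into a wrapper around the new multi-block ext4_new_meta_blocks(), which also bumps i_allocated_meta_blocks for the delalloc accounting. A hypothetical caller of the new interface might look like the sketch below; alloc_index_blocks and the request size of 4 are made up for illustration, only ext4_new_meta_blocks() itself comes from the patch.

/* Hypothetical caller, for illustration only: ask for up to 4 index blocks
 * and bail out if none could be allocated. */
static ext4_fsblk_t alloc_index_blocks(handle_t *handle, struct inode *inode,
				       ext4_fsblk_t goal)
{
	unsigned long count = 4;	/* in: blocks wanted, out: blocks actually allocated */
	int err = 0;
	ext4_fsblk_t first;

	first = ext4_new_meta_blocks(handle, inode, goal, &count, &err);
	if (err)
		return 0;		/* err holds -ENOSPC or another error code */
	/* blocks first .. first + count - 1 are now ours; count may be < 4 */
	return first;
}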
+2 −1
@@ -129,7 +129,8 @@ static int ext4_readdir(struct file * filp,
 		struct buffer_head *bh = NULL;
 
 		map_bh.b_state = 0;
-		err = ext4_get_blocks_wrap(NULL, inode, blk, 1, &map_bh, 0, 0);
+		err = ext4_get_blocks_wrap(NULL, inode, blk, 1, &map_bh,
+						0, 0, 0);
 		if (err > 0) {
 			pgoff_t index = map_bh.b_blocknr >>
 					(PAGE_CACHE_SHIFT - inode->i_blkbits);
+5 −1
@@ -74,6 +74,9 @@
 #define EXT4_MB_HINT_GOAL_ONLY		256
 /* goal is meaningful */
 #define EXT4_MB_HINT_TRY_GOAL		512
+/* blocks already pre-reserved by delayed allocation */
+#define EXT4_MB_DELALLOC_RESERVED      1024
+
 
 struct ext4_allocation_request {
 	/* target inode for block we're allocating */
@@ -1041,6 +1044,7 @@ extern void ext4_mb_update_group_info(struct ext4_group_info *grp,
 
 
 /* inode.c */
+void ext4_da_release_space(struct inode *inode, int used, int to_free);
 int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
 		struct buffer_head *bh, ext4_fsblk_t blocknr);
 struct buffer_head *ext4_getblk(handle_t *, struct inode *,
@@ -1234,7 +1238,7 @@ extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
 extern int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode,
			sector_t block, unsigned long max_blocks,
			struct buffer_head *bh, int create,
-			int extend_disksize);
+			int extend_disksize, int flag);
 #endif	/* __KERNEL__ */
 
 #endif	/* _EXT4_H */
+1 −0
@@ -212,6 +212,7 @@ static inline int ext4_ext_get_actual_len(struct ext4_extent *ext)
 		(le16_to_cpu(ext->ee_len) - EXT_INIT_MAX_LEN));
 }
 
+extern int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks);
 extern ext4_fsblk_t idx_pblock(struct ext4_extent_idx *);
 extern void ext4_ext_store_pblock(struct ext4_extent *, ext4_fsblk_t);
 extern int ext4_extent_tree_init(handle_t *, struct inode *);
+7 −0
@@ -163,6 +163,13 @@ struct ext4_inode_info {
 	/* mballoc */
 	struct list_head i_prealloc_list;
 	spinlock_t i_prealloc_lock;
+
+	/* allocation reservation info for delalloc */
+	unsigned long i_reserved_data_blocks;
+	unsigned long i_reserved_meta_blocks;
+	unsigned long i_allocated_meta_blocks;
+	unsigned short i_delalloc_reserved_flag;
+	spinlock_t i_block_reservation_lock;
 };
 
 #endif	/* _EXT4_I */
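The new per-inode fields are only useful if they start from a known state. Presumably the in-core inode setup elsewhere in this patch (in a part of the diff not rendered on this page) zeroes the counters and initializes the spinlock roughly along these lines; this is an assumption, not a quote from the diff:

	/* assumed initialization when the in-core inode is created */
	ei->i_reserved_data_blocks = 0;
	ei->i_reserved_meta_blocks = 0;
	ei->i_allocated_meta_blocks = 0;
	ei->i_delalloc_reserved_flag = 0;
	spin_lock_init(&ei->i_block_reservation_lock);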