Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1538a093 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: add checksum calculation when clearing UNINIT flag in ext4_new_inode
  ext4: Mark the buffer_heads as dirty and uptodate after prepare_write
  ext4: calculate journal credits correctly
  ext4: wait on all pending commits in ext4_sync_fs()
  ext4: Convert to host order before using the values.
  ext4: fix missing ext4_unlock_group in error path
  jbd2: deregister proc on failure in jbd2_journal_init_inode
  jbd2: don't give up looking for space so easily in __jbd2_log_wait_for_space
  jbd: don't give up looking for space so easily in __log_wait_for_space
parents 4bab0ea1 23712a9c
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -718,6 +718,8 @@ got:
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			free = ext4_free_blocks_after_init(sb, group, gdp);
			gdp->bg_free_blocks_count = cpu_to_le16(free);
			gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
								gdp);
		}
		spin_unlock(sb_bgl_lock(sbi, group));

+5 −2
Original line number Diff line number Diff line
@@ -2329,6 +2329,8 @@ static int ext4_da_writepage(struct page *page,
			unlock_page(page);
			return 0;
		}
		/* now mark the buffer_heads as dirty and uptodate */
		block_commit_write(page, 0, PAGE_CACHE_SIZE);
	}

	if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
@@ -4580,9 +4582,10 @@ static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks,
/*
 * Return the number of index/leaf blocks a transaction may touch when
 * modifying @nrblocks data blocks of @inode.  Dispatches to the
 * extent-tree or indirect-block estimator depending on the inode's
 * on-disk format.
 *
 * @inode:    inode whose metadata cost is being estimated
 * @nrblocks: number of data blocks being modified
 * @chunk:    non-zero if the blocks are one contiguous chunk; must be
 *            forwarded so the helpers can credit a single index path
 *            instead of one per block (commit: "ext4: calculate journal
 *            credits correctly")
 */
static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
		return ext4_indirect_trans_blocks(inode, nrblocks, chunk);
	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
}

/*
 * Account for index blocks, block groups bitmaps and block group
 * descriptor blocks if modify datablocks and index blocks
+1 −0
Original line number Diff line number Diff line
@@ -4441,6 +4441,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
		else if (block >= (entry->start_blk + entry->count))
			n = &(*n)->rb_right;
		else {
			ext4_unlock_group(sb, group);
			ext4_error(sb, __func__,
			    "Double free of blocks %d (%d %d)\n",
			    block, entry->start_blk, entry->count);
+10 −14
Original line number Diff line number Diff line
@@ -1458,9 +1458,8 @@ static int ext4_fill_flex_info(struct super_block *sb)

	/* We allocate both existing and potentially added groups */
	flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
			    ((sbi->s_es->s_reserved_gdt_blocks +1 ) <<
			      EXT4_DESC_PER_BLOCK_BITS(sb))) /
			   groups_per_flex;
			((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) <<
			      EXT4_DESC_PER_BLOCK_BITS(sb))) / groups_per_flex;
	sbi->s_flex_groups = kzalloc(flex_group_count *
				     sizeof(struct flex_groups), GFP_KERNEL);
	if (sbi->s_flex_groups == NULL) {
@@ -2885,12 +2884,9 @@ int ext4_force_commit(struct super_block *sb)
/*
 * Ext4 always journals updates to the superblock itself, so we don't
 * have to propagate any other updates to the superblock on disk at this
 * point.  Just start an async writeback to get the buffers on their way
 * to the disk.
 *
 * This implicitly triggers the writebehind on sync().
 * point.  (We can probably nuke this function altogether, and remove
 * any mention to sb->s_dirt in all of fs/ext4; eventual cleanup...)
 */

static void ext4_write_super(struct super_block *sb)
{
	if (mutex_trylock(&sb->s_lock) != 0)
@@ -2900,15 +2896,15 @@ static void ext4_write_super(struct super_block *sb)

/*
 * Sync the filesystem's journal state.
 *
 * @sb:   superblock being synced
 * @wait: non-zero for a synchronous sync(2)-style request
 *
 * When @wait is set we must not return until every pending commit has
 * reached the disk, so force a full commit and propagate its result.
 * For the async case it is enough to kick the journal thread: start a
 * commit of the current transaction without waiting for it.
 */
static int ext4_sync_fs(struct super_block *sb, int wait)
{
	int ret = 0;

	trace_mark(ext4_sync_fs, "dev %s wait %d", sb->s_id, wait);
	sb->s_dirt = 0;
	if (wait)
		ret = ext4_force_commit(sb);
	else
		/* Fire-and-forget: returns immediately once a commit is queued. */
		jbd2_journal_start_commit(EXT4_SB(sb)->s_journal, NULL);
	return ret;
}

/*
+24 −7
Original line number Diff line number Diff line
@@ -115,7 +115,7 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
 */
void __log_wait_for_space(journal_t *journal)
{
	int nblocks;
	int nblocks, space_left;
	assert_spin_locked(&journal->j_state_lock);

	nblocks = jbd_space_needed(journal);
@@ -128,25 +128,42 @@ void __log_wait_for_space(journal_t *journal)
		/*
		 * Test again, another process may have checkpointed while we
		 * were waiting for the checkpoint lock. If there are no
		 * outstanding transactions there is nothing to checkpoint and
		 * we can't make progress. Abort the journal in this case.
		 * transactions ready to be checkpointed, try to recover
		 * journal space by calling cleanup_journal_tail(), and if
		 * that doesn't work, by waiting for the currently committing
		 * transaction to complete.  If there is absolutely no way
		 * to make progress, this is either a BUG or corrupted
		 * filesystem, so abort the journal and leave a stack
		 * trace for forensic evidence.
		 */
		spin_lock(&journal->j_state_lock);
		spin_lock(&journal->j_list_lock);
		nblocks = jbd_space_needed(journal);
		if (__log_space_left(journal) < nblocks) {
		space_left = __log_space_left(journal);
		if (space_left < nblocks) {
			int chkpt = journal->j_checkpoint_transactions != NULL;
			tid_t tid = 0;

			if (journal->j_committing_transaction)
				tid = journal->j_committing_transaction->t_tid;
			spin_unlock(&journal->j_list_lock);
			spin_unlock(&journal->j_state_lock);
			if (chkpt) {
				log_do_checkpoint(journal);
			} else if (cleanup_journal_tail(journal) == 0) {
				/* We were able to recover space; yay! */
				;
			} else if (tid) {
				log_wait_commit(journal, tid);
			} else {
				printk(KERN_ERR "%s: no transactions\n",
				       __func__);
				printk(KERN_ERR "%s: needed %d blocks and "
				       "only had %d space available\n",
				       __func__, nblocks, space_left);
				printk(KERN_ERR "%s: no way to get more "
				       "journal space\n", __func__);
				WARN_ON(1);
				journal_abort(journal, 0);
			}

			spin_lock(&journal->j_state_lock);
		} else {
			spin_unlock(&journal->j_list_lock);
Loading