Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 50e8a289 authored by Marcin Slusarz, committed by Linus Torvalds
Browse files

ext3: replace all adds to little endians variables with le*_add_cpu



replace all:
	little_endian_variable = cpu_to_leX(leX_to_cpu(little_endian_variable) +
				expression_in_cpu_byteorder);
with:
	leX_add_cpu(&little_endian_variable, expression_in_cpu_byteorder);
sparse didn't generate any new warnings with this patch.

Signed-off-by: Marcin Slusarz <marcin.slusarz@gmail.com>
Cc: Mark Fasheh <mark.fasheh@oracle.com>
Cc: David Chinner <dgc@sgi.com>
Cc: Timothy Shimmin <tes@sgi.com>
Cc: <linux-ext4@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8b5f6883
Loading
Loading
Loading
Loading
+2 −5
Original line number Original line Diff line number Diff line
@@ -630,9 +630,7 @@ do_more:
	jbd_unlock_bh_state(bitmap_bh);
	jbd_unlock_bh_state(bitmap_bh);


	spin_lock(sb_bgl_lock(sbi, block_group));
	spin_lock(sb_bgl_lock(sbi, block_group));
	desc->bg_free_blocks_count =
	le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
		cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
			group_freed);
	spin_unlock(sb_bgl_lock(sbi, block_group));
	spin_unlock(sb_bgl_lock(sbi, block_group));
	percpu_counter_add(&sbi->s_freeblocks_counter, count);
	percpu_counter_add(&sbi->s_freeblocks_counter, count);


@@ -1696,8 +1694,7 @@ allocated:
			ret_block, goal_hits, goal_attempts);
			ret_block, goal_hits, goal_attempts);


	spin_lock(sb_bgl_lock(sbi, group_no));
	spin_lock(sb_bgl_lock(sbi, group_no));
	gdp->bg_free_blocks_count =
	le16_add_cpu(&gdp->bg_free_blocks_count, -num);
			cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num);
	spin_unlock(sb_bgl_lock(sbi, group_no));
	spin_unlock(sb_bgl_lock(sbi, group_no));
	percpu_counter_sub(&sbi->s_freeblocks_counter, num);
	percpu_counter_sub(&sbi->s_freeblocks_counter, num);


+4 −8
Original line number Original line Diff line number Diff line
@@ -164,11 +164,9 @@ void ext3_free_inode (handle_t *handle, struct inode * inode)


		if (gdp) {
		if (gdp) {
			spin_lock(sb_bgl_lock(sbi, block_group));
			spin_lock(sb_bgl_lock(sbi, block_group));
			gdp->bg_free_inodes_count = cpu_to_le16(
			le16_add_cpu(&gdp->bg_free_inodes_count, 1);
				le16_to_cpu(gdp->bg_free_inodes_count) + 1);
			if (is_directory)
			if (is_directory)
				gdp->bg_used_dirs_count = cpu_to_le16(
				le16_add_cpu(&gdp->bg_used_dirs_count, -1);
				  le16_to_cpu(gdp->bg_used_dirs_count) - 1);
			spin_unlock(sb_bgl_lock(sbi, block_group));
			spin_unlock(sb_bgl_lock(sbi, block_group));
			percpu_counter_inc(&sbi->s_freeinodes_counter);
			percpu_counter_inc(&sbi->s_freeinodes_counter);
			if (is_directory)
			if (is_directory)
@@ -527,11 +525,9 @@ got:
	err = ext3_journal_get_write_access(handle, bh2);
	err = ext3_journal_get_write_access(handle, bh2);
	if (err) goto fail;
	if (err) goto fail;
	spin_lock(sb_bgl_lock(sbi, group));
	spin_lock(sb_bgl_lock(sbi, group));
	gdp->bg_free_inodes_count =
	le16_add_cpu(&gdp->bg_free_inodes_count, -1);
		cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
	if (S_ISDIR(mode)) {
	if (S_ISDIR(mode)) {
		gdp->bg_used_dirs_count =
		le16_add_cpu(&gdp->bg_used_dirs_count, 1);
			cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
	}
	}
	spin_unlock(sb_bgl_lock(sbi, group));
	spin_unlock(sb_bgl_lock(sbi, group));
	BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
	BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
+4 −8
Original line number Original line Diff line number Diff line
@@ -518,8 +518,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
	EXT3_SB(sb)->s_gdb_count++;
	EXT3_SB(sb)->s_gdb_count++;
	kfree(o_group_desc);
	kfree(o_group_desc);


	es->s_reserved_gdt_blocks =
	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
		cpu_to_le16(le16_to_cpu(es->s_reserved_gdt_blocks) - 1);
	ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
	ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);


	return 0;
	return 0;
@@ -890,10 +889,8 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
	 * blocks/inodes before the group is live won't actually let us
	 * blocks/inodes before the group is live won't actually let us
	 * allocate the new space yet.
	 * allocate the new space yet.
	 */
	 */
	es->s_blocks_count = cpu_to_le32(le32_to_cpu(es->s_blocks_count) +
	le32_add_cpu(&es->s_blocks_count, input->blocks_count);
		input->blocks_count);
	le32_add_cpu(&es->s_inodes_count, EXT3_INODES_PER_GROUP(sb));
	es->s_inodes_count = cpu_to_le32(le32_to_cpu(es->s_inodes_count) +
		EXT3_INODES_PER_GROUP(sb));


	/*
	/*
	 * We need to protect s_groups_count against other CPUs seeing
	 * We need to protect s_groups_count against other CPUs seeing
@@ -926,8 +923,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)


	/* Update the reserved block counts only once the new group is
	/* Update the reserved block counts only once the new group is
	 * active. */
	 * active. */
	es->s_r_blocks_count = cpu_to_le32(le32_to_cpu(es->s_r_blocks_count) +
	le32_add_cpu(&es->s_r_blocks_count, input->reserved_blocks);
		input->reserved_blocks);


	/* Update the free space counts */
	/* Update the free space counts */
	percpu_counter_add(&sbi->s_freeblocks_counter,
	percpu_counter_add(&sbi->s_freeblocks_counter,
+1 −1
Original line number Original line Diff line number Diff line
@@ -1222,7 +1222,7 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
#endif
#endif
	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
		es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT);
		es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT);
	es->s_mnt_count=cpu_to_le16(le16_to_cpu(es->s_mnt_count) + 1);
	le16_add_cpu(&es->s_mnt_count, 1);
	es->s_mtime = cpu_to_le32(get_seconds());
	es->s_mtime = cpu_to_le32(get_seconds());
	ext3_update_dynamic_rev(sb);
	ext3_update_dynamic_rev(sb);
	EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
	EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
+2 −4
Original line number Original line Diff line number Diff line
@@ -492,8 +492,7 @@ ext3_xattr_release_block(handle_t *handle, struct inode *inode,
		get_bh(bh);
		get_bh(bh);
		ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
		ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
	} else {
	} else {
		BHDR(bh)->h_refcount = cpu_to_le32(
		le32_add_cpu(&BHDR(bh)->h_refcount, -1);
				le32_to_cpu(BHDR(bh)->h_refcount) - 1);
		error = ext3_journal_dirty_metadata(handle, bh);
		error = ext3_journal_dirty_metadata(handle, bh);
		if (IS_SYNC(inode))
		if (IS_SYNC(inode))
			handle->h_sync = 1;
			handle->h_sync = 1;
@@ -780,8 +779,7 @@ inserted:
				if (error)
				if (error)
					goto cleanup_dquot;
					goto cleanup_dquot;
				lock_buffer(new_bh);
				lock_buffer(new_bh);
				BHDR(new_bh)->h_refcount = cpu_to_le32(1 +
				le32_add_cpu(&BHDR(new_bh)->h_refcount, 1);
					le32_to_cpu(BHDR(new_bh)->h_refcount));
				ea_bdebug(new_bh, "reusing; refcount now=%d",
				ea_bdebug(new_bh, "reusing; refcount now=%d",
					le32_to_cpu(BHDR(new_bh)->h_refcount));
					le32_to_cpu(BHDR(new_bh)->h_refcount));
				unlock_buffer(new_bh);
				unlock_buffer(new_bh);