Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 559b6d90 authored by Linus Torvalds
Browse files
Pull btrfs cleanups and fixes from Chris Mason:
 "We have another round of fixes and a few cleanups.

  I have a fix for short returns from btrfs_copy_from_user, which
  finally nails down a very hard to find regression we added in v4.6.

  Dave is pushing around gfp parameters, mostly to cleanup internal apis
  and make it a little more consistent.

  The rest are smaller fixes, and one speelling fixup patch"

* 'for-linus-4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (22 commits)
  Btrfs: fix handling of faults from btrfs_copy_from_user
  btrfs: fix string and comment grammatical issues and typos
  btrfs: scrub: Set bbio to NULL before calling btrfs_map_block
  Btrfs: fix unexpected return value of fiemap
  Btrfs: free sys_array eb as soon as possible
  btrfs: sink gfp parameter to convert_extent_bit
  btrfs: make state preallocation more speculative in __set_extent_bit
  btrfs: untangle gotos a bit in convert_extent_bit
  btrfs: untangle gotos a bit in __clear_extent_bit
  btrfs: untangle gotos a bit in __set_extent_bit
  btrfs: sink gfp parameter to set_record_extent_bits
  btrfs: sink gfp parameter to set_extent_new
  btrfs: sink gfp parameter to set_extent_defrag
  btrfs: sink gfp parameter to set_extent_delalloc
  btrfs: sink gfp parameter to clear_extent_dirty
  btrfs: sink gfp parameter to clear_record_extent_bits
  btrfs: sink gfp parameter to clear_extent_bits
  btrfs: sink gfp parameter to set_extent_bits
  btrfs: make find_workspace warn if there are no workspaces
  btrfs: make find_workspace always succeed
  ...
parents aa00edc1 56244ef1
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -1939,7 +1939,7 @@ static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
 * from ipath->fspath->val[i].
 * from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths in recored in ipath->fspath->elem_missed, otherwise,
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 * have been needed to return all paths.
 */
 */
+1 −1
Original line number Original line Diff line number Diff line
@@ -313,7 +313,7 @@ struct btrfs_dio_private {
	struct bio *dio_bio;
	struct bio *dio_bio;


	/*
	/*
	 * The original bio may be splited to several sub-bios, this is
	 * The original bio may be split to several sub-bios, this is
	 * done during endio of sub-bios
	 * done during endio of sub-bios
	 */
	 */
	int (*subio_endio)(struct inode *, struct btrfs_io_bio *, int);
	int (*subio_endio)(struct inode *, struct btrfs_io_bio *, int);
+1 −1
Original line number Original line Diff line number Diff line
@@ -1939,7 +1939,7 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
		/*
		/*
		 * Clear all references of this block. Do not free
		 * Clear all references of this block. Do not free
		 * the block itself even if is not referenced anymore
		 * the block itself even if is not referenced anymore
		 * because it still carries valueable information
		 * because it still carries valuable information
		 * like whether it was ever written and IO completed.
		 * like whether it was ever written and IO completed.
		 */
		 */
		list_for_each_entry_safe(l, tmp, &block->ref_to_list,
		list_for_each_entry_safe(l, tmp, &block->ref_to_list,
+7 −7
Original line number Original line Diff line number Diff line
@@ -156,7 +156,7 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root)


		/*
		/*
		 * RCU really hurts here, we could free up the root node because
		 * RCU really hurts here, we could free up the root node because
		 * it was cow'ed but we may not get the new root node yet so do
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 * synchronize_rcu and try again.
		 */
		 */
@@ -955,7 +955,7 @@ int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
			      struct extent_buffer *buf)
{
{
	/*
	/*
	 * Tree blocks not in refernece counted trees and tree roots
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 * we know the block is not shared.
@@ -1270,7 +1270,7 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,


/*
/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewinded (until we reach something older than
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 * time_seq).
 */
 */
static void
static void
@@ -1345,7 +1345,7 @@ __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
}
}


/*
/*
 * Called with eb read locked. If the buffer cannot be rewinded, the same buffer
 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * returned buffer is always read-locked. If the returned buffer is not the
 * returned buffer is always read-locked. If the returned buffer is not the
 * input buffer, the lock on the input buffer is released and the input buffer
 * input buffer, the lock on the input buffer is released and the input buffer
@@ -1516,7 +1516,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
	 * 3) the root is not forced COW.
	 * 3) the root is not forced COW.
	 *
	 *
	 * What is forced COW:
	 * What is forced COW:
	 *    when we create snapshot during commiting the transaction,
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished coping src root, we must COW the shared
	 *    after we've finished coping src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 *    block to ensure the metadata consistency.
	 */
	 */
@@ -1531,7 +1531,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,


/*
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet
 * once per transaction, as long as it hasn't been written yet
 */
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
@@ -2986,7 +2986,7 @@ int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
		btrfs_unlock_up_safe(p, level + 1);
		btrfs_unlock_up_safe(p, level + 1);


		/*
		/*
		 * Since we can unwind eb's we want to do a real search every
		 * Since we can unwind ebs we want to do a real search every
		 * time.
		 * time.
		 */
		 */
		prev_cmp = -1;
		prev_cmp = -1;
+3 −3
Original line number Original line Diff line number Diff line
@@ -89,7 +89,7 @@ static const int btrfs_csum_sizes[] = { 4 };
/* four bytes for CRC32 */
/* four bytes for CRC32 */
#define BTRFS_EMPTY_DIR_SIZE 0
#define BTRFS_EMPTY_DIR_SIZE 0


/* spefic to btrfs_map_block(), therefore not in include/linux/blk_types.h */
/* specific to btrfs_map_block(), therefore not in include/linux/blk_types.h */
#define REQ_GET_READ_MIRRORS	(1 << 30)
#define REQ_GET_READ_MIRRORS	(1 << 30)


/* ioprio of readahead is set to idle */
/* ioprio of readahead is set to idle */
@@ -431,7 +431,7 @@ struct btrfs_space_info {
	 * bytes_pinned does not reflect the bytes that will be pinned once the
	 * bytes_pinned does not reflect the bytes that will be pinned once the
	 * delayed refs are flushed, so this counter is inc'ed every time we
	 * delayed refs are flushed, so this counter is inc'ed every time we
	 * call btrfs_free_extent so it is a realtime count of what will be
	 * call btrfs_free_extent so it is a realtime count of what will be
	 * freed once the transaction is committed.  It will be zero'ed every
	 * freed once the transaction is committed.  It will be zeroed every
	 * time the transaction commits.
	 * time the transaction commits.
	 */
	 */
	struct percpu_counter total_bytes_pinned;
	struct percpu_counter total_bytes_pinned;
@@ -1401,7 +1401,7 @@ static inline void btrfs_init_map_token (struct btrfs_map_token *token)
	token->kaddr = NULL;
	token->kaddr = NULL;
}
}


/* some macros to generate set/get funcs for the struct fields.  This
/* some macros to generate set/get functions for the struct fields.  This
 * assumes there is a lefoo_to_cpu for every type, so lets make a simple
 * assumes there is a lefoo_to_cpu for every type, so lets make a simple
 * one for u8:
 * one for u8:
 */
 */
Loading