Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 73d32ce2 authored by David Sterba
Browse files

Merge branch 'misc-4.7' into for-chris-4.7-20160516

parents 02da2d72 4673272f
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1991,7 +1991,7 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,

	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
	if (!ifp) {
		kfree(fspath);
		vfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

+61 −24
Original line number Diff line number Diff line
@@ -743,8 +743,11 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
static struct {
	struct list_head idle_ws;
	spinlock_t ws_lock;
	int num_ws;
	atomic_t alloc_ws;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
} btrfs_comp_ws[BTRFS_COMPRESS_TYPES];

@@ -758,16 +761,34 @@ void __init btrfs_init_compress(void)
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		struct list_head *workspace;

		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
		atomic_set(&btrfs_comp_ws[i].alloc_ws, 0);
		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);

		/*
		 * Preallocate one workspace for each compression type so
		 * we can guarantee forward progress in the worst case
		 */
		workspace = btrfs_compress_op[i]->alloc_workspace();
		if (IS_ERR(workspace)) {
			printk(KERN_WARNING
	"BTRFS: cannot preallocate compression workspace, will try later");
		} else {
			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
			btrfs_comp_ws[i].free_ws = 1;
			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
		}
	}
}

/*
 * this finds an available workspace or allocates a new one
 * ERR_PTR is returned if things go bad.
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation makes a forward progress guarantees and we do not return
 * errors.
 */
static struct list_head *find_workspace(int type)
{
@@ -777,36 +798,58 @@ static struct list_head *find_workspace(int type)

	struct list_head *idle_ws	= &btrfs_comp_ws[idx].idle_ws;
	spinlock_t *ws_lock		= &btrfs_comp_ws[idx].ws_lock;
	atomic_t *alloc_ws		= &btrfs_comp_ws[idx].alloc_ws;
	atomic_t *total_ws		= &btrfs_comp_ws[idx].total_ws;
	wait_queue_head_t *ws_wait	= &btrfs_comp_ws[idx].ws_wait;
	int *num_ws			= &btrfs_comp_ws[idx].num_ws;
	int *free_ws			= &btrfs_comp_ws[idx].free_ws;
again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*num_ws)--;
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;

	}
	if (atomic_read(alloc_ws) > cpus) {
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(alloc_ws) > cpus && !*num_ws)
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(alloc_ws);
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	workspace = btrfs_compress_op[idx]->alloc_workspace();
	if (IS_ERR(workspace)) {
		atomic_dec(alloc_ws);
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				printk(KERN_WARNING
			    "no compression workspaces, low memory, retrying");
			}
		}
		goto again;
	}
	return workspace;
}
@@ -820,21 +863,21 @@ static void free_workspace(int type, struct list_head *workspace)
	int idx = type - 1;
	struct list_head *idle_ws	= &btrfs_comp_ws[idx].idle_ws;
	spinlock_t *ws_lock		= &btrfs_comp_ws[idx].ws_lock;
	atomic_t *alloc_ws		= &btrfs_comp_ws[idx].alloc_ws;
	atomic_t *total_ws		= &btrfs_comp_ws[idx].total_ws;
	wait_queue_head_t *ws_wait	= &btrfs_comp_ws[idx].ws_wait;
	int *num_ws			= &btrfs_comp_ws[idx].num_ws;
	int *free_ws			= &btrfs_comp_ws[idx].free_ws;

	spin_lock(ws_lock);
	if (*num_ws < num_online_cpus()) {
	if (*free_ws < num_online_cpus()) {
		list_add(workspace, idle_ws);
		(*num_ws)++;
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(alloc_ws);
	atomic_dec(total_ws);
wake:
	/*
	 * Make sure counter is updated before we wake up waiters.
@@ -857,7 +900,7 @@ static void free_workspaces(void)
			workspace = btrfs_comp_ws[i].idle_ws.next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&btrfs_comp_ws[i].alloc_ws);
			atomic_dec(&btrfs_comp_ws[i].total_ws);
		}
	}
}
@@ -894,8 +937,6 @@ int btrfs_compress_pages(int type, struct address_space *mapping,
	int ret;

	workspace = find_workspace(type);
	if (IS_ERR(workspace))
		return PTR_ERR(workspace);

	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
						      start, len, pages,
@@ -930,8 +971,6 @@ static int btrfs_decompress_biovec(int type, struct page **pages_in,
	int ret;

	workspace = find_workspace(type);
	if (IS_ERR(workspace))
		return PTR_ERR(workspace);

	ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in,
							 disk_start,
@@ -952,8 +991,6 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
	int ret;

	workspace = find_workspace(type);
	if (IS_ERR(workspace))
		return PTR_ERR(workspace);

	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
						  dest_page, start_byte,
+1 −0
Original line number Diff line number Diff line
@@ -4122,6 +4122,7 @@ void btrfs_test_inode_set_ops(struct inode *inode);

/* ioctl.c */
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int btrfs_ioctl_get_supported_features(void __user *arg);
void btrfs_update_iflags(struct inode *inode);
void btrfs_inherit_iflags(struct inode *inode, struct inode *dir);
+15 −3
Original line number Diff line number Diff line
@@ -2517,6 +2517,7 @@ int open_ctree(struct super_block *sb,
	int num_backups_tried = 0;
	int backup_index = 0;
	int max_active;
	bool cleaner_mutex_locked = false;

	tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
	chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
@@ -2997,6 +2998,13 @@ int open_ctree(struct super_block *sb,
		goto fail_sysfs;
	}

	/*
	 * Hold the cleaner_mutex thread here so that we don't block
	 * for a long time on btrfs_recover_relocation.  cleaner_kthread
	 * will wait for us to finish mounting the filesystem.
	 */
	mutex_lock(&fs_info->cleaner_mutex);
	cleaner_mutex_locked = true;
	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
					       "btrfs-cleaner");
	if (IS_ERR(fs_info->cleaner_kthread))
@@ -3056,10 +3064,8 @@ int open_ctree(struct super_block *sb,
		ret = btrfs_cleanup_fs_roots(fs_info);
		if (ret)
			goto fail_qgroup;

		mutex_lock(&fs_info->cleaner_mutex);
		/* We locked cleaner_mutex before creating cleaner_kthread. */
		ret = btrfs_recover_relocation(tree_root);
		mutex_unlock(&fs_info->cleaner_mutex);
		if (ret < 0) {
			printk(KERN_WARNING
			       "BTRFS: failed to recover relocation\n");
@@ -3067,6 +3073,8 @@ int open_ctree(struct super_block *sb,
			goto fail_qgroup;
		}
	}
	mutex_unlock(&fs_info->cleaner_mutex);
	cleaner_mutex_locked = false;

	location.objectid = BTRFS_FS_TREE_OBJECTID;
	location.type = BTRFS_ROOT_ITEM_KEY;
@@ -3180,6 +3188,10 @@ int open_ctree(struct super_block *sb,
	filemap_write_and_wait(fs_info->btree_inode->i_mapping);

fail_sysfs:
	if (cleaner_mutex_locked) {
		mutex_unlock(&fs_info->cleaner_mutex);
		cleaner_mutex_locked = false;
	}
	btrfs_sysfs_remove_mounted(fs_info);

fail_fsdev_sysfs:
+1 −1
Original line number Diff line number Diff line
@@ -4620,7 +4620,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,

	/* Calc the number of the pages we need flush for space reservation */
	items = calc_reclaim_items_nr(root, to_reclaim);
	to_reclaim = items * EXTENT_SIZE_PER_ITEM;
	to_reclaim = (u64)items * EXTENT_SIZE_PER_ITEM;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	block_rsv = &root->fs_info->delalloc_block_rsv;
Loading