commit 54bcf382
Author: Chris Mason

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable into for-linus

Conflicts:
	fs/btrfs/super.c

Parents: 94a8d5ca c65ddb52

fs/btrfs/async-thread.c: +202 −52

@@ -48,6 +48,9 @@ struct btrfs_worker_thread {
 	/* number of things on the pending list */
 	atomic_t num_pending;
 
+	/* reference counter for this struct */
+	atomic_t refs;
+
 	unsigned long sequence;
 
 	/* protects the pending list. */
@@ -71,7 +74,12 @@ static void check_idle_worker(struct btrfs_worker_thread *worker)
 		unsigned long flags;
 		spin_lock_irqsave(&worker->workers->lock, flags);
 		worker->idle = 1;
-		list_move(&worker->worker_list, &worker->workers->idle_list);
+
+		/* the list may be empty if the worker is just starting */
+		if (!list_empty(&worker->worker_list)) {
+			list_move(&worker->worker_list,
+				 &worker->workers->idle_list);
+		}
 		spin_unlock_irqrestore(&worker->workers->lock, flags);
 	}
 }
@@ -87,23 +95,49 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
 		unsigned long flags;
 		spin_lock_irqsave(&worker->workers->lock, flags);
 		worker->idle = 0;
-		list_move_tail(&worker->worker_list,
-			      &worker->workers->worker_list);
+
+		if (!list_empty(&worker->worker_list)) {
+			list_move_tail(&worker->worker_list,
+				      &worker->workers->worker_list);
+		}
 		spin_unlock_irqrestore(&worker->workers->lock, flags);
 	}
 }
 
-static noinline int run_ordered_completions(struct btrfs_workers *workers,
-					    struct btrfs_work *work)
+static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 {
+	struct btrfs_workers *workers = worker->workers;
 	unsigned long flags;
 
+	rmb();
+	if (!workers->atomic_start_pending)
+		return;
+
+	spin_lock_irqsave(&workers->lock, flags);
+	if (!workers->atomic_start_pending)
+		goto out;
+
+	workers->atomic_start_pending = 0;
+	if (workers->num_workers >= workers->max_workers)
+		goto out;
+
+	spin_unlock_irqrestore(&workers->lock, flags);
+	btrfs_start_workers(workers, 1);
+	return;
+
+out:
+	spin_unlock_irqrestore(&workers->lock, flags);
+}
+
+static noinline int run_ordered_completions(struct btrfs_workers *workers,
+					    struct btrfs_work *work)
+{
 	if (!workers->ordered)
 		return 0;
 
 	set_bit(WORK_DONE_BIT, &work->flags);
 
-	spin_lock_irqsave(&workers->lock, flags);
+	spin_lock(&workers->order_lock);
 
 	while (1) {
 		if (!list_empty(&workers->prio_order_list)) {
@@ -126,45 +160,118 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,
 		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
 			break;
 
-		spin_unlock_irqrestore(&workers->lock, flags);
+		spin_unlock(&workers->order_lock);
 
 		work->ordered_func(work);
 
 		/* now take the lock again and call the freeing code */
-		spin_lock_irqsave(&workers->lock, flags);
+		spin_lock(&workers->order_lock);
 		list_del(&work->order_list);
 		work->ordered_free(work);
 	}
 
-	spin_unlock_irqrestore(&workers->lock, flags);
+	spin_unlock(&workers->order_lock);
 	return 0;
 }
 
+static void put_worker(struct btrfs_worker_thread *worker)
+{
+	if (atomic_dec_and_test(&worker->refs))
+		kfree(worker);
+}
+
+static int try_worker_shutdown(struct btrfs_worker_thread *worker)
+{
+	int freeit = 0;
+
+	spin_lock_irq(&worker->lock);
+	spin_lock(&worker->workers->lock);
+	if (worker->workers->num_workers > 1 &&
+	    worker->idle &&
+	    !worker->working &&
+	    !list_empty(&worker->worker_list) &&
+	    list_empty(&worker->prio_pending) &&
+	    list_empty(&worker->pending) &&
+	    atomic_read(&worker->num_pending) == 0) {
+		freeit = 1;
+		list_del_init(&worker->worker_list);
+		worker->workers->num_workers--;
+	}
+	spin_unlock(&worker->workers->lock);
+	spin_unlock_irq(&worker->lock);
+
+	if (freeit)
+		put_worker(worker);
+	return freeit;
+}
+
+static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
+					struct list_head *prio_head,
+					struct list_head *head)
+{
+	struct btrfs_work *work = NULL;
+	struct list_head *cur = NULL;
+
+	if(!list_empty(prio_head))
+		cur = prio_head->next;
+
+	smp_mb();
+	if (!list_empty(&worker->prio_pending))
+		goto refill;
+
+	if (!list_empty(head))
+		cur = head->next;
+
+	if (cur)
+		goto out;
+
+refill:
+	spin_lock_irq(&worker->lock);
+	list_splice_tail_init(&worker->prio_pending, prio_head);
+	list_splice_tail_init(&worker->pending, head);
+
+	if (!list_empty(prio_head))
+		cur = prio_head->next;
+	else if (!list_empty(head))
+		cur = head->next;
+	spin_unlock_irq(&worker->lock);
+
+	if (!cur)
+		goto out_fail;
+
+out:
+	work = list_entry(cur, struct btrfs_work, list);
+
+out_fail:
+	return work;
+}
+
 /*
  * main loop for servicing work items
  */
 static int worker_loop(void *arg)
 {
 	struct btrfs_worker_thread *worker = arg;
-	struct list_head *cur;
+	struct list_head head;
+	struct list_head prio_head;
 	struct btrfs_work *work;
+
+	INIT_LIST_HEAD(&head);
+	INIT_LIST_HEAD(&prio_head);
+
 	do {
-		spin_lock_irq(&worker->lock);
-again_locked:
+again:
 		while (1) {
-			if (!list_empty(&worker->prio_pending))
-				cur = worker->prio_pending.next;
-			else if (!list_empty(&worker->pending))
-				cur = worker->pending.next;
-			else
+			work = get_next_work(worker, &prio_head, &head);
+			if (!work)
 				break;
 
-			work = list_entry(cur, struct btrfs_work, list);
 			list_del(&work->list);
 			clear_bit(WORK_QUEUED_BIT, &work->flags);
 
 			work->worker = worker;
-			spin_unlock_irq(&worker->lock);
 
 			work->func(work);
 
@@ -175,9 +282,13 @@ static int worker_loop(void *arg)
 			 */
 			run_ordered_completions(worker->workers, work);
 
-			spin_lock_irq(&worker->lock);
-			check_idle_worker(worker);
+			check_pending_worker_creates(worker);
+
 		}
+
+		spin_lock_irq(&worker->lock);
+		check_idle_worker(worker);
+
 		if (freezing(current)) {
 			worker->working = 0;
 			spin_unlock_irq(&worker->lock);
@@ -216,8 +327,10 @@ static int worker_loop(void *arg)
 				spin_lock_irq(&worker->lock);
 				set_current_state(TASK_INTERRUPTIBLE);
 				if (!list_empty(&worker->pending) ||
-				    !list_empty(&worker->prio_pending))
-					goto again_locked;
+				    !list_empty(&worker->prio_pending)) {
+					spin_unlock_irq(&worker->lock);
+					goto again;
+				}
 
 				/*
 				 * this makes sure we get a wakeup when someone
@@ -226,8 +339,13 @@ static int worker_loop(void *arg)
 				worker->working = 0;
 				spin_unlock_irq(&worker->lock);
 
-				if (!kthread_should_stop())
-					schedule();
+				if (!kthread_should_stop()) {
+					schedule_timeout(HZ * 120);
+					if (!worker->working &&
+					    try_worker_shutdown(worker)) {
+						return 0;
+					}
+				}
 			}
 			__set_current_state(TASK_RUNNING);
 		}
@@ -242,16 +360,30 @@ int btrfs_stop_workers(struct btrfs_workers *workers)
 {
 	struct list_head *cur;
 	struct btrfs_worker_thread *worker;
+	int can_stop;
 
+	spin_lock_irq(&workers->lock);
 	list_splice_init(&workers->idle_list, &workers->worker_list);
 	while (!list_empty(&workers->worker_list)) {
 		cur = workers->worker_list.next;
 		worker = list_entry(cur, struct btrfs_worker_thread,
 				    worker_list);
-		kthread_stop(worker->task);
-		list_del(&worker->worker_list);
-		kfree(worker);
+
+		atomic_inc(&worker->refs);
+		workers->num_workers -= 1;
+		if (!list_empty(&worker->worker_list)) {
+			list_del_init(&worker->worker_list);
+			put_worker(worker);
+			can_stop = 1;
+		} else
+			can_stop = 0;
+		spin_unlock_irq(&workers->lock);
+		if (can_stop)
+			kthread_stop(worker->task);
+		spin_lock_irq(&workers->lock);
+		put_worker(worker);
 	}
+	spin_unlock_irq(&workers->lock);
 	return 0;
 }
 
@@ -266,10 +398,13 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
 	INIT_LIST_HEAD(&workers->order_list);
 	INIT_LIST_HEAD(&workers->prio_order_list);
 	spin_lock_init(&workers->lock);
+	spin_lock_init(&workers->order_lock);
 	workers->max_workers = max;
 	workers->idle_thresh = 32;
 	workers->name = name;
 	workers->ordered = 0;
+	workers->atomic_start_pending = 0;
+	workers->atomic_worker_start = 0;
 }
 
 /*
@@ -293,7 +428,9 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
 		INIT_LIST_HEAD(&worker->prio_pending);
 		INIT_LIST_HEAD(&worker->worker_list);
 		spin_lock_init(&worker->lock);
+
 		atomic_set(&worker->num_pending, 0);
+		atomic_set(&worker->refs, 1);
 		worker->workers = workers;
 		worker->task = kthread_run(worker_loop, worker,
 					   "btrfs-%s-%d", workers->name,
@@ -303,7 +440,6 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
 			kfree(worker);
 			goto fail;
 		}
-
 		spin_lock_irq(&workers->lock);
 		list_add_tail(&worker->worker_list, &workers->idle_list);
 		worker->idle = 1;
@@ -350,7 +486,6 @@ static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
 	 */
 	next = workers->worker_list.next;
 	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
-	atomic_inc(&worker->num_pending);
 	worker->sequence++;
 
 	if (worker->sequence % workers->idle_thresh == 0)
@@ -367,19 +502,32 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
 {
 	struct btrfs_worker_thread *worker;
 	unsigned long flags;
+	struct list_head *fallback;
 
 again:
 	spin_lock_irqsave(&workers->lock, flags);
 	worker = next_worker(workers);
-	spin_unlock_irqrestore(&workers->lock, flags);
 
 	if (!worker) {
-		spin_lock_irqsave(&workers->lock, flags);
 		if (workers->num_workers >= workers->max_workers) {
-			struct list_head *fallback = NULL;
+			goto fallback;
+		} else if (workers->atomic_worker_start) {
+			workers->atomic_start_pending = 1;
+			goto fallback;
+		} else {
+			spin_unlock_irqrestore(&workers->lock, flags);
+			/* we're below the limit, start another worker */
+			btrfs_start_workers(workers, 1);
+			goto again;
+		}
+	}
+	goto found;
+
+fallback:
+	fallback = NULL;
 	/*
 	 * we have failed to find any workers, just
-			 * return the force one
+	 * return the first one we can find.
 	 */
 	if (!list_empty(&workers->worker_list))
 		fallback = workers->worker_list.next;
@@ -388,14 +536,13 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
 	BUG_ON(!fallback);
 	worker = list_entry(fallback,
 		  struct btrfs_worker_thread, worker_list);
+found:
+	/*
+	 * this makes sure the worker doesn't exit before it is placed
+	 * onto a busy/idle list
+	 */
+	atomic_inc(&worker->num_pending);
 	spin_unlock_irqrestore(&workers->lock, flags);
-		} else {
-			spin_unlock_irqrestore(&workers->lock, flags);
-			/* we're below the limit, start another worker */
-			btrfs_start_workers(workers, 1);
-			goto again;
-		}
-	}
 	return worker;
 }
 
@@ -435,9 +582,9 @@ int btrfs_requeue_work(struct btrfs_work *work)
 		worker->working = 1;
 	}
 
-	spin_unlock_irqrestore(&worker->lock, flags);
 	if (wake)
 		wake_up_process(worker->task);
+	spin_unlock_irqrestore(&worker->lock, flags);
 out:
 
 	return 0;
@@ -463,14 +610,18 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 
 	worker = find_worker(workers);
 	if (workers->ordered) {
-		spin_lock_irqsave(&workers->lock, flags);
+		/*
+		 * you're not allowed to do ordered queues from an
+		 * interrupt handler
+		 */
+		spin_lock(&workers->order_lock);
 		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
 			list_add_tail(&work->order_list,
 				      &workers->prio_order_list);
 		} else {
 			list_add_tail(&work->order_list, &workers->order_list);
 		}
-		spin_unlock_irqrestore(&workers->lock, flags);
+		spin_unlock(&workers->order_lock);
 	} else {
 		INIT_LIST_HEAD(&work->order_list);
 	}
@@ -481,7 +632,6 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 		list_add_tail(&work->list, &worker->prio_pending);
 	else
 		list_add_tail(&work->list, &worker->pending);
-	atomic_inc(&worker->num_pending);
 	check_busy_worker(worker);
 
 	/*
@@ -492,10 +642,10 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 		wake = 1;
 	worker->working = 1;
 
-	spin_unlock_irqrestore(&worker->lock, flags);
-
 	if (wake)
 		wake_up_process(worker->task);
+	spin_unlock_irqrestore(&worker->lock, flags);
+
 out:
 	return 0;
 }
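
The async-thread changes above replace the old "stop and kfree every thread" lifetime with a reference count: the worker list holds the initial reference (refs starts at 1 in btrfs_start_workers()), anyone who needs the struct to outlive the list pins it with atomic_inc(&worker->refs), and put_worker() frees it once the last reference drops. That is what lets an idle thread exit on its own from try_worker_shutdown() after the 120-second schedule_timeout(). A minimal userspace sketch of the same rule, with C11 atomics standing in for the kernel's atomic_t (toy names, not the kernel API):

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* toy stand-in for struct btrfs_worker_thread: one refcount
	 * guards the allocation */
	struct worker {
		atomic_int refs;
	};

	static struct worker *worker_alloc(void)
	{
		struct worker *w = malloc(sizeof(*w));
		if (w)
			atomic_init(&w->refs, 1);	/* the list's reference */
		return w;
	}

	static void get_worker(struct worker *w)
	{
		atomic_fetch_add(&w->refs, 1);
	}

	/* mirrors put_worker(): free only when the last reference drops */
	static void put_worker(struct worker *w)
	{
		if (atomic_fetch_sub(&w->refs, 1) == 1)
			free(w);
	}

	int main(void)
	{
		struct worker *w = worker_alloc();

		get_worker(w);	/* e.g. a stopper pinning the worker */
		put_worker(w);	/* the list drops its reference */
		put_worker(w);	/* final put frees the struct */
		printf("worker freed exactly once\n");
		return 0;
	}

The invariant is the usual refcounting one: a thread may only touch the struct while it provably holds a reference, and every get is paired with exactly one put.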

fs/btrfs/async-thread.h: +12 −0

@@ -73,6 +73,15 @@ struct btrfs_workers {
 	/* force completions in the order they were queued */
 	int ordered;
 
+	/* more workers required, but in an interrupt handler */
+	int atomic_start_pending;
+
+	/*
+	 * are we allowed to sleep while starting workers or are we required
+	 * to start them at a later time?
+	 */
+	int atomic_worker_start;
+
 	/* list with all the work threads.  The workers on the idle thread
 	 * may be actively servicing jobs, but they haven't yet hit the
 	 * idle thresh limit above.
@@ -90,6 +99,9 @@ struct btrfs_workers {
 	/* lock for finding the next worker thread to queue on */
 	spinlock_t lock;
 
+	/* lock for the ordered lists */
+	spinlock_t order_lock;
+
 	/* extra name for this worker, used for current->name */
 	char *name;
 };
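
The two fields added to struct btrfs_workers exist because work can be queued from contexts that must not sleep, while creating a kthread blocks: with atomic_worker_start set, find_worker() only records atomic_start_pending, and a running worker later performs the blocking start from check_pending_worker_creates(). Roughly this handshake in portable C, with a mutex standing in for the workers lock and a counter for the actual thread start (toy names, not the btrfs API):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	struct pool {
		atomic_int start_pending;	/* set where sleeping is forbidden */
		pthread_mutex_t lock;
		int num_workers;
		int max_workers;
	};

	/* called from a context that must not block: only record the request */
	static void request_worker_nonblocking(struct pool *p)
	{
		atomic_store(&p->start_pending, 1);
	}

	/* called later from thread context, where blocking is allowed; mirrors
	 * check_pending_worker_creates(): cheap unlocked test first, then
	 * re-check and clear the flag under the lock before the real start */
	static void check_pending_creates(struct pool *p)
	{
		int n;

		if (!atomic_load(&p->start_pending))
			return;

		pthread_mutex_lock(&p->lock);
		if (!atomic_exchange(&p->start_pending, 0) ||
		    p->num_workers >= p->max_workers) {
			pthread_mutex_unlock(&p->lock);
			return;
		}
		n = ++p->num_workers;	/* stand-in for starting a thread */
		pthread_mutex_unlock(&p->lock);
		printf("started worker, now %d\n", n);
	}

	int main(void)
	{
		struct pool p = { .lock = PTHREAD_MUTEX_INITIALIZER,
				  .max_workers = 4 };

		request_worker_nonblocking(&p);
		check_pending_creates(&p);	/* deferred start happens here */
		return 0;
	}

Note that the kernel code calls btrfs_start_workers() only after dropping the lock; the sketch folds the start into the locked section purely to stay short.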

fs/btrfs/btrfs_inode.h: +1 −0

@@ -138,6 +138,7 @@ struct btrfs_inode {
 	 * of these.
 	 */
 	unsigned ordered_data_close:1;
+	unsigned dummy_inode:1;
 
 	struct inode vfs_inode;
 };

fs/btrfs/compression.c: +4 −4

@@ -506,10 +506,10 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		 */
 		set_page_extent_mapped(page);
 		lock_extent(tree, last_offset, end, GFP_NOFS);
-		spin_lock(&em_tree->lock);
+		read_lock(&em_tree->lock);
 		em = lookup_extent_mapping(em_tree, last_offset,
 					   PAGE_CACHE_SIZE);
-		spin_unlock(&em_tree->lock);
+		read_unlock(&em_tree->lock);
 
 		if (!em || last_offset < em->start ||
 		    (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
@@ -593,11 +593,11 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 	em_tree = &BTRFS_I(inode)->extent_tree;
 
 	/* we need the actual starting offset of this extent in the file */
-	spin_lock(&em_tree->lock);
+	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree,
 				   page_offset(bio->bi_io_vec->bv_page),
 				   PAGE_CACHE_SIZE);
-	spin_unlock(&em_tree->lock);
+	read_unlock(&em_tree->lock);
 
 	compressed_len = em->block_len;
 	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
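
The compression.c hunks are one piece of converting the extent_map tree's lock from a spinlock to a reader/writer lock: lookups of cached mappings can now run concurrently, and only tree modifications exclude each other. The same pattern in userspace, sketched with a pthread rwlock (the struct is a stand-in for the real rb-tree):

	#include <pthread.h>
	#include <stdio.h>

	struct em_tree {
		pthread_rwlock_t lock;
		long mapping;	/* stand-in for the extent-map rb-tree */
	};

	static long lookup_mapping(struct em_tree *t)
	{
		long em;

		pthread_rwlock_rdlock(&t->lock);	/* readers admit readers */
		em = t->mapping;
		pthread_rwlock_unlock(&t->lock);
		return em;
	}

	static void replace_mapping(struct em_tree *t, long em)
	{
		pthread_rwlock_wrlock(&t->lock);	/* writers stay exclusive */
		t->mapping = em;
		pthread_rwlock_unlock(&t->lock);
	}

	int main(void)
	{
		struct em_tree t = { .lock = PTHREAD_RWLOCK_INITIALIZER };

		replace_mapping(&t, 42);
		printf("lookup -> %ld\n", lookup_mapping(&t));
		return 0;
	}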

fs/btrfs/ctree.c: +6 −0

@@ -2853,6 +2853,12 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 	int split;
 	int num_doubles = 0;
 
+	l = path->nodes[0];
+	slot = path->slots[0];
+	if (extend && data_size + btrfs_item_size_nr(l, slot) +
+	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
+		return -EOVERFLOW;
+
 	/* first try to make some room by pushing left and right */
 	if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY) {
 		wret = push_leaf_right(trans, root, path, data_size, 0);
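
The new split_leaf() guard fails fast: when an existing item is being extended and its current size plus the requested growth plus the per-item header could not fit even in an empty leaf, no amount of splitting will help, so the function returns -EOVERFLOW up front. The arithmetic in a self-contained sketch (the constants are illustrative; the real limit comes from BTRFS_LEAF_DATA_SIZE and sizeof(struct btrfs_item)):

	#include <errno.h>
	#include <stdio.h>

	#define LEAF_DATA_SIZE	3995	/* made-up capacity of an empty leaf */
	#define ITEM_HEADER	25	/* made-up per-item header size */

	/* mirrors the guard: if the extended item can never fit, give up early */
	static int check_item_fits(int cur_item_size, int grow_by)
	{
		if (cur_item_size + grow_by + ITEM_HEADER > LEAF_DATA_SIZE)
			return -EOVERFLOW;
		return 0;
	}

	int main(void)
	{
		printf("grow by 100:  %d\n", check_item_fits(500, 100));
		printf("grow by 4000: %d\n", check_item_fits(500, 4000));
		return 0;
	}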