Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ce6eba3d authored by Linus Torvalds
Browse files

Merge branch 'sched-wait-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull wait_var_event updates from Ingo Molnar:
 "This introduces the new wait_var_event() API, which is a more flexible
  waiting primitive than wait_on_atomic_t().

  All wait_on_atomic_t() users are migrated over to the new API and
  wait_on_atomic_t() is removed. The migration fixes one bug and should
  result in no functional changes for the other use cases"

* 'sched-wait-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/wait: Improve __var_waitqueue() code generation
  sched/wait: Remove the wait_on_atomic_t() API
  sched/wait, arch/mips: Fix and convert wait_on_atomic_t() usage to the new wait_var_event() API
  sched/wait, fs/ocfs2: Convert wait_on_atomic_t() usage to the new wait_var_event() API
  sched/wait, fs/nfs: Convert wait_on_atomic_t() usage to the new wait_var_event() API
  sched/wait, fs/fscache: Convert wait_on_atomic_t() usage to the new wait_var_event() API
  sched/wait, fs/btrfs: Convert wait_on_atomic_t() usage to the new wait_var_event() API
  sched/wait, fs/afs: Convert wait_on_atomic_t() usage to the new wait_var_event() API
  sched/wait, drivers/media: Convert wait_on_atomic_t() usage to the new wait_var_event() API
  sched/wait, drivers/drm: Convert wait_on_atomic_t() usage to the new wait_var_event() API
  sched/wait: Introduce wait_var_event()
parents a5532439 b3fc5c9b
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -781,6 +781,8 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
	atomic_set(&task->mm->context.fp_mode_switching, 0);
	preempt_enable();

	wake_up_var(&task->mm->context.fp_mode_switching);

	return 0;
}

+2 −2
Original line number Diff line number Diff line
@@ -1248,8 +1248,8 @@ static int enable_restore_fp_context(int msa)
	 * If an FP mode switch is currently underway, wait for it to
	 * complete before proceeding.
	 */
	wait_on_atomic_t(&current->mm->context.fp_mode_switching,
			 atomic_t_wait, TASK_KILLABLE);
	wait_var_event(&current->mm->context.fp_mode_switching,
		       !atomic_read(&current->mm->context.fp_mode_switching));

	if (!used_math()) {
		/* First time FP context user. */
+7 −6
Original line number Diff line number Diff line
@@ -177,8 +177,9 @@ static ssize_t auxdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
		res = pos - iocb->ki_pos;
	iocb->ki_pos = pos;

	atomic_dec(&aux_dev->usecount);
	wake_up_atomic_t(&aux_dev->usecount);
	if (atomic_dec_and_test(&aux_dev->usecount))
		wake_up_var(&aux_dev->usecount);

	return res;
}

@@ -218,8 +219,9 @@ static ssize_t auxdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
		res = pos - iocb->ki_pos;
	iocb->ki_pos = pos;

	atomic_dec(&aux_dev->usecount);
	wake_up_atomic_t(&aux_dev->usecount);
	if (atomic_dec_and_test(&aux_dev->usecount))
		wake_up_var(&aux_dev->usecount);

	return res;
}

@@ -277,8 +279,7 @@ void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
	mutex_unlock(&aux_idr_mutex);

	atomic_dec(&aux_dev->usecount);
	wait_on_atomic_t(&aux_dev->usecount, atomic_t_wait,
			 TASK_UNINTERRUPTIBLE);
	wait_var_event(&aux_dev->usecount, !atomic_read(&aux_dev->usecount));

	minor = aux_dev->index;
	if (aux_dev->dev)
+4 −10
Original line number Diff line number Diff line
@@ -271,18 +271,13 @@ struct igt_wakeup {
	u32 seqno;
};

static int wait_atomic_timeout(atomic_t *p, unsigned int mode)
{
	return schedule_timeout(10 * HZ) ? 0 : -ETIMEDOUT;
}

static bool wait_for_ready(struct igt_wakeup *w)
{
	DEFINE_WAIT(ready);

	set_bit(IDLE, &w->flags);
	if (atomic_dec_and_test(w->done))
		wake_up_atomic_t(w->done);
		wake_up_var(w->done);

	if (test_bit(STOP, &w->flags))
		goto out;
@@ -299,7 +294,7 @@ static bool wait_for_ready(struct igt_wakeup *w)
out:
	clear_bit(IDLE, &w->flags);
	if (atomic_dec_and_test(w->set))
		wake_up_atomic_t(w->set);
		wake_up_var(w->set);

	return !test_bit(STOP, &w->flags);
}
@@ -342,7 +337,7 @@ static void igt_wake_all_sync(atomic_t *ready,
	atomic_set(ready, 0);
	wake_up_all(wq);

	wait_on_atomic_t(set, atomic_t_wait, TASK_UNINTERRUPTIBLE);
	wait_var_event(set, !atomic_read(set));
	atomic_set(ready, count);
	atomic_set(done, count);
}
@@ -350,7 +345,6 @@ static void igt_wake_all_sync(atomic_t *ready,
static int igt_wakeup(void *arg)
{
	I915_RND_STATE(prng);
	const int state = TASK_UNINTERRUPTIBLE;
	struct intel_engine_cs *engine = arg;
	struct igt_wakeup *waiters;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
@@ -418,7 +412,7 @@ static int igt_wakeup(void *arg)
		 * that they are ready for the next test. We wait until all
		 * threads are complete and waiting for us (i.e. not a seqno).
		 */
		err = wait_on_atomic_t(&done, wait_atomic_timeout, state);
		err = wait_var_event_timeout(&done, !atomic_read(&done), 10 * HZ);
		if (err) {
			pr_err("Timed out waiting for %d remaining waiters\n",
			       atomic_read(&done));
+4 −4
Original line number Diff line number Diff line
@@ -106,8 +106,8 @@ int hfi_core_deinit(struct venus_core *core, bool blocking)

	if (!empty) {
		mutex_unlock(&core->lock);
		wait_on_atomic_t(&core->insts_count, atomic_t_wait,
				 TASK_UNINTERRUPTIBLE);
		wait_var_event(&core->insts_count,
			       !atomic_read(&core->insts_count));
		mutex_lock(&core->lock);
	}

@@ -229,8 +229,8 @@ void hfi_session_destroy(struct venus_inst *inst)

	mutex_lock(&core->lock);
	list_del_init(&inst->list);
	atomic_dec(&core->insts_count);
	wake_up_atomic_t(&core->insts_count);
	if (atomic_dec_and_test(&core->insts_count))
		wake_up_var(&core->insts_count);
	mutex_unlock(&core->lock);
}
EXPORT_SYMBOL_GPL(hfi_session_destroy);
Loading