Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a4683487 authored by Dan Williams, committed by James Bottomley
Browse files

[SCSI] async: make async_synchronize_full() flush all work regardless of domain



In response to an async related regression James noted:

  "My theory is that this is an init problem: The assumption in a lot of
   our code is that async_synchronize_full() waits for everything ... even
   the domain specific async schedules, which isn't true."

...so make this assumption true.

Each domain, including the default one, registers itself on a global domain
list when work is scheduled.  Once all entries complete it exits that
list.  Waiting for the list to be empty syncs all in-flight work across
all domains.

Domains can opt-out of global syncing if they are declared as exclusive
ASYNC_DOMAIN_EXCLUSIVE().  All stack-based domains have been declared
exclusive since the domain may go out of scope as soon as the last work
item completes.

Statically declared domains are mostly ok, but async_unregister_domain()
is there to close any theoretical races with pending
async_synchronize_full waiters at module removal time.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: Arjan van de Ven <arjan@linux.intel.com>
Reported-by: Meelis Roos <mroos@linux.ee>
Reported-by: Eldad Zack <eldadzack@gmail.com>
Tested-by: Eldad Zack <eldad@fogrefinery.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
parent 2955b47d
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -1355,6 +1355,7 @@ static void __exit exit_scsi(void)
	scsi_exit_devinfo();
	scsi_exit_procfs();
	scsi_exit_queue();
	async_unregister_domain(&scsi_sd_probe_domain);
}

subsys_initcall(init_scsi);
+1 −0
Original line number Diff line number Diff line
@@ -46,6 +46,7 @@ struct async_domain {
extern async_cookie_t async_schedule(async_func_ptr *ptr, void *data);
extern async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
					    struct async_domain *domain);
void async_unregister_domain(struct async_domain *domain);
extern void async_synchronize_full(void);
extern void async_synchronize_full_domain(struct async_domain *domain);
extern void async_synchronize_cookie(async_cookie_t cookie);
+41 −2
Original line number Diff line number Diff line
@@ -63,7 +63,9 @@ static async_cookie_t next_cookie = 1;

static LIST_HEAD(async_pending);
static ASYNC_DOMAIN(async_running);
static LIST_HEAD(async_domains);
static DEFINE_SPINLOCK(async_lock);
static DEFINE_MUTEX(async_register_mutex);

struct async_entry {
	struct list_head	list;
@@ -145,6 +147,8 @@ static void async_run_entry_fn(struct work_struct *work)
	/* 3) remove self from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);
	if (running->registered && --running->count == 0)
		list_del_init(&running->node);

	/* 4) free the entry */
	kfree(entry);
@@ -187,6 +191,8 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct a
	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
	if (running->registered && running->count++ == 0)
		list_add_tail(&running->node, &async_domains);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);

@@ -236,12 +242,42 @@ EXPORT_SYMBOL_GPL(async_schedule_domain);
 */
/*
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * Waits until every in-flight async work item has completed, in each
 * registered domain as well as the default one.  Domains declared with
 * ASYNC_DOMAIN_EXCLUSIVE() never register on the global async_domains
 * list and are therefore not flushed here.
 *
 * NOTE(review): the original span interleaved the pre-patch loop body
 * with the post-patch one (a unified diff rendered without +/- markers,
 * which is not valid C); this is the coherent post-patch implementation.
 */
void async_synchronize_full(void)
{
	/* Serialize against async_unregister_domain() so a domain cannot
	 * disappear from under us while we walk the list. */
	mutex_lock(&async_register_mutex);
	do {
		struct async_domain *domain = NULL;

		/* Grab the first domain that still has pending/running
		 * work; the lock only protects the list traversal. */
		spin_lock_irq(&async_lock);
		if (!list_empty(&async_domains))
			domain = list_first_entry(&async_domains, typeof(*domain), node);
		spin_unlock_irq(&async_lock);

		/* domain == NULL is a no-op in
		 * async_synchronize_cookie_domain(), terminating the loop. */
		async_synchronize_cookie_domain(next_cookie, domain);
	} while (!list_empty(&async_domains));
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);

/**
 * async_unregister_domain - ensure no more anonymous waiters on this domain
 * @domain: idle domain to flush out of any async_synchronize_full instances
 *
 * async_synchronize_{cookie|full}_domain() are not flushed since callers
 * of these routines should know the lifetime of @domain
 *
 * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing
 */
void async_unregister_domain(struct async_domain *domain)
{
	/* Exclude async_synchronize_full() so it cannot pick this domain
	 * off the global list while we deregister it; the spinlock then
	 * orders us against schedule/completion paths touching @domain. */
	mutex_lock(&async_register_mutex);
	spin_lock_irq(&async_lock);
	/* The domain must be idle: still registered, off the global
	 * async_domains list, and with no pending/running entries. */
	WARN_ON(!domain->registered || !list_empty(&domain->node) ||
		!list_empty(&domain->domain));
	/* Once cleared, __async_schedule() will no longer re-add this
	 * domain to async_domains, so full syncs stop seeing it. */
	domain->registered = 0;
	spin_unlock_irq(&async_lock);
	mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_unregister_domain);

/**
 * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
 * @domain: running list to synchronize on
@@ -268,6 +304,9 @@ void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain
{
	ktime_t uninitialized_var(starttime), delta, endtime;

	if (!running)
		return;

	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();