
Commit 3b07e9ca authored by Tejun Heo

workqueue: deprecate system_nrt[_freezable]_wq



system_nrt[_freezable]_wq are now spurious.  Mark them deprecated and
convert all users to system[_freezable]_wq.

If you're cc'd and wondering what's going on: Now all workqueues are
non-reentrant, so there's no reason to use system_nrt[_freezable]_wq.
Please use system[_freezable]_wq instead.

This patch doesn't make any functional difference.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-By: Lai Jiangshan <laijs@cn.fujitsu.com>

Cc: Jens Axboe <axboe@kernel.dk>
Cc: David Airlie <airlied@linux.ie>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: David Howells <dhowells@redhat.com>
parent 43829731
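
The conversion is mechanical: queue_work(system_nrt_wq, w) becomes schedule_work(w) and queue_delayed_work(system_nrt_wq, dw, d) becomes schedule_delayed_work(dw, d), since the schedule_*() helpers queue onto system_wq; call sites that name a specific workqueue explicitly (e.g. system_nrt_freezable_wq) just drop the "nrt". A minimal sketch of the pattern, with made-up work items for illustration (not part of the patch):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void example_fn(struct work_struct *work)
{
	/* ... actual work ... */
}

/* hypothetical work items, for illustration only */
static DECLARE_WORK(example_work, example_fn);
static DECLARE_DELAYED_WORK(example_dwork, example_fn);

static void example_schedule(void)
{
	/* before: queue_work(system_nrt_wq, &example_work); */
	schedule_work(&example_work);	/* queues on system_wq */

	/* before: queue_delayed_work(system_nrt_wq, &example_dwork,
	 *			      msecs_to_jiffies(10)); */
	schedule_delayed_work(&example_dwork, msecs_to_jiffies(10));
}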
block/blk-throttle.c  +3 −4
@@ -180,7 +180,7 @@ static inline unsigned int total_nr_queued(struct throtl_data *td)
 
 /*
  * Worker for allocating per cpu stat for tgs. This is scheduled on the
- * system_nrt_wq once there are some groups on the alloc_list waiting for
+ * system_wq once there are some groups on the alloc_list waiting for
  * allocation.
  */
 static void tg_stats_alloc_fn(struct work_struct *work)
@@ -194,8 +194,7 @@ static void tg_stats_alloc_fn(struct work_struct *work)
 		stats_cpu = alloc_percpu(struct tg_stats_cpu);
 		if (!stats_cpu) {
 			/* allocation failed, try again after some time */
-			queue_delayed_work(system_nrt_wq, dwork,
-					   msecs_to_jiffies(10));
+			schedule_delayed_work(dwork, msecs_to_jiffies(10));
 			return;
 		}
 	}
@@ -238,7 +237,7 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
 	 */
 	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
 	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
-	queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
+	schedule_delayed_work(&tg_stats_alloc_work, 0);
 	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 }

block/genhd.c  +5 −5
@@ -1490,9 +1490,9 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
 	intv = disk_events_poll_jiffies(disk);
 	set_timer_slack(&ev->dwork.timer, intv / 4);
 	if (check_now)
-		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+		queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
 	else if (intv)
-		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+		queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
 out_unlock:
 	spin_unlock_irqrestore(&ev->lock, flags);
 }
@@ -1535,7 +1535,7 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
 	spin_lock_irq(&ev->lock);
 	ev->clearing |= mask;
 	if (!ev->block)
-		mod_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+		mod_delayed_work(system_freezable_wq, &ev->dwork, 0);
 	spin_unlock_irq(&ev->lock);
 }

@@ -1571,7 +1571,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
 
 	/* uncondtionally schedule event check and wait for it to finish */
 	disk_block_events(disk);
-	queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+	queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
 	flush_delayed_work(&ev->dwork);
 	__disk_unblock_events(disk, false);
@@ -1608,7 +1608,7 @@ static void disk_events_workfn(struct work_struct *work)
 
 	intv = disk_events_poll_jiffies(disk);
 	if (!ev->block && intv)
-		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+		queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
 
 	spin_unlock_irq(&ev->lock);

drivers/gpu/drm/drm_crtc_helper.c  +3 −3
@@ -968,7 +968,7 @@ static void output_poll_execute(struct work_struct *work)
 	}
 
 	if (repoll)
-		queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD);
+		schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
 }
 
 void drm_kms_helper_poll_disable(struct drm_device *dev)
@@ -993,7 +993,7 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
 	}
 
 	if (poll)
-		queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+		schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable);

@@ -1020,6 +1020,6 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
 	/* kill timer and schedule immediate execution, this doesn't block */
 	cancel_delayed_work(&dev->mode_config.output_poll_work);
 	if (drm_kms_helper_poll)
-		queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
+		schedule_delayed_work(&dev->mode_config.output_poll_work, 0);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
drivers/hid/hid-wiimote-ext.c  +1 −1
@@ -204,7 +204,7 @@ static void wiiext_worker(struct work_struct *work)
 /* schedule work only once, otherwise mark for reschedule */
 static void wiiext_schedule(struct wiimote_ext *ext)
 {
-	queue_work(system_nrt_wq, &ext->worker);
+	schedule_work(&ext->worker);
 }
 
 /*
drivers/mmc/core/host.c  +2 −2
@@ -204,7 +204,7 @@ void mmc_host_clk_release(struct mmc_host *host)
 	host->clk_requests--;
 	if (mmc_host_may_gate_card(host->card) &&
 	    !host->clk_requests)
-		queue_delayed_work(system_nrt_wq, &host->clk_gate_work,
-				   msecs_to_jiffies(host->clkgate_delay));
+		schedule_delayed_work(&host->clk_gate_work,
+				      msecs_to_jiffies(host->clkgate_delay));
 	spin_unlock_irqrestore(&host->clk_lock, flags);
 }
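
One call site above (disk_flush_events() in genhd.c) uses mod_delayed_work() rather than queue_delayed_work(); both take an explicit workqueue argument, so the conversion only renames system_nrt_freezable_wq to system_freezable_wq. A sketch of the behavioral difference between the two helpers, with a made-up work item (illustrative, not from the patch):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void check_fn(struct work_struct *work)
{
	/* ... event check ... */
}

/* hypothetical delayed work item, for illustration only */
static DECLARE_DELAYED_WORK(check_dwork, check_fn);

static void example(void)
{
	/* no-op if check_dwork is already pending; the existing timer stands */
	queue_delayed_work(system_freezable_wq, &check_dwork,
			   msecs_to_jiffies(1000));

	/* resets the timer even if the work is pending; a delay of 0
	 * forces it to run immediately */
	mod_delayed_work(system_freezable_wq, &check_dwork, 0);
}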