
Commit c58a6507 authored by Bart Van Assche, committed by Greg Kroah-Hartman

block, scsi: Change the preempt-only flag into a counter



commit cd84a62e0078dce09f4ed349bec84f86c9d54b30 upstream.

The RQF_PREEMPT flag is used for three purposes:
- In the SCSI core, for making sure that power management requests
  are executed even if a device is in the "quiesced" state.
- For domain validation by SCSI drivers that use the SCSI parallel
  interface.
- In the IDE driver, for IDE preempt requests.
Rename "preempt-only" to "pm-only" because the primary purpose of
this mode is power management. Since the power management core may,
but does not have to, resume a runtime-suspended device before
performing a system-wide suspend, and since a later patch will keep
"pm-only" mode set as long as a block device is runtime suspended,
make it possible to set "pm-only" mode from more than one context.
Since with this change scsi_device_quiesce() is no longer idempotent,
make that function return early if it is called for an
already-quiesced queue.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Acked-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Cc: Jianchao Wang <jianchao.w.wang@oracle.com>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 9b17512d
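
A note on the semantics being changed before the diff itself: the old
QUEUE_FLAG_PREEMPT_ONLY bit could only record that some context had gated
the queue, so the first clear reopened it for everyone. The sketch below
is a minimal user-space model in C11, not part of this commit; the helper
names merely mirror the kernel functions introduced below. It shows how a
counter keeps the queue gated until the last of several contexts has
cleared "pm-only" mode.

#include <stdatomic.h>
#include <stdio.h>

/* User-space model of request_queue::pm_only (illustration only). */
static atomic_int pm_only;

static void set_pm_only(void)   { atomic_fetch_add(&pm_only, 1); }
static void clear_pm_only(void) { atomic_fetch_sub(&pm_only, 1); }
static int  queue_pm_only(void) { return atomic_load(&pm_only); }

int main(void)
{
	set_pm_only();		/* context 1: runtime PM suspends the device */
	set_pm_only();		/* context 2: scsi_device_quiesce() */

	clear_pm_only();	/* context 1 finishes first ... */
	printf("pm_only = %d\n", queue_pm_only());	/* 1: still gated */

	clear_pm_only();	/* ... context 2 clears last */
	printf("pm_only = %d\n", queue_pm_only());	/* 0: normal I/O admitted */
	return 0;
}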
block/blk-core.c  +18 −17
@@ -421,24 +421,25 @@ void blk_sync_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_sync_queue);
 
 /**
- * blk_set_preempt_only - set QUEUE_FLAG_PREEMPT_ONLY
+ * blk_set_pm_only - increment pm_only counter
  * @q: request queue pointer
- *
- * Returns the previous value of the PREEMPT_ONLY flag - 0 if the flag was not
- * set and 1 if the flag was already set.
  */
-int blk_set_preempt_only(struct request_queue *q)
+void blk_set_pm_only(struct request_queue *q)
 {
-	return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
+	atomic_inc(&q->pm_only);
 }
-EXPORT_SYMBOL_GPL(blk_set_preempt_only);
+EXPORT_SYMBOL_GPL(blk_set_pm_only);
 
-void blk_clear_preempt_only(struct request_queue *q)
+void blk_clear_pm_only(struct request_queue *q)
 {
-	blk_queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
+	int pm_only;
+
+	pm_only = atomic_dec_return(&q->pm_only);
+	WARN_ON_ONCE(pm_only < 0);
+	if (pm_only == 0)
+		wake_up_all(&q->mq_freeze_wq);
 }
-EXPORT_SYMBOL_GPL(blk_clear_preempt_only);
+EXPORT_SYMBOL_GPL(blk_clear_pm_only);
 
 /**
  * __blk_run_queue_uncond - run a queue whether or not it has been stopped
@@ -916,7 +917,7 @@ EXPORT_SYMBOL(blk_alloc_queue);
  */
 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 {
-	const bool preempt = flags & BLK_MQ_REQ_PREEMPT;
+	const bool pm = flags & BLK_MQ_REQ_PREEMPT;
 
 	while (true) {
 		bool success = false;
@@ -924,11 +925,11 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 		rcu_read_lock();
 		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
 			/*
-			 * The code that sets the PREEMPT_ONLY flag is
-			 * responsible for ensuring that that flag is globally
-			 * visible before the queue is unfrozen.
+			 * The code that increments the pm_only counter is
+			 * responsible for ensuring that that counter is
+			 * globally visible before the queue is unfrozen.
 			 */
-			if (preempt || !blk_queue_preempt_only(q)) {
+			if (pm || !blk_queue_pm_only(q)) {
 				success = true;
 			} else {
 				percpu_ref_put(&q->q_usage_counter);
@@ -953,7 +954,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 
 		wait_event(q->mq_freeze_wq,
 			   (atomic_read(&q->mq_freeze_depth) == 0 &&
-			    (preempt || !blk_queue_preempt_only(q))) ||
+			    (pm || !blk_queue_pm_only(q))) ||
 			   blk_queue_dying(q));
 		if (blk_queue_dying(q))
 			return -ENODEV;
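
A note on the blk_queue_enter() change above: in this kernel generation,
power-management requests are the ones allocated with BLK_MQ_REQ_PREEMPT,
which makes the pm branch admit them even while pm_only is non-zero. A
fragment in the style of __scsi_execute() from this era (kernel context,
not standalone code):

	struct request *req;

	/*
	 * BLK_MQ_REQ_PREEMPT makes blk_queue_enter() evaluate pm as true,
	 * so the allocation succeeds even when pm_only > 0.
	 */
	req = blk_get_request(sdev->request_queue, REQ_OP_SCSI_IN,
			      BLK_MQ_REQ_PREEMPT);
	if (IS_ERR(req))
		return PTR_ERR(req);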
block/blk-mq-debugfs.c  +9 −1
@@ -102,6 +102,14 @@ static int blk_flags_show(struct seq_file *m, const unsigned long flags,
 	return 0;
 }
 
+static int queue_pm_only_show(void *data, struct seq_file *m)
+{
+	struct request_queue *q = data;
+
+	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
+	return 0;
+}
+
 #define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
 static const char *const blk_queue_flag_name[] = {
 	QUEUE_FLAG_NAME(QUEUED),
@@ -132,7 +140,6 @@ static const char *const blk_queue_flag_name[] = {
 	QUEUE_FLAG_NAME(REGISTERED),
 	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
 	QUEUE_FLAG_NAME(QUIESCED),
-	QUEUE_FLAG_NAME(PREEMPT_ONLY),
 };
 #undef QUEUE_FLAG_NAME
 
@@ -209,6 +216,7 @@ static ssize_t queue_write_hint_store(void *data, const char __user *buf,
 static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
 	{ "poll_stat", 0400, queue_poll_stat_show },
 	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
+	{ "pm_only", 0600, queue_pm_only_show, NULL },
 	{ "state", 0600, queue_state_show, queue_state_write },
 	{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
 	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
drivers/scsi/scsi_lib.c  +7 −4
@@ -3059,11 +3059,14 @@ scsi_device_quiesce(struct scsi_device *sdev)
 	 */
 	WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);
 
-	blk_set_preempt_only(q);
+	if (sdev->quiesced_by == current)
+		return 0;
+
+	blk_set_pm_only(q);
 
 	blk_mq_freeze_queue(q);
 	/*
-	 * Ensure that the effect of blk_set_preempt_only() will be visible
+	 * Ensure that the effect of blk_set_pm_only() will be visible
 	 * for percpu_ref_tryget() callers that occur after the queue
 	 * unfreeze even if the queue was already frozen before this function
 	 * was called. See also https://lwn.net/Articles/573497/.
@@ -3076,7 +3079,7 @@ scsi_device_quiesce(struct scsi_device *sdev)
 	if (err == 0)
 		sdev->quiesced_by = current;
 	else
-		blk_clear_preempt_only(q);
+		blk_clear_pm_only(q);
 	mutex_unlock(&sdev->state_mutex);
 
 	return err;
@@ -3100,7 +3103,7 @@ void scsi_device_resume(struct scsi_device *sdev)
 	 */
 	mutex_lock(&sdev->state_mutex);
 	sdev->quiesced_by = NULL;
-	blk_clear_preempt_only(sdev->request_queue);
+	blk_clear_pm_only(sdev->request_queue);
 	if (sdev->sdev_state == SDEV_QUIESCE)
 		scsi_device_set_state(sdev, SDEV_RUNNING);
 	mutex_unlock(&sdev->state_mutex);
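
Note how the new early return preserves correctness here: blk_set_pm_only()
is no longer idempotent the way setting a flag was, so a repeated
scsi_device_quiesce() call by the same owner (tracked via
sdev->quiesced_by) must not increment pm_only a second time; otherwise the
single blk_clear_pm_only() in scsi_device_resume() could never bring the
counter back to zero.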
include/linux/blkdev.h  +9 −5
@@ -504,6 +504,12 @@ struct request_queue {
 	 * various queue flags, see QUEUE_* below
 	 */
 	unsigned long		queue_flags;
+	/*
+	 * Number of contexts that have called blk_set_pm_only(). If this
+	 * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
+	 * processed.
+	 */
+	atomic_t		pm_only;
 
 	/*
 	 * ida allocated id for this queue.  Used to index queues from
@@ -698,7 +704,6 @@ struct request_queue {
 #define QUEUE_FLAG_REGISTERED  26	/* queue has been registered to a disk */
 #define QUEUE_FLAG_SCSI_PASSTHROUGH 27	/* queue supports SCSI commands */
 #define QUEUE_FLAG_QUIESCED    28	/* queue has been quiesced */
-#define QUEUE_FLAG_PREEMPT_ONLY	29	/* only process REQ_PREEMPT requests */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
@@ -736,12 +741,11 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
 	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
 			     REQ_FAILFAST_DRIVER))
 #define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
-#define blk_queue_preempt_only(q)				\
-	test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
+#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
 #define blk_queue_fua(q)	test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
 
-extern int blk_set_preempt_only(struct request_queue *q);
-extern void blk_clear_preempt_only(struct request_queue *q);
+extern void blk_set_pm_only(struct request_queue *q);
+extern void blk_clear_pm_only(struct request_queue *q);
 
 static inline int queue_in_flight(struct request_queue *q)
 {