
Commit 8814ce8a authored by Bart Van Assche, committed by Jens Axboe

block: Introduce blk_queue_flag_{set,clear,test_and_{set,clear}}()



Introduce functions that modify the queue flags and that protect
these modifications with the request queue lock. Except for moving
one wake_up_all() call from inside to outside a critical section,
this patch does not change any functionality.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f78bac2c
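The transformation this patch applies across the block layer is mechanical: call sites that open-coded a queue_lock critical section around queue_flag_set()/queue_flag_clear() now call the new locked wrappers instead. A minimal before/after sketch of that pattern, using a hypothetical caller (example_set_dying() is illustrative, not part of the patch):

/* Before: each caller open-codes the locking around queue_flag_set(). */
void example_set_dying(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_DYING, q);
	spin_unlock_irq(q->queue_lock);
}

/* After: blk_queue_flag_set() acquires and releases q->queue_lock itself,
 * using spin_lock_irqsave() so it is also safe where interrupts may
 * already be disabled.
 */
void example_set_dying(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
}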
block/blk-core.c +75 −16
@@ -71,6 +71,78 @@ struct kmem_cache *blk_requestq_cachep;
  */
 static struct workqueue_struct *kblockd_workqueue;
 
+/**
+ * blk_queue_flag_set - atomically set a queue flag
+ * @flag: flag to be set
+ * @q: request queue
+ */
+void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	queue_flag_set(flag, q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(blk_queue_flag_set);
+
+/**
+ * blk_queue_flag_clear - atomically clear a queue flag
+ * @flag: flag to be cleared
+ * @q: request queue
+ */
+void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	queue_flag_clear(flag, q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(blk_queue_flag_clear);
+
+/**
+ * blk_queue_flag_test_and_set - atomically test and set a queue flag
+ * @flag: flag to be set
+ * @q: request queue
+ *
+ * Returns the previous value of @flag - 0 if the flag was not set and 1 if
+ * the flag was already set.
+ */
+bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
+{
+	unsigned long flags;
+	bool res;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	res = queue_flag_test_and_set(flag, q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	return res;
+}
+EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
+
+/**
+ * blk_queue_flag_test_and_clear - atomically test and clear a queue flag
+ * @flag: flag to be cleared
+ * @q: request queue
+ *
+ * Returns the previous value of @flag - 0 if the flag was not set and 1 if
+ * the flag was set.
+ */
+bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q)
+{
+	unsigned long flags;
+	bool res;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	res = queue_flag_test_and_clear(flag, q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	return res;
+}
+EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_clear);
+
 static void blk_clear_congested(struct request_list *rl, int sync)
 {
 #ifdef CONFIG_CGROUP_WRITEBACK
@@ -361,25 +433,14 @@ EXPORT_SYMBOL(blk_sync_queue);
  */
 int blk_set_preempt_only(struct request_queue *q)
 {
-	unsigned long flags;
-	int res;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	res = queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
-	return res;
+	return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
 }
 EXPORT_SYMBOL_GPL(blk_set_preempt_only);
 
 void blk_clear_preempt_only(struct request_queue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
+	blk_queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
 	wake_up_all(&q->mq_freeze_wq);
-	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL_GPL(blk_clear_preempt_only);
 
@@ -629,9 +690,7 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
 
 void blk_set_queue_dying(struct request_queue *q)
 {
-	spin_lock_irq(q->queue_lock);
-	queue_flag_set(QUEUE_FLAG_DYING, q);
-	spin_unlock_irq(q->queue_lock);
+	blk_queue_flag_set(QUEUE_FLAG_DYING, q);
 
 	/*
 	 * When queue DYING flag is set, we need to block new req
block/blk-mq.c +2 −10
@@ -194,11 +194,7 @@ EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
  */
 void blk_mq_quiesce_queue_nowait(struct request_queue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	queue_flag_set(QUEUE_FLAG_QUIESCED, q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q);
 }
 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
 
@@ -239,11 +235,7 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
  */
 void blk_mq_unquiesce_queue(struct request_queue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
 
 	/* dispatch requests which are inserted during quiescing */
 	blk_mq_run_hw_queues(q, true);
block/blk-settings.c +2 −4
@@ -859,12 +859,10 @@ EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
 void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 {
-	spin_lock_irq(q->queue_lock);
 	if (queueable)
-		queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q);
+		blk_queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q);
 	else
-		queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q);
-	spin_unlock_irq(q->queue_lock);
+		blk_queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q);
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
block/blk-sysfs.c +7 −15
@@ -276,12 +276,10 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \
 	if (neg)							\
 		val = !val;						\
 									\
-	spin_lock_irq(q->queue_lock);					\
 	if (val)							\
-		queue_flag_set(QUEUE_FLAG_##flag, q);			\
+		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
 	else								\
-		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
-	spin_unlock_irq(q->queue_lock);					\
+		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
 	return ret;							\
 }
 
@@ -414,12 +412,10 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
 	if (ret < 0)
 		return ret;
 
-	spin_lock_irq(q->queue_lock);
 	if (poll_on)
-		queue_flag_set(QUEUE_FLAG_POLL, q);
+		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
 	else
-		queue_flag_clear(QUEUE_FLAG_POLL, q);
-	spin_unlock_irq(q->queue_lock);
+		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
 
 	return ret;
 }
@@ -487,12 +483,10 @@ static ssize_t queue_wc_store(struct request_queue *q, const char *page,
 	if (set == -1)
 		return -EINVAL;
 
-	spin_lock_irq(q->queue_lock);
 	if (set)
-		queue_flag_set(QUEUE_FLAG_WC, q);
+		blk_queue_flag_set(QUEUE_FLAG_WC, q);
 	else
-		queue_flag_clear(QUEUE_FLAG_WC, q);
-	spin_unlock_irq(q->queue_lock);
+		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
 
 	return count;
 }
@@ -946,9 +940,7 @@ void blk_unregister_queue(struct gendisk *disk)
 	 */
 	mutex_lock(&q->sysfs_lock);
 
-	spin_lock_irq(q->queue_lock);
-	queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
-	spin_unlock_irq(q->queue_lock);
+	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
 
 	/*
 	 * Remove the sysfs attributes before unregistering the queue data
block/blk-timeout.c +2 −4
@@ -57,12 +57,10 @@ ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
 		char *p = (char *) buf;
 
 		val = simple_strtoul(p, &p, 10);
-		spin_lock_irq(q->queue_lock);
 		if (val)
-			queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
+			blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
 		else
-			queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
-		spin_unlock_irq(q->queue_lock);
+			blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
 	}
 
 	return count;
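Note that the test_and_{set,clear} variants return the previous flag value, which lets a caller detect whether it performed the actual 0 -> 1 (or 1 -> 0) transition; blk_set_preempt_only() above relies on this by propagating the return value. A minimal sketch of that idiom (example_enter_preempt_only() is hypothetical, not from this patch):

/* Only the caller that actually flips the flag does the one-time work. */
static void example_enter_preempt_only(struct request_queue *q)
{
	if (blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q))
		return;	/* flag was already set; another caller won the race */

	/* First setter: one-time transition work goes here. */
}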