Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 32872863 authored by Ming Lei, committed by Martin K. Petersen
Browse files

scsi: core: avoid host-wide host_busy counter for scsi_mq



It isn't necessary to check the host depth in scsi_queue_rq() any more
since it has been respected by blk-mq before calling scsi_queue_rq() via
getting driver tag.

Lots of LUNs may attach to same host and per-host IOPS may reach millions,
so we should avoid expensive atomic operations on the host-wide counter in
the IO path.

This patch implements scsi_host_busy() via blk_mq_tagset_busy_iter() for
reading the count of busy IOs for scsi_mq.

It is observed that IOPS is increased by 15% in IO test on scsi_debug (32
LUNs, 32 submit queues, 1024 can_queue, libaio/dio) in a dual-socket
system.

[mkp: clarified commit message]

Cc: Omar Sandoval <osandov@fb.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: James Bottomley <james.bottomley@hansenpartnership.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Don Brace <don.brace@microsemi.com>
Cc: Kashyap Desai <kashyap.desai@broadcom.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Laurence Oberman <loberman@redhat.com>
Cc: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Bart Van Assche <bart.vanassche@wdc.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent c84b023a
Loading
Loading
Loading
Loading
+23 −1
Original line number Diff line number Diff line
@@ -563,13 +563,35 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_host_get);

/*
 * Accumulator handed to blk_mq_tagset_busy_iter() while counting the
 * in-flight requests of a blk-mq SCSI host.
 */
struct scsi_host_mq_in_flight {
	int cnt;
};

/*
 * Tag-set iterator callback: count only requests that have actually been
 * started — a tag that is allocated but not yet started is not in flight.
 */
static void scsi_host_check_in_flight(struct request *rq, void *data,
		bool reserved)
{
	struct scsi_host_mq_in_flight *busy = data;

	if (!blk_mq_request_started(rq))
		return;

	busy->cnt++;
}

/**
 * scsi_host_busy - Return the number of commands in flight on a host
 * @shost:	Pointer to the Scsi_Host whose busy count is queried.
 *
 * For blk-mq hosts no host-wide atomic counter is maintained in the I/O
 * path, so the count is derived on demand by iterating the tag set;
 * legacy (non-mq) hosts still read the shared host_busy counter.
 **/
int scsi_host_busy(struct Scsi_Host *shost)
{
	struct scsi_host_mq_in_flight in_flight = { .cnt = 0 };

	if (shost->use_blk_mq) {
		blk_mq_tagset_busy_iter(&shost->tag_set,
					scsi_host_check_in_flight, &in_flight);
		return in_flight.cnt;
	}

	return atomic_read(&shost->host_busy);
}
EXPORT_SYMBOL(scsi_host_busy);

+17 −6
Original line number Diff line number Diff line
@@ -345,6 +345,7 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost)
	unsigned long flags;

	rcu_read_lock();
	if (!shost->use_blk_mq)
		atomic_dec(&shost->host_busy);
	if (unlikely(scsi_host_in_recovery(shost))) {
		spin_lock_irqsave(shost->host_lock, flags);
@@ -444,7 +445,12 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget)

static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
	if (shost->can_queue > 0 &&
	/*
	 * blk-mq can handle host queue busy efficiently via host-wide driver
	 * tag allocation
	 */

	if (!shost->use_blk_mq && shost->can_queue > 0 &&
	    atomic_read(&shost->host_busy) >= shost->can_queue)
		return true;
	if (atomic_read(&shost->host_blocked) > 0)
@@ -1600,9 +1606,12 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
	if (scsi_host_in_recovery(shost))
		return 0;

	if (!shost->use_blk_mq)
		busy = atomic_inc_return(&shost->host_busy) - 1;
	else
		busy = 0;
	if (atomic_read(&shost->host_blocked) > 0) {
		if (busy)
		if (busy || scsi_host_busy(shost))
			goto starved;

		/*
@@ -1616,7 +1625,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
				     "unblocking host at zero depth\n"));
	}

	if (shost->can_queue > 0 && busy >= shost->can_queue)
	if (!shost->use_blk_mq && shost->can_queue > 0 && busy >= shost->can_queue)
		goto starved;
	if (shost->host_self_blocked)
		goto starved;
@@ -1702,6 +1711,8 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
	 * with the locks as normal issue path does.
	 */
	atomic_inc(&sdev->device_busy);

	if (!shost->use_blk_mq)
		atomic_inc(&shost->host_busy);
	if (starget->can_queue > 0)
		atomic_inc(&starget->target_busy);