Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e9684678 authored by Bart Van Assche, committed by David Dillow
Browse files

IB/srp: stop sharing the host lock with SCSI



We don't need protection against the SCSI stack, so use our own lock to
allow parallel progress on separate CPUs.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
[ broken out and small cleanups by David Dillow ]
Signed-off-by: David Dillow <dillowda@ornl.gov>
parent 94a9174c
Loading
Loading
Loading
Loading
+23 −23
Original line number Diff line number Diff line
@@ -447,12 +447,12 @@ static bool srp_change_state(struct srp_target_port *target,
{
	bool changed = false;

	spin_lock_irq(target->scsi_host->host_lock);
	spin_lock_irq(&target->lock);
	if (target->state == old) {
		target->state = new;
		changed = true;
	}
	spin_unlock_irq(target->scsi_host->host_lock);
	spin_unlock_irq(&target->lock);
	return changed;
}

@@ -555,11 +555,11 @@ static void srp_remove_req(struct srp_target_port *target,
	unsigned long flags;

	srp_unmap_data(req->scmnd, target, req);
	spin_lock_irqsave(target->scsi_host->host_lock, flags);
	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_lim_delta;
	req->scmnd = NULL;
	list_add_tail(&req->list, &target->free_reqs);
	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
	spin_unlock_irqrestore(&target->lock, flags);
}

static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
@@ -634,13 +634,13 @@ static int srp_reconnect_target(struct srp_target_port *target)
	 * Schedule our work inside the lock to avoid a race with
	 * the flush_scheduled_work() in srp_remove_one().
	 */
	spin_lock_irq(target->scsi_host->host_lock);
	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		target->state = SRP_TARGET_DEAD;
		INIT_WORK(&target->work, srp_remove_work);
		schedule_work(&target->work);
	}
	spin_unlock_irq(target->scsi_host->host_lock);
	spin_unlock_irq(&target->lock);

	return ret;
}
@@ -829,17 +829,16 @@ static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
{
	unsigned long flags;

	spin_lock_irqsave(target->scsi_host->host_lock, flags);
	spin_lock_irqsave(&target->lock, flags);
	list_add(&iu->list, &target->free_tx);
	if (iu_type != SRP_IU_RSP)
		++target->req_lim;
	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
	spin_unlock_irqrestore(&target->lock, flags);
}

/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and free_tx.  If IU is not sent, it must be returned using
 * srp_put_tx_iu().
 * Must be called with target->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
@@ -920,9 +919,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(target->scsi_host->host_lock, flags);
		spin_lock_irqsave(&target->lock, flags);
		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
		spin_unlock_irqrestore(&target->lock, flags);

		target->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
@@ -963,10 +962,10 @@ static int srp_response_common(struct srp_target_port *target, s32 req_delta,
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(target->scsi_host->host_lock, flags);
	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_delta;
	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
	spin_unlock_irqrestore(&target->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
@@ -1131,14 +1130,14 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
		return 0;
	}

	spin_lock_irqsave(shost->host_lock, flags);
	spin_lock_irqsave(&target->lock, flags);
	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
	if (iu) {
		req = list_first_entry(&target->free_reqs, struct srp_request,
				      list);
		list_del(&req->list);
	}
	spin_unlock_irqrestore(shost->host_lock, flags);
	spin_unlock_irqrestore(&target->lock, flags);

	if (!iu)
		goto err;
@@ -1184,9 +1183,9 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
err_iu:
	srp_put_tx_iu(target, iu, SRP_IU_CMD);

	spin_lock_irqsave(shost->host_lock, flags);
	spin_lock_irqsave(&target->lock, flags);
	list_add(&req->list, &target->free_reqs);
	spin_unlock_irqrestore(shost->host_lock, flags);
	spin_unlock_irqrestore(&target->lock, flags);

err:
	return SCSI_MLQUEUE_HOST_BUSY;
@@ -1451,9 +1450,9 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,

	init_completion(&target->tsk_mgmt_done);

	spin_lock_irq(target->scsi_host->host_lock);
	spin_lock_irq(&target->lock);
	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
	spin_unlock_irq(target->scsi_host->host_lock);
	spin_unlock_irq(&target->lock);

	if (!iu)
		return -1;
@@ -1957,6 +1956,7 @@ static ssize_t srp_create_target(struct device *dev,
	target->scsi_host  = target_host;
	target->srp_host   = host;

	spin_lock_init(&target->lock);
	INIT_LIST_HEAD(&target->free_tx);
	INIT_LIST_HEAD(&target->free_reqs);
	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
@@ -2186,9 +2186,9 @@ static void srp_remove_one(struct ib_device *device)
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list) {
			spin_lock_irq(target->scsi_host->host_lock);
			spin_lock_irq(&target->lock);
			target->state = SRP_TARGET_REMOVED;
			spin_unlock_irq(target->scsi_host->host_lock);
			spin_unlock_irq(&target->lock);
		}
		spin_unlock(&host->target_lock);

+2 −0
Original line number Diff line number Diff line
@@ -144,6 +144,8 @@ struct srp_target_port {

	struct srp_iu	       *rx_ring[SRP_RQ_SIZE];

	spinlock_t		lock;

	struct list_head	free_tx;
	struct srp_iu	       *tx_ring[SRP_SQ_SIZE];