
Commit 8a0ff92c authored by Webb Scales, committed by James Bottomley

hpsa: use helper routines for finishing commands



Clean up command completions: factor the repeated free-and-done and retry sequences into helper routines.
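
For quick reference, these are the two helpers the patch introduces (reproduced from the diff below, with descriptive comments added); every completion path that used to open-code the free/done or resubmit sequence now tail-calls one of them:

	static void hpsa_cmd_free_and_done(struct ctlr_info *h,
			struct CommandList *c, struct scsi_cmnd *cmd)
	{
		/* Return the command block to the driver's pool, then complete the SCSI command. */
		cmd_free(h, c);
		cmd->scsi_done(cmd);
	}

	static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
	{
		/* Queue the command for resubmission on the current CPU via the driver's resubmit workqueue. */
		INIT_WORK(&c->work, hpsa_command_resubmit_worker);
		queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
	}

This lets callers such as process_ioaccel2_completion() drop the local retry_cmd: label and exit with "return hpsa_retry_cmd(h, c);" or "return hpsa_cmd_free_and_done(h, c, cmd);" (both helpers return void, so the tail call is just a compact single-line exit).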

Reviewed-by: Scott Teel <scott.teel@pmcs.com>
Reviewed-by: Kevin Barnett <kevin.barnett@pmcs.com>
Reviewed-by: Tomas Henzl <thenzl@redhat.com>
Reviewed-by: Hannes Reinecke <hare@Suse.de>
Signed-off-by: Webb Scales <webbnh@hp.com>
Signed-off-by: Don Brace <don.brace@pmcs.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: James Bottomley <JBottomley@Odin.com>
parent 8be986cc
drivers/scsi/hpsa.c +31 −47
@@ -1973,6 +1973,19 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
 	return retry;	/* retry on raid path? */
 }
 
+static void hpsa_cmd_free_and_done(struct ctlr_info *h,
+		struct CommandList *c, struct scsi_cmnd *cmd)
+{
+	cmd_free(h, c);
+	cmd->scsi_done(cmd);
+}
+
+static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
+{
+	INIT_WORK(&c->work, hpsa_command_resubmit_worker);
+	queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
+}
+
 static void process_ioaccel2_completion(struct ctlr_info *h,
 		struct CommandList *c, struct scsi_cmnd *cmd,
 		struct hpsa_scsi_dev_t *dev)
@@ -1981,13 +1994,11 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
 
 	/* check for good status */
 	if (likely(c2->error_data.serv_response == 0 &&
-			c2->error_data.status == 0)) {
-		cmd_free(h, c);
-		cmd->scsi_done(cmd);
-		return;
-	}
+			c2->error_data.status == 0))
+		return hpsa_cmd_free_and_done(h, c, cmd);
 
-	/* Any RAID offload error results in retry which will use
+	/*
+	 * Any RAID offload error results in retry which will use
 	 * the normal I/O path so the controller can handle whatever's
 	 * wrong.
 	 */
@@ -1997,19 +2008,14 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
 		if (c2->error_data.status ==
 			IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
 			dev->offload_enabled = 0;
-		goto retry_cmd;
+
+		return hpsa_retry_cmd(h, c);
 	}
 
 	if (handle_ioaccel_mode2_error(h, c, cmd, c2))
-		goto retry_cmd;
+		return hpsa_retry_cmd(h, c);
 
-	cmd_free(h, c);
-	cmd->scsi_done(cmd);
-	return;
-
-retry_cmd:
-	INIT_WORK(&c->work, hpsa_command_resubmit_worker);
-	queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
+	return hpsa_cmd_free_and_done(h, c, cmd);
 }
 
 /* Returns 0 on success, < 0 otherwise. */
@@ -2082,22 +2088,15 @@ static void complete_scsi_command(struct CommandList *cp)
 	if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
 		/* DID_NO_CONNECT will prevent a retry */
 		cmd->result = DID_NO_CONNECT << 16;
-		cmd_free(h, cp);
-		cmd->scsi_done(cmd);
-		return;
+		return hpsa_cmd_free_and_done(h, cp, cmd);
 	}
 
 	if (cp->cmd_type == CMD_IOACCEL2)
 		return process_ioaccel2_completion(h, cp, cmd, dev);
 
 	scsi_set_resid(cmd, ei->ResidualCnt);
-	if (ei->CommandStatus == 0) {
-		if (cp->cmd_type == CMD_IOACCEL1)
-			atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
-		cmd_free(h, cp);
-		cmd->scsi_done(cmd);
-		return;
-	}
+	if (ei->CommandStatus == 0)
+		return hpsa_cmd_free_and_done(h, cp, cmd);
 
 	/* For I/O accelerator commands, copy over some fields to the normal
 	 * CISS header used below for error handling.
@@ -2119,10 +2118,7 @@ static void complete_scsi_command(struct CommandList *cp)
 		if (is_logical_dev_addr_mode(dev->scsi3addr)) {
 			if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
 				dev->offload_enabled = 0;
-			INIT_WORK(&cp->work, hpsa_command_resubmit_worker);
-			queue_work_on(raw_smp_processor_id(),
-					h->resubmit_wq, &cp->work);
-			return;
+			return hpsa_retry_cmd(h, cp);
 		}
 	}
 
@@ -2253,8 +2249,8 @@ static void complete_scsi_command(struct CommandList *cp)
 		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
 				cp, ei->CommandStatus);
 	}
-	cmd_free(h, cp);
-	cmd->scsi_done(cmd);
+
+	return hpsa_cmd_free_and_done(h, cp, cmd);
 }
 
 static void hpsa_pci_unmap(struct pci_dev *pdev,
@@ -4509,16 +4505,13 @@ static void hpsa_command_resubmit_worker(struct work_struct *work)
 {
 	struct scsi_cmnd *cmd;
 	struct hpsa_scsi_dev_t *dev;
-	struct CommandList *c =
-			container_of(work, struct CommandList, work);
+	struct CommandList *c = container_of(work, struct CommandList, work);
 
 	cmd = c->scsi_cmd;
 	dev = cmd->device->hostdata;
 	if (!dev) {
 		cmd->result = DID_NO_CONNECT << 16;
-		cmd_free(c->h, c);
-		cmd->scsi_done(cmd);
-		return;
+		return hpsa_cmd_free_and_done(c->h, c, cmd);
 	}
 	if (c->cmd_type == CMD_IOACCEL2) {
 		struct ctlr_info *h = c->h;
@@ -4537,12 +4530,7 @@ static void hpsa_command_resubmit_worker(struct work_struct *work)
 				 * then get SCSI_MLQUEUE_HOST_BUSY.
 				 */
 				cmd->result = DID_IMM_RETRY << 16;
-				cmd->scsi_done(cmd);
-				cmd_free(h, c);	/* FIX-ME:  on merge, change
-						 * to cmd_tagged_free() and
-						 * ultimately to
-						 * hpsa_cmd_free_and_done(). */
-				return;
+				return hpsa_cmd_free_and_done(h, c, cmd);
 			}
 			/* else, fall thru and resubmit down CISS path */
 		}
@@ -4606,9 +4594,7 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 		if (rc == 0)
 			return 0;
 		if (rc == SCSI_MLQUEUE_HOST_BUSY) {
-			cmd_free(h, c);	/* FIX-ME:  on merge, change to
-					 * cmd_tagged_free(), and ultimately
-					 * to hpsa_cmd_resolve_and_free(). */
+			cmd_free(h, c);
 			return SCSI_MLQUEUE_HOST_BUSY;
 		}
 	}
@@ -7721,8 +7707,6 @@ static void hpsa_flush_cache(struct ctlr_info *h)
 	struct CommandList *c;
 	int rc;
 
-	/* Don't bother trying to flush the cache if locked up */
-	/* FIXME not necessary if do_simple_cmd does the check */
 	if (unlikely(lockup_detected(h)))
 		return;
 	flush_buf = kzalloc(4, GFP_KERNEL);