
Commit 396883e2 authored by Stephen M. Cameron, committed by James Bottomley

[SCSI] hpsa: prevent stalled i/o



If a fifo full condition is encountered, i/o requests will stack
up in the h->reqQ queue.  The only thing which empties this queue
is start_io, which only gets called when new i/o requests come in.
If none are forthcoming, i/o in h->reqQ will be stalled.

To fix this, whenever a fifo full condition is encountered, it is
recorded; then, when a command completes, the interrupt handler
checks whether a fifo full condition was recently seen and, if so,
calls start_io so that i/o's in h->reqQ do not get stuck.
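
In outline, the change amounts to the following (condensed from the
diff below; locking and the surrounding submission loop are omitted):

	/* In start_io(): record that the controller fifo blocked submission. */
	if (h->access.fifo_full(h)) {
		h->fifo_recently_full = 1;	/* remember the stall risk */
		break;				/* requests stay queued on h->reqQ */
	}
	h->fifo_recently_full = 0;

	/* In finish_cmd(): a completion frees a fifo slot, so kick off
	 * submission again if the fifo was recently full and few commands
	 * remain in flight. */
	if (unlikely(h->fifo_recently_full) && h->commands_outstanding < 5)
		io_may_be_stalled = 1;
	...
	if (unlikely(io_may_be_stalled))
		start_io(h);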

I've only ever seen this problem occur when running specialized
test programs that pound on the CCISS_PASSTHRU ioctl.
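
For reference, such a test might look like the rough user-space sketch
below. This is not the actual test program from the commit: the device
node, loop count, and INQUIRY CDB are illustrative assumptions; only the
CCISS_PASSTHRU ioctl and IOCTL_Command_struct from <linux/cciss_ioctl.h>
are taken as given.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/cciss_ioctl.h>

int main(void)
{
	unsigned char buf[96];
	IOCTL_Command_struct ic;
	int i, fd = open("/dev/sg0", O_RDWR);	/* hypothetical device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	for (i = 0; i < 100000; i++) {		/* "pound" on the ioctl */
		memset(&ic, 0, sizeof(ic));	/* zeroed LUN_info addresses LUN 0 */
		memset(buf, 0, sizeof(buf));
		ic.Request.CDBLen = 6;
		ic.Request.Type.Type = TYPE_CMD;
		ic.Request.Type.Attribute = ATTR_SIMPLE;
		ic.Request.Type.Direction = XFER_READ;
		ic.Request.Timeout = 0;
		ic.Request.CDB[0] = 0x12;	/* INQUIRY */
		ic.Request.CDB[4] = sizeof(buf);	/* allocation length */
		ic.buf_size = sizeof(buf);
		ic.buf = buf;
		if (ioctl(fd, CCISS_PASSTHRU, &ic) < 0) {
			perror("CCISS_PASSTHRU");
			break;
		}
	}
	close(fd);
	return 0;
}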

Signed-off-by: Stephen M. Cameron <scameron@beardog.cce.hp.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
parent 0390f0c0
drivers/scsi/hpsa.c +30 −2

@@ -3483,9 +3483,11 @@ static void start_io(struct ctlr_info *h)
 		c = list_entry(h->reqQ.next, struct CommandList, list);
 		/* can't do anything if fifo is full */
 		if ((h->access.fifo_full(h))) {
+			h->fifo_recently_full = 1;
 			dev_warn(&h->pdev->dev, "fifo full\n");
 			break;
 		}
+		h->fifo_recently_full = 0;
 
 		/* Get the first entry from the Request Q */
 		removeQ(c);
@@ -3539,15 +3541,41 @@ static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
 static inline void finish_cmd(struct CommandList *c)
 {
 	unsigned long flags;
+	int io_may_be_stalled = 0;
+	struct ctlr_info *h = c->h;
 
-	spin_lock_irqsave(&c->h->lock, flags);
+	spin_lock_irqsave(&h->lock, flags);
 	removeQ(c);
-	spin_unlock_irqrestore(&c->h->lock, flags);
+
+	/*
+	 * Check for possibly stalled i/o.
+	 *
+	 * If a fifo_full condition is encountered, requests will back up
+	 * in h->reqQ.  This queue is only emptied out by start_io which is
+	 * only called when a new i/o request comes in.  If no i/o's are
+	 * forthcoming, the i/o's in h->reqQ can get stuck.  So we call
+	 * start_io from here if we detect such a danger.
+	 *
+	 * Normally, we shouldn't hit this case, but pounding on the
+	 * CCISS_PASSTHRU ioctl can provoke it.  Only call start_io if
+	 * commands_outstanding is low.  We want to avoid calling
+	 * start_io from in here as much as possible, and esp. don't
+	 * want to get in a cycle where we call start_io every time
+	 * through here.
+	 */
+	if (unlikely(h->fifo_recently_full) &&
+		h->commands_outstanding < 5)
+		io_may_be_stalled = 1;
+
+	spin_unlock_irqrestore(&h->lock, flags);
+
 	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
 	if (likely(c->cmd_type == CMD_SCSI))
 		complete_scsi_command(c);
 	else if (c->cmd_type == CMD_IOCTL_PEND)
 		complete(c->waiting);
+	if (unlikely(io_may_be_stalled))
+		start_io(h);
 }
 
 static inline u32 hpsa_tag_contains_index(u32 tag)
drivers/scsi/hpsa.h +1 −0

@@ -136,6 +136,7 @@ struct ctlr_info {
 	atomic_t firmware_flash_in_progress;
 	u32 lockup_detected;
 	struct list_head lockup_list;
+	u32 fifo_recently_full;
 	/* Address of h->q[x] is passed to intr handler to know which queue */
 	u8 q[MAX_REPLY_QUEUES];
 	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */