
Commit b53d124a authored by wenxiong@linux.vnet.ibm.com, committed by James Bottomley

[SCSI] ipr: Implement block iopoll



This patch implements blk-iopoll in the ipr driver for improved performance.
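
In outline: when polling is enabled, the hard interrupt handler no longer
walks the HRRQ (host request/response queue) itself; it schedules a
blk-iopoll softirq, which reaps completions in budgeted batches and
re-arms the interrupt path once the queue drains. A minimal sketch of
that pattern, with hypothetical foo_* names standing in for the
ipr-specific code in the diff below:

	#include <linux/blk-iopoll.h>
	#include <linux/interrupt.h>
	#include <linux/kernel.h>

	struct foo_queue {
		struct blk_iopoll iopoll;
		/* ... device completion-queue state ... */
	};

	/* Assumed helper: reap up to @budget completions, return the count. */
	int foo_process_completions(struct foo_queue *q, int budget);

	/* Hard-IRQ handler: defer completion processing to the softirq poller. */
	static irqreturn_t foo_isr(int irq, void *data)
	{
		struct foo_queue *q = data;

		if (!blk_iopoll_sched_prep(&q->iopoll))	/* 0 means OK to schedule */
			blk_iopoll_sched(&q->iopoll);
		return IRQ_HANDLED;
	}

	/* Softirq poller: handle at most @budget completions per invocation;
	 * finishing below budget means the queue is drained, so re-enable
	 * the interrupt-driven path via blk_iopoll_complete(). */
	static int foo_iopoll(struct blk_iopoll *iop, int budget)
	{
		struct foo_queue *q = container_of(iop, struct foo_queue, iopoll);
		int done = foo_process_completions(q, budget);

		if (done < budget)
			blk_iopoll_complete(iop);
		return done;
	}

	/* Probe-time setup: register the poller with its weight and enable it. */
	static void foo_setup_iopoll(struct foo_queue *q, int weight)
	{
		blk_iopoll_init(&q->iopoll, weight, foo_iopoll);
		blk_iopoll_enable(&q->iopoll);
	}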

Signed-off-by: Wen Xiong <wenxiong@linux.vnet.ibm.com>
Acked-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
parent 56d6aa33
drivers/scsi/ipr.c +172 −49
@@ -108,6 +108,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
 		.max_cmds = 100,
 		.cache_line_size = 0x20,
 		.clear_isr = 1,
+		.iopoll_weight = 0,
 		{
 			.set_interrupt_mask_reg = 0x0022C,
 			.clr_interrupt_mask_reg = 0x00230,
@@ -132,6 +133,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
 		.max_cmds = 100,
 		.cache_line_size = 0x20,
 		.clear_isr = 1,
+		.iopoll_weight = 0,
 		{
 			.set_interrupt_mask_reg = 0x00288,
 			.clr_interrupt_mask_reg = 0x0028C,
@@ -156,6 +158,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
 		.max_cmds = 1000,
 		.cache_line_size = 0x20,
 		.clear_isr = 0,
+		.iopoll_weight = 64,
 		{
 			.set_interrupt_mask_reg = 0x00010,
 			.clr_interrupt_mask_reg = 0x00018,
@@ -3560,6 +3563,95 @@ static struct device_attribute ipr_ioa_reset_attr = {
 	.store = ipr_store_reset_adapter
 };
 
+static int ipr_iopoll(struct blk_iopoll *iop, int budget);
+/**
+ * ipr_show_iopoll_weight - Show ipr polling mode
+ * @dev:	class device struct
+ * @buf:	buffer
+ *
+ * Return value:
+ *	number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_iopoll_weight(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+	unsigned long lock_flags = 0;
+	int len;
+
+	spin_lock_irqsave(shost->host_lock, lock_flags);
+	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
+	spin_unlock_irqrestore(shost->host_lock, lock_flags);
+
+	return len;
+}
+
+/**
+ * ipr_store_iopoll_weight - Change the adapter's polling mode
+ * @dev:	class device struct
+ * @buf:	buffer
+ *
+ * Return value:
+ *	number of bytes consumed from buffer
+ **/
+static ssize_t ipr_store_iopoll_weight(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+	unsigned long user_iopoll_weight;
+	unsigned long lock_flags = 0;
+	int i;
+
+	if (!ioa_cfg->sis64) {
+		dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
+		return -EINVAL;
+	}
+	if (kstrtoul(buf, 10, &user_iopoll_weight))
+		return -EINVAL;
+
+	if (user_iopoll_weight > 256) {
+		dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must not exceed 256\n");
+		return -EINVAL;
+	}
+
+	if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
+		dev_info(&ioa_cfg->pdev->dev, "blk-iopoll weight unchanged\n");
+		return strlen(buf);
+	}
+
+	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+		for (i = 1; i < ioa_cfg->hrrq_num; i++)
+			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
+	}
+
+	spin_lock_irqsave(shost->host_lock, lock_flags);
+	ioa_cfg->iopoll_weight = user_iopoll_weight;
+	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
+			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
+					ioa_cfg->iopoll_weight, ipr_iopoll);
+			blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
+		}
+	}
+	spin_unlock_irqrestore(shost->host_lock, lock_flags);
+
+	return strlen(buf);
+}
+
+static struct device_attribute ipr_iopoll_weight_attr = {
+	.attr = {
+		.name =		"iopoll_weight",
+		.mode =		S_IRUGO | S_IWUSR,
+	},
+	.show = ipr_show_iopoll_weight,
+	.store = ipr_store_iopoll_weight
+};
+
 /**
  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
  * @buf_len:		buffer length
@@ -3928,6 +4020,7 @@ static struct device_attribute *ipr_ioa_attrs[] = {
 	&ipr_ioa_reset_attr,
 	&ipr_update_fw_attr,
 	&ipr_ioa_fw_type_attr,
+	&ipr_iopoll_weight_attr,
 	NULL,
 };
 
@@ -5218,7 +5311,7 @@ static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
 }
 
-static int __ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue,
+static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
 						struct list_head *doneq)
 {
 	u32 ioasc;
@@ -5260,9 +5353,41 @@ static int __ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue,
 			hrr_queue->toggle_bit ^= 1u;
 		}
 		num_hrrq++;
+		if (budget > 0 && num_hrrq >= budget)
+			break;
 	}
 
 	return num_hrrq;
 }
 
+static int ipr_iopoll(struct blk_iopoll *iop, int budget)
+{
+	struct ipr_ioa_cfg *ioa_cfg;
+	struct ipr_hrr_queue *hrrq;
+	struct ipr_cmnd *ipr_cmd, *temp;
+	unsigned long hrrq_flags;
+	int completed_ops;
+	LIST_HEAD(doneq);
+
+	hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
+	ioa_cfg = hrrq->ioa_cfg;
+
+	spin_lock_irqsave(hrrq->lock, hrrq_flags);
+	completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
+
+	if (completed_ops < budget)
+		blk_iopoll_complete(iop);
+	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+
+	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+		list_del(&ipr_cmd->queue);
+		del_timer(&ipr_cmd->timer);
+		ipr_cmd->fast_done(ipr_cmd);
+	}
+
+	return completed_ops;
+}
+
 /**
  * ipr_isr - Interrupt service routine
  * @irq:	irq number
@@ -5277,8 +5402,6 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
 	unsigned long hrrq_flags = 0;
 	u32 int_reg = 0;
-	u32 ioasc;
-	u16 cmd_index;
 	int num_hrrq = 0;
 	int irq_none = 0;
 	struct ipr_cmnd *ipr_cmd, *temp;
@@ -5293,48 +5416,17 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 	}
 
 	while (1) {
-		ipr_cmd = NULL;
-
-		while ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
-		       hrrq->toggle_bit) {
-
-			cmd_index = (be32_to_cpu(*hrrq->hrrq_curr) &
-				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
-
-			if (unlikely(cmd_index > hrrq->max_cmd_id ||
-				     cmd_index < hrrq->min_cmd_id)) {
-				ipr_isr_eh(ioa_cfg,
-					"Invalid response handle from IOA: ",
-					cmd_index);
-				rc = IRQ_HANDLED;
-				goto unlock_out;
-			}
-
-			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
-			ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
-
-			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
-
-			list_move_tail(&ipr_cmd->queue, &doneq);
-
+		if (ipr_process_hrrq(hrrq, -1, &doneq)) {
 			rc =  IRQ_HANDLED;
 
-			if (hrrq->hrrq_curr < hrrq->hrrq_end) {
-				hrrq->hrrq_curr++;
-			} else {
-				hrrq->hrrq_curr = hrrq->hrrq_start;
-				hrrq->toggle_bit ^= 1u;
-			}
-		}
-
-		if (ipr_cmd && !ioa_cfg->clear_isr)
+			if (!ioa_cfg->clear_isr)
 				break;
 
-		if (ipr_cmd != NULL) {
 			/* Clear the PCI interrupt */
 			num_hrrq = 0;
 			do {
-				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
+				writel(IPR_PCII_HRRQ_UPDATED,
+				     ioa_cfg->regs.clr_interrupt_reg32);
 				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
 			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
 				num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
@@ -5344,9 +5436,10 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 			irq_none++;
 		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
 			   int_reg & IPR_PCII_HRRQ_UPDATED) {
-			ipr_isr_eh(ioa_cfg, "Error clearing HRRQ: ", num_hrrq);
+			ipr_isr_eh(ioa_cfg,
+				"Error clearing HRRQ: ", num_hrrq);
 			rc = IRQ_HANDLED;
-			goto unlock_out;
+			break;
 		} else
 			break;
 	}
@@ -5354,7 +5447,6 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 	if (unlikely(rc == IRQ_NONE))
 		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
 
-unlock_out:
 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
 	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
 		list_del(&ipr_cmd->queue);
@@ -5375,6 +5467,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
 {
 	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
+	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
 	unsigned long hrrq_flags = 0;
 	struct ipr_cmnd *ipr_cmd, *temp;
 	irqreturn_t rc = IRQ_NONE;
@@ -5388,11 +5481,22 @@ static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
 		return IRQ_NONE;
 	}
 
+	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+		       hrrq->toggle_bit) {
+			if (!blk_iopoll_sched_prep(&hrrq->iopoll))
+				blk_iopoll_sched(&hrrq->iopoll);
+			spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+			return IRQ_HANDLED;
+		}
+	} else {
+		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+			hrrq->toggle_bit)
 
-		if (__ipr_process_hrrq(hrrq, &doneq))
+			if (ipr_process_hrrq(hrrq, -1, &doneq))
 				rc =  IRQ_HANDLED;
+	}
 
 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
 
@@ -9690,7 +9794,7 @@ static void ipr_remove(struct pci_dev *pdev)
 static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 {
 	struct ipr_ioa_cfg *ioa_cfg;
-	int rc;
+	int rc, i;
 
 	rc = ipr_probe_ioa(pdev, dev_id);
 
@@ -9737,6 +9841,17 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
 	ioa_cfg->allow_ml_add_del = 1;
 	ioa_cfg->host->max_channel = IPR_VSET_BUS;
+	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
+
+	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
+			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
+					ioa_cfg->iopoll_weight, ipr_iopoll);
+			blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
+		}
+	}
+
 	schedule_work(&ioa_cfg->work_q);
 	return 0;
 }
@@ -9755,8 +9870,16 @@ static void ipr_shutdown(struct pci_dev *pdev)
 {
 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
 	unsigned long lock_flags = 0;
+	int i;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+		ioa_cfg->iopoll_weight = 0;
+		for (i = 1; i < ioa_cfg->hrrq_num; i++)
+			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
+	}
+
 	while (ioa_cfg->in_reset_reload) {
 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
drivers/scsi/ipr.h +6 −0
@@ -32,6 +32,7 @@
 #include <linux/libata.h>
 #include <linux/list.h>
 #include <linux/kref.h>
+#include <linux/blk-iopoll.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
 
@@ -492,6 +493,8 @@ struct ipr_hrr_queue {
 	u8 allow_interrupts:1;
 	u8 ioa_is_dead:1;
 	u8 allow_cmds:1;
+
+	struct blk_iopoll iopoll;
 };
 
 /* Command packet structure */
@@ -1348,6 +1351,7 @@ struct ipr_chip_cfg_t {
 	u16 max_cmds;
 	u8 cache_line_size;
 	u8 clear_isr;
+	u32 iopoll_weight;
 	struct ipr_interrupt_offsets regs;
 };
 
@@ -1534,6 +1538,8 @@ struct ipr_ioa_cfg {
 		char desc[22];
 	} vectors_info[IPR_MAX_MSIX_VECTORS];
 
+	u32 iopoll_weight;
+
 }; /* struct ipr_ioa_cfg */
 
 struct ipr_cmnd {
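
Since ipr_iopoll_weight_attr is registered through ipr_ioa_attrs on the
Scsi_Host, the polling weight should be tunable at runtime from sysfs,
along these lines (the host number varies by system):

	cat /sys/class/scsi_host/host0/iopoll_weight
	echo 64 > /sys/class/scsi_host/host0/iopoll_weight

Writing 0 drops the adapter back to purely interrupt-driven completion
handling; a non-zero value up to 256 re-initializes blk-iopoll on the
secondary HRR queues with the new weight.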