Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 89f8b33c authored by Jens Axboe
Browse files

block: remove old blk_iopoll_enabled variable



This was a debugging measure to toggle enabled/disabled
when testing. But for real production setups, it's not
safe to toggle this setting without either reloading
drivers or quiescing IO first. Neither of which the toggle
enforces.

Additionally, it makes drivers deal with the conditional
state.

Remove it completely. It's up to the driver whether iopoll
is enabled or not.

Signed-off-by: Jens Axboe <axboe@fb.com>
parent af5040da
Loading
Loading
Loading
Loading
+0 −3
Original line number Original line Diff line number Diff line
@@ -14,9 +14,6 @@


#include "blk.h"
#include "blk.h"


int blk_iopoll_enabled = 1;
EXPORT_SYMBOL(blk_iopoll_enabled);

static unsigned int blk_iopoll_budget __read_mostly = 256;
static unsigned int blk_iopoll_budget __read_mostly = 256;


static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);
static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);
+63 −143
Original line number Original line Diff line number Diff line
@@ -873,7 +873,6 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
	struct be_queue_info *cq;
	struct be_queue_info *cq;
	unsigned int num_eq_processed;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;


	pbe_eq = dev_id;
	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	eq = &pbe_eq->q;
@@ -882,7 +881,6 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)


	phba = pbe_eq->phba;
	phba = pbe_eq->phba;
	num_eq_processed = 0;
	num_eq_processed = 0;
	if (blk_iopoll_enabled) {
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
				& EQE_VALID_MASK) {
		if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
		if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
@@ -893,21 +891,6 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
		eqe = queue_tail_node(eq);
		eqe = queue_tail_node(eq);
		num_eq_processed++;
		num_eq_processed++;
	}
	}
	} else {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
						& EQE_VALID_MASK) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			pbe_eq->todo_cq = true;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}

		if (pbe_eq->todo_cq)
			queue_work(phba->wq, &pbe_eq->work_cqs);
	}


	if (num_eq_processed)
	if (num_eq_processed)
		hwi_ring_eq_db(phba, eq->id, 1,	num_eq_processed, 0, 1);
		hwi_ring_eq_db(phba, eq->id, 1,	num_eq_processed, 0, 1);
@@ -927,7 +910,6 @@ static irqreturn_t be_isr(int irq, void *dev_id)
	struct hwi_context_memory *phwi_context;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe = NULL;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	struct be_queue_info *mcc;
	struct be_queue_info *mcc;
	unsigned long flags, index;
	unsigned long flags, index;
	unsigned int num_mcceq_processed, num_ioeq_processed;
	unsigned int num_mcceq_processed, num_ioeq_processed;
@@ -953,7 +935,6 @@ static irqreturn_t be_isr(int irq, void *dev_id)


	num_ioeq_processed = 0;
	num_ioeq_processed = 0;
	num_mcceq_processed = 0;
	num_mcceq_processed = 0;
	if (blk_iopoll_enabled) {
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		if (((eqe->dw[offsetof(struct amap_eq_entry,
@@ -988,37 +969,6 @@ static irqreturn_t be_isr(int irq, void *dev_id)
		return IRQ_HANDLED;
		return IRQ_HANDLED;
	} else
	} else
		return IRQ_NONE;
		return IRQ_NONE;
	} else {
		cq = &phwi_context->be_cq[0];
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
						& EQE_VALID_MASK) {

			if (((eqe->dw[offsetof(struct amap_eq_entry,
			     resource_id) / 32] &
			     EQE_RESID_MASK) >> 16) != cq->id) {
				spin_lock_irqsave(&phba->isr_lock, flags);
				pbe_eq->todo_mcc_cq = true;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			} else {
				spin_lock_irqsave(&phba->isr_lock, flags);
				pbe_eq->todo_cq = true;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			}
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_ioeq_processed++;
		}
		if (pbe_eq->todo_cq || pbe_eq->todo_mcc_cq)
			queue_work(phba->wq, &pbe_eq->work_cqs);

		if (num_ioeq_processed) {
			hwi_ring_eq_db(phba, eq->id, 0,
				       num_ioeq_processed, 1, 1);
			return IRQ_HANDLED;
		} else
			return IRQ_NONE;
	}
}
}


static int beiscsi_init_irqs(struct beiscsi_hba *phba)
static int beiscsi_init_irqs(struct beiscsi_hba *phba)
@@ -5216,7 +5166,6 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba,
		}
		}
	pci_disable_msix(phba->pcidev);
	pci_disable_msix(phba->pcidev);


	if (blk_iopoll_enabled)
	for (i = 0; i < phba->num_cpus; i++) {
	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		pbe_eq = &phwi_context->be_eq[i];
		blk_iopoll_disable(&pbe_eq->iopoll);
		blk_iopoll_disable(&pbe_eq->iopoll);
@@ -5429,7 +5378,6 @@ static void beiscsi_eeh_resume(struct pci_dev *pdev)
	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	phwi_context = phwi_ctrlr->phwi_ctxt;


	if (blk_iopoll_enabled) {
	for (i = 0; i < phba->num_cpus; i++) {
	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		pbe_eq = &phwi_context->be_eq[i];
		blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
		blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
@@ -5441,19 +5389,6 @@ static void beiscsi_eeh_resume(struct pci_dev *pdev)
	/* Work item for MCC handling */
	/* Work item for MCC handling */
	pbe_eq = &phwi_context->be_eq[i];
	pbe_eq = &phwi_context->be_eq[i];
	INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
	INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
	} else {
		if (phba->msix_enabled) {
			for (i = 0; i <= phba->num_cpus; i++) {
				pbe_eq = &phwi_context->be_eq[i];
				INIT_WORK(&pbe_eq->work_cqs,
					  beiscsi_process_all_cqs);
			}
		} else {
			pbe_eq = &phwi_context->be_eq[0];
			INIT_WORK(&pbe_eq->work_cqs,
				  beiscsi_process_all_cqs);
		}
	}


	ret = beiscsi_init_irqs(phba);
	ret = beiscsi_init_irqs(phba);
	if (ret < 0) {
	if (ret < 0) {
@@ -5614,7 +5549,6 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	phwi_context = phwi_ctrlr->phwi_ctxt;


	if (blk_iopoll_enabled) {
	for (i = 0; i < phba->num_cpus; i++) {
	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		pbe_eq = &phwi_context->be_eq[i];
		blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
		blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
@@ -5626,19 +5560,6 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
	/* Work item for MCC handling */
	/* Work item for MCC handling */
	pbe_eq = &phwi_context->be_eq[i];
	pbe_eq = &phwi_context->be_eq[i];
	INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
	INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
	} else {
		if (phba->msix_enabled) {
			for (i = 0; i <= phba->num_cpus; i++) {
				pbe_eq = &phwi_context->be_eq[i];
				INIT_WORK(&pbe_eq->work_cqs,
					  beiscsi_process_all_cqs);
			}
		} else {
				pbe_eq = &phwi_context->be_eq[0];
				INIT_WORK(&pbe_eq->work_cqs,
					  beiscsi_process_all_cqs);
			}
	}


	ret = beiscsi_init_irqs(phba);
	ret = beiscsi_init_irqs(phba);
	if (ret < 0) {
	if (ret < 0) {
@@ -5668,7 +5589,6 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,


free_blkenbld:
free_blkenbld:
	destroy_workqueue(phba->wq);
	destroy_workqueue(phba->wq);
	if (blk_iopoll_enabled)
	for (i = 0; i < phba->num_cpus; i++) {
	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		pbe_eq = &phwi_context->be_eq[i];
		blk_iopoll_disable(&pbe_eq->iopoll);
		blk_iopoll_disable(&pbe_eq->iopoll);
+5 −10
Original line number Original line Diff line number Diff line
@@ -3630,16 +3630,14 @@ static ssize_t ipr_store_iopoll_weight(struct device *dev,
		return strlen(buf);
		return strlen(buf);
	}
	}


	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
	}
	}


	spin_lock_irqsave(shost->host_lock, lock_flags);
	spin_lock_irqsave(shost->host_lock, lock_flags);
	ioa_cfg->iopoll_weight = user_iopoll_weight;
	ioa_cfg->iopoll_weight = user_iopoll_weight;
	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
					ioa_cfg->iopoll_weight, ipr_iopoll);
					ioa_cfg->iopoll_weight, ipr_iopoll);
@@ -5484,8 +5482,7 @@ static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
		return IRQ_NONE;
		return IRQ_NONE;
	}
	}


	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		       hrrq->toggle_bit) {
		       hrrq->toggle_bit) {
			if (!blk_iopoll_sched_prep(&hrrq->iopoll))
			if (!blk_iopoll_sched_prep(&hrrq->iopoll))
@@ -9859,8 +9856,7 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	ioa_cfg->host->max_channel = IPR_VSET_BUS;
	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;


	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
					ioa_cfg->iopoll_weight, ipr_iopoll);
					ioa_cfg->iopoll_weight, ipr_iopoll);
@@ -9889,8 +9885,7 @@ static void ipr_shutdown(struct pci_dev *pdev)
	int i;
	int i;


	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
+0 −2
Original line number Original line Diff line number Diff line
@@ -43,6 +43,4 @@ extern void __blk_iopoll_complete(struct blk_iopoll *);
extern void blk_iopoll_enable(struct blk_iopoll *);
extern void blk_iopoll_enable(struct blk_iopoll *);
extern void blk_iopoll_disable(struct blk_iopoll *);
extern void blk_iopoll_disable(struct blk_iopoll *);


extern int blk_iopoll_enabled;

#endif
#endif
+0 −12
Original line number Original line Diff line number Diff line
@@ -112,9 +112,6 @@ extern int sysctl_nr_open_min, sysctl_nr_open_max;
#ifndef CONFIG_MMU
#ifndef CONFIG_MMU
extern int sysctl_nr_trim_pages;
extern int sysctl_nr_trim_pages;
#endif
#endif
#ifdef CONFIG_BLOCK
extern int blk_iopoll_enabled;
#endif


/* Constants used for minimum and  maximum */
/* Constants used for minimum and  maximum */
#ifdef CONFIG_LOCKUP_DETECTOR
#ifdef CONFIG_LOCKUP_DETECTOR
@@ -1093,15 +1090,6 @@ static struct ctl_table kern_table[] = {
		.mode		= 0644,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
		.proc_handler	= proc_dointvec,
	},
	},
#endif
#ifdef CONFIG_BLOCK
	{
		.procname	= "blk_iopoll",
		.data		= &blk_iopoll_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
#endif
	{ }
	{ }
};
};