Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e2fa6f2e, authored by Rasesh Mody, committed by David S. Miller
Browse files

bna: fix interrupt handling



This fix handles the case where the IRQ handler is called (for shared IRQs)
even before the driver is ready to handle interrupts.

Signed-off-by: Debashis Dutt <ddutt@brocade.com>
Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ebc0ffae
Loading
Loading
Loading
Loading
+27 −21
Original line number Diff line number Diff line
@@ -564,9 +564,11 @@ bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
/*
 * Re-enable RX interrupts for the given completion control block (CCB).
 *
 * Called from NAPI polling context, where the caller's interrupt state is
 * not known.  Use spin_lock_irqsave()/spin_unlock_irqrestore() so the
 * previous interrupt-enable state is preserved, instead of the plain
 * spin_lock_irq()/spin_unlock_irq() pair, which would unconditionally
 * re-enable interrupts on unlock.
 *
 * NOTE(review): the scraped diff lost its +/- markers and fused the
 * pre-patch and post-patch lock calls into one body; this is the
 * post-patch version from upstream commit e2fa6f2e.
 */
static void
bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
{
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags); /* Because of polling context */
	bnad_enable_rx_irq_unsafe(ccb);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
@@ -630,13 +632,15 @@ bnad_isr(int irq, void *data)
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
		return IRQ_NONE;

	bna_intr_status_get(&bnad->bna, intr_status);
	if (!intr_status) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (unlikely(!intr_status))
		return IRQ_NONE;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);

	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
		bna_mbox_handler(&bnad->bna, intr_status);
@@ -672,11 +676,10 @@ bnad_enable_mbox_irq(struct bnad *bnad)
{
	int irq = BNAD_GET_MBOX_IRQ(bnad);

	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
		return;

	if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
		if (bnad->cfg_flags & BNAD_CF_MSIX)
			enable_irq(irq);

	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

@@ -689,11 +692,11 @@ bnad_disable_mbox_irq(struct bnad *bnad)
{
	int irq = BNAD_GET_MBOX_IRQ(bnad);

	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
		return;

	if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
		if (bnad->cfg_flags & BNAD_CF_MSIX)
			disable_irq_nosync(irq);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}

@@ -1045,14 +1048,12 @@ bnad_mbox_irq_free(struct bnad *bnad,
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);

	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	irq = BNAD_GET_MBOX_IRQ(bnad);
	free_irq(irq, bnad->netdev);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	kfree(intr_info->idl);
}

@@ -1094,8 +1095,15 @@ bnad_mbox_irq_alloc(struct bnad *bnad,

	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

	/*
	 * Set the Mbox IRQ disable flag, so that the IRQ handler
	 * called from request_irq() for SHARED IRQs do not execute
	 */
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	err = request_irq(irq, irq_handler, flags,
			  bnad->mbox_irq_name, bnad->netdev);

	if (err) {
		kfree(intr_info->idl);
		intr_info->idl = NULL;
@@ -1103,7 +1111,8 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_disable_mbox_irq(bnad);
	if (bnad->cfg_flags & BNAD_CF_MSIX)
		disable_irq_nosync(irq);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	return 0;
}
@@ -1485,7 +1494,6 @@ bnad_stats_timer_start(struct bnad *bnad)
			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

}

/*
@@ -2170,7 +2178,6 @@ bnad_device_disable(struct bnad *bnad)
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	wait_for_completion(&bnad->bnad_completions.ioc_comp);

}

static int
@@ -3108,7 +3115,6 @@ bnad_pci_probe(struct pci_dev *pdev,

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	bnad->stats.bna_stats = &bna->stats;