Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 64ee3120 authored by Ben Hutchings, committed by Jeff Garzik
Browse files

sfc: Remove efx_channel::has_interrupt



efx_channel::has_interrupt is redundant with efx_channel::used_flags.

Remove efx_test_eventq() because it is now obviously unreachable.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
parent 46123d04
Loading
Loading
Loading
Loading
+5 −11
Original line number Diff line number Diff line
@@ -259,7 +259,7 @@ void efx_process_channel_now(struct efx_channel *channel)
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	if (channel->has_interrupt && channel->irq)
	if (channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
@@ -872,10 +872,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
		}

		if (rc == 0) {
			for (i = 0; i < efx->rss_queues; i++) {
				efx->channel[i].has_interrupt = true;
			for (i = 0; i < efx->rss_queues; i++)
				efx->channel[i].irq = xentries[i].vector;
			}
		} else {
			/* Fall back to single channel MSI */
			efx->interrupt_mode = EFX_INT_MODE_MSI;
@@ -889,7 +887,6 @@ static void efx_probe_interrupts(struct efx_nic *efx)
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx->channel[0].irq = efx->pci_dev->irq;
			efx->channel[0].has_interrupt = true;
		} else {
			EFX_ERR(efx, "could not enable MSI\n");
			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
@@ -899,9 +896,6 @@ static void efx_probe_interrupts(struct efx_nic *efx)
	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->rss_queues = 1;
		/* Every channel is interruptible */
		for (i = 0; i < EFX_MAX_CHANNELS; i++)
			efx->channel[i].has_interrupt = true;
		efx->legacy_irq = efx->pci_dev->irq;
	}
}
@@ -911,7 +905,7 @@ static void efx_remove_interrupts(struct efx_nic *efx)
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel_with_interrupt(channel, efx)
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);
@@ -1106,7 +1100,7 @@ static void efx_stop_all(struct efx_nic *efx)
	falcon_disable_interrupts(efx);
	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
	efx_for_each_channel_with_interrupt(channel, efx) {
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);
	}
@@ -1303,7 +1297,7 @@ static void efx_netpoll(struct net_device *net_dev)
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;

	efx_for_each_channel_with_interrupt(channel, efx)
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

+4 −4
Original line number Diff line number Diff line
@@ -1317,7 +1317,7 @@ void falcon_enable_interrupts(struct efx_nic *efx)

	/* Force processing of all the channels to get the EVQ RPTRs up to
	   date */
	efx_for_each_channel_with_interrupt(channel, efx)
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

@@ -1567,7 +1567,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
	}

	/* Hook MSI or MSI-X interrupt */
	efx_for_each_channel_with_interrupt(channel, efx) {
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, falcon_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 efx->name, channel);
@@ -1580,7 +1580,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
	return 0;

 fail2:
	efx_for_each_channel_with_interrupt(channel, efx)
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, channel);
 fail1:
	return rc;
@@ -1592,7 +1592,7 @@ void falcon_fini_interrupt(struct efx_nic *efx)
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel_with_interrupt(channel, efx) {
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, channel);
	}
+0 −11
Original line number Diff line number Diff line
@@ -329,7 +329,6 @@ enum efx_rx_alloc_method {
 * @used_flags: Channel is used by net driver
 * @enabled: Channel enabled indicator
 * @irq: IRQ number (MSI and MSI-X only)
 * @has_interrupt: Channel has an interrupt
 * @irq_moderation: IRQ moderation value (in us)
 * @napi_dev: Net device used with NAPI
 * @napi_str: NAPI control structure
@@ -361,7 +360,6 @@ struct efx_channel {
	int used_flags;
	bool enabled;
	int irq;
	bool has_interrupt;
	unsigned int irq_moderation;
	struct net_device *napi_dev;
	struct napi_struct napi_str;
@@ -834,15 +832,6 @@ struct efx_nic_type {
			continue;					\
		else

/* Iterate over all used channels with interrupts */
#define efx_for_each_channel_with_interrupt(_channel, _efx)		\
	for (_channel = &_efx->channel[0];				\
	     _channel < &_efx->channel[EFX_MAX_CHANNELS];		\
	     _channel++)						\
		if (!(_channel->used_flags && _channel->has_interrupt))	\
			continue;					\
		else

/* Iterate over all used TX queues */
#define efx_for_each_tx_queue(_tx_queue, _efx)				\
	for (_tx_queue = &_efx->tx_queue[0];				\
+3 −41
Original line number Diff line number Diff line
@@ -111,7 +111,7 @@ static int efx_test_interrupts(struct efx_nic *efx,

	/* ACK each interrupting event queue. Receiving an interrupt due to
	 * traffic before a test event is raised is considered a pass */
	efx_for_each_channel_with_interrupt(channel, efx) {
	efx_for_each_channel(channel, efx) {
		if (channel->work_pending)
			efx_process_channel_now(channel);
		if (efx->last_irq_cpu >= 0)
@@ -136,41 +136,6 @@ static int efx_test_interrupts(struct efx_nic *efx,
	return 0;
}

/* Test generation and receipt of non-interrupting events.
 *
 * Posts a driver-generated "test event" carrying a per-channel magic
 * value onto the channel's event queue, then synchronously processes
 * the queue and checks the magic value was seen.  Results are recorded
 * in @tests; returns 0 on success or -ETIMEDOUT if the event was not
 * observed.  (Removed by this commit: every channel now has an
 * interrupt, so the interrupting variant efx_test_eventq_irq() is
 * always used instead.)
 */
static int efx_test_eventq(struct efx_channel *channel,
			   struct efx_self_tests *tests)
{
	unsigned int magic;

	/* Channel specific code, limited to 20 bits */
	magic = (0x00010150 + channel->channel);
	EFX_LOG(channel->efx, "channel %d testing event queue with code %x\n",
		channel->channel, magic);

	/* Pre-mark DMA as failed; flipped to pass only if the magic
	 * value arrives.  The interrupt/poll legs are not exercised by
	 * this non-interrupting test, so they are recorded as passes. */
	tests->eventq_dma[channel->channel] = -1;
	tests->eventq_int[channel->channel] = 1;	/* fake pass */
	tests->eventq_poll[channel->channel] = 1;	/* fake pass */

	/* Reset flag and zero magic word */
	channel->efx->last_irq_cpu = -1;
	channel->eventq_magic = 0;
	/* Ensure the resets above are visible before the hardware can
	 * deliver the test event and the handler writes eventq_magic. */
	smp_wmb();

	falcon_generate_test_event(channel, magic);
	/* Brief delay to let the event reach the event queue */
	udelay(1);

	/* Process the event queue synchronously (no NAPI/IRQ involved);
	 * the event handler stores the received code in eventq_magic. */
	efx_process_channel_now(channel);
	if (channel->eventq_magic != magic) {
		EFX_ERR(channel->efx, "channel %d  failed to see test event\n",
			channel->channel);
		return -ETIMEDOUT;
	} else {
		tests->eventq_dma[channel->channel] = 1;
	}

	return 0;
}

/* Test generation and receipt of interrupting events */
static int efx_test_eventq_irq(struct efx_channel *channel,
			       struct efx_self_tests *tests)
@@ -456,7 +421,7 @@ static int efx_poll_loopback(struct efx_nic *efx)

	/* NAPI polling is not enabled, so process channels
	 * synchronously */
	efx_for_each_channel_with_interrupt(channel, efx) {
	efx_for_each_channel(channel, efx) {
		if (channel->work_pending)
			efx_process_channel_now(channel);
	}
@@ -689,10 +654,7 @@ int efx_online_test(struct efx_nic *efx, struct efx_self_tests *tests)
	if (rc)
		return rc;
	efx_for_each_channel(channel, efx) {
		if (channel->has_interrupt)
		rc = efx_test_eventq_irq(channel, tests);
		else
			rc = efx_test_eventq(channel, tests);
		if (rc)
			return rc;
	}