
Commit d853f111 authored by David S. Miller

Ben Hutchings says:

====================
1. Refactoring and cleanup in preparation for new hardware support.
2. Some bug fixes for firmware completion handling.  (They're not known
to cause real problems, otherwise I'd be submitting these for net and
stable.)
3. Update to the firmware protocol (MCDI) definitions.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents b05930f5 f76fe120
+2 −1
sfc-y			+= efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
sfc-y			+= efx.o nic.o farch.o falcon.o siena.o tx.o rx.o \
			   filter.o \
			   selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
			   tenxpress.o txc43128_phy.o falcon_boards.o \
			   mcdi.o mcdi_port.o mcdi_mon.o ptp.o
+88 −128
@@ -191,8 +191,8 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
 *
 *************************************************************************/

static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq);
static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq);
static void efx_soft_enable_interrupts(struct efx_nic *efx);
static void efx_soft_disable_interrupts(struct efx_nic *efx);
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
@@ -248,30 +248,12 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
			efx_channel_get_rx_queue(channel);

		efx_rx_flush_packet(channel);
		if (rx_queue->enabled)
		efx_fast_push_rx_descriptors(rx_queue);
	}

	return spent;
}

/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* The interrupt handler for this channel may set work_pending
	 * as soon as we acknowledge the events we've seen.  Make sure
	 * it's cleared before then. */
	channel->work_pending = false;
	smp_wmb();

	efx_nic_eventq_read_ack(channel);
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
@@ -316,58 +298,16 @@ static int efx_poll(struct napi_struct *napi, int budget)

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * since efx_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		napi_complete(napi);
		efx_channel_processed(channel);
		efx_nic_eventq_read_ack(channel);
	}

	return spent;
}

/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack ) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * This is for use only during a loopback self-test.  It must not
 * deliver any packets up the stack as this can result in deadlock.
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUG_ON(channel->channel >= efx->n_channels);
	BUG_ON(!channel->enabled);
	BUG_ON(!efx->loopback_selftest);

	/* Disable interrupts and wait for ISRs to complete */
	efx_nic_disable_interrupts(efx);
	if (efx->legacy_irq) {
		synchronize_irq(efx->legacy_irq);
		efx->legacy_irq_enabled = false;
	}
	if (channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel */
	efx_process_channel(channel, channel->eventq_mask + 1);

	/* Ack the eventq. This may cause an interrupt to be generated
	 * when they are reenabled */
	efx_channel_processed(channel);

	napi_enable(&channel->napi_str);
	if (efx->legacy_irq)
		efx->legacy_irq_enabled = true;
	efx_nic_enable_interrupts(efx);
}

/* Create event queue
 * Event queue memory allocations are done only once.  If the channel
 * is reset, the memory buffer will be reused; this guards against
@@ -407,11 +347,7 @@ static void efx_start_eventq(struct efx_channel *channel)
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* The interrupt handler for this channel may set work_pending
	 * as soon as we enable it.  Make sure it's cleared before
	 * then.  Similarly, make sure it sees the enabled flag set.
	 */
	channel->work_pending = false;
	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();

@@ -583,8 +519,8 @@ static void efx_set_channel_names(struct efx_nic *efx)

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->channel_name[channel->channel],
					sizeof(efx->channel_name[0]));
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}

static int efx_probe_channels(struct efx_nic *efx)
@@ -704,21 +640,37 @@ static void efx_stop_datapath(struct efx_nic *efx)
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct pci_dev *dev = efx->pci_dev;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	/* Only perform flush if dma is enabled */
	if (dev->is_busmaster && efx->state != STATE_RECOVERY) {
		rc = efx_nic_flush_queues(efx);
	/* Stop RX refill */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete.  At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_stop_eventq(channel);
			efx_start_eventq(channel);
		}
	}

	rc = efx->type->fini_dmaq(efx);
	if (rc && EFX_WORKAROUND_7803(efx)) {
		/* Schedule a reset to recover from the flush failure. The
		 * descriptor caches reference memory we're about to free,
		 * but falcon_reconfigure_mac_wrapper() won't reconnect
			 * the MACs because of the pending reset. */
		 * the MACs because of the pending reset.
		 */
		netif_err(efx, drv, efx->net_dev,
			  "Resetting to recover from flush failure\n");
		efx_schedule_reset(efx, RESET_TYPE_ALL);
@@ -728,20 +680,8 @@ static void efx_stop_datapath(struct efx_nic *efx)
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete.  At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_stop_eventq(channel);
			efx_start_eventq(channel);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
@@ -809,7 +749,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)

	efx_device_detach_sync(efx);
	efx_stop_all(efx);
	efx_stop_interrupts(efx, true);
	efx_soft_disable_interrupts(efx);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
@@ -859,7 +799,7 @@ out:
		}
	}

	efx_start_interrupts(efx, true);
	efx_soft_enable_interrupts(efx);
	efx_start_all(efx);
	netif_device_attach(efx->net_dev);
	return rc;
@@ -1392,23 +1332,17 @@ static int efx_probe_interrupts(struct efx_nic *efx)
	return 0;
}

/* Enable interrupts, then probe and start the event queues */
static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
static void efx_soft_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->eeh_disabled_legacy_irq) {
		enable_irq(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = false;
	}
	if (efx->legacy_irq)
		efx->legacy_irq_enabled = true;
	efx_nic_enable_interrupts(efx);
	efx->irq_soft_enabled = true;
	smp_wmb();

	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq || !may_keep_eventq)
		if (!channel->type->keep_eventq)
			efx_init_eventq(channel);
		efx_start_eventq(channel);
	}
@@ -1416,7 +1350,7 @@ static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
	efx_mcdi_mode_event(efx);
}

static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
static void efx_soft_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

@@ -1425,20 +1359,55 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)

	efx_mcdi_mode_poll(efx);

	efx_nic_disable_interrupts(efx);
	if (efx->legacy_irq) {
	efx->irq_soft_enabled = false;
	smp_wmb();

	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);
		efx->legacy_irq_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq || !may_keep_eventq)
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}
}

static void efx_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->eeh_disabled_legacy_irq) {
		enable_irq(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = false;
	}

	efx->type->irq_enable_master(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq)
			efx_init_eventq(channel);
	}

	efx_soft_enable_interrupts(efx);
}

static void efx_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_soft_disable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);
}

static void efx_remove_interrupts(struct efx_nic *efx)
@@ -2185,22 +2154,11 @@ fail_locked:

static void efx_unregister_netdev(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	if (!efx->net_dev)
		return;

	BUG_ON(netdev_priv(efx->net_dev) != efx);

	/* Free up any skbs still remaining. This has to happen before
	 * we try to unregister the netdev as running their destructors
	 * may be needed to get the device ref. count to 0. */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_release_tx_buffers(tx_queue);
	}

	strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);

@@ -2223,7 +2181,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
	EFX_ASSERT_RESET_SERIALISED(efx);

	efx_stop_all(efx);
	efx_stop_interrupts(efx, false);
	efx_disable_interrupts(efx);

	mutex_lock(&efx->mac_lock);
	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
@@ -2262,7 +2220,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)

	efx->type->reconfigure_mac(efx);

	efx_start_interrupts(efx, false);
	efx_enable_interrupts(efx);
	efx_restore_filters(efx);
	efx_sriov_reset(efx);

@@ -2527,6 +2485,8 @@ static int efx_init_struct(struct efx_nic *efx,
		efx->channel[i] = efx_alloc_channel(efx, i, NULL);
		if (!efx->channel[i])
			goto fail;
		efx->msi_context[i].efx = efx;
		efx->msi_context[i].index = i;
	}

	EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
@@ -2579,7 +2539,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
	BUG_ON(efx->state == STATE_READY);
	cancel_work_sync(&efx->reset_work);

	efx_stop_interrupts(efx, false);
	efx_disable_interrupts(efx);
	efx_nic_fini_interrupt(efx);
	efx_fini_port(efx);
	efx->type->fini(efx);
@@ -2601,7 +2561,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
	/* Mark the NIC as fini, then stop the interface */
	rtnl_lock();
	dev_close(efx->net_dev);
	efx_stop_interrupts(efx, false);
	efx_disable_interrupts(efx);
	rtnl_unlock();

	efx_sriov_fini(efx);
@@ -2703,7 +2663,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
	rc = efx_nic_init_interrupt(efx);
	if (rc)
		goto fail5;
	efx_start_interrupts(efx, false);
	efx_enable_interrupts(efx);

	return 0;

@@ -2824,7 +2784,7 @@ static int efx_pm_freeze(struct device *dev)
		efx_device_detach_sync(efx);

		efx_stop_all(efx);
		efx_stop_interrupts(efx, false);
		efx_disable_interrupts(efx);
	}

	rtnl_unlock();
@@ -2839,7 +2799,7 @@ static int efx_pm_thaw(struct device *dev)
	rtnl_lock();

	if (efx->state != STATE_DISABLED) {
		efx_start_interrupts(efx, false);
		efx_enable_interrupts(efx);

		mutex_lock(&efx->mac_lock);
		efx->phy_op->reconfigure(efx);
@@ -2942,7 +2902,7 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
		efx_device_detach_sync(efx);

		efx_stop_all(efx);
		efx_stop_interrupts(efx, false);
		efx_disable_interrupts(efx);

		status = PCI_ERS_RESULT_NEED_RESET;
	} else {
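
The efx.c changes above replace the old start/stop interrupt helpers with a two-layer scheme: a hard layer (efx_enable_interrupts()/efx_disable_interrupts()) that manages the IRQ lines and the event queues of channels with keep_eventq set, and a soft layer (efx_soft_enable_interrupts()/efx_soft_disable_interrupts()) gated by the new irq_soft_enabled flag that the interrupt handlers test before doing any work. The following is a minimal standalone sketch of that layering, not part of the diff and not the driver's code: the types and helpers (struct nic, eventq_init(), irq_enable_master(), and so on) are stand-ins, and C11 release stores are used to model the driver's smp_wmb()/ACCESS_ONCE() pairing.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

/* Stand-in for struct efx_nic: only the fields the sketch needs. */
struct nic {
	atomic_bool irq_soft_enabled;	/* models efx->irq_soft_enabled */
	size_t channel_count;
	bool keep_eventq[8];		/* models channel->type->keep_eventq */
};

/* Hypothetical hardware helpers; in the driver these are per-NIC-type hooks. */
static void irq_enable_master(struct nic *nic)         { (void)nic; }
static void irq_disable_non_ev(struct nic *nic)        { (void)nic; }
static void eventq_init(struct nic *nic, size_t ch)    { (void)nic; (void)ch; }
static void eventq_fini(struct nic *nic, size_t ch)    { (void)nic; (void)ch; }
static void eventq_start(struct nic *nic, size_t ch)   { (void)nic; (void)ch; }
static void eventq_stop(struct nic *nic, size_t ch)    { (void)nic; (void)ch; }

/* Soft enable: publish the flag first (release store ~ smp_wmb() in the
 * driver), then bring up the event queues that do not survive soft disable. */
static void soft_enable_interrupts(struct nic *nic)
{
	atomic_store_explicit(&nic->irq_soft_enabled, true,
			      memory_order_release);
	for (size_t ch = 0; ch < nic->channel_count; ch++) {
		if (!nic->keep_eventq[ch])
			eventq_init(nic, ch);
		eventq_start(nic, ch);
	}
}

/* Soft disable: clear the flag so handlers bail out early; the driver also
 * synchronize_irq()s so in-flight handlers see the cleared flag before the
 * event queues are torn down. */
static void soft_disable_interrupts(struct nic *nic)
{
	atomic_store_explicit(&nic->irq_soft_enabled, false,
			      memory_order_release);
	for (size_t ch = 0; ch < nic->channel_count; ch++) {
		eventq_stop(nic, ch);
		if (!nic->keep_eventq[ch])
			eventq_fini(nic, ch);
	}
}

/* Hard enable/disable wrap the soft layer and additionally manage the IRQ
 * lines and the event queues that are kept across soft disables. */
static void enable_interrupts(struct nic *nic)
{
	irq_enable_master(nic);
	for (size_t ch = 0; ch < nic->channel_count; ch++)
		if (nic->keep_eventq[ch])
			eventq_init(nic, ch);
	soft_enable_interrupts(nic);
}

static void disable_interrupts(struct nic *nic)
{
	soft_disable_interrupts(nic);
	for (size_t ch = 0; ch < nic->channel_count; ch++)
		if (nic->keep_eventq[ch])
			eventq_fini(nic, ch);
	irq_disable_non_ev(nic);
}

int main(void)
{
	struct nic nic = { .channel_count = 2, .keep_eventq = { true, false } };

	enable_interrupts(&nic);
	soft_disable_interrupts(&nic);	/* e.g. around a channel reallocation */
	soft_enable_interrupts(&nic);
	disable_interrupts(&nic);
	return 0;
}

This matches how the diff uses the two layers: efx_realloc_channels() only toggles the soft layer, while reset, remove and suspend/resume paths go through the hard layer.
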
+0 −3
@@ -23,7 +23,6 @@ extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
extern netdev_tx_t
efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
extern netdev_tx_t
@@ -109,7 +108,6 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
/* Channels */
extern int efx_channel_dummy_op_int(struct efx_channel *channel);
extern void efx_channel_dummy_op_void(struct efx_channel *channel);
extern void efx_process_channel_now(struct efx_channel *channel);
extern int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);

@@ -155,7 +153,6 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
	netif_vdbg(channel->efx, intr, channel->efx->net_dev,
		   "channel %d scheduling NAPI poll on CPU%d\n",
		   channel->channel, raw_smp_processor_id());
	channel->work_pending = true;

	napi_schedule(&channel->napi_str);
}
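
The efx.h diff mirrors the code removed from efx.c: efx_release_tx_buffers() and the loopback-only efx_process_channel_now() are no longer exported, and efx_schedule_channel() no longer sets a work_pending flag before scheduling NAPI. What remains is a simple handshake: the ISR schedules NAPI, the poll routine drains up to its budget, and the event queue is re-armed with an ack once polling completes. The toy model below (plain C, invented names, not the driver's API) only illustrates that reduced flow.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct efx_channel: only what the model needs. */
struct channel {
	bool napi_scheduled;
	int pending_events;
};

static void napi_schedule_model(struct channel *ch)   { ch->napi_scheduled = true; }
static void eventq_read_ack_model(struct channel *ch) { (void)ch; /* re-arm events */ }

/* ISR side: no work_pending flag any more, just schedule the poll. */
static void channel_interrupt(struct channel *ch)
{
	napi_schedule_model(ch);
}

/* Poll side: drain up to 'budget' events; if we finished early, complete
 * NAPI and ack the event queue (models efx_nic_eventq_read_ack()). */
static int channel_poll(struct channel *ch, int budget)
{
	int spent = ch->pending_events < budget ? ch->pending_events : budget;

	ch->pending_events -= spent;
	if (spent < budget) {
		ch->napi_scheduled = false;	/* models napi_complete() */
		eventq_read_ack_model(ch);
	}
	return spent;
}

int main(void)
{
	struct channel ch = { .pending_events = 5 };

	channel_interrupt(&ch);
	printf("spent %d\n", channel_poll(&ch, 64));
	return 0;
}
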
+64 −12
@@ -21,7 +21,7 @@
#include "efx.h"
#include "spi.h"
#include "nic.h"
#include "regs.h"
#include "farch_regs.h"
#include "io.h"
#include "phy.h"
#include "workarounds.h"
@@ -336,7 +336,7 @@ static void falcon_prepare_flush(struct efx_nic *efx)
 *
 * NB most hardware supports MSI interrupts
 */
inline void falcon_irq_ack_a1(struct efx_nic *efx)
static inline void falcon_irq_ack_a1(struct efx_nic *efx)
{
	efx_dword_t reg;

@@ -346,7 +346,7 @@ inline void falcon_irq_ack_a1(struct efx_nic *efx)
}


irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
@@ -367,10 +367,13 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
	if (unlikely(syserr))
		return efx_nic_fatal_interrupt(efx);
		return efx_farch_fatal_interrupt(efx);

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
@@ -1418,7 +1421,7 @@ static int falcon_probe_port(struct efx_nic *efx)

	/* Allocate buffer for stats */
	rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
				  FALCON_MAC_STATS_SIZE);
				  FALCON_MAC_STATS_SIZE, GFP_KERNEL);
	if (rc)
		return rc;
	netif_dbg(efx, probe, efx->net_dev,
@@ -1555,7 +1558,7 @@ static int falcon_test_nvram(struct efx_nic *efx)
	return falcon_read_nvram(efx, NULL);
}

static const struct efx_nic_register_test falcon_b0_register_tests[] = {
static const struct efx_farch_register_test falcon_b0_register_tests[] = {
	{ FR_AZ_ADR_REGION,
	  EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
	{ FR_AZ_RX_CFG,
@@ -1615,7 +1618,7 @@ falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
	efx_reset_down(efx, reset_method);

	tests->registers =
		efx_nic_test_registers(efx, falcon_b0_register_tests,
		efx_farch_test_registers(efx, falcon_b0_register_tests,
					 ARRAY_SIZE(falcon_b0_register_tests))
		? -1 : 1;

@@ -1981,7 +1984,7 @@ static int falcon_probe_nic(struct efx_nic *efx)

	rc = -ENODEV;

	if (efx_nic_fpga_ver(efx) != 0) {
	if (efx_farch_fpga_ver(efx) != 0) {
		netif_err(efx, probe, efx->net_dev,
			  "Falcon FPGA not supported\n");
		goto fail1;
@@ -2035,7 +2038,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
	}

	/* Allocate memory for INT_KER */
	rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
	rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
				  GFP_KERNEL);
	if (rc)
		goto fail4;
	BUG_ON(efx->irq_status.dma_addr & 0x0f);
@@ -2214,7 +2218,7 @@ static int falcon_init_nic(struct efx_nic *efx)
		efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
	}

	efx_nic_init_common(efx);
	efx_farch_init_common(efx);

	return 0;
}
@@ -2339,7 +2343,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
	.remove = falcon_remove_nic,
	.init = falcon_init_nic,
	.dimension_resources = falcon_dimension_resources,
	.fini = efx_port_dummy_op_void,
	.fini = falcon_irq_ack_a1,
	.monitor = falcon_monitor,
	.map_reset_reason = falcon_map_reset_reason,
	.map_reset_flags = falcon_map_reset_flags,
@@ -2347,6 +2351,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
	.probe_port = falcon_probe_port,
	.remove_port = falcon_remove_port,
	.handle_global_event = falcon_handle_global_event,
	.fini_dmaq = efx_farch_fini_dmaq,
	.prepare_flush = falcon_prepare_flush,
	.finish_flush = efx_port_dummy_op_void,
	.update_stats = falcon_update_nic_stats,
@@ -2362,6 +2367,28 @@ const struct efx_nic_type falcon_a1_nic_type = {
	.set_wol = falcon_set_wol,
	.resume_wol = efx_port_dummy_op_void,
	.test_nvram = falcon_test_nvram,
	.irq_enable_master = efx_farch_irq_enable_master,
	.irq_test_generate = efx_farch_irq_test_generate,
	.irq_disable_non_ev = efx_farch_irq_disable_master,
	.irq_handle_msi = efx_farch_msi_interrupt,
	.irq_handle_legacy = falcon_legacy_interrupt_a1,
	.tx_probe = efx_farch_tx_probe,
	.tx_init = efx_farch_tx_init,
	.tx_remove = efx_farch_tx_remove,
	.tx_write = efx_farch_tx_write,
	.rx_push_indir_table = efx_farch_rx_push_indir_table,
	.rx_probe = efx_farch_rx_probe,
	.rx_init = efx_farch_rx_init,
	.rx_remove = efx_farch_rx_remove,
	.rx_write = efx_farch_rx_write,
	.rx_defer_refill = efx_farch_rx_defer_refill,
	.ev_probe = efx_farch_ev_probe,
	.ev_init = efx_farch_ev_init,
	.ev_fini = efx_farch_ev_fini,
	.ev_remove = efx_farch_ev_remove,
	.ev_process = efx_farch_ev_process,
	.ev_read_ack = efx_farch_ev_read_ack,
	.ev_test_generate = efx_farch_ev_test_generate,

	.revision = EFX_REV_FALCON_A1,
	.mem_map_size = 0x20000,
@@ -2377,6 +2404,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
	.phys_addr_channels = 4,
	.timer_period_max =  1 << FRF_AB_TC_TIMER_VAL_WIDTH,
	.offload_features = NETIF_F_IP_CSUM,
	.mcdi_max_ver = -1,
};

const struct efx_nic_type falcon_b0_nic_type = {
@@ -2392,6 +2420,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
	.probe_port = falcon_probe_port,
	.remove_port = falcon_remove_port,
	.handle_global_event = falcon_handle_global_event,
	.fini_dmaq = efx_farch_fini_dmaq,
	.prepare_flush = falcon_prepare_flush,
	.finish_flush = efx_port_dummy_op_void,
	.update_stats = falcon_update_nic_stats,
@@ -2408,6 +2437,28 @@ const struct efx_nic_type falcon_b0_nic_type = {
	.resume_wol = efx_port_dummy_op_void,
	.test_chip = falcon_b0_test_chip,
	.test_nvram = falcon_test_nvram,
	.irq_enable_master = efx_farch_irq_enable_master,
	.irq_test_generate = efx_farch_irq_test_generate,
	.irq_disable_non_ev = efx_farch_irq_disable_master,
	.irq_handle_msi = efx_farch_msi_interrupt,
	.irq_handle_legacy = efx_farch_legacy_interrupt,
	.tx_probe = efx_farch_tx_probe,
	.tx_init = efx_farch_tx_init,
	.tx_remove = efx_farch_tx_remove,
	.tx_write = efx_farch_tx_write,
	.rx_push_indir_table = efx_farch_rx_push_indir_table,
	.rx_probe = efx_farch_rx_probe,
	.rx_init = efx_farch_rx_init,
	.rx_remove = efx_farch_rx_remove,
	.rx_write = efx_farch_rx_write,
	.rx_defer_refill = efx_farch_rx_defer_refill,
	.ev_probe = efx_farch_ev_probe,
	.ev_init = efx_farch_ev_init,
	.ev_fini = efx_farch_ev_fini,
	.ev_remove = efx_farch_ev_remove,
	.ev_process = efx_farch_ev_process,
	.ev_read_ack = efx_farch_ev_read_ack,
	.ev_test_generate = efx_farch_ev_test_generate,

	.revision = EFX_REV_FALCON_B0,
	/* Map everything up to and including the RSS indirection
@@ -2431,5 +2482,6 @@ const struct efx_nic_type falcon_b0_nic_type = {
				   * channels */
	.timer_period_max =  1 << FRF_AB_TC_TIMER_VAL_WIDTH,
	.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
	.mcdi_max_ver = -1,
};
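
The falcon.c diff above is dominated by new entries in the falcon_a1_nic_type and falcon_b0_nic_type method tables: the IRQ, TX, RX and event-queue operations now go through efx_farch_* hooks, so the common code stays hardware-agnostic and a future non-Falcon-architecture NIC can register different implementations (bullet 1 of the cover letter). The sketch below is a toy illustration of that dispatch style in plain C; the struct, field and function names are invented, not the driver's.

#include <stdio.h>

/* Toy model of an efx_nic_type-style method table: core code calls through
 * function pointers, and each hardware family supplies its own hooks. */
struct nic;

struct nic_type {
	const char *name;
	int  (*ev_probe)(struct nic *nic);
	void (*irq_enable_master)(struct nic *nic);
	int  (*fini_dmaq)(struct nic *nic);
};

struct nic {
	const struct nic_type *type;
};

/* A "Falcon architecture" implementation, analogous to the efx_farch_*
 * hooks registered in the tables above. */
static int  farch_ev_probe(struct nic *nic)          { (void)nic; return 0; }
static void farch_irq_enable_master(struct nic *nic) { (void)nic; }
static int  farch_fini_dmaq(struct nic *nic)         { (void)nic; return 0; }

static const struct nic_type falcon_like_type = {
	.name = "falcon-arch",
	.ev_probe = farch_ev_probe,
	.irq_enable_master = farch_irq_enable_master,
	.fini_dmaq = farch_fini_dmaq,
};

/* Core code dispatches through the table without knowing the hardware family. */
static int nic_start(struct nic *nic)
{
	int rc = nic->type->ev_probe(nic);

	if (rc)
		return rc;
	nic->type->irq_enable_master(nic);
	return 0;
}

int main(void)
{
	struct nic nic = { .type = &falcon_like_type };

	printf("%s start: %d\n", nic.type->name, nic_start(&nic));
	return 0;
}

A second hardware family would only need to fill in another nic_type table; the probe, reset and datapath paths in the core would not change.
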
+1781 −0

File added.

Preview size limit exceeded, changes collapsed.
