drivers/net/sfc/efx.c +9 −22

@@ -673,7 +673,7 @@ static void efx_fini_channels(struct efx_nic *efx)
 		efx_for_each_channel_rx_queue(rx_queue, channel)
 			efx_fini_rx_queue(rx_queue);
-		efx_for_each_channel_tx_queue(tx_queue, channel)
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
 			efx_fini_tx_queue(tx_queue);
 		efx_fini_eventq(channel);
 	}

@@ -689,7 +689,7 @@ static void efx_remove_channel(struct efx_channel *channel)
 	efx_for_each_channel_rx_queue(rx_queue, channel)
 		efx_remove_rx_queue(rx_queue);
-	efx_for_each_channel_tx_queue(tx_queue, channel)
+	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
 		efx_remove_tx_queue(tx_queue);
 	efx_remove_eventq(channel);
 }

@@ -1271,21 +1271,8 @@ static void efx_remove_interrupts(struct efx_nic *efx)
 static void efx_set_channels(struct efx_nic *efx)
 {
-	struct efx_channel *channel;
-	struct efx_tx_queue *tx_queue;
-
 	efx->tx_channel_offset =
 		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
-
-	/* Channel pointers were set in efx_init_struct() but we now
-	 * need to clear them for TX queues in any RX-only channels. */
-	efx_for_each_channel(channel, efx) {
-		if (channel->channel - efx->tx_channel_offset >=
-		    efx->n_tx_channels) {
-			efx_for_each_channel_tx_queue(tx_queue, channel)
-				tx_queue->channel = NULL;
-		}
-	}
 }
 
 static int efx_probe_nic(struct efx_nic *efx)

@@ -1531,9 +1518,9 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
 	efx->irq_rx_adaptive = rx_adaptive;
 	efx->irq_rx_moderation = rx_ticks;
 	efx_for_each_channel(channel, efx) {
-		if (efx_channel_get_rx_queue(channel))
+		if (efx_channel_has_rx_queue(channel))
 			channel->irq_moderation = rx_ticks;
-		else if (efx_channel_get_tx_queue(channel, 0))
+		else if (efx_channel_has_tx_queues(channel))
 			channel->irq_moderation = tx_ticks;
 	}
 }

@@ -1849,6 +1836,7 @@ static const struct net_device_ops efx_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = efx_netpoll,
 #endif
+	.ndo_setup_tc = efx_setup_tc,
 };
 
 static void efx_update_name(struct efx_nic *efx)

@@ -1910,10 +1898,8 @@ static int efx_register_netdev(struct efx_nic *efx)
 	efx_for_each_channel(channel, efx) {
 		struct efx_tx_queue *tx_queue;
-		efx_for_each_channel_tx_queue(tx_queue, channel) {
-			tx_queue->core_txq = netdev_get_tx_queue(
-				efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
-		}
+		efx_for_each_channel_tx_queue(tx_queue, channel)
+			efx_init_tx_queue_core_txq(tx_queue);
 	}
 
 	/* Always start with carrier off; PHY events will detect the link */

@@ -2401,7 +2387,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 	int i, rc;
 
 	/* Allocate and initialise a struct net_device and struct efx_nic */
-	net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
+	net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
+				     EFX_MAX_RX_QUEUES);
 	if (!net_dev)
 		return -ENOMEM;
 	net_dev->features |= (type->offload_features | NETIF_F_SG |
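Note: efx_init_tx_queue_core_txq() replaces the open-coded core_txq assignment above; its body lives in tx.c, which is outside the hunks shown here. A sketch consistent with the new queue numbering (the placement of high-priority queues after the normal core queues is an assumption, not established by this diff):

	/* Sketch only (assumed layout): map a hardware TX queue to its
	 * core netdev TX queue, i.e. the inverse of the queue lookup on
	 * the transmit path. High-priority (TC1) core queues are assumed
	 * to follow the TC0 core queues. */
	void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
	{
		struct efx_nic *efx = tx_queue->efx;

		tx_queue->core_txq =
			netdev_get_tx_queue(efx->net_dev,
					    tx_queue->queue / EFX_TXQ_TYPES +
					    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
					     efx->n_tx_channels : 0));
	}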
drivers/net/sfc/efx.h +2 −0

@@ -29,6 +29,7 @@
 extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
 extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
 extern netdev_tx_t

@@ -36,6 +37,7 @@
 efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
 extern netdev_tx_t
 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
 
 /* RX */
 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);

drivers/net/sfc/ethtool.c +3 −3

@@ -631,7 +631,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
 	/* Find lowest IRQ moderation across all used TX queues */
 	coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
 	efx_for_each_channel(channel, efx) {
-		if (!efx_channel_get_tx_queue(channel, 0))
+		if (!efx_channel_has_tx_queues(channel))
 			continue;
 		if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
 			if (channel->channel < efx->n_rx_channels)

@@ -676,8 +676,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
 	/* If the channel is shared only allow RX parameters to be set */
 	efx_for_each_channel(channel, efx) {
-		if (efx_channel_get_rx_queue(channel) &&
-		    efx_channel_get_tx_queue(channel, 0) &&
+		if (efx_channel_has_rx_queue(channel) &&
+		    efx_channel_has_tx_queues(channel) &&
 		    tx_usecs) {
 			netif_err(efx, drv, efx->net_dev, "Channel is shared. "
 				  "Only RX coalescing may be set\n");
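Note: the pattern change in these callers is that presence tests move from "does the _get_ accessor return NULL?" to dedicated _has_ predicates (added in net_driver.h below); the _get_ accessors now assert instead of returning NULL. A hypothetical helper following the new convention:

	/* Hypothetical helper, mirroring the shared-channel test above:
	 * check presence with the _has_ predicates first; only then may
	 * the _get_ accessors be called safely. */
	static bool example_channel_is_shared(struct efx_channel *channel)
	{
		return efx_channel_has_rx_queue(channel) &&
		       efx_channel_has_tx_queues(channel);
	}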
drivers/net/sfc/net_driver.h +49 −15

@@ -63,10 +63,12 @@
 /* Checksum generation is a per-queue option in hardware, so each
  * queue visible to the networking core is backed by two hardware TX
  * queues. */
-#define EFX_MAX_CORE_TX_QUEUES	EFX_MAX_CHANNELS
-#define EFX_TXQ_TYPE_OFFLOAD	1
-#define EFX_TXQ_TYPES		2
-#define EFX_MAX_TX_QUEUES	(EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES)
+#define EFX_MAX_TX_TC		2
+#define EFX_MAX_CORE_TX_QUEUES	(EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
+#define EFX_TXQ_TYPE_OFFLOAD	1	/* flag */
+#define EFX_TXQ_TYPE_HIGHPRI	2	/* flag */
+#define EFX_TXQ_TYPES		4
+#define EFX_MAX_TX_QUEUES	(EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
 
 /**
  * struct efx_special_buffer - An Efx special buffer

@@ -140,6 +142,7 @@ struct efx_tx_buffer {
  * @buffer: The software buffer ring
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
+ * @initialised: Has hardware queue been initialised?
  * @flushed: Used when handling queue flushing
  * @read_count: Current read pointer.
  *	This is the number of buffers that have been removed from both rings.

@@ -182,6 +185,7 @@
 	struct efx_tx_buffer *buffer;
 	struct efx_special_buffer txd;
 	unsigned int ptr_mask;
+	bool initialised;
 	enum efx_flush_state flushed;
 
 	/* Members used mainly on the completion path */

@@ -377,7 +381,7 @@ struct efx_channel {
 	bool rx_pkt_csummed;
 
 	struct efx_rx_queue rx_queue;
-	struct efx_tx_queue tx_queue[2];
+	struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
 };
 
 enum efx_led_mode {

@@ -938,18 +942,40 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
 	return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
 }
 
+static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
+{
+	return channel->channel - channel->efx->tx_channel_offset <
+		channel->efx->n_tx_channels;
+}
+
 static inline struct efx_tx_queue *
 efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
 {
-	struct efx_tx_queue *tx_queue = channel->tx_queue;
-	EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES);
-	return tx_queue->channel ? tx_queue + type : NULL;
+	EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
+			    type >= EFX_TXQ_TYPES);
+	return &channel->tx_queue[type];
+}
+
+static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
+{
+	return !(tx_queue->efx->net_dev->num_tc < 2 &&
+		 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
 }
 
 /* Iterate over all TX queues belonging to a channel */
 #define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
-	for (_tx_queue = efx_channel_get_tx_queue(channel, 0);		\
-	     _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
-	     _tx_queue++)
+	if (!efx_channel_has_tx_queues(_channel))			\
+		;							\
+	else								\
+		for (_tx_queue = (_channel)->tx_queue;			\
+		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
+			     efx_tx_queue_used(_tx_queue);		\
+		     _tx_queue++)
+
+/* Iterate over all possible TX queues belonging to a channel */
+#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel)	\
+	for (_tx_queue = (_channel)->tx_queue;				\
+	     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES;		\
+	     _tx_queue++)
 
 static inline struct efx_rx_queue *

@@ -959,16 +985,24 @@ efx_get_rx_queue(struct efx_nic *efx, unsigned index)
 	return &efx->channel[index]->rx_queue;
 }
 
+static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
+{
+	return channel->channel < channel->efx->n_rx_channels;
+}
+
 static inline struct efx_rx_queue *
 efx_channel_get_rx_queue(struct efx_channel *channel)
 {
-	return channel->channel < channel->efx->n_rx_channels ?
-		&channel->rx_queue : NULL;
+	EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
+	return &channel->rx_queue;
 }
 
 /* Iterate over all RX queues belonging to a channel */
 #define efx_for_each_channel_rx_queue(_rx_queue, _channel)		\
-	for (_rx_queue = efx_channel_get_rx_queue(channel);		\
-	     _rx_queue;							\
-	     _rx_queue = NULL)
+	if (!efx_channel_has_rx_queue(_channel))			\
+		;							\
+	else								\
+		for (_rx_queue = &(_channel)->rx_queue;			\
+		     _rx_queue;						\
+		     _rx_queue = NULL)
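Note: both rewritten iterator macros use the "if (!cond) ; else for (...)" shape. A standalone demonstration (not driver code) of why a plain "if (cond) for (...)" macro would be unsafe: it would capture an else written by the caller.

	/* The inner if always carries its own else, so a caller's else
	 * still binds to the caller's if, as the dangling-else rule
	 * requires. */
	#define iterate_if_nonempty(_i, _n)			\
		if ((_n) == 0)					\
			;					\
		else						\
			for ((_i) = 0; (_i) < (_n); (_i)++)

	int sum_or_flag(int n)
	{
		int i, sum = 0;

		if (n >= 0)
			iterate_if_nonempty(i, n)
				sum += i;
		else
			sum = -1;	/* binds to "if (n >= 0)", as intended */

		return sum;
	}

With the naive single-if form, the caller's else would silently bind to the macro's hidden if instead, changing the meaning of the surrounding code.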
drivers/net/sfc/nic.c +38 −13

@@ -445,8 +445,8 @@ int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
 
 void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 {
-	efx_oword_t tx_desc_ptr;
 	struct efx_nic *efx = tx_queue->efx;
+	efx_oword_t reg;
 
 	tx_queue->flushed = FLUSH_NONE;

@@ -454,7 +454,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 	efx_init_special_buffer(efx, &tx_queue->txd);
 
 	/* Push TX descriptor ring to card */
-	EFX_POPULATE_OWORD_10(tx_desc_ptr,
+	EFX_POPULATE_OWORD_10(reg,
 			      FRF_AZ_TX_DESCQ_EN, 1,
 			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
 			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,

@@ -470,17 +470,15 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
 		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
 				    !csum);
 	}
 
-	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
 			 tx_queue->queue);
 
 	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
-		efx_oword_t reg;
-
 		/* Only 128 bits in this register */
 		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

@@ -491,6 +489,16 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 			set_bit_le(tx_queue->queue, (void *)&reg);
 		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
 	}
+
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		EFX_POPULATE_OWORD_1(reg,
+				     FRF_BZ_TX_PACE,
+				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+				     FFE_BZ_TX_PACE_OFF :
+				     FFE_BZ_TX_PACE_RESERVED);
+		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+				 tx_queue->queue);
+	}
 }
 
 static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)

@@ -1238,9 +1246,11 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 	/* Flush all tx queues in parallel */
 	efx_for_each_channel(channel, efx) {
-		efx_for_each_channel_tx_queue(tx_queue, channel)
-			efx_flush_tx_queue(tx_queue);
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised)
+				efx_flush_tx_queue(tx_queue);
+		}
 	}
 
 	/* The hardware supports four concurrent rx flushes, each of which may
 	 * need to be retried if there is an outstanding descriptor fetch */

@@ -1262,8 +1272,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 				++rx_pending;
 			}
 		}
-		efx_for_each_channel_tx_queue(tx_queue, channel) {
-			if (tx_queue->flushed != FLUSH_DONE)
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised &&
+			    tx_queue->flushed != FLUSH_DONE)
 				++tx_pending;
 		}
 	}

@@ -1278,8 +1289,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 	/* Mark the queues as all flushed. We're going to return failure
 	 * leading to a reset, or fake up success anyway */
 	efx_for_each_channel(channel, efx) {
-		efx_for_each_channel_tx_queue(tx_queue, channel) {
-			if (tx_queue->flushed != FLUSH_DONE)
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised &&
+			    tx_queue->flushed != FLUSH_DONE)
 				netif_err(efx, hw, efx->net_dev,
 					  "tx queue %d flush command timed out\n",
 					  tx_queue->queue);

@@ -1682,6 +1694,19 @@ void efx_nic_init_common(struct efx_nic *efx)
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
 		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
 	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		EFX_POPULATE_OWORD_4(temp,
+				     /* Default values */
+				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+				     FRF_BZ_TX_PACE_SB_AF, 0xb,
+				     FRF_BZ_TX_PACE_FB_BASE, 0,
+				     /* Allow large pace values in the
+				      * fast bin. */
+				     FRF_BZ_TX_PACE_BIN_TH,
+				     FFE_BZ_TX_PACE_RESERVED);
+		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
+	}
 }
 
 /* Register dump */
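Note: the type tests on tx_queue->queue above (queue & EFX_TXQ_TYPE_OFFLOAD for checksum generation, queue & EFX_TXQ_TYPE_HIGHPRI for the TX_PACE_TBL entry) rely on the low bits of the hardware queue number carrying the type flags. The assignment itself happens in channel-setup code outside these hunks; a sketch of the numbering that would make these tests work, assuming queues are numbered consecutively per channel:

	/* Sketch (assumed numbering): each channel owns EFX_TXQ_TYPES (4)
	 * consecutive hardware TX queues, so with EFX_TXQ_TYPES a power
	 * of two the low two bits of the queue number are exactly the
	 * type flags tested in efx_nic_init_tx(). */
	static inline unsigned int
	example_tx_queue_number(unsigned int channel, unsigned int type)
	{
		/* type = any OR of EFX_TXQ_TYPE_OFFLOAD (bit 0) and
		 * EFX_TXQ_TYPE_HIGHPRI (bit 1), both < EFX_TXQ_TYPES */
		return channel * EFX_TXQ_TYPES + type;
	}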