Commit 1191cb83 authored by Eric Dumazet, committed by David S. Miller

bnx2x: remove some bloat



Before doing the skb->head_frag work on the bnx2x driver, I found that far
too much code was inlined in bnx2x/bnx2x_cmn.h for no good reason, which
made the work harder than it needed to be.

Move some big functions out of this include file to the respective .c
file.

A lot of inline keywords are not needed at all in this huge driver.

   text	   data	    bss	    dec	    hex	filename
 490083	   1270	     56	 491409	  77f91	bnx2x/bnx2x.ko.before
 484206	   1270	     56	 485532	  7689c	bnx2x/bnx2x.ko
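
(The table above is the kind of before/after output produced by the binutils size(1) utility, e.g. "size bnx2x/bnx2x.ko"; here it works out to roughly 5.9 kB of text saved. As a hedged illustration of why the change shrinks the module — this is not code from the commit itself — a helper defined static inline in a widely-included header is re-emitted in every object file that uses it, while the same helper defined once as plain static in a .c file exists in a single copy, and the compiler remains free to inline it within that file when profitable. A minimal sketch, with hypothetical names not taken from the bnx2x driver:

/* Hypothetical sketch of the de-inlining pattern applied by this commit;
 * struct foo_ring and foo_ring_space() are illustrative only.
 */

struct foo_ring {
	unsigned int size;	/* number of slots in the ring */
	unsigned int prod;	/* producer index */
	unsigned int cons;	/* consumer index */
};

/* Before: defined in a widely-included header (foo.h), so every .c file
 * that calls it carries its own copy of the generated code:
 *
 *	static inline unsigned int foo_ring_space(const struct foo_ring *r)
 *	{
 *		return r->size - (r->prod - r->cons);
 *	}
 *
 * After: defined once in foo.c as plain static.  The compiler may still
 * inline it inside foo.c where that is a win, but the object file (and
 * the .ko linked from it) contains at most one out-of-line copy.
 */
static unsigned int foo_ring_space(const struct foo_ring *r)
{
	return r->size - (r->prod - r->cons);
}

unsigned int foo_ring_has_room(const struct foo_ring *r, unsigned int needed)
{
	return foo_ring_space(r) >= needed;
}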

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Eilon Greenstein <eilong@broadcom.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Maciej Żenczykowski <maze@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Tom Herbert <therbert@google.com>
Cc: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Cc: Ben Hutchings <bhutchings@solarflare.com>
Cc: Matt Carlson <mcarlson@broadcom.com>
Cc: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d344c4f3
+262 −23
@@ -358,7 +358,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
  * Approximate value of the MSS for this aggregation calculated using
  * the first packet of it.
  */
-static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
-				    u16 len_on_bd)
+static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
+			     u16 len_on_bd)
 {
 	/*
@@ -385,6 +385,36 @@ static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
 	return len_on_bd - hdrs_len;
 }
 
+static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
+			      struct bnx2x_fastpath *fp, u16 index)
+{
+	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
+	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
+	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+	dma_addr_t mapping;
+
+	if (unlikely(page == NULL)) {
+		BNX2X_ERR("Can't alloc sge\n");
+		return -ENOMEM;
+	}
+
+	mapping = dma_map_page(&bp->pdev->dev, page, 0,
+			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		__free_pages(page, PAGES_PER_SGE_SHIFT);
+		BNX2X_ERR("Can't map sge\n");
+		return -ENOMEM;
+	}
+
+	sw_buf->page = page;
+	dma_unmap_addr_set(sw_buf, mapping, mapping);
+
+	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
+	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+	return 0;
+}
+
 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			       struct bnx2x_agg_info *tpa_info,
 			       u16 pages,
@@ -483,7 +513,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	return 0;
 }
 
-static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			   struct bnx2x_agg_info *tpa_info,
 			   u16 pages,
 			   struct eth_end_agg_rx_cqe *cqe,
@@ -557,6 +587,36 @@ static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	fp->eth_q_stats.rx_skb_alloc_failed++;
 }
 
+static int bnx2x_alloc_rx_data(struct bnx2x *bp,
+			       struct bnx2x_fastpath *fp, u16 index)
+{
+	u8 *data;
+	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
+	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
+	dma_addr_t mapping;
+
+	data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+	if (unlikely(data == NULL))
+		return -ENOMEM;
+
+	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
+				 fp->rx_buf_size,
+				 DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		kfree(data);
+		BNX2X_ERR("Can't map rx data\n");
+		return -ENOMEM;
+	}
+
+	rx_buf->data = data;
+	dma_unmap_addr_set(rx_buf, mapping, mapping);
+
+	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+	return 0;
+}
+
 
 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 {
@@ -870,7 +930,7 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
  *
  * It uses a none-atomic bit operations because is called under the mutex.
  */
-static inline void bnx2x_fill_report_data(struct bnx2x *bp,
+static void bnx2x_fill_report_data(struct bnx2x *bp,
 				   struct bnx2x_link_report_data *data)
 {
 	u16 line_speed = bnx2x_get_mf_speed(bp);
@@ -989,6 +1049,47 @@ void __bnx2x_link_report(struct bnx2x *bp)
 	}
 }
 
+static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
+{
+	int i;
+
+	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
+		struct eth_rx_sge *sge;
+
+		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
+		sge->addr_hi =
+			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
+			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
+
+		sge->addr_lo =
+			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
+			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
+	}
+}
+
+static void bnx2x_free_tpa_pool(struct bnx2x *bp,
+				struct bnx2x_fastpath *fp, int last)
+{
+	int i;
+
+	for (i = 0; i < last; i++) {
+		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
+		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
+		u8 *data = first_buf->data;
+
+		if (data == NULL) {
+			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
+			continue;
+		}
+		if (tpa_info->tpa_state == BNX2X_TPA_START)
+			dma_unmap_single(&bp->pdev->dev,
+					 dma_unmap_addr(first_buf, mapping),
+					 fp->rx_buf_size, DMA_FROM_DEVICE);
+		kfree(data);
+		first_buf->data = NULL;
+	}
+}
+
 void bnx2x_init_rx_rings(struct bnx2x *bp)
 {
 	int func = BP_FUNC(bp);
@@ -1362,7 +1463,7 @@ static int bnx2x_req_irq(struct bnx2x *bp)
 	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
 }
 
-static inline int bnx2x_setup_irqs(struct bnx2x *bp)
+static int bnx2x_setup_irqs(struct bnx2x *bp)
 {
 	int rc = 0;
 	if (bp->flags & USING_MSIX_FLAG &&
@@ -1392,7 +1493,7 @@ static inline int bnx2x_setup_irqs(struct bnx2x *bp)
 	return 0;
 }
 
-static inline void bnx2x_napi_enable(struct bnx2x *bp)
+static void bnx2x_napi_enable(struct bnx2x *bp)
 {
 	int i;
 
@@ -1400,7 +1501,7 @@ static inline void bnx2x_napi_enable(struct bnx2x *bp)
 		napi_enable(&bnx2x_fp(bp, i, napi));
 }
 
-static inline void bnx2x_napi_disable(struct bnx2x *bp)
+static void bnx2x_napi_disable(struct bnx2x *bp)
 {
 	int i;
 
@@ -1487,7 +1588,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
  */
-static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
+static int bnx2x_set_real_num_queues(struct bnx2x *bp)
 {
 	int rc, tx, rx;
 
@@ -1519,7 +1620,7 @@ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
 	return rc;
 }
 
-static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
+static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
 {
 	int i;
 
@@ -1547,7 +1648,7 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
 	}
 }
 
-static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
+static int bnx2x_init_rss_pf(struct bnx2x *bp)
 {
 	int i;
 	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
@@ -1614,7 +1715,7 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
 	return bnx2x_config_rss(bp, &params);
 }
 
-static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
+static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
 {
 	struct bnx2x_func_state_params func_params = {NULL};
 
@@ -1723,6 +1824,87 @@ bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
 	return true;
 }
 
+/**
+ * bnx2x_bz_fp - zero content of the fastpath structure.
+ *
+ * @bp:		driver handle
+ * @index:	fastpath index to be zeroed
+ *
+ * Makes sure the contents of the bp->fp[index].napi is kept
+ * intact.
+ */
+static void bnx2x_bz_fp(struct bnx2x *bp, int index)
+{
+	struct bnx2x_fastpath *fp = &bp->fp[index];
+	struct napi_struct orig_napi = fp->napi;
+	/* bzero bnx2x_fastpath contents */
+	if (bp->stats_init)
+		memset(fp, 0, sizeof(*fp));
+	else {
+		/* Keep Queue statistics */
+		struct bnx2x_eth_q_stats *tmp_eth_q_stats;
+		struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
+
+		tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
+					  GFP_KERNEL);
+		if (tmp_eth_q_stats)
+			memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
+			       sizeof(struct bnx2x_eth_q_stats));
+
+		tmp_eth_q_stats_old =
+			kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
+				GFP_KERNEL);
+		if (tmp_eth_q_stats_old)
+			memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
+			       sizeof(struct bnx2x_eth_q_stats_old));
+
+		memset(fp, 0, sizeof(*fp));
+
+		if (tmp_eth_q_stats) {
+			memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
+				   sizeof(struct bnx2x_eth_q_stats));
+			kfree(tmp_eth_q_stats);
+		}
+
+		if (tmp_eth_q_stats_old) {
+			memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
+			       sizeof(struct bnx2x_eth_q_stats_old));
+			kfree(tmp_eth_q_stats_old);
+		}
+
+	}
+
+	/* Restore the NAPI object as it has been already initialized */
+	fp->napi = orig_napi;
+
+	fp->bp = bp;
+	fp->index = index;
+	if (IS_ETH_FP(fp))
+		fp->max_cos = bp->max_cos;
+	else
+		/* Special queues support only one CoS */
+		fp->max_cos = 1;
+
+	/*
+	 * set the tpa flag for each queue. The tpa flag determines the queue
+	 * minimal size so it must be set prior to queue memory allocation
+	 */
+	fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
+				  (bp->flags & GRO_ENABLE_FLAG &&
+				   bnx2x_mtu_allows_gro(bp->dev->mtu)));
+	if (bp->flags & TPA_ENABLE_FLAG)
+		fp->mode = TPA_MODE_LRO;
+	else if (bp->flags & GRO_ENABLE_FLAG)
+		fp->mode = TPA_MODE_GRO;
+
+#ifdef BCM_CNIC
+	/* We don't want TPA on an FCoE L2 ring */
+	if (IS_FCOE_FP(fp))
+		fp->disable_tpa = 1;
+#endif
+}
+
+
 /* must be called with rtnl_lock */
 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 {
@@ -3169,7 +3351,7 @@ void bnx2x_free_fp_mem(struct bnx2x *bp)
 		bnx2x_free_fp_mem_at(bp, i);
 }
 
-static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
+static void set_sb_shortcuts(struct bnx2x *bp, int index)
 {
 	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
 	if (!CHIP_IS_E1x(bp)) {
@@ -3185,6 +3367,63 @@ static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
 	}
 }
 
+/* Returns the number of actually allocated BDs */
+static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
+			      int rx_ring_size)
+{
+	struct bnx2x *bp = fp->bp;
+	u16 ring_prod, cqe_ring_prod;
+	int i, failure_cnt = 0;
+
+	fp->rx_comp_cons = 0;
+	cqe_ring_prod = ring_prod = 0;
+
+	/* This routine is called only during fo init so
+	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
+	 */
+	for (i = 0; i < rx_ring_size; i++) {
+		if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
+			failure_cnt++;
+			continue;
+		}
+		ring_prod = NEXT_RX_IDX(ring_prod);
+		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
+		WARN_ON(ring_prod <= (i - failure_cnt));
+	}
+
+	if (failure_cnt)
+		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
+			  i - failure_cnt, fp->index);
+
+	fp->rx_bd_prod = ring_prod;
+	/* Limit the CQE producer by the CQE ring size */
+	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
+			       cqe_ring_prod);
+	fp->rx_pkt = fp->rx_calls = 0;
+
+	fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
+
+	return i - failure_cnt;
+}
+
+static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
+{
+	int i;
+
+	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
+		struct eth_rx_cqe_next_page *nextpg;
+
+		nextpg = (struct eth_rx_cqe_next_page *)
+			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
+		nextpg->addr_hi =
+			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
+				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
+		nextpg->addr_lo =
+			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
+				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
+	}
+}
+
 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
 {
 	union host_hc_status_block *sb;
@@ -3674,7 +3913,7 @@ void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
 			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
 }
 
-static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
+static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
 				    u8 fw_sb_id, u8 sb_index,
 				    u8 ticks)
 {
@@ -3687,7 +3926,7 @@ static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
 	   port, fw_sb_id, sb_index, ticks);
 }
 
-static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
+static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
 				    u16 fw_sb_id, u8 sb_index,
 				    u8 disable)
 {
+0 −362
@@ -612,53 +612,6 @@ static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
 	barrier();
 }
 
-static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
-					  u8 idu_sb_id, bool is_Pf)
-{
-	u32 data, ctl, cnt = 100;
-	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
-	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
-	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
-	u32 sb_bit =  1 << (idu_sb_id%32);
-	u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
-	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
-
-	/* Not supported in BC mode */
-	if (CHIP_INT_MODE_IS_BC(bp))
-		return;
-
-	data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
-			<< IGU_REGULAR_CLEANUP_TYPE_SHIFT)	|
-		IGU_REGULAR_CLEANUP_SET				|
-		IGU_REGULAR_BCLEANUP;
-
-	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
-	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
-	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
-
-	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
-			 data, igu_addr_data);
-	REG_WR(bp, igu_addr_data, data);
-	mmiowb();
-	barrier();
-	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
-			  ctl, igu_addr_ctl);
-	REG_WR(bp, igu_addr_ctl, ctl);
-	mmiowb();
-	barrier();
-
-	/* wait for clean up to finish */
-	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
-		msleep(20);
-
-
-	if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
-		DP(NETIF_MSG_HW,
-		   "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
-			  idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
-	}
-}
-
 static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
 				   u8 storm, u16 index, u8 op, u8 update)
 {
@@ -885,66 +838,6 @@ static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
 	bnx2x_clear_sge_mask_next_elems(fp);
 }
 
-static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
-				     struct bnx2x_fastpath *fp, u16 index)
-{
-	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
-	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
-	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
-	dma_addr_t mapping;
-
-	if (unlikely(page == NULL)) {
-		BNX2X_ERR("Can't alloc sge\n");
-		return -ENOMEM;
-	}
-
-	mapping = dma_map_page(&bp->pdev->dev, page, 0,
-			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-		__free_pages(page, PAGES_PER_SGE_SHIFT);
-		BNX2X_ERR("Can't map sge\n");
-		return -ENOMEM;
-	}
-
-	sw_buf->page = page;
-	dma_unmap_addr_set(sw_buf, mapping, mapping);
-
-	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
-	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
-
-	return 0;
-}
-
-static inline int bnx2x_alloc_rx_data(struct bnx2x *bp,
-				      struct bnx2x_fastpath *fp, u16 index)
-{
-	u8 *data;
-	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
-	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
-	dma_addr_t mapping;
-
-	data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
-	if (unlikely(data == NULL))
-		return -ENOMEM;
-
-	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
-				 fp->rx_buf_size,
-				 DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-		kfree(data);
-		BNX2X_ERR("Can't map rx data\n");
-		return -ENOMEM;
-	}
-
-	rx_buf->data = data;
-	dma_unmap_addr_set(rx_buf, mapping, mapping);
-
-	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
-	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-
-	return 0;
-}
-
 /* note that we are not allocating a new buffer,
  * we are just moving one from cons to prod
  * we are not creating a new mapping,
@@ -1042,66 +935,6 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
 		bnx2x_free_rx_sge(bp, fp, i);
 }
 
-static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
-				       struct bnx2x_fastpath *fp, int last)
-{
-	int i;
-
-	for (i = 0; i < last; i++) {
-		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
-		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
-		u8 *data = first_buf->data;
-
-		if (data == NULL) {
-			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
-			continue;
-		}
-		if (tpa_info->tpa_state == BNX2X_TPA_START)
-			dma_unmap_single(&bp->pdev->dev,
-					 dma_unmap_addr(first_buf, mapping),
-					 fp->rx_buf_size, DMA_FROM_DEVICE);
-		kfree(data);
-		first_buf->data = NULL;
-	}
-}
-
-static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
-{
-	int i;
-
-	for (i = 1; i <= NUM_TX_RINGS; i++) {
-		struct eth_tx_next_bd *tx_next_bd =
-			&txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
-
-		tx_next_bd->addr_hi =
-			cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
-				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
-		tx_next_bd->addr_lo =
-			cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
-				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
-	}
-
-	SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
-	txdata->tx_db.data.zero_fill1 = 0;
-	txdata->tx_db.data.prod = 0;
-
-	txdata->tx_pkt_prod = 0;
-	txdata->tx_pkt_cons = 0;
-	txdata->tx_bd_prod = 0;
-	txdata->tx_bd_cons = 0;
-	txdata->tx_pkt = 0;
-}
-
-static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
-{
-	int i;
-	u8 cos;
-
-	for_each_tx_queue(bp, i)
-		for_each_cos_in_tx_queue(&bp->fp[i], cos)
-			bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
-}
-
 static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
 {
 	int i;
@@ -1119,80 +952,6 @@ static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
 	}
 }
 
-static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
-{
-	int i;
-
-	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
-		struct eth_rx_sge *sge;
-
-		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
-		sge->addr_hi =
-			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
-			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
-
-		sge->addr_lo =
-			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
-			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
-	}
-}
-
-static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
-{
-	int i;
-	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
-		struct eth_rx_cqe_next_page *nextpg;
-
-		nextpg = (struct eth_rx_cqe_next_page *)
-			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
-		nextpg->addr_hi =
-			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
-				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
-		nextpg->addr_lo =
-			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
-				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
-	}
-}
-
-/* Returns the number of actually allocated BDs */
-static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
-				      int rx_ring_size)
-{
-	struct bnx2x *bp = fp->bp;
-	u16 ring_prod, cqe_ring_prod;
-	int i, failure_cnt = 0;
-
-	fp->rx_comp_cons = 0;
-	cqe_ring_prod = ring_prod = 0;
-
-	/* This routine is called only during fo init so
-	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
-	 */
-	for (i = 0; i < rx_ring_size; i++) {
-		if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
-			failure_cnt++;
-			continue;
-		}
-		ring_prod = NEXT_RX_IDX(ring_prod);
-		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
-		WARN_ON(ring_prod <= (i - failure_cnt));
-	}
-
-	if (failure_cnt)
-		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
-			  i - failure_cnt, fp->index);
-
-	fp->rx_bd_prod = ring_prod;
-	/* Limit the CQE producer by the CQE ring size */
-	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
-			       cqe_ring_prod);
-	fp->rx_pkt = fp->rx_calls = 0;
-
-	fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
-
-	return i - failure_cnt;
-}
-
 /* Statistics ID are global per chip/path, while Client IDs for E1x are per
  * port.
  */
@@ -1421,47 +1180,6 @@ static inline void __storm_memset_struct(struct bnx2x *bp,
 		REG_WR(bp, addr + (i * 4), data[i]);
 }
 
-static inline void storm_memset_func_cfg(struct bnx2x *bp,
-				struct tstorm_eth_function_common_config *tcfg,
-				u16 abs_fid)
-{
-	size_t size = sizeof(struct tstorm_eth_function_common_config);
-
-	u32 addr = BAR_TSTRORM_INTMEM +
-			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
-
-	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
-}
-
-static inline void storm_memset_cmng(struct bnx2x *bp,
-				struct cmng_init *cmng,
-				u8 port)
-{
-	int vn;
-	size_t size = sizeof(struct cmng_struct_per_port);
-
-	u32 addr = BAR_XSTRORM_INTMEM +
-			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
-
-	__storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
-
-	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
-		int func = func_by_vn(bp, vn);
-
-		addr = BAR_XSTRORM_INTMEM +
-		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
-		size = sizeof(struct rate_shaping_vars_per_vn);
-		__storm_memset_struct(bp, addr, size,
-				      (u32 *)&cmng->vnic.vnic_max_rate[vn]);
-
-		addr = BAR_XSTRORM_INTMEM +
-		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
-		size = sizeof(struct fairness_vars_per_vn);
-		__storm_memset_struct(bp, addr, size,
-				      (u32 *)&cmng->vnic.vnic_min_rate[vn]);
-	}
-}
-
 /**
  * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
  *
@@ -1544,86 +1262,6 @@ static inline bool bnx2x_mtu_allows_gro(int mtu)
 	 */
 	return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
 }
-/**
- * bnx2x_bz_fp - zero content of the fastpath structure.
- *
- * @bp:		driver handle
- * @index:	fastpath index to be zeroed
- *
- * Makes sure the contents of the bp->fp[index].napi is kept
- * intact.
- */
-static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
-{
-	struct bnx2x_fastpath *fp = &bp->fp[index];
-	struct napi_struct orig_napi = fp->napi;
-	/* bzero bnx2x_fastpath contents */
-	if (bp->stats_init)
-		memset(fp, 0, sizeof(*fp));
-	else {
-		/* Keep Queue statistics */
-		struct bnx2x_eth_q_stats *tmp_eth_q_stats;
-		struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
-
-		tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
-					  GFP_KERNEL);
-		if (tmp_eth_q_stats)
-			memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
-			       sizeof(struct bnx2x_eth_q_stats));
-
-		tmp_eth_q_stats_old =
-			kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
-				GFP_KERNEL);
-		if (tmp_eth_q_stats_old)
-			memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
-			       sizeof(struct bnx2x_eth_q_stats_old));
-
-		memset(fp, 0, sizeof(*fp));
-
-		if (tmp_eth_q_stats) {
-			memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
-				   sizeof(struct bnx2x_eth_q_stats));
-			kfree(tmp_eth_q_stats);
-		}
-
-		if (tmp_eth_q_stats_old) {
-			memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
-			       sizeof(struct bnx2x_eth_q_stats_old));
-			kfree(tmp_eth_q_stats_old);
-		}
-
-	}
-
-	/* Restore the NAPI object as it has been already initialized */
-	fp->napi = orig_napi;
-
-	fp->bp = bp;
-	fp->index = index;
-	if (IS_ETH_FP(fp))
-		fp->max_cos = bp->max_cos;
-	else
-		/* Special queues support only one CoS */
-		fp->max_cos = 1;
-
-	/*
-	 * set the tpa flag for each queue. The tpa flag determines the queue
-	 * minimal size so it must be set prior to queue memory allocation
-	 */
-	fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
-				  (bp->flags & GRO_ENABLE_FLAG &&
-				   bnx2x_mtu_allows_gro(bp->dev->mtu)));
-	if (bp->flags & TPA_ENABLE_FLAG)
-		fp->mode = TPA_MODE_LRO;
-	else if (bp->flags & GRO_ENABLE_FLAG)
-		fp->mode = TPA_MODE_GRO;
-
-#ifdef BCM_CNIC
-	/* We don't want TPA on an FCoE L2 ring */
-	if (IS_FCOE_FP(fp))
-		fp->disable_tpa = 1;
-#endif
-}
-
 #ifdef BCM_CNIC
 /**
  * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
+12 −12
@@ -592,7 +592,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 #define IS_E3_ONLINE(info)	(((info) & RI_E3_ONLINE) == RI_E3_ONLINE)
 #define IS_E3B0_ONLINE(info)	(((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE)
 
-static inline bool bnx2x_is_reg_online(struct bnx2x *bp,
+static bool bnx2x_is_reg_online(struct bnx2x *bp,
 				const struct reg_addr *reg_info)
 {
 	if (CHIP_IS_E1(bp))
@@ -610,7 +610,7 @@ static inline bool bnx2x_is_reg_online(struct bnx2x *bp,
 }
 
 /******* Paged registers info selectors ********/
-static inline const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
+static const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
 {
 	if (CHIP_IS_E2(bp))
 		return page_vals_e2;
@@ -620,7 +620,7 @@ static inline const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
 		return NULL;
 }
 
-static inline u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
+static u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
 {
 	if (CHIP_IS_E2(bp))
 		return PAGE_MODE_VALUES_E2;
@@ -630,7 +630,7 @@ static inline u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
 		return 0;
 }
 
-static inline const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
+static const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
 {
 	if (CHIP_IS_E2(bp))
 		return page_write_regs_e2;
@@ -640,7 +640,7 @@ static inline const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
 		return NULL;
 }
 
-static inline u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
+static u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
 {
 	if (CHIP_IS_E2(bp))
 		return PAGE_WRITE_REGS_E2;
@@ -650,7 +650,7 @@ static inline u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
 		return 0;
 }
 
-static inline const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
+static const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
 {
 	if (CHIP_IS_E2(bp))
 		return page_read_regs_e2;
@@ -660,7 +660,7 @@ static inline const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
 		return NULL;
 }
 
-static inline u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
+static u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
 {
 	if (CHIP_IS_E2(bp))
 		return PAGE_READ_REGS_E2;
@@ -670,7 +670,7 @@ static inline u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
 		return 0;
 }
 
-static inline int __bnx2x_get_regs_len(struct bnx2x *bp)
+static int __bnx2x_get_regs_len(struct bnx2x *bp)
 {
 	int num_pages = __bnx2x_get_page_reg_num(bp);
 	int page_write_num = __bnx2x_get_page_write_num(bp);
@@ -715,7 +715,7 @@ static int bnx2x_get_regs_len(struct net_device *dev)
  * ("read address"). There may be more than one write address per "page" and
  * more than one read address per write address.
  */
-static inline void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
+static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
 {
 	u32 i, j, k, n;
 	/* addresses of the paged registers */
@@ -744,7 +744,7 @@ static inline void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
 	}
 }
 
-static inline void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
+static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
 {
 	u32 i, j;
 
@@ -2209,7 +2209,7 @@ static void bnx2x_self_test(struct net_device *dev,
 /* ethtool statistics are displayed for all regular ethernet queues and the
  * fcoe L2 queue if not disabled
  */
-static inline int bnx2x_num_stat_queues(struct bnx2x *bp)
+static int bnx2x_num_stat_queues(struct bnx2x *bp)
 {
 	return BNX2X_NUM_ETH_QUEUES(bp);
 }
+224 −101

File changed; preview size limit exceeded, changes collapsed.

+1 −1
@@ -1316,7 +1316,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
 *
 * @param bp
 */
-static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
+static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
 {
 	int i;
 	int first_queue_query_index;