
Commit ebda9b46 authored by Xue Chaojing, committed by David S. Miller

net-next/hinic: replace multiply and division operators



To improve performance, this patch uses bit operations (shifts and
masks) to replace multiplication and division operators.
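For a power-of-two size, where size == 1 << shift, division turns into a
right shift, multiplication into a left shift, and modulo into a mask. A
minimal standalone sketch of the identities the patch relies on (the
values below are illustrative assumptions, not taken from the driver):

	#include <assert.h>

	int main(void)
	{
		unsigned int idx = 1000;	/* arbitrary example index */
		unsigned int size = 64;		/* hypothetical power-of-2 size */
		unsigned int shift = 6;		/* what ilog2(64) would return */

		assert(idx / size == idx >> shift);		/* division -> shift */
		assert(idx * size == idx << shift);		/* multiply -> shift */
		assert(idx % size == (idx & (size - 1)));	/* modulo -> mask */
		return 0;
	}

These identities only hold when the size really is a power of two, which
is why the patch also tightens the wqebb_size sanity checks from
"non-zero" to is_power_of_2().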

Signed-off-by: Xue Chaojing <xuechaojing@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a421ce08
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c (+36 −19)
@@ -74,12 +74,6 @@
 			((void *)((cmdq_pages)->shadow_page_vaddr) \
 				+ (wq)->block_idx * CMDQ_BLOCK_SIZE)
 
-#define WQE_PAGE_OFF(wq, idx)   (((idx) & ((wq)->num_wqebbs_per_page - 1)) * \
-					(wq)->wqebb_size)
-
-#define WQE_PAGE_NUM(wq, idx)   (((idx) / ((wq)->num_wqebbs_per_page)) \
-					& ((wq)->num_q_pages - 1))
-
 #define WQ_PAGE_ADDR(wq, idx)           \
 			((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)])
 
@@ -93,6 +87,17 @@
 		(((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
 			/ (wq)->max_wqe_size)
 
+static inline int WQE_PAGE_OFF(struct hinic_wq *wq, u16 idx)
+{
+	return (((idx) & ((wq)->num_wqebbs_per_page - 1))
+		<< (wq)->wqebb_size_shift);
+}
+
+static inline int WQE_PAGE_NUM(struct hinic_wq *wq, u16 idx)
+{
+	return (((idx) >> ((wq)->wqebbs_per_page_shift))
+		& ((wq)->num_q_pages - 1));
+}
 /**
  * queue_alloc_page - allocate page for Queue
  * @hwif: HW interface for allocating DMA
@@ -513,10 +518,11 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
 	struct hinic_hwif *hwif = wqs->hwif;
 	struct pci_dev *pdev = hwif->pdev;
 	u16 num_wqebbs_per_page;
+	u16 wqebb_size_shift;
 	int err;
 
-	if (wqebb_size == 0) {
-		dev_err(&pdev->dev, "wqebb_size must be > 0\n");
+	if (!is_power_of_2(wqebb_size)) {
+		dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
 		return -EINVAL;
 	}
 
@@ -530,9 +536,11 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
 		return -EINVAL;
 	}
 
-	num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
+	wqebb_size_shift = ilog2(wqebb_size);
+	num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
+				>> wqebb_size_shift;
 
-	if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
+	if (!is_power_of_2(num_wqebbs_per_page)) {
 		dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
 		return -EINVAL;
 	}
@@ -550,7 +558,8 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
 	wq->q_depth = q_depth;
 	wq->max_wqe_size = max_wqe_size;
 	wq->num_wqebbs_per_page = num_wqebbs_per_page;
-
+	wq->wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
+	wq->wqebb_size_shift = wqebb_size_shift;
 	wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);
 	wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);
 	wq->block_paddr = WQ_BASE_PADDR(wqs, wq);
@@ -604,11 +613,13 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
 			 u16 q_depth, u16 max_wqe_size)
 {
 	struct pci_dev *pdev = hwif->pdev;
+	u16 num_wqebbs_per_page_shift;
 	u16 num_wqebbs_per_page;
+	u16 wqebb_size_shift;
 	int i, j, err = -ENOMEM;
 
-	if (wqebb_size == 0) {
-		dev_err(&pdev->dev, "wqebb_size must be > 0\n");
+	if (!is_power_of_2(wqebb_size)) {
+		dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
 		return -EINVAL;
 	}
 
@@ -622,9 +633,11 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
 		return -EINVAL;
 	}
 
-	num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
+	wqebb_size_shift = ilog2(wqebb_size);
+	num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
+				>> wqebb_size_shift;
 
-	if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
+	if (!is_power_of_2(num_wqebbs_per_page)) {
 		dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
 		return -EINVAL;
 	}
@@ -636,6 +649,7 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
 		dev_err(&pdev->dev, "Failed to allocate CMDQ page\n");
 		return err;
 	}
+	num_wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
 
 	for (i = 0; i < cmdq_blocks; i++) {
 		wq[i].hwif = hwif;
@@ -647,7 +661,8 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
 		wq[i].q_depth = q_depth;
 		wq[i].max_wqe_size = max_wqe_size;
 		wq[i].num_wqebbs_per_page = num_wqebbs_per_page;
-
+		wq[i].wqebbs_per_page_shift = num_wqebbs_per_page_shift;
+		wq[i].wqebb_size_shift = wqebb_size_shift;
 		wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
 		wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
 		wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);
@@ -741,7 +756,7 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
 
 	*prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx));
 
-	num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+	num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift;
 
 	if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) {
 		atomic_add(num_wqebbs, &wq->delta);
@@ -795,7 +810,8 @@ void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size)
  **/
 void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
 {
-	int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+	int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
+			>> wq->wqebb_size_shift;
 
 	atomic_add(num_wqebbs, &wq->cons_idx);
 
@@ -813,7 +829,8 @@ void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
 struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
 				    u16 *cons_idx)
 {
-	int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+	int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
+			>> wq->wqebb_size_shift;
 	u16 curr_cons_idx, end_cons_idx;
 	int curr_pg, end_pg;
 
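As a hedged worked example of the allocation-path arithmetic above (the
sizes are assumptions for illustration, and ALIGN() is restated in its
usual power-of-2 form):

	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	unsigned int wqe_size = 120;		/* assumed request, in bytes */
	unsigned int wqebb_size = 64;		/* assumed, must be power of 2 */
	unsigned int wqebb_size_shift = 6;	/* ilog2(64) */

	/* ALIGN(120, 64) = 128 and 128 >> 6 = 2, matching the old 128 / 64 */
	unsigned int num_wqebbs = ALIGN(wqe_size, wqebb_size) >> wqebb_size_shift;

The is_power_of_2() checks earlier in the patch guarantee that the shift
and the division it replaces always agree.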
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h (+2 −1)
@@ -39,7 +39,8 @@ struct hinic_wq {
 	u16             q_depth;
 	u16             max_wqe_size;
 	u16             num_wqebbs_per_page;
-
+	u16		wqebbs_per_page_shift;
+	u16		wqebb_size_shift;
 	/* The addresses are 64 bit in the HW */
 	u64             block_paddr;
 	void            **shadow_block_vaddr;
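Taken together, the two new fields let the rewritten WQE_PAGE_NUM() and
WQE_PAGE_OFF() locate a WQE with shifts and masks only. A worked example
under assumed values (num_wqebbs_per_page = 64 so wqebbs_per_page_shift
= 6, wqebb_size = 64 so wqebb_size_shift = 6, num_q_pages = 4), for WQE
index 70:

	page   = (70 >> 6) & (4 - 1)  = 1	/* old macro: (70 / 64) & 3 */
	offset = (70 & (64 - 1)) << 6 = 384	/* old macro: (70 & 63) * 64 */

Both agree with the multiply/divide versions they replace; only the
instructions used to compute them change.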