Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8de944bc authored by Jinwei Chen's avatar Jinwei Chen Committed by snandini
Browse files

qcacmn: support multiple pages prealloc for descriptor

support multiple pages prealloc for descriptor

Change-Id: I66d4cef3acf69acf4b6fc8e5a6d01c3d67921dca
CRs-Fixed: 2751338
parent 9d5e92b3
Loading
Loading
Loading
Loading
+7 −0
Original line number Diff line number Diff line
@@ -1111,6 +1111,13 @@ struct ol_if_ops {
	void (*dp_prealloc_put_consistent)(qdf_size_t size,
					   void *vaddr_unligned,
					   qdf_dma_addr_t paddr);
	void (*dp_get_multi_pages)(uint32_t desc_type,
				   size_t element_size,
				   uint16_t element_num,
				   struct qdf_mem_multi_page_t *pages,
				   bool cacheable);
	void (*dp_put_multi_pages)(uint32_t desc_type,
				   struct qdf_mem_multi_page_t *pages);
#endif
};

+43 −0
Original line number Diff line number Diff line
@@ -2108,4 +2108,47 @@ void dp_set_max_page_size(struct qdf_mem_multi_page_t *pages,
}
#endif /* MAX_ALLOC_PAGE_SIZE */

#ifdef DP_MEM_PRE_ALLOC
void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
				   enum dp_desc_type desc_type,
				   struct qdf_mem_multi_page_t *pages,
				   size_t element_size,
				   uint16_t element_num,
				   qdf_dma_context_t memctxt,
				   bool cacheable);

void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
				  enum dp_desc_type desc_type,
				  struct qdf_mem_multi_page_t *pages,
				  qdf_dma_context_t memctxt,
				  bool cacheable);

#else
/**
 * dp_desc_multi_pages_mem_alloc() - allocate multi-page descriptor memory
 * @soc: datapath soc handle
 * @desc_type: descriptor type the pages are requested for (unused here)
 * @pages: multi-page info to be filled in by the allocator
 * @element_size: size of one descriptor element
 * @element_num: number of descriptor elements
 * @memctxt: DMA memory context forwarded to the allocator
 * @cacheable: true to request cacheable pages
 *
 * Stub for builds without DP_MEM_PRE_ALLOC: forwards straight to
 * qdf_mem_multi_pages_alloc() with no pre-allocated pool involved.
 */
static inline
void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
				   enum dp_desc_type desc_type,
				   struct qdf_mem_multi_page_t *pages,
				   size_t element_size,
				   uint16_t element_num,
				   qdf_dma_context_t memctxt,
				   bool cacheable)
{
	qdf_mem_multi_pages_alloc(soc->osdev, pages,
				  element_size, element_num,
				  memctxt, cacheable);
}

/**
 * dp_desc_multi_pages_mem_free() - free multi-page descriptor memory
 * @soc: datapath soc handle
 * @desc_type: descriptor type the pages were allocated for (unused here)
 * @pages: multi-page info describing the memory to release
 * @memctxt: DMA memory context forwarded to the free routine
 * @cacheable: true if the pages were allocated cacheable
 *
 * Stub for builds without DP_MEM_PRE_ALLOC: forwards straight to
 * qdf_mem_multi_pages_free() with no pre-allocated pool involved.
 */
static inline
void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
				  enum dp_desc_type desc_type,
				  struct qdf_mem_multi_page_t *pages,
				  qdf_dma_context_t memctxt,
				  bool cacheable)
{
	qdf_mem_multi_pages_free(soc->osdev, pages,
				 memctxt, cacheable);
}

#endif


#endif /* #ifndef _DP_INTERNAL_H_ */
+62 −9
Original line number Diff line number Diff line
@@ -1412,6 +1412,59 @@ static inline void dp_srng_mem_free_consistent(struct dp_soc *soc,
	}
}

/**
 * dp_desc_multi_pages_mem_alloc() - allocate multi-page descriptor memory,
 *	preferring the platform pre-allocated pool when available
 * @soc: datapath soc handle
 * @desc_type: descriptor type the pages are requested for
 * @pages: multi-page info to be filled in by the allocator
 * @element_size: size of one descriptor element
 * @element_num: number of descriptor elements
 * @memctxt: DMA memory context used only on the dynamic-alloc fallback
 * @cacheable: true to request cacheable pages
 *
 * Tries the platform pre-alloc hook (dp_get_multi_pages) first; when the
 * hook is absent or returns no pages, falls back to dynamic allocation via
 * qdf_mem_multi_pages_alloc(). The outcome is recorded in @pages
 * (num_pages, is_mem_prealloc) and logged.
 */
void dp_desc_multi_pages_mem_alloc(struct dp_soc *soc,
				   enum dp_desc_type desc_type,
				   struct qdf_mem_multi_page_t *pages,
				   size_t element_size,
				   uint16_t element_num,
				   qdf_dma_context_t memctxt,
				   bool cacheable)
{
	if (!soc->cdp_soc.ol_ops->dp_get_multi_pages) {
		dp_warn("dp_get_multi_pages is null!");
		goto qdf;
	}

	pages->num_pages = 0;
	pages->is_mem_prealloc = 0;
	soc->cdp_soc.ol_ops->dp_get_multi_pages(desc_type,
						element_size,
						element_num,
						pages,
						cacheable);
	/* pre-alloc pool satisfied the request; skip dynamic fallback */
	if (pages->num_pages)
		goto end;

qdf:
	qdf_mem_multi_pages_alloc(soc->osdev, pages, element_size,
				  element_num, memctxt, cacheable);
end:
	/* %zu matches size_t element_size; %d would be a format mismatch */
	dp_info("%s desc_type %d element_size %zu element_num %d cacheable %d",
		pages->is_mem_prealloc ? "pre-alloc" : "dynamic-alloc",
		desc_type, element_size, element_num, cacheable);
}

/**
 * dp_desc_multi_pages_mem_free() - free multi-page descriptor memory
 * @soc: datapath soc handle
 * @desc_type: descriptor type the pages were allocated for
 * @pages: multi-page info describing the memory to release
 * @memctxt: DMA memory context used only on the dynamic-free path
 * @cacheable: true if the pages were allocated cacheable
 *
 * Returns pre-allocated pages to the platform pool via dp_put_multi_pages,
 * otherwise frees dynamically allocated pages via qdf_mem_multi_pages_free().
 */
void dp_desc_multi_pages_mem_free(struct dp_soc *soc,
				  enum dp_desc_type desc_type,
				  struct qdf_mem_multi_page_t *pages,
				  qdf_dma_context_t memctxt,
				  bool cacheable)
{
	if (!pages->is_mem_prealloc) {
		/* dynamically allocated: release through QDF */
		qdf_mem_multi_pages_free(soc->osdev, pages,
					 memctxt, cacheable);
		return;
	}

	if (!soc->cdp_soc.ol_ops->dp_put_multi_pages) {
		dp_warn("dp_put_multi_pages is null!");
		QDF_BUG(0);
		return;
	}

	/* hand pages back to the pre-alloc pool and scrub the bookkeeping */
	soc->cdp_soc.ol_ops->dp_put_multi_pages(desc_type, pages);
	qdf_mem_zero(pages, sizeof(*pages));
}

#else

static inline
@@ -2350,7 +2403,7 @@ static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
	pages = &soc->link_desc_pages;
	dp_set_max_page_size(pages, max_alloc_size);
	if (!dp_is_soc_reinit(soc)) {
		qdf_mem_multi_pages_alloc(soc->osdev,
		dp_desc_multi_pages_mem_alloc(soc, DP_HW_LINK_DESC_TYPE,
					      pages,
					      link_desc_size,
					      total_link_descs,
@@ -2521,7 +2574,7 @@ static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
	pages = &soc->link_desc_pages;
	qdf_minidump_remove(
		(void *)pages->dma_pages->page_v_addr_start);
	qdf_mem_multi_pages_free(soc->osdev,
	dp_desc_multi_pages_mem_free(soc, DP_HW_LINK_DESC_TYPE,
				     pages, 0, false);
	return QDF_STATUS_E_FAILURE;

@@ -2557,7 +2610,7 @@ static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
	pages = &soc->link_desc_pages;
	qdf_minidump_remove(
		(void *)pages->dma_pages->page_v_addr_start);
	qdf_mem_multi_pages_free(soc->osdev,
	dp_desc_multi_pages_mem_free(soc, DP_HW_LINK_DESC_TYPE,
				     pages, 0, false);
}

+5 −4
Original line number Diff line number Diff line
@@ -36,7 +36,8 @@ QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
	desc_size = sizeof(*rx_desc_elem);
	rx_desc_pool->elem_size = desc_size;
	if (!dp_is_soc_reinit(soc)) {
		qdf_mem_multi_pages_alloc(soc->osdev, &rx_desc_pool->desc_pages,
		dp_desc_multi_pages_mem_alloc(soc, DP_RX_DESC_BUF_TYPE,
					      &rx_desc_pool->desc_pages,
					      desc_size, num_elem, 0, true);
		if (!rx_desc_pool->desc_pages.num_pages) {
			qdf_err("Multi page alloc fail,size=%d, elem=%d",
@@ -163,7 +164,7 @@ void dp_rx_desc_pool_free(struct dp_soc *soc,
{
	if (qdf_unlikely(!(rx_desc_pool->desc_pages.cacheable_pages)))
		return;
	qdf_mem_multi_pages_free(soc->osdev,
	dp_desc_multi_pages_mem_free(soc, DP_RX_DESC_BUF_TYPE,
				     &rx_desc_pool->desc_pages, 0, true);
}
#else
+44 −43
Original line number Diff line number Diff line
@@ -102,7 +102,7 @@ QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
	desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
	tx_desc_pool->elem_size = desc_size;
	if (!dp_is_soc_reinit(soc))
		qdf_mem_multi_pages_alloc(soc->osdev,
		dp_desc_multi_pages_mem_alloc(soc, DP_TX_DESC_TYPE,
					      &tx_desc_pool->desc_pages,
					      desc_size, num_elem,
					      0, true);
@@ -111,7 +111,6 @@ QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
		goto fail_exit;
	}


	num_desc_per_page =
		tx_desc_pool->desc_pages.num_element_per_page;
	tx_desc_pool->freelist = (struct dp_tx_desc_s *)
@@ -144,7 +143,7 @@ QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
	return QDF_STATUS_SUCCESS;

free_tx_desc:
	qdf_mem_multi_pages_free(soc->osdev,
	dp_desc_multi_pages_mem_free(soc, DP_TX_DESC_TYPE,
				     &tx_desc_pool->desc_pages, 0, true);

fail_exit:
@@ -164,8 +163,9 @@ QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
	struct dp_tx_desc_pool_s *tx_desc_pool =
				&((soc)->tx_desc[(pool_id)]);

	qdf_mem_multi_pages_free(soc->osdev,
		&tx_desc_pool->desc_pages, 0, true);
	dp_desc_multi_pages_mem_free(soc, DP_TX_DESC_TYPE,
				     &tx_desc_pool->desc_pages,
				     0, true);
	TX_DESC_LOCK_DESTROY(&tx_desc_pool->lock);
	TX_DESC_POOL_MEMBER_CLEAN(tx_desc_pool);
	return QDF_STATUS_SUCCESS;
@@ -194,7 +194,8 @@ QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
	soc->tx_ext_desc[pool_id].elem_count = num_elem;
	memctx = qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx);
	if (!dp_is_soc_reinit(soc)) {
		qdf_mem_multi_pages_alloc(soc->osdev,
		dp_desc_multi_pages_mem_alloc(soc,
					      DP_TX_EXT_DESC_TYPE,
					      &soc->tx_ext_desc[pool_id].
					      desc_pages,
					      soc->tx_ext_desc[pool_id].elem_size,
@@ -218,13 +219,11 @@ QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
	soc->tx_ext_desc[pool_id].link_elem_size =
		sizeof(struct dp_tx_ext_desc_elem_s);
	if (!dp_is_soc_reinit(soc)) {
		qdf_mem_multi_pages_alloc(soc->osdev,
					  &soc->tx_ext_desc[pool_id].
					  desc_link_pages,
					  soc->tx_ext_desc[pool_id].
					  link_elem_size,
					  soc->tx_ext_desc[pool_id].
					  elem_count,
		dp_desc_multi_pages_mem_alloc(soc,
					      DP_TX_EXT_DESC_LINK_TYPE,
					      &soc->tx_ext_desc[pool_id].desc_link_pages,
					      soc->tx_ext_desc[pool_id].link_elem_size,
					      soc->tx_ext_desc[pool_id].elem_count,
					      0, true);
	}
	if (!soc->tx_ext_desc[pool_id].desc_link_pages.num_pages) {
@@ -285,11 +284,11 @@ QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
	return QDF_STATUS_SUCCESS;

free_ext_link_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
	dp_desc_multi_pages_mem_free(soc, DP_TX_EXT_DESC_LINK_TYPE,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

free_ext_desc_page:
	qdf_mem_multi_pages_free(soc->osdev,
	dp_desc_multi_pages_mem_free(soc, DP_TX_EXT_DESC_TYPE,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);
@@ -308,10 +307,10 @@ QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
 */
QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
{
	qdf_mem_multi_pages_free(soc->osdev,
	dp_desc_multi_pages_mem_free(soc, DP_TX_EXT_DESC_LINK_TYPE,
		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);

	qdf_mem_multi_pages_free(soc->osdev,
	dp_desc_multi_pages_mem_free(soc, DP_TX_EXT_DESC_TYPE,
		&soc->tx_ext_desc[pool_id].desc_pages,
		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
		false);
@@ -339,7 +338,8 @@ QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
	tso_desc_pool->num_free = 0;
	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_seg_elem_t));
	if (!dp_is_soc_reinit(soc))
		qdf_mem_multi_pages_alloc(soc->osdev,
		dp_desc_multi_pages_mem_alloc(soc,
					      DP_TX_TSO_DESC_TYPE,
					      &tso_desc_pool->desc_pages,
					      desc_size,
					      num_elem, 0, true);
@@ -369,7 +369,7 @@ QDF_STATUS dp_tx_tso_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
	return QDF_STATUS_SUCCESS;

free_tso_desc:
	qdf_mem_multi_pages_free(soc->osdev,
	dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
				     &tso_desc_pool->desc_pages, 0, true);

	return QDF_STATUS_E_FAULT;
@@ -390,7 +390,7 @@ void dp_tx_tso_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)

	qdf_spin_lock_bh(&tso_desc_pool->lock);

	qdf_mem_multi_pages_free(soc->osdev,
	dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_DESC_TYPE,
				     &tso_desc_pool->desc_pages, 0, true);
	tso_desc_pool->freelist = NULL;
	tso_desc_pool->num_free = 0;
@@ -417,7 +417,8 @@ QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
	tso_num_seg_pool->num_free = 0;
	desc_size = DP_TX_DESC_SIZE(sizeof(struct qdf_tso_num_seg_elem_t));
	if (!dp_is_soc_reinit(soc))
		qdf_mem_multi_pages_alloc(soc->osdev,
		dp_desc_multi_pages_mem_alloc(soc,
					      DP_TX_TSO_NUM_SEG_TYPE,
					      &tso_num_seg_pool->desc_pages,
					      desc_size,
					      num_elem, 0, true);
@@ -447,7 +448,7 @@ QDF_STATUS dp_tx_tso_num_seg_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
	return QDF_STATUS_SUCCESS;

fail:
	qdf_mem_multi_pages_free(soc->osdev,
	dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
				     &tso_num_seg_pool->desc_pages, 0, true);

	return QDF_STATUS_E_NOMEM;
@@ -468,7 +469,7 @@ void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id)
	tso_num_seg_pool = &soc->tx_tso_num_seg[pool_id];
	qdf_spin_lock_bh(&tso_num_seg_pool->lock);

	qdf_mem_multi_pages_free(soc->osdev,
	dp_desc_multi_pages_mem_free(soc, DP_TX_TSO_NUM_SEG_TYPE,
				     &tso_num_seg_pool->desc_pages, 0, true);
	tso_num_seg_pool->freelist = NULL;
	tso_num_seg_pool->num_free = 0;
Loading