Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 660391ec authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: gsi: ipa: Cut down doorbell during replenish buffers"

parents e150728e 3bfde979
Loading
Loading
Loading
Loading
+11 −5
Original line number | Diff line number | Diff line
@@ -3416,7 +3416,7 @@ int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
		return -GSI_STATUS_NODEV;
	}

	if (chan_hdl >= gsi_ctx->max_ch || !num_xfers || !xfer) {
	if (chan_hdl >= gsi_ctx->max_ch || (num_xfers && !xfer)) {
		GSIERR("bad params chan_hdl=%lu num_xfers=%u xfer=%pK\n",
				chan_hdl, num_xfers, xfer);
		return -GSI_STATUS_INVALID_PARAMS;
@@ -3436,6 +3436,11 @@ int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
		slock = &ctx->ring.slock;

	spin_lock_irqsave(slock, flags);

	/* allow only ring doorbell */
	if (!num_xfers)
		goto ring_doorbell;

	/*
	 * for GCI channels the responsibility is on the caller to make sure
	 * there is enough room in the TRE.
@@ -3471,11 +3476,12 @@ int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,

	ctx->stats.queued += num_xfers;

ring_doorbell:
	if (ring_db) {
		/* ensure TRE is set before ringing doorbell */
		wmb();

	if (ring_db)
		gsi_ring_chan_doorbell(ctx);
	}

	spin_unlock_irqrestore(slock, flags);

+46 −57
Original line number | Diff line number | Diff line
@@ -71,6 +71,7 @@

#define IPA_DEFAULT_SYS_YELLOW_WM 32
#define IPA_REPL_XFER_THRESH 20
#define IPA_REPL_XFER_MAX 36

#define IPA_TX_SEND_COMPL_NOP_DELAY_NS (2 * 1000 * 1000)

@@ -2051,7 +2052,7 @@ static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys)
	int ret;
	int idx = 0;
	int rx_len_cached = 0;
	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_THRESH];
	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;

	rx_len_cached = sys->len;
@@ -2095,15 +2096,13 @@ static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys)
		idx++;
		rx_len_cached++;
		/*
		 * gsi_xfer_elem_buffer has a size of IPA_REPL_XFER_THRESH.
		 * gsi_xfer_elem_buffer has a size of IPA_REPL_XFER_MAX.
		 * If this size is reached we need to queue the xfers.
		 */
		if (idx == IPA_REPL_XFER_THRESH) {
		if (idx == IPA_REPL_XFER_MAX) {
			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
				gsi_xfer_elem_array, true);
			if (ret == GSI_STATUS_SUCCESS) {
				sys->len = rx_len_cached;
			} else {
				gsi_xfer_elem_array, false);
			if (ret != GSI_STATUS_SUCCESS) {
				/* we don't expect this will happen */
				IPAERR("failed to provide buffer: %d\n", ret);
				WARN_ON(1);
@@ -2123,7 +2122,7 @@ static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys)
		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
				msecs_to_jiffies(1));
done:
	if (idx) {
	/* only ring doorbell once here */
	ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
		gsi_xfer_elem_array, true);
	if (ret == GSI_STATUS_SUCCESS) {
@@ -2134,7 +2133,6 @@ static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys)
		WARN_ON(1);
	}
}
}

static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
{
@@ -2143,7 +2141,7 @@ static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
	int ret;
	int idx = 0;
	int rx_len_cached = 0;
	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_THRESH];
	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;

	/* start replenish only when buffers go lower than the threshold */
@@ -2205,15 +2203,13 @@ static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
		idx++;
		rx_len_cached++;
		/*
		 * gsi_xfer_elem_buffer has a size of IPA_REPL_XFER_THRESH.
		 * gsi_xfer_elem_buffer has a size of IPA_REPL_XFER_MAX.
		 * If this size is reached we need to queue the xfers.
		 */
		if (idx == IPA_REPL_XFER_THRESH) {
		if (idx == IPA_REPL_XFER_MAX) {
			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
				gsi_xfer_elem_array, true);
			if (ret == GSI_STATUS_SUCCESS) {
				sys->len = rx_len_cached;
			} else {
				gsi_xfer_elem_array, false);
			if (ret != GSI_STATUS_SUCCESS) {
				/* we don't expect this will happen */
				IPAERR("failed to provide buffer: %d\n", ret);
				WARN_ON(1);
@@ -2233,7 +2229,7 @@ static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
		msecs_to_jiffies(1));
done:
	if (idx) {
	/* only ring doorbell once here */
	ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
		gsi_xfer_elem_array, true);
	if (ret == GSI_STATUS_SUCCESS) {
@@ -2244,7 +2240,6 @@ static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
		WARN_ON(1);
	}
}
}

static inline void __trigger_repl_work(struct ipa3_sys_context *sys)
{
@@ -2268,7 +2263,7 @@ static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
	struct ipa3_rx_pkt_wrapper *rx_pkt;
	int ret;
	int rx_len_cached = 0;
	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_THRESH];
	struct gsi_xfer_elem gsi_xfer_elem_array[IPA_REPL_XFER_MAX];
	u32 curr;
	int idx = 0;

@@ -2298,15 +2293,10 @@ static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
		 * gsi_xfer_elem_buffer has a size of IPA_REPL_XFER_THRESH.
		 * If this size is reached we need to queue the xfers.
		 */
		if (idx == IPA_REPL_XFER_THRESH) {
		if (idx == IPA_REPL_XFER_MAX) {
			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
				gsi_xfer_elem_array, true);
			if (ret == GSI_STATUS_SUCCESS) {
				/* ensure write is done before setting head */
				mb();
				atomic_set(&sys->repl->head_idx, curr);
				sys->len = rx_len_cached;
			} else {
				gsi_xfer_elem_array, false);
			if (ret != GSI_STATUS_SUCCESS) {
				/* we don't expect this will happen */
				IPAERR("failed to provide buffer: %d\n", ret);
				WARN_ON(1);
@@ -2315,8 +2305,7 @@ static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
			idx = 0;
		}
	}
	/* There can still be something left which has not been xfer yet */
	if (idx) {
	/* only ring doorbell once here */
	ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
			gsi_xfer_elem_array, true);
	if (ret == GSI_STATUS_SUCCESS) {
@@ -2329,7 +2318,7 @@ static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
		IPAERR("failed to provide buffer: %d\n", ret);
		WARN_ON(1);
	}
	}

	spin_unlock_bh(&sys->spinlock);

	__trigger_repl_work(sys);