Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0996a4d5 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge changes I1b01a789,Id12d4ad7,If4e72c5e,I4267a8f7,I567082f4 into msm-4.14

* changes:
  msm: ipa: fix aggregation calculation for > 16k
  msm: ipa: allocate gsi_queue_xfer params on stack
  msm: gsi: use LSB to ring doorbells
  msm: ipa: queue replenish work only when required
  msm: ipa: enable GSI interrupt moderation
parents 0d0a67ba d137965f
Loading
Loading
Loading
Loading
+20 −16
Original line number Diff line number Diff line
@@ -434,14 +434,6 @@ static void gsi_ring_evt_doorbell(struct gsi_evt_ctx *ctx)
{
	uint32_t val;

	/* write order MUST be MSB followed by LSB */
	val = ((ctx->ring.wp_local >> 32) &
			GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
			GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(ctx->id,
				gsi_ctx->per.ee));

	val = (ctx->ring.wp_local &
			GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK) <<
			GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT;
@@ -464,14 +456,6 @@ static void gsi_ring_chan_doorbell(struct gsi_chan_ctx *ctx)
		gsi_ring_evt_doorbell(ctx->evtr);
	ctx->ring.wp = ctx->ring.wp_local;

	/* write order MUST be MSB followed by LSB */
	val = ((ctx->ring.wp_local >> 32) &
			GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
			GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
			GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(ctx->props.ch_id,
				gsi_ctx->per.ee));

	val = (ctx->ring.wp_local &
			GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK) <<
			GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT;
@@ -1100,11 +1084,21 @@ static void gsi_init_evt_ring(struct gsi_evt_ring_props *props,
/*
 * gsi_prime_evt_ring() - reset an event ring and hand its full length to
 * the GSI hardware.
 *
 * Zeroes the ring memory, advances the local write pointer to one element
 * past the last slot (base + max_num_elem * elem_sz), and publishes it via
 * the doorbell registers.  The hardware requires the DOORBELL_1 (MSB) write
 * to precede the DOORBELL_0 (LSB) write, so the MSB is written here and the
 * LSB write is left to gsi_ring_evt_doorbell() — NOTE(review): assumes
 * gsi_ring_evt_doorbell() now writes only the LSB; confirm against its
 * current definition.  Runs under the ring spinlock with IRQs disabled.
 */
static void gsi_prime_evt_ring(struct gsi_evt_ctx *ctx)
{
	unsigned long flags;
	uint32_t val;

	spin_lock_irqsave(&ctx->ring.slock, flags);
	/* clear any stale events before re-arming the ring */
	memset((void *)ctx->ring.base_va, 0, ctx->ring.len);
	ctx->ring.wp_local = ctx->ring.base +
		ctx->ring.max_num_elem * ctx->ring.elem_sz;

	/* write order MUST be MSB followed by LSB */
	val = ((ctx->ring.wp_local >> 32) &
		GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
		GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(ctx->id,
		gsi_ctx->per.ee));

	/* LSB write rings the doorbell and makes the new wp visible to HW */
	gsi_ring_evt_doorbell(ctx);
	spin_unlock_irqrestore(&ctx->ring.slock, flags);
}
@@ -1966,6 +1960,14 @@ int gsi_start_channel(unsigned long chan_hdl)
		BUG();
	}

	/* write order MUST be MSB followed by LSB */
	val = ((ctx->ring.wp_local >> 32) &
		GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
		GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT;
	gsi_writel(val, gsi_ctx->base +
		GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(ctx->props.ch_id,
		gsi_ctx->per.ee));

	mutex_unlock(&gsi_ctx->mlock);

	return GSI_STATUS_SUCCESS;
@@ -2602,6 +2604,8 @@ int gsi_poll_channel(unsigned long chan_hdl,
	spin_lock_irqsave(&ctx->evtr->ring.slock, flags);
	if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) {
		/* update rp to see of we have anything new to process */
		gsi_writel(1 << ctx->evtr->id, gsi_ctx->base +
			GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee));
		rp = gsi_readl(gsi_ctx->base +
			GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
		rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
+56 −23
Original line number Diff line number Diff line
@@ -62,7 +62,8 @@
#define IPA_SIZE_DL_CSUM_META_TRAILER 8

#define IPA_GSI_MAX_CH_LOW_WEIGHT 15
#define IPA_GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */
#define IPA_GSI_EVT_RING_INT_MODT (16) /* 0.5ms under 32KHz clock */
#define IPA_GSI_EVT_RING_INT_MODC (20)

#define IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC 10
/* The below virtual channel cannot be used by any entity */
@@ -75,6 +76,8 @@

#define IPA_APPS_BW_FOR_PM 700

#define IPA_SEND_MAX_DESC (20)

static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys);
@@ -272,7 +275,7 @@ int ipa3_send(struct ipa3_sys_context *sys,
	struct ipa3_tx_pkt_wrapper *tx_pkt, *tx_pkt_first;
	struct ipahal_imm_cmd_pyld *tag_pyld_ret = NULL;
	struct ipa3_tx_pkt_wrapper *next_pkt;
	struct gsi_xfer_elem *gsi_xfer_elem_array = NULL;
	struct gsi_xfer_elem gsi_xfer[IPA_SEND_MAX_DESC];
	int i = 0;
	int j;
	int result;
@@ -289,6 +292,13 @@ int ipa3_send(struct ipa3_sys_context *sys,
			sys->ep->client);
		return -EFAULT;
	}
	if (unlikely(num_desc > IPA_SEND_MAX_DESC)) {
		IPAERR("max descriptors reached need=%d max=%d\n",
			num_desc, IPA_SEND_MAX_DESC);
		WARN_ON(1);
		return -EPERM;
	}

	if (unlikely(num_desc > gsi_ep_cfg->ipa_if_tlv)) {
		IPAERR("Too many chained descriptors need=%d max=%d\n",
			num_desc, gsi_ep_cfg->ipa_if_tlv);
@@ -296,11 +306,9 @@ int ipa3_send(struct ipa3_sys_context *sys,
		return -EPERM;
	}

	gsi_xfer_elem_array =
		kzalloc(num_desc * sizeof(struct gsi_xfer_elem),
		mem_flag);
	if (!gsi_xfer_elem_array)
		return -ENOMEM;

	/* initialize only the xfers we use */
	memset(gsi_xfer, 0, sizeof(gsi_xfer[0]) * num_desc);

	spin_lock_bh(&sys->spinlock);

@@ -372,45 +380,45 @@ int ipa3_send(struct ipa3_sys_context *sys,

		list_add_tail(&tx_pkt->link, &sys->head_desc_list);

		gsi_xfer_elem_array[i].addr = tx_pkt->mem.phys_base;
		gsi_xfer[i].addr = tx_pkt->mem.phys_base;

		/*
		 * Special treatment for immediate commands, where
		 * the structure of the descriptor is different
		 */
		if (desc[i].type == IPA_IMM_CMD_DESC) {
			gsi_xfer_elem_array[i].len = desc[i].opcode;
			gsi_xfer_elem_array[i].type =
			gsi_xfer[i].len = desc[i].opcode;
			gsi_xfer[i].type =
				GSI_XFER_ELEM_IMME_CMD;
		} else {
			gsi_xfer_elem_array[i].len = desc[i].len;
			gsi_xfer_elem_array[i].type =
			gsi_xfer[i].len = desc[i].len;
			gsi_xfer[i].type =
				GSI_XFER_ELEM_DATA;
		}

		if (i == (num_desc - 1)) {
			if (!sys->use_comm_evt_ring) {
				gsi_xfer_elem_array[i].flags |=
				gsi_xfer[i].flags |=
					GSI_XFER_FLAG_EOT;
				gsi_xfer_elem_array[i].flags |=
				gsi_xfer[i].flags |=
					GSI_XFER_FLAG_BEI;
			}
			gsi_xfer_elem_array[i].xfer_user_data =
			gsi_xfer[i].xfer_user_data =
				tx_pkt_first;
		} else {
			gsi_xfer_elem_array[i].flags |=
			gsi_xfer[i].flags |=
				GSI_XFER_FLAG_CHAIN;
		}
	}

	IPADBG_LOW("ch:%lu queue xfer\n", sys->ep->gsi_chan_hdl);
	result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, num_desc,
			gsi_xfer_elem_array, true);
			gsi_xfer, true);
	if (result != GSI_STATUS_SUCCESS) {
		IPAERR("GSI xfer failed.\n");
		goto failure;
	}
	kfree(gsi_xfer_elem_array);


	if (sys->use_comm_evt_ring && !sys->nop_pending) {
		sys->nop_pending = true;
@@ -458,8 +466,6 @@ int ipa3_send(struct ipa3_sys_context *sys,
		tx_pkt = next_pkt;
	}

	kfree(gsi_xfer_elem_array);

	spin_unlock_bh(&sys->spinlock);
	return -EFAULT;
}
@@ -1009,6 +1015,7 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
	*clnt_hdl = ipa_ep_idx;

	if (ep->sys->repl_hdlr == ipa3_fast_replenish_rx_cache) {
		atomic_set(&ep->sys->repl.pending, 0);
		ep->sys->repl.capacity = ep->sys->rx_pool_sz + 1;
		ep->sys->repl.cache = kcalloc(ep->sys->repl.capacity,
				sizeof(void *), GFP_KERNEL);
@@ -1488,6 +1495,7 @@ static void ipa3_wq_repl_rx(struct work_struct *work)
	u32 curr;

	sys = container_of(work, struct ipa3_sys_context, repl_work);
	atomic_set(&sys->repl.pending, 0);
	curr = atomic_read(&sys->repl.tail_idx);

begin:
@@ -1758,6 +1766,7 @@ static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys)
		gsi_xfer_elem_one.len = sys->rx_buff_sz;
		gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
		gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
		gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_BEI;
		gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
		gsi_xfer_elem_one.xfer_user_data = rx_pkt;

@@ -1860,6 +1869,7 @@ static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
		gsi_xfer_elem_one.len = sys->rx_buff_sz;
		gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
		gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
		gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_BEI;
		gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
		gsi_xfer_elem_one.xfer_user_data = rx_pkt;

@@ -1899,6 +1909,23 @@ static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
		msecs_to_jiffies(1));
}

/*
 * __trigger_repl_work() - queue the rx replenish worker when the repl
 * buffer cache is running low.
 *
 * Computes how many entries are currently stocked in the repl ring
 * (producer tail chasing consumer head) and schedules repl_work once the
 * stock drops below a quarter of the ring capacity.  The repl.pending
 * flag guarantees at most one outstanding work item; the worker clears
 * it before refilling (see ipa3_wq_repl_rx).
 */
static inline void __trigger_repl_work(struct ipa3_sys_context *sys)
{
	int tail, head, avail;

	/* a replenish work item is already queued - nothing to do */
	if (atomic_read(&sys->repl.pending))
		return;

	tail = atomic_read(&sys->repl.tail_idx);
	head = atomic_read(&sys->repl.head_idx);

	/*
	 * (tail - head) is negative once tail wraps behind head, and C's
	 * % operator keeps the dividend's sign; the negative result was
	 * then converted to unsigned by the comparison against the u32
	 * capacity/4 and compared as a huge value, so the worker was
	 * never queued in the wrapped state.  Bias by capacity before
	 * taking the remainder to get the true occupancy.
	 */
	avail = (tail - head + sys->repl.capacity) % sys->repl.capacity;

	if (avail < sys->repl.capacity / 4) {
		atomic_set(&sys->repl.pending, 1);
		queue_work(sys->repl_wq, &sys->repl_work);
	}
}

static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
{
	struct ipa3_rx_pkt_wrapper *rx_pkt;
@@ -1925,6 +1952,7 @@ static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
		gsi_xfer_elem_one.len = sys->rx_buff_sz;
		gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
		gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
		gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_BEI;
		gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
		gsi_xfer_elem_one.xfer_user_data = rx_pkt;

@@ -1953,7 +1981,7 @@ static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
	}
	spin_unlock_bh(&sys->spinlock);

	queue_work(sys->repl_wq, &sys->repl_work);
	__trigger_repl_work(sys);

	if (rx_len_cached - sys->len_pending_xfer
		<= IPA_DEFAULT_SYS_YELLOW_WM) {
@@ -3567,8 +3595,13 @@ static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
		ep->gsi_mem_info.evt_ring_base_vaddr =
			gsi_evt_ring_props.ring_base_vaddr;

		if (ep->napi_enabled) {
			gsi_evt_ring_props.int_modt = IPA_GSI_EVT_RING_INT_MODT;
			gsi_evt_ring_props.int_modc = IPA_GSI_EVT_RING_INT_MODC;
		} else {
			gsi_evt_ring_props.int_modt = IPA_GSI_EVT_RING_INT_MODT;
			gsi_evt_ring_props.int_modc = 1;
		}

		IPADBG("client=%d moderation threshold cycles=%u cnt=%u\n",
			ep->client,
+1 −0
Original line number Diff line number Diff line
@@ -633,6 +633,7 @@ struct ipa3_repl_ctx {
	atomic_t head_idx;
	atomic_t tail_idx;
	u32 capacity;
	atomic_t pending;
};

/**
+6 −14
Original line number Diff line number Diff line
@@ -2228,6 +2228,7 @@ int ipa3_init_hw(void)
{
	u32 ipa_version = 0;
	u32 val;
	struct ipahal_reg_counter_cfg cnt_cfg;

	/* Read IPA version and make sure we have access to the registers */
	ipa_version = ipahal_read_reg(IPA_VERSION);
@@ -2273,6 +2274,10 @@ int ipa3_init_hw(void)

	ipa3_cfg_qsb();

	/* set granularity for 0.5 msec*/
	cnt_cfg.aggr_granularity = GRAN_VALUE_500_USEC;
	ipahal_write_reg_fields(IPA_COUNTER_CFG, &cnt_cfg);

	return 0;
}

@@ -4489,21 +4494,8 @@ u32 ipa3_get_num_pipes(void)
int ipa3_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count)
{
	int res = -1;
	u32 limit;

	/* checking if IPA-HW can support */
	limit = ipahal_aggr_get_max_byte_limit();
	if ((agg_size >> 10) > limit) {
		IPAERR("IPA-AGG byte limit %d\n", limit);
		IPAERR("exceed aggr_byte_limit\n");
		return res;
	}
	limit = ipahal_aggr_get_max_pkt_limit();
	if (agg_count > limit) {
		IPAERR("IPA-AGG pkt limit %d\n", limit);
		IPAERR("exceed aggr_pkt_limit\n");
		return res;
	}
	/* ipahal will adjust limits based on HW capabilities */

	if (ipa3_ctx) {
		ipa3_ctx->ipa_client_apps_wan_cons_agg_gro = true;
+43 −2
Original line number Diff line number Diff line
@@ -36,6 +36,7 @@ static const char *ipareg_name_to_str[IPA_REG_MAX] = {
	__stringify(IPA_SPARE_REG_2),
	__stringify(IPA_COMP_CFG),
	__stringify(IPA_STATE_AGGR_ACTIVE),
	__stringify(IPA_COUNTER_CFG),
	__stringify(IPA_ENDP_INIT_HDR_n),
	__stringify(IPA_ENDP_INIT_HDR_EXT_n),
	__stringify(IPA_ENDP_INIT_AGGR_n),
@@ -1074,6 +1075,9 @@ static void ipareg_construct_endp_init_aggr_n(enum ipahal_reg_name reg,
{
	struct ipa_ep_cfg_aggr *ep_aggr =
		(struct ipa_ep_cfg_aggr *)fields;
	u32 byte_limit;
	u32 pkt_limit;


	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_en,
		IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT,
@@ -1083,7 +1087,12 @@ static void ipareg_construct_endp_init_aggr_n(enum ipahal_reg_name reg,
		IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT,
		IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK);

	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_byte_limit,
	/* make sure aggregation size does not cross HW boundaries */
	byte_limit = (ep_aggr->aggr_byte_limit >
		ipahal_aggr_get_max_byte_limit()) ?
		ipahal_aggr_get_max_byte_limit() :
		ep_aggr->aggr_byte_limit;
	IPA_SETFIELD_IN_REG(*val, byte_limit,
		IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT,
		IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK);

@@ -1091,7 +1100,12 @@ static void ipareg_construct_endp_init_aggr_n(enum ipahal_reg_name reg,
		IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT,
		IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK);

	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_pkt_limit,
	/* make sure aggregation size does not cross HW boundaries */
	pkt_limit = (ep_aggr->aggr_pkt_limit >
		ipahal_aggr_get_max_pkt_limit()) ?
		ipahal_aggr_get_max_pkt_limit() :
		ep_aggr->aggr_pkt_limit;
	IPA_SETFIELD_IN_REG(*val, pkt_limit,
		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT,
		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK);

@@ -1449,6 +1463,30 @@ static void ipareg_parse_hps_queue_weights(
		IPA_HPS_FTCH_ARB_QUEUE_WEIGHTS_RX_HPS_QUEUE_WEIGHT_3_BMSK);
}

/*
 * ipareg_construct_counter_cfg() - pack an abstracted counter
 * configuration into the raw IPA_COUNTER_CFG register value.
 * Only the aggregation-granularity field is populated.
 */
static void ipareg_construct_counter_cfg(enum ipahal_reg_name reg,
	const void *fields, u32 *val)
{
	const struct ipahal_reg_counter_cfg *cfg = fields;

	IPA_SETFIELD_IN_REG(*val, cfg->aggr_granularity,
		IPA_COUNTER_CFG_AGGR_GRANULARITY_SHFT,
		IPA_COUNTER_CFG_AGGR_GRANULARITY_BMSK);
}

/*
 * ipareg_parse_counter_cfg() - unpack a raw IPA_COUNTER_CFG register
 * value into the abstracted structure.  The structure is cleared first
 * so fields this register does not carry read back as zero.
 */
static void ipareg_parse_counter_cfg(
	enum ipahal_reg_name reg, void *fields, u32 val)
{
	struct ipahal_reg_counter_cfg *cfg = fields;

	memset(cfg, 0, sizeof(*cfg));
	cfg->aggr_granularity = IPA_GETFIELD_FROM_REG(val,
		IPA_COUNTER_CFG_AGGR_GRANULARITY_SHFT,
		IPA_COUNTER_CFG_AGGR_GRANULARITY_BMSK);
}

/*
 * struct ipahal_reg_obj - Register H/W information for specific IPA version
 * @construct - CB to construct register value from abstracted structure
@@ -1741,6 +1779,9 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
	[IPA_HW_v3_5][IPA_HPS_FTCH_ARB_QUEUE_WEIGHT] = {
		ipareg_construct_hps_queue_weights,
		ipareg_parse_hps_queue_weights, 0x000005a4, 0},
	[IPA_HW_v3_5][IPA_COUNTER_CFG] = {
		ipareg_construct_counter_cfg, ipareg_parse_counter_cfg,
		0x000001F0, 0 },

	/* IPAv4.0 */
	[IPA_HW_v4_0][IPA_ENDP_INIT_CTRL_n] = {
Loading