
Commit 792e8f6f authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server

Merge "msm: gsi: Change user_data to support OOO completions"

parents 64dd490f 06ae5b7b
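
In short, the change replaces each bare `void *` entry in a channel's user_data array with a {valid, p} pair and, for GCI (coalescing) channels, oversizes the array by GSI_VEID_MAX "escape" slots, so a completion that arrives out of order (OOO) can still be matched to its transfer through the cookie carried in the TRE. A minimal standalone sketch of that bookkeeping (sizes and names here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

#define RING_ELEMS 8   /* stand-in for ring_len / re_size */
#define VEID_MAX 4     /* stand-in for GSI_VEID_MAX */

struct user_data_slot {    /* mirrors struct gsi_user_data */
	bool valid;        /* slot owned by an in-flight transfer */
	void *p;           /* caller's per-transfer pointer */
};

/* GCI channels allocate RING_ELEMS + VEID_MAX slots: one per ring
 * element plus escape slots for out-of-order completions. */
static struct user_data_slot slots[RING_ELEMS + VEID_MAX];

/* Completion path: the event's cookie names the slot to consume. */
static void *complete(unsigned int cookie)
{
	void *p = slots[cookie].p;

	slots[cookie].valid = false;   /* slot may now be reused */
	return p;
}

int main(void)
{
	int payload = 42;

	slots[3].valid = true;         /* submit path marks the slot... */
	slots[3].p = &payload;         /* ...and stores the pointer */
	printf("%d\n", *(int *)complete(3));
	return 0;
}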
+123 −17
@@ -466,7 +466,8 @@ static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
	}

	ch_ctx = &gsi_ctx->chan[ch_id];
-	if (WARN_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI))
+	if (WARN_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI &&
+		ch_ctx->props.prot != GSI_CHAN_PROT_GCI))
		return;

	if (evt->type != GSI_XFER_COMPL_TYPE_GCI) {
@@ -493,7 +494,10 @@ static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,

	ch_ctx->stats.completed++;

-	notify->xfer_user_data = ch_ctx->user_data[rp_idx];
+	WARN_ON(!ch_ctx->user_data[rp_idx].valid);
+	notify->xfer_user_data = ch_ctx->user_data[rp_idx].p;
+	ch_ctx->user_data[rp_idx].valid = false;

	notify->chan_user_data = ch_ctx->props.chan_user_data;
	notify->evt_id = evt->code;
	notify->bytes_xfered = evt->len;
@@ -525,6 +529,7 @@ static void gsi_ring_evt_doorbell(struct gsi_evt_ctx *ctx)
{
	uint32_t val;

+	ctx->ring.wp = ctx->ring.wp_local;
	val = (ctx->ring.wp_local &
			GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK) <<
			GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT;
@@ -1485,6 +1490,55 @@ static int gsi_validate_evt_ring_props(struct gsi_evt_ring_props *props)
	return GSI_STATUS_SUCCESS;
}

+/**
+ * gsi_cleanup_xfer_user_data: cleanup the user data array using the callback
+ *	passed by the IPA driver. This must be done in GSI since only GSI
+ *	knows which TREs are in use. However, IPA is the one that does the
+ *	cleaning, therefore we pass a callback from IPA and call it with
+ *	params from GSI
+ *
+ * @chan_hdl: hdl of the gsi channel whose user data array is to be cleaned
+ * @cleanup_cb: callback used to clean the user data array. takes 2 inputs
+ *	@chan_user_data: ipa_sys_context of the gsi_channel
+ *	@xfer_user_data: user data array element (rx_pkt wrapper)
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int gsi_cleanup_xfer_user_data(unsigned long chan_hdl,
+	void (*cleanup_cb)(void *chan_user_data, void *xfer_user_data))
+{
+	struct gsi_chan_ctx *ctx;
+	uint64_t i;
+	uint16_t rp_idx;
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	/* for coalescing, traverse the whole array */
+	if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
+		size_t user_data_size =
+			ctx->ring.max_num_elem + 1 + GSI_VEID_MAX;
+		for (i = 0; i < user_data_size; i++) {
+			if (ctx->user_data[i].valid)
+				cleanup_cb(ctx->props.chan_user_data,
+					ctx->user_data[i].p);
+		}
+	} else {
+		/* for non-coalescing, clean between RP and WP */
+		while (ctx->ring.rp_local != ctx->ring.wp_local) {
+			rp_idx = gsi_find_idx_from_addr(&ctx->ring,
+				ctx->ring.rp_local);
+			WARN_ON(!ctx->user_data[rp_idx].valid);
+			cleanup_cb(ctx->props.chan_user_data,
+				ctx->user_data[rp_idx].p);
+			gsi_incr_ring_rp(&ctx->ring);
+		}
+	}
+	return 0;
+}
+
int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
		unsigned long *evt_ring_hdl)
{
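
For orientation, here is the callback contract described by the kerneldoc above, sketched from the caller's side. The `my_` types and names are hypothetical stand-ins, not actual IPA symbols; the real callback is the one installed through `gsi_chan_props.cleanup_cb`:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the IPA-side types named in the comment. */
struct my_sys_ctx { const char *name; };   /* "ipa_sys_context" */
struct my_rx_pkt { void *buf; };           /* "rx_pkt wrapper" */

/* Matches the contract:
 *   void (*cleanup_cb)(void *chan_user_data, void *xfer_user_data)
 * chan_user_data is the per-channel context; xfer_user_data is the
 * per-transfer element GSI found still valid at reset time. */
static void my_cleanup_cb(void *chan_user_data, void *xfer_user_data)
{
	struct my_sys_ctx *sys = chan_user_data;
	struct my_rx_pkt *pkt = xfer_user_data;

	printf("%s: releasing stale rx packet\n", sys->name);
	free(pkt->buf);
	free(pkt);
}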
@@ -2163,7 +2217,8 @@ int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
	int ee;
	enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE;
	uint8_t erindex;
-	void **user_data;
+	struct gsi_user_data *user_data;
+	size_t user_data_size;

	if (!gsi_ctx) {
		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
@@ -2187,10 +2242,11 @@ int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
			return -GSI_STATUS_INVALID_PARAMS;
		}

-		if (props->prot != GSI_CHAN_PROT_GCI &&
-			atomic_read(
+		if (atomic_read(
			&gsi_ctx->evtr[props->evt_ring_hdl].chan_ref_cnt) &&
-			gsi_ctx->evtr[props->evt_ring_hdl].props.exclusive) {
+			gsi_ctx->evtr[props->evt_ring_hdl].props.exclusive &&
+			gsi_ctx->evtr[props->evt_ring_hdl].chan->props.prot !=
+			GSI_CHAN_PROT_GCI) {
			GSIERR("evt ring=%lu exclusively used by ch_hdl=%pK\n",
				props->evt_ring_hdl, chan_hdl);
			return -GSI_STATUS_UNSUPPORTED_OP;
@@ -2202,10 +2258,18 @@ int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
		GSIERR("chan %d already allocated\n", props->ch_id);
		return -GSI_STATUS_NODEV;
	}

	memset(ctx, 0, sizeof(*ctx));
+	user_data_size = props->ring_len / props->re_size;
+	/*
+	 * GCI channels might have OOO event completions up to GSI_VEID_MAX.
+	 * user_data needs to be large enough to accommodate those.
+	 * TODO: increase user data size if GSI_VEID_MAX is not enough
+	 */
+	if (props->prot == GSI_CHAN_PROT_GCI)
+		user_data_size += GSI_VEID_MAX;

	user_data = devm_kzalloc(gsi_ctx->dev,
-		(props->ring_len / props->re_size) * sizeof(void *),
+		user_data_size * sizeof(*user_data),
		GFP_KERNEL);
	if (user_data == NULL) {
		GSIERR("context not allocated\n");
@@ -2256,7 +2320,9 @@ int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
	}
	ctx->evtr = &gsi_ctx->evtr[erindex];
	atomic_inc(&ctx->evtr->chan_ref_cnt);
-	if (props->prot != GSI_CHAN_PROT_GCI && ctx->evtr->props.exclusive)
+	if (props->prot != GSI_CHAN_PROT_GCI &&
+		ctx->evtr->props.exclusive &&
+		atomic_read(&ctx->evtr->chan_ref_cnt) == 1)
		ctx->evtr->chan = ctx;

	gsi_program_chan_ctx(props, gsi_ctx->per.ee, erindex);
@@ -2882,6 +2948,9 @@ int gsi_reset_channel(unsigned long chan_hdl)
		goto reset;
	}

+	if (ctx->props.cleanup_cb)
+		gsi_cleanup_xfer_user_data(chan_hdl, ctx->props.cleanup_cb);
+
	gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee,
			ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
	gsi_init_chan_ring(&ctx->props, &ctx->ring);
@@ -3111,7 +3180,8 @@ int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
	ctx = &gsi_ctx->chan[chan_hdl];
	ee = gsi_ctx->per.ee;

-	if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
+	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
+		ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}
@@ -3167,7 +3237,37 @@ int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
}
EXPORT_SYMBOL(gsi_is_channel_empty);

-int __gsi_populate_gci_tre(struct gsi_chan_ctx *ctx, struct gsi_xfer_elem *xfer)
+int __gsi_get_gci_cookie(struct gsi_chan_ctx *ctx, uint16_t idx)
+{
+	int i;
+	int end;
+
+	if (!ctx->user_data[idx].valid) {
+		ctx->user_data[idx].valid = true;
+		return idx;
+	}
+
+	/*
+	 * at this point we need to find an "escape buffer" for the cookie,
+	 * as the user_data in this spot is in use. This happens if the TRE
+	 * at idx is not completed yet and is getting reused by a new TRE.
+	 */
+	ctx->stats.userdata_in_use++;
+	for (i = 0; i < GSI_VEID_MAX; i++) {
+		end = ctx->ring.max_num_elem + 1;
+		if (!ctx->user_data[end + i].valid) {
+			ctx->user_data[end + i].valid = true;
+			return end + i;
+		}
+	}
+
+	/* TODO: Increase escape buffer size if we hit this */
+	GSIERR("user_data is full\n");
+	return -EPERM;
+}
+
+int __gsi_populate_gci_tre(struct gsi_chan_ctx *ctx,
+	struct gsi_xfer_elem *xfer)
{
	struct gsi_gci_tre gci_tre;
	struct gsi_gci_tre *tre_gci_ptr;
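
A toy walk-through of the escape-buffer scheme above. Sizes are illustrative; note that for the completion path to find the right element, the pointer must be stored at the slot the returned cookie names, which is what this sketch does:

#include <stdbool.h>
#include <stdio.h>

#define ELEMS 4      /* ring elements, illustrative */
#define VEID_MAX 4   /* escape slots, illustrative */

static struct { bool valid; void *p; } ud[ELEMS + VEID_MAX];

/* Mirrors __gsi_get_gci_cookie: prefer the TRE's own index, fall
 * back to an escape slot while that index is still in flight. */
static int get_cookie(int idx)
{
	int i;

	if (!ud[idx].valid) {
		ud[idx].valid = true;
		return idx;
	}
	for (i = 0; i < VEID_MAX; i++) {
		if (!ud[ELEMS + i].valid) {
			ud[ELEMS + i].valid = true;
			return ELEMS + i;
		}
	}
	return -1;   /* escape buffer exhausted */
}

int main(void)
{
	char a, b;
	int c1 = get_cookie(0);   /* slot 0 free -> cookie 0 */
	int c2;

	ud[c1].p = &a;            /* store at the cookie's slot */
	c2 = get_cookie(0);       /* slot 0 busy -> escape slot */
	ud[c2].p = &b;

	printf("cookies: %d, %d\n", c1, c2);   /* prints: 0, 4 */
	/* completions may now land in either order; each event's
	 * cookie selects the matching user_data entry */
	return 0;
}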
@@ -3193,11 +3293,13 @@ int __gsi_populate_gci_tre(struct gsi_chan_ctx *ctx, struct gsi_xfer_elem *xfer)
	gci_tre.buffer_ptr = xfer->addr;
	gci_tre.buf_len = xfer->len;
	gci_tre.re_type = GSI_RE_COAL;
-	gci_tre.cookie = idx;
+	gci_tre.cookie = __gsi_get_gci_cookie(ctx, idx);
+	if (gci_tre.cookie < 0)
+		return -EPERM;

	/* write the TRE to ring */
	*tre_gci_ptr = gci_tre;
-	ctx->user_data[idx] = xfer->xfer_user_data;
+	ctx->user_data[idx].p = xfer->xfer_user_data;

	return 0;
}
@@ -3235,7 +3337,8 @@ int __gsi_populate_tre(struct gsi_chan_ctx *ctx,

	/* write the TRE to ring */
	*tre_ptr = tre;
-	ctx->user_data[idx] = xfer->xfer_user_data;
+	ctx->user_data[idx].valid = true;
+	ctx->user_data[idx].p = xfer->xfer_user_data;

	return 0;
}
@@ -3338,7 +3441,8 @@ int gsi_start_xfer(unsigned long chan_hdl)

	ctx = &gsi_ctx->chan[chan_hdl];

-	if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
+	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
+		ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}
@@ -3393,7 +3497,8 @@ int gsi_poll_n_channel(unsigned long chan_hdl,
	ctx = &gsi_ctx->chan[chan_hdl];
	ee = gsi_ctx->per.ee;

-	if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
+	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
+		ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}
@@ -3455,7 +3560,8 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)

	ctx = &gsi_ctx->chan[chan_hdl];

-	if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
+	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
+		ctx->props.prot != GSI_CHAN_PROT_GCI) {
		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
		return -GSI_STATUS_UNSUPPORTED_OP;
	}
+13 −1
@@ -121,14 +121,26 @@ struct gsi_chan_stats {
	unsigned long invalid_tre_error;
	unsigned long poll_ok;
	unsigned long poll_empty;
+	unsigned long userdata_in_use;
	struct gsi_chan_dp_stats dp;
};

+/**
+ * struct gsi_user_data - user_data element pointed to by the TRE
+ * @valid: true means the element is in use and must be cleaned;
+ *	false means it is free to overwrite
+ * @p: the user data pointer stored in this element
+ */
+struct gsi_user_data {
+	bool valid;
+	void *p;
+};
+
struct gsi_chan_ctx {
	struct gsi_chan_props props;
	enum gsi_chan_state state;
	struct gsi_ring_ctx ring;
-	void **user_data;
+	struct gsi_user_data *user_data;
	struct gsi_evt_ctx *evtr;
	struct mutex mlock;
	struct completion compl;
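
Read together with the gsi.c hunks, the valid flag follows one lifecycle; condensed here for reference, entirely from the hunks above:

/* Lifecycle of gsi_user_data.valid, condensed from this change:
 *
 * submit   (__gsi_populate_tre / __gsi_get_gci_cookie):
 *     user_data[i].valid = true;  user_data[i].p = xfer_user_data;
 * complete (gsi_process_chan):
 *     WARN_ON(!user_data[rp_idx].valid);
 *     notify->xfer_user_data = user_data[rp_idx].p;
 *     user_data[rp_idx].valid = false;
 * reset    (gsi_cleanup_xfer_user_data):
 *     every slot still marked valid is handed to props.cleanup_cb.
 */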
+1 −0
@@ -278,6 +278,7 @@ static void gsi_dump_ch_stats(struct gsi_chan_ctx *ctx)
	if (ctx->evtr)
		PRT_STAT("compl_evt=%lu\n",
			ctx->evtr->stats.completed);
	PRT_STAT("userdata_in_use=%lu\n", ctx->stats.userdata_in_use);

	PRT_STAT("ch_below_lo=%lu\n", ctx->stats.dp.ch_below_lo);
	PRT_STAT("ch_below_hi=%lu\n", ctx->stats.dp.ch_below_hi);
+1 −3
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
 */


@@ -1089,11 +1089,9 @@ void ipa3_dma_async_memcpy_notify_cb(void *priv
	struct ipa3_dma_xfer_wrapper *xfer_descr_expected;
	struct ipa3_sys_context *sys;
	unsigned long flags;
-	struct ipa_mem_buffer *mem_info;

	IPADMA_FUNC_ENTRY();

-	mem_info = (struct ipa_mem_buffer *)data;
	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
	if (ep_idx < 0) {
		IPADMA_ERR("IPA Client mapping failed\n");
+223 −160

Preview size limit exceeded, changes collapsed.