Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f9a62269 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "msm: kgsl: Add support for KGSL_CMDBATCH_MARKER"

parents 4c8217a2 763f6aae
Loading
Loading
Loading
Loading
+163 −43
Original line number Diff line number Diff line
@@ -19,6 +19,7 @@

#include "kgsl.h"
#include "kgsl_cffdump.h"
#include "kgsl_sharedmem.h"
#include "adreno.h"
#include "adreno_ringbuffer.h"
#include "adreno_trace.h"
@@ -147,28 +148,94 @@ static int fault_detect_read_compare(struct kgsl_device *device)
}

/**
 * adreno_dispatcher_get_cmdbatch() - Get a new command from a context queue
 * @drawctxt: Pointer to the adreno draw context
 * _retire_marker() - Retire a marker command batch without sending it to the
 * hardware
 * @cmdbatch: Pointer to the cmdbatch to retire
 *
 * Dequeue a new command batch from the context list
 * In some cases marker commands can be retired by the software without going to
 * the GPU.  In those cases, update the memstore from the CPU, kick off the
 * event engine to handle expired events and destroy the command batch.
 */
static inline struct kgsl_cmdbatch *adreno_dispatcher_get_cmdbatch(
		struct adreno_context *drawctxt)
static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
{
	struct kgsl_context *context = cmdbatch->context;
	struct kgsl_device *device = context->device;

	/*
	 * Write the start and end timestamp to the memstore to keep the
	 * accounting sane
	 */
	kgsl_sharedmem_writel(device, &device->memstore,
		KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
		cmdbatch->timestamp);

	kgsl_sharedmem_writel(device, &device->memstore,
		KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
		cmdbatch->timestamp);


	/* Retire pending GPU events for the object */
	kgsl_process_event_group(device, &context->events);

	trace_adreno_cmdbatch_retired(cmdbatch, -1);
	kgsl_cmdbatch_destroy(cmdbatch);
}

/*
 * return true if this is a marker command and the dependent timestamp has
 * retired
 */
static bool _marker_expired(struct kgsl_cmdbatch *cmdbatch)
{
	return (cmdbatch->flags & KGSL_CMDBATCH_MARKER) &&
		kgsl_check_timestamp(cmdbatch->device, cmdbatch->context,
			cmdbatch->marker_timestamp);
}

/* Remove the head entry from the context command queue (caller holds lock) */
static inline void _pop_cmdbatch(struct adreno_context *drawctxt)
{
	drawctxt->queued--;
	drawctxt->cmdqueue_head = CMDQUEUE_NEXT(drawctxt->cmdqueue_head,
		ADRENO_CONTEXT_CMDQUEUE_SIZE);
}

static struct kgsl_cmdbatch *_get_cmdbatch(struct adreno_context *drawctxt)
{
	struct kgsl_cmdbatch *cmdbatch = NULL;
	int pending;
	bool pending = false;

	if (drawctxt->cmdqueue_head == drawctxt->cmdqueue_tail)
		return NULL;

	mutex_lock(&drawctxt->mutex);
	if (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
	cmdbatch = drawctxt->cmdqueue[drawctxt->cmdqueue_head];

	/* Check to see if this is a marker we can skip over */
	if (cmdbatch->flags & KGSL_CMDBATCH_MARKER) {
		if (_marker_expired(cmdbatch)) {
			_pop_cmdbatch(drawctxt);
			_retire_marker(cmdbatch);

			/* Get the next thing in the queue */
			return _get_cmdbatch(drawctxt);
		}

		/*
		 * Don't dequeue a cmdbatch that is still waiting for other
		 * events
		 * If the marker isn't expired but the SKIP bit is set
		 * then there are real commands following this one in
		 * the queue.  This means that we need to dispatch the
		 * command so that we can keep the timestamp accounting
		 * correct.  If skip isn't set then we block this queue
		 * until the dependent timestamp expires
		 */

		if (!test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv))
			pending = true;
	}

	spin_lock(&cmdbatch->lock);
		pending = list_empty(&cmdbatch->synclist) ? 0 : 1;
	if (!list_empty(&cmdbatch->synclist))
		pending = true;
	spin_unlock(&cmdbatch->lock);

	/*
	 * If changes are pending and the canary timer hasn't been
@@ -181,28 +248,33 @@ static inline struct kgsl_cmdbatch *adreno_dispatcher_get_cmdbatch(
		 */
		if (!timer_pending(&cmdbatch->timer))
			mod_timer(&cmdbatch->timer, jiffies + (5 * HZ));
			spin_unlock(&cmdbatch->lock);
		} else {

		return ERR_PTR(-EAGAIN);
	}

	/*
	 * Otherwise, delete the timer to make sure it is good
	 * and dead before queuing the buffer
	 */
			spin_unlock(&cmdbatch->lock);
	del_timer_sync(&cmdbatch->timer);
		}

		if (pending) {
			cmdbatch = ERR_PTR(-EAGAIN);
			goto done;
	_pop_cmdbatch(drawctxt);
	return cmdbatch;
}

		drawctxt->cmdqueue_head =
			CMDQUEUE_NEXT(drawctxt->cmdqueue_head,
			ADRENO_CONTEXT_CMDQUEUE_SIZE);
		drawctxt->queued--;
	}
/**
 * adreno_dispatcher_get_cmdbatch() - Get a new command from a context queue
 * @drawctxt: Pointer to the adreno draw context
 *
 * Dequeue a new command batch from the context list
 */
static struct kgsl_cmdbatch *adreno_dispatcher_get_cmdbatch(
		struct adreno_context *drawctxt)
{
	struct kgsl_cmdbatch *cmdbatch;

done:
	mutex_lock(&drawctxt->mutex);
	cmdbatch = _get_cmdbatch(drawctxt);
	mutex_unlock(&drawctxt->mutex);

	return cmdbatch;
@@ -344,7 +416,7 @@ static int sendcmd(struct adreno_device *adreno_dev,
		return ret;
	}

	trace_adreno_cmdbatch_submitted(cmdbatch, dispatcher->inflight);
	trace_adreno_cmdbatch_submitted(cmdbatch, (int) dispatcher->inflight);

	dispatcher->cmdqueue[dispatcher->tail] = cmdbatch;
	dispatcher->tail = (dispatcher->tail + 1) %
@@ -747,6 +819,36 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,

	cmdbatch->timestamp = *timestamp;

	if (cmdbatch->flags & KGSL_CMDBATCH_MARKER) {

		/*
		 * See if we can fastpath this thing - if nothing is queued
		 * and nothing is inflight retire without bothering the GPU
		 */

		if (!drawctxt->queued && kgsl_check_timestamp(cmdbatch->device,
			cmdbatch->context, drawctxt->inflight_timestamp)) {
			trace_adreno_cmdbatch_queued(cmdbatch,
				drawctxt->queued);

			_retire_marker(cmdbatch);
			mutex_unlock(&drawctxt->mutex);
			return 0;
		}

		/*
		 * Remember the last queued timestamp - the marker will block
		 * until that timestamp is expired (unless another command
		 * comes along and forces the marker to execute)
		 */

		cmdbatch->marker_timestamp = drawctxt->inflight_timestamp;
	}

	/* SYNC commands have timestamp 0 and will get optimized out anyway */
	if (!(cmdbatch->flags & KGSL_CONTEXT_SYNC))
		drawctxt->inflight_timestamp = *timestamp;

	/*
	 * Set the fault tolerance policy for the command batch - assuming the
	 * context hasn't disabled FT use the current device policy
@@ -762,6 +864,24 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
	drawctxt->cmdqueue_tail = (drawctxt->cmdqueue_tail + 1) %
		ADRENO_CONTEXT_CMDQUEUE_SIZE;

	/*
	 * If this is a real command then we need to force any markers queued
	 * before it to dispatch to keep time linear - set the skip bit so
	 * the commands get NOPed.
	 */

	if (!(cmdbatch->flags & KGSL_CMDBATCH_MARKER)) {
		unsigned int i = drawctxt->cmdqueue_head;

		while (i != drawctxt->cmdqueue_tail) {
			if (drawctxt->cmdqueue[i]->flags & KGSL_CMDBATCH_MARKER)
				set_bit(CMDBATCH_FLAG_SKIP,
					&drawctxt->cmdqueue[i]->priv);

			i = CMDQUEUE_NEXT(i, ADRENO_CONTEXT_CMDQUEUE_SIZE);
		}
	}

	drawctxt->queued++;
	trace_adreno_cmdbatch_queued(cmdbatch, drawctxt->queued);

@@ -1458,7 +1578,7 @@ static void adreno_dispatcher_work(struct work_struct *work)
			}

			trace_adreno_cmdbatch_retired(cmdbatch,
				dispatcher->inflight - 1);
				(int) (dispatcher->inflight - 1));

			/* Reduce the number of inflight command batches */
			dispatcher->inflight--;
+2 −1
Original line number Diff line number Diff line
@@ -41,9 +41,9 @@ struct kgsl_context;
 * @wq: Workqueue structure for contexts to sleep pending room in the queue
 * @waiting: Workqueue structure for contexts waiting for a timestamp or event
 * @queued: Number of commands queued in the cmdqueue
 * @ops: Context switch functions for this context.
 * @fault_policy: GFT fault policy set in cmdbatch_skip_cmd();
 * @debug_root: debugfs entry for this context.
 * @inflight_timestamp: The last timestamp that was queued on this context
 */
struct adreno_context {
	struct kgsl_context base;
@@ -64,6 +64,7 @@ struct adreno_context {
	int queued;
	unsigned int fault_policy;
	struct dentry *debug_root;
	unsigned int inflight_timestamp;
};

/**
+14 −6
Original line number Diff line number Diff line
@@ -52,17 +52,21 @@ DECLARE_EVENT_CLASS(adreno_cmdbatch_template,
	TP_STRUCT__entry(
		__field(unsigned int, id)
		__field(unsigned int, timestamp)
		__field(unsigned int, inflight)
		__field(int, inflight)
		__field(unsigned int, flags)
	),
	TP_fast_assign(
		__entry->id = cmdbatch->context->id;
		__entry->timestamp = cmdbatch->timestamp;
		__entry->inflight = inflight;
		__entry->flags = cmdbatch->flags;
	),
	TP_printk(
		"ctx=%u ts=%u inflight=%u",
		"ctx=%u ts=%u inflight=%d flags=%s",
			__entry->id, __entry->timestamp,
			__entry->inflight
			__entry->inflight,
			__entry->flags ? __print_flags(__entry->flags, "|",
				KGSL_CMDBATCH_FLAGS) : "none"
	)
);

@@ -77,22 +81,26 @@ TRACE_EVENT(adreno_cmdbatch_retired,
	TP_STRUCT__entry(
		__field(unsigned int, id)
		__field(unsigned int, timestamp)
		__field(unsigned int, inflight)
		__field(int, inflight)
		__field(unsigned int, recovery)
		__field(unsigned int, flags)
	),
	TP_fast_assign(
		__entry->id = cmdbatch->context->id;
		__entry->timestamp = cmdbatch->timestamp;
		__entry->inflight = inflight;
		__entry->recovery = cmdbatch->fault_recovery;
		__entry->flags = cmdbatch->flags;
	),
	TP_printk(
		"ctx=%u ts=%u inflight=%u recovery=%s",
		"ctx=%u ts=%u inflight=%d recovery=%s flags=%s",
			__entry->id, __entry->timestamp,
			__entry->inflight,
			__entry->recovery ?
				__print_flags(__entry->recovery, "|",
				ADRENO_FT_TYPES) : "none"
				ADRENO_FT_TYPES) : "none",
			__entry->flags ? __print_flags(__entry->flags, "|",
				KGSL_CMDBATCH_FLAGS) : "none"
	)
);

+15 −8
Original line number Diff line number Diff line
@@ -2110,7 +2110,7 @@ static struct kgsl_cmdbatch *_kgsl_cmdbatch_create(struct kgsl_device *device,
		goto done;
	}

	if (!(flags & KGSL_CMDBATCH_SYNC)) {
	if (!(flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER))) {
		struct kgsl_ibdesc ibdesc;
		void  __user *uptr = cmdlist;

@@ -2169,7 +2169,7 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
	long result = -EINVAL;

	/* The legacy functions don't support synchronization commands */
	if (param->flags & KGSL_CMDBATCH_SYNC)
	if ((param->flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER)))
		return -EINVAL;

	/* Get the context */
@@ -2227,14 +2227,21 @@ long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv,

	long result = -EINVAL;

	/* The number of IBs are completely ignored for sync commands */
	if (!(param->flags & KGSL_CMDBATCH_SYNC)) {
		if (param->numcmds == 0 || param->numcmds > KGSL_MAX_NUMIBS)
			return -EINVAL;
	} else if (param->numcmds != 0) {
	/*
	 * The SYNC bit is supposed to identify a dummy sync object so warn the
	 * user if they specified any IBs with it.  A MARKER command can either
	 * have IBs or not but if the command has 0 IBs it is automatically
	 * assumed to be a marker.  If none of the above make sure that the user
	 * specified a sane number of IBs
	 */

	if ((param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds)
		KGSL_DEV_ERR_ONCE(device,
			"Commands specified with the SYNC flag.  They will be ignored\n");
	}
	else if (param->numcmds > KGSL_MAX_NUMIBS)
		return -EINVAL;
	else if (!(param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds == 0)
		param->flags |= KGSL_CMDBATCH_MARKER;

	context = kgsl_context_get_owner(dev_priv, param->context_id);
	if (context == NULL)
+1 −1
Original line number Diff line number Diff line
@@ -375,7 +375,7 @@ int kgsl_cmdbatch_create_compat(struct kgsl_device *device, unsigned int flags,
{
	int ret = 0, i;

	if (!(flags & KGSL_CMDBATCH_SYNC)) {
	if (!(flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER))) {
		struct kgsl_ibdesc_compat ibdesc32;
		struct kgsl_ibdesc ibdesc;
		void __user *uptr = cmdlist;
Loading