Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a31d08be authored by Jordan Crouse
Browse files

msm: kgsl: Replace the command batch syncpoint list



We have spent an inordinate amount of time trying to get command
batch syncpoints right. For reasons that are not even clear to me
we stuck with the list model long after it had become more of a
burden than a benefit. The different behavior of the sync models
and the dynamic nature of the list as it was destroyed required
a lot of complex reference counting and locking which broke
every time we moved to a new kernel or sync model.

This ends now. Syncpoints for each command batch are finite and
known at create time. We do not need to dynamically allocate
blocks of memory for each event, we do not need to reference
count them or dynamically destroy them either. All we need
to have is a fast way to determine when all the events have
expired so we can submit the command batch. In that spirit:

 - Allocate a single array for all syncpoints for a command batch
   at initialization time.
 - Use a bitmap to keep track of pending events - if the event is
   pending, the bit is set. Once the event is expired or destroyed,
   the bit is cleared. When the entire bitmap goes to zero the
   command batch can be submitted. The atomic bitmap operations
   should be plenty fast for our purposes.
 - Syncpoint memory is freed at command batch destroy time - no more
   reference counting or locking required.

Change-Id: Ic0dedbad5ece033e831132bb6d076ef6a542fe1f
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
parent 58ff0284
Loading
Loading
Loading
Loading
+9 −5
Original line number Diff line number Diff line
@@ -201,22 +201,26 @@ static void print_flags(struct seq_file *s, const struct flag_entry *table,

static void cmdbatch_print(struct seq_file *s, struct kgsl_cmdbatch *cmdbatch)
{
	struct kgsl_cmdbatch_sync_event *sync_event;
	struct kgsl_cmdbatch_sync_event *event;
	unsigned int i;

	/* print fences first, since they block this cmdbatch */

	rcu_read_lock();
	list_for_each_entry_rcu(sync_event, &cmdbatch->synclist, node) {
	for (i = 0; i < cmdbatch->numsyncs; i++) {
		event = &cmdbatch->synclist[i];

		if (!kgsl_cmdbatch_event_pending(cmdbatch, i))
			continue;

		/*
		 * Timestamp is 0 for KGSL_CONTEXT_SYNC, but print it anyways
		 * so that it is clear if the fence was a separate submit
		 * or part of an IB submit.
		 */
		seq_printf(s, "\t%d ", cmdbatch->timestamp);
		sync_event_print(s, sync_event);
		sync_event_print(s, event);
		seq_puts(s, "\n");
	}
	rcu_read_unlock();

	/* if this flag is set, there won't be an IB */
	if (cmdbatch->flags & KGSL_CONTEXT_SYNC)
+2 −15
Original line number Diff line number Diff line
@@ -338,7 +338,6 @@ static inline void _pop_cmdbatch(struct adreno_context *drawctxt)
static struct kgsl_cmdbatch *_expire_markers(struct adreno_context *drawctxt)
{
	struct kgsl_cmdbatch *cmdbatch;
	bool pending = false;

	if (drawctxt->cmdqueue_head == drawctxt->cmdqueue_tail)
		return NULL;
@@ -357,17 +356,7 @@ static struct kgsl_cmdbatch *_expire_markers(struct adreno_context *drawctxt)
	}

	if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) {
		/*
		 * We may have cmdbatch timer running, which also uses same
		 * lock, take a lock with software interrupt disabled (bh) to
		 * avoid spin lock recursion.
		 */
		spin_lock_bh(&cmdbatch->lock);
		if (!list_empty(&cmdbatch->synclist))
			pending = true;
		spin_unlock_bh(&cmdbatch->lock);

		if (!pending) {
		if (!kgsl_cmdbatch_events_pending(cmdbatch)) {
			_pop_cmdbatch(drawctxt);
			kgsl_cmdbatch_destroy(cmdbatch);
			return _expire_markers(drawctxt);
@@ -406,10 +395,8 @@ static struct kgsl_cmdbatch *_get_cmdbatch(struct adreno_context *drawctxt)
			(!test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv)))
		pending = true;

	rcu_read_lock();
	if (!list_empty(&cmdbatch->synclist))
	if (kgsl_cmdbatch_events_pending(cmdbatch))
		pending = true;
	rcu_read_unlock();

	/*
	 * If changes are pending and the canary timer hasn't been
+1 −3
Original line number Diff line number Diff line
@@ -82,15 +82,13 @@ void adreno_drawctxt_dump(struct kgsl_device *device,
			goto stats;
		}

		rcu_read_lock();
		if (!list_empty(&cmdbatch->synclist)) {
		if (kgsl_cmdbatch_events_pending(cmdbatch)) {
			dev_err(device->dev,
				"  context[%d] (ts=%d) Active sync points:\n",
				context->id, cmdbatch->timestamp);

			kgsl_dump_syncpoints(device, cmdbatch);
		}
		rcu_read_unlock();
	}

stats:
+2 −2
Original line number Diff line number Diff line
@@ -1539,7 +1539,7 @@ long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv,
		param->flags |= KGSL_CMDBATCH_MARKER;

	/* Make sure that we don't have too many syncpoints */
	if (param->numsyncs > KGSL_MAX_NUMIBS)
	if (param->numsyncs > KGSL_MAX_SYNCPOINTS)
		return -EINVAL;

	context = kgsl_context_get_owner(dev_priv, param->context_id);
@@ -1608,7 +1608,7 @@ long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv,
	/* Make sure that the memobj and syncpoint count isn't too big */
	if (param->numcmds > KGSL_MAX_NUMIBS ||
		param->numobjs > KGSL_MAX_NUMIBS ||
		param->numsyncs > KGSL_MAX_NUMIBS)
		param->numsyncs > KGSL_MAX_SYNCPOINTS)
		return -EINVAL;

	context = kgsl_context_get_owner(dev_priv, param->context_id);
+1 −0
Original line number Diff line number Diff line
@@ -54,6 +54,7 @@ static inline void KGSL_STATS_ADD(uint64_t size, atomic_long_t *stat,
}

#define KGSL_MAX_NUMIBS 100000
#define KGSL_MAX_SYNCPOINTS 32

struct kgsl_device;
struct kgsl_context;
Loading