Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 27c01aae authored by Chris Wilson
Browse files

drm/i915: Prepare i915_gem_active for annotations



In the future, we will want to add annotations to the i915_gem_active
struct. The API is thus expanded to hide direct access to the contents
of i915_gem_active and mediated instead through a number of helpers.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470293567-10811-11-git-send-email-chris@chris-wilson.co.uk
parent 381f371b
Loading
Loading
Loading
Loading
+8 −5
Original line number Diff line number Diff line
@@ -155,10 +155,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
		   obj->base.write_domain);
	for_each_engine_id(engine, dev_priv, id)
		seq_printf(m, "%x ",
			   i915_gem_request_get_seqno(obj->last_read[id].request));
			   i915_gem_active_get_seqno(&obj->last_read[id]));
	seq_printf(m, "] %x %x%s%s%s",
		   i915_gem_request_get_seqno(obj->last_write.request),
		   i915_gem_request_get_seqno(obj->last_fence.request),
		   i915_gem_active_get_seqno(&obj->last_write),
		   i915_gem_active_get_seqno(&obj->last_fence),
		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
		   obj->dirty ? " dirty" : "",
		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
@@ -195,8 +195,11 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
		*t = '\0';
		seq_printf(m, " (%s mappable)", s);
	}
	if (obj->last_write.request)
		seq_printf(m, " (%s)", obj->last_write.request->engine->name);

	engine = i915_gem_active_get_engine(&obj->last_write);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	if (obj->frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
+49 −36
Original line number Diff line number Diff line
@@ -1349,27 +1349,30 @@ int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct drm_i915_gem_request *request;
	struct reservation_object *resv;
	int ret, i;

	if (readonly) {
		if (obj->last_write.request) {
			ret = i915_wait_request(obj->last_write.request);
		request = i915_gem_active_peek(&obj->last_write);
		if (request) {
			ret = i915_wait_request(request);
			if (ret)
				return ret;

			i = obj->last_write.request->engine->id;
			if (obj->last_read[i].request == obj->last_write.request)
			i = request->engine->id;
			if (i915_gem_active_peek(&obj->last_read[i]) == request)
				i915_gem_object_retire__read(obj, i);
			else
				i915_gem_object_retire__write(obj);
		}
	} else {
		for (i = 0; i < I915_NUM_ENGINES; i++) {
			if (!obj->last_read[i].request)
			request = i915_gem_active_peek(&obj->last_read[i]);
			if (!request)
				continue;

			ret = i915_wait_request(obj->last_read[i].request);
			ret = i915_wait_request(request);
			if (ret)
				return ret;

@@ -1397,9 +1400,9 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
{
	int idx = req->engine->id;

	if (obj->last_read[idx].request == req)
	if (i915_gem_active_peek(&obj->last_read[idx]) == req)
		i915_gem_object_retire__read(obj, idx);
	else if (obj->last_write.request == req)
	else if (i915_gem_active_peek(&obj->last_write) == req)
		i915_gem_object_retire__write(obj);

	if (!i915_reset_in_progress(&req->i915->gpu_error))
@@ -1428,20 +1431,20 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
	if (readonly) {
		struct drm_i915_gem_request *req;

		req = obj->last_write.request;
		req = i915_gem_active_get(&obj->last_write);
		if (req == NULL)
			return 0;

		requests[n++] = i915_gem_request_get(req);
		requests[n++] = req;
	} else {
		for (i = 0; i < I915_NUM_ENGINES; i++) {
			struct drm_i915_gem_request *req;

			req = obj->last_read[i].request;
			req = i915_gem_active_get(&obj->last_read[i]);
			if (req == NULL)
				continue;

			requests[n++] = i915_gem_request_get(req);
			requests[n++] = req;
		}
	}

@@ -2383,8 +2386,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
static void
i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->last_write.request);
	GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write.request->engine)));
	GEM_BUG_ON(!i915_gem_active_isset(&obj->last_write));
	GEM_BUG_ON(!(obj->active & intel_engine_flag(i915_gem_active_get_engine(&obj->last_write))));

	i915_gem_active_set(&obj->last_write, NULL);
	intel_fb_obj_flush(obj, true, ORIGIN_CS);
@@ -2393,15 +2396,17 @@ i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
static void
i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int idx)
{
	struct intel_engine_cs *engine;
	struct i915_vma *vma;

	GEM_BUG_ON(!obj->last_read[idx].request);
	GEM_BUG_ON(!i915_gem_active_isset(&obj->last_read[idx]));
	GEM_BUG_ON(!(obj->active & (1 << idx)));

	list_del_init(&obj->engine_list[idx]);
	i915_gem_active_set(&obj->last_read[idx], NULL);

	if (obj->last_write.request && obj->last_write.request->engine->id == idx)
	engine = i915_gem_active_get_engine(&obj->last_write);
	if (engine && engine->id == idx)
		i915_gem_object_retire__write(obj);

	obj->active &= ~(1 << idx);
@@ -2621,7 +2626,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
				       struct drm_i915_gem_object,
				       engine_list[engine->id]);

		if (!list_empty(&obj->last_read[engine->id].request->list))
		if (!list_empty(&i915_gem_active_peek(&obj->last_read[engine->id])->list))
			break;

		i915_gem_object_retire__read(obj, engine->id);
@@ -2754,7 +2759,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct drm_i915_gem_request *req;

		req = obj->last_read[i].request;
		req = i915_gem_active_peek(&obj->last_read[i]);
		if (req == NULL)
			continue;

@@ -2794,7 +2799,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
	int i, n = 0;
	int ret;

@@ -2830,20 +2835,21 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
	i915_gem_object_put(obj);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		if (!obj->last_read[i].request)
			continue;
		struct drm_i915_gem_request *req;

		req[n++] = i915_gem_request_get(obj->last_read[i].request);
		req = i915_gem_active_get(&obj->last_read[i]);
		if (req)
			requests[n++] = req;
	}

	mutex_unlock(&dev->struct_mutex);

	for (i = 0; i < n; i++) {
		if (ret == 0)
			ret = __i915_wait_request(req[i], true,
			ret = __i915_wait_request(requests[i], true,
						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
						  to_rps_client(file));
		i915_gem_request_put(req[i]);
		i915_gem_request_put(requests[i]);
	}
	return ret;

@@ -2916,7 +2922,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_request *to)
{
	const bool readonly = obj->base.pending_write_domain == 0;
	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
	int ret, i, n;

	if (!obj->active)
@@ -2924,15 +2930,22 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,

	n = 0;
	if (readonly) {
		if (obj->last_write.request)
			req[n++] = obj->last_write.request;
		struct drm_i915_gem_request *req;

		req = i915_gem_active_peek(&obj->last_write);
		if (req)
			requests[n++] = req;
	} else {
		for (i = 0; i < I915_NUM_ENGINES; i++)
			if (obj->last_read[i].request)
				req[n++] = obj->last_read[i].request;
		for (i = 0; i < I915_NUM_ENGINES; i++) {
			struct drm_i915_gem_request *req;

			req = i915_gem_active_peek(&obj->last_read[i]);
			if (req)
				requests[n++] = req;
		}
	}
	for (i = 0; i < n; i++) {
		ret = __i915_gem_object_sync(obj, to, req[i]);
		ret = __i915_gem_object_sync(obj, to, requests[i]);
		if (ret)
			return ret;
	}
@@ -4021,17 +4034,17 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,

	args->busy = 0;
	if (obj->active) {
		struct drm_i915_gem_request *req;
		int i;

		for (i = 0; i < I915_NUM_ENGINES; i++) {
			struct drm_i915_gem_request *req;

			req = obj->last_read[i].request;
			req = i915_gem_active_peek(&obj->last_read[i]);
			if (req)
				args->busy |= 1 << (16 + req->engine->exec_id);
		}
		if (obj->last_write.request)
			args->busy |= obj->last_write.request->engine->exec_id;
		req = i915_gem_active_peek(&obj->last_write);
		if (req)
			args->busy |= req->engine->exec_id;
	}

unref:
+5 −6
Original line number Diff line number Diff line
@@ -261,14 +261,13 @@ static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
static int
i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
{
	if (obj->last_fence.request) {
		int ret = i915_wait_request(obj->last_fence.request);
	int ret;

	ret = i915_gem_active_wait(&obj->last_fence);
	if (ret)
		return ret;

	i915_gem_active_set(&obj->last_fence, NULL);
	}

	return 0;
}

+127 −0
Original line number Diff line number Diff line
@@ -280,6 +280,15 @@ struct i915_gem_active {
	struct drm_i915_gem_request *request;
};

/**
 * i915_gem_active_set - updates the tracker to watch the current request
 * @active - the active tracker
 * @request - the request to watch
 *
 * i915_gem_active_set() watches the given @request for completion. Whilst
 * that @request is busy, the @active reports busy. When that @request is
 * retired, the @active tracker is updated to report idle.
 */
static inline void
i915_gem_active_set(struct i915_gem_active *active,
		    struct drm_i915_gem_request *request)
@@ -287,6 +296,124 @@ i915_gem_active_set(struct i915_gem_active *active,
	i915_gem_request_assign(&active->request, request);
}

/**
 * i915_gem_active_peek - report the request being monitored
 * @active: the active tracker
 *
 * i915_gem_active_peek() returns the current request being tracked, or NULL.
 * It does not obtain a reference on the request for the caller, so the
 * caller must hold struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_peek(const struct i915_gem_active *active)
{
	return active->request;
}

/**
 * i915_gem_active_get - return a reference to the active request
 * @active: the active tracker
 *
 * i915_gem_active_get() acquires and returns a reference to the request
 * currently being tracked, or NULL if the tracker is idle (either unset,
 * or the tracked request has already completed). The caller must hold
 * struct_mutex.
 */
static inline struct drm_i915_gem_request *
i915_gem_active_get(const struct i915_gem_active *active)
{
	struct drm_i915_gem_request *request = i915_gem_active_peek(active);

	if (request && !i915_gem_request_completed(request))
		return i915_gem_request_get(request);

	return NULL;
}

/**
 * i915_gem_active_isset - report whether the active tracker is assigned
 * @active - the active tracker
 *
 * i915_gem_active_isset() returns true if the active tracker is currently
 * assigned to a request. Due to the lazy retiring, that request may be idle
 * and this may report stale information.
 */
static inline bool
i915_gem_active_isset(const struct i915_gem_active *active)
{
	return active->request;
}

/**
 * i915_gem_active_is_idle - report whether the active tracker is idle
 * @active: the active tracker
 *
 * i915_gem_active_is_idle() returns true if the active tracker is currently
 * unassigned or if the request is complete (but not yet retired). Requires
 * the caller to hold struct_mutex (but that can be relaxed if desired).
 */
static inline bool
i915_gem_active_is_idle(const struct i915_gem_active *active)
{
	struct drm_i915_gem_request *request;

	/* Idle when nothing is tracked, or the tracked request finished. */
	request = i915_gem_active_peek(active);
	return !request || i915_gem_request_completed(request);
}

/**
 * i915_gem_active_wait - waits until the request is completed
 * @active: the active request on which to wait
 *
 * i915_gem_active_wait() blocks until the tracked request completes, then
 * returns 0 (or immediately returns 0 if the tracker is idle). Note that
 * it does not guarantee that the request is retired first; see
 * i915_gem_active_retire() for that.
 */
static inline int __must_check
i915_gem_active_wait(const struct i915_gem_active *active)
{
	struct drm_i915_gem_request *request;

	request = i915_gem_active_peek(active);
	return request ? i915_wait_request(request) : 0;
}

/**
 * i915_gem_active_retire - waits until the request is retired
 * @active: the active request on which to wait
 *
 * i915_gem_active_retire() waits until the request is completed,
 * and then ensures that at least the retirement handler for this
 * @active tracker is called before returning. If the @active
 * tracker is idle, the function returns immediately.
 */
static inline int __must_check
i915_gem_active_retire(const struct i915_gem_active *active)
{
	/* Currently a pure alias for the wait; retirement is handled
	 * elsewhere by the request machinery after the wait completes.
	 */
	return i915_gem_active_wait(active);
}

/* Convenience functions for peeking at state inside active's request whilst
 * guarded by the struct_mutex.
 */

/**
 * i915_gem_active_get_seqno - return the seqno of the tracked request
 * @active: the active tracker
 *
 * Convenience helper for peeking at the seqno of the request being
 * monitored; caller must hold struct_mutex. NOTE(review): when the
 * tracker is idle, peek yields NULL — presumably
 * i915_gem_request_get_seqno() tolerates a NULL request; confirm.
 */
static inline uint32_t
i915_gem_active_get_seqno(const struct i915_gem_active *active)
{
	return i915_gem_request_get_seqno(i915_gem_active_peek(active));
}

/**
 * i915_gem_active_get_engine - return the engine of the tracked request
 * @active: the active tracker
 *
 * Convenience helper for peeking at the engine executing the request
 * being monitored; caller must hold struct_mutex. NOTE(review): when the
 * tracker is idle, peek yields NULL — presumably
 * i915_gem_request_get_engine() handles a NULL request; confirm.
 */
static inline struct intel_engine_cs *
i915_gem_active_get_engine(const struct i915_gem_active *active)
{
	return i915_gem_request_get_engine(i915_gem_active_peek(active));
}

/* Iterate @idx over each bit set in @mask (lowest first, via ffs),
 * clearing each visited bit. NOTE(review): @mask is consumed (zero on
 * loop exit) and both arguments are evaluated multiple times — pass
 * side-effect-free expressions.
 */
#define for_each_active(mask, idx) \
	for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))

+1 −1
Original line number Diff line number Diff line
@@ -242,7 +242,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
			}

			obj->fence_dirty =
				obj->last_fence.request ||
				!i915_gem_active_is_idle(&obj->last_fence) ||
				obj->fence_reg != I915_FENCE_REG_NONE;

			obj->tiling_mode = args->tiling_mode;
Loading