
Commit 26827088 authored by Dave Gordon, committed by Daniel Vetter

drm/i915: simplify allocation of driver-internal requests



There are a number of places where the driver needs a request, but isn't
working on behalf of any specific user or in a specific context. At
present, we associate them with the per-engine default context. A future
patch will abolish those per-engine context pointers, but we can already
eliminate many of the references to them just by making the allocator
accept NULL as shorthand for "an appropriate context for this ring",
so that callers don't need to know anything about how the "appropriate
context" is found (e.g. per-ring vs. per-device).

So this patch renames the existing i915_gem_request_alloc() and makes
it local (static inline), then adds a wrapper under the old name that
provides a default if the context is NULL and offers a nicer calling
convention (no pointer to an output parameter required). All callers
are then changed to the new convention:
OLD:
	err = i915_gem_request_alloc(ring, user_ctx, &req);
	if (err) ...
NEW:
	req = i915_gem_request_alloc(ring, user_ctx);
	if (IS_ERR(req)) ...
OLD:
	err = i915_gem_request_alloc(ring, ring->default_context, &req);
	if (err) ...
NEW:
	req = i915_gem_request_alloc(ring, NULL);
	if (IS_ERR(req)) ...
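
For readers new to this pattern: IS_ERR()/PTR_ERR()/ERR_PTR() are the
kernel's error-pointer helpers, which let a single pointer return value
carry either a valid object or a negative errno. A simplified standalone
sketch of the idiom, adapted from include/linux/err.h (the userspace
main() is for illustration only):

	/* The top 4095 kernel addresses are never valid objects, so the
	 * same bits can encode a -errno value instead of a pointer. */
	#include <stdio.h>
	#include <errno.h>

	#define MAX_ERRNO	4095

	static inline void *ERR_PTR(long error)	/* pack -errno into a pointer */
	{
		return (void *)error;
	}

	static inline long PTR_ERR(const void *ptr)	/* unpack it again */
	{
		return (long)ptr;
	}

	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	int main(void)
	{
		void *req = ERR_PTR(-ENOMEM);	/* what a failed alloc returns */

		if (IS_ERR(req))
			printf("request alloc failed: %ld\n", PTR_ERR(req));
		return 0;
	}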

v4:	Rebased

Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
Reviewed-by: Nick Hoath <nicholas.hoath@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1453230175-19330-2-git-send-email-david.s.gordon@intel.com
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent e7941294
drivers/gpu/drm/i915/i915_drv.h  +3 −3
@@ -2268,9 +2268,9 @@ struct drm_i915_gem_request {
 
 };
 
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
-			   struct intel_context *ctx,
-			   struct drm_i915_gem_request **req_out);
+struct drm_i915_gem_request * __must_check
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+		       struct intel_context *ctx);
 void i915_gem_request_cancel(struct drm_i915_gem_request *req);
 void i915_gem_request_free(struct kref *req_ref);
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
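
Note: the __must_check on the new declaration is the kernel's wrapper for
the compiler's warn_unused_result attribute, so a caller that drops the
returned pointer (and with it any encoded error) gets a compile-time
warning. A minimal standalone sketch; request_alloc() here is
hypothetical, for illustration only:

	/* __must_check as the kernel defines it for GCC/Clang. */
	#define __must_check __attribute__((warn_unused_result))

	struct request { int id; };
	static struct request the_req;

	static struct request * __must_check request_alloc(void)
	{
		return &the_req;
	}

	int main(void)
	{
		request_alloc();	/* compiler warns: result ignored */
		return 0;
	}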
drivers/gpu/drm/i915/i915_gem.c  +42 −13
@@ -2690,7 +2690,8 @@ void i915_gem_request_free(struct kref *req_ref)
 	kmem_cache_free(req->i915->requests, req);
 }
 
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
+static inline int
+__i915_gem_request_alloc(struct intel_engine_cs *ring,
 			 struct intel_context *ctx,
 			 struct drm_i915_gem_request **req_out)
 {
@@ -2755,6 +2756,31 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring,
 	return ret;
 }
 
+/**
+ * i915_gem_request_alloc - allocate a request structure
+ *
+ * @engine: engine that we wish to issue the request on.
+ * @ctx: context that the request will be associated with.
+ *       This can be NULL if the request is not directly related to
+ *       any specific user context, in which case this function will
+ *       choose an appropriate context to use.
+ *
+ * Returns a pointer to the allocated request if successful,
+ * or an error code if not.
+ */
+struct drm_i915_gem_request *
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+		       struct intel_context *ctx)
+{
+	struct drm_i915_gem_request *req;
+	int err;
+
+	if (ctx == NULL)
+		ctx = engine->default_context;
+	err = __i915_gem_request_alloc(engine, ctx, &req);
+	return err ? ERR_PTR(err) : req;
+}
+
 void i915_gem_request_cancel(struct drm_i915_gem_request *req)
 {
 	intel_ring_reserved_space_cancel(req->ringbuf);
@@ -3172,9 +3198,13 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			return 0;
 
 		if (*to_req == NULL) {
-			ret = i915_gem_request_alloc(to, to->default_context, to_req);
-			if (ret)
-				return ret;
+			struct drm_i915_gem_request *req;
+
+			req = i915_gem_request_alloc(to, NULL);
+			if (IS_ERR(req))
+				return PTR_ERR(req);
+
+			*to_req = req;
 		}
 
 		trace_i915_gem_ring_sync_to(*to_req, from, from_req);
@@ -3374,9 +3404,9 @@ int i915_gpu_idle(struct drm_device *dev)
 		if (!i915.enable_execlists) {
 			struct drm_i915_gem_request *req;
 
-			ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-			if (ret)
-				return ret;
+			req = i915_gem_request_alloc(ring, NULL);
+			if (IS_ERR(req))
+				return PTR_ERR(req);
 
 			ret = i915_switch_context(req);
 			if (ret) {
@@ -4871,10 +4901,9 @@ i915_gem_init_hw(struct drm_device *dev)
 	for_each_ring(ring, dev_priv, i) {
 		struct drm_i915_gem_request *req;
 
-		WARN_ON(!ring->default_context);
-
-		ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-		if (ret) {
+		req = i915_gem_request_alloc(ring, NULL);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
 			i915_gem_cleanup_ringbuffer(dev);
 			goto out;
 		}
drivers/gpu/drm/i915/i915_gem_execbuffer.c  +9 −5
@@ -1381,6 +1381,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		       struct drm_i915_gem_exec_object2 *exec)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_request *req = NULL;
 	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
 	struct drm_i915_gem_exec_object2 shadow_exec_entry;
@@ -1602,11 +1603,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
 
 	/* Allocate a request for this batch buffer nice and early. */
-	ret = i915_gem_request_alloc(ring, ctx, &params->request);
-	if (ret)
+	req = i915_gem_request_alloc(ring, ctx);
+	if (IS_ERR(req)) {
+		ret = PTR_ERR(req);
 		goto err_batch_unpin;
+	}
 
-	ret = i915_gem_request_add_to_client(params->request, file);
+	ret = i915_gem_request_add_to_client(req, file);
 	if (ret)
 		goto err_batch_unpin;
 
@@ -1622,6 +1625,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	params->dispatch_flags          = dispatch_flags;
 	params->batch_obj               = batch_obj;
 	params->ctx                     = ctx;
+	params->request                 = req;
 
 	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
 
@@ -1645,8 +1649,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 * must be freed again. If it was submitted then it is being tracked
 	 * on the active request list and no clean up is required here.
 	 */
-	if (ret && params->request)
-		i915_gem_request_cancel(params->request);
+	if (ret && req)
+		i915_gem_request_cancel(req);
 
 	mutex_unlock(&dev->struct_mutex);
 
drivers/gpu/drm/i915/intel_display.c  +4 −2
@@ -11690,10 +11690,12 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 					obj->last_write_req);
 	} else {
 		if (!request) {
-			ret = i915_gem_request_alloc(ring, ring->default_context, &request);
-			if (ret)
+			request = i915_gem_request_alloc(ring, NULL);
+			if (IS_ERR(request)) {
+				ret = PTR_ERR(request);
 				goto cleanup_unpin;
+			}
 		}
 
 		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
 						   page_flip_flags);
drivers/gpu/drm/i915/intel_lrc.c  +4 −5
@@ -2520,11 +2520,10 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 	if (ctx != ring->default_context && ring->init_context) {
 		struct drm_i915_gem_request *req;
 
-		ret = i915_gem_request_alloc(ring,
-			ctx, &req);
-		if (ret) {
-			DRM_ERROR("ring create req: %d\n",
-				ret);
+		req = i915_gem_request_alloc(ring, ctx);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
+			DRM_ERROR("ring create req: %d\n", ret);
 			goto error_ringbuf;
 		}
 