Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit eed29a5b authored by Daniel Vetter
Browse files

drm/i915: s/\<rq\>/req/g



The merged seqno->request conversion from John called request
variables req, but some (not all) of Chris' recent patches changed
those to just rq. We've had a lengthy (and inconclusive) discussion on
IRC about which is the more meaningful name, with maybe at most a slight bias
towards req.

Given that the "don't change names without good reason to avoid
conflicts" rule applies, let's go back to req everywhere for
consistency. I'll sed any patches for which this will cause conflicts
before applying.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: John Harrison <John.C.Harrison@Intel.com>
[danvet: s/origina/merged/ as pointed out by Chris - the first
mass-conversion patch was from Chris, the merged one from John.]
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
parent 8ea6f892
Loading
Loading
Loading
Loading
+7 −7
Original line number Diff line number Diff line
@@ -665,7 +665,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
	struct drm_device *dev = node->minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct drm_i915_gem_request *rq;
	struct drm_i915_gem_request *req;
	int ret, any, i;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -677,22 +677,22 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
		int count;

		count = 0;
		list_for_each_entry(rq, &ring->request_list, list)
		list_for_each_entry(req, &ring->request_list, list)
			count++;
		if (count == 0)
			continue;

		seq_printf(m, "%s requests: %d\n", ring->name, count);
		list_for_each_entry(rq, &ring->request_list, list) {
		list_for_each_entry(req, &ring->request_list, list) {
			struct task_struct *task;

			rcu_read_lock();
			task = NULL;
			if (rq->pid)
				task = pid_task(rq->pid, PIDTYPE_PID);
			if (req->pid)
				task = pid_task(req->pid, PIDTYPE_PID);
			seq_printf(m, "    %x @ %d: %s [%d]\n",
				   rq->seqno,
				   (int) (jiffies - rq->emitted_jiffies),
				   req->seqno,
				   (int) (jiffies - req->emitted_jiffies),
				   task ? task->comm : "<unknown>",
				   task ? task->pid : -1);
			rcu_read_unlock();
+16 −16
Original line number Diff line number Diff line
@@ -1178,16 +1178,16 @@ static bool missed_irq(struct drm_i915_private *dev_priv,
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

static int __i915_spin_request(struct drm_i915_gem_request *rq)
static int __i915_spin_request(struct drm_i915_gem_request *req)
{
	unsigned long timeout;

	if (i915_gem_request_get_ring(rq)->irq_refcount)
	if (i915_gem_request_get_ring(req)->irq_refcount)
		return -EBUSY;

	timeout = jiffies + 1;
	while (!need_resched()) {
		if (i915_gem_request_completed(rq, true))
		if (i915_gem_request_completed(req, true))
			return 0;

		if (time_after_eq(jiffies, timeout))
@@ -1195,7 +1195,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *rq)

		cpu_relax_lowlatency();
	}
	if (i915_gem_request_completed(rq, false))
	if (i915_gem_request_completed(req, false))
		return 0;

	return -EAGAIN;
@@ -2572,37 +2572,37 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring,
			   struct intel_context *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(ring->dev);
	struct drm_i915_gem_request *rq;
	struct drm_i915_gem_request *req;
	int ret;

	if (ring->outstanding_lazy_request)
		return 0;

	rq = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
	if (rq == NULL)
	req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
	if (req == NULL)
		return -ENOMEM;

	kref_init(&rq->ref);
	rq->i915 = dev_priv;
	kref_init(&req->ref);
	req->i915 = dev_priv;

	ret = i915_gem_get_seqno(ring->dev, &rq->seqno);
	ret = i915_gem_get_seqno(ring->dev, &req->seqno);
	if (ret) {
		kfree(rq);
		kfree(req);
		return ret;
	}

	rq->ring = ring;
	req->ring = ring;

	if (i915.enable_execlists)
		ret = intel_logical_ring_alloc_request_extras(rq, ctx);
		ret = intel_logical_ring_alloc_request_extras(req, ctx);
	else
		ret = intel_ring_alloc_request_extras(rq);
		ret = intel_ring_alloc_request_extras(req);
	if (ret) {
		kfree(rq);
		kfree(req);
		return ret;
	}

	ring->outstanding_lazy_request = rq;
	ring->outstanding_lazy_request = req;
	return 0;
}

+4 −4
Original line number Diff line number Diff line
@@ -10785,14 +10785,14 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
	struct intel_mmio_flip *mmio_flip =
		container_of(work, struct intel_mmio_flip, work);

	if (mmio_flip->rq)
		WARN_ON(__i915_wait_request(mmio_flip->rq,
	if (mmio_flip->req)
		WARN_ON(__i915_wait_request(mmio_flip->req,
					    mmio_flip->crtc->reset_counter,
					    false, NULL, NULL));

	intel_do_mmio_flip(mmio_flip->crtc);

	i915_gem_request_unreference__unlocked(mmio_flip->rq);
	i915_gem_request_unreference__unlocked(mmio_flip->req);
	kfree(mmio_flip);
}

@@ -10809,7 +10809,7 @@ static int intel_queue_mmio_flip(struct drm_device *dev,
	if (mmio_flip == NULL)
		return -ENOMEM;

	mmio_flip->rq = i915_gem_request_reference(obj->last_write_req);
	mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
	mmio_flip->crtc = to_intel_crtc(crtc);

	INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
+2 −2
Original line number Diff line number Diff line
@@ -460,7 +460,7 @@ struct intel_pipe_wm {

struct intel_mmio_flip {
	struct work_struct work;
	struct drm_i915_gem_request *rq;
	struct drm_i915_gem_request *req;
	struct intel_crtc *crtc;
};

@@ -1366,7 +1366,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv,
		    struct drm_i915_file_private *file_priv);
void intel_queue_rps_boost_for_request(struct drm_device *dev,
				       struct drm_i915_gem_request *rq);
				       struct drm_i915_gem_request *req);
void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+8 −8
Original line number Diff line number Diff line
@@ -6842,34 +6842,34 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)

struct request_boost {
	struct work_struct work;
	struct drm_i915_gem_request *rq;
	struct drm_i915_gem_request *req;
};

static void __intel_rps_boost_work(struct work_struct *work)
{
	struct request_boost *boost = container_of(work, struct request_boost, work);

	if (!i915_gem_request_completed(boost->rq, true))
		gen6_rps_boost(to_i915(boost->rq->ring->dev), NULL);
	if (!i915_gem_request_completed(boost->req, true))
		gen6_rps_boost(to_i915(boost->req->ring->dev), NULL);

	i915_gem_request_unreference__unlocked(boost->rq);
	i915_gem_request_unreference__unlocked(boost->req);
	kfree(boost);
}

void intel_queue_rps_boost_for_request(struct drm_device *dev,
				       struct drm_i915_gem_request *rq)
				       struct drm_i915_gem_request *req)
{
	struct request_boost *boost;

	if (rq == NULL || INTEL_INFO(dev)->gen < 6)
	if (req == NULL || INTEL_INFO(dev)->gen < 6)
		return;

	boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
	if (boost == NULL)
		return;

	i915_gem_request_reference(rq);
	boost->rq = rq;
	i915_gem_request_reference(req);
	boost->req = req;

	INIT_WORK(&boost->work, __intel_rps_boost_work);
	queue_work(to_i915(dev)->wq, &boost->work);