Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 325eb94a authored by Zhi Wang, committed by Zhenyu Wang
Browse files

drm/i915/gvt: Move ring scan buffers into intel_vgpu_submission



Move the ring scan buffers into intel_vgpu_submission, since they are
part of the vGPU submission state.

Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
parent 8cf80a2e
Loading
Loading
Loading
Loading
+6 −5
Original line number Diff line number Diff line
@@ -2604,6 +2604,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
	void *shadow_ring_buffer_va;
	int ring_id = workload->ring_id;
@@ -2619,21 +2620,21 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
	gma_tail = workload->rb_start + workload->rb_tail;
	gma_top = workload->rb_start + guest_rb_size;

	if (workload->rb_len > vgpu->ring_scan_buffer_size[ring_id]) {
	if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
		void *p;

		/* realloc the new ring buffer if needed */
		p = krealloc(vgpu->ring_scan_buffer[ring_id], workload->rb_len,
		p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
				GFP_KERNEL);
		if (!p) {
			gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
			return -ENOMEM;
		}
		vgpu->ring_scan_buffer[ring_id] = p;
		vgpu->ring_scan_buffer_size[ring_id] = workload->rb_len;
		s->ring_scan_buffer[ring_id] = p;
		s->ring_scan_buffer_size[ring_id] = workload->rb_len;
	}

	shadow_ring_buffer_va = vgpu->ring_scan_buffer[ring_id];
	shadow_ring_buffer_va = s->ring_scan_buffer[ring_id];

	/* get shadow ring buffer va */
	workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
+13 −10
Original line number Diff line number Diff line
@@ -864,15 +864,18 @@ void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
	clean_workloads(vgpu, ALL_ENGINES);

	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		kfree(vgpu->ring_scan_buffer[i]);
		vgpu->ring_scan_buffer[i] = NULL;
		vgpu->ring_scan_buffer_size[i] = 0;
		struct intel_vgpu_submission *s = &vgpu->submission;

		kfree(s->ring_scan_buffer[i]);
		s->ring_scan_buffer[i] = NULL;
		s->ring_scan_buffer_size[i] = 0;
	}
}

#define RESERVE_RING_BUFFER_SIZE		((1 * PAGE_SIZE)/8)
int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

@@ -881,21 +884,21 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)

	/* each ring has a shadow ring buffer until vgpu destroyed */
	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		vgpu->ring_scan_buffer[i] =
		s->ring_scan_buffer[i] =
			kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL);
		if (!vgpu->ring_scan_buffer[i]) {
		if (!s->ring_scan_buffer[i]) {
			gvt_vgpu_err("fail to alloc ring scan buffer\n");
			goto out;
		}
		vgpu->ring_scan_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
		s->ring_scan_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
	}
	return 0;
out:
	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		if (vgpu->ring_scan_buffer_size[i]) {
			kfree(vgpu->ring_scan_buffer[i]);
			vgpu->ring_scan_buffer[i] = NULL;
			vgpu->ring_scan_buffer_size[i] = 0;
		if (s->ring_scan_buffer_size[i]) {
			kfree(s->ring_scan_buffer[i]);
			s->ring_scan_buffer[i] = NULL;
			s->ring_scan_buffer_size[i] = 0;
		}
	}
	return -ENOMEM;
+3 −4
Original line number Diff line number Diff line
@@ -150,6 +150,9 @@ struct intel_vgpu_submission {
	struct i915_gem_context *shadow_ctx;
	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
	/* 1/2K for each engine */
	void *ring_scan_buffer[I915_NUM_ENGINES];
	int ring_scan_buffer_size[I915_NUM_ENGINES];
};

struct intel_vgpu {
@@ -172,10 +175,6 @@ struct intel_vgpu {
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
	struct intel_vgpu_submission submission;
	/* 1/2K for each engine */
	void *ring_scan_buffer[I915_NUM_ENGINES];
	int ring_scan_buffer_size[I915_NUM_ENGINES];


#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
	struct {