
Commit f6504cce authored by Ping Gao, committed by Zhenyu Wang

drm/i915/gvt: add some statistic routine for scheduler



Add statistics routines to record the time when a vGPU is
scheduled in/out and the time of its last ctx submission.

Signed-off-by: Ping Gao <ping.a.gao@intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent 91d0101a
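
Note: the fields introduced by this patch (sched_in_time, sched_out_time, sched_time in struct vgpu_sched_data) only record raw timestamps; how they get aggregated is left to later scheduler work. As a rough illustration, assuming accounting happens at the point a vGPU is switched out, the accumulation could look like the hypothetical helper below (not part of this commit):

/* Illustration only: fold one time slice into the per-vGPU busy time.
 * Uses the vgpu_sched_data fields added by this patch; the helper name
 * is hypothetical.
 */
static void vgpu_account_sched_time(struct vgpu_sched_data *vgpu_data)
{
	ktime_t delta;

	/* time the vGPU held the hardware during the last slice */
	delta = ktime_sub(vgpu_data->sched_out_time, vgpu_data->sched_in_time);
	vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta);
}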
drivers/gpu/drm/i915/gvt/gvt.h  +5 −0

@@ -138,6 +138,10 @@ struct intel_vgpu_display {
 	struct intel_vgpu_sbi sbi;
 };
 
+struct vgpu_sched_ctl {
+	int weight;
+};
+
 struct intel_vgpu {
 	struct intel_gvt *gvt;
 	int id;
@@ -160,6 +164,7 @@ struct intel_vgpu {
 	struct list_head workload_q_head[I915_NUM_ENGINES];
 	struct kmem_cache *workloads;
 	atomic_t running_workload_num;
+	ktime_t last_ctx_submit_time;
 	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
 	struct i915_gem_context *shadow_ctx;
 
drivers/gpu/drm/i915/gvt/handlers.c  +1 −0

@@ -1416,6 +1416,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 
 	execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
 	if (execlist->elsp_dwords.index == 3) {
+		vgpu->last_ctx_submit_time = ktime_get();
 		ret = intel_vgpu_submit_execlist(vgpu, ring_id);
 		if(ret)
 			gvt_vgpu_err("fail submit workload on ring %d\n",
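
Note: with the hunk above, every completed ELSP write stamps vgpu->last_ctx_submit_time. The patch only records the value; one plausible consumer (not shown in this diff) is an idleness check in the scheduler, sketched below with a hypothetical helper and threshold:

/* Illustration only: treat a vGPU as idle if it has not submitted a
 * context for longer than a chosen threshold. The helper name and the
 * threshold value are hypothetical, not part of this commit.
 */
#define VGPU_IDLE_THRESHOLD_NS	(500 * NSEC_PER_USEC)

static bool vgpu_seems_idle(struct intel_vgpu *vgpu)
{
	ktime_t since = ktime_sub(ktime_get(), vgpu->last_ctx_submit_time);

	return ktime_to_ns(since) > VGPU_IDLE_THRESHOLD_NS;
}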
drivers/gpu/drm/i915/gvt/sched_policy.c  +42 −25

@@ -47,11 +47,33 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 	return false;
 }
 
+struct vgpu_sched_data {
+	struct list_head list;
+	struct intel_vgpu *vgpu;
+
+	ktime_t sched_in_time;
+	ktime_t sched_out_time;
+	ktime_t sched_time;
+	ktime_t left_ts;
+	ktime_t allocated_ts;
+
+	struct vgpu_sched_ctl sched_ctl;
+};
+
+struct gvt_sched_data {
+	struct intel_gvt *gvt;
+	struct hrtimer timer;
+	unsigned long period;
+	struct list_head runq_head;
+};
+
 static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	enum intel_engine_id i;
 	struct intel_engine_cs *engine;
+	struct vgpu_sched_data *vgpu_data;
+	ktime_t cur_time;
 
 	/* no target to schedule */
 	if (!scheduler->next_vgpu)
@@ -77,6 +99,14 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 	gvt_dbg_sched("switch to next vgpu %d\n",
 			scheduler->next_vgpu->id);
 
+	cur_time = ktime_get();
+	if (scheduler->current_vgpu) {
+		vgpu_data = scheduler->current_vgpu->sched_data;
+		vgpu_data->sched_out_time = cur_time;
+	}
+	vgpu_data = scheduler->next_vgpu->sched_data;
+	vgpu_data->sched_in_time = cur_time;
+
 	/* switch current vgpu */
 	scheduler->current_vgpu = scheduler->next_vgpu;
 	scheduler->next_vgpu = NULL;
@@ -88,25 +118,12 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
 		wake_up(&scheduler->waitq[i]);
 }
 
-struct tbs_vgpu_data {
-	struct list_head list;
-	struct intel_vgpu *vgpu;
-	/* put some per-vgpu sched stats here */
-};
-
-struct tbs_sched_data {
-	struct intel_gvt *gvt;
-	struct hrtimer timer;
-	unsigned long period;
-	struct list_head runq_head;
-};
-
 /* in nanosecond */
 #define GVT_DEFAULT_TIME_SLICE 1000000
 
-static void tbs_sched_func(struct tbs_sched_data *sched_data)
+static void tbs_sched_func(struct gvt_sched_data *sched_data)
 {
-	struct tbs_vgpu_data *vgpu_data;
+	struct vgpu_sched_data *vgpu_data;
 
 	struct intel_gvt *gvt = sched_data->gvt;
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
@@ -130,7 +147,7 @@ static void tbs_sched_func(struct tbs_sched_data *sched_data)
 		if (pos == &sched_data->runq_head)
 			continue;
 
-		vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
+		vgpu_data = container_of(pos, struct vgpu_sched_data, list);
 		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
 			continue;
 
@@ -152,7 +169,7 @@ static void tbs_sched_func(struct tbs_sched_data *sched_data)
 
 void intel_gvt_schedule(struct intel_gvt *gvt)
 {
-	struct tbs_sched_data *sched_data = gvt->scheduler.sched_data;
+	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
 
 	mutex_lock(&gvt->lock);
 	tbs_sched_func(sched_data);
@@ -161,9 +178,9 @@ void intel_gvt_schedule(struct intel_gvt *gvt)
 
 static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
 {
-	struct tbs_sched_data *data;
+	struct gvt_sched_data *data;
 
-	data = container_of(timer_data, struct tbs_sched_data, timer);
+	data = container_of(timer_data, struct gvt_sched_data, timer);
 
 	intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);
 
@@ -177,7 +194,7 @@ static int tbs_sched_init(struct intel_gvt *gvt)
 	struct intel_gvt_workload_scheduler *scheduler =
 		&gvt->scheduler;
 
-	struct tbs_sched_data *data;
+	struct gvt_sched_data *data;
 
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
@@ -198,7 +215,7 @@ static void tbs_sched_clean(struct intel_gvt *gvt)
 {
 	struct intel_gvt_workload_scheduler *scheduler =
 		&gvt->scheduler;
-	struct tbs_sched_data *data = scheduler->sched_data;
+	struct gvt_sched_data *data = scheduler->sched_data;
 
 	hrtimer_cancel(&data->timer);
 
@@ -208,7 +225,7 @@ static void tbs_sched_clean(struct intel_gvt *gvt)
 
 static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
 {
-	struct tbs_vgpu_data *data;
+	struct vgpu_sched_data *data;
 
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
@@ -230,8 +247,8 @@ static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
 
 static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
 {
-	struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
-	struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
+	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
 
 	if (!list_empty(&vgpu_data->list))
 		return;
@@ -245,7 +262,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
 
 static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
 {
-	struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
 
 	list_del_init(&vgpu_data->list);
 }
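
Note: gvt_sched_data carries the hrtimer and period that drive the time-based scheduler; the diff shows tbs_timer_fn raising INTEL_GVT_REQUEST_SCHED, which in turn leads to intel_gvt_schedule() running tbs_sched_func(). The timer setup itself is outside this diff; a typical arming sequence would look roughly like the sketch below, which folds init and start into one hypothetical function and assumes the callback keeps the timer periodic:

/* Illustration only: arm the periodic scheduler tick. In the driver the
 * hrtimer is initialized and started in the policy init/start paths; this
 * combined helper is hypothetical.
 */
static void example_sched_timer_setup(struct gvt_sched_data *data)
{
	hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	data->timer.function = tbs_timer_fn;
	data->period = GVT_DEFAULT_TIME_SLICE;

	/* first expiry one period from now */
	hrtimer_start(&data->timer,
		      ktime_add_ns(ktime_get(), data->period),
		      HRTIMER_MODE_ABS);
}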