Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c4744e24 authored by Yong Zhao, committed by Oded Gabbay
Browse files

drm/amdkfd: Avoid submitting an unnecessary packet to HWS



v2:
Make queue mapping interfaces more consistent by passing unmap filter
parameters directly to execute_queues_cpsch, same as unmap_queues_cpsch.

Signed-off-by: Yong Zhao <yong.zhao@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
parent 60a00956
Loading
Loading
Loading
Loading
+14 −12
Original line number Original line Diff line number Diff line
@@ -44,7 +44,9 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 					struct queue *q,
 					struct qcm_process_device *qpd);
 
-static int execute_queues_cpsch(struct device_queue_manager *dqm);
+static int execute_queues_cpsch(struct device_queue_manager *dqm,
+				enum kfd_unmap_queues_filter filter,
+				uint32_t filter_param);
 static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 				enum kfd_unmap_queues_filter filter,
 				uint32_t filter_param);
@@ -729,7 +731,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
 	init_interrupts(dqm);
 
 	mutex_lock(&dqm->lock);
-	execute_queues_cpsch(dqm);
+	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 	mutex_unlock(&dqm->lock);
 
 	return 0;
@@ -775,7 +777,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
 	list_add(&kq->list, &qpd->priv_queue_list);
 	dqm->queue_count++;
 	qpd->is_debug = true;
-	execute_queues_cpsch(dqm);
+	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 	mutex_unlock(&dqm->lock);
 
 	return 0;
@@ -786,12 +788,10 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 					struct qcm_process_device *qpd)
 {
 	mutex_lock(&dqm->lock);
-	/* here we actually preempt the DIQ */
-	unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
 	list_del(&kq->list);
 	dqm->queue_count--;
 	qpd->is_debug = false;
-	execute_queues_cpsch(dqm);
+	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
 	/*
 	 * Unconditionally decrement this counter, regardless of the queue's
 	 * type.
@@ -850,7 +850,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 	list_add(&q->list, &qpd->queues_list);
 	if (q->properties.is_active) {
 		dqm->queue_count++;
-		retval = execute_queues_cpsch(dqm);
+		retval = execute_queues_cpsch(dqm,
+				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 	}
 
 	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
@@ -960,14 +961,15 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 }
 
 /* dqm->lock mutex has to be locked before calling this function */
-static int execute_queues_cpsch(struct device_queue_manager *dqm)
+static int execute_queues_cpsch(struct device_queue_manager *dqm,
+				enum kfd_unmap_queues_filter filter,
+				uint32_t filter_param)
 {
 	int retval;
 
-	retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
-			0);
+	retval = unmap_queues_cpsch(dqm, filter, filter_param);
 	if (retval) {
-		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption");
+		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
 		return retval;
 	}
 
@@ -1013,7 +1015,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 	if (q->properties.is_active)
 		dqm->queue_count--;
 
-	execute_queues_cpsch(dqm);
+	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 
 	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);