Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 20d05927 authored by Alex Deucher
Browse files

Revert "drm/amdkfd: avoid HMM change cause circular lock"



This reverts commit 8dd69e69.

This depends on an HMM fix which is not upstream yet.

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 318c3f4b
Loading
Loading
Loading
Loading
+15 −17
Original line number Diff line number Diff line
@@ -1162,17 +1162,21 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
	int retval;
	struct mqd_manager *mqd_mgr;

	retval = 0;

	dqm_lock(dqm);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out;
		goto out_unlock;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		retval = allocate_sdma_queue(dqm, &q->sdma_id);
		if (retval)
			goto out;
			goto out_unlock;
		q->properties.sdma_queue_id =
			q->sdma_id / get_num_sdma_engines(dqm);
		q->properties.sdma_engine_id =
@@ -1183,9 +1187,6 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
	if (retval)
		goto out_deallocate_sdma_queue;

	/* Do init_mqd before dqm_lock(dqm) to avoid circular locking order:
	 * lock(dqm) -> bo::reserve
	 */
	mqd_mgr = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));

@@ -1193,7 +1194,6 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
		retval = -ENOMEM;
		goto out_deallocate_doorbell;
	}

	/*
	 * Eviction state logic: we only mark active queues as evicted
	 * to avoid the overhead of restoring inactive queues later
@@ -1202,7 +1202,9 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
		q->properties.is_evicted = (q->properties.queue_size > 0 &&
					    q->properties.queue_percent > 0 &&
					    q->properties.queue_address != 0);

	dqm->asic_ops.init_sdma_vm(dqm, q, qpd);

	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;
	retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
@@ -1210,8 +1212,6 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
	if (retval)
		goto out_deallocate_doorbell;

	dqm_lock(dqm);

	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;
	if (q->properties.is_active) {
@@ -1239,7 +1239,9 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
out_deallocate_sdma_queue:
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		deallocate_sdma_queue(dqm, q->sdma_id);
out:
out_unlock:
	dqm_unlock(dqm);

	return retval;
}

@@ -1402,6 +1404,8 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
			qpd->reset_wavefronts = true;
	}

	mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type
@@ -1412,9 +1416,6 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,

	dqm_unlock(dqm);

	/* Do uninit_mqd after dqm_unlock(dqm) to avoid circular locking */
	mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	return retval;

failed:
@@ -1636,11 +1637,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
		qpd->reset_wavefronts = false;
	}

	dqm_unlock(dqm);

	/* Lastly, free mqd resources.
	 * Do uninit_mqd() after dqm_unlock to avoid circular locking.
	 */
	/* lastly, free mqd resources */
	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
		mqd_mgr = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
@@ -1654,6 +1651,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
	}

out:
	dqm_unlock(dqm);
	return retval;
}