Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e6245a54 authored by Harshdeep Dhatt
Browse files

msm: kgsl: Add the f2h daemon



This daemon is a bottom half handler for asynchronous packets
originating from GMU for example TS_RETIRE, CONTEXT_BAD and
F2H_MSG_GMU_CNTR_REGISTER. The way it works is: the interrupt
handler snoops all the packets coming from GMU, processes the
acks inline but copies the aforementioned packets to a queue
and wakes up the daemon to process those.

Change-Id: Ib2c9d470a9c4c64bc44409015201adaff398edd8
Signed-off-by: Harshdeep Dhatt <hdhatt@codeaurora.org>
parent 473b816c
Loading
Loading
Loading
Loading
+74 −1
Original line number Diff line number Diff line
@@ -15,6 +15,9 @@

#define HFI_QUEUE_MAX (HFI_QUEUE_DEFAULT_CNT + HFI_QUEUE_DISPATCH_MAX_CNT)

/* Use a kmem cache to speed up allocations for f2h packets */
static struct kmem_cache *f2h_cache;

#define DEFINE_QHDR(gmuaddr, id, prio) \
	{\
		.status = 1, \
@@ -145,10 +148,38 @@ static void a6xx_receive_ack_async(struct adreno_device *adreno_dev, void *rcvd)
			MSG_HDR_GET_SEQNUM(waiters[i]));
}

/**
 * struct f2h_packet - One buffered GMU fw-to-host (f2h) message
 *
 * Allocated from f2h_cache with GFP_ATOMIC by add_f2h_packet() and
 * pushed onto the lock-free a6xx_hwsched_hfi::f2h_msglist, where the
 * f2h daemon (hfi_f2h_main) consumes and frees it.
 */
struct f2h_packet {
	/** @rcvd: the contents of the fw to host packet */
	u32 rcvd[MAX_RCVD_SIZE];
	/** @node: To add to the fw to host msg list */
	struct llist_node node;
};

/*
 * add_f2h_packet - Buffer a fw-to-host packet for the f2h daemon
 * @adreno_dev: Handle to the adreno device
 * @msg: The received packet; msg[0] is the HFI header word
 *
 * Copies @msg into a freshly allocated f2h_packet and pushes it onto
 * the lock-free f2h_msglist. Runs in atomic (interrupt) context, hence
 * GFP_ATOMIC; failures are logged or silently dropped rather than
 * propagated.
 */
static void add_f2h_packet(struct adreno_device *adreno_dev, u32 *msg)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct a6xx_hwsched_hfi *hfi = to_a6xx_hwsched_hfi(adreno_dev);
	struct f2h_packet *pkt;
	u32 size = MSG_HDR_GET_SIZE(msg[0]) << 2;

	/*
	 * A packet larger than the buffer cannot be copied intact, and
	 * queueing a truncated copy would hand a corrupt payload to the
	 * daemon. Log and drop it instead (before allocating).
	 * Note: %u matches u32 and %zu matches size_t; %d for either is
	 * a format-specifier mismatch.
	 */
	if (size > sizeof(pkt->rcvd)) {
		dev_err_ratelimited(&gmu->pdev->dev,
			"f2h packet too big: %u allowed: %zu\n",
			size, sizeof(pkt->rcvd));
		return;
	}

	pkt = kmem_cache_alloc(f2h_cache, GFP_ATOMIC);
	if (!pkt)
		return;

	memcpy(pkt->rcvd, msg, size);

	llist_add(&pkt->node, &hfi->f2h_msglist);
}

static void process_msgq_irq(struct adreno_device *adreno_dev)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	u32 rcvd[MAX_RCVD_SIZE];
	struct a6xx_hwsched_hfi *hfi = to_a6xx_hwsched_hfi(adreno_dev);

	while (a6xx_hfi_queue_read(gmu, HFI_MSG_ID, rcvd, sizeof(rcvd)) > 0) {

@@ -157,8 +188,12 @@ static void process_msgq_irq(struct adreno_device *adreno_dev)
		 * because hfi sending thread waits for completion while
		 * holding the device mutex
		 */
		if (MSG_HDR_GET_TYPE(rcvd[0]) == HFI_MSG_ACK)
		if (MSG_HDR_GET_TYPE(rcvd[0]) == HFI_MSG_ACK) {
			a6xx_receive_ack_async(adreno_dev, rcvd);
		} else {
			add_f2h_packet(adreno_dev, rcvd);
			wake_up_interruptible(&hfi->f2h_wq);
		}
	}
}

@@ -749,6 +784,36 @@ int a6xx_hwsched_cp_init(struct adreno_device *adreno_dev)
	return ret;
}

static int hfi_f2h_main(void *arg)
{
	struct adreno_device *adreno_dev = arg;
	struct a6xx_hwsched_hfi *hfi = to_a6xx_hwsched_hfi(adreno_dev);
	struct llist_node *list;
	struct f2h_packet *pkt, *tmp;

	while (!kthread_should_stop()) {
		wait_event_interruptible(hfi->f2h_wq,
			(!llist_empty(&hfi->f2h_msglist) &&
			!kthread_should_stop()));

		if (kthread_should_stop())
			break;

		list = llist_del_all(&hfi->f2h_msglist);

		list = llist_reverse_order(list);

		llist_for_each_entry_safe(pkt, tmp, list, node) {
			if (MSG_HDR_GET_ID(pkt->rcvd[0]) == F2H_MSG_TS_RETIRE)
				adreno_hwsched_trigger(adreno_dev);

			kmem_cache_free(f2h_cache, pkt);
		}
	}

	return 0;
}

int a6xx_hwsched_hfi_probe(struct adreno_device *adreno_dev)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
@@ -768,6 +833,14 @@ int a6xx_hwsched_hfi_probe(struct adreno_device *adreno_dev)

	INIT_LIST_HEAD(&hw_hfi->msglist);

	init_llist_head(&hw_hfi->f2h_msglist);

	init_waitqueue_head(&hw_hfi->f2h_wq);

	hw_hfi->f2h_task = kthread_run(hfi_f2h_main, adreno_dev, "gmu_f2h");

	f2h_cache = KMEM_CACHE(f2h_packet, 0);

	return 0;
}

+6 −1
Original line number Diff line number Diff line
@@ -113,7 +113,12 @@ struct a6xx_hwsched_hfi {
	rwlock_t msglock;
	/** @msglist: List of un-ACKed hfi packets */
	struct list_head msglist;

	/** @f2h_task: Task for processing gmu fw to host packets */
	struct task_struct *f2h_task;
	/** @f2h_msglist: List of gmu fw to host packets */
	struct llist_head f2h_msglist;
	/** @f2h_wq: Waitqueue for the f2h_task */
	wait_queue_head_t f2h_wq;
};

struct kgsl_drawobj_cmd;
+1 −1
Original line number Diff line number Diff line
@@ -526,7 +526,7 @@ static void hwsched_issuecmds(struct adreno_device *adreno_dev)
		hwsched_handle_jobs(adreno_dev, i);
}

static void adreno_hwsched_trigger(struct adreno_device *adreno_dev)
void adreno_hwsched_trigger(struct adreno_device *adreno_dev)
{
	struct adreno_hwsched *hwsched = to_hwsched(adreno_dev);

+8 −0
Original line number Diff line number Diff line
@@ -30,6 +30,14 @@ enum adreno_hwsched_flags {
	ADRENO_HWSCHED_POWER = 0,
};

/**
 * adreno_hwsched_trigger - Function to schedule the hwsched thread
 * @adreno_dev: A handle to adreno device
 *
 * Schedule the hw dispatcher for retiring and submitting command objects
 */
void adreno_hwsched_trigger(struct adreno_device *adreno_dev);

/**
 * adreno_hwsched_queue_cmds() - Queue a new draw object in the context
 * @dev_priv: Pointer to the device private struct