Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bf1d7d92 authored by Harshdeep Dhatt
Browse files

msm: kgsl: Add asynchronous processing of acks



This adds the initial bits of handling acks asynchronously.
It is assumed that there is only one outstanding ack since we
wait for ack while holding the device mutex.
The GMU driver handles acks inline up to and including the start_msg hfi;
beyond that point, each hfi is acked asynchronously.

Change-Id: I15e930d22f1154db377599a0c0409640821fb868
Signed-off-by: Harshdeep Dhatt <hdhatt@codeaurora.org>
parent fbe170ed
Loading
Loading
Loading
Loading
+23 −12
Original line number Diff line number Diff line
@@ -1688,7 +1688,7 @@ static void a6xx_gmu_pwrctrl_suspend(struct adreno_device *adreno_dev)
 * a6xx_gmu_notify_slumber() - initiate request to GMU to prepare to slumber
 * @device: Pointer to KGSL device
 */
int a6xx_gmu_notify_slumber(struct adreno_device *adreno_dev)
static int a6xx_gmu_notify_slumber(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
@@ -2639,7 +2639,6 @@ int a6xx_gmu_probe(struct kgsl_device *device,
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct a6xx_hfi *hfi = &gmu->hfi;
	struct resource *res;
	int ret;

@@ -2706,14 +2705,6 @@ int a6xx_gmu_probe(struct kgsl_device *device,

	device->gmu_core.dev_ops = &a6xx_gmudev;

	/* Initialize HFI and GMU interrupts */
	hfi->irq = kgsl_request_irq(gmu->pdev, "kgsl_hfi_irq",
		a6xx_hfi_irq_handler, device);
	if (hfi->irq < 0) {
		ret = hfi->irq;
		goto error;
	}

	gmu->irq = kgsl_request_irq(gmu->pdev, "kgsl_gmu_irq",
		a6xx_gmu_irq_handler, device);

@@ -2725,7 +2716,6 @@ int a6xx_gmu_probe(struct kgsl_device *device,
	/* Don't enable GMU interrupts until GMU started */
	/* We cannot use irq_disable because it writes registers */
	disable_irq(gmu->irq);
	disable_irq(gmu->hfi.irq);

	return 0;

@@ -3367,8 +3357,29 @@ int a6xx_gmu_restart(struct kgsl_device *device)
static int a6xx_gmu_bind(struct device *dev, struct device *master, void *data)
{
	struct kgsl_device *device = dev_get_drvdata(master);
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(ADRENO_DEVICE(device));
	struct a6xx_hfi *hfi = &gmu->hfi;
	int ret;

	return a6xx_gmu_probe(device, to_platform_device(dev));
	ret = a6xx_gmu_probe(device, to_platform_device(dev));
	if (ret)
		return ret;

	/*
	 * a6xx_gmu_probe() is also called by hwscheduling probe. However,
	 * since HFI interrupts are handled differently in hwscheduling, move
	 * out HFI interrupt setup from a6xx_gmu_probe().
	 */
	hfi->irq = kgsl_request_irq(gmu->pdev, "kgsl_hfi_irq",
		a6xx_hfi_irq_handler, device);
	if (hfi->irq < 0) {
		a6xx_gmu_remove(device);
		return hfi->irq;
	}

	disable_irq(gmu->hfi.irq);

	return 0;
}

static void a6xx_gmu_unbind(struct device *dev, struct device *master,
+0 −8
Original line number Diff line number Diff line
@@ -446,14 +446,6 @@ void a6xx_gmu_oob_clear(struct kgsl_device *device, enum oob_request oob);
 */
int a6xx_gmu_wait_for_lowest_idle(struct adreno_device *adreno_dev);

/**
 * a6xx_gmu_notify_slumber - Send NOTIFY_SLUMBER hfi to gmu
 * @adreno_dev: Pointer to the adreno device
 *
 * Return: 0 on success or negative error on failure
 */
int a6xx_gmu_notify_slumber(struct adreno_device *adreno_dev);

/**
 * a6xx_gmu_wait_for_idle - Wait for gmu to become idle
 * @adreno_dev: Pointer to the adreno device
+8 −12
Original line number Diff line number Diff line
@@ -21,9 +21,6 @@
#define HOST_QUEUE_START_ADDR(hfi_mem, i) \
	((hfi_mem)->hostptr + HFI_QUEUE_OFFSET(i))

static int a6xx_hfi_process_queue(struct a6xx_gmu_device *gmu,
		uint32_t queue_idx, struct pending_cmd *ret_cmd);

struct a6xx_hfi *to_a6xx_hfi(struct adreno_device *adreno_dev)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
@@ -220,9 +217,6 @@ int a6xx_hfi_init(struct adreno_device *adreno_dev)
	return PTR_ERR_OR_ZERO(hfi->hfi_mem);
}

#define HDR_CMP_SEQNUM(out_hdr, in_hdr) \
	(MSG_HDR_GET_SEQNUM(out_hdr) == MSG_HDR_GET_SEQNUM(in_hdr))

int a6xx_receive_ack_cmd(struct a6xx_gmu_device *gmu, void *rcvd,
	struct pending_cmd *ret_cmd)
{
@@ -286,8 +280,8 @@ static int poll_gmu_reg(struct adreno_device *adreno_dev,
	return -ETIMEDOUT;
}

int a6xx_hfi_send_cmd(struct adreno_device *adreno_dev, uint32_t queue_idx,
		void *data, struct pending_cmd *ret_cmd)
static int a6xx_hfi_send_cmd_wait_inline(struct adreno_device *adreno_dev,
	uint32_t queue_idx, void *data, struct pending_cmd *ret_cmd)
{
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -336,7 +330,7 @@ int a6xx_hfi_send_generic_req(struct adreno_device *adreno_dev,

	memset(&ret_cmd, 0, sizeof(ret_cmd));

	rc = a6xx_hfi_send_cmd(adreno_dev, queue, cmd, &ret_cmd);
	rc = a6xx_hfi_send_cmd_wait_inline(adreno_dev, queue, cmd, &ret_cmd);

	if (!rc && ret_cmd.results[2] == HFI_ACK_ERROR) {
		struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
@@ -378,7 +372,8 @@ static int a6xx_hfi_get_fw_version(struct adreno_device *adreno_dev,

	memset(&ret_cmd, 0, sizeof(ret_cmd));

	rc = a6xx_hfi_send_cmd(adreno_dev, HFI_CMD_ID, &cmd, &ret_cmd);
	rc = a6xx_hfi_send_cmd_wait_inline(adreno_dev, HFI_CMD_ID, &cmd,
			&ret_cmd);
	if (rc)
		return rc;

@@ -473,7 +468,8 @@ static int a6xx_hfi_send_get_value(struct adreno_device *adreno_dev,

	cmd->hdr = CMD_MSG_HDR(H2F_MSG_GET_VALUE, sizeof(*cmd));

	rc = a6xx_hfi_send_cmd(adreno_dev, HFI_CMD_ID, cmd, &ret_cmd);
	rc = a6xx_hfi_send_cmd_wait_inline(adreno_dev, HFI_CMD_ID, cmd,
			&ret_cmd);
	if (rc)
		return rc;

@@ -535,7 +531,7 @@ static void a6xx_hfi_v1_receiver(struct a6xx_gmu_device *gmu, uint32_t *rcvd,
	}
}

static int a6xx_hfi_process_queue(struct a6xx_gmu_device *gmu,
int a6xx_hfi_process_queue(struct a6xx_gmu_device *gmu,
		uint32_t queue_idx, struct pending_cmd *ret_cmd)
{
	uint32_t rcvd[MAX_RCVD_SIZE];
+17 −10
Original line number Diff line number Diff line
@@ -156,6 +156,12 @@ struct hfi_queue_table {
#define MSG_HDR_GET_TYPE(hdr) (((hdr) >> 16) & 0xF)
#define MSG_HDR_GET_SEQNUM(hdr) (((hdr) >> 20) & 0xFFF)

#define MSG_HDR_GET_SIZE(hdr) (((hdr) >> 8) & 0xFF)
#define MSG_HDR_GET_SEQNUM(hdr) (((hdr) >> 20) & 0xFFF)

#define HDR_CMP_SEQNUM(out_hdr, in_hdr) \
	(MSG_HDR_GET_SEQNUM(out_hdr) == MSG_HDR_GET_SEQNUM(in_hdr))

#define MSG_HDR_SET_SEQNUM(hdr, num) \
	(((hdr) & 0xFFFFF) | ((num) << 20))

@@ -514,12 +520,14 @@ struct hfi_context_bad_reply_cmd {
/**
 * struct pending_cmd - data structure to track outstanding HFI
 *	command messages
 * @sent_hdr: copy of outgoing header for response comparison
 * @results: the payload of received return message (ACK)
 */
struct pending_cmd {
	uint32_t sent_hdr;
	uint32_t results[MAX_RCVD_SIZE];
	/** @sent_hdr: Header of the un-ack'd hfi packet */
	u32 sent_hdr;
	/** @results: Array to store the ack packet */
	u32 results[MAX_RCVD_SIZE];
	/** @complete: Completion to signal hfi ack has been received */
	struct completion complete;
};

/**
@@ -677,14 +685,13 @@ int a6xx_hfi_send_generic_req(struct adreno_device *adreno_dev,
int a6xx_hfi_send_bcl_feature_ctrl(struct adreno_device *adreno_dev);

/*
 * a6xx_hfi_send_cmd - Send and wait for a hfi packet
 * @adreno_dev: Pointer to the adreno device
 * @queue_idx: Destination queue id
 * @data: Pointer to hfi packet header and data
 * a6xx_hfi_process_queue - Check hfi queue for messages from gmu
 * @gmu: Pointer to the a6xx gmu device
 * @queue_idx: queue id to be processed
 * @ret_cmd: Container for data needed for waiting for the ack
 *
 * Return: 0 on success or negative error on failure
 */
int a6xx_hfi_send_cmd(struct adreno_device *adreno_dev, u32 queue_idx,
	void *data, struct pending_cmd *ret_cmd);
int a6xx_hfi_process_queue(struct a6xx_gmu_device *gmu,
	u32 queue_idx, struct pending_cmd *ret_cmd);
#endif
+111 −2
Original line number Diff line number Diff line
@@ -177,6 +177,24 @@ static void a6xx_hwsched_active_count_put(struct adreno_device *adreno_dev)
	wake_up(&device->active_cnt_wq);
}

static int a6xx_hwsched_notify_slumber(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct hfi_prep_slumber_cmd req;

	req.hdr = CMD_MSG_HDR(H2F_MSG_PREPARE_SLUMBER, sizeof(req));
	req.freq = gmu->hfi.dcvs_table.gpu_level_num -
			pwr->default_pwrlevel - 1;
	req.bw = pwr->pwrlevels[pwr->default_pwrlevel].bus_freq;

	/* Disable the power counter so that the GMU is not busy */
	gmu_core_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);

	return a6xx_hfi_send_cmd_async(adreno_dev, &req);

}
static int a6xx_hwsched_gmu_power_off(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -191,7 +209,7 @@ static int a6xx_hwsched_gmu_power_off(struct adreno_device *adreno_dev)
	if (ret)
		goto error;

	ret = a6xx_gmu_notify_slumber(adreno_dev);
	ret = a6xx_hwsched_notify_slumber(adreno_dev);
	if (ret)
		goto error;

@@ -549,12 +567,92 @@ static int a6xx_hwsched_active_count_get(struct adreno_device *adreno_dev)
	return ret;
}

static int a6xx_hwsched_dcvs_set(struct adreno_device *adreno_dev,
		int gpu_pwrlevel, int bus_level)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct a6xx_gmu_device *gmu = to_a6xx_gmu(adreno_dev);
	struct hfi_dcvstable_cmd *table = &gmu->hfi.dcvs_table;
	struct hfi_gx_bw_perf_vote_cmd req = {
		.hdr = CMD_MSG_HDR(H2F_MSG_GX_BW_PERF_VOTE, sizeof(req)),
		.ack_type = DCVS_ACK_BLOCK,
		.freq = INVALID_DCVS_IDX,
		.bw = INVALID_DCVS_IDX,
	};
	int ret = 0;

	if (!test_bit(GMU_PRIV_HFI_STARTED, &gmu->flags))
		return 0;

	/* Do not set to XO and lower GPU clock vote from GMU */
	if ((gpu_pwrlevel != INVALID_DCVS_IDX) &&
			(gpu_pwrlevel >= table->gpu_level_num - 1)) {
		dev_err(&gmu->pdev->dev, "Invalid gpu dcvs request: %d\n",
			gpu_pwrlevel);
		return -EINVAL;
	}

	if (gpu_pwrlevel < table->gpu_level_num - 1)
		req.freq = table->gpu_level_num - gpu_pwrlevel - 1;

	if (bus_level < pwr->ddr_table_count && bus_level > 0)
		req.bw = bus_level;

	/* GMU will vote for slumber levels through the sleep sequence */
	if ((req.freq == INVALID_DCVS_IDX) && (req.bw == INVALID_DCVS_IDX))
		return 0;

	ret = a6xx_hfi_send_cmd_async(adreno_dev, &req);

	if (ret)
		dev_err_ratelimited(&gmu->pdev->dev,
			"Failed to set GPU perf idx %d, bw idx %d\n",
			req.freq, req.bw);

	return ret;
}

static int a6xx_hwsched_clock_set(struct adreno_device *adreno_dev,
	u32 pwrlevel)
{
	return a6xx_hwsched_dcvs_set(adreno_dev, pwrlevel, INVALID_DCVS_IDX);
}

static int a6xx_hwsched_bus_set(struct adreno_device *adreno_dev, int buslevel,
	u32 ab)
{
	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	int ret = 0;

	if (buslevel != pwr->cur_buslevel) {
		ret = a6xx_hwsched_dcvs_set(adreno_dev, INVALID_DCVS_IDX,
				buslevel);
		if (ret)
			return ret;

		pwr->cur_buslevel = buslevel;

		trace_kgsl_buslevel(device, pwr->active_pwrlevel, buslevel);
	}

	if (ab != pwr->cur_ab) {
		icc_set_bw(pwr->icc_path, MBps_to_icc(ab), 0);
		pwr->cur_ab = ab;
	}

	return ret;
}

const struct adreno_power_ops a6xx_hwsched_power_ops = {
	.first_open = a6xx_hwsched_first_open,
	.last_close = a6xx_hwsched_power_off,
	.active_count_get = a6xx_hwsched_active_count_get,
	.active_count_put = a6xx_hwsched_active_count_put,
	.touch_wakeup = a6xx_hwsched_touch_wakeup,
	.gpu_clock_set = a6xx_hwsched_clock_set,
	.gpu_bus_set = a6xx_hwsched_bus_set,
};

int a6xx_hwsched_probe(struct platform_device *pdev,
@@ -589,8 +687,19 @@ static int a6xx_hwsched_bind(struct device *dev, struct device *master,
	void *data)
{
	struct kgsl_device *device = dev_get_drvdata(master);
	int ret;

	return a6xx_gmu_probe(device, to_platform_device(dev));
	ret = a6xx_gmu_probe(device, to_platform_device(dev));
	if (ret)
		goto error;

	ret = a6xx_hwsched_hfi_probe(ADRENO_DEVICE(device));

error:
	if (ret)
		a6xx_gmu_remove(device);

	return ret;
}

static void a6xx_hwsched_unbind(struct device *dev, struct device *master,
Loading