Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e4b5be7d authored by Linux Build Service Account's avatar Linux Build Service Account
Browse files

Merge 80aaf7bb on remote branch

Change-Id: I693e4a3e1cc558a209058d30bcb13539de8127d2
parents e03dd833 80aaf7bb
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -808,6 +808,12 @@ static inline void mhi_trigger_resume(struct mhi_controller *mhi_cntrl)
	pm_wakeup_hard_event(&mhi_cntrl->mhi_dev->dev);
}

/*
 * Validate a device-supplied ring pointer: it must fall inside the ring's
 * IOMMU-mapped region [iommu_base, iommu_base + len) and be 16-byte
 * aligned (the element size used by the ring).
 */
static inline bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr)
{
	if (addr < ring->iommu_base || addr >= ring->iommu_base + ring->len)
		return false;

	return (addr % 16) == 0;
}

/* queue transfer buffer */
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
		void *buf, void *cb, size_t buf_len, enum MHI_FLAGS flags);
+15 −1
Original line number Diff line number Diff line
@@ -1385,6 +1385,13 @@ int mhi_process_tsync_ev_ring(struct mhi_controller *mhi_cntrl,
	int ret = 0;

	spin_lock_bh(&mhi_event->lock);
	if (!is_valid_ring_ptr(ev_ring, er_ctxt->rp)) {
		MHI_ERR(
			"Event ring rp points outside of the event ring or unalign rp %llx\n",
			er_ctxt->rp);
		spin_unlock_bh(&mhi_event->lock);
		return 0;
	}
	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
	if (ev_ring->rp == dev_rp) {
		spin_unlock_bh(&mhi_event->lock);
@@ -1477,8 +1484,15 @@ int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
	int result, ret = 0;

	spin_lock_bh(&mhi_event->lock);
	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
	if (!is_valid_ring_ptr(ev_ring, er_ctxt->rp)) {
		MHI_ERR(
			"Event ring rp points outside of the event ring or unalign rp %llx\n",
			er_ctxt->rp);
		spin_unlock_bh(&mhi_event->lock);
		return 0;
	}

	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
	if (ev_ring->rp == dev_rp) {
		spin_unlock_bh(&mhi_event->lock);
		goto exit_bw_scale_process;
+4 −4
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */

/*
@@ -283,7 +283,7 @@ static void drawobj_destroy_sparse(struct kgsl_drawobj *drawobj)
	}
}

static void drawobj_sync_timeline_fence_work(struct irq_work *work)
static void drawobj_sync_timeline_fence_work(struct work_struct *work)
{
	struct kgsl_drawobj_sync_event *event = container_of(work,
		struct kgsl_drawobj_sync_event, work);
@@ -303,7 +303,7 @@ static void drawobj_sync_timeline_fence_callback(struct dma_fence *f,
	 * removing the fence
	 */
	if (drawobj_sync_expire(event->device, event))
		irq_work_queue(&event->work);
		queue_work(kgsl_driver.mem_workqueue, &event->work);
}

static void syncobj_destroy(struct kgsl_drawobj *drawobj)
@@ -497,7 +497,7 @@ static int drawobj_add_sync_timeline(struct kgsl_device *device,
	event->device = device;
	event->context = NULL;
	event->fence = fence;
	init_irq_work(&event->work, drawobj_sync_timeline_fence_work);
	INIT_WORK(&event->work, drawobj_sync_timeline_fence_work);

	INIT_LIST_HEAD(&event->cb.node);

+3 −2
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2016-2019, 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef __KGSL_DRAWOBJ_H
@@ -169,8 +170,8 @@ struct kgsl_drawobj_sync_event {
	struct dma_fence *fence;
	/** @cb: Callback struct for KGSL_CMD_SYNCPOINT_TYPE_TIMELINE */
	struct dma_fence_cb cb;
	/** @work : irq worker for KGSL_CMD_SYNCPOINT_TYPE_TIMELINE */
	struct irq_work work;
	/** @work : work_struct for KGSL_CMD_SYNCPOINT_TYPE_TIMELINE */
	struct work_struct work;
};

/**
+24 −0
Original line number Diff line number Diff line
@@ -3153,6 +3153,30 @@ static int qseecom_prepare_unload_app(struct qseecom_dev_handle *data)
	pr_debug("prepare to unload app(%d)(%s), pending %d\n",
		data->client.app_id, data->client.app_name,
		data->client.unload_pending);

	/* For keymaster we never unload the TA, so there is no need to add
	 * it to the unload-pending list; once identified, just release the
	 * ion buffer and return.
	 */
	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
		if (data->client.dmabuf) {
			/* Each client will get same KM TA loaded handle but
			 * will allocate separate shared buffer during
			 * loading of TA, as client can't unload KM TA so we
			 * will only free out shared buffer and return early
			 * to avoid any ion buffer leak.
			 */
			qseecom_vaddr_unmap(data->client.sb_virt,
				data->client.sgt, data->client.attach,
				data->client.dmabuf);
			MAKE_NULL(data->client.sgt,
				data->client.attach, data->client.dmabuf);
		}
		__qseecom_free_tzbuf(&data->sglistinfo_shm);
		data->released = true;
		return 0;
	}

	if (data->client.unload_pending)
		return 0;
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Loading