Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3b2b7d4d authored by Hemant Kumar's avatar Hemant Kumar
Browse files

mhi: core: Add support to create uncached event ring



This adds support for selectively marking an event ring as uncached,
which helps debug cache-coherency issues.

Change-Id: I25a1310df0576496af9e1fb624ec8c54c670dfe8
Signed-off-by: default avatarHemant Kumar <hemantk@codeaurora.org>
parent 6063f4e4
Loading
Loading
Loading
Loading
+35 −3
Original line number Diff line number Diff line
@@ -234,6 +234,21 @@ static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
	return 0;
}

/*
 * MHI protocol requires the ring to be aligned to the ring length.
 * Uncached variant of mhi_alloc_aligned_ring(): backs the ring with
 * force-non-coherent (uncached) DMA memory so cache-coherency problems
 * can be ruled out on event rings marked "mhi,force-uncached".
 *
 * NOTE(review): the & ~(len - 1) rounding below only produces a
 * length-aligned address when @len is a power of two -- same implicit
 * assumption as the cached variant; confirm callers guarantee this.
 *
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
static int mhi_alloc_aligned_ring_uncached(
	struct mhi_controller *mhi_cntrl, struct mhi_ring *ring, u64 len)
{
	/* over-allocate by len - 1 so an aligned base always fits inside */
	ring->alloc_size = len + (len - 1);
	ring->pre_aligned = mhi_alloc_uncached(mhi_cntrl, ring->alloc_size,
					       &ring->dma_handle, GFP_KERNEL);
	if (!ring->pre_aligned)
		return -ENOMEM;

	/* round the bus address up to the next @len boundary */
	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
	/* CPU-visible base sits at the same offset into the buffer */
	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);
	return 0;
}

void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
	int i;
@@ -318,6 +333,10 @@ void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
			continue;

		ring = &mhi_event->ring;
		if (mhi_event->force_uncached)
			mhi_free_uncached(mhi_cntrl, ring->alloc_size,
					  ring->pre_aligned, ring->dma_handle);
		else
			mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
@@ -487,7 +506,12 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)

		ring->el_size = sizeof(struct mhi_tre);
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (mhi_event->force_uncached)
			ret = mhi_alloc_aligned_ring_uncached(mhi_cntrl, ring,
				ring->len);
		else
			ret = mhi_alloc_aligned_ring(mhi_cntrl, ring,
				ring->len);
		if (ret)
			goto error_alloc_er;

@@ -548,6 +572,10 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
		if (mhi_event->offload_ev)
			continue;

		if (mhi_event->force_uncached)
			mhi_free_uncached(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		else
			mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
@@ -1041,6 +1069,10 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
			continue;

		mhi_event->er_index = i++;

		mhi_event->force_uncached = of_property_read_bool(child,
				"mhi,force-uncached");

		ret = of_property_read_u32(child, "mhi,num-elements",
					   (u32 *)&mhi_event->ring.elements);
		if (ret)
+24 −0
Original line number Diff line number Diff line
@@ -667,6 +667,7 @@ struct mhi_event {
	struct mhi_controller *mhi_cntrl;
	struct mhi_tre last_cached_tre;
	u64 last_dev_rp;
	bool force_uncached;
};

struct mhi_chan {
@@ -882,6 +883,29 @@ static inline void mhi_free_coherent(struct mhi_controller *mhi_cntrl,
	dma_free_coherent(mhi_cntrl->dev, size, vaddr, dma_handle);
}

/*
 * Allocate @size bytes of DMA memory mapped uncached for the CPU
 * (DMA_ATTR_FORCE_NON_COHERENT), for debugging cache-coherency issues
 * on rings flagged with the "mhi,force-uncached" DT property.
 *
 * On success the bus address is returned through @dma_handle and the
 * allocation is added to the controller's alloc_size accounting.
 * Returns the CPU virtual address, or NULL on failure.
 */
static inline void *mhi_alloc_uncached(struct mhi_controller *mhi_cntrl,
				       size_t size,
				       dma_addr_t *dma_handle,
				       gfp_t gfp)
{
	void *buf = dma_alloc_attrs(mhi_cntrl->dev, size, dma_handle, gfp,
			DMA_ATTR_FORCE_NON_COHERENT);

	/* track outstanding allocation bytes for debug accounting */
	if (buf)
		atomic_add(size, &mhi_cntrl->alloc_size);

	return buf;
}
/*
 * Free memory obtained from mhi_alloc_uncached() and subtract it from the
 * controller's allocation accounting. @size, @vaddr and @dma_handle must
 * be the values from the matching allocation, and the same
 * DMA_ATTR_FORCE_NON_COHERENT attribute is passed so the DMA core tears
 * down the mapping it created.
 */
static inline void mhi_free_uncached(struct mhi_controller *mhi_cntrl,
				     size_t size,
				     void *vaddr,
				     dma_addr_t dma_handle)
{
	atomic_sub(size, &mhi_cntrl->alloc_size);
	dma_free_attrs(mhi_cntrl->dev, size, vaddr, dma_handle,
			DMA_ATTR_FORCE_NON_COHERENT);
}

static inline void *mhi_alloc_contig_coherent(
					struct mhi_controller *mhi_cntrl,
					size_t size, dma_addr_t *dma_handle,