Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d5f60c07 authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "arm: dma-mapping: map_page map to nearest page"

parents 48f67e2f f898a884
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -133,7 +133,7 @@ if ARM_DMA_USE_IOMMU
config ARM_DMA_IOMMU_ALIGNMENT
	int "Maximum PAGE_SIZE order of alignment for DMA IOMMU buffers"
	range 4 9
	default 9
	help
	  DMA mapping framework by default aligns all buffers to the smallest
	  PAGE_SIZE order which is greater than or equal to the requested buffer
+15 −0
Original line number Diff line number Diff line
@@ -92,6 +92,21 @@ config DEBUG_USER
	      8 - SIGSEGV faults
	     16 - SIGBUS faults

config ARCH_SUPPORTS_DEBUG_PAGEALLOC
	def_bool y
	depends on FORCE_PAGES

config FORCE_PAGES
	bool "Force lowmem to be mapped with 4K pages"
        help
          There are some advanced debug features that can only be done when
          memory is mapped with pages instead of sections. Enable this option
          to always map lowmem pages with pages. This may have a performance
          cost due to increased TLB pressure.

          If unsure say N.


# These options are only for real kernel hackers who want to get their hands dirty.
config DEBUG_LL
	bool "Kernel low-level debugging functions (read help!)"
+16 −0
Original line number Diff line number Diff line
@@ -178,10 +178,26 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void __dma_map_area(const void *addr, size_t size, int dir);
extern void __dma_unmap_area(const void *addr, size_t size, int dir);
extern void dmac_inv_range(const void *start, const void *end);
extern void dmac_clean_range(const void *start, const void *end);
extern void dmac_flush_range(const void *, const void *);

/*
 * Length-based wrapper around dmac_inv_range(): converts a
 * (base, length) pair into the (start, end) range form that
 * the dmac_* entry points expect.
 */
static inline void __dma_inv_area(const void *start, size_t len)
{
	const void *end = start + len;

	dmac_inv_range(start, end);
}

/*
 * Length-based wrapper around dmac_clean_range(): forwards the
 * region [start, start + len) as an explicit (start, end) pair.
 */
static inline void __dma_clean_area(const void *start, size_t len)
{
	const void *end = start + len;

	dmac_clean_range(start, end);
}

/*
 * Length-based wrapper around dmac_flush_range(): forwards the
 * region [start, start + len) as an explicit (start, end) pair.
 */
static inline void __dma_flush_area(const void *start, size_t len)
{
	const void *end = start + len;

	dmac_flush_range(start, end);
}
#endif

/*
+1 −0
Original line number Diff line number Diff line
@@ -15,6 +15,7 @@
struct dma_iommu_mapping {
	/* iommu specific data */
	struct iommu_domain	*domain;
	const struct dma_map_ops *ops;

	unsigned long		**bitmaps;	/* array of bitmaps */
	unsigned int		nr_bitmaps;	/* nr of elements in array */
+5 −0
Original line number Diff line number Diff line
@@ -161,6 +161,11 @@ static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
/*
 * Single-cache build: resolve each generic dmac_* name to the
 * _CACHE-prefixed implementation via __glue() token pasting.
 * Formatting made consistent: space after the comma in every
 * __glue() invocation, tabs (not spaces) for column alignment.
 */
#define dmac_flush_range		__glue(_CACHE, _dma_flush_range)
#define dmac_inv_range			__glue(_CACHE, _dma_inv_range)
#define dmac_clean_range		__glue(_CACHE, _dma_clean_range)
#define dmac_map_area			__glue(_CACHE, _dma_map_area)
#define dmac_unmap_area			__glue(_CACHE, _dma_unmap_area)

#define __dma_map_area			dmac_map_area
#define __dma_unmap_area		dmac_unmap_area
#endif

#endif
Loading