
Commit a57cb1c1 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge more updates from Andrew Morton:

 - a few misc things

 - kexec updates

 - DMA-mapping updates to better support networking DMA operations (a
   usage sketch follows this list)

 - IPC updates

 - various MM changes to improve DAX fault handling

 - lots of radix-tree changes, mainly to the test suite. All leading up
   to reimplementing the IDA/IDR code to be a wrapper layer over the
   radix-tree. However the final trigger-pulling patch is held off for
   4.11.
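
Of the DMA-mapping updates, the change that recurs in every diff below is
honoring the new DMA_ATTR_SKIP_CPU_SYNC attribute in the arch map paths. A
minimal sketch of the intended usage, assuming a 4.10-era network driver and
the dma_map_page_attrs()/dma_sync_single_range_for_cpu() helpers (rx_map_page,
rx_complete and the length handling are illustrative, not taken from this
merge):

#include <linux/dma-mapping.h>

/*
 * Hypothetical receive-buffer recycling path: map the page once without
 * the CPU cache sync, then sync only the bytes the device actually wrote.
 */
static dma_addr_t rx_map_page(struct device *dev, struct page *page)
{
	return dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
				  DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}

static void rx_complete(struct device *dev, dma_addr_t dma, unsigned int len)
{
	/* Make only the device-written region visible to the CPU. */
	dma_sync_single_range_for_cpu(dev, dma, 0, len, DMA_FROM_DEVICE);
}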

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (114 commits)
  radix tree test suite: delete unused rcupdate.c
  radix tree test suite: add new tag check
  radix-tree: ensure counts are initialised
  radix tree test suite: cache recently freed objects
  radix tree test suite: add some more functionality
  idr: reduce the number of bits per level from 8 to 6
  rxrpc: abstract away knowledge of IDR internals
  tpm: use idr_find(), not idr_find_slowpath()
  idr: add ida_is_empty
  radix tree test suite: check multiorder iteration
  radix-tree: fix replacement for multiorder entries
  radix-tree: add radix_tree_split_preload()
  radix-tree: add radix_tree_split
  radix-tree: add radix_tree_join
  radix-tree: delete radix_tree_range_tag_if_tagged()
  radix-tree: delete radix_tree_locate_item()
  radix-tree: improve multiorder iterators
  btrfs: fix race in btrfs_free_dummy_fs_info()
  radix-tree: improve dump output
  radix-tree: make radix_tree_find_next_bit more useful
  ...
parents cf1b3341 e1e14ab8
Documentation/filesystems/Locking +1 −1
@@ -556,7 +556,7 @@ till "end_pgoff". ->map_pages() is called with page table locked and must
 not block.  If it's not possible to reach a page without blocking,
 filesystem should skip it. Filesystem should use do_set_pte() to setup
 page table entry. Pointer to entry associated with the page is passed in
-"pte" field in fault_env structure. Pointers to entries for other offsets
+"pte" field in vm_fault structure. Pointers to entries for other offsets
 should be calculated relative to "pte".
 
 	->page_mkwrite() is called when a previously read-only pte is
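
The documentation fix above tracks this series' rename of struct fault_env to
struct vm_fault in the fault paths. As a hedged one-line illustration of the
"calculated relative to pte" rule (field names from the 4.10-era struct
vm_fault; the variable pgoff is assumed, not part of this diff):

	/* Entry for another offset inside the start_pgoff..end_pgoff window. */
	pte_t *entry = vmf->pte + (pgoff - vmf->pgoff);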
arch/arc/mm/dma.c +4 −1
@@ -158,7 +158,10 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
 		unsigned long attrs)
 {
 	phys_addr_t paddr = page_to_phys(page) + offset;
-	_dma_cache_sync(paddr, size, dir);
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		_dma_cache_sync(paddr, size, dir);
+
 	return plat_phys_to_dma(dev, paddr);
 }
 
arch/arm/common/dmabounce.c +10 −6
@@ -243,7 +243,8 @@ static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
 }
 
 static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
-		enum dma_data_direction dir)
+				    enum dma_data_direction dir,
+				    unsigned long attrs)
 {
 	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
 	struct safe_buffer *buf;
@@ -262,7 +263,8 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
 		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
 		buf->safe, buf->safe_dma_addr);
 
-	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
+	if ((dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) &&
+	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
 		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
 			__func__, ptr, buf->safe, size);
 		memcpy(buf->safe, ptr, size);
@@ -272,7 +274,8 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
 }
 
 static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
-		size_t size, enum dma_data_direction dir)
+				size_t size, enum dma_data_direction dir,
+				unsigned long attrs)
 {
 	BUG_ON(buf->size != size);
 	BUG_ON(buf->direction != dir);
@@ -283,7 +286,8 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
 
 	DO_STATS(dev->archdata.dmabounce->bounce_count++);
 
-	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
+	if ((dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) &&
+	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
 		void *ptr = buf->ptr;
 
 		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
@@ -334,7 +338,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
 		return DMA_ERROR_CODE;
 	}
 
-	return map_single(dev, page_address(page) + offset, size, dir);
+	return map_single(dev, page_address(page) + offset, size, dir, attrs);
 }
 
 /*
@@ -357,7 +361,7 @@ static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t
 		return;
 	}
 
-	unmap_single(dev, buf, size, dir);
+	unmap_single(dev, buf, size, dir, attrs);
 }
 
 static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
arch/avr32/mm/dma-coherent.c +6 −1
@@ -146,6 +146,7 @@ static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page,
 {
 	void *cpu_addr = page_address(page) + offset;
 
-	dma_cache_sync(dev, cpu_addr, size, direction);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_cache_sync(dev, cpu_addr, size, direction);
 	return virt_to_bus(cpu_addr);
 }
@@ -162,6 +163,10 @@ static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 
 		sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
 		virt = sg_virt(sg);
+
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
 		dma_cache_sync(dev, virt, sg->length, direction);
 	}
 
arch/blackfin/kernel/dma-mapping.c +7 −1
@@ -118,6 +118,10 @@ static int bfin_dma_map_sg(struct device *dev, struct scatterlist *sg_list,
 
 	for_each_sg(sg_list, sg, nents, i) {
 		sg->dma_address = (dma_addr_t) sg_virt(sg);
+
+		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+			continue;
+
 		__dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
 	}
 
@@ -143,7 +147,9 @@ static dma_addr_t bfin_dma_map_page(struct device *dev, struct page *page,
 {
 	dma_addr_t handle = (dma_addr_t)(page_address(page) + offset);
 
-	_dma_sync(handle, size, dir);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		_dma_sync(handle, size, dir);
+
 	return handle;
 }
 
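All the architectures above apply the same rule: the existing cache
maintenance in the map, unmap and scatter-gather paths is kept unless the
caller passed DMA_ATTR_SKIP_CPU_SYNC. The dmabounce hunks show why the
attribute matters on teardown as well: unmap normally copies the bounce
buffer back to the caller's memory, and a driver that has already synced the
region it cares about can now skip that copy. A hedged example of the unmap
side, assuming the dma_unmap_page_attrs() helper from this series (dev, dma
and the page-sized mapping are illustrative):

	dma_unmap_page_attrs(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE,
			     DMA_ATTR_SKIP_CPU_SYNC);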