Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 29a9b000 authored by Linus Torvalds
Browse files
Pull staging fixes from Greg KH:
 "Here are three staging driver fixes for 4.15-rc6

  The first resolves a bug in the lustre driver that came about due to a
  broken cleanup patch, due to crazy list usage in that codebase.

  The remaining two are ion driver fixes, finally getting the CMA
  interaction to work properly, resolving two regressions in that area
  of the code.

  All have been in linux-next with no reported issues for a while"

* tag 'staging-4.15-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging:
  staging: android: ion: Fix dma direction for dma_sync_sg_for_cpu/device
  staging: ion: Fix ion_cma_heap allocations
  staging: lustre: lnet: Fix recent breakage from list_for_each conversion
parents bc7236fb d6b246bb
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -37,7 +37,7 @@ config ION_CHUNK_HEAP

config ION_CMA_HEAP
	bool "Ion CMA heap support"
	depends on ION && CMA
	depends on ION && DMA_CMA
	help
	  Choose this option to enable CMA heaps with Ion. This heap is backed
	  by the Contiguous Memory Allocator (CMA). If your system has these
+2 −2
Original line number Diff line number Diff line
@@ -346,7 +346,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
				    DMA_BIDIRECTIONAL);
				    direction);
	}
	mutex_unlock(&buffer->lock);

@@ -368,7 +368,7 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
				       DMA_BIDIRECTIONAL);
				       direction);
	}
	mutex_unlock(&buffer->lock);

+11 −4
Original line number Diff line number Diff line
@@ -39,9 +39,15 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
	struct ion_cma_heap *cma_heap = to_cma_heap(heap);
	struct sg_table *table;
	struct page *pages;
	unsigned long size = PAGE_ALIGN(len);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	int ret;

	pages = cma_alloc(cma_heap->cma, len, 0, GFP_KERNEL);
	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

@@ -53,7 +59,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
	if (ret)
		goto free_mem;

	sg_set_page(table->sgl, pages, len, 0);
	sg_set_page(table->sgl, pages, size, 0);

	buffer->priv_virt = pages;
	buffer->sg_table = table;
@@ -62,7 +68,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
free_mem:
	kfree(table);
err:
	cma_release(cma_heap->cma, pages, buffer->size);
	cma_release(cma_heap->cma, pages, nr_pages);
	return -ENOMEM;
}

@@ -70,9 +76,10 @@ static void ion_cma_free(struct ion_buffer *buffer)
{
	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
	struct page *pages = buffer->priv_virt;
	unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;

	/* release memory */
	cma_release(cma_heap->cma, pages, buffer->size);
	cma_release(cma_heap->cma, pages, nr_pages);
	/* release sg table */
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
+10 −13
Original line number Diff line number Diff line
@@ -487,21 +487,18 @@ ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr,
			      ksocknal_nid2peerlist(id.nid));
	}

	route2 = NULL;
	list_for_each_entry(route2, &peer->ksnp_routes, ksnr_list) {
		if (route2->ksnr_ipaddr == ipaddr)
			break;

		route2 = NULL;
	}
	if (!route2) {
		ksocknal_add_route_locked(peer, route);
		route->ksnr_share_count++;
	} else {
		if (route2->ksnr_ipaddr == ipaddr) {
			/* Route already exists, use the old one */
			ksocknal_route_decref(route);
			route2->ksnr_share_count++;
			goto out;
		}

	}
	/* Route doesn't already exist, add the new one */
	ksocknal_add_route_locked(peer, route);
	route->ksnr_share_count++;
out:
	write_unlock_bh(&ksocknal_data.ksnd_global_lock);

	return 0;