
Commit 5aad2145 authored by Nicholas Bellinger


Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband into for-next
parents fa389e22 2dea9094
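
Note on the diffs below: the merge pulls Roland's infiniband for-next branch in, converting ib_umem from per-chunk page lists (struct ib_umem_chunk) to a single sg_table (umem->sg_head, with umem->npages pinned pages and umem->nmap DMA-mapped entries) and adding the ib_create_mr()/ib_destroy_mr()/ib_check_mr_status() verbs. Drivers that used to walk chunk->page_list now walk the scatterlist directly, as the c2, cxgb3 and cxgb4 hunks below do. A minimal sketch of that walk, using a hypothetical helper name and assuming a struct ib_umem that has already been pinned and DMA-mapped:

#include <linux/errno.h>
#include <linux/log2.h>
#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

/* Sketch only: a hypothetical helper showing the post-conversion walk.
 * It assumes umem->nmap DMA-mapped entries sit in umem->sg_head.sgl,
 * as set up by ib_umem_get() in the hunk below.
 */
static int example_fill_page_array(struct ib_umem *umem, u64 *pages, int max)
{
	struct scatterlist *sg;
	int shift = ilog2(umem->page_size);
	int i = 0, entry, k, len;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;	/* pages in this mapped entry */
		for (k = 0; k < len; ++k) {
			if (i >= max)
				return -ENOSPC;
			pages[i++] = sg_dma_address(sg) + umem->page_size * k;
		}
	}
	return i;	/* number of page addresses written */
}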
+56 −64
@@ -42,29 +42,29 @@
 
 #include "uverbs.h"
 
-#define IB_UMEM_MAX_PAGE_CHUNK						\
-	((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /	\
-	 ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -	\
-	  (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))
 
 static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
 {
-	struct ib_umem_chunk *chunk, *tmp;
+	struct scatterlist *sg;
+	struct page *page;
 	int i;
 
-	list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
-		ib_dma_unmap_sg(dev, chunk->page_list,
-				chunk->nents, DMA_BIDIRECTIONAL);
-		for (i = 0; i < chunk->nents; ++i) {
-			struct page *page = sg_page(&chunk->page_list[i]);
+	if (umem->nmap > 0)
+		ib_dma_unmap_sg(dev, umem->sg_head.sgl,
+				umem->nmap,
+				DMA_BIDIRECTIONAL);
 
-			if (umem->writable && dirty)
-				set_page_dirty_lock(page);
-			put_page(page);
-		}
+	for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
+
+		page = sg_page(sg);
+		if (umem->writable && dirty)
+			set_page_dirty_lock(page);
+		put_page(page);
+	}
 
-		kfree(chunk);
-	}
+	sg_free_table(&umem->sg_head);
+	return;
+
 }
 
 /**
@@ -81,15 +81,15 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	struct ib_umem *umem;
 	struct page **page_list;
 	struct vm_area_struct **vma_list;
-	struct ib_umem_chunk *chunk;
 	unsigned long locked;
 	unsigned long lock_limit;
 	unsigned long cur_base;
 	unsigned long npages;
 	int ret;
-	int off;
 	int i;
 	DEFINE_DMA_ATTRS(attrs);
+	struct scatterlist *sg, *sg_list_start;
+	int need_release = 0;
 
 	if (dmasync)
 		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
@@ -97,7 +97,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	if (!can_do_mlock())
 		return ERR_PTR(-EPERM);
 
-	umem = kmalloc(sizeof *umem, GFP_KERNEL);
+	umem = kzalloc(sizeof *umem, GFP_KERNEL);
 	if (!umem)
 		return ERR_PTR(-ENOMEM);
 
@@ -117,8 +117,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	/* We assume the memory is from hugetlb until proved otherwise */
 	umem->hugetlb   = 1;
 
-	INIT_LIST_HEAD(&umem->chunk_list);
-
 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
 	if (!page_list) {
 		kfree(umem);
@@ -147,7 +145,18 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
 	cur_base = addr & PAGE_MASK;
 
-	ret = 0;
+	if (npages == 0) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
+	if (ret)
+		goto out;
+
+	need_release = 1;
+	sg_list_start = umem->sg_head.sgl;
+
 	while (npages) {
 		ret = get_user_pages(current, current->mm, cur_base,
 				     min_t(unsigned long, npages,
@@ -157,53 +166,37 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 		if (ret < 0)
 			goto out;
 
+		umem->npages += ret;
 		cur_base += ret * PAGE_SIZE;
 		npages   -= ret;
 
-		off = 0;
+		for_each_sg(sg_list_start, sg, ret, i) {
+			if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
+				umem->hugetlb = 0;
 
-		while (ret) {
-			chunk = kmalloc(sizeof *chunk + sizeof (struct scatterlist) *
-					min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK),
-					GFP_KERNEL);
-			if (!chunk) {
-				ret = -ENOMEM;
-				goto out;
-			}
+			sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
+		}
 
-			chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
-			sg_init_table(chunk->page_list, chunk->nents);
-			for (i = 0; i < chunk->nents; ++i) {
-				if (vma_list &&
-				    !is_vm_hugetlb_page(vma_list[i + off]))
-					umem->hugetlb = 0;
-				sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0);
-			}
+		/* preparing for next loop */
+		sg_list_start = sg;
+	}
 
-			chunk->nmap = ib_dma_map_sg_attrs(context->device,
-							  &chunk->page_list[0],
-							  chunk->nents,
-							  DMA_BIDIRECTIONAL,
-							  &attrs);
-			if (chunk->nmap <= 0) {
-				for (i = 0; i < chunk->nents; ++i)
-					put_page(sg_page(&chunk->page_list[i]));
-				kfree(chunk);
+	umem->nmap = ib_dma_map_sg_attrs(context->device,
+				  umem->sg_head.sgl,
+				  umem->npages,
+				  DMA_BIDIRECTIONAL,
+				  &attrs);
 
-				ret = -ENOMEM;
-				goto out;
-			}
+	if (umem->nmap <= 0) {
+		ret = -ENOMEM;
+		goto out;
+	}
 
-			ret -= chunk->nents;
-			off += chunk->nents;
-			list_add_tail(&chunk->list, &umem->chunk_list);
-		}
-
-		ret = 0;
-	}
+	ret = 0;
 
 out:
 	if (ret < 0) {
-		__ib_umem_release(context->device, umem, 0);
+		if (need_release)
+			__ib_umem_release(context->device, umem, 0);
 		kfree(umem);
 	} else
@@ -278,17 +271,16 @@ EXPORT_SYMBOL(ib_umem_release);
 
 int ib_umem_page_count(struct ib_umem *umem)
 {
-	struct ib_umem_chunk *chunk;
 	int shift;
 	int i;
 	int n;
+	struct scatterlist *sg;
 
 	shift = ilog2(umem->page_size);
 
 	n = 0;
-	list_for_each_entry(chunk, &umem->chunk_list, list)
-		for (i = 0; i < chunk->nmap; ++i)
-			n += sg_dma_len(&chunk->page_list[i]) >> shift;
+	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
+		n += sg_dma_len(sg) >> shift;
 
 	return n;
 }
+47 −0
@@ -1169,6 +1169,45 @@ int ib_dereg_mr(struct ib_mr *mr)
 }
 EXPORT_SYMBOL(ib_dereg_mr);
 
+struct ib_mr *ib_create_mr(struct ib_pd *pd,
+			   struct ib_mr_init_attr *mr_init_attr)
+{
+	struct ib_mr *mr;
+
+	if (!pd->device->create_mr)
+		return ERR_PTR(-ENOSYS);
+
+	mr = pd->device->create_mr(pd, mr_init_attr);
+
+	if (!IS_ERR(mr)) {
+		mr->device  = pd->device;
+		mr->pd      = pd;
+		mr->uobject = NULL;
+		atomic_inc(&pd->usecnt);
+		atomic_set(&mr->usecnt, 0);
+	}
+
+	return mr;
+}
+EXPORT_SYMBOL(ib_create_mr);
+
+int ib_destroy_mr(struct ib_mr *mr)
+{
+	struct ib_pd *pd;
+	int ret;
+
+	if (atomic_read(&mr->usecnt))
+		return -EBUSY;
+
+	pd = mr->pd;
+	ret = mr->device->destroy_mr(mr);
+	if (!ret)
+		atomic_dec(&pd->usecnt);
+
+	return ret;
+}
+EXPORT_SYMBOL(ib_destroy_mr);
+
 struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
 {
 	struct ib_mr *mr;
@@ -1398,3 +1437,11 @@ int ib_destroy_flow(struct ib_flow *flow_id)
 	return err;
 }
 EXPORT_SYMBOL(ib_destroy_flow);
+
+int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
+		       struct ib_mr_status *mr_status)
+{
+	return mr->device->check_mr_status ?
+		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
+}
+EXPORT_SYMBOL(ib_check_mr_status);
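
For callers, the verbs added above follow the usual conventions: ib_create_mr() returns an ERR_PTR() (-ENOSYS when the device has no create_mr hook), ib_destroy_mr() returns -EBUSY while mr->usecnt is elevated, and ib_check_mr_status() returns -ENOSYS when the device lacks check_mr_status. A hedged usage sketch follows; the contents of struct ib_mr_init_attr and the meaning of check_mask are assumptions defined elsewhere in the tree, not in this hunk.

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Sketch only: exercising the verbs added above. The caller is assumed
 * to have filled *attr and chosen check_mask; neither is defined here.
 */
static int example_mr_roundtrip(struct ib_pd *pd,
				struct ib_mr_init_attr *attr,
				u32 check_mask)
{
	struct ib_mr_status status;
	struct ib_mr *mr;
	int ret;

	mr = ib_create_mr(pd, attr);	/* ERR_PTR(-ENOSYS) if unsupported */
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	ret = ib_check_mr_status(mr, check_mask, &status);
	if (ret)			/* -ENOSYS when the device lacks the hook */
		goto out;

	/* ... inspect 'status' here as needed ... */
out:
	ib_destroy_mr(mr);		/* -EBUSY while mr->usecnt is elevated */
	return ret;
}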
+9 −14
@@ -431,9 +431,9 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	u64 *pages;
 	u64 kva = 0;
 	int shift, n, len;
-	int i, j, k;
+	int i, k, entry;
 	int err = 0;
-	struct ib_umem_chunk *chunk;
+	struct scatterlist *sg;
 	struct c2_pd *c2pd = to_c2pd(pd);
 	struct c2_mr *c2mr;
 
@@ -452,10 +452,7 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	}
 
 	shift = ffs(c2mr->umem->page_size) - 1;
-
-	n = 0;
-	list_for_each_entry(chunk, &c2mr->umem->chunk_list, list)
-		n += chunk->nents;
+	n = c2mr->umem->nmap;
 
 	pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
 	if (!pages) {
@@ -464,16 +461,14 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	}
 
 	i = 0;
-	list_for_each_entry(chunk, &c2mr->umem->chunk_list, list) {
-		for (j = 0; j < chunk->nmap; ++j) {
-			len = sg_dma_len(&chunk->page_list[j]) >> shift;
-			for (k = 0; k < len; ++k) {
-				pages[i++] =
-					sg_dma_address(&chunk->page_list[j]) +
-					(c2mr->umem->page_size * k);
-			}
+	for_each_sg(c2mr->umem->sg_head.sgl, sg, c2mr->umem->nmap, entry) {
+		len = sg_dma_len(sg) >> shift;
+		for (k = 0; k < len; ++k) {
+			pages[i++] =
+				sg_dma_address(sg) +
+				(c2mr->umem->page_size * k);
 		}
 	}
-	}
 
 	kva = virt;
 	err = c2_nsmr_register_phys_kern(to_c2dev(pd->device),
+7 −12
@@ -618,14 +618,13 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 {
 	__be64 *pages;
 	int shift, n, len;
-	int i, j, k;
+	int i, k, entry;
 	int err = 0;
-	struct ib_umem_chunk *chunk;
 	struct iwch_dev *rhp;
 	struct iwch_pd *php;
 	struct iwch_mr *mhp;
 	struct iwch_reg_user_mr_resp uresp;
-
+	struct scatterlist *sg;
 	PDBG("%s ib_pd %p\n", __func__, pd);
 
 	php = to_iwch_pd(pd);
@@ -645,9 +644,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
 	shift = ffs(mhp->umem->page_size) - 1;
 
-	n = 0;
-	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
-		n += chunk->nents;
+	n = mhp->umem->nmap;
 
 	err = iwch_alloc_pbl(mhp, n);
 	if (err)
@@ -661,12 +658,10 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
 	i = n = 0;
 
-	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
-		for (j = 0; j < chunk->nmap; ++j) {
-			len = sg_dma_len(&chunk->page_list[j]) >> shift;
-			for (k = 0; k < len; ++k) {
-				pages[i++] = cpu_to_be64(sg_dma_address(
-					&chunk->page_list[j]) +
-					mhp->umem->page_size * k);
-				if (i == PAGE_SIZE / sizeof *pages) {
-					err = iwch_write_pbl(mhp, pages, i, n);
+	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
+		len = sg_dma_len(sg) >> shift;
+		for (k = 0; k < len; ++k) {
+			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
+				mhp->umem->page_size * k);
+			if (i == PAGE_SIZE / sizeof *pages) {
+				err = iwch_write_pbl(mhp, pages, i, n);
+17 −22
Original line number Original line Diff line number Diff line
@@ -678,9 +678,9 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
{
{
	__be64 *pages;
	__be64 *pages;
	int shift, n, len;
	int shift, n, len;
	int i, j, k;
	int i, k, entry;
	int err = 0;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct scatterlist *sg;
	struct c4iw_dev *rhp;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	struct c4iw_mr *mhp;
@@ -710,10 +710,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,


	shift = ffs(mhp->umem->page_size) - 1;
	shift = ffs(mhp->umem->page_size) - 1;


	n = 0;
	n = mhp->umem->nmap;
	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		n += chunk->nents;

	err = alloc_pbl(mhp, n);
	err = alloc_pbl(mhp, n);
	if (err)
	if (err)
		goto err;
		goto err;
@@ -726,12 +723,10 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,


	i = n = 0;
	i = n = 0;


	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
		for (j = 0; j < chunk->nmap; ++j) {
		len = sg_dma_len(sg) >> shift;
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
		for (k = 0; k < len; ++k) {
		for (k = 0; k < len; ++k) {
				pages[i++] = cpu_to_be64(sg_dma_address(
			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
					&chunk->page_list[j]) +
				mhp->umem->page_size * k);
				mhp->umem->page_size * k);
			if (i == PAGE_SIZE / sizeof *pages) {
			if (i == PAGE_SIZE / sizeof *pages) {
				err = write_pbl(&mhp->rhp->rdev,
				err = write_pbl(&mhp->rhp->rdev,