
Commit db6c6774 authored by Shiraz Saleem, committed by Jason Gunthorpe

RDMA/umem: Remove hugetlb flag



The drivers i40iw and bnxt_re no longer depend on the hugetlb flag, so
remove this flag from the ib_umem structure.
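
For context, a driver can decide on mapping page sizes without this flag by
inspecting the pinned scatterlist directly. The helper below is a minimal
illustrative sketch of that approach, not code from this commit:
umem_best_page_size() and its supported_pgsz bitmap are hypothetical names,
and the in-tree helper that fills this role, ib_umem_find_best_pgsz(), also
accounts for the user virtual address of the first entry.

#include <linux/bitops.h>
#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

/*
 * Sketch only: pick the largest page size, from a driver-supplied
 * bitmap of supported sizes, that every DMA segment of the umem is
 * aligned to. Simplified relative to ib_umem_find_best_pgsz(); the
 * start offset of the first entry is ignored here.
 */
static unsigned long umem_best_page_size(struct ib_umem *umem,
					 unsigned long supported_pgsz)
{
	struct scatterlist *sg;
	unsigned long mask = 0;
	int i;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
		mask |= sg_dma_address(sg) | sg_dma_len(sg);

	/*
	 * The lowest set bit of the accumulated mask is the strictest
	 * alignment every segment satisfies; keep only supported sizes
	 * up to that bound and return the largest one left (0 if none).
	 */
	if (mask)
		supported_pgsz &= (mask & -mask) | ((mask & -mask) - 1);
	return supported_pgsz ? 1UL << __fls(supported_pgsz) : 0;
}

A driver supporting, say, 4K and 2M pages would pass SZ_4K | SZ_2M and fall
back to 4K mappings whenever the region is not uniformly 2M-aligned.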

Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent d8558251
drivers/infiniband/core/umem.c  +1 −25
@@ -37,7 +37,6 @@
 #include <linux/sched/signal.h>
 #include <linux/sched/mm.h>
 #include <linux/export.h>
-#include <linux/hugetlb.h>
 #include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <rdma/ib_umem_odp.h>
@@ -199,14 +198,12 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 	struct ib_ucontext *context;
 	struct ib_umem *umem;
 	struct page **page_list;
-	struct vm_area_struct **vma_list;
 	unsigned long lock_limit;
 	unsigned long new_pinned;
 	unsigned long cur_base;
 	struct mm_struct *mm;
 	unsigned long npages;
 	int ret;
-	int i;
 	unsigned long dma_attrs = 0;
 	struct scatterlist *sg;
 	unsigned int gup_flags = FOLL_WRITE;
@@ -264,23 +261,12 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 		return umem;
 	}
 
-	/* We assume the memory is from hugetlb until proved otherwise */
-	umem->hugetlb   = 1;
-
 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
 	if (!page_list) {
 		ret = -ENOMEM;
 		goto umem_kfree;
 	}
 
-	/*
-	 * if we can't alloc the vma_list, it's not so bad;
-	 * just assume the memory is not hugetlb memory
-	 */
-	vma_list = (struct vm_area_struct **) __get_free_page(GFP_KERNEL);
-	if (!vma_list)
-		umem->hugetlb = 0;
-
 	npages = ib_umem_num_pages(umem);
 	if (npages == 0 || npages > UINT_MAX) {
 		ret = -EINVAL;
@@ -312,7 +298,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 		ret = get_user_pages_longterm(cur_base,
 				     min_t(unsigned long, npages,
 					   PAGE_SIZE / sizeof (struct page *)),
-				     gup_flags, page_list, vma_list);
+				     gup_flags, page_list, NULL);
 		if (ret < 0) {
 			up_read(&mm->mmap_sem);
 			goto umem_release;
@@ -325,14 +311,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 			dma_get_max_seg_size(context->device->dma_device),
 			&umem->sg_nents);
 
-		/* Continue to hold the mmap_sem as vma_list access
-		 * needs to be protected.
-		 */
-		for (i = 0; i < ret && umem->hugetlb; i++) {
-			if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
-				umem->hugetlb = 0;
-		}
-
 		up_read(&mm->mmap_sem);
 	}
 
@@ -357,8 +335,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 vma:
 	atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
 out:
-	if (vma_list)
-		free_page((unsigned long) vma_list);
 	free_page((unsigned long) page_list);
 umem_kfree:
 	if (ret) {
drivers/infiniband/core/umem_odp.c  +0 −3
@@ -417,9 +417,6 @@ int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
 		h = hstate_vma(vma);
 		umem->page_shift = huge_page_shift(h);
 		up_read(&mm->mmap_sem);
-		umem->hugetlb = 1;
-	} else {
-		umem->hugetlb = 0;
 	}
 
 	mutex_init(&umem_odp->umem_mutex);
include/rdma/ib_umem.h  +0 −1
@@ -48,7 +48,6 @@ struct ib_umem {
 	unsigned long		address;
 	int			page_shift;
 	u32 writable : 1;
-	u32 hugetlb : 1;
 	u32 is_odp : 1;
 	struct work_struct	work;
 	struct sg_table sg_head;