Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4c34bdf5 authored by Hoang-Nam Nguyen, committed by Roland Dreier
Browse files

IB/ehca: Remove use of do_mmap()



This patch removes do_mmap() from ehca:
 - Call remap_pfn_range() for hardware register block
 - Use vm_insert_page() to register memory allocated for completion
   queues and queue pairs
 - The actual mmap() call/trigger is now controlled by user space,
   i.e., libehca

Signed-off-by: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 1f126670
Loading
Loading
Loading
Loading
+7 −8
Original line number Diff line number Diff line
@@ -119,13 +119,14 @@ struct ehca_qp {
	struct ipz_qp_handle ipz_qp_handle;
	struct ehca_pfqp pf;
	struct ib_qp_init_attr init_attr;
	u64 uspace_squeue;
	u64 uspace_rqueue;
	u64 uspace_fwh;
	struct ehca_cq *send_cq;
	struct ehca_cq *recv_cq;
	unsigned int sqerr_purgeflag;
	struct hlist_node list_entries;
	/* mmap counter for resources mapped into user space */
	u32 mm_count_squeue;
	u32 mm_count_rqueue;
	u32 mm_count_galpa;
};

/* must be power of 2 */
@@ -142,13 +143,14 @@ struct ehca_cq {
	struct ipz_cq_handle ipz_cq_handle;
	struct ehca_pfcq pf;
	spinlock_t cb_lock;
	u64 uspace_queue;
	u64 uspace_fwh;
	struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
	struct list_head entry;
	u32 nr_callbacks;
	spinlock_t task_lock;
	u32 ownpid;
	/* mmap counter for resources mapped into user space */
	u32 mm_count_queue;
	u32 mm_count_galpa;
};

enum ehca_mr_flag {
@@ -283,7 +285,6 @@ extern int ehca_port_act_time;
extern int ehca_use_hp_mr;

struct ipzu_queue_resp {
	u64 queue;        /* points to first queue entry */
	u32 qe_size;      /* queue entry size */
	u32 act_nr_of_sg;
	u32 queue_length; /* queue length allocated in bytes */
@@ -296,7 +297,6 @@ struct ehca_create_cq_resp {
	u32 cq_number;
	u32 token;
	struct ipzu_queue_resp ipz_queue;
	struct h_galpas galpas;
};

struct ehca_create_qp_resp {
@@ -309,7 +309,6 @@ struct ehca_create_qp_resp {
	u32 dummy; /* padding for 8 byte alignment */
	struct ipzu_queue_resp ipz_squeue;
	struct ipzu_queue_resp ipz_rqueue;
	struct h_galpas galpas;
};

struct ehca_alloc_cq_parms {
+16 −49
Original line number Diff line number Diff line
@@ -267,7 +267,6 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
	if (context) {
		struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
		struct ehca_create_cq_resp resp;
		struct vm_area_struct *vma;
		memset(&resp, 0, sizeof(resp));
		resp.cq_number = my_cq->cq_number;
		resp.token = my_cq->token;
@@ -276,40 +275,14 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
		resp.ipz_queue.queue_length = ipz_queue->queue_length;
		resp.ipz_queue.pagesize = ipz_queue->pagesize;
		resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
		ret = ehca_mmap_nopage(((u64)(my_cq->token) << 32) | 0x12000000,
				       ipz_queue->queue_length,
				       (void**)&resp.ipz_queue.queue,
				       &vma);
		if (ret) {
			ehca_err(device, "Could not mmap queue pages");
			cq = ERR_PTR(ret);
			goto create_cq_exit4;
		}
		my_cq->uspace_queue = resp.ipz_queue.queue;
		resp.galpas = my_cq->galpas;
		ret = ehca_mmap_register(my_cq->galpas.user.fw_handle,
					 (void**)&resp.galpas.kernel.fw_handle,
					 &vma);
		if (ret) {
			ehca_err(device, "Could not mmap fw_handle");
			cq = ERR_PTR(ret);
			goto create_cq_exit5;
		}
		my_cq->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			ehca_err(device, "Copy to udata failed.");
			goto create_cq_exit6;
			goto create_cq_exit4;
		}
	}

	return cq;

create_cq_exit6:
	ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);

create_cq_exit5:
	ehca_munmap(my_cq->uspace_queue, my_cq->ipz_queue.queue_length);

create_cq_exit4:
	ipz_queue_dtor(&my_cq->ipz_queue);

@@ -333,7 +306,6 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe,
int ehca_destroy_cq(struct ib_cq *cq)
{
	u64 h_ret;
	int ret;
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	int cq_num = my_cq->cq_number;
	struct ib_device *device = cq->device;
@@ -343,6 +315,20 @@ int ehca_destroy_cq(struct ib_cq *cq)
	u32 cur_pid = current->tgid;
	unsigned long flags;

	if (cq->uobject) {
		if (my_cq->mm_count_galpa || my_cq->mm_count_queue) {
			ehca_err(device, "Resources still referenced in "
				 "user space cq_num=%x", my_cq->cq_number);
			return -EINVAL;
		}
		if (my_cq->ownpid != cur_pid) {
			ehca_err(device, "Invalid caller pid=%x ownpid=%x "
				 "cq_num=%x",
				 cur_pid, my_cq->ownpid, my_cq->cq_number);
			return -EINVAL;
		}
	}

	spin_lock_irqsave(&ehca_cq_idr_lock, flags);
	while (my_cq->nr_callbacks) {
		spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
@@ -353,25 +339,6 @@ int ehca_destroy_cq(struct ib_cq *cq)
	idr_remove(&ehca_cq_idr, my_cq->token);
	spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);

	if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
		ehca_err(device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_cq->ownpid);
		return -EINVAL;
	}

	/* un-mmap if vma alloc */
	if (my_cq->uspace_queue ) {
		ret = ehca_munmap(my_cq->uspace_queue,
				  my_cq->ipz_queue.queue_length);
		if (ret)
			ehca_err(device, "Could not munmap queue ehca_cq=%p "
				 "cq_num=%x", my_cq, cq_num);
		ret = ehca_munmap(my_cq->uspace_fwh, EHCA_PAGESIZE);
		if (ret)
			ehca_err(device, "Could not munmap fwh ehca_cq=%p "
				 "cq_num=%x", my_cq, cq_num);
	}

	h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
	if (h_ret == H_R_STATE) {
		/* cq in err: read err data and destroy it forcibly */
@@ -400,7 +367,7 @@ int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	u32 cur_pid = current->tgid;

	if (my_cq->uspace_queue && my_cq->ownpid != cur_pid) {
	if (cq->uobject && my_cq->ownpid != cur_pid) {
		ehca_err(cq->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_cq->ownpid);
		return -EINVAL;
+0 −8
Original line number Diff line number Diff line
@@ -171,14 +171,6 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

void ehca_poll_eqs(unsigned long data);

int ehca_mmap_nopage(u64 foffset,u64 length,void **mapped,
		     struct vm_area_struct **vma);

int ehca_mmap_register(u64 physical,void **mapped,
		       struct vm_area_struct **vma);

int ehca_munmap(unsigned long addr, size_t len);

#ifdef CONFIG_PPC_64K_PAGES
void *ehca_alloc_fw_ctrlblock(gfp_t flags);
void ehca_free_fw_ctrlblock(void *ptr);
+3 −3
Original line number Diff line number Diff line
@@ -52,7 +52,7 @@
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
MODULE_VERSION("SVNEHCA_0019");
MODULE_VERSION("SVNEHCA_0020");

int ehca_open_aqp1     = 0;
int ehca_debug_level   = 0;
@@ -288,7 +288,7 @@ int ehca_init_device(struct ehca_shca *shca)
	strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
	shca->ib_device.owner               = THIS_MODULE;

	shca->ib_device.uverbs_abi_ver	    = 5;
	shca->ib_device.uverbs_abi_ver	    = 6;
	shca->ib_device.uverbs_cmd_mask	    =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
@@ -790,7 +790,7 @@ int __init ehca_module_init(void)
	int ret;

	printk(KERN_INFO "eHCA Infiniband Device Driver "
	                 "(Rel.: SVNEHCA_0019)\n");
	                 "(Rel.: SVNEHCA_0020)\n");
	idr_init(&ehca_qp_idr);
	idr_init(&ehca_cq_idr);
	spin_lock_init(&ehca_qp_idr_lock);
+14 −64
Original line number Diff line number Diff line
@@ -637,7 +637,6 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
		struct ipz_queue *ipz_rqueue = &my_qp->ipz_rqueue;
		struct ipz_queue *ipz_squeue = &my_qp->ipz_squeue;
		struct ehca_create_qp_resp resp;
		struct vm_area_struct * vma;
		memset(&resp, 0, sizeof(resp));

		resp.qp_num = my_qp->real_qp_num;
@@ -651,59 +650,21 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
		resp.ipz_rqueue.queue_length = ipz_rqueue->queue_length;
		resp.ipz_rqueue.pagesize = ipz_rqueue->pagesize;
		resp.ipz_rqueue.toggle_state = ipz_rqueue->toggle_state;
		ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x22000000,
				       ipz_rqueue->queue_length,
				       (void**)&resp.ipz_rqueue.queue,
				       &vma);
		if (ret) {
			ehca_err(pd->device, "Could not mmap rqueue pages");
			goto create_qp_exit3;
		}
		my_qp->uspace_rqueue = resp.ipz_rqueue.queue;
		/* squeue properties */
		resp.ipz_squeue.qe_size = ipz_squeue->qe_size;
		resp.ipz_squeue.act_nr_of_sg = ipz_squeue->act_nr_of_sg;
		resp.ipz_squeue.queue_length = ipz_squeue->queue_length;
		resp.ipz_squeue.pagesize = ipz_squeue->pagesize;
		resp.ipz_squeue.toggle_state = ipz_squeue->toggle_state;
		ret = ehca_mmap_nopage(((u64)(my_qp->token) << 32) | 0x23000000,
				       ipz_squeue->queue_length,
				       (void**)&resp.ipz_squeue.queue,
				       &vma);
		if (ret) {
			ehca_err(pd->device, "Could not mmap squeue pages");
			goto create_qp_exit4;
		}
		my_qp->uspace_squeue = resp.ipz_squeue.queue;
		/* fw_handle */
		resp.galpas = my_qp->galpas;
		ret = ehca_mmap_register(my_qp->galpas.user.fw_handle,
					 (void**)&resp.galpas.kernel.fw_handle,
					 &vma);
		if (ret) {
			ehca_err(pd->device, "Could not mmap fw_handle");
			goto create_qp_exit5;
		}
		my_qp->uspace_fwh = (u64)resp.galpas.kernel.fw_handle;

		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
			ehca_err(pd->device, "Copy to udata failed");
			ret = -EINVAL;
			goto create_qp_exit6;
			goto create_qp_exit3;
		}
	}

	return &my_qp->ib_qp;

create_qp_exit6:
	ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);

create_qp_exit5:
	ehca_munmap(my_qp->uspace_squeue, my_qp->ipz_squeue.queue_length);

create_qp_exit4:
	ehca_munmap(my_qp->uspace_rqueue, my_qp->ipz_rqueue.queue_length);

create_qp_exit3:
	ipz_queue_dtor(&my_qp->ipz_rqueue);
	ipz_queue_dtor(&my_qp->ipz_squeue);
@@ -931,7 +892,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
	     my_qp->qp_type == IB_QPT_SMI) &&
	    statetrans == IB_QPST_SQE2RTS) {
		/* mark next free wqe if kernel */
		if (my_qp->uspace_squeue == 0) {
		if (!ibqp->uobject) {
			struct ehca_wqe *wqe;
			/* lock send queue */
			spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);
@@ -1417,12 +1378,19 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
	enum ib_qp_type	qp_type;
	unsigned long flags;

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    my_pd->ownpid != cur_pid) {
	if (ibqp->uobject) {
		if (my_qp->mm_count_galpa ||
		    my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
			ehca_err(ibqp->device, "Resources still referenced in "
				 "user space qp_num=%x", ibqp->qp_num);
			return -EINVAL;
		}
		if (my_pd->ownpid != cur_pid) {
			ehca_err(ibqp->device, "Invalid caller pid=%x ownpid=%x",
				 cur_pid, my_pd->ownpid);
			return -EINVAL;
		}
	}

	if (my_qp->send_cq) {
		ret = ehca_cq_unassign_qp(my_qp->send_cq,
@@ -1439,24 +1407,6 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
	idr_remove(&ehca_qp_idr, my_qp->token);
	spin_unlock_irqrestore(&ehca_qp_idr_lock, flags);

	/* un-mmap if vma alloc */
	if (my_qp->uspace_rqueue) {
		ret = ehca_munmap(my_qp->uspace_rqueue,
				  my_qp->ipz_rqueue.queue_length);
		if (ret)
			ehca_err(ibqp->device, "Could not munmap rqueue "
				 "qp_num=%x", qp_num);
		ret = ehca_munmap(my_qp->uspace_squeue,
				  my_qp->ipz_squeue.queue_length);
		if (ret)
			ehca_err(ibqp->device, "Could not munmap squeue "
				 "qp_num=%x", qp_num);
		ret = ehca_munmap(my_qp->uspace_fwh, EHCA_PAGESIZE);
		if (ret)
			ehca_err(ibqp->device, "Could not munmap fwh qp_num=%x",
				 qp_num);
	}

	h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
	if (h_ret != H_SUCCESS) {
		ehca_err(ibqp->device, "hipz_h_destroy_qp() failed rc=%lx "
Loading