
Commit 877f075a authored by Linus Torvalds

Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

Pull infiniband updates from Roland Dreier:
 "Main batch of InfiniBand/RDMA changes for 3.15:

   - The biggest change is core API extensions and mlx5 low-level driver
     support for handling DIF/DIX-style protection information, and the
     addition of PI support to the iSER initiator.  Target support will
     be arriving shortly through the SCSI target tree.

   - A nice simplification to the "umem" memory pinning library now that
     we have chained sg lists.  Kudos to Yishai Hadas for realizing our
     code didn't have to be so crazy.

   - Another nice simplification to the sg wrappers used by qib, ipath
     and ehca to handle their mapping of memory to adapter.

   - The usual batch of fixes to bugs found by static checkers etc.
     from intrepid people like Dan Carpenter and Yann Droneaud.

   - A large batch of cxgb4, ocrdma, qib driver updates"

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (102 commits)
  RDMA/ocrdma: Unregister inet notifier when unloading ocrdma
  RDMA/ocrdma: Fix warnings about pointer <-> integer casts
  RDMA/ocrdma: Code clean-up
  RDMA/ocrdma: Display FW version
  RDMA/ocrdma: Query controller information
  RDMA/ocrdma: Support non-embedded mailbox commands
  RDMA/ocrdma: Handle CQ overrun error
  RDMA/ocrdma: Display proper value for max_mw
  RDMA/ocrdma: Use non-zero tag in SRQ posting
  RDMA/ocrdma: Memory leak fix in ocrdma_dereg_mr()
  RDMA/ocrdma: Increment abi version count
  RDMA/ocrdma: Update version string
  be2net: Add abi version between be2net and ocrdma
  RDMA/ocrdma: ABI versioning between ocrdma and be2net
  RDMA/ocrdma: Allow DPP QP creation
  RDMA/ocrdma: Read ASIC_ID register to select asic_gen
  RDMA/ocrdma: SQ and RQ doorbell offset clean up
  RDMA/ocrdma: EQ full catastrophe avoidance
  RDMA/cxgb4: Disable DSGL use by default
  RDMA/cxgb4: rx_data() needs to hold the ep mutex
  ...
parents 3cf59142 f7eaa7ed
drivers/infiniband/core/cm.c  +0 −17
@@ -349,23 +349,6 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
			   grh, &av->ah_attr);
}

-int ib_update_cm_av(struct ib_cm_id *id, const u8 *smac, const u8 *alt_smac)
-{
-	struct cm_id_private *cm_id_priv;
-
-	cm_id_priv = container_of(id, struct cm_id_private, id);
-
-	if (smac != NULL)
-		memcpy(cm_id_priv->av.smac, smac, sizeof(cm_id_priv->av.smac));
-
-	if (alt_smac != NULL)
-		memcpy(cm_id_priv->alt_av.smac, alt_smac,
-		       sizeof(cm_id_priv->alt_av.smac));
-
-	return 0;
-}
-EXPORT_SYMBOL(ib_update_cm_av);

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
drivers/infiniband/core/cma.c  +0 −26
@@ -1284,15 +1284,6 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
	struct rdma_id_private *listen_id, *conn_id;
	struct rdma_cm_event event;
	int offset, ret;
-	u8 smac[ETH_ALEN];
-	u8 alt_smac[ETH_ALEN];
-	u8 *psmac = smac;
-	u8 *palt_smac = alt_smac;
-	int is_iboe = ((rdma_node_get_transport(cm_id->device->node_type) ==
-			RDMA_TRANSPORT_IB) &&
-		       (rdma_port_get_link_layer(cm_id->device,
-			ib_event->param.req_rcvd.port) ==
-			IB_LINK_LAYER_ETHERNET));

	listen_id = cm_id->context;
	if (!cma_check_req_qp_type(&listen_id->id, ib_event))
@@ -1336,28 +1327,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
	ret = conn_id->id.event_handler(&conn_id->id, &event);
	if (ret)
		goto err3;

-	if (is_iboe) {
-		if (ib_event->param.req_rcvd.primary_path != NULL)
-			rdma_addr_find_smac_by_sgid(
-				&ib_event->param.req_rcvd.primary_path->sgid,
-				psmac, NULL);
-		else
-			psmac = NULL;
-		if (ib_event->param.req_rcvd.alternate_path != NULL)
-			rdma_addr_find_smac_by_sgid(
-				&ib_event->param.req_rcvd.alternate_path->sgid,
-				palt_smac, NULL);
-		else
-			palt_smac = NULL;
-	}
	/*
	 * Acquire mutex to prevent user executing rdma_destroy_id()
	 * while we're accessing the cm_id.
	 */
	mutex_lock(&lock);
-	if (is_iboe)
-		ib_update_cm_av(cm_id, psmac, palt_smac);
	if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
	    (conn_id->id.qp_type != IB_QPT_UD))
		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
drivers/infiniband/core/mad.c  +14 −0
@@ -1022,12 +1022,21 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
+	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
+		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
+	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
+		ib_dma_unmap_single(mad_agent->device,
+				    mad_send_wr->header_mapping,
+				    sge[0].length, DMA_TO_DEVICE);
+		return -ENOMEM;
+	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
@@ -2590,6 +2599,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
						 sizeof *mad_priv -
						   sizeof mad_priv->header,
						 DMA_FROM_DEVICE);
+		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
+						  sg_list.addr))) {
+			ret = -ENOMEM;
+			break;
+		}
		mad_priv->header.mapping = sg_list.addr;
		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
		mad_priv->header.mad_list.mad_queue = recv_queue;
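
The two mad.c hunks above add the same defensive pattern to both the send and receive paths: each ib_dma_map_single() result is checked with ib_dma_mapping_error() before it is stored, and an already-established mapping is unwound when a later one fails. A minimal sketch of the pattern, assuming a hypothetical two-buffer helper (map_two_buffers is illustrative, not code from this commit):

#include <rdma/ib_verbs.h>

/* Map a header and a payload buffer for sending; back out of the
 * header mapping if the payload mapping fails, mirroring the error
 * handling that ib_send_mad() gains in the hunk above. */
static int map_two_buffers(struct ib_device *dev,
			   void *hdr, size_t hdr_len,
			   void *payload, size_t payload_len,
			   u64 *hdr_dma, u64 *payload_dma)
{
	*hdr_dma = ib_dma_map_single(dev, hdr, hdr_len, DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(dev, *hdr_dma)))
		return -ENOMEM;

	*payload_dma = ib_dma_map_single(dev, payload, payload_len,
					 DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(dev, *payload_dma))) {
		/* Unwind the first mapping before reporting failure. */
		ib_dma_unmap_single(dev, *hdr_dma, hdr_len, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	return 0;
}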
drivers/infiniband/core/umem.c  +56 −64
@@ -42,29 +42,29 @@

#include "uverbs.h"

-#define IB_UMEM_MAX_PAGE_CHUNK						\
-	((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /	\
-	 ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -	\
-	  (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))

static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
-	struct ib_umem_chunk *chunk, *tmp;
+	struct scatterlist *sg;
+	struct page *page;
	int i;

-	list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
-		ib_dma_unmap_sg(dev, chunk->page_list,
-				chunk->nents, DMA_BIDIRECTIONAL);
-		for (i = 0; i < chunk->nents; ++i) {
-			struct page *page = sg_page(&chunk->page_list[i]);
+	if (umem->nmap > 0)
+		ib_dma_unmap_sg(dev, umem->sg_head.sgl,
+				umem->nmap,
+				DMA_BIDIRECTIONAL);
+
+	for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
+
+		page = sg_page(sg);
		if (umem->writable && dirty)
			set_page_dirty_lock(page);
		put_page(page);
	}

-		kfree(chunk);
-	}
+	sg_free_table(&umem->sg_head);
+	return;

}

/**
@@ -81,15 +81,15 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
	struct ib_umem *umem;
	struct page **page_list;
	struct vm_area_struct **vma_list;
-	struct ib_umem_chunk *chunk;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
-	int off;
	int i;
	DEFINE_DMA_ATTRS(attrs);
+	struct scatterlist *sg, *sg_list_start;
+	int need_release = 0;

	if (dmasync)
		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
@@ -97,7 +97,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

-	umem = kmalloc(sizeof *umem, GFP_KERNEL);
+	umem = kzalloc(sizeof *umem, GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

@@ -117,8 +117,6 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
	/* We assume the memory is from hugetlb until proved otherwise */
	umem->hugetlb   = 1;

-	INIT_LIST_HEAD(&umem->chunk_list);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		kfree(umem);
@@ -147,7 +145,18 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,

	cur_base = addr & PAGE_MASK;

-	ret = 0;
+	if (npages == 0) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
+	if (ret)
+		goto out;
+
+	need_release = 1;
+	sg_list_start = umem->sg_head.sgl;

	while (npages) {
		ret = get_user_pages(current, current->mm, cur_base,
				     min_t(unsigned long, npages,
@@ -157,53 +166,37 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
		if (ret < 0)
			goto out;

+		umem->npages += ret;
		cur_base += ret * PAGE_SIZE;
		npages   -= ret;

-		off = 0;
+		for_each_sg(sg_list_start, sg, ret, i) {
+			if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
+				umem->hugetlb = 0;

-		while (ret) {
-			chunk = kmalloc(sizeof *chunk + sizeof (struct scatterlist) *
-					min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK),
-					GFP_KERNEL);
-			if (!chunk) {
-				ret = -ENOMEM;
-				goto out;
+			sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
		}

-			chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
-			sg_init_table(chunk->page_list, chunk->nents);
-			for (i = 0; i < chunk->nents; ++i) {
-				if (vma_list &&
-				    !is_vm_hugetlb_page(vma_list[i + off]))
-					umem->hugetlb = 0;
-				sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0);
+		/* preparing for next loop */
+		sg_list_start = sg;
	}

-			chunk->nmap = ib_dma_map_sg_attrs(context->device,
-							  &chunk->page_list[0],
-							  chunk->nents,
+	umem->nmap = ib_dma_map_sg_attrs(context->device,
+				  umem->sg_head.sgl,
+				  umem->npages,
				  DMA_BIDIRECTIONAL,
				  &attrs);
-			if (chunk->nmap <= 0) {
-				for (i = 0; i < chunk->nents; ++i)
-					put_page(sg_page(&chunk->page_list[i]));
-				kfree(chunk);

+	if (umem->nmap <= 0) {
		ret = -ENOMEM;
		goto out;
	}

-			ret -= chunk->nents;
-			off += chunk->nents;
-			list_add_tail(&chunk->list, &umem->chunk_list);
-		}

	ret = 0;
	}

out:
	if (ret < 0) {
+		if (need_release)
			__ib_umem_release(context->device, umem, 0);
		kfree(umem);
	} else
@@ -278,17 +271,16 @@ EXPORT_SYMBOL(ib_umem_release);

int ib_umem_page_count(struct ib_umem *umem)
{
-	struct ib_umem_chunk *chunk;
	int shift;
	int i;
	int n;
+	struct scatterlist *sg;

	shift = ilog2(umem->page_size);

	n = 0;
-	list_for_each_entry(chunk, &umem->chunk_list, list)
-		for (i = 0; i < chunk->nmap; ++i)
-			n += sg_dma_len(&chunk->page_list[i]) >> shift;
+	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
+		n += sg_dma_len(sg) >> shift;

	return n;
}
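
After this refactor a umem is a single linear scatterlist: ib_umem_get() sizes it once with sg_alloc_table(), fills it page by page, and maps it with one ib_dma_map_sg_attrs() call, so consumers walk umem->sg_head.sgl with for_each_sg() instead of nesting a page loop inside a chunk-list walk. A hedged sketch of the consumer side, assuming a hypothetical helper (fill_page_list is illustrative, not from this commit) that flattens the mapped table into per-page DMA addresses the way converted drivers do:

#include <linux/log2.h>
#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

/* Flatten a mapped umem into an array of page-sized DMA addresses.
 * Each SG entry may cover several pages, so split sg_dma_len() by
 * the umem page size. Returns the number of pages written, or
 * -EINVAL if the caller's array is too small. */
static int fill_page_list(struct ib_umem *umem, u64 *pages, int max_pages)
{
	struct scatterlist *sg;
	int shift = ilog2(umem->page_size);
	unsigned int j, len;
	int i, n = 0;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
		len = sg_dma_len(sg) >> shift;
		for (j = 0; j < len; ++j) {
			if (n >= max_pages)
				return -EINVAL;
			pages[n++] = sg_dma_address(sg) + ((u64)j << shift);
		}
	}

	return n;
}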
drivers/infiniband/core/verbs.c  +47 −0
@@ -1169,6 +1169,45 @@ int ib_dereg_mr(struct ib_mr *mr)
}
EXPORT_SYMBOL(ib_dereg_mr);

+struct ib_mr *ib_create_mr(struct ib_pd *pd,
+			   struct ib_mr_init_attr *mr_init_attr)
+{
+	struct ib_mr *mr;
+
+	if (!pd->device->create_mr)
+		return ERR_PTR(-ENOSYS);
+
+	mr = pd->device->create_mr(pd, mr_init_attr);
+
+	if (!IS_ERR(mr)) {
+		mr->device  = pd->device;
+		mr->pd      = pd;
+		mr->uobject = NULL;
+		atomic_inc(&pd->usecnt);
+		atomic_set(&mr->usecnt, 0);
+	}
+
+	return mr;
+}
+EXPORT_SYMBOL(ib_create_mr);
+
+int ib_destroy_mr(struct ib_mr *mr)
+{
+	struct ib_pd *pd;
+	int ret;
+
+	if (atomic_read(&mr->usecnt))
+		return -EBUSY;
+
+	pd = mr->pd;
+	ret = mr->device->destroy_mr(mr);
+	if (!ret)
+		atomic_dec(&pd->usecnt);
+
+	return ret;
+}
+EXPORT_SYMBOL(ib_destroy_mr);
+
struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
	struct ib_mr *mr;
@@ -1398,3 +1437,11 @@ int ib_destroy_flow(struct ib_flow *flow_id)
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);

+int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
+		       struct ib_mr_status *mr_status)
+{
+	return mr->device->check_mr_status ?
+		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
+}
+EXPORT_SYMBOL(ib_check_mr_status);
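
The verbs added above (ib_create_mr(), ib_destroy_mr(), ib_check_mr_status()) are the core API extension for DIF/DIX-style protection information mentioned in the merge message: ib_create_mr() allocates a memory region whose init attributes can request signature offload, and ib_check_mr_status() later asks the device whether any protection check failed. A hedged usage sketch (the helper, the attribute values, and the abbreviated error handling are illustrative, not from this commit):

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Allocate a signature-enabled MR; after the protected I/O has
 * completed, query the device for protection-check failures. */
static int pi_mr_example(struct ib_pd *pd)
{
	struct ib_mr_init_attr mr_attr = {
		.max_reg_descriptors = 2,	/* e.g. data + protection */
		.flags = IB_MR_SIGNATURE_EN,
	};
	struct ib_mr_status mr_status;
	struct ib_mr *mr;
	int ret;

	mr = ib_create_mr(pd, &mr_attr);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* ... post signature-protected work requests and wait ... */

	ret = ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (!ret && (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS))
		pr_err("PI offload reported a signature error\n");

	ib_destroy_mr(mr);
	return ret;
}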