
Commit ed8ada39 authored by Linus Torvalds
Pull infiniband updates from Roland Dreier:
 "Last batch of IB changes for 3.12: many mlx5 hardware driver fixes
  plus one trivial semicolon cleanup"

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB: Remove unnecessary semicolons
  IB/mlx5: Ensure proper synchronization accessing memory
  IB/mlx5: Fix alignment of reg umr gather buffers
  IB/mlx5: Fix eq names to display nicely in /proc/interrupts
  mlx5: Fix error code translation from firmware to driver
  IB/mlx5: Fix opt param mask according to firmware spec
  mlx5: Fix opt param mask for sq err to rts transition
  IB/mlx5: Disable atomic operations
  mlx5: Fix layout of struct mlx5_init_seg
  mlx5: Keep polling to reclaim pages while any returned
  IB/mlx5: Avoid async events on invalid port number
  IB/mlx5: Decrease memory consumption of mr caches
  mlx5: Remove checksum on command interface commands
  IB/mlx5: Fix memory leak in mlx5_ib_create_srq
  IB/mlx5: Flush cache workqueue before destroying it
  IB/mlx5: Fix send work queue size calculation
parents d6099aeb 59b5b28d
+1 −1
@@ -141,7 +141,7 @@ static const char *to_qp_state_str(int state)
 		return "C2_QP_STATE_ERROR";
 	default:
 		return "<invalid QP state>";
-	};
+	}
 }
 
 void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
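A note on the hunk above, not part of the commit: in C, a stray ';' after the closing brace of a switch statement is just an empty statement, so deleting it is purely cosmetic. A minimal stand-alone illustration (hypothetical code, not from the kernel tree):

#include <stdio.h>

static const char *state_str(int state)
{
	switch (state) {
	case 0:
		return "OK";
	default:
		return "<invalid>";
	};	/* this ';' is an empty statement: legal, but pointless noise */
}

int main(void)
{
	printf("%s\n", state_str(1));	/* prints "<invalid>" with or without the ';' */
	return 0;
}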
+10 −6
@@ -164,6 +164,7 @@ int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
 static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+	char name[MLX5_MAX_EQ_NAME];
 	struct mlx5_eq *eq, *n;
 	int ncomp_vec;
 	int nent;
@@ -180,11 +181,10 @@ static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
 			goto clean;
 		}
 
-		snprintf(eq->name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
+		snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
 		err = mlx5_create_map_eq(&dev->mdev, eq,
 					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
-					 eq->name,
-					 &dev->mdev.priv.uuari.uars[0]);
+					 name, &dev->mdev.priv.uuari.uars[0]);
 		if (err) {
 			kfree(eq);
 			goto clean;
@@ -301,9 +301,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	props->max_srq_sge	   = max_rq_sg - 1;
 	props->max_fast_reg_page_list_len = (unsigned int)-1;
 	props->local_ca_ack_delay  = dev->mdev.caps.local_ca_ack_delay;
-	props->atomic_cap	   = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ?
-		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
-	props->masked_atomic_cap   = IB_ATOMIC_HCA;
+	props->atomic_cap	   = IB_ATOMIC_NONE;
+	props->masked_atomic_cap   = IB_ATOMIC_NONE;
 	props->max_pkeys	   = be16_to_cpup((__be16 *)(out_mad->data + 28));
 	props->max_mcast_grp	   = 1 << dev->mdev.caps.log_max_mcg;
 	props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
@@ -1006,6 +1005,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
 	ibev.device	      = &ibdev->ib_dev;
 	ibev.element.port_num = port;
 
+	if (port < 1 || port > ibdev->num_ports) {
+		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
+		return;
+	}
+
 	if (ibdev->ib_active)
 		ib_dispatch_event(&ibev);
 }
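The last hunk above drops async events whose port number is out of range before they reach ib_dispatch_event(). A small user-space sketch of the same validate-then-dispatch pattern (NUM_PORTS, dispatch() and handle_event() are illustrative names, not the kernel API):

#include <stdio.h>

#define NUM_PORTS 2	/* assumed port count for this sketch */

static void dispatch(int port)
{
	printf("event dispatched on port %d\n", port);
}

static void handle_event(int port)
{
	/* IB ports are 1-based; anything outside [1, NUM_PORTS] is bogus */
	if (port < 1 || port > NUM_PORTS) {
		fprintf(stderr, "warning: event on port %d\n", port);
		return;
	}
	dispatch(port);
}

int main(void)
{
	handle_event(1);	/* in range -> dispatched */
	handle_event(9);	/* out of range -> warned and dropped */
	return 0;
}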
+33 −37
@@ -42,6 +42,10 @@ enum {
 	DEF_CACHE_SIZE	= 10,
 };
 
+enum {
+	MLX5_UMR_ALIGN	= 2048
+};
+
 static __be64 *mr_align(__be64 *ptr, int align)
 {
 	unsigned long mask = align - 1;
@@ -61,13 +65,11 @@ static int order2idx(struct mlx5_ib_dev *dev, int order)
 
 static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
-	struct device *ddev = dev->ib_dev.dma_device;
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	struct mlx5_create_mkey_mbox_in *in;
 	struct mlx5_ib_mr *mr;
 	int npages = 1 << ent->order;
-	int size = sizeof(u64) * npages;
 	int err = 0;
 	int i;
 
@@ -83,21 +85,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 		}
 		mr->order = ent->order;
 		mr->umred = 1;
-		mr->pas = kmalloc(size + 0x3f, GFP_KERNEL);
-		if (!mr->pas) {
-			kfree(mr);
-			err = -ENOMEM;
-			goto out;
-		}
-		mr->dma = dma_map_single(ddev, mr_align(mr->pas, 0x40), size,
-					 DMA_TO_DEVICE);
-		if (dma_mapping_error(ddev, mr->dma)) {
-			kfree(mr->pas);
-			kfree(mr);
-			err = -ENOMEM;
-			goto out;
-		}
-
 		in->seg.status = 1 << 6;
 		in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
 		in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
@@ -108,8 +95,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 					    sizeof(*in));
 		if (err) {
 			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
-			dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-			kfree(mr->pas);
 			kfree(mr);
 			goto out;
 		}
@@ -129,11 +114,9 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 
 static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
-	struct device *ddev = dev->ib_dev.dma_device;
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	struct mlx5_ib_mr *mr;
-	int size;
 	int err;
 	int i;
 
@@ -149,16 +132,12 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 		ent->size--;
 		spin_unlock(&ent->lock);
 		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
-		if (err) {
+		if (err)
 			mlx5_ib_warn(dev, "failed destroy mkey\n");
-		} else {
-			size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
-			dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-			kfree(mr->pas);
+		else
 			kfree(mr);
-		}
 	}
 }
 
 static ssize_t size_write(struct file *filp, const char __user *buf,
 			  size_t count, loff_t *pos)
@@ -408,13 +387,12 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 
 static void clean_keys(struct mlx5_ib_dev *dev, int c)
 {
-	struct device *ddev = dev->ib_dev.dma_device;
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	struct mlx5_ib_mr *mr;
-	int size;
 	int err;
 
+	cancel_delayed_work(&ent->dwork);
 	while (1) {
 		spin_lock(&ent->lock);
 		if (list_empty(&ent->head)) {
@@ -427,16 +405,12 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 		ent->size--;
 		spin_unlock(&ent->lock);
 		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
-		if (err) {
+		if (err)
 			mlx5_ib_warn(dev, "failed destroy mkey\n");
-		} else {
-			size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
-			dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-			kfree(mr->pas);
+		else
 			kfree(mr);
-		}
 	}
 }
 
 static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
 {
@@ -540,13 +514,15 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 	int i;
 
 	dev->cache.stopped = 1;
-	destroy_workqueue(dev->cache.wq);
+	flush_workqueue(dev->cache.wq);
 
 	mlx5_mr_cache_debugfs_cleanup(dev);
 
 	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
 		clean_keys(dev, i);
 
+	destroy_workqueue(dev->cache.wq);
+
 	return 0;
 }
 
@@ -675,10 +651,12 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 				  int page_shift, int order, int access_flags)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+	struct device *ddev = dev->ib_dev.dma_device;
 	struct umr_common *umrc = &dev->umrc;
 	struct ib_send_wr wr, *bad;
 	struct mlx5_ib_mr *mr;
 	struct ib_sge sg;
+	int size = sizeof(u64) * npages;
 	int err;
 	int i;
 
@@ -697,7 +675,22 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	if (!mr)
 		return ERR_PTR(-EAGAIN);
 
-	mlx5_ib_populate_pas(dev, umem, page_shift, mr_align(mr->pas, 0x40), 1);
+	mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
+	if (!mr->pas) {
+		err = -ENOMEM;
+		goto error;
+	}
+
+	mlx5_ib_populate_pas(dev, umem, page_shift,
+			     mr_align(mr->pas, MLX5_UMR_ALIGN), 1);
+
+	mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
+				 DMA_TO_DEVICE);
+	if (dma_mapping_error(ddev, mr->dma)) {
+		kfree(mr->pas);
+		err = -ENOMEM;
+		goto error;
+	}
 
 	memset(&wr, 0, sizeof(wr));
 	wr.wr_id = (u64)(unsigned long)mr;
@@ -718,6 +711,9 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	wait_for_completion(&mr->done);
 	up(&umrc->sem);
 
+	dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
+	kfree(mr->pas);
+
 	if (mr->status != IB_WC_SUCCESS) {
 		mlx5_ib_warn(dev, "reg umr failed\n");
 		err = -EFAULT;
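The registration hunks above rely on an over-allocate-and-align idiom: the PAS buffer gets MLX5_UMR_ALIGN - 1 spare bytes from kmalloc(), and the pointer handed to the hardware is rounded up to a 2048-byte boundary (mr_align()) before dma_map_single(). A stand-alone user-space sketch of that arithmetic, with illustrative names; only the mask computation mirrors the driver code:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define UMR_ALIGN 2048	/* stands in for MLX5_UMR_ALIGN */

/* Round ptr up to the next multiple of align (align must be a power of two),
 * using the same mask arithmetic as mr_align() in the hunks above. */
static void *align_up(void *ptr, size_t align)
{
	uintptr_t mask = align - 1;

	return (void *)(((uintptr_t)ptr + mask) & ~mask);
}

int main(void)
{
	size_t size = 4096;	/* payload size, arbitrary for the sketch */
	/* Over-allocate by align - 1 so an aligned window of 'size' bytes
	 * is guaranteed to fit inside the buffer. */
	char *raw = malloc(size + UMR_ALIGN - 1);
	char *aligned;

	if (!raw)
		return 1;

	aligned = align_up(raw, UMR_ALIGN);
	printf("raw=%p aligned=%p offset=%zu\n",
	       (void *)raw, (void *)aligned, (size_t)(aligned - raw));

	free(raw);	/* always free the original pointer, not the aligned one */
	return 0;
}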
+30 −50
@@ -203,7 +203,7 @@ static int sq_overhead(enum ib_qp_type qp_type)
 
 	switch (qp_type) {
 	case IB_QPT_XRC_INI:
-		size = sizeof(struct mlx5_wqe_xrc_seg);
+		size += sizeof(struct mlx5_wqe_xrc_seg);
 		/* fall through */
 	case IB_QPT_RC:
 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
@@ -211,20 +211,23 @@ static int sq_overhead(enum ib_qp_type qp_type)
 			sizeof(struct mlx5_wqe_raddr_seg);
 		break;
 
+	case IB_QPT_XRC_TGT:
+		return 0;
+
 	case IB_QPT_UC:
-		size = sizeof(struct mlx5_wqe_ctrl_seg) +
+		size += sizeof(struct mlx5_wqe_ctrl_seg) +
 			sizeof(struct mlx5_wqe_raddr_seg);
 		break;
 
 	case IB_QPT_UD:
 	case IB_QPT_SMI:
 	case IB_QPT_GSI:
-		size = sizeof(struct mlx5_wqe_ctrl_seg) +
+		size += sizeof(struct mlx5_wqe_ctrl_seg) +
 			sizeof(struct mlx5_wqe_datagram_seg);
 		break;
 
 	case MLX5_IB_QPT_REG_UMR:
-		size = sizeof(struct mlx5_wqe_ctrl_seg) +
+		size += sizeof(struct mlx5_wqe_ctrl_seg) +
 			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
 			sizeof(struct mlx5_mkey_seg);
 		break;
@@ -270,7 +273,8 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 		return wqe_size;
 
 	if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
-		mlx5_ib_dbg(dev, "\n");
+		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
+			    wqe_size, dev->mdev.caps.max_sq_desc_sz);
 		return -EINVAL;
 	}
 
@@ -280,9 +284,15 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 
 	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
 	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
+	if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
+		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
+			    qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
+		return -ENOMEM;
+	}
 	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
 	qp->sq.max_gs = attr->cap.max_send_sge;
-	qp->sq.max_post = 1 << ilog2(wq_size / wqe_size);
+	qp->sq.max_post = wq_size / wqe_size;
+	attr->cap.max_send_wr = qp->sq.max_post;
 
 	return wq_size;
 }
@@ -1280,6 +1290,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
 					  MLX5_QP_OPTPAR_Q_KEY,
 			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX	|
 					   MLX5_QP_OPTPAR_Q_KEY,
+			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+					  MLX5_QP_OPTPAR_RRE            |
+					  MLX5_QP_OPTPAR_RAE            |
+					  MLX5_QP_OPTPAR_RWE            |
+					  MLX5_QP_OPTPAR_PKEY_INDEX,
 		},
 	},
 	[MLX5_QP_STATE_RTR] = {
@@ -1314,6 +1329,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
 		[MLX5_QP_STATE_RTS] = {
 			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
 			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
+			[MLX5_QP_ST_UC]	 = MLX5_QP_OPTPAR_RWE,
+			[MLX5_QP_ST_RC]	 = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
+					   MLX5_QP_OPTPAR_RWE		|
+					   MLX5_QP_OPTPAR_RAE		|
+					   MLX5_QP_OPTPAR_RRE,
 		},
 	},
 };
@@ -1651,29 +1671,6 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
 	rseg->reserved = 0;
 }
 
-static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
-{
-	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
-	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-	} else {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-		aseg->compare  = 0;
-	}
-}
-
-static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg,
-				  struct ib_send_wr *wr)
-{
-	aseg->swap_add		= cpu_to_be64(wr->wr.atomic.swap);
-	aseg->swap_add_mask	= cpu_to_be64(wr->wr.atomic.swap_mask);
-	aseg->compare		= cpu_to_be64(wr->wr.atomic.compare_add);
-	aseg->compare_mask	= cpu_to_be64(wr->wr.atomic.compare_add_mask);
-}
-
 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
 			     struct ib_send_wr *wr)
 {
@@ -2063,28 +2060,11 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 			case IB_WR_ATOMIC_CMP_AND_SWP:
 			case IB_WR_ATOMIC_FETCH_AND_ADD:
-				set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-					      wr->wr.atomic.rkey);
-				seg  += sizeof(struct mlx5_wqe_raddr_seg);
-
-				set_atomic_seg(seg, wr);
-				seg  += sizeof(struct mlx5_wqe_atomic_seg);
-
-				size += (sizeof(struct mlx5_wqe_raddr_seg) +
-					 sizeof(struct mlx5_wqe_atomic_seg)) / 16;
-				break;
-
 			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
-				set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-					      wr->wr.atomic.rkey);
-				seg  += sizeof(struct mlx5_wqe_raddr_seg);
-
-				set_masked_atomic_seg(seg, wr);
-				seg  += sizeof(struct mlx5_wqe_masked_atomic_seg);
-
-				size += (sizeof(struct mlx5_wqe_raddr_seg) +
-					 sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16;
-				break;
+				mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
+				err = -ENOSYS;
+				*bad_wr = wr;
+				goto out;
 
 			case IB_WR_LOCAL_INV:
 				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
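For the send-queue sizing fix in calc_sq_size() above: the queue is sized by rounding the requested bytes (max_send_wr * wqe_size) up to a power of two, counting 64-byte basic blocks, and reporting max_post as wq_size / wqe_size instead of rounding it down to a power of two again. A small worked example of that arithmetic (the input values are made up; only the formulas follow the diff):

#include <stdio.h>

#define SEND_WQE_BB 64	/* send WQE basic-block size, as MLX5_SEND_WQE_BB */

/* Round v up to the next power of two (v > 0); stand-in for the kernel's
 * roundup_pow_of_two() in this sketch. */
static unsigned int roundup_pow_of_two(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int max_send_wr = 100;	/* requested send work requests (example) */
	unsigned int wqe_size = 192;	/* bytes per WQE from sq_overhead() + SGEs (example) */

	unsigned int wq_size  = roundup_pow_of_two(max_send_wr * wqe_size);
	unsigned int wqe_cnt  = wq_size / SEND_WQE_BB;
	unsigned int max_post = wq_size / wqe_size;	/* value reported back to the caller */

	/* prints wq_size=32768 wqe_cnt=512 max_post=170 */
	printf("wq_size=%u wqe_cnt=%u max_post=%u\n", wq_size, wqe_cnt, max_post);
	return 0;
}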
+3 −1
@@ -295,7 +295,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 	mlx5_vfree(in);
 	if (err) {
 		mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
-		goto err_srq;
+		goto err_usr_kern_srq;
 	}
 
 	mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);
@@ -316,6 +316,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 
 err_core:
 	mlx5_core_destroy_srq(&dev->mdev, &srq->msrq);
+
+err_usr_kern_srq:
 	if (pd->uobject)
 		destroy_srq_user(pd, srq);
 	else
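The SRQ hunks above fix the error path by retargeting the failure goto at the new err_usr_kern_srq: label, so the user/kernel SRQ buffer is released as well. A generic sketch of that layered goto-cleanup pattern (hypothetical resources, not the driver's):

#include <stdlib.h>

/* Each error label unwinds exactly the steps that already succeeded;
 * jumping to too early a label (the bug being fixed) leaks the later
 * allocations. */
static int create_object(void)
{
	char *ctrl, *ring;
	int err;

	ctrl = malloc(64);		/* step 1: control structure */
	if (!ctrl)
		return -1;

	ring = malloc(256);		/* step 2: user/kernel buffer */
	if (!ring) {
		err = -1;
		goto err_ctrl;
	}

	err = 0;			/* step 3: pretend hardware-create step; */
	if (err)			/* a failure here must go to err_ring so */
		goto err_ring;		/* the ring is freed too, not just ctrl. */

	free(ring);			/* normal teardown for this sketch */
	free(ctrl);
	return 0;

err_ring:
	free(ring);
err_ctrl:
	free(ctrl);
	return err;
}

int main(void)
{
	return create_object() ? 1 : 0;
}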