Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2a0d8366 authored by Roland Dreier
Browse files

Merge branches 'cma', 'ehca', 'ipath', 'iser', 'mlx4' and 'nes' into for-next

Loading
Loading
Loading
Loading
+7 −0
Original line number Original line Diff line number Diff line
@@ -175,6 +175,13 @@ struct ehca_queue_map {
	unsigned int next_wqe_idx;   /* Idx to first wqe to be flushed */
	unsigned int next_wqe_idx;   /* Idx to first wqe to be flushed */
};
};


/* Advance a qmap index by one, wrapping back to 0 when the end of the queue map is reached */
static inline unsigned int next_index(unsigned int cur_index, unsigned int limit)
{
	unsigned int next = cur_index + 1;

	if (next == limit)
		next = 0;
	return next;
}

struct ehca_qp {
struct ehca_qp {
	union {
	union {
		struct ib_qp ib_qp;
		struct ib_qp ib_qp;
+1 −1
Original line number Original line Diff line number Diff line
@@ -113,7 +113,7 @@ int ehca_create_eq(struct ehca_shca *shca,
			if (h_ret != H_SUCCESS || vpage)
			if (h_ret != H_SUCCESS || vpage)
				goto create_eq_exit2;
				goto create_eq_exit2;
		} else {
		} else {
			if (h_ret != H_PAGE_REGISTERED || !vpage)
			if (h_ret != H_PAGE_REGISTERED)
				goto create_eq_exit2;
				goto create_eq_exit2;
		}
		}
	}
	}
+10 −7
Original line number Original line Diff line number Diff line
@@ -717,6 +717,7 @@ static int __devinit ehca_probe(struct of_device *dev,
	const u64 *handle;
	const u64 *handle;
	struct ib_pd *ibpd;
	struct ib_pd *ibpd;
	int ret, i, eq_size;
	int ret, i, eq_size;
	unsigned long flags;


	handle = of_get_property(dev->node, "ibm,hca-handle", NULL);
	handle = of_get_property(dev->node, "ibm,hca-handle", NULL);
	if (!handle) {
	if (!handle) {
@@ -830,9 +831,9 @@ static int __devinit ehca_probe(struct of_device *dev,
		ehca_err(&shca->ib_device,
		ehca_err(&shca->ib_device,
			 "Cannot create device attributes  ret=%d", ret);
			 "Cannot create device attributes  ret=%d", ret);


	spin_lock(&shca_list_lock);
	spin_lock_irqsave(&shca_list_lock, flags);
	list_add(&shca->shca_list, &shca_list);
	list_add(&shca->shca_list, &shca_list);
	spin_unlock(&shca_list_lock);
	spin_unlock_irqrestore(&shca_list_lock, flags);


	return 0;
	return 0;


@@ -878,6 +879,7 @@ static int __devinit ehca_probe(struct of_device *dev,
static int __devexit ehca_remove(struct of_device *dev)
static int __devexit ehca_remove(struct of_device *dev)
{
{
	struct ehca_shca *shca = dev->dev.driver_data;
	struct ehca_shca *shca = dev->dev.driver_data;
	unsigned long flags;
	int ret;
	int ret;


	sysfs_remove_group(&dev->dev.kobj, &ehca_dev_attr_grp);
	sysfs_remove_group(&dev->dev.kobj, &ehca_dev_attr_grp);
@@ -915,9 +917,9 @@ static int __devexit ehca_remove(struct of_device *dev)


	ib_dealloc_device(&shca->ib_device);
	ib_dealloc_device(&shca->ib_device);


	spin_lock(&shca_list_lock);
	spin_lock_irqsave(&shca_list_lock, flags);
	list_del(&shca->shca_list);
	list_del(&shca->shca_list);
	spin_unlock(&shca_list_lock);
	spin_unlock_irqrestore(&shca_list_lock, flags);


	return ret;
	return ret;
}
}
@@ -975,6 +977,7 @@ static int ehca_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
			     unsigned long action, void *data)
{
{
	static unsigned long ehca_dmem_warn_time;
	static unsigned long ehca_dmem_warn_time;
	unsigned long flags;


	switch (action) {
	switch (action) {
	case MEM_CANCEL_OFFLINE:
	case MEM_CANCEL_OFFLINE:
@@ -985,12 +988,12 @@ static int ehca_mem_notifier(struct notifier_block *nb,
	case MEM_GOING_ONLINE:
	case MEM_GOING_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_GOING_OFFLINE:
		/* only ok if no hca is attached to the lpar */
		/* only ok if no hca is attached to the lpar */
		spin_lock(&shca_list_lock);
		spin_lock_irqsave(&shca_list_lock, flags);
		if (list_empty(&shca_list)) {
		if (list_empty(&shca_list)) {
			spin_unlock(&shca_list_lock);
			spin_unlock_irqrestore(&shca_list_lock, flags);
			return NOTIFY_OK;
			return NOTIFY_OK;
		} else {
		} else {
			spin_unlock(&shca_list_lock);
			spin_unlock_irqrestore(&shca_list_lock, flags);
			if (printk_timed_ratelimit(&ehca_dmem_warn_time,
			if (printk_timed_ratelimit(&ehca_dmem_warn_time,
						   30 * 1000))
						   30 * 1000))
				ehca_gen_err("DMEM operations are not allowed"
				ehca_gen_err("DMEM operations are not allowed"
+6 −6
Original line number Original line Diff line number Diff line
@@ -1138,14 +1138,14 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
		return -EFAULT;
		return -EFAULT;
	}
	}


	tail_idx = (qmap->tail + 1) % qmap->entries;
	tail_idx = next_index(qmap->tail, qmap->entries);
	wqe_idx = q_ofs / ipz_queue->qe_size;
	wqe_idx = q_ofs / ipz_queue->qe_size;


	/* check all processed wqes, whether a cqe is requested or not */
	/* check all processed wqes, whether a cqe is requested or not */
	while (tail_idx != wqe_idx) {
	while (tail_idx != wqe_idx) {
		if (qmap->map[tail_idx].cqe_req)
		if (qmap->map[tail_idx].cqe_req)
			qmap->left_to_poll++;
			qmap->left_to_poll++;
		tail_idx = (tail_idx + 1) % qmap->entries;
		tail_idx = next_index(tail_idx, qmap->entries);
	}
	}
	/* save index in queue, where we have to start flushing */
	/* save index in queue, where we have to start flushing */
	qmap->next_wqe_idx = wqe_idx;
	qmap->next_wqe_idx = wqe_idx;
@@ -1195,14 +1195,14 @@ static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
	} else {
	} else {
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		my_qp->sq_map.left_to_poll = 0;
		my_qp->sq_map.left_to_poll = 0;
		my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) %
		my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
						my_qp->sq_map.entries;
							my_qp->sq_map.entries);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);


		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
		my_qp->rq_map.left_to_poll = 0;
		my_qp->rq_map.left_to_poll = 0;
		my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) %
		my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
						my_qp->rq_map.entries;
							my_qp->rq_map.entries);
		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
	}
	}


+6 −7
Original line number Original line Diff line number Diff line
@@ -726,13 +726,13 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
		 * set left_to_poll to 0 because in error state, we will not
		 * set left_to_poll to 0 because in error state, we will not
		 * get any additional CQEs
		 * get any additional CQEs
		 */
		 */
		my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) %
		my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
						my_qp->sq_map.entries;
							my_qp->sq_map.entries);
		my_qp->sq_map.left_to_poll = 0;
		my_qp->sq_map.left_to_poll = 0;
		ehca_add_to_err_list(my_qp, 1);
		ehca_add_to_err_list(my_qp, 1);


		my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) %
		my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
						my_qp->rq_map.entries;
							my_qp->rq_map.entries);
		my_qp->rq_map.left_to_poll = 0;
		my_qp->rq_map.left_to_poll = 0;
		if (HAS_RQ(my_qp))
		if (HAS_RQ(my_qp))
			ehca_add_to_err_list(my_qp, 0);
			ehca_add_to_err_list(my_qp, 0);
@@ -860,9 +860,8 @@ static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,


		/* mark as reported and advance next_wqe pointer */
		/* mark as reported and advance next_wqe pointer */
		qmap_entry->reported = 1;
		qmap_entry->reported = 1;
		qmap->next_wqe_idx++;
		qmap->next_wqe_idx = next_index(qmap->next_wqe_idx,
		if (qmap->next_wqe_idx == qmap->entries)
						qmap->entries);
			qmap->next_wqe_idx = 0;
		qmap_entry = &qmap->map[qmap->next_wqe_idx];
		qmap_entry = &qmap->map[qmap->next_wqe_idx];


		wc++; nr++;
		wc++; nr++;
Loading