
Commit 35ff032e authored by Ralph Campbell, committed by Roland Dreier

IB/ipath: Don't call spin_lock_irq() from interrupt context



This patch fixes the problem reported by Bernd Schubert <bs@q-leap.de>
with kernel debug options enabled:

    BUG: at kernel/lockdep.c:1860 trace_hardirqs_on()

This was caused by using spin_lock_irq()/spin_unlock_irq() from
interrupt context.  Fix all the places that might be called from
interrupts to use spin_lock_irqsave()/spin_unlock_irqrestore().
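
The underlying rule: spin_unlock_irq() unconditionally re-enables local
interrupts, so calling it from a hardirq handler (where interrupts are
disabled) turns them back on in the middle of the handler, which is what
lockdep's trace_hardirqs_on() check reports. The irqsave/irqrestore
variants instead save the caller's interrupt state and put it back on
unlock. A minimal sketch of the pattern being applied here (the lock and
function names are hypothetical, not from this patch):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);

	/*
	 * Hypothetical helper that may run in process context OR from
	 * an interrupt handler.
	 */
	static void demo_update(void)
	{
		unsigned long flags;

		/*
		 * Wrong if reachable from interrupt context:
		 *
		 *	spin_lock_irq(&demo_lock);
		 *	...
		 *	spin_unlock_irq(&demo_lock);
		 *
		 * because spin_unlock_irq() re-enables interrupts even
		 * when the caller had them disabled.
		 */

		/* Right: save the caller's interrupt state, restore it. */
		spin_lock_irqsave(&demo_lock, flags);
		/* ... modify data shared with interrupt context ... */
		spin_unlock_irqrestore(&demo_lock, flags);
	}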

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent b9099ff6
+11 −7
@@ -587,6 +587,7 @@ static void send_rc_ack(struct ipath_qp *qp)
 	u32 hwords;
 	struct ipath_ib_header hdr;
 	struct ipath_other_headers *ohdr;
+	unsigned long flags;
 
 	/* Don't send ACK or NAK if a RDMA read or atomic is pending. */
 	if (qp->r_head_ack_queue != qp->s_tail_ack_queue)
@@ -640,11 +641,11 @@ static void send_rc_ack(struct ipath_qp *qp)
 	dev->n_rc_qacks++;
 
 queue_ack:
-	spin_lock_irq(&qp->s_lock);
+	spin_lock_irqsave(&qp->s_lock, flags);
 	qp->s_flags |= IPATH_S_ACK_PENDING;
 	qp->s_nak_state = qp->r_nak_state;
 	qp->s_ack_psn = qp->r_ack_psn;
-	spin_unlock_irq(&qp->s_lock);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 
 	/* Call ipath_do_rc_send() in another thread. */
 	tasklet_hi_schedule(&qp->s_task);
@@ -1294,6 +1295,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
 	struct ipath_ack_entry *e;
 	u8 i, prev;
 	int old_req;
+	unsigned long flags;
 
 	if (diff > 0) {
 		/*
@@ -1327,7 +1329,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
 	psn &= IPATH_PSN_MASK;
 	e = NULL;
 	old_req = 1;
-	spin_lock_irq(&qp->s_lock);
+	spin_lock_irqsave(&qp->s_lock, flags);
 	for (i = qp->r_head_ack_queue; ; i = prev) {
 		if (i == qp->s_tail_ack_queue)
 			old_req = 0;
@@ -1425,7 +1427,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
 		 * after all the previous RDMA reads and atomics.
 		 */
 		if (i == qp->r_head_ack_queue) {
-			spin_unlock_irq(&qp->s_lock);
+			spin_unlock_irqrestore(&qp->s_lock, flags);
 			qp->r_nak_state = 0;
 			qp->r_ack_psn = qp->r_psn - 1;
 			goto send_ack;
@@ -1443,7 +1445,7 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
 	tasklet_hi_schedule(&qp->s_task);
 
 unlock_done:
-	spin_unlock_irq(&qp->s_lock);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 done:
 	return 1;
 
@@ -1453,10 +1455,12 @@ static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
 
 static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
 {
-	spin_lock_irq(&qp->s_lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&qp->s_lock, flags);
 	qp->state = IB_QPS_ERR;
 	ipath_error_qp(qp, err);
-	spin_unlock_irq(&qp->s_lock);
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 }
 
 /**