Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8addebc1 authored by Thomas Gleixner, committed by Martin K. Petersen
Browse files

scsi: bnx2fc: Plug CPU hotplug race



bnx2fc_process_new_cqes() has protection against CPU hotplug, which relies
on the per cpu thread pointer. This protection is racy because it happens
only partially with the per cpu fp_work_lock held.

If the CPU is unplugged after the lock is dropped, the wakeup code can
dereference a NULL pointer or access freed and potentially reused memory.

Restructure the code so the thread check and wakeup happens with the
fp_work_lock held.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Chad Dupuis <chad.dupuis@cavium.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 2c675218
Loading
Loading
Loading
Loading
+23 −22
Original line number Diff line number Diff line
@@ -1008,6 +1008,28 @@ static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
	return work;
}

/* Pending work request completion */
static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
{
	unsigned int cpu = wqe % num_possible_cpus();
	struct bnx2fc_percpu_s *fps;
	struct bnx2fc_work *work;

	fps = &per_cpu(bnx2fc_percpu, cpu);
	spin_lock_bh(&fps->fp_work_lock);
	if (fps->iothread) {
		work = bnx2fc_alloc_work(tgt, wqe);
		if (work) {
			list_add_tail(&work->list, &fps->work_list);
			wake_up_process(fps->iothread);
			spin_unlock_bh(&fps->fp_work_lock);
			return;
		}
	}
	spin_unlock_bh(&fps->fp_work_lock);
	bnx2fc_process_cq_compl(tgt, wqe);
}

int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
{
	struct fcoe_cqe *cq;
@@ -1042,28 +1064,7 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
			/* Unsolicited event notification */
			bnx2fc_process_unsol_compl(tgt, wqe);
		} else {
			/* Pending work request completion */
			struct bnx2fc_work *work = NULL;
			struct bnx2fc_percpu_s *fps = NULL;
			unsigned int cpu = wqe % num_possible_cpus();

			fps = &per_cpu(bnx2fc_percpu, cpu);
			spin_lock_bh(&fps->fp_work_lock);
			if (unlikely(!fps->iothread))
				goto unlock;

			work = bnx2fc_alloc_work(tgt, wqe);
			if (work)
				list_add_tail(&work->list,
					      &fps->work_list);
unlock:
			spin_unlock_bh(&fps->fp_work_lock);

			/* Pending work request completion */
			if (fps->iothread && work)
				wake_up_process(fps->iothread);
			else
				bnx2fc_process_cq_compl(tgt, wqe);
			bnx2fc_pending_work(tgt, wqe);
			num_free_sqes++;
		}
		cqe++;