Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6e3caa1e authored by Eric Holmberg
Browse files

hwspinlock/msm: correct pid for release functionality



Valid remote spinlock processor IDs are 1..N, but the processor IDs
start at 0.  To fix this, the value written to the HW Mutex is PID + 1.
Because the release functionality was using the PID for comparison, the
remote spinlock was not being properly released during subsystem restart
processing which would ultimately result in a watchdog if the remote
subsystem failed with the remote spinlock locked.

Update the release functions to compare the lock value with PID + 1.

CRs-Fixed: 725337
Change-Id: I4d484b5d37cfd1f9f8c6a3290ed378812620b130
Signed-off-by: Eric Holmberg <eholmber@codeaurora.org>
Signed-off-by: Arun Kumar Neelakantam <aneela@codeaurora.org>
parent f19ef0f6
Loading
Loading
Loading
Loading
+43 −12
Original line number Diff line number Diff line
@@ -21,8 +21,11 @@

#include <soc/qcom/smem.h>


#define SPINLOCK_PID_APPS 1
/**
 * The local processor (APPS) is PID 0, but because 0 is reserved for an empty
 * lock, the value PID + 1 is used as the APSS token when writing to the lock.
 */
#define SPINLOCK_TOKEN_APPS 1

static int is_hw_lock_type;
static DEFINE_MUTEX(ops_init_lock);
@@ -56,7 +59,7 @@ static void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
"       teqeq   %0, #0\n"
"       bne     1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (SPINLOCK_PID_APPS)
	: "r" (&lock->lock), "r" (SPINLOCK_TOKEN_APPS)
	: "cc");

	smp_mb();
@@ -71,7 +74,7 @@ static int __raw_remote_ex_spin_trylock(raw_remote_spinlock_t *lock)
"       teq     %0, #0\n"
"       strexeq %0, %2, [%1]\n"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (SPINLOCK_PID_APPS)
	: "r" (&lock->lock), "r" (SPINLOCK_TOKEN_APPS)
	: "cc");

	if (tmp == 0) {
@@ -87,7 +90,7 @@ static void __raw_remote_ex_spin_unlock(raw_remote_spinlock_t *lock)

	smp_mb();
	lock_owner = readl_relaxed(&lock->lock);
	if (lock_owner != SPINLOCK_PID_APPS) {
	if (lock_owner != SPINLOCK_TOKEN_APPS) {
		pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
				__func__, lock_owner);
	}
@@ -183,17 +186,35 @@ static int remote_spinlock_init_address_hw(int id, _remote_spinlock_t *lock)

static void __raw_remote_sfpb_spin_lock(raw_remote_spinlock_t *lock)
{
	/*
	 * Wait for other local processor task to release spinlock if it
	 * already has the remote spinlock locked.  This can only happen in
	 * test cases since the local spinlock will prevent this when using the
	 * public APIs.
	 */
	while (readl_relaxed(lock) == SPINLOCK_TOKEN_APPS)
		;

	/* acquire remote spinlock */
	do {
		writel_relaxed(SPINLOCK_PID_APPS, lock);
		writel_relaxed(SPINLOCK_TOKEN_APPS, lock);
		smp_mb();
	} while (readl_relaxed(lock) != SPINLOCK_PID_APPS);
	} while (readl_relaxed(lock) != SPINLOCK_TOKEN_APPS);
}

static int __raw_remote_sfpb_spin_trylock(raw_remote_spinlock_t *lock)
{
	writel_relaxed(SPINLOCK_PID_APPS, lock);
	/*
	 * If the local processor owns the spinlock, return failure.  This can
	 * only happen in test cases since the local spinlock will prevent this
	 * when using the public APIs.
	 */
	if (readl_relaxed(lock) == SPINLOCK_TOKEN_APPS)
		return 0;

	writel_relaxed(SPINLOCK_TOKEN_APPS, lock);
	smp_mb();
	return readl_relaxed(lock) == SPINLOCK_PID_APPS;
	return readl_relaxed(lock) == SPINLOCK_TOKEN_APPS;
}

static void __raw_remote_sfpb_spin_unlock(raw_remote_spinlock_t *lock)
@@ -201,7 +222,7 @@ static void __raw_remote_sfpb_spin_unlock(raw_remote_spinlock_t *lock)
	int lock_owner;

	lock_owner = readl_relaxed(lock);
	if (lock_owner != SPINLOCK_PID_APPS) {
	if (lock_owner != SPINLOCK_TOKEN_APPS) {
		pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
				__func__, lock_owner);
	}
@@ -247,7 +268,11 @@ static int __raw_remote_gen_spin_release(raw_remote_spinlock_t *lock,
{
	int ret = 1;

	if (readl_relaxed(&lock->lock) == pid) {
	/*
	 * Since 0 is reserved for an empty lock and the PIDs start at 0, the
	 * value PID + 1 is written to the lock.
	 */
	if (readl_relaxed(&lock->lock) == (pid + 1)) {
		writel_relaxed(0, &lock->lock);
		wmb();
		ret = 0;
@@ -265,8 +290,14 @@ static int __raw_remote_gen_spin_release(raw_remote_spinlock_t *lock,
 */
static int __raw_remote_gen_spin_owner(raw_remote_spinlock_t *lock)
{
	int owner;
	rmb();
	return readl_relaxed(&lock->lock);

	owner = readl_relaxed(&lock->lock);
	if (owner)
		return owner - 1;
	else
		return -ENODEV;
}


+85 −2
Original line number Diff line number Diff line
@@ -22,7 +22,6 @@
#include "smp2p_private.h"
#include "smp2p_test_common.h"

#define REMOTE_SPIN_PID 1
#define RS_END_THIEF_PID_BIT 20
#define RS_END_THIEF_MASK 0x00f00000

@@ -191,7 +190,7 @@ static void smp2p_ut_remote_spinlock_core(struct seq_file *s, int remote_pid,
			for (n = 0; n < 1000; ++n) {
				spinlock_owner =
					remote_spin_owner(smem_spinlock);
				if (spinlock_owner != REMOTE_SPIN_PID) {
				if (spinlock_owner != SMEM_APPS) {
					/* lock stolen by remote side */
					seq_puts(s, "\tFail: Remote side: ");
					seq_printf(s, "%d stole lock pid: %d\n",
@@ -466,6 +465,88 @@ static void smp2p_ut_remote_spinlock_rpm(struct seq_file *s)
	}
}

struct rmt_spinlock_work_item {
	struct work_struct work;
	struct completion try_lock;
	struct completion locked;
	bool has_locked;
};

static void ut_remote_spinlock_ssr_worker(struct work_struct *work)
{
	remote_spinlock_t *smem_spinlock;
	unsigned long flags;
	struct rmt_spinlock_work_item *work_item =
		container_of(work, struct rmt_spinlock_work_item, work);

	work_item->has_locked = false;
	complete(&work_item->try_lock);
	smem_spinlock = smem_get_remote_spinlock();
	if (!smem_spinlock) {
		pr_err("%s Failed\n", __func__);
		return;
	}

	remote_spin_lock_irqsave(smem_spinlock, flags);
	remote_spin_unlock_irqrestore(smem_spinlock, flags);
	work_item->has_locked = true;
	complete(&work_item->locked);
}

/**
 * smp2p_ut_remote_spinlock_ssr - Verify remote spinlock.
 *
 * @s:   pointer to output file
 */
static void smp2p_ut_remote_spinlock_ssr(struct seq_file *s)
{
	int failed = 0;
	unsigned long flags;
	remote_spinlock_t *smem_spinlock;
	int spinlock_owner = 0;

	struct workqueue_struct *ws = NULL;
	struct rmt_spinlock_work_item work_item;

	seq_printf(s, " Running %s Test\n",
		   __func__);
	do {
		smem_spinlock = smem_get_remote_spinlock();
		UT_ASSERT_PTR(smem_spinlock, !=, NULL);

		ws = create_singlethread_workqueue("ut_remote_spinlock_ssr");
		UT_ASSERT_PTR(ws, !=, NULL);
		INIT_WORK(&work_item.work, ut_remote_spinlock_ssr_worker);
		init_completion(&work_item.try_lock);
		init_completion(&work_item.locked);

		remote_spin_lock_irqsave(smem_spinlock, flags);
		/* Unlock local spin lock and hold HW spinlock */
		spin_unlock_irqrestore(&((smem_spinlock)->local), flags);

		queue_work(ws, &work_item.work);
		UT_ASSERT_INT(
			(int)wait_for_completion_timeout(
					&work_item.try_lock, HZ * 2), >, 0);
		UT_ASSERT_INT((int)work_item.has_locked, ==, 0);
		spinlock_owner = remote_spin_owner(smem_spinlock);
		UT_ASSERT_INT(spinlock_owner, ==, SMEM_APPS);
		remote_spin_release_all(SMEM_APPS);

		UT_ASSERT_INT(
			(int)wait_for_completion_timeout(
					&work_item.locked, HZ * 2), >, 0);

		if (!failed)
			seq_puts(s, "\tOK\n");
	} while (0);

	if (failed) {
		pr_err("%s: Failed\n", __func__);
		seq_puts(s, "\tFailed\n");
	}
}

static int __init smp2p_debugfs_init(void)
{
	/*
@@ -493,6 +574,8 @@ static int __init smp2p_debugfs_init(void)
		smp2p_ut_remote_spinlock_rpm);
	smp2p_debug_create_u32("ut_remote_spinlock_time",
		&ut_remote_spinlock_run_time);
	smp2p_debug_create("ut_remote_spinlock_ssr",
		&smp2p_ut_remote_spinlock_ssr);

	return 0;
}