Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2d0abb7b authored by Joel Fernandes, committed by Greg Kroah-Hartman
Browse files

pstore: Make spinlock per zone instead of global



commit 109704492ef637956265ec2eb72ae7b3b39eb6f4 upstream.

Currently pstore has a global spinlock for all zones. Since the zones
are independent and modify different areas of memory, there's no need
to have a global lock, so we should use a per-zone lock as introduced
here. Also, when ramoops's ftrace use-case has a FTRACE_PER_CPU flag
introduced later, which splits the ftrace memory area into a single zone
per CPU, it will eliminate the need for locking. In preparation for this,
make the locking optional.

Signed-off-by: Joel Fernandes <joelaf@google.com>
[kees: updated commit message]
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: Leo Yan <leo.yan@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent c64a196b
Loading
Loading
Loading
Loading
+5 −6
Original line number Diff line number Diff line
@@ -80,8 +80,6 @@ static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a)
	} while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
}

static DEFINE_RAW_SPINLOCK(buffer_lock);

/* increase and wrap the start pointer, returning the old value */
static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
{
@@ -89,7 +87,7 @@ static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
	int new;
	unsigned long flags;

	raw_spin_lock_irqsave(&buffer_lock, flags);
	raw_spin_lock_irqsave(&prz->buffer_lock, flags);

	old = atomic_read(&prz->buffer->start);
	new = old + a;
@@ -97,7 +95,7 @@ static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
		new -= prz->buffer_size;
	atomic_set(&prz->buffer->start, new);

	raw_spin_unlock_irqrestore(&buffer_lock, flags);
	raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);

	return old;
}
@@ -109,7 +107,7 @@ static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a)
	size_t new;
	unsigned long flags;

	raw_spin_lock_irqsave(&buffer_lock, flags);
	raw_spin_lock_irqsave(&prz->buffer_lock, flags);

	old = atomic_read(&prz->buffer->size);
	if (old == prz->buffer_size)
@@ -121,7 +119,7 @@ static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a)
	atomic_set(&prz->buffer->size, new);

exit:
	raw_spin_unlock_irqrestore(&buffer_lock, flags);
	raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
}

static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic;
@@ -489,6 +487,7 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,

	prz->buffer->sig = sig;
	persistent_ram_zap(prz);
	prz->buffer_lock = __RAW_SPIN_LOCK_UNLOCKED(buffer_lock);

	return 0;
}
+1 −0
Original line number Diff line number Diff line
@@ -39,6 +39,7 @@ struct persistent_ram_zone {
	void *vaddr;
	struct persistent_ram_buffer *buffer;
	size_t buffer_size;
	raw_spinlock_t buffer_lock;

	/* ECC correction */
	char *par_buffer;