
Commit b70eb300 authored by Paul Burton, committed by Ralf Baechle

MIPS: cmpxchg: Implement 1 byte & 2 byte xchg()



Implement 1 & 2 byte xchg() using read-modify-write atop a 4 byte
cmpxchg(). This allows us to support these atomic operations despite the
MIPS ISA only providing for 4 & 8 byte atomic operations.

This is required in order to support queued spinlocks (qspinlock) in a
later patch, since these make use of a 2 byte xchg() in their slow path.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/16354/


Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
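
To illustrate the approach outside the kernel, here is a minimal userspace sketch (not part of this patch) that emulates a 2 byte exchange on top of a 4 byte compare-and-swap using GCC's __atomic builtins. It assumes a little-endian byte layout for brevity; the kernel code added below also handles big-endian.

#include <stdint.h>
#include <stdio.h>

/* Emulate a 2 byte exchange using only a 4 byte compare-and-swap. */
static uint16_t xchg16_via_cas32(volatile uint16_t *ptr, uint16_t val)
{
	uintptr_t addr = (uintptr_t)ptr;
	/* The naturally aligned 4 byte word that contains *ptr. */
	volatile uint32_t *ptr32 = (volatile uint32_t *)(addr & ~(uintptr_t)0x3);
	/* Bit offset of *ptr within that word (little-endian assumed). */
	unsigned int shift = (addr & 0x3) * 8;
	uint32_t mask = (uint32_t)0xffff << shift;
	uint32_t old32 = *ptr32, new32;

	do {
		/* Splice the new 2 byte value into the surrounding word. */
		new32 = (old32 & ~mask) | ((uint32_t)val << shift);
		/* On failure, old32 is refreshed with the current contents. */
	} while (!__atomic_compare_exchange_n(ptr32, &old32, new32, 0,
					      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));

	return (uint16_t)((old32 & mask) >> shift);
}

int main(void)
{
	/* A union guarantees the two halfwords share one aligned 4 byte word. */
	union { uint32_t word; uint16_t half[2]; } u = { .half = { 0x1111, 0x2222 } };
	uint16_t prev = xchg16_via_cas32(&u.half[1], 0xbeef);

	printf("prev=%#x half[0]=%#x half[1]=%#x\n", prev, u.half[0], u.half[1]);
	return 0;
}
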
parent 8263db4d

arch/mips/include/asm/cmpxchg.h  +7 −2
@@ -70,9 +70,16 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
	__ret;								\
})

+extern unsigned long __xchg_small(volatile void *ptr, unsigned long val,
+				  unsigned int size);
+
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
+	case 1:
+	case 2:
+		return __xchg_small(ptr, x, size);
+
	case 4:
		return __xchg_asm("ll", "sc", (volatile u32 *)ptr, x);

@@ -91,8 +98,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
({									\
	__typeof__(*(ptr)) __res;					\
									\
-	BUILD_BUG_ON(sizeof(*(ptr)) & ~0xc);				\
-									\
	smp_mb__before_llsc();						\
									\
	__res = (__typeof__(*(ptr)))					\

arch/mips/kernel/Makefile  +1 −1
@@ -4,7 +4,7 @@

extra-y		:= head.o vmlinux.lds

-obj-y		+= cpu-probe.o branch.o elf.o entry.o genex.o idle.o irq.o \
+obj-y		+= cmpxchg.o cpu-probe.o branch.o elf.o entry.o genex.o idle.o irq.o \
		   process.o prom.o ptrace.o reset.o setup.o signal.o \
		   syscall.o time.o topology.o traps.o unaligned.o watch.o \
		   vdso.o cacheinfo.o

arch/mips/kernel/cmpxchg.c (new file)  +52 −0
/*
 * Copyright (C) 2017 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/bitops.h>
#include <asm/cmpxchg.h>

unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size)
{
	u32 old32, new32, load32, mask;
	volatile u32 *ptr32;
	unsigned int shift;

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));

	/* Mask value to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	val &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * exchange within the naturally aligned 4 byte integer that includes
	 * it.
	 */
	shift = (unsigned long)ptr & 0x3;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;

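	/*
	 * Attempt to splice val into the loaded word with a 4 byte cmpxchg(),
	 * retrying if another CPU modified the word in the meantime (i.e. if
	 * cmpxchg() returns something other than the value new32 was based on).
	 */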
	do {
		old32 = load32;
		new32 = (load32 & ~mask) | (val << shift);
		load32 = cmpxchg(ptr32, old32, new32);
	} while (load32 != old32);

	return (load32 & mask) >> shift;
}
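
For context, a hypothetical call site of the kind this change enables is sketched below. The struct and field names are invented for illustration (they are not taken from this patch or from qspinlock), but a 2 byte xchg() like this is what qspinlock's slow path relies on.

#include <linux/types.h>
#include <asm/cmpxchg.h>

/* Hypothetical example: not part of this patch. */
struct example_pair {
	u16 owner;
	u16 next;
};

static inline u16 example_claim(struct example_pair *p, u16 me)
{
	/*
	 * A 2 byte xchg() previously resolved to
	 * __xchg_called_with_bad_pointer() on MIPS; with this patch it is
	 * serviced by __xchg_small() via the 4 byte cmpxchg() loop above.
	 */
	return xchg(&p->owner, me);
}
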