From e28ff5dc8cf6aec042741f1ea62089dca6a894ab Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Wed, 4 Oct 2023 17:53:08 +0100
Subject: [PATCH] alpha: implement xor_unlock_is_negative_byte

Inspired by the alpha clear_bit() and arch_atomic_add_return(), this
will surely be more efficient than the generic one defined in filemap.c.

Link: https://lkml.kernel.org/r/20231004165317.1061855-9-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Cc: Albert Ou
Cc: Alexander Gordeev
Cc: Andreas Dilger
Cc: Christian Borntraeger
Cc: Christophe Leroy
Cc: Geert Uytterhoeven
Cc: Heiko Carstens
Cc: Ivan Kokshaysky
Cc: Matt Turner
Cc: Michael Ellerman
Cc: Nicholas Piggin
Cc: Palmer Dabbelt
Cc: Paul Walmsley
Cc: Richard Henderson
Cc: Sven Schnelle
Cc: "Theodore Ts'o"
Cc: Thomas Bogendoerfer
Cc: Vasily Gorbik
Signed-off-by: Andrew Morton
---
 arch/alpha/include/asm/bitops.h | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h
index bafb1c1f0fdc..b50ad6b83e85 100644
--- a/arch/alpha/include/asm/bitops.h
+++ b/arch/alpha/include/asm/bitops.h
@@ -286,6 +286,27 @@ arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 #define arch_test_bit generic_test_bit
 #define arch_test_bit_acquire generic_test_bit_acquire
 
+static inline bool xor_unlock_is_negative_byte(unsigned long mask,
+		volatile unsigned long *p)
+{
+	unsigned long temp, old;
+
+	__asm__ __volatile__(
+	"1:	ldl_l %0,%4\n"
+	"	mov %0,%2\n"
+	"	xor %0,%3,%0\n"
+	"	stl_c %0,%1\n"
+	"	beq %0,2f\n"
+	".subsection 2\n"
+	"2:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (*p), "=&r" (old)
+	:"Ir" (mask), "m" (*p));
+
+	return (old & BIT(7)) != 0;
+}
+#define xor_unlock_is_negative_byte xor_unlock_is_negative_byte
+
 /*
  * ffz = Find First Zero in word.  Undefined if no zero exists,
  * so code should check against ~0UL first..
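
For reference, below is a minimal userspace sketch of the semantics the
helper provides: atomically XOR the mask into the word and report whether
bit 7 of the old value was set (the page-lock code keeps its "waiters"
flag there).  It assumes the GCC/Clang __atomic builtins and is only an
illustration of the contract; it is not the kernel's generic fallback in
filemap.c, and the names used here are made up for the example.

/* sketch.c - illustration of the xor_unlock_is_negative_byte() contract */
#include <stdbool.h>
#include <stdio.h>

static bool sketch_xor_unlock_is_negative_byte(unsigned long mask,
					       unsigned long *p)
{
	/*
	 * Atomically XOR the mask into *p; release ordering so the
	 * stores made while "locked" are visible before the unlock.
	 * __atomic_fetch_xor() returns the value before the XOR.
	 */
	unsigned long old = __atomic_fetch_xor(p, mask, __ATOMIC_RELEASE);

	/* Report whether bit 7 (the waiters flag) was set. */
	return (old & (1UL << 7)) != 0;
}

int main(void)
{
	/* Word with the lock bit (bit 0) and a waiter (bit 7) set. */
	unsigned long word = (1UL << 0) | (1UL << 7);

	bool waiters = sketch_xor_unlock_is_negative_byte(1UL << 0, &word);
	printf("waiters=%d word=%#lx\n", waiters, word);	/* waiters=1, word=0x80 */
	return 0;
}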