author    Will Deacon <will.deacon@arm.com>   2015-02-12 04:17:37 +0000
committer Will Deacon <will.deacon@arm.com>   2015-07-27 15:28:51 +0100
commit    084f903727e1c3a61d6bcdaeeed30bddc6d7f65a (patch)
tree      614b95211ba1f8b103ed8f8b9de13ee14a5fab8b /arch/arm64/lib
parent    81bb5c6420635dfd058c210bd342c29c95ccd145 (diff)
arm64: bitops: patch in lse instructions when supported by the CPU
On CPUs which support the LSE atomic instructions introduced in ARMv8.1, it
makes sense to use them in preference to ll/sc sequences. This patch
introduces runtime patching of our bitops functions so that LSE atomic
instructions are used instead.

Reviewed-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
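For context, the sketch below (not part of the patch) contrasts the two forms
this commit switches between, using set_bit's core as the example: the classic
load-exclusive/store-exclusive retry loop versus the single LSE store
instruction that gets patched in on ARMv8.1 CPUs. It assumes a toolchain that
accepts ".arch armv8.1-a"; register roles follow the patch (x1 = word address,
x3 = bit mask).

        .arch   armv8.1-a

        // LL/SC form: retry until the store-exclusive succeeds.
    1:  ldxr    x2, [x1]        // load-exclusive the word
        orr     x2, x2, x3      // set the mask bits
        stxr    w0, x2, [x1]    // attempt the store; w0 != 0 on failure
        cbnz    w0, 1b          // lost exclusivity, try again

        // LSE form: one atomic instruction, no loop.
        stset   x3, [x1]        // atomically OR x3 into [x1]

stset is the value-discarding alias of ldset (ldset with xzr as the
destination register), which is why the bitop macro below only needs the
store-style mnemonics, while testop, which must return the old bit value,
uses the load-and-return ldeoral/ldclral/ldsetal forms.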
Diffstat (limited to 'arch/arm64/lib')
-rw-r--r--  arch/arm64/lib/bitops.S | 43
1 file changed, 24 insertions(+), 19 deletions(-)
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
index 7dac371cc9a2..bc18457c2bba 100644
--- a/arch/arm64/lib/bitops.S
+++ b/arch/arm64/lib/bitops.S
@@ -18,52 +18,57 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/lse.h>
/*
 * x0: bits 5:0  bit offset
 *     bits 31:6 word offset
 * x1: address
 */
- .macro bitop, name, instr
+ .macro bitop, name, llsc, lse
ENTRY( \name )
and w3, w0, #63 // Get bit offset
eor w0, w0, w3 // Clear low bits
mov x2, #1
add x1, x1, x0, lsr #3 // Get word offset
lsl x3, x2, x3 // Create mask
-1: ldxr x2, [x1]
- \instr x2, x2, x3
- stxr w0, x2, [x1]
- cbnz w0, 1b
+
+alt_lse "1: ldxr x2, [x1]", "\lse x3, [x1]"
+alt_lse " \llsc x2, x2, x3", "nop"
+alt_lse " stxr w0, x2, [x1]", "nop"
+alt_lse " cbnz w0, 1b", "nop"
+
ret
ENDPROC(\name )
.endm
- .macro testop, name, instr
+ .macro testop, name, llsc, lse
ENTRY( \name )
and w3, w0, #63 // Get bit offset
eor w0, w0, w3 // Clear low bits
mov x2, #1
add x1, x1, x0, lsr #3 // Get word offset
lsl x4, x2, x3 // Create mask
-1: ldxr x2, [x1]
- lsr x0, x2, x3 // Save old value of bit
- \instr x2, x2, x4 // toggle bit
- stlxr w5, x2, [x1]
- cbnz w5, 1b
- dmb ish
+
+alt_lse "1: ldxr x2, [x1]", "\lse x4, x2, [x1]"
+ lsr x0, x2, x3
+alt_lse " \llsc x2, x2, x4", "nop"
+alt_lse " stlxr w5, x2, [x1]", "nop"
+alt_lse " cbnz w5, 1b", "nop"
+alt_lse " dmb ish", "nop"
+
and x0, x0, #1
-3: ret
+ ret
ENDPROC(\name )
.endm
/*
* Atomic bit operations.
*/
- bitop change_bit, eor
- bitop clear_bit, bic
- bitop set_bit, orr
+ bitop change_bit, eor, steor
+ bitop clear_bit, bic, stclr
+ bitop set_bit, orr, stset
- testop test_and_change_bit, eor
- testop test_and_clear_bit, bic
- testop test_and_set_bit, orr
+ testop test_and_change_bit, eor, ldeoral
+ testop test_and_clear_bit, bic, ldclral
+ testop test_and_set_bit, orr, ldsetal
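
A note on the alt_lse macro used throughout the hunks above: it comes from the
newly included <asm/lse.h> and wraps the arm64 instruction-alternatives
machinery, so each call site carries both encodings. The kernel image is built
with the LL/SC sequence in place; at boot, CPUs that advertise LSE atomics
have each slot rewritten to the second argument, turning the retry loop into a
single atomic instruction followed by nops. A plausible shape for the macro
(a sketch under that assumption; the exact definition lives in
arch/arm64/include/asm/lse.h) is:

        .arch_extension lse

        // Emit \llsc by default; let the alternatives framework patch in
        // \lse at boot when the ARM64_HAS_LSE_ATOMICS capability is set.
        .macro  alt_lse, llsc, lse
        alternative_insn        "\llsc", "\lse", ARM64_HAS_LSE_ATOMICS
        .endm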