Current Dir: /usr/src/linux-headers-4.15.0-213/arch/arm64/include/asm
Server IP: 139.59.38.164
    Name                      Size          Modified                  Perms
📁  ..                        -             05/09/2024 07:14:12 AM    rwxr-xr-x
📄  Kbuild                    703 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  acenv.h                   541 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  acpi.h                    4.34 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  alternative.h             7.63 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  arch_gicv3.h              3.44 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  arch_timer.h              4.87 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  arm-cci.h                 794 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  asm-bug.h                 1.45 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  asm-offsets.h             35 bytes      01/28/2018 09:20:33 PM    rw-r--r--
📄  asm-uaccess.h             2.09 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  assembler.h               13.51 KB      06/16/2023 05:32:39 PM    rw-r--r--
📄  atomic.h                  8.35 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  atomic_ll_sc.h            10.61 KB      06/16/2023 05:32:39 PM    rw-r--r--
📄  atomic_lse.h              14.82 KB      06/16/2023 05:32:39 PM    rw-r--r--
📄  barrier.h                 3.78 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  bitops.h                  1.9 KB        01/28/2018 09:20:33 PM    rw-r--r--
📄  bitrev.h                  452 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  boot.h                    384 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  brk-imm.h                 706 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  bug.h                     1.09 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  cache.h                   2.23 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  cacheflush.h              4.87 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  checksum.h                1.35 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  clocksource.h             192 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  cmpxchg.h                 7.98 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  compat.h                  7.15 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  compiler.h                1.18 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  cpu.h                     1.84 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  cpu_ops.h                 2.73 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  cpucaps.h                 1.87 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  cpufeature.h              19.14 KB      06/16/2023 05:32:39 PM    rw-r--r--
📄  cpuidle.h                 401 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  cputype.h                 8.1 KB        06/16/2023 05:32:39 PM    rw-r--r--
📄  current.h                 517 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  daifflags.h               1.59 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  dcc.h                     1.36 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  debug-monitors.h          3.76 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  device.h                  886 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  dma-mapping.h             2.42 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  dmi.h                     850 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  efi.h                     4.57 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  elf.h                     5.7 KB        01/28/2018 09:20:33 PM    rw-r--r--
📄  esr.h                     9.02 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  exception.h               1.21 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  exec.h                    868 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  extable.h                 815 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  fb.h                      1000 bytes    01/28/2018 09:20:33 PM    rw-r--r--
📄  fixmap.h                  2.91 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  fpsimd.h                  4.21 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  fpsimdmacros.h            5.62 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  ftrace.h                  1.92 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  futex.h                   3.41 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  hardirq.h                 2.08 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  hugetlb.h                 2.71 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  hw_breakpoint.h           4.46 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  hwcap.h                   1.86 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  hypervisor.h              144 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  insn.h                    16.03 KB      06/16/2023 05:32:39 PM    rw-r--r--
📄  io.h                      7.72 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  irq.h                     307 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  irq_work.h                228 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  irqflags.h                2.3 KB        01/28/2018 09:20:33 PM    rw-r--r--
📄  jump_label.h              1.68 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  kasan.h                   1.16 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  kernel-pgtable.h          4.03 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  kexec.h                   2.42 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  kgdb.h                    3.79 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  kprobes.h                 1.74 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  kvm_arm.h                 8.38 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  kvm_asm.h                 4.26 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  kvm_coproc.h              2.04 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  kvm_emulate.h             10.38 KB      06/16/2023 05:32:39 PM    rw-r--r--
📄  kvm_host.h                15.73 KB      06/16/2023 05:32:39 PM    rw-r--r--
📄  kvm_hyp.h                 5.79 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  kvm_mmio.h                1.3 KB        06/16/2023 05:32:39 PM    rw-r--r--
📄  kvm_mmu.h                 11.72 KB      06/16/2023 05:32:39 PM    rw-r--r--
📄  linkage.h                 114 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  lse.h                     1.26 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  memblock.h                720 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  memory.h                  9.16 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  mmu.h                     2.76 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  mmu_context.h             6.35 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  mmzone.h                  266 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  module.h                  2.8 KB        06/16/2023 05:32:39 PM    rw-r--r--
📄  neon.h                    815 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  numa.h                    1.33 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  page-def.h                1.17 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  page.h                    1.61 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  paravirt.h                458 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  pci.h                     878 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  percpu.h                  7.48 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  perf_event.h              3.17 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  pgalloc.h                 3.71 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  pgtable-hwdef.h           9.4 KB        06/16/2023 05:32:39 PM    rw-r--r--
📄  pgtable-prot.h            4.38 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  pgtable-types.h           1.84 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  pgtable.h                 21.55 KB      06/16/2023 05:32:39 PM    rw-r--r--
📄  probes.h                  1022 bytes    01/28/2018 09:20:33 PM    rw-r--r--
📄  proc-fns.h                1.21 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  processor.h               6.52 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  ptdump.h                  1.42 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  ptrace.h                  9 KB          06/16/2023 05:32:39 PM    rw-r--r--
📄  sdei.h                    1.46 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  seccomp.h                 714 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  sections.h                1.46 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  shmparam.h                965 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  signal32.h                1.45 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  simd.h                    1.39 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  smp.h                     4.23 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  smp_plat.h                1.43 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  sparsemem.h               771 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  spinlock.h                3.33 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  spinlock_types.h          1.06 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  stack_pointer.h           247 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  stackprotector.h          1.11 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  stacktrace.h              2.53 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  stage2_pgtable-nopmd.h    1.3 KB        01/28/2018 09:20:33 PM    rw-r--r--
📄  stage2_pgtable-nopud.h    1.24 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  stage2_pgtable.h          4.89 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  stat.h                    1.43 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  string.h                  2.33 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  suspend.h                 1.65 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  sync_bitops.h             1.11 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  syscall.h                 2.87 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  sysreg.h                  25.1 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  system_misc.h             1.86 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  thread_info.h             3.93 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  timex.h                   883 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  tlb.h                     2.22 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  tlbflush.h                5.38 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  topology.h                1.29 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  traps.h                   3.33 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  uaccess.h                 12.01 KB      06/16/2023 05:32:39 PM    rw-r--r--
📄  unistd.h                  1.6 KB        01/28/2018 09:20:33 PM    rw-r--r--
📄  unistd32.h                27.53 KB      01/28/2018 09:20:33 PM    rw-r--r--
📄  uprobes.h                 777 bytes     01/28/2018 09:20:33 PM    rw-r--r--
📄  vdso.h                    1.09 KB       01/28/2018 09:20:33 PM    rw-r--r--
📄  vdso_datapage.h           1.53 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  vectors.h                 1.75 KB       06/16/2023 05:32:39 PM    rw-r--r--
📄  virt.h                    3 KB          06/16/2023 05:32:39 PM    rw-r--r--
📄  vmap_stack.h              769 bytes     06/16/2023 05:32:39 PM    rw-r--r--
📄  word-at-a-time.h          2.22 KB       06/16/2023 05:32:39 PM    rw-r--r--
📁  xen                       -             05/09/2024 07:14:16 AM    rwxr-xr-x
Editing: atomic_lse.h
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

#define __LL_SC_ATOMIC(op)	__LL_SC_CALL(atomic_##op)

#define ATOMIC_OP(op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
	register int w0 asm ("w0") = i; \
	register atomic_t *x1 asm ("x1") = v; \
 \
	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op), \
	"	" #asm_op "	%w[i], %[v]\n") \
	: [i] "+r" (w0), [v] "+Q" (v->counter) \
	: "r" (x1) \
	: __LL_SC_CLOBBERS); \
}

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

#undef ATOMIC_OP

#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline int atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
	register int w0 asm ("w0") = i; \
	register atomic_t *x1 asm ("x1") = v; \
 \
	asm volatile(ARM64_LSE_ATOMIC_INSN( \
	/* LL/SC */ \
	__LL_SC_ATOMIC(fetch_##op##name), \
	/* LSE atomics */ \
	"	" #asm_op #mb "	%w[i], %w[i], %[v]") \
	: [i] "+r" (w0), [v] "+Q" (v->counter) \
	: "r" (x1) \
	: __LL_SC_CLOBBERS, ##cl); \
 \
	return w0; \
}

#define ATOMIC_FETCH_OPS(op, asm_op) \
	ATOMIC_FETCH_OP(_relaxed,   , op, asm_op) \
	ATOMIC_FETCH_OP(_acquire,  a, op, asm_op, "memory") \
	ATOMIC_FETCH_OP(_release,  l, op, asm_op, "memory") \
	ATOMIC_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS

#define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
static inline int atomic_add_return##name(int i, atomic_t *v) \
{ \
	register int w0 asm ("w0") = i; \
	register atomic_t *x1 asm ("x1") = v; \
 \
	asm volatile(ARM64_LSE_ATOMIC_INSN( \
	/* LL/SC */ \
	__LL_SC_ATOMIC(add_return##name) \
	__nops(1), \
	/* LSE atomics */ \
	"	ldadd" #mb "	%w[i], w30, %[v]\n" \
	"	add	%w[i], %w[i], w30") \
	: [i] "+r" (w0), [v] "+Q" (v->counter) \
	: "r" (x1) \
	: __LL_SC_CLOBBERS, ##cl); \
 \
	return w0; \
}

ATOMIC_OP_ADD_RETURN(_relaxed,   )
ATOMIC_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC_OP_ADD_RETURN

static inline void atomic_and(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC(and)
	__nops(1),
	/* LSE atomics */
	"	mvn	%w[i], %w[i]\n"
	"	stclr	%w[i], %[v]")
	: [i] "+&r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC_FETCH_OP_AND(name, mb, cl...) \
static inline int atomic_fetch_and##name(int i, atomic_t *v) \
{ \
	register int w0 asm ("w0") = i; \
	register atomic_t *x1 asm ("x1") = v; \
 \
	asm volatile(ARM64_LSE_ATOMIC_INSN( \
	/* LL/SC */ \
	__LL_SC_ATOMIC(fetch_and##name) \
	__nops(1), \
	/* LSE atomics */ \
	"	mvn	%w[i], %w[i]\n" \
	"	ldclr" #mb "	%w[i], %w[i], %[v]") \
	: [i] "+&r" (w0), [v] "+Q" (v->counter) \
	: "r" (x1) \
	: __LL_SC_CLOBBERS, ##cl); \
 \
	return w0; \
}

ATOMIC_FETCH_OP_AND(_relaxed,   )
ATOMIC_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC_FETCH_OP_AND(_release,  l, "memory")
ATOMIC_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC_FETCH_OP_AND

static inline void atomic_sub(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC(sub)
	__nops(1),
	/* LSE atomics */
	"	neg	%w[i], %w[i]\n"
	"	stadd	%w[i], %[v]")
	: [i] "+&r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \
static inline int atomic_sub_return##name(int i, atomic_t *v) \
{ \
	register int w0 asm ("w0") = i; \
	register atomic_t *x1 asm ("x1") = v; \
 \
	asm volatile(ARM64_LSE_ATOMIC_INSN( \
	/* LL/SC */ \
	__LL_SC_ATOMIC(sub_return##name) \
	__nops(2), \
	/* LSE atomics */ \
	"	neg	%w[i], %w[i]\n" \
	"	ldadd" #mb "	%w[i], w30, %[v]\n" \
	"	add	%w[i], %w[i], w30") \
	: [i] "+&r" (w0), [v] "+Q" (v->counter) \
	: "r" (x1) \
	: __LL_SC_CLOBBERS , ##cl); \
 \
	return w0; \
}

ATOMIC_OP_SUB_RETURN(_relaxed,   )
ATOMIC_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC_OP_SUB_RETURN

#define ATOMIC_FETCH_OP_SUB(name, mb, cl...) \
static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
{ \
	register int w0 asm ("w0") = i; \
	register atomic_t *x1 asm ("x1") = v; \
 \
	asm volatile(ARM64_LSE_ATOMIC_INSN( \
	/* LL/SC */ \
	__LL_SC_ATOMIC(fetch_sub##name) \
	__nops(1), \
	/* LSE atomics */ \
	"	neg	%w[i], %w[i]\n" \
	"	ldadd" #mb "	%w[i], %w[i], %[v]") \
	: [i] "+&r" (w0), [v] "+Q" (v->counter) \
	: "r" (x1) \
	: __LL_SC_CLOBBERS, ##cl); \
 \
	return w0; \
}

ATOMIC_FETCH_OP_SUB(_relaxed,   )
ATOMIC_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC_FETCH_OP_SUB(        , al, "memory")

#undef ATOMIC_FETCH_OP_SUB
#undef __LL_SC_ATOMIC

#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(atomic64_##op)

#define ATOMIC64_OP(op, asm_op) \
static inline void atomic64_##op(long i, atomic64_t *v) \
{ \
	register long x0 asm ("x0") = i; \
	register atomic64_t *x1 asm ("x1") = v; \
 \
	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op), \
	"	" #asm_op "	%[i], %[v]\n") \
	: [i] "+r" (x0), [v] "+Q" (v->counter) \
	: "r" (x1) \
	: __LL_SC_CLOBBERS); \
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \
{ \
	register long x0 asm ("x0") = i; \
	register atomic64_t *x1 asm ("x1") = v; \
 \
	asm volatile(ARM64_LSE_ATOMIC_INSN( \
	/* LL/SC */ \
	__LL_SC_ATOMIC64(fetch_##op##name), \
	/* LSE atomics */ \
	"	" #asm_op #mb "	%[i], %[i], %[v]") \
	: [i] "+r" (x0), [v] "+Q" (v->counter) \
	: "r" (x1) \
	: __LL_SC_CLOBBERS, ##cl); \
 \
	return x0; \
}

#define ATOMIC64_FETCH_OPS(op, asm_op) \
	ATOMIC64_FETCH_OP(_relaxed,   , op, asm_op) \
	ATOMIC64_FETCH_OP(_acquire,  a, op, asm_op, "memory") \
	ATOMIC64_FETCH_OP(_release,  l, op, asm_op, "memory") \
	ATOMIC64_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \
static inline long atomic64_add_return##name(long i, atomic64_t *v) \
{ \
	register long x0 asm ("x0") = i; \
	register atomic64_t *x1 asm ("x1") = v; \
 \
	asm volatile(ARM64_LSE_ATOMIC_INSN( \
	/* LL/SC */ \
	__LL_SC_ATOMIC64(add_return##name) \
	__nops(1), \
	/* LSE atomics */ \
	"	ldadd" #mb "	%[i], x30, %[v]\n" \
	"	add	%[i], %[i], x30") \
	: [i] "+r" (x0), [v] "+Q" (v->counter) \
	: "r" (x1) \
	: __LL_SC_CLOBBERS, ##cl); \
 \
	return x0; \
}

ATOMIC64_OP_ADD_RETURN(_relaxed,   )
ATOMIC64_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC64_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC64_OP_ADD_RETURN

static inline void atomic64_and(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC64(and)
	__nops(1),
	/* LSE atomics */
	"	mvn	%[i], %[i]\n"
	"	stclr	%[i], %[v]")
	: [i] "+&r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
{ \
	register long x0 asm ("x0") = i; \
	register atomic64_t *x1 asm ("x1") = v; \
 \
	asm volatile(ARM64_LSE_ATOMIC_INSN( \
	/* LL/SC */ \
	__LL_SC_ATOMIC64(fetch_and##name) \
	__nops(1), \
	/* LSE atomics */ \
	"	mvn	%[i], %[i]\n" \
	"	ldclr" #mb "	%[i], %[i], %[v]") \
	: [i] "+&r" (x0), [v] "+Q" (v->counter) \
	: "r" (x1) \
	: __LL_SC_CLOBBERS, ##cl); \
 \
	return x0; \
}

ATOMIC64_FETCH_OP_AND(_relaxed,   )
ATOMIC64_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_AND(_release,  l, "memory")
ATOMIC64_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

static inline void atomic64_sub(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC64(sub)
	__nops(1),
	/* LSE atomics */
	"	neg	%[i], %[i]\n"
	"	stadd	%[i], %[v]")
	: [i] "+&r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \
static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
{ \
	register long x0 asm ("x0") = i; \
	register atomic64_t *x1 asm ("x1") = v; \
 \
	asm volatile(ARM64_LSE_ATOMIC_INSN( \
	/* LL/SC */ \
	__LL_SC_ATOMIC64(sub_return##name) \
	__nops(2), \
	/* LSE atomics */ \
	"	neg	%[i], %[i]\n" \
	"	ldadd" #mb "	%[i], x30, %[v]\n" \
	"	add	%[i], %[i], x30") \
	: [i] "+&r" (x0), [v] "+Q" (v->counter) \
	: "r" (x1) \
	: __LL_SC_CLOBBERS, ##cl); \
 \
	return x0; \
}

ATOMIC64_OP_SUB_RETURN(_relaxed,   )
ATOMIC64_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC64_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC64_OP_SUB_RETURN

#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \
static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
{ \
	register long x0 asm ("x0") = i; \
	register atomic64_t *x1 asm ("x1") = v; \
 \
	asm volatile(ARM64_LSE_ATOMIC_INSN( \
	/* LL/SC */ \
	__LL_SC_ATOMIC64(fetch_sub##name) \
	__nops(1), \
	/* LSE atomics */ \
	"	neg	%[i], %[i]\n" \
	"	ldadd" #mb "	%[i], %[i], %[v]") \
	: [i] "+&r" (x0), [v] "+Q" (v->counter) \
	: "r" (x1) \
	: __LL_SC_CLOBBERS, ##cl); \
 \
	return x0; \
}

ATOMIC64_FETCH_OP_SUB(_relaxed,   )
ATOMIC64_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC64_FETCH_OP_SUB(        , al, "memory")

#undef ATOMIC64_FETCH_OP_SUB

static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	register long x0 asm ("x0") = (long)v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	__LL_SC_ATOMIC64(dec_if_positive)
	__nops(6),
	/* LSE atomics */
	"1:	ldr	x30, %[v]\n"
	"	subs	%[ret], x30, #1\n"
	"	b.lt	2f\n"
	"	casal	x30, %[ret], %[v]\n"
	"	sub	x30, x30, #1\n"
	"	sub	x30, x30, %[ret]\n"
	"	cbnz	x30, 1b\n"
	"2:")
	: [ret] "+&r" (x0), [v] "+Q" (v->counter)
	:
	: __LL_SC_CLOBBERS, "cc", "memory");

	return x0;
}

#undef __LL_SC_ATOMIC64

#define __LL_SC_CMPXCHG(op)	__LL_SC_CALL(__cmpxchg_case_##op)

#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...) \
static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \
					      unsigned long old, \
					      u##sz new) \
{ \
	register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
	register unsigned long x1 asm ("x1") = old; \
	register u##sz x2 asm ("x2") = new; \
 \
	asm volatile(ARM64_LSE_ATOMIC_INSN( \
	/* LL/SC */ \
	__LL_SC_CMPXCHG(name##sz) \
	__nops(2), \
	/* LSE atomics */ \
	"	mov	" #w "30, %" #w "[old]\n" \
	"	cas" #mb #sfx "\t" #w "30, %" #w "[new], %[v]\n" \
	"	mov	%" #w "[ret], " #w "30") \
	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \
	: [old] "r" (x1), [new] "r" (x2) \
	: __LL_SC_CLOBBERS, ##cl); \
 \
	return x0; \
}

__CMPXCHG_CASE(w, b,     ,  8,   )
__CMPXCHG_CASE(w, h,     , 16,   )
__CMPXCHG_CASE(w,  ,     , 32,   )
__CMPXCHG_CASE(x,  ,     , 64,   )
__CMPXCHG_CASE(w, b, acq_,  8,  a, "memory")
__CMPXCHG_CASE(w, h, acq_, 16,  a, "memory")
__CMPXCHG_CASE(w,  , acq_, 32,  a, "memory")
__CMPXCHG_CASE(x,  , acq_, 64,  a, "memory")
__CMPXCHG_CASE(w, b, rel_,  8,  l, "memory")
__CMPXCHG_CASE(w, h, rel_, 16,  l, "memory")
__CMPXCHG_CASE(w,  , rel_, 32,  l, "memory")
__CMPXCHG_CASE(x,  , rel_, 64,  l, "memory")
__CMPXCHG_CASE(w, b,  mb_,  8, al, "memory")
__CMPXCHG_CASE(w, h,  mb_, 16, al, "memory")
__CMPXCHG_CASE(w,  ,  mb_, 32, al, "memory")
__CMPXCHG_CASE(x,  ,  mb_, 64, al, "memory")

#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE

#define __LL_SC_CMPXCHG_DBL(op)	__LL_SC_CALL(__cmpxchg_double##op)

#define __CMPXCHG_DBL(name, mb, cl...) \
static inline long __cmpxchg_double##name(unsigned long old1, \
					  unsigned long old2, \
					  unsigned long new1, \
					  unsigned long new2, \
					  volatile void *ptr) \
{ \
	unsigned long oldval1 = old1; \
	unsigned long oldval2 = old2; \
	register unsigned long x0 asm ("x0") = old1; \
	register unsigned long x1 asm ("x1") = old2; \
	register unsigned long x2 asm ("x2") = new1; \
	register unsigned long x3 asm ("x3") = new2; \
	register unsigned long x4 asm ("x4") = (unsigned long)ptr; \
 \
	asm volatile(ARM64_LSE_ATOMIC_INSN( \
	/* LL/SC */ \
	__LL_SC_CMPXCHG_DBL(name) \
	__nops(3), \
	/* LSE atomics */ \
	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n" \
	"	eor	%[old1], %[old1], %[oldval1]\n" \
	"	eor	%[old2], %[old2], %[oldval2]\n" \
	"	orr	%[old1], %[old1], %[old2]") \
	: [old1] "+&r" (x0), [old2] "+&r" (x1), \
	  [v] "+Q" (*(__uint128_t *)ptr) \
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
	: __LL_SC_CLOBBERS, ##cl); \
 \
	return x0; \
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __LL_SC_CMPXCHG_DBL
#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LSE_H */
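For reference, here is a minimal userspace sketch of what the fully-ordered LSE path above boils down to. The "al"-suffixed variants (those passing a "memory" clobber) emit a single LDADDAL instruction, which atomically adds a register to memory and returns the old value with acquire+release ordering. This is an illustration only, not kernel code: it bypasses the ARM64_LSE_ATOMIC_INSN runtime patching entirely, and it assumes an ARMv8.1+ CPU and a compiler invocation such as gcc -march=armv8.1-a lse_demo.c (the file name and flag are hypothetical choices, not from the header).

/* Userspace sketch of the LSE fast path of atomic_fetch_add(). */
#include <stdio.h>

static inline int fetch_add_lse(int i, int *v)
{
	/*
	 * LDADDAL Ws, Wt, [Xn]: atomically *v += Ws, with the old value
	 * of *v returned in Wt, fully ordered (acquire + release).
	 * Reusing one register for both Ws and Wt mirrors the header's
	 * " ldadd" #mb " %w[i], %w[i], %[v]" template with mb = al.
	 */
	asm volatile("ldaddal %w[i], %w[i], %[v]"
		     : [i] "+r" (i), [v] "+Q" (*v)
		     :
		     : "memory");
	return i;
}

int main(void)
{
	int counter = 40;
	int old = fetch_add_lse(2, &counter);
	printf("old=%d new=%d\n", old, counter);	/* old=40 new=42 */
	return 0;
}

On a CPU without LSE this instruction faults, which is exactly why the kernel header wraps every operation in ARM64_LSE_ATOMIC_INSN: the alternatives framework patches in either the out-of-line LL/SC call or the inline LSE instruction at boot, once the CPU's capabilities are known.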