OXIESEC PANEL
- Current Dir: /usr/src/linux-headers-4.15.0-213/arch/arc/include/asm
Server IP: 139.59.38.164
Name                Size        Modified                  Perms
..                  -           05/09/2024 07:14:12 AM    rwxr-xr-x
Kbuild              681 bytes   01/28/2018 09:20:33 PM    rw-r--r--
arcregs.h           8.59 KB     06/16/2023 05:32:39 PM    rw-r--r--
asm-offsets.h       311 bytes   01/28/2018 09:20:33 PM    rw-r--r--
atomic.h            15.14 KB    06/16/2023 05:32:39 PM    rw-r--r--
barrier.h           1.75 KB     01/28/2018 09:20:33 PM    rw-r--r--
bitops.h            9.81 KB     06/16/2023 05:32:39 PM    rw-r--r--
bug.h               938 bytes   06/16/2023 05:32:39 PM    rw-r--r--
cache.h             3.77 KB     06/16/2023 05:32:39 PM    rw-r--r--
cacheflush.h        3.88 KB     01/28/2018 09:20:33 PM    rw-r--r--
checksum.h          2.45 KB     01/28/2018 09:20:33 PM    rw-r--r--
cmpxchg.h           5.4 KB      06/16/2023 05:32:39 PM    rw-r--r--
current.h           695 bytes   01/28/2018 09:20:33 PM    rw-r--r--
delay.h             1.99 KB     06/16/2023 05:32:39 PM    rw-r--r--
disasm.h            3.87 KB     01/28/2018 09:20:33 PM    rw-r--r--
dma-mapping.h       734 bytes   01/28/2018 09:20:33 PM    rw-r--r--
dma.h               459 bytes   01/28/2018 09:20:33 PM    rw-r--r--
dwarf.h             892 bytes   01/28/2018 09:20:33 PM    rw-r--r--
elf.h               2.15 KB     06/16/2023 05:32:39 PM    rw-r--r--
entry-arcv2.h       4.85 KB     06/16/2023 05:32:39 PM    rw-r--r--
entry-compact.h     9.29 KB     01/28/2018 09:20:33 PM    rw-r--r--
entry.h             6.73 KB     01/28/2018 09:20:33 PM    rw-r--r--
exec.h              410 bytes   01/28/2018 09:20:33 PM    rw-r--r--
fb.h                411 bytes   01/28/2018 09:20:33 PM    rw-r--r--
futex.h             3.67 KB     01/28/2018 09:20:33 PM    rw-r--r--
highmem.h           1.46 KB     01/28/2018 09:20:33 PM    rw-r--r--
hugepage.h          2.41 KB     01/28/2018 09:20:33 PM    rw-r--r--
io.h                6.43 KB     06/16/2023 05:32:39 PM    rw-r--r--
irq.h               825 bytes   01/28/2018 09:20:33 PM    rw-r--r--
irqflags-arcv2.h    3.45 KB     01/28/2018 09:20:33 PM    rw-r--r--
irqflags-compact.h  4.25 KB     01/28/2018 09:20:33 PM    rw-r--r--
irqflags.h          509 bytes   01/28/2018 09:20:33 PM    rw-r--r--
kdebug.h            400 bytes   01/28/2018 09:20:33 PM    rw-r--r--
kgdb.h              1.35 KB     01/28/2018 09:20:33 PM    rw-r--r--
kmap_types.h        489 bytes   01/28/2018 09:20:33 PM    rw-r--r--
kprobes.h           1.37 KB     01/28/2018 09:20:33 PM    rw-r--r--
linkage.h           1.42 KB     06/16/2023 05:32:39 PM    rw-r--r--
mach_desc.h         2.06 KB     06/16/2023 05:32:39 PM    rw-r--r--
mmu.h               2.44 KB     01/28/2018 09:20:33 PM    rw-r--r--
mmu_context.h       5.67 KB     01/28/2018 09:20:33 PM    rw-r--r--
mmzone.h            989 bytes   01/28/2018 09:20:33 PM    rw-r--r--
module.h            661 bytes   01/28/2018 09:20:33 PM    rw-r--r--
page.h              2.99 KB     06/16/2023 05:32:39 PM    rw-r--r--
pci.h               705 bytes   01/28/2018 09:20:33 PM    rw-r--r--
perf_event.h        6.86 KB     06/16/2023 05:32:39 PM    rw-r--r--
pgalloc.h           3.79 KB     01/28/2018 09:20:33 PM    rw-r--r--
pgtable.h           14.2 KB     06/16/2023 05:32:39 PM    rw-r--r--
processor.h         4.69 KB     01/28/2018 09:20:33 PM    rw-r--r--
ptrace.h            3.87 KB     01/28/2018 09:20:33 PM    rw-r--r--
sections.h          407 bytes   01/28/2018 09:20:33 PM    rw-r--r--
segment.h           612 bytes   01/28/2018 09:20:33 PM    rw-r--r--
serial.h            644 bytes   01/28/2018 09:20:33 PM    rw-r--r--
setup.h             1.18 KB     01/28/2018 09:20:33 PM    rw-r--r--
shmparam.h          442 bytes   01/28/2018 09:20:33 PM    rw-r--r--
smp.h               4.25 KB     01/28/2018 09:20:33 PM    rw-r--r--
spinlock.h          8.79 KB     01/28/2018 09:20:33 PM    rw-r--r--
spinlock_types.h    1.03 KB     01/28/2018 09:20:33 PM    rw-r--r--
stacktrace.h        1.29 KB     01/28/2018 09:20:33 PM    rw-r--r--
string.h            1.15 KB     01/28/2018 09:20:33 PM    rw-r--r--
switch_to.h         1.17 KB     01/28/2018 09:20:33 PM    rw-r--r--
syscall.h           1.57 KB     01/28/2018 09:20:33 PM    rw-r--r--
syscalls.h          653 bytes   01/28/2018 09:20:33 PM    rw-r--r--
thread_info.h       3.39 KB     01/28/2018 09:20:33 PM    rw-r--r--
timex.h             508 bytes   01/28/2018 09:20:33 PM    rw-r--r--
tlb-mmu1.h          3.48 KB     01/28/2018 09:20:33 PM    rw-r--r--
tlb.h               1.23 KB     01/28/2018 09:20:33 PM    rw-r--r--
tlbflush.h          1.76 KB     01/28/2018 09:20:33 PM    rw-r--r--
uaccess.h           18.45 KB    06/16/2023 05:32:39 PM    rw-r--r--
unaligned.h         771 bytes   01/28/2018 09:20:33 PM    rw-r--r--
unwind.h            3.51 KB     01/28/2018 09:20:33 PM    rw-r--r--
Editing: spinlock.h
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)  ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)

#ifdef CONFIG_ARC_HAS_LLSC

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned int val;

        smp_mb();

        __asm__ __volatile__(
        "1:     llock   %[val], [%[slock]]      \n"
        "       breq    %[val], %[LOCKED], 1b   \n"     /* spin while LOCKED */
        "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [slock]       "r"     (&(lock->slock)),
          [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned int val, got_it = 0;

        smp_mb();

        __asm__ __volatile__(
        "1:     llock   %[val], [%[slock]]      \n"
        "       breq    %[val], %[LOCKED], 4f   \n"     /* already LOCKED, just bail */
        "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
        "       bnz     1b                      \n"
        "       mov     %[got_it], 1            \n"
        "4:                                     \n"
        "                                       \n"
        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
        : [slock]       "r"     (&(lock->slock)),
          [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        smp_mb();

        lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

        smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned int val;

        smp_mb();

        /*
         * zero means writer holds the lock exclusively, deny Reader.
         * Otherwise grant lock to first/subseq reader
         *
         *      if (rw->counter > 0) {
         *              rw->counter--;
         *              ret = 1;
         *      }
         */

        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brls    %[val], %[WR_LOCKED], 1b\n"     /* <= 0: spin while write locked */
        "       sub     %[val], %[val], 1       \n"     /* reader lock */
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter)),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned int val, got_it = 0;

        smp_mb();

        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brls    %[val], %[WR_LOCKED], 4f\n"     /* <= 0: already write locked, bail */
        "       sub     %[val], %[val], 1       \n"     /* counter-- */
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"     /* retry if collided with someone */
        "       mov     %[got_it], 1            \n"
        "                                       \n"
        "4: ; --- done ---                      \n"

        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
        : [rwlock]      "r"     (&(rw->counter)),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned int val;

        smp_mb();

        /*
         * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
         * deny writer. Otherwise if unlocked grant to writer
         * Hence the claim that Linux rwlocks are unfair to writers.
         * (can be starved for an indefinite time by readers).
         *
         *      if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
         *              rw->counter = 0;
         *              ret = 1;
         *      }
         */

        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brne    %[val], %[UNLOCKED], 1b \n"     /* while !UNLOCKED spin */
        "       mov     %[val], %[WR_LOCKED]    \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter)),
          [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned int val, got_it = 0;

        smp_mb();

        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brne    %[val], %[UNLOCKED], 4f \n"     /* !UNLOCKED, bail */
        "       mov     %[val], %[WR_LOCKED]    \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"     /* retry if collided with someone */
        "       mov     %[got_it], 1            \n"
        "                                       \n"
        "4: ; --- done ---                      \n"

        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
        : [rwlock]      "r"     (&(rw->counter)),
          [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned int val;

        smp_mb();

        /*
         * rw->counter++;
         */
        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       add     %[val], %[val], 1       \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter))
        : "memory", "cc");

        smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        smp_mb();

        rw->counter = __ARCH_RW_LOCK_UNLOCKED__;

        smp_mb();
}

#else   /* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

        /*
         * This smp_mb() is technically superfluous, we only need the one
         * after the lock for providing the ACQUIRE semantics.
         * However doing the "right" thing was regressing hackbench
         * so keeping this, pending further investigation
         */
        smp_mb();

        __asm__ __volatile__(
        "1:     ex  %0, [%1]            \n"
#ifdef CONFIG_EZNPS_MTM_EXT
        "       .word %3                \n"
#endif
        "       breq  %0, %2, 1b        \n"
        : "+&r" (val)
        : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
#ifdef CONFIG_EZNPS_MTM_EXT
        , "i"(CTOP_INST_SCHD_RW)
#endif
        : "memory");

        /*
         * ACQUIRE barrier to ensure load/store after taking the lock
         * don't "bleed-up" out of the critical section (leak-in is allowed)
         * http://www.spinics.net/lists/kernel/msg2010409.html
         *
         * ARCv2 only has load-load, store-store and all-all barrier
         * thus need the full all-all barrier
         */
        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

        smp_mb();

        __asm__ __volatile__(
        "1:     ex  %0, [%1]            \n"
        : "+r" (val)
        : "r"(&(lock->slock))
        : "memory");

        smp_mb();

        return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

        /*
         * RELEASE barrier: given the instructions avail on ARCv2, full barrier
         * is the only option
         */
        smp_mb();

        /*
         * EX is not really required here, a simple STore of 0 suffices.
         * However this causes tasklist livelocks in SystemC based SMP virtual
         * platforms where the systemc core scheduler uses EX as a cue for
         * moving to next core. Do a git log of this file for details
         */
        __asm__ __volatile__(
        "       ex  %0, [%1]            \n"
        : "+r" (val)
        : "r"(&(lock->slock))
        : "memory");

        /*
         * superfluous, but keeping for now - see pairing version in
         * arch_spin_lock above
         */
        smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        int ret = 0;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));

        /*
         * zero means writer holds the lock exclusively, deny Reader.
         * Otherwise grant lock to first/subseq reader
         */
        if (rw->counter > 0) {
                rw->counter--;
                ret = 1;
        }

        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);

        smp_mb();
        return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        int ret = 0;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));

        /*
         * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
         * deny writer. Otherwise if unlocked grant to writer
         * Hence the claim that Linux rwlocks are unfair to writers.
         * (can be starved for an indefinite time by readers).
         */
        if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
                rw->counter = 0;
                ret = 1;
        }
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);

        return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        while (!arch_read_trylock(rw))
                cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        while (!arch_write_trylock(rw))
                cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter++;
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
}

#endif

#endif /* __ASM_SPINLOCK_H */
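
For readers studying the header above, the following stand-alone sketch mirrors the exchange-based (non-LLSC) lock path in ordinary user-space C: atomically swap a LOCKED value into the lock word and spin while the previous value was already LOCKED, then release with a plain store. It uses GCC/Clang __atomic builtins in place of the ARC EX instruction and smp_mb(); all demo_* names are purely illustrative and are not part of this header or of the kernel API. Build with something like: gcc -O2 -pthread demo_spinlock.c

/*
 * Illustrative user-space analogue (not part of the kernel header): the same
 * "exchange LOCKED into the slot, spin while the old value was already
 * LOCKED" pattern as the EX-based arch_spin_lock()/arch_spin_unlock() above,
 * expressed with GCC/Clang __atomic builtins. Hypothetical demo code only.
 */
#include <stdio.h>
#include <pthread.h>

#define DEMO_UNLOCKED 0u
#define DEMO_LOCKED   1u

static unsigned int demo_slock = DEMO_UNLOCKED;
static unsigned long demo_counter;

static void demo_spin_lock(void)
{
        /* Swap in LOCKED; if the previous value was already LOCKED, someone
         * else holds the lock, so retry. ACQUIRE ordering keeps the critical
         * section from leaking above the winning exchange (cf. the smp_mb()
         * pairs in the kernel code). */
        while (__atomic_exchange_n(&demo_slock, DEMO_LOCKED,
                                   __ATOMIC_ACQUIRE) == DEMO_LOCKED)
                ;       /* spin */
}

static void demo_spin_unlock(void)
{
        /* RELEASE store of UNLOCKED publishes everything done under the lock. */
        __atomic_store_n(&demo_slock, DEMO_UNLOCKED, __ATOMIC_RELEASE);
}

static void *demo_worker(void *arg)
{
        for (int i = 0; i < 100000; i++) {
                demo_spin_lock();
                demo_counter++;
                demo_spin_unlock();
        }
        return arg;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, demo_worker, NULL);
        pthread_create(&b, NULL, demo_worker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        printf("counter = %lu (expect 200000)\n", demo_counter);
        return 0;
}

The ACQUIRE on the winning exchange and the RELEASE on the unlock store play the role of the smp_mb() pairs in the ARC code; the LLSC variant in the header achieves the same effect with a load-locked/store-conditional retry loop instead of an unconditional exchange.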