/usr/src/linux-headers-4.15.0-197/arch/tile/include/asm/atomic_32.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_32_H
#define _ASM_TILE_ATOMIC_32_H

#include <asm/barrier.h>
#include <arch/chip.h>

#ifndef __ASSEMBLY__

/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	_atomic_xchg_add(&v->counter, i);
}

#define ATOMIC_OPS(op)							\
unsigned long _atomic_fetch_##op(volatile unsigned long *p, unsigned long mask); \
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	_atomic_fetch_##op((unsigned long *)&v->counter, i);		\
}									\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	smp_mb();							\
	return _atomic_fetch_##op((unsigned long *)&v->counter, i);	\
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS

static inline int atomic_fetch_add(int i, atomic_t *v)
{
	smp_mb();
	return _atomic_xchg_add(&v->counter, i);
}

/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add(&v->counter, i) + i;
}

/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add_unless(&v->counter, a, u);
}

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 *
 * atomic_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic_set(atomic_t *v, int n)
{
	_atomic_xchg(&v->counter, n);
}

#define atomic_set_release(v, i)	atomic_set((v), (i))

/* A 64bit atomic type */

typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
static inline long long atomic64_read(const atomic64_t *v)
{
	/*
	 * Requires an atomic op to read both 32-bit parts consistently.
	 * Casting away const is safe since the atomic support routines
	 * do not write to memory if the value has not been modified.
	 */
	return _atomic64_xchg_add((long long *)&v->counter, 0);
}

/**
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static inline void atomic64_add(long long i, atomic64_t *v)
{
	_atomic64_xchg_add(&v->counter, i);
}

#define ATOMIC64_OPS(op)						\
long long _atomic64_fetch_##op(long long *v, long long n);		\
static inline void atomic64_##op(long long i, atomic64_t *v)		\
{									\
	_atomic64_fetch_##op(&v->counter, i);				\
}									\
static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \
{									\
	smp_mb();							\
	return _atomic64_fetch_##op(&v->counter, i);			\
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
{
	smp_mb();
	return _atomic64_xchg_add(&v->counter, i);
}

/**
 * atomic64_add_return - add integer and return
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add(&v->counter, i) + i;
}

/**
 * atomic64_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline long long atomic64_add_unless(atomic64_t *v, long long a,
					    long long u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
}

/**
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 *
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic64_set(atomic64_t *v, long long n)
{
	_atomic64_xchg(&v->counter, n);
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_fetch_sub(i, v)	atomic64_fetch_add(-(i), (v))
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* !__ASSEMBLY__ */

/*
 * Internal definitions only beyond this point.
 */

/*
 * Number of atomic locks in atomic_locks[]. Must be a power of two.
 * There is no reason for more than PAGE_SIZE / 8 entries, since that
 * is the maximum number of pointer bits we can use to index this.
 * And we cannot have more than PAGE_SIZE / 4, since this has to
 * fit on a single page and each entry takes 4 bytes.
 */
#define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT)

#ifndef __ASSEMBLY__
extern int atomic_locks[];
#endif

/*
 * All the code that may fault while holding an atomic lock must
 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
 * can correctly release and reacquire the lock.  Note that we
 * mention the register number in a comment in "lib/atomic_asm.S" to help
 * assembly coders from using this register by mistake, so if it
 * is changed here, change that comment as well.
 */
#define ATOMIC_LOCK_REG 20
#define ATOMIC_LOCK_REG_NAME r20

#ifndef __ASSEMBLY__
/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);

#ifdef CONFIG_SMP
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif

/* Return a pointer to the lock for the given address. */
int *__atomic_hashed_lock(volatile void *v);

/* Private helper routines in lib/atomic_asm_32.S */
struct __get_user {
	unsigned long val;
	int err;
};
extern struct __get_user __atomic32_cmpxchg(volatile int *p, int *lock,
					    int o, int n);
extern struct __get_user __atomic32_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_xchg_add_unless(volatile int *p,
						    int *lock, int o, int n);
extern struct __get_user __atomic32_fetch_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_and(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_xor(volatile int *p, int *lock, int n);
extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
				    long long o, long long n);
extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
				     long long n);
extern long long __atomic64_xchg_add_unless(volatile long long *p,
					    int *lock, long long o, long long n);
extern long long __atomic64_fetch_and(volatile long long *p, int *lock, long long n);
extern long long __atomic64_fetch_or(volatile long long *p, int *lock, long long n);
extern long long __atomic64_fetch_xor(volatile long long *p, int *lock, long long n);

/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_32_H */
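
For context, a minimal usage sketch of the 64-bit API declared above, written in ordinary kernel style. This is not part of the original header: the counter and function names are hypothetical, and on 32-bit tile each call is ultimately serviced by the hashed-lock helpers in lib/atomic_asm_32.S rather than by native 64-bit loads and stores.

/*
 * Hypothetical usage sketch (illustration only, not from the header).
 * Kernel code includes <linux/atomic.h>, never this file directly.
 */
#include <linux/atomic.h>

static atomic64_t bytes_transferred = ATOMIC64_INIT(0);

static void account_transfer(long long len)
{
	/* On 32-bit tile this goes through _atomic64_xchg_add() under
	 * the hashed lock selected by __atomic_hashed_lock(). */
	atomic64_add(len, &bytes_transferred);
}

static long long snapshot_bytes(void)
{
	/* atomic64_read() must also take the lock path so that both
	 * 32-bit halves are observed consistently, as the comment in
	 * atomic64_read() above explains. */
	return atomic64_read(&bytes_transferred);
}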