OXIESEC PANEL
Current Dir: /usr/src/linux-headers-4.15.0-197/arch/mips/include/asm
Server IP: 139.59.38.164
Name
Size
Modified
Perms
📁
..
-
11/17/2022 06:42:15 AM
rwxr-xr-x
📄
Kbuild
577 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
abi.h
853 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
addrspace.h
4.1 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
amon.h
409 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
arch_hweight.h
792 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
asm-eva.h
6.82 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
asm-offsets.h
35 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
asm-prototypes.h
197 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
asm.h
8.47 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
asmmacro-32.h
2.47 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
asmmacro-64.h
1.22 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
asmmacro.h
14.07 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
atomic.h
19.73 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
barrier.h
8.03 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
bcache.h
2.04 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
bitops.h
15.46 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
bitrev.h
608 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
bmips-spaces.h
268 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
bmips.h
3.45 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
bootinfo.h
5.08 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
branch.h
2.35 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
break.h
787 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
bug.h
759 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
bugs.h
944 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
cache.h
546 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
cacheflush.h
4.99 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
cacheops.h
3.71 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
cdmm.h
3.67 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
cevt-r4k.h
823 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
checksum.h
6.43 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
clock.h
997 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
clocksource.h
884 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
cmp.h
492 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
cmpxchg.h
5.28 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
compat-signal.h
640 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
compat.h
6.66 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
compiler.h
2.96 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
cop2.h
1.77 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
cpu-features.h
19.46 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
cpu-info.h
5.84 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
cpu-type.h
4.13 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
cpu.h
15.54 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
cpufeature.h
717 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
debug.h
654 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📁
dec
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📄
delay.h
841 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
device.h
347 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
div64.h
2.17 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
dma-coherence.h
813 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
dma-mapping.h
981 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
dma.h
9.92 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
ds1287.h
1019 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
dsemul.h
3.24 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
dsp.h
1.91 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
edac.h
819 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
elf.h
15.04 KB
01/28/2018 09:20:33 PM
rw-r--r--
📁
emma
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📄
errno.h
429 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
eva.h
796 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
exec.h
579 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
extable.h
241 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
fb.h
372 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
fixmap.h
2.29 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
floppy.h
1.57 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
fpregdef.h
2.66 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
fpu.h
5.21 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
fpu_emulator.h
5.74 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
ftrace.h
2.11 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
futex.h
4.87 KB
01/28/2018 09:20:33 PM
rw-r--r--
📁
fw
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📄
gio_device.h
1.5 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
gt64120.h
19.37 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
hardirq.h
544 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
hazards.h
8.36 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
highmem.h
1.72 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
hpet.h
1.93 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
hugetlb.h
2.76 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
hw_irq.h
475 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
i8259.h
2.52 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
ide.h
330 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
idle.h
689 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
inst.h
2.34 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
io.h
18.44 KB
11/01/2022 04:52:05 PM
rw-r--r--
📁
ip32
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📄
irq.h
2.26 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
irq_cpu.h
708 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
irq_gt641xx.h
2.69 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
irq_regs.h
744 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
irqflags.h
4.04 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
isa-rev.h
556 bytes
11/01/2022 04:52:05 PM
rw-r--r--
📄
isadep.h
603 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
jazz.h
8 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
jazzdma.h
2.97 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
jump_label.h
1.4 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
kdebug.h
303 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
kexec.h
1.53 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
kgdb.h
1.19 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
kmap_types.h
221 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
kprobes.h
2.68 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
kvm_host.h
37.88 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
kvm_para.h
2.09 KB
01/28/2018 09:20:33 PM
rw-r--r--
📁
lasat
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📄
linkage.h
306 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
llsc.h
623 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
local.h
4.99 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
m48t37.h
732 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
maar.h
4.04 KB
01/28/2018 09:20:33 PM
rw-r--r--
📁
mach-ar7
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-ath25
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-ath79
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-au1x00
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-bcm47xx
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-bcm63xx
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-bmips
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-cavium-octeon
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-cobalt
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-db1x00
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-dec
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-emma2rh
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-generic
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-ip22
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-ip27
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-ip28
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-ip32
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-jazz
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-jz4740
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-lantiq
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-lasat
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-loongson32
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-loongson64
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-malta
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-netlogic
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-paravirt
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-pic32
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-pistachio
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-pmcs-msp71xx
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-pnx833x
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-ralink
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-rc32434
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-rm
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-sibyte
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-tx39xx
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-tx49xx
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-vr41xx
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📁
mach-xilfpga
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📄
machine.h
2.93 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
mc146818-time.h
3.69 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
mc146818rtc.h
450 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📁
mips-boards
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📄
mips-cm.h
15.86 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
mips-cpc.h
5.83 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
mips-cps.h
6.55 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
mips-gic.h
12.3 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
mips-r2-to-r6-emul.h
2.05 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
mips_machine.h
1.32 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
mips_mt.h
707 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
mipsmtregs.h
10.9 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
mipsprom.h
2.1 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
mipsregs.h
88.1 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
mmu.h
550 bytes
11/01/2022 04:52:05 PM
rw-r--r--
📄
mmu_context.h
5.41 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
mmzone.h
561 bytes
11/01/2022 04:52:05 PM
rw-r--r--
📄
module.h
4.45 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
msa.h
8.01 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
msc01_ic.h
6.55 KB
01/28/2018 09:20:33 PM
rw-r--r--
📁
netlogic
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📄
nile4.h
10.33 KB
01/28/2018 09:20:33 PM
rw-r--r--
📁
octeon
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📄
paccess.h
3.07 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
page.h
7.19 KB
01/28/2018 09:20:33 PM
rw-r--r--
📁
pci
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📄
pci.h
4.08 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
perf_event.h
482 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
pgalloc.h
3.21 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
pgtable-32.h
7.31 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
pgtable-64.h
10.87 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
pgtable-bits.h
7.36 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
pgtable.h
17.34 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
pm-cps.h
1.68 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
pm.h
3.99 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
pmon.h
1.64 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
prefetch.h
2.1 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
processor.h
11.71 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
prom.h
845 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
ptrace.h
5.55 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
r4k-timer.h
604 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
r4kcache.h
26.34 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
reboot.h
440 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
reg.h
26 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
regdef.h
2.63 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
rtlx.h
2.1 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
seccomp.h
800 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
serial.h
607 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
setup.h
884 bytes
11/01/2022 04:52:05 PM
rw-r--r--
📁
sgi
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📄
sgialib.h
2.45 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
sgiarcs.h
15.32 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
shmparam.h
352 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📁
sibyte
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📄
sigcontext.h
1.04 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
signal.h
1.02 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
sim.h
2.32 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
smp-cps.h
1.18 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
smp-ops.h
2.33 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
smp.h
3.31 KB
11/01/2022 04:52:05 PM
rw-r--r--
📁
sn
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📄
sni.h
7.27 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
socket.h
1.34 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
sparsemem.h
486 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
spinlock.h
459 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
spinlock_types.h
188 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
spram.h
262 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
stackframe.h
10.82 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
stackprotector.h
1.15 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
stacktrace.h
2.15 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
string.h
2.94 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
switch_to.h
4.19 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
syscall.h
3.57 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
termios.h
2.89 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
thread_info.h
6.63 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
time.h
2.13 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
timex.h
2.87 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
tlb.h
1.09 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
tlbdebug.h
403 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
tlbex.h
788 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
tlbflush.h
1.67 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
tlbmisc.h
320 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
topology.h
619 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
traps.h
1.25 KB
01/28/2018 09:20:33 PM
rw-r--r--
📁
txx9
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📄
txx9irq.h
743 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
txx9pio.h
592 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
txx9tmr.h
1.59 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
types.h
487 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
uaccess.h
22.2 KB
11/01/2022 04:52:05 PM
rw-r--r--
📄
uasm.h
9.18 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
unistd.h
1.9 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
uprobes.h
1.11 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
vdso.h
3.72 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
vga.h
1.26 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
vpe.h
2.7 KB
01/28/2018 09:20:33 PM
rw-r--r--
📁
vr41xx
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📄
war.h
7.48 KB
01/28/2018 09:20:33 PM
rw-r--r--
📄
watch.h
827 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📄
wbflush.h
694 bytes
01/28/2018 09:20:33 PM
rw-r--r--
📁
xtalk
-
11/17/2022 06:42:20 AM
rwxr-xr-x
📄
yamon-dt.h
1.88 KB
01/28/2018 09:20:33 PM
rw-r--r--
Editing: bitops.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/llsc.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * These are the "slower" versions of the functions and are in bitops.c.
 * These functions call raw_local_irq_{save,restore}().
 */
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr);

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	arch=r4000	\n"
		"1:	" __LL "%0, %1	# set_bit	\n"
		"	or	%0, %2	\n"
		"	" __SC "%0, %1	\n"
		"	beqzl	%0, 1b	\n"
		"	.set	mips0	\n"
		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# set_bit	\n"
			"	" __INS "%0, %3, %2, 1	\n"
			"	" __SC "%0, %1	\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit), "r" (~0));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
			"	" __LL "%0, %1	# set_bit	\n"
			"	or	%0, %2	\n"
			"	" __SC "%0, %1	\n"
			"	.set	mips0	\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_set_bit(nr, addr);
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	int bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	arch=r4000	\n"
		"1:	" __LL "%0, %1	# clear_bit	\n"
		"	and	%0, %2	\n"
		"	" __SC "%0, %1	\n"
		"	beqzl	%0, 1b	\n"
		"	.set	mips0	\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (~(1UL << bit)));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1	\n"
			"	" __SC "%0, %1	\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (bit));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
			"	" __LL "%0, %1	# clear_bit	\n"
			"	and	%0, %2	\n"
			"	" __SC "%0, %1	\n"
			"	.set	mips0	\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (~(1UL << bit)));
		} while (unlikely(!temp));
	} else
		__mips_clear_bit(nr, addr);
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and implies release semantics before the memory
 * operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_atomic();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000	\n"
		"1:	" __LL "%0, %1	# change_bit	\n"
		"	xor	%0, %2	\n"
		"	" __SC "%0, %1	\n"
		"	beqzl	%0, 1b	\n"
		"	.set	mips0	\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
		: "ir" (1UL << bit));
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
			"	" __LL "%0, %1	# change_bit	\n"
			"	xor	%0, %2	\n"
			"	" __SC "%0, %1	\n"
			"	.set	mips0	\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else
		__mips_change_bit(nr, addr);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000	\n"
		"1:	" __LL "%0, %1	# test_and_set_bit	\n"
		"	or	%2, %0, %3	\n"
		"	" __SC "%2, %1	\n"
		"	beqzl	%2, 1b	\n"
		"	and	%2, %0, %3	\n"
		"	.set	mips0	\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3	\n"
			"	" __SC "%2, %1	\n"
			"	.set	mips0	\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000	\n"
		"1:	" __LL "%0, %1	# test_and_set_bit	\n"
		"	or	%2, %0, %3	\n"
		"	" __SC "%2, %1	\n"
		"	beqzl	%2, 1b	\n"
		"	and	%2, %0, %3	\n"
		"	.set	mips0	\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3	\n"
			"	" __SC "%2, %1	\n"
			"	.set	mips0	\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_set_bit_lock(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000	\n"
		"1:	" __LL "%0, %1	# test_and_clear_bit	\n"
		"	or	%2, %0, %3	\n"
		"	xor	%2, %3	\n"
		"	" __SC "%2, %1	\n"
		"	beqzl	%2, 1b	\n"
		"	and	%2, %0, %3	\n"
		"	.set	mips0	\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1	\n"
			"	" __INS "%0, $0, %3, 1	\n"
			"	" __SC "%0, %1	\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "ir" (bit)
			: "memory");
		} while (unlikely(!temp));
#endif
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
			"	" __LL "%0, %1	# test_and_clear_bit	\n"
			"	or	%2, %0, %3	\n"
			"	xor	%2, %3	\n"
			"	" __SC "%2, %1	\n"
			"	.set	mips0	\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_clear_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	int bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000	\n"
		"1:	" __LL "%0, %1	# test_and_change_bit	\n"
		"	xor	%2, %0, %3	\n"
		"	" __SC "%2, %1	\n"
		"	beqzl	%2, 1b	\n"
		"	and	%2, %0, %3	\n"
		"	.set	mips0	\n"
		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	"MIPS_ISA_ARCH_LEVEL"	\n"
			"	" __LL "%0, %1	# test_and_change_bit	\n"
			"	xor	%2, %0, %3	\n"
			"	" __SC "\t%2, %1	\n"
			"	.set	mips0	\n"
			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else
		res = __mips_test_and_change_bit(nr, addr);

	smp_llsc_mb();

	return res != 0;
}

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_llsc();
	__clear_bit(nr, addr);
	nudge_writes();
}

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push	\n"
		"	.set	"MIPS_ISA_LEVEL"	\n"
		"	clz	%0, %1	\n"
		"	.set	pop	\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 && !__builtin_constant_p(word) &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push	\n"
		"	.set	"MIPS_ISA_LEVEL"	\n"
		"	dclz	%0, %1	\n"
		"	.set	pop	\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG-1))))
		num -= 1;
	return num;
}

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int x)
{
	int r;

	if (!__builtin_constant_p(x) &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push	\n"
		"	.set	"MIPS_ISA_LEVEL"	\n"
		"	clz	%0, %1	\n"
		"	.set	pop	\n"
		: "=r" (x)
		: "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */
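Two ideas recur throughout this header: every atomic helper first splits the bit number @nr into a word index (nr >> SZLONG_LOG) and a bit offset (nr & SZLONG_MASK), and the fls()/ffs() family falls back to a pure-C shift cascade plus the "word & -word" lowest-bit trick when the CPU has no clz instruction. The following user-space sketch restates both ideas outside the kernel so they can be compiled and checked directly; the demo_*/DEMO_* names and the 64-bit constants are assumptions made for illustration only, not definitions from this header.

/*
 * Illustrative user-space sketch, NOT part of bitops.h: it restates the
 * word/bit split and the generic fls()/ffs() fall-back in plain C.
 */
#include <assert.h>
#include <stdio.h>

#define DEMO_SZLONG_LOG		6	/* log2(bits per long), assuming 64-bit long */
#define DEMO_SZLONG_MASK	63UL

/* Generic C fall-back of fls(): 1-based index of the most significant set
 * bit; fls(0) == 0, as documented in the header. */
static int demo_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }
	if (!(x & 0xff000000u)) { x <<= 8;  r -= 8;  }
	if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4;  }
	if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2;  }
	if (!(x & 0x80000000u)) { x <<= 1;  r -= 1;  }
	return r;
}

/* ffs() built from fls(): "word & -word" isolates the lowest set bit. */
static int demo_ffs(unsigned int word)
{
	if (!word)
		return 0;
	return demo_fls(word & -word);
}

int main(void)
{
	unsigned long bitmap[4] = { 0 };	/* a 256-bit bitmap */
	unsigned long nr = 130;

	/* The word/bit split performed by set_bit() and friends. */
	unsigned long *word = bitmap + (nr >> DEMO_SZLONG_LOG);	/* -> bitmap[2] */
	unsigned long bit = nr & DEMO_SZLONG_MASK;		/* -> bit 2 of that word */

	*word |= 1UL << bit;		/* the non-atomic core of set_bit() */
	assert(bitmap[2] == 0x4);

	/* Documented edge cases of fls()/ffs(). */
	assert(demo_fls(0) == 0);
	assert(demo_fls(1) == 1);
	assert(demo_fls(0x80000000u) == 32);
	assert(demo_ffs(0x18) == 4);	/* lowest set bit is bit 3, ffs counts from 1 */

	printf("bit %lu lands in word %ld; fls/ffs fall-backs behave as documented\n",
	       nr, (long)(word - bitmap));
	return 0;
}

The kernel versions differ only in that they pick MIPS clz/dclz or ins/ext instructions when the CPU supports them and wrap the read-modify-write in ll/sc loops for atomicity; the index arithmetic and the bit tricks are the same.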