OXIESEC PANEL
- Current Dir: /usr/src/linux-headers-4.15.0-213/arch/arm64/include/asm
Server IP: 139.59.38.164
Name | Size | Modified | Perms
📁 .. | - | 05/09/2024 07:14:12 AM | rwxr-xr-x
📄 Kbuild | 703 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 acenv.h | 541 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 acpi.h | 4.34 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 alternative.h | 7.63 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 arch_gicv3.h | 3.44 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 arch_timer.h | 4.87 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 arm-cci.h | 794 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 asm-bug.h | 1.45 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 asm-offsets.h | 35 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 asm-uaccess.h | 2.09 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 assembler.h | 13.51 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 atomic.h | 8.35 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 atomic_ll_sc.h | 10.61 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 atomic_lse.h | 14.82 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 barrier.h | 3.78 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 bitops.h | 1.9 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 bitrev.h | 452 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 boot.h | 384 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 brk-imm.h | 706 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 bug.h | 1.09 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 cache.h | 2.23 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 cacheflush.h | 4.87 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 checksum.h | 1.35 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 clocksource.h | 192 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 cmpxchg.h | 7.98 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 compat.h | 7.15 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 compiler.h | 1.18 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 cpu.h | 1.84 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 cpu_ops.h | 2.73 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 cpucaps.h | 1.87 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 cpufeature.h | 19.14 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 cpuidle.h | 401 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 cputype.h | 8.1 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 current.h | 517 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 daifflags.h | 1.59 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 dcc.h | 1.36 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 debug-monitors.h | 3.76 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 device.h | 886 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 dma-mapping.h | 2.42 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 dmi.h | 850 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 efi.h | 4.57 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 elf.h | 5.7 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 esr.h | 9.02 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 exception.h | 1.21 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 exec.h | 868 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 extable.h | 815 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 fb.h | 1000 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 fixmap.h | 2.91 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 fpsimd.h | 4.21 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 fpsimdmacros.h | 5.62 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 ftrace.h | 1.92 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 futex.h | 3.41 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 hardirq.h | 2.08 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 hugetlb.h | 2.71 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 hw_breakpoint.h | 4.46 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 hwcap.h | 1.86 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 hypervisor.h | 144 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 insn.h | 16.03 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 io.h | 7.72 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 irq.h | 307 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 irq_work.h | 228 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 irqflags.h | 2.3 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 jump_label.h | 1.68 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 kasan.h | 1.16 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 kernel-pgtable.h | 4.03 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 kexec.h | 2.42 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 kgdb.h | 3.79 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 kprobes.h | 1.74 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 kvm_arm.h | 8.38 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 kvm_asm.h | 4.26 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 kvm_coproc.h | 2.04 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 kvm_emulate.h | 10.38 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 kvm_host.h | 15.73 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 kvm_hyp.h | 5.79 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 kvm_mmio.h | 1.3 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 kvm_mmu.h | 11.72 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 linkage.h | 114 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 lse.h | 1.26 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 memblock.h | 720 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 memory.h | 9.16 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 mmu.h | 2.76 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 mmu_context.h | 6.35 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 mmzone.h | 266 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 module.h | 2.8 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 neon.h | 815 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 numa.h | 1.33 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 page-def.h | 1.17 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 page.h | 1.61 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 paravirt.h | 458 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 pci.h | 878 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 percpu.h | 7.48 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 perf_event.h | 3.17 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 pgalloc.h | 3.71 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 pgtable-hwdef.h | 9.4 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 pgtable-prot.h | 4.38 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 pgtable-types.h | 1.84 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 pgtable.h | 21.55 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 probes.h | 1022 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 proc-fns.h | 1.21 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 processor.h | 6.52 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 ptdump.h | 1.42 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 ptrace.h | 9 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 sdei.h | 1.46 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 seccomp.h | 714 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 sections.h | 1.46 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 shmparam.h | 965 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 signal32.h | 1.45 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 simd.h | 1.39 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 smp.h | 4.23 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 smp_plat.h | 1.43 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 sparsemem.h | 771 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 spinlock.h | 3.33 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 spinlock_types.h | 1.06 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 stack_pointer.h | 247 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 stackprotector.h | 1.11 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 stacktrace.h | 2.53 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 stage2_pgtable-nopmd.h | 1.3 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 stage2_pgtable-nopud.h | 1.24 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 stage2_pgtable.h | 4.89 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 stat.h | 1.43 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 string.h | 2.33 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 suspend.h | 1.65 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 sync_bitops.h | 1.11 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 syscall.h | 2.87 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 sysreg.h | 25.1 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 system_misc.h | 1.86 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 thread_info.h | 3.93 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 timex.h | 883 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 tlb.h | 2.22 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 tlbflush.h | 5.38 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 topology.h | 1.29 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 traps.h | 3.33 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 uaccess.h | 12.01 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 unistd.h | 1.6 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 unistd32.h | 27.53 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 uprobes.h | 777 bytes | 01/28/2018 09:20:33 PM | rw-r--r--
📄 vdso.h | 1.09 KB | 01/28/2018 09:20:33 PM | rw-r--r--
📄 vdso_datapage.h | 1.53 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 vectors.h | 1.75 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 virt.h | 3 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📄 vmap_stack.h | 769 bytes | 06/16/2023 05:32:39 PM | rw-r--r--
📄 word-at-a-time.h | 2.22 KB | 06/16/2023 05:32:39 PM | rw-r--r--
📁 xen | - | 05/09/2024 07:14:16 AM | rwxr-xr-x
Editing: cpufeature.h
/*
 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H

#include <asm/cpucaps.h>
#include <asm/cputype.h>
#include <asm/fpsimd.h>
#include <asm/hwcap.h>
#include <asm/sigcontext.h>
#include <asm/sysreg.h>

/*
 * In the arm64 world (as in the ARM world), elf_hwcap is used both internally
 * in the kernel and for user space to keep track of which optional features
 * are supported by the current system. So let's map feature 'x' to HWCAP_x.
 * Note that HWCAP_x constants are bit fields so we need to take the log.
 */
#define MAX_CPU_FEATURES        (8 * sizeof(elf_hwcap))
#define cpu_feature(x)          ilog2(HWCAP_ ## x)

#ifndef __ASSEMBLY__

#include <linux/bug.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>

/*
 * CPU feature register tracking
 *
 * The safe value of a CPUID feature field is dependent on the implications
 * of the values assigned to it by the architecture. Based on the relationship
 * between the values, the features are classified into 3 types: LOWER_SAFE,
 * HIGHER_SAFE and EXACT.
 *
 * The lowest value of all the CPUs is chosen for LOWER_SAFE and the highest
 * for HIGHER_SAFE. It is expected that all CPUs have the same value for
 * a field when EXACT is specified, failing which the safe value specified
 * in the table is chosen.
 */

enum ftr_type {
        FTR_EXACT,                      /* Use a predefined safe value */
        FTR_LOWER_SAFE,                 /* Smaller value is safe */
        FTR_HIGHER_SAFE,                /* Bigger value is safe */
        FTR_HIGHER_OR_ZERO_SAFE,        /* Bigger value is safe, but 0 is biggest */
};

#define FTR_STRICT      true    /* SANITY check strict matching required */
#define FTR_NONSTRICT   false   /* SANITY check ignored */
#define FTR_SIGNED      true    /* Value should be treated as signed */
#define FTR_UNSIGNED    false   /* Value should be treated as unsigned */
#define FTR_VISIBLE     true    /* Feature visible to the user space */
#define FTR_HIDDEN      false   /* Feature is hidden from the user */

#define FTR_VISIBLE_IF_IS_ENABLED(config)               \
        (IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN)

struct arm64_ftr_bits {
        bool            sign;   /* Value is signed ? */
        bool            visible;
        bool            strict; /* CPU Sanity check: strict matching required ? */
        enum ftr_type   type;
        u8              shift;
        u8              width;
        s64             safe_val; /* safe value for FTR_EXACT features */
};

/*
 * @arm64_ftr_reg - Feature register
 * @strict_mask         Bits which should match across all CPUs for sanity.
 * @sys_val             Safe value across the CPUs (system view)
 */
struct arm64_ftr_reg {
        const char                      *name;
        u64                             strict_mask;
        u64                             user_mask;
        u64                             sys_val;
        u64                             user_val;
        const struct arm64_ftr_bits     *ftr_bits;
};

extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
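/*
 * Illustrative sketch added for this edit, not part of the upstream header:
 * a hypothetical arm64_ftr_bits entry describing one 4-bit feature field.
 * Real tables of these entries live in arch/arm64/kernel/cpufeature.c; the
 * array name and field position below are made up.
 */
static const struct arm64_ftr_bits example_ftr_bits[] __maybe_unused = {
        {
                .sign           = FTR_UNSIGNED,         /* field holds an unsigned value */
                .visible        = FTR_HIDDEN,           /* not exposed to user space */
                .strict         = FTR_STRICT,           /* mismatch across CPUs is a sanity failure */
                .type           = FTR_LOWER_SAFE,       /* lowest value across CPUs is the safe one */
                .shift          = 4,                    /* hypothetical field at bits [7:4] */
                .width          = 4,
                .safe_val       = 0,                    /* only consulted for FTR_EXACT */
        },
};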
/*
 * CPU capabilities:
 *
 * We use arm64_cpu_capabilities to represent system features, errata
 * workarounds (both used internally by the kernel and tracked in cpu_hwcaps)
 * and ELF HWCAPs (which are exposed to user space).
 *
 * To support systems with heterogeneous CPUs, we need to make sure that we
 * detect the capabilities correctly on the system and take appropriate
 * measures to ensure there are no incompatibilities.
 *
 * This comment tries to explain how we treat the capabilities.
 * Each capability has the following list of attributes:
 *
 * 1) Scope of Detection : The system detects a given capability by
 *    performing some checks at runtime. This could be, e.g., checking the
 *    value of a field in a CPU ID feature register or checking the cpu
 *    model. The capability provides a callback (@matches()) to perform the
 *    check. Scope defines how the checks should be performed. There are
 *    three cases:
 *
 *     a) SCOPE_LOCAL_CPU: check all the CPUs and "detect" if at least one
 *        matches. This implies we have to run the check on all the booting
 *        CPUs, until the system decides that the state of the capability
 *        is finalised. (See section 2 below.)
 *              Or
 *     b) SCOPE_SYSTEM: check all the CPUs and "detect" if all the CPUs
 *        match. This implies we run the check only once, when the system
 *        decides to finalise the state of the capability. If the
 *        capability relies on a field in one of the CPU ID feature
 *        registers, we use the sanitised value of the register from the
 *        CPU feature infrastructure to make the decision.
 *              Or
 *     c) SCOPE_BOOT_CPU: check only on the primary boot CPU to detect the
 *        feature. This category is for features that are "finalised" (or
 *        used) by the kernel very early, even before the SMP cpus are
 *        brought up.
 *
 *    The process of detection is usually denoted by "update" capability
 *    state in the code.
 *
 * 2) Finalise the state : The kernel should finalise the state of a
 *    capability at some point during its execution and take necessary
 *    actions, if any. Usually this is done after all the boot-time enabled
 *    CPUs are brought up by the kernel, so that it can make better
 *    decisions based on the available set of CPUs. However, there are some
 *    special cases where the action is taken during early boot by the
 *    primary boot CPU (e.g., running the kernel at EL2 with Virtualisation
 *    Host Extensions). The kernel usually disallows any changes to the
 *    state of a capability once it finalises the capability and takes any
 *    action, as it may be impossible to execute the actions safely. A CPU
 *    brought up after a capability is "finalised" is referred to as a
 *    "late CPU" w.r.t. the capability. E.g., all secondary CPUs are
 *    treated as "late CPUs" for capabilities determined by the boot CPU.
 *
 *    At the moment there are two passes of finalising the capabilities:
 *      a) Boot CPU scope capabilities - finalised by the primary boot CPU
 *         via setup_boot_cpu_capabilities().
 *      b) Everything except (a) - run via setup_system_capabilities().
 *
 * 3) Verification: When a CPU is brought online (e.g., by user or by the
 *    kernel), the kernel should make sure that it is safe to use the CPU,
 *    by verifying that the CPU is compliant with the state of the
 *    capabilities finalised already. This happens via:
 *
 *      secondary_start_kernel() -> check_local_cpu_capabilities()
 *
 *    As explained in (2) above, capabilities could be finalised at
 *    different points in the execution. Each newly booted CPU is verified
 *    against the capabilities that have been finalised by the time it
 *    boots.
 *
 *     a) SCOPE_BOOT_CPU : all CPUs are verified against the capability
 *        except for the primary boot CPU.
 *
 *     b) SCOPE_LOCAL_CPU, SCOPE_SYSTEM : all CPUs hotplugged on by the
 *        user after the kernel boot are verified against the capability.
 *
 *    If there is a conflict, the kernel takes an action based on the
 *    severity (e.g., a CPU could be prevented from booting or cause a
 *    kernel panic). The CPU is allowed to "affect" the state of the
 *    capability if it has not been finalised already. See section 5 for
 *    more details on conflicts.
 *
 * 4) Action: As mentioned in (2), the kernel can take an action for each
 *    detected capability, on all CPUs on the system. Appropriate actions
 *    include turning on an architectural feature, modifying the control
 *    registers (e.g., SCTLR, TCR etc.) or patching the kernel via
 *    alternatives. The kernel patching is batched and performed at a
 *    later point. The actions are always initiated only after the
 *    capability is finalised. This is usually denoted by "enabling" the
 *    capability. The actions are initiated as follows:
 *
 *     a) Action is triggered on all online CPUs, after the capability is
 *        finalised, invoked within the stop_machine() context from
 *        enable_cpu_capabilities().
 *
 *     b) For any late CPU brought up after (a), the action is triggered
 *        via:
 *
 *      check_local_cpu_capabilities() -> verify_local_cpu_capabilities()
 *
 * 5) Conflicts: Based on the state of the capability on a late CPU vs.
 *    the system state, we could have the following combinations:
 *
 *      x-----------------------------x
 *      | Type  | System   | Late CPU |
 *      |-----------------------------|
 *      |  a    |   y      |    n     |
 *      |-----------------------------|
 *      |  b    |   n      |    y     |
 *      x-----------------------------x
 *
 *    Two separate flag bits are defined to indicate whether each kind of
 *    conflict can be allowed:
 *      ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU - Case (a) is allowed
 *      ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU - Case (b) is allowed
 *
 *    Case (a) is not permitted for a capability that the system requires
 *    all CPUs to have in order for the capability to be enabled. This is
 *    typical for capabilities that represent enhanced functionality.
 *
 *    Case (b) is not permitted for a capability that must be enabled
 *    during boot if any CPU in the system requires it in order to run
 *    safely. This is typical for erratum workarounds that cannot be
 *    enabled after the corresponding capability is finalised.
 *
 *    In some non-typical cases, either both (a) and (b), or neither,
 *    should be permitted. This can be described by including neither or
 *    both flags in the capability's type field.
 */

/*
 * Decide how the capability is detected:
 * on any local CPU vs. system wide vs. the primary boot CPU.
 */
#define ARM64_CPUCAP_SCOPE_LOCAL_CPU            ((u16)BIT(0))
#define ARM64_CPUCAP_SCOPE_SYSTEM               ((u16)BIT(1))
/*
 * The capability is detected on the boot CPU and is used by the kernel
 * during early boot, i.e. the capability should be "detected" and
 * "enabled" as early as possible on all booting CPUs.
 */
#define ARM64_CPUCAP_SCOPE_BOOT_CPU             ((u16)BIT(2))
#define ARM64_CPUCAP_SCOPE_MASK                 \
        (ARM64_CPUCAP_SCOPE_SYSTEM      |       \
         ARM64_CPUCAP_SCOPE_LOCAL_CPU   |       \
         ARM64_CPUCAP_SCOPE_BOOT_CPU)

#define SCOPE_SYSTEM    ARM64_CPUCAP_SCOPE_SYSTEM
#define SCOPE_LOCAL_CPU ARM64_CPUCAP_SCOPE_LOCAL_CPU
#define SCOPE_BOOT_CPU  ARM64_CPUCAP_SCOPE_BOOT_CPU
#define SCOPE_ALL       ARM64_CPUCAP_SCOPE_MASK

/*
 * Is it permitted for a late CPU to have this capability when the
 * system hasn't already enabled it?
 */
#define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU     ((u16)BIT(4))
/* Is it safe for a late CPU to miss this capability when the system has it? */
#define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU      ((u16)BIT(5))

/*
 * CPU errata workarounds that need to be enabled at boot time if one or
 * more CPUs in the system requires it. When one of these capabilities
 * has been enabled, it is safe to allow any CPU to boot that doesn't
 * require the workaround. However, it is not safe if a "late" CPU
 * requires a workaround and the system hasn't enabled it already.
 */
#define ARM64_CPUCAP_LOCAL_CPU_ERRATUM          \
        (ARM64_CPUCAP_SCOPE_LOCAL_CPU | ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time based on the system-wide value of a
 * feature. It is safe for a late CPU to have this feature even though
 * the system hasn't enabled it, although the feature will not be used
 * by Linux in this case. If the system has enabled this feature already,
 * then every late CPU must have it.
 */
#define ARM64_CPUCAP_SYSTEM_FEATURE     \
        (ARM64_CPUCAP_SCOPE_SYSTEM | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
/*
 * CPU feature detected at boot time based on a feature of one or more CPUs.
 * All possible conflicts for a late CPU are ignored.
 */
#define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE             \
        (ARM64_CPUCAP_SCOPE_LOCAL_CPU           |       \
         ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU     |       \
         ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)

/*
 * CPU feature detected at boot time, on one or more CPUs. A late CPU
 * is not allowed to have the capability when the system doesn't have it.
 * It is OK for a late CPU to miss the feature.
 */
#define ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE  \
        (ARM64_CPUCAP_SCOPE_LOCAL_CPU           |       \
         ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU)

/*
 * CPU feature used early in the boot based on the boot CPU. All secondary
 * CPUs must match the state of the capability as detected by the boot CPU.
 */
#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE    ARM64_CPUCAP_SCOPE_BOOT_CPU

struct arm64_cpu_capabilities {
        const char *desc;
        u16 capability;
        u16 type;
        bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
        /*
         * Take the appropriate actions to enable this capability for this CPU.
         * For each successfully booted CPU, this method is called for each
         * globally detected capability.
         */
        void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
        union {
                struct {        /* To be used for erratum handling only */
                        struct midr_range midr_range;
                };

                const struct midr_range *midr_range_list;
                struct {        /* Feature register checking */
                        u32 sys_reg;
                        u8 field_pos;
                        u8 min_field_value;
                        u8 hwcap_type;
                        bool sign;
                        unsigned long hwcap;
                };
        };
};
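/*
 * Illustrative sketch added for this edit, not part of the upstream header:
 * roughly how an entry in the capability tables of
 * arch/arm64/kernel/cpufeature.c is shaped. The matcher, capability number
 * and threshold below are hypothetical; only the struct field names and the
 * SYS_ID_AA64PFR0_EL1 / ID_AA64PFR0_EL0_SHIFT constants are real.
 */
static bool __maybe_unused
example_cap_matches(const struct arm64_cpu_capabilities *entry, int scope)
{
        /* A real matcher would compare a (sanitised) ID register field. */
        return false;
}

static const struct arm64_cpu_capabilities example_caps[] __maybe_unused = {
        {
                .desc = "hypothetical example capability",
                .capability = 0,        /* would be an ARM64_* constant from cpucaps.h */
                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
                .matches = example_cap_matches,
                .sys_reg = SYS_ID_AA64PFR0_EL1,
                .field_pos = ID_AA64PFR0_EL0_SHIFT,
                .sign = FTR_UNSIGNED,
                .min_field_value = 1,
        },
        { /* sentinel */ },
};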
static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
{
        return cap->type & ARM64_CPUCAP_SCOPE_MASK;
}

static inline bool
cpucap_late_cpu_optional(const struct arm64_cpu_capabilities *cap)
{
        return !!(cap->type & ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU);
}

static inline bool
cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
{
        return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
}

extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready;

bool this_cpu_has_cap(unsigned int cap);

static inline bool cpu_have_feature(unsigned int num)
{
        return elf_hwcap & (1UL << num);
}

/* System capability check for constant caps */
static inline bool __cpus_have_const_cap(int num)
{
        if (num >= ARM64_NCAPS)
                return false;
        return static_branch_unlikely(&cpu_hwcap_keys[num]);
}

static inline bool cpus_have_cap(unsigned int num)
{
        if (num >= ARM64_NCAPS)
                return false;
        return test_bit(num, cpu_hwcaps);
}

static inline bool cpus_have_const_cap(int num)
{
        if (static_branch_likely(&arm64_const_caps_ready))
                return __cpus_have_const_cap(num);
        else
                return cpus_have_cap(num);
}

static inline void cpus_set_cap(unsigned int num)
{
        if (num >= ARM64_NCAPS) {
                pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n",
                        num, ARM64_NCAPS);
        } else {
                __set_bit(num, cpu_hwcaps);
        }
}
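/*
 * Illustrative sketch added for this edit, not part of the upstream header:
 * typical use of the capability queries above. ARM64_HAS_PAN is a real
 * capability number from asm/cpucaps.h; the wrapper name is made up. Once
 * arm64_const_caps_ready is set, cpus_have_const_cap() compiles down to a
 * static-branch test rather than a bitmap load.
 */
static inline bool example_cpu_has_pan(void)
{
        return cpus_have_const_cap(ARM64_HAS_PAN);
}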
static inline int __attribute_const__
cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
{
        return (s64)(features << (64 - width - field)) >> (64 - width);
}

static inline int __attribute_const__
cpuid_feature_extract_signed_field(u64 features, int field)
{
        return cpuid_feature_extract_signed_field_width(features, field, 4);
}

static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
{
        return (u64)(features << (64 - width - field)) >> (64 - width);
}

static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field(u64 features, int field)
{
        return cpuid_feature_extract_unsigned_field_width(features, field, 4);
}

static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
{
        return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
}

static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
{
        return (reg->user_val | (reg->sys_val & reg->user_mask));
}

static inline int __attribute_const__
cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign)
{
        return (sign) ?
                cpuid_feature_extract_signed_field_width(features, field, width) :
                cpuid_feature_extract_unsigned_field_width(features, field, width);
}

static inline int __attribute_const__
cpuid_feature_extract_field(u64 features, int field, bool sign)
{
        return cpuid_feature_extract_field_width(features, field, 4, sign);
}

static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
{
        return (s64)cpuid_feature_extract_field_width(val, ftrp->shift,
                                                      ftrp->width, ftrp->sign);
}
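/*
 * Worked example added for this edit, not part of the upstream header:
 * the extractors above are plain shift-and-mask operations on a 64-bit
 * register image. For a hypothetical register value 0x21:
 *
 *      cpuid_feature_extract_unsigned_field_width(0x21, 0, 4) == 0x1
 *      cpuid_feature_extract_unsigned_field_width(0x21, 4, 4) == 0x2
 *
 * The signed variant sign-extends the top bit of the field, so a 4-bit
 * field holding 0xf reads back as -1 rather than 15.
 */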
static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
{
        return cpuid_feature_extract_unsigned_field(mmfr0,
                                ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 ||
                cpuid_feature_extract_unsigned_field(mmfr0,
                                ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
}

static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
        u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);

        return val == ID_AA64PFR0_EL0_32BIT_64BIT;
}

static inline bool id_aa64pfr0_sve(u64 pfr0)
{
        u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT);

        return val > 0;
}

void __init setup_cpu_features(void);
void check_local_cpu_capabilities(void);

u64 read_sanitised_ftr_reg(u32 id);

static inline bool cpu_supports_mixed_endian_el0(void)
{
        return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}

static inline bool supports_csv2p3(int scope)
{
        u64 pfr0;
        u8 csv2_val;

        if (scope == SCOPE_LOCAL_CPU)
                pfr0 = read_sysreg_s(SYS_ID_AA64PFR0_EL1);
        else
                pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

        csv2_val = cpuid_feature_extract_unsigned_field(pfr0,
                                                        ID_AA64PFR0_CSV2_SHIFT);
        return csv2_val == 3;
}

static inline bool supports_clearbhb(int scope)
{
        u64 isar2;

        if (scope == SCOPE_LOCAL_CPU)
                isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
        else
                isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);

        return cpuid_feature_extract_unsigned_field(isar2,
                                                    ID_AA64ISAR2_CLEARBHB_SHIFT);
}

static inline bool system_supports_32bit_el0(void)
{
        return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
}

static inline bool system_supports_mixed_endian_el0(void)
{
        return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1));
}

static inline bool system_supports_fpsimd(void)
{
        return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
}

static inline bool system_uses_ttbr0_pan(void)
{
        return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
                !cpus_have_const_cap(ARM64_HAS_PAN);
}

static inline bool system_supports_sve(void)
{
        return IS_ENABLED(CONFIG_ARM64_SVE) &&
                cpus_have_const_cap(ARM64_SVE);
}
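/*
 * Illustrative sketch added for this edit, not part of the upstream header:
 * the system_supports_*() predicates above are meant to gate optional code
 * paths. The caller below is hypothetical.
 */
static inline void example_save_vector_state(void)
{
        if (!system_supports_sve())
                return;         /* fall back to plain FPSIMD handling */

        /* SVE-specific state handling would go here. */
}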
/*
 * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
 * vector length.
 *
 * Use only if SVE is present.
 * This function clobbers the SVE vector length.
 */
static inline u64 read_zcr_features(void)
{
        u64 zcr;
        unsigned int vq_max;

        /*
         * Set the maximum possible VL, and write zeroes to all other
         * bits to see if they stick.
         */
        sve_kernel_enable(NULL);
        write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);

        zcr = read_sysreg_s(SYS_ZCR_EL1);
        zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
        vq_max = sve_vq_from_vl(sve_get_vl());
        zcr |= vq_max - 1; /* set LEN field to maximum effective value */

        return zcr;
}

#define ARM64_SSBD_UNKNOWN              -1
#define ARM64_SSBD_FORCE_DISABLE        0
#define ARM64_SSBD_KERNEL               1
#define ARM64_SSBD_FORCE_ENABLE         2
#define ARM64_SSBD_MITIGATED            3

static inline int arm64_get_ssbd_state(void)
{
#ifdef CONFIG_ARM64_SSBD
        extern int ssbd_state;
        return ssbd_state;
#else
        return ARM64_SSBD_UNKNOWN;
#endif
}

void arm64_set_ssbd_mitigation(bool state);

/* Watch out, ordering is important here. */
enum mitigation_state {
        SPECTRE_UNAFFECTED,
        SPECTRE_MITIGATED,
        SPECTRE_VULNERABLE,
};

enum mitigation_state arm64_get_spectre_bhb_state(void);

bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
                             int scope);
u8 spectre_bhb_loop_affected(int scope);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);

#endif /* __ASSEMBLY__ */

#endif