This tool is for checking the security hardening options of the Linux kernel.

Author: Alexander Popov <alex.popov@linux.com>

This module contains knowledge for checks.
11 # pylint: disable=missing-function-docstring,line-too-long,invalid-name
12 # pylint: disable=too-many-branches,too-many-statements
14 from .engine import KconfigCheck, CmdlineCheck, SysctlCheck, VersionCheck, OR, AND
def add_kconfig_checks(l, arch):
    """
    Append to 'l' the Kconfig checks for the given architecture.

    l    -- list collecting the check objects (mutated in place)
    arch -- architecture identifier; the code branches on
            'X86_64', 'X86_32', 'ARM64' and 'ARM'
    """
    assert(arch), 'empty arch'

    # Calling the KconfigCheck class constructor:
    #     KconfigCheck(reason, decision, name, expected)
    #
    # [!] Don't add CmdlineChecks in add_kconfig_checks() to avoid wrong results
    #     when the tool doesn't check the cmdline.

    # Checks that are reused several times below:
    efi_not_set = KconfigCheck('-', '-', 'EFI', 'is not set')
    cc_is_gcc = KconfigCheck('-', '-', 'CC_IS_GCC', 'y') # exists since v4.18
    cc_is_clang = KconfigCheck('-', '-', 'CC_IS_CLANG', 'y') # exists since v4.18

    modules_not_set = KconfigCheck('cut_attack_surface', 'kspp', 'MODULES', 'is not set') # radical, but may be useful in some cases
    devmem_not_set = KconfigCheck('cut_attack_surface', 'kspp', 'DEVMEM', 'is not set') # refers to LOCKDOWN
    bpf_syscall_not_set = KconfigCheck('cut_attack_surface', 'lockdown', 'BPF_SYSCALL', 'is not set') # refers to LOCKDOWN

    # 'self_protection', 'defconfig'
    l += [KconfigCheck('self_protection', 'defconfig', 'BUG', 'y')]
    l += [KconfigCheck('self_protection', 'defconfig', 'SLUB_DEBUG', 'y')]
    l += [KconfigCheck('self_protection', 'defconfig', 'THREAD_INFO_IN_TASK', 'y')]
    gcc_plugins_support_is_set = KconfigCheck('self_protection', 'defconfig', 'GCC_PLUGINS', 'y')
    l += [gcc_plugins_support_is_set]
    iommu_support_is_set = KconfigCheck('self_protection', 'defconfig', 'IOMMU_SUPPORT', 'y')
    l += [iommu_support_is_set] # is needed for mitigating DMA attacks
    l += [OR(KconfigCheck('self_protection', 'defconfig', 'STACKPROTECTOR', 'y'),
             KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR', 'y'),
             KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR_REGULAR', 'y'),
             KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR_AUTO', 'y'),
             KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR_STRONG', 'y'))]
    l += [OR(KconfigCheck('self_protection', 'defconfig', 'STACKPROTECTOR_STRONG', 'y'),
             KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR_STRONG', 'y'))]
    l += [OR(KconfigCheck('self_protection', 'defconfig', 'STRICT_KERNEL_RWX', 'y'),
             KconfigCheck('self_protection', 'defconfig', 'DEBUG_RODATA', 'y'))] # before v4.11
    l += [OR(KconfigCheck('self_protection', 'defconfig', 'STRICT_MODULE_RWX', 'y'),
             KconfigCheck('self_protection', 'defconfig', 'DEBUG_SET_MODULE_RONX', 'y'),
             modules_not_set)] # DEBUG_SET_MODULE_RONX was before v4.11
    l += [OR(KconfigCheck('self_protection', 'defconfig', 'REFCOUNT_FULL', 'y'),
             VersionCheck((5, 5)))] # REFCOUNT_FULL is enabled by default since v5.5
    if arch in ('X86_64', 'ARM64', 'X86_32'):
        l += [KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_BASE', 'y')]
    if arch in ('X86_64', 'ARM64', 'ARM'):
        l += [KconfigCheck('self_protection', 'defconfig', 'VMAP_STACK', 'y')]
    if arch in ('X86_64', 'X86_32'):
        l += [KconfigCheck('self_protection', 'defconfig', 'SPECULATION_MITIGATIONS', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'DEBUG_WX', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'WERROR', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'X86_MCE', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'X86_MCE_INTEL', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'X86_MCE_AMD', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'RETPOLINE', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'SYN_COOKIES', 'y')] # another reason?
        microcode_is_set = KconfigCheck('self_protection', 'defconfig', 'MICROCODE', 'y')
        l += [microcode_is_set] # is needed for mitigating CPU bugs
        l += [OR(KconfigCheck('self_protection', 'defconfig', 'MICROCODE_INTEL', 'y'),
                 AND(microcode_is_set,
                     VersionCheck((6, 6))))] # MICROCODE_INTEL was included in MICROCODE since v6.6
        l += [OR(KconfigCheck('self_protection', 'defconfig', 'MICROCODE_AMD', 'y'),
                 AND(microcode_is_set,
                     VersionCheck((6, 6))))] # MICROCODE_AMD was included in MICROCODE since v6.6
        l += [OR(KconfigCheck('self_protection', 'defconfig', 'X86_SMAP', 'y'),
                 VersionCheck((5, 19)))] # X86_SMAP is enabled by default since v5.19
        l += [OR(KconfigCheck('self_protection', 'defconfig', 'X86_UMIP', 'y'),
                 KconfigCheck('self_protection', 'defconfig', 'X86_INTEL_UMIP', 'y'))]
    if arch in ('ARM64', 'ARM'):
        l += [KconfigCheck('self_protection', 'defconfig', 'IOMMU_DEFAULT_DMA_STRICT', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'IOMMU_DEFAULT_PASSTHROUGH', 'is not set')] # true if IOMMU_DEFAULT_DMA_STRICT is set
        l += [KconfigCheck('self_protection', 'defconfig', 'STACKPROTECTOR_PER_TASK', 'y')]
    if arch == 'X86_64':
        l += [KconfigCheck('self_protection', 'defconfig', 'PAGE_TABLE_ISOLATION', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_MEMORY', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'X86_KERNEL_IBT', 'y')]
        l += [AND(KconfigCheck('self_protection', 'defconfig', 'INTEL_IOMMU', 'y'),
                  iommu_support_is_set)]
        l += [AND(KconfigCheck('self_protection', 'defconfig', 'AMD_IOMMU', 'y'),
                  iommu_support_is_set)]
    if arch == 'ARM64':
        l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_PAN', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_EPAN', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'UNMAP_KERNEL_AT_EL0', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_E0PD', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'RODATA_FULL_DEFAULT_ENABLED', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_PTR_AUTH_KERNEL', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_BTI_KERNEL', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'MITIGATE_SPECTRE_BRANCH_HISTORY', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_MTE', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_MODULE_REGION_FULL', 'y')]
        l += [OR(KconfigCheck('self_protection', 'defconfig', 'HARDEN_EL2_VECTORS', 'y'),
                 AND(KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_BASE', 'y'),
                     VersionCheck((5, 9))))] # HARDEN_EL2_VECTORS was included in RANDOMIZE_BASE in v5.9
        l += [OR(KconfigCheck('self_protection', 'defconfig', 'HARDEN_BRANCH_PREDICTOR', 'y'),
                 VersionCheck((5, 10)))] # HARDEN_BRANCH_PREDICTOR is enabled by default since v5.10
    if arch == 'ARM':
        l += [KconfigCheck('self_protection', 'defconfig', 'CPU_SW_DOMAIN_PAN', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'HARDEN_BRANCH_PREDICTOR', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'HARDEN_BRANCH_HISTORY', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'DEBUG_ALIGN_RODATA', 'y')]

    # 'self_protection', 'kspp'
    l += [KconfigCheck('self_protection', 'kspp', 'BUG_ON_DATA_CORRUPTION', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'SCHED_STACK_END_CHECK', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'SLAB_FREELIST_HARDENED', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'SLAB_FREELIST_RANDOM', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'SHUFFLE_PAGE_ALLOCATOR', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'FORTIFY_SOURCE', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_LIST', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_VIRTUAL', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_SG', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_CREDENTIALS', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_NOTIFIERS', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'INIT_ON_ALLOC_DEFAULT_ON', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'HW_RANDOM_TPM', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'STATIC_USERMODEHELPER', 'y')] # needs userspace support
    kfence_is_set = KconfigCheck('self_protection', 'kspp', 'KFENCE', 'y')
    l += [kfence_is_set]
    l += [AND(KconfigCheck('self_protection', 'my', 'KFENCE_SAMPLE_INTERVAL', 'is not off'),
              kfence_is_set)]
    randstruct_is_set = OR(KconfigCheck('self_protection', 'kspp', 'RANDSTRUCT_FULL', 'y'),
                           KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_RANDSTRUCT', 'y'))
    l += [randstruct_is_set]
    l += [AND(KconfigCheck('self_protection', 'kspp', 'RANDSTRUCT_PERFORMANCE', 'is not set'),
              KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_RANDSTRUCT_PERFORMANCE', 'is not set'),
              randstruct_is_set)]
    hardened_usercopy_is_set = KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY', 'y')
    l += [hardened_usercopy_is_set]
    l += [AND(KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY_FALLBACK', 'is not set'),
              hardened_usercopy_is_set)] # usercopy whitelist violations should be prohibited
    l += [AND(KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY_PAGESPAN', 'is not set'),
              hardened_usercopy_is_set)] # this debugging for HARDENED_USERCOPY is not needed for security
    l += [AND(KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_LATENT_ENTROPY', 'y'),
              gcc_plugins_support_is_set)]
    l += [OR(KconfigCheck('self_protection', 'kspp', 'MODULE_SIG', 'y'),
             modules_not_set)]
    l += [OR(KconfigCheck('self_protection', 'kspp', 'MODULE_SIG_ALL', 'y'),
             modules_not_set)]
    l += [OR(KconfigCheck('self_protection', 'kspp', 'MODULE_SIG_SHA512', 'y'),
             modules_not_set)]
    l += [OR(KconfigCheck('self_protection', 'kspp', 'MODULE_SIG_FORCE', 'y'),
             modules_not_set)] # refers to LOCKDOWN
    l += [OR(KconfigCheck('self_protection', 'kspp', 'INIT_STACK_ALL_ZERO', 'y'),
             KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_STRUCTLEAK_BYREF_ALL', 'y'))]
    l += [OR(KconfigCheck('self_protection', 'kspp', 'INIT_ON_FREE_DEFAULT_ON', 'y'),
             KconfigCheck('self_protection', 'kspp', 'PAGE_POISONING_ZERO', 'y'))]
             # CONFIG_INIT_ON_FREE_DEFAULT_ON was added in v5.3.
             # CONFIG_PAGE_POISONING_ZERO was removed in v5.11.
             # Starting from v5.11 CONFIG_PAGE_POISONING unconditionally checks
             # the 0xAA poison pattern on allocation.
             # That brings higher performance penalty.
    l += [OR(KconfigCheck('self_protection', 'kspp', 'EFI_DISABLE_PCI_DMA', 'y'),
             efi_not_set)]
    l += [OR(KconfigCheck('self_protection', 'kspp', 'RESET_ATTACK_MITIGATION', 'y'),
             efi_not_set)] # needs userspace support (systemd)
    ubsan_bounds_is_set = KconfigCheck('self_protection', 'kspp', 'UBSAN_BOUNDS', 'y')
    l += [ubsan_bounds_is_set]
    l += [OR(KconfigCheck('self_protection', 'kspp', 'UBSAN_LOCAL_BOUNDS', 'y'),
             AND(ubsan_bounds_is_set,
                 cc_is_gcc))]
    l += [AND(KconfigCheck('self_protection', 'kspp', 'UBSAN_TRAP', 'y'),
              ubsan_bounds_is_set,
              KconfigCheck('self_protection', 'kspp', 'UBSAN_SHIFT', 'is not set'),
              KconfigCheck('self_protection', 'kspp', 'UBSAN_DIV_ZERO', 'is not set'),
              KconfigCheck('self_protection', 'kspp', 'UBSAN_UNREACHABLE', 'is not set'),
              KconfigCheck('self_protection', 'kspp', 'UBSAN_BOOL', 'is not set'),
              KconfigCheck('self_protection', 'kspp', 'UBSAN_ENUM', 'is not set'),
              KconfigCheck('self_protection', 'kspp', 'UBSAN_ALIGNMENT', 'is not set'))] # only array index bounds checking with traps
    if arch in ('X86_64', 'ARM64', 'X86_32'):
        l += [AND(KconfigCheck('self_protection', 'kspp', 'UBSAN_SANITIZE_ALL', 'y'),
                  ubsan_bounds_is_set)] # ARCH_HAS_UBSAN_SANITIZE_ALL is not enabled for ARM
        stackleak_is_set = KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_STACKLEAK', 'y')
        l += [AND(stackleak_is_set, gcc_plugins_support_is_set)]
        l += [AND(KconfigCheck('self_protection', 'kspp', 'STACKLEAK_METRICS', 'is not set'),
                  stackleak_is_set,
                  gcc_plugins_support_is_set)]
        l += [AND(KconfigCheck('self_protection', 'kspp', 'STACKLEAK_RUNTIME_DISABLE', 'is not set'),
                  stackleak_is_set,
                  gcc_plugins_support_is_set)]
        l += [KconfigCheck('self_protection', 'kspp', 'RANDOMIZE_KSTACK_OFFSET_DEFAULT', 'y')]
    if arch in ('X86_64', 'ARM64'):
        cfi_clang_is_set = KconfigCheck('self_protection', 'kspp', 'CFI_CLANG', 'y')
        l += [cfi_clang_is_set]
        l += [AND(KconfigCheck('self_protection', 'kspp', 'CFI_PERMISSIVE', 'is not set'),
                  cfi_clang_is_set)]
    if arch in ('X86_64', 'X86_32'):
        l += [KconfigCheck('self_protection', 'kspp', 'SCHED_CORE', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'DEFAULT_MMAP_MIN_ADDR', '65536')]
        l += [KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_DMA_STRICT', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_PASSTHROUGH', 'is not set')] # true if IOMMU_DEFAULT_DMA_STRICT is set
        l += [AND(KconfigCheck('self_protection', 'kspp', 'INTEL_IOMMU_DEFAULT_ON', 'y'),
                  iommu_support_is_set)]
    if arch in ('ARM64', 'ARM'):
        l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_WX', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'WERROR', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'DEFAULT_MMAP_MIN_ADDR', '32768')]
        l += [KconfigCheck('self_protection', 'kspp', 'SYN_COOKIES', 'y')] # another reason?
    if arch == 'X86_64':
        l += [KconfigCheck('self_protection', 'kspp', 'SLS', 'y')] # vs CVE-2021-26341 in Straight-Line-Speculation
        l += [AND(KconfigCheck('self_protection', 'kspp', 'INTEL_IOMMU_SVM', 'y'),
                  iommu_support_is_set)]
        l += [AND(KconfigCheck('self_protection', 'kspp', 'AMD_IOMMU_V2', 'y'),
                  iommu_support_is_set)]
    if arch == 'ARM64':
        l += [KconfigCheck('self_protection', 'kspp', 'ARM64_SW_TTBR0_PAN', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'SHADOW_CALL_STACK', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'KASAN_HW_TAGS', 'y')] # see also: kasan=on, kasan.stacktrace=off, kasan.fault=panic
    if arch == 'X86_32':
        l += [KconfigCheck('self_protection', 'kspp', 'PAGE_TABLE_ISOLATION', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'HIGHMEM64G', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'X86_PAE', 'y')]
        l += [AND(KconfigCheck('self_protection', 'kspp', 'INTEL_IOMMU', 'y'),
                  iommu_support_is_set)]

    # 'self_protection', 'clipos'
    l += [KconfigCheck('self_protection', 'clipos', 'SLAB_MERGE_DEFAULT', 'is not set')]

    # 'security_policy'
    if arch in ('X86_64', 'ARM64', 'X86_32'):
        l += [KconfigCheck('security_policy', 'defconfig', 'SECURITY', 'y')]
    if arch == 'ARM':
        l += [KconfigCheck('security_policy', 'kspp', 'SECURITY', 'y')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_YAMA', 'y')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_LANDLOCK', 'y')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_SELINUX_DISABLE', 'is not set')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_SELINUX_BOOTPARAM', 'is not set')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_SELINUX_DEVELOP', 'is not set')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_LOCKDOWN_LSM', 'y')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_LOCKDOWN_LSM_EARLY', 'y')]
    l += [KconfigCheck('security_policy', 'kspp', 'LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY', 'y')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_WRITABLE_HOOKS', 'is not set')] # refers to SECURITY_SELINUX_DISABLE
    l += [OR(KconfigCheck('security_policy', 'my', 'SECURITY_SELINUX', 'y'),
             KconfigCheck('security_policy', 'my', 'SECURITY_APPARMOR', 'y'),
             KconfigCheck('security_policy', 'my', 'SECURITY_SMACK', 'y'),
             KconfigCheck('security_policy', 'my', 'SECURITY_TOMOYO', 'y'))] # one of major LSMs implementing MAC

    # 'cut_attack_surface', 'defconfig'
    l += [KconfigCheck('cut_attack_surface', 'defconfig', 'SECCOMP', 'y')]
    l += [KconfigCheck('cut_attack_surface', 'defconfig', 'SECCOMP_FILTER', 'y')]
    l += [OR(KconfigCheck('cut_attack_surface', 'defconfig', 'BPF_UNPRIV_DEFAULT_OFF', 'y'),
             bpf_syscall_not_set)] # see unprivileged_bpf_disabled
    if arch in ('X86_64', 'ARM64', 'X86_32'):
        l += [OR(KconfigCheck('cut_attack_surface', 'defconfig', 'STRICT_DEVMEM', 'y'),
                 devmem_not_set)] # refers to LOCKDOWN
    if arch in ('X86_64', 'X86_32'):
        l += [KconfigCheck('cut_attack_surface', 'defconfig', 'X86_INTEL_TSX_MODE_OFF', 'y')] # tsx=off

    # 'cut_attack_surface', 'kspp'
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'SECURITY_DMESG_RESTRICT', 'y')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'ACPI_CUSTOM_METHOD', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'COMPAT_BRK', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'DEVKMEM', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'BINFMT_MISC', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'INET_DIAG', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'KEXEC', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'PROC_KCORE', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'LEGACY_PTYS', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'HIBERNATION', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'COMPAT', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'IA32_EMULATION', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'X86_X32', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'X86_X32_ABI', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'MODIFY_LDT_SYSCALL', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'OABI_COMPAT', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'X86_MSR', 'is not set')] # refers to LOCKDOWN
    l += [modules_not_set]
    l += [devmem_not_set]
    l += [OR(KconfigCheck('cut_attack_surface', 'kspp', 'IO_STRICT_DEVMEM', 'y'),
             devmem_not_set)] # refers to LOCKDOWN
    l += [AND(KconfigCheck('cut_attack_surface', 'kspp', 'LDISC_AUTOLOAD', 'is not set'),
              KconfigCheck('cut_attack_surface', 'kspp', 'LDISC_AUTOLOAD', 'is present'))]
    if arch in ('X86_64', 'X86_32'):
        l += [KconfigCheck('cut_attack_surface', 'kspp', 'COMPAT_VDSO', 'is not set')]
                  # CONFIG_COMPAT_VDSO disabled ASLR of vDSO only on X86_64 and X86_32;
                  # on ARM64 this option has different meaning
    if arch == 'X86_64':
        l += [OR(KconfigCheck('cut_attack_surface', 'kspp', 'X86_VSYSCALL_EMULATION', 'is not set'),
                 KconfigCheck('cut_attack_surface', 'kspp', 'LEGACY_VSYSCALL_NONE', 'y'))]
                 # disabling X86_VSYSCALL_EMULATION turns vsyscall off completely,
                 # and LEGACY_VSYSCALL_NONE can be changed at boot time via the cmdline parameter
    if arch == 'ARM':
        l += [OR(KconfigCheck('cut_attack_surface', 'kspp', 'STRICT_DEVMEM', 'y'),
                 devmem_not_set)] # refers to LOCKDOWN

    # 'cut_attack_surface', 'grsec'
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'ZSMALLOC_STAT', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'PAGE_OWNER', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'DEBUG_KMEMLEAK', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'BINFMT_AOUT', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'KPROBE_EVENTS', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'UPROBE_EVENTS', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'GENERIC_TRACER', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'FUNCTION_TRACER', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'STACK_TRACER', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'HIST_TRIGGERS', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'BLK_DEV_IO_TRACE', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'PROC_VMCORE', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'PROC_PAGE_MONITOR', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'USELIB', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'CHECKPOINT_RESTORE', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'USERFAULTFD', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'HWPOISON_INJECT', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'MEM_SOFT_DIRTY', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'DEVPORT', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'DEBUG_FS', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'NOTIFIER_ERROR_INJECTION', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'FAIL_FUTEX', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'PUNIT_ATOM_DEBUG', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'ACPI_CONFIGFS', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'EDAC_DEBUG', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'DRM_I915_DEBUG', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'BCACHE_CLOSURES_DEBUG', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'DVB_C8SECTPFE', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'MTD_SLRAM', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'MTD_PHRAM', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'IO_URING', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'KCMP', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'RSEQ', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'LATENCYTOP', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'KCOV', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'PROVIDE_OHCI1394_DMA_INIT', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'SUNRPC_DEBUG', 'is not set')]
    l += [AND(KconfigCheck('cut_attack_surface', 'grsec', 'PTDUMP_DEBUGFS', 'is not set'),
              KconfigCheck('cut_attack_surface', 'grsec', 'X86_PTDUMP', 'is not set'))]

    # 'cut_attack_surface', 'maintainer'
    l += [KconfigCheck('cut_attack_surface', 'maintainer', 'DRM_LEGACY', 'is not set')] # recommended by Daniel Vetter in /issues/38
    l += [KconfigCheck('cut_attack_surface', 'maintainer', 'FB', 'is not set')] # recommended by Daniel Vetter in /issues/38
    l += [KconfigCheck('cut_attack_surface', 'maintainer', 'VT', 'is not set')] # recommended by Daniel Vetter in /issues/38
    l += [KconfigCheck('cut_attack_surface', 'maintainer', 'BLK_DEV_FD', 'is not set')] # recommended by Denis Efremov in /pull/54
    l += [KconfigCheck('cut_attack_surface', 'maintainer', 'BLK_DEV_FD_RAWCMD', 'is not set')] # recommended by Denis Efremov in /pull/62
    l += [KconfigCheck('cut_attack_surface', 'maintainer', 'NOUVEAU_LEGACY_CTX_SUPPORT', 'is not set')]
                                             # recommended by Dave Airlie in kernel commit b30a43ac7132cdda

    # 'cut_attack_surface', 'clipos'
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'STAGING', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'KSM', 'is not set')] # to prevent FLUSH+RELOAD attack
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'KALLSYMS', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'MAGIC_SYSRQ', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'KEXEC_FILE', 'is not set')] # refers to LOCKDOWN (permissive)
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'USER_NS', 'is not set')] # user.max_user_namespaces=0
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'X86_CPUID', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'X86_IOPL_IOPERM', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'ACPI_TABLE_UPGRADE', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'EFI_CUSTOM_SSDT_OVERLAYS', 'is not set')]
    # l += [KconfigCheck('cut_attack_surface', 'clipos', 'IKCONFIG', 'is not set')] # no, IKCONFIG is needed for this check :)

    # 'cut_attack_surface', 'lockdown'
    l += [KconfigCheck('cut_attack_surface', 'lockdown', 'EFI_TEST', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'lockdown', 'MMIOTRACE_TEST', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'lockdown', 'KPROBES', 'is not set')] # refers to LOCKDOWN
    l += [bpf_syscall_not_set] # refers to LOCKDOWN

    # 'cut_attack_surface', 'my'
    l += [KconfigCheck('cut_attack_surface', 'my', 'LEGACY_TIOCSTI', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'MMIOTRACE', 'is not set')] # refers to LOCKDOWN (permissive)
    l += [KconfigCheck('cut_attack_surface', 'my', 'LIVEPATCH', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'IP_DCCP', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'IP_SCTP', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'FTRACE', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'my', 'VIDEO_VIVID', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'INPUT_EVBUG', 'is not set')] # Can be used as a keylogger
    l += [KconfigCheck('cut_attack_surface', 'my', 'KGDB', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'AIO', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'CORESIGHT', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'XFS_SUPPORT_V4', 'is not set')]
    l += [OR(KconfigCheck('cut_attack_surface', 'my', 'TRIM_UNUSED_KSYMS', 'y'),
             modules_not_set)]
    l += [KconfigCheck('cut_attack_surface', 'my', 'MODULE_FORCE_LOAD', 'is not set')]

    # 'harden_userspace'
    if arch == 'ARM64':
        l += [KconfigCheck('harden_userspace', 'defconfig', 'ARM64_PTR_AUTH', 'y')]
        l += [KconfigCheck('harden_userspace', 'defconfig', 'ARM64_BTI', 'y')]
    if arch in ('ARM', 'X86_32'):
        l += [KconfigCheck('harden_userspace', 'defconfig', 'VMSPLIT_3G', 'y')]
    l += [KconfigCheck('harden_userspace', 'clipos', 'COREDUMP', 'is not set')]
    l += [KconfigCheck('harden_userspace', 'my', 'ARCH_MMAP_RND_BITS', 'MAX')] # 'MAX' value is refined using ARCH_MMAP_RND_BITS_MAX
394 def add_cmdline_checks(l, arch):
395 assert(arch), 'empty arch'
397 # Calling the CmdlineCheck class constructor:
398 # CmdlineCheck(reason, decision, name, expected)
400 # [!] Don't add CmdlineChecks in add_kconfig_checks() to avoid wrong results
401 # when the tool doesn't check the cmdline.
403 # [!] Make sure that values of the options in CmdlineChecks need normalization.
404 # For more info see normalize_cmdline_options().
406 # A common pattern for checking the 'param_x' cmdline parameter
407 # that __overrides__ the 'PARAM_X_DEFAULT' kconfig option:
408 # l += [OR(CmdlineCheck(reason, decision, 'param_x', '1'),
409 # AND(KconfigCheck(reason, decision, 'PARAM_X_DEFAULT_ON', 'y'),
410 # CmdlineCheck(reason, decision, 'param_x, 'is not set')))]
412 # Here we don't check the kconfig options or minimal kernel version
413 # required for the cmdline parameters. That would make the checks
414 # very complex and not give a 100% guarantee anyway.
416 # 'self_protection', 'defconfig'
417 l += [CmdlineCheck('self_protection', 'defconfig', 'nosmep', 'is not set')]
418 l += [CmdlineCheck('self_protection', 'defconfig', 'nosmap', 'is not set')]
419 l += [CmdlineCheck('self_protection', 'defconfig', 'nokaslr', 'is not set')]
420 l += [CmdlineCheck('self_protection', 'defconfig', 'nopti', 'is not set')]
421 l += [CmdlineCheck('self_protection', 'defconfig', 'nospectre_v1', 'is not set')]
422 l += [CmdlineCheck('self_protection', 'defconfig', 'nospectre_v2', 'is not set')]
423 l += [CmdlineCheck('self_protection', 'defconfig', 'nospectre_bhb', 'is not set')]
424 l += [CmdlineCheck('self_protection', 'defconfig', 'nospec_store_bypass_disable', 'is not set')]
425 l += [CmdlineCheck('self_protection', 'defconfig', 'dis_ucode_ldr', 'is not set')]
426 l += [CmdlineCheck('self_protection', 'defconfig', 'arm64.nobti', 'is not set')]
427 l += [CmdlineCheck('self_protection', 'defconfig', 'arm64.nopauth', 'is not set')]
428 l += [CmdlineCheck('self_protection', 'defconfig', 'arm64.nomte', 'is not set')]
429 if arch in ('X86_64', 'X86_32'):
430 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spectre_v2', 'is not off'),
431 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
432 CmdlineCheck('self_protection', 'defconfig', 'spectre_v2', 'is not set')))]
433 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spectre_v2_user', 'is not off'),
434 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
435 CmdlineCheck('self_protection', 'defconfig', 'spectre_v2_user', 'is not set')))]
436 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spec_store_bypass_disable', 'is not off'),
437 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
438 CmdlineCheck('self_protection', 'defconfig', 'spec_store_bypass_disable', 'is not set')))]
439 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'l1tf', 'is not off'),
440 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
441 CmdlineCheck('self_protection', 'defconfig', 'l1tf', 'is not set')))]
442 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'mds', 'is not off'),
443 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
444 CmdlineCheck('self_protection', 'defconfig', 'mds', 'is not set')))]
445 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'tsx_async_abort', 'is not off'),
446 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
447 CmdlineCheck('self_protection', 'defconfig', 'tsx_async_abort', 'is not set')))]
448 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'srbds', 'is not off'),
449 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
450 CmdlineCheck('self_protection', 'defconfig', 'srbds', 'is not set')))]
451 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'mmio_stale_data', 'is not off'),
452 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
453 CmdlineCheck('self_protection', 'defconfig', 'mmio_stale_data', 'is not set')))]
454 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'retbleed', 'is not off'),
455 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
456 CmdlineCheck('self_protection', 'defconfig', 'retbleed', 'is not set')))]
457 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spec_rstack_overflow', 'is not off'),
458 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
459 CmdlineCheck('self_protection', 'defconfig', 'spec_rstack_overflow', 'is not set')))]
461 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'kpti', 'is not off'),
462 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
463 CmdlineCheck('self_protection', 'defconfig', 'kpti', 'is not set')))]
464 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'ssbd', 'kernel'),
465 CmdlineCheck('self_protection', 'my', 'ssbd', 'force-on'),
466 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
467 CmdlineCheck('self_protection', 'defconfig', 'ssbd', 'is not set')))]
468 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'rodata', 'full'),
469 AND(KconfigCheck('self_protection', 'defconfig', 'RODATA_FULL_DEFAULT_ENABLED', 'y'),
470 CmdlineCheck('self_protection', 'defconfig', 'rodata', 'is not set')))]
472 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'rodata', 'on'),
473 CmdlineCheck('self_protection', 'defconfig', 'rodata', 'is not set'))]
475 # 'self_protection', 'kspp'
476 l += [CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt')]
477 l += [CmdlineCheck('self_protection', 'kspp', 'slab_merge', 'is not set')] # consequence of 'slab_nomerge' by kspp
478 l += [CmdlineCheck('self_protection', 'kspp', 'slub_merge', 'is not set')] # consequence of 'slab_nomerge' by kspp
479 l += [CmdlineCheck('self_protection', 'kspp', 'page_alloc.shuffle', '1')]
480 l += [OR(CmdlineCheck('self_protection', 'kspp', 'slab_nomerge', 'is present'),
481 AND(KconfigCheck('self_protection', 'clipos', 'SLAB_MERGE_DEFAULT', 'is not set'),
482 CmdlineCheck('self_protection', 'kspp', 'slab_merge', 'is not set'),
483 CmdlineCheck('self_protection', 'kspp', 'slub_merge', 'is not set')))]
484 l += [OR(CmdlineCheck('self_protection', 'kspp', 'init_on_alloc', '1'),
485 AND(KconfigCheck('self_protection', 'kspp', 'INIT_ON_ALLOC_DEFAULT_ON', 'y'),
486 CmdlineCheck('self_protection', 'kspp', 'init_on_alloc', 'is not set')))]
487 l += [OR(CmdlineCheck('self_protection', 'kspp', 'init_on_free', '1'),
488 AND(KconfigCheck('self_protection', 'kspp', 'INIT_ON_FREE_DEFAULT_ON', 'y'),
489 CmdlineCheck('self_protection', 'kspp', 'init_on_free', 'is not set')),
490 AND(CmdlineCheck('self_protection', 'kspp', 'page_poison', '1'),
491 KconfigCheck('self_protection', 'kspp', 'PAGE_POISONING_ZERO', 'y'),
492 CmdlineCheck('self_protection', 'kspp', 'slub_debug', 'P')))]
493 l += [OR(CmdlineCheck('self_protection', 'kspp', 'hardened_usercopy', '1'),
494 AND(KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY', 'y'),
495 CmdlineCheck('self_protection', 'kspp', 'hardened_usercopy', 'is not set')))]
496 l += [AND(CmdlineCheck('self_protection', 'kspp', 'slab_common.usercopy_fallback', 'is not set'),
497 KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY_FALLBACK', 'is not set'))]
498 # don't require slab_common.usercopy_fallback=0,
499 # since HARDENED_USERCOPY_FALLBACK was removed in Linux v5.16
500 if arch in ('X86_64', 'ARM64', 'X86_32'):
501 l += [OR(CmdlineCheck('self_protection', 'kspp', 'iommu.strict', '1'),
502 AND(KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_DMA_STRICT', 'y'),
503 CmdlineCheck('self_protection', 'kspp', 'iommu.strict', 'is not set')))]
504 l += [OR(CmdlineCheck('self_protection', 'kspp', 'iommu.passthrough', '0'),
505 AND(KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_PASSTHROUGH', 'is not set'),
506 CmdlineCheck('self_protection', 'kspp', 'iommu.passthrough', 'is not set')))]
507 l += [OR(CmdlineCheck('self_protection', 'kspp', 'randomize_kstack_offset', '1'),
508 AND(KconfigCheck('self_protection', 'kspp', 'RANDOMIZE_KSTACK_OFFSET_DEFAULT', 'y'),
509 CmdlineCheck('self_protection', 'kspp', 'randomize_kstack_offset', 'is not set')))]
510 if arch in ('X86_64', 'X86_32'):
511 l += [AND(CmdlineCheck('self_protection', 'kspp', 'pti', 'on'),
512 CmdlineCheck('self_protection', 'defconfig', 'nopti', 'is not set'))]
514 # 'self_protection', 'clipos'
515 if arch in ('X86_64', 'X86_32'):
516 l += [CmdlineCheck('self_protection', 'clipos', 'iommu', 'force')]
518 # 'self_protection', 'my'
519 l += [OR(CmdlineCheck('self_protection', 'my', 'kfence.sample_interval', 'is not off'),
520 AND(KconfigCheck('self_protection', 'my', 'KFENCE_SAMPLE_INTERVAL', 'is not off'),
521 CmdlineCheck('self_protection', 'my', 'kfence.sample_interval', 'is not set')))]
523 # 'cut_attack_surface', 'defconfig'
524 if arch in ('X86_64', 'X86_32'):
525 l += [OR(CmdlineCheck('cut_attack_surface', 'defconfig', 'tsx', 'off'),
526 AND(KconfigCheck('cut_attack_surface', 'defconfig', 'X86_INTEL_TSX_MODE_OFF', 'y'),
527 CmdlineCheck('cut_attack_surface', 'defconfig', 'tsx', 'is not set')))]
529 # 'cut_attack_surface', 'kspp'
530 l += [CmdlineCheck('cut_attack_surface', 'kspp', 'nosmt', 'is present')] # slow (high performance penalty)
532 l += [OR(CmdlineCheck('cut_attack_surface', 'kspp', 'vsyscall', 'none'),
533 KconfigCheck('cut_attack_surface', 'kspp', 'X86_VSYSCALL_EMULATION', 'is not set'),
534 AND(KconfigCheck('cut_attack_surface', 'kspp', 'LEGACY_VSYSCALL_NONE', 'y'),
535 CmdlineCheck('cut_attack_surface', 'kspp', 'vsyscall', 'is not set')))]
536 l += [OR(CmdlineCheck('cut_attack_surface', 'my', 'vdso32', '1'),
537 CmdlineCheck('cut_attack_surface', 'my', 'vdso32', '0'),
538 AND(KconfigCheck('cut_attack_surface', 'kspp', 'COMPAT_VDSO', 'is not set'),
539 CmdlineCheck('cut_attack_surface', 'my', 'vdso32', 'is not set')))] # the vdso32 parameter must not be 2
541 l += [OR(CmdlineCheck('cut_attack_surface', 'my', 'vdso32', '1'),
542 CmdlineCheck('cut_attack_surface', 'my', 'vdso', '1'),
543 CmdlineCheck('cut_attack_surface', 'my', 'vdso32', '0'),
544 CmdlineCheck('cut_attack_surface', 'my', 'vdso', '0'),
545 AND(KconfigCheck('cut_attack_surface', 'kspp', 'COMPAT_VDSO', 'is not set'),
546 CmdlineCheck('cut_attack_surface', 'my', 'vdso32', 'is not set'),
547 CmdlineCheck('cut_attack_surface', 'my', 'vdso', 'is not set')))] # the vdso and vdso32 parameters must not be 2
549 # 'cut_attack_surface', 'grsec'
550 # The cmdline checks compatible with the kconfig options disabled by grsecurity...
551 l += [OR(CmdlineCheck('cut_attack_surface', 'grsec', 'debugfs', 'off'),
552 KconfigCheck('cut_attack_surface', 'grsec', 'DEBUG_FS', 'is not set'))] # ... the end
554 # 'cut_attack_surface', 'my'
555 l += [CmdlineCheck('cut_attack_surface', 'my', 'sysrq_always_enabled', 'is not set')]
558 l += [CmdlineCheck('harden_userspace', 'defconfig', 'norandmaps', 'is not set')]
561 no_kstrtobool_options = [
562 'debugfs', # See debugfs_kernel() in fs/debugfs/inode.c
563 'mitigations', # See mitigations_parse_cmdline() in kernel/cpu.c
564 'pti', # See pti_check_boottime_disable() in arch/x86/mm/pti.c
565 'spectre_v2', # See spectre_v2_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
566 'spectre_v2_user', # See spectre_v2_parse_user_cmdline() in arch/x86/kernel/cpu/bugs.c
567 'spec_store_bypass_disable', # See ssb_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
568 'l1tf', # See l1tf_cmdline() in arch/x86/kernel/cpu/bugs.c
569 'mds', # See mds_cmdline() in arch/x86/kernel/cpu/bugs.c
570 'tsx_async_abort', # See tsx_async_abort_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
571 'srbds', # See srbds_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
572 'mmio_stale_data', # See mmio_stale_data_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
573 'retbleed', # See retbleed_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
574 'rodata', # See set_debug_rodata() in init/main.c
575 'ssbd', # See parse_spectre_v4_param() in arch/arm64/kernel/proton-pack.c
576 'spec_rstack_overflow', # See srso_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
577 'slub_debug', # See setup_slub_debug() in mm/slub.c
578 'iommu', # See iommu_setup() in arch/x86/kernel/pci-dma.c
579 'vsyscall', # See vsyscall_setup() in arch/x86/entry/vsyscall/vsyscall_64.c
580 'vdso32', # See vdso32_setup() in arch/x86/entry/vdso/vdso32-setup.c
581 'vdso', # See vdso32_setup() in arch/x86/entry/vdso/vdso32-setup.c
582 'tsx' # See tsx_init() in arch/x86/kernel/cpu/tsx.c
def normalize_cmdline_options(option, value):
    """Normalize a cmdline option value to '1'/'0' the way kstrtobool() would.

    Options listed in no_kstrtobool_options are returned unchanged, since
    the Linux kernel parses them with custom code instead of kstrtobool().
    """
    # Don't normalize the cmdline option values if
    # the Linux kernel doesn't use kstrtobool() for them
    if option in no_kstrtobool_options:
        return value

    # Implement a limited part of the kstrtobool() logic
    if value.lower() in ('1', 'on', 'y', 'yes', 't', 'true'):
        return '1'
    if value.lower() in ('0', 'off', 'n', 'no', 'f', 'false'):
        return '0'

    # Preserve unique values
    return value
602 # TODO: draft of security hardening sysctls:
603 # what about bpf_jit_enable?
604 # vm.mmap_min_addr has a good value
605 # nosmt sysfs control file
606 # vm.mmap_rnd_bits=max (?)
608 # abi.vsyscall32 (any value except 2)
609 # kernel.oops_limit (think about a proper value)
610 # kernel.warn_limit (think about a proper value)
611 # net.ipv4.tcp_syncookies=1 (?)
def add_sysctl_checks(l, arch):
    """Append the sysctl hardening checks to the list l."""
    # This function may be called with arch=None

    # Calling the SysctlCheck class constructor:
    # SysctlCheck(reason, decision, name, expected)

    # 'self_protection', 'kspp'
    l += [SysctlCheck('self_protection', 'kspp', 'net.core.bpf_jit_harden', '2')]

    # 'cut_attack_surface', 'kspp'
    l += [SysctlCheck('cut_attack_surface', 'kspp', 'kernel.dmesg_restrict', '1')]
    l += [SysctlCheck('cut_attack_surface', 'kspp', 'kernel.perf_event_paranoid', '3')] # with a custom patch, see https://lwn.net/Articles/696216/
    l += [SysctlCheck('cut_attack_surface', 'kspp', 'kernel.kexec_load_disabled', '1')]
    l += [SysctlCheck('cut_attack_surface', 'kspp', 'user.max_user_namespaces', '0')]
    l += [SysctlCheck('cut_attack_surface', 'kspp', 'dev.tty.ldisc_autoload', '0')]
    l += [SysctlCheck('cut_attack_surface', 'kspp', 'kernel.unprivileged_bpf_disabled', '1')]
    l += [SysctlCheck('cut_attack_surface', 'kspp', 'kernel.kptr_restrict', '2')]
    l += [SysctlCheck('cut_attack_surface', 'kspp', 'dev.tty.legacy_tiocsti', '0')]
    l += [SysctlCheck('cut_attack_surface', 'kspp', 'vm.unprivileged_userfaultfd', '0')]
    # At first, it disabled unprivileged userfaultfd,
    # and since v5.11 it enables unprivileged userfaultfd for user-mode only.

    # 'cut_attack_surface', 'clipos'
    l += [SysctlCheck('cut_attack_surface', 'clipos', 'kernel.modules_disabled', '1')] # radical, but may be useful in some cases

    # 'harden_userspace', 'kspp'
    l += [SysctlCheck('harden_userspace', 'kspp', 'fs.protected_symlinks', '1')]
    l += [SysctlCheck('harden_userspace', 'kspp', 'fs.protected_hardlinks', '1')]
    l += [SysctlCheck('harden_userspace', 'kspp', 'fs.protected_fifos', '2')]
    l += [SysctlCheck('harden_userspace', 'kspp', 'fs.protected_regular', '2')]
    l += [SysctlCheck('harden_userspace', 'kspp', 'fs.suid_dumpable', '0')]
    l += [SysctlCheck('harden_userspace', 'kspp', 'kernel.randomize_va_space', '2')]
    l += [SysctlCheck('harden_userspace', 'kspp', 'kernel.yama.ptrace_scope', '3')]