"""
This tool is for checking the security hardening options of the Linux kernel.

Author: Alexander Popov <alex.popov@linux.com>

This module contains knowledge for checks.
"""
# pylint: disable=missing-function-docstring,line-too-long,invalid-name
# pylint: disable=too-many-branches,too-many-statements
from .engine import KconfigCheck, CmdlineCheck, SysctlCheck, VersionCheck, OR, AND
def add_kconfig_checks(l, arch):
    """
    Append the Kconfig hardening checks for the given architecture to `l`.

    l    -- list collecting the check objects (mutated in place)
    arch -- architecture identifier ('X86_64', 'X86_32', 'ARM64', 'ARM');
            must be non-empty
    """
    assert(arch), 'empty arch'

    # NOTE(review): several continuation lines and `if arch ...:` headers were
    # lost when this file was extracted; they are restored below from context.
    # Verify the restored lines against the project history.

    # Calling the KconfigCheck class constructor:
    #     KconfigCheck(reason, decision, name, expected)
    #
    # [!] Don't add CmdlineChecks in add_kconfig_checks() to avoid wrong results
    #     when the tool doesn't check the cmdline.

    # Helper checks reused below (not added to `l` on their own).
    efi_not_set = KconfigCheck('-', '-', 'EFI', 'is not set')
    cc_is_gcc = KconfigCheck('-', '-', 'CC_IS_GCC', 'y') # exists since v4.18
    cc_is_clang = KconfigCheck('-', '-', 'CC_IS_CLANG', 'y') # exists since v4.18

    modules_not_set = KconfigCheck('cut_attack_surface', 'kspp', 'MODULES', 'is not set') # radical, but may be useful in some cases
    devmem_not_set = KconfigCheck('cut_attack_surface', 'kspp', 'DEVMEM', 'is not set') # refers to LOCKDOWN
    bpf_syscall_not_set = KconfigCheck('cut_attack_surface', 'lockdown', 'BPF_SYSCALL', 'is not set') # refers to LOCKDOWN

    # 'self_protection', 'defconfig'
    l += [KconfigCheck('self_protection', 'defconfig', 'BUG', 'y')]
    l += [KconfigCheck('self_protection', 'defconfig', 'SLUB_DEBUG', 'y')]
    l += [KconfigCheck('self_protection', 'defconfig', 'THREAD_INFO_IN_TASK', 'y')]
    gcc_plugins_support_is_set = KconfigCheck('self_protection', 'defconfig', 'GCC_PLUGINS', 'y')
    l += [gcc_plugins_support_is_set]
    iommu_support_is_set = KconfigCheck('self_protection', 'defconfig', 'IOMMU_SUPPORT', 'y')
    l += [iommu_support_is_set] # is needed for mitigating DMA attacks
    l += [OR(KconfigCheck('self_protection', 'defconfig', 'STACKPROTECTOR', 'y'),
             KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR', 'y'),
             KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR_REGULAR', 'y'),
             KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR_AUTO', 'y'),
             KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR_STRONG', 'y'))]
    l += [OR(KconfigCheck('self_protection', 'defconfig', 'STACKPROTECTOR_STRONG', 'y'),
             KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR_STRONG', 'y'))]
    l += [OR(KconfigCheck('self_protection', 'defconfig', 'STRICT_KERNEL_RWX', 'y'),
             KconfigCheck('self_protection', 'defconfig', 'DEBUG_RODATA', 'y'))] # before v4.11
    l += [OR(KconfigCheck('self_protection', 'defconfig', 'STRICT_MODULE_RWX', 'y'),
             KconfigCheck('self_protection', 'defconfig', 'DEBUG_SET_MODULE_RONX', 'y'),
             modules_not_set)] # DEBUG_SET_MODULE_RONX was before v4.11
    l += [OR(KconfigCheck('self_protection', 'defconfig', 'REFCOUNT_FULL', 'y'),
             VersionCheck((5, 5)))] # REFCOUNT_FULL is enabled by default since v5.5
    if arch in ('X86_64', 'ARM64', 'X86_32'):
        l += [KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_BASE', 'y')]
    if arch in ('X86_64', 'ARM64', 'ARM'):
        l += [KconfigCheck('self_protection', 'defconfig', 'VMAP_STACK', 'y')]
    if arch in ('X86_64', 'X86_32'):
        l += [KconfigCheck('self_protection', 'defconfig', 'SPECULATION_MITIGATIONS', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'DEBUG_WX', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'WERROR', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'X86_MCE', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'X86_MCE_INTEL', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'X86_MCE_AMD', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'RETPOLINE', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'CPU_SRSO', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'SYN_COOKIES', 'y')] # another reason?
        microcode_is_set = KconfigCheck('self_protection', 'defconfig', 'MICROCODE', 'y')
        l += [microcode_is_set] # is needed for mitigating CPU bugs
        l += [OR(KconfigCheck('self_protection', 'defconfig', 'MICROCODE_INTEL', 'y'),
                 AND(microcode_is_set,
                     VersionCheck((6, 6))))] # MICROCODE_INTEL was included in MICROCODE since v6.6
        l += [OR(KconfigCheck('self_protection', 'defconfig', 'MICROCODE_AMD', 'y'),
                 AND(microcode_is_set,
                     VersionCheck((6, 6))))] # MICROCODE_AMD was included in MICROCODE since v6.6
        l += [OR(KconfigCheck('self_protection', 'defconfig', 'X86_SMAP', 'y'),
                 VersionCheck((5, 19)))] # X86_SMAP is enabled by default since v5.19
        l += [OR(KconfigCheck('self_protection', 'defconfig', 'X86_UMIP', 'y'),
                 KconfigCheck('self_protection', 'defconfig', 'X86_INTEL_UMIP', 'y'))]
    if arch in ('ARM64', 'ARM'):
        l += [KconfigCheck('self_protection', 'defconfig', 'IOMMU_DEFAULT_DMA_STRICT', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'IOMMU_DEFAULT_PASSTHROUGH', 'is not set')] # true if IOMMU_DEFAULT_DMA_STRICT is set
        l += [KconfigCheck('self_protection', 'defconfig', 'STACKPROTECTOR_PER_TASK', 'y')]
    if arch == 'X86_64':
        l += [KconfigCheck('self_protection', 'defconfig', 'PAGE_TABLE_ISOLATION', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_MEMORY', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'X86_KERNEL_IBT', 'y')]
        l += [AND(KconfigCheck('self_protection', 'defconfig', 'INTEL_IOMMU', 'y'),
                  iommu_support_is_set)]
        l += [AND(KconfigCheck('self_protection', 'defconfig', 'AMD_IOMMU', 'y'),
                  iommu_support_is_set)]
    if arch == 'ARM64':
        l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_PAN', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_EPAN', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'UNMAP_KERNEL_AT_EL0', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_E0PD', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'RODATA_FULL_DEFAULT_ENABLED', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_PTR_AUTH_KERNEL', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_BTI_KERNEL', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'MITIGATE_SPECTRE_BRANCH_HISTORY', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_MTE', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_MODULE_REGION_FULL', 'y')]
        l += [OR(KconfigCheck('self_protection', 'defconfig', 'HARDEN_EL2_VECTORS', 'y'),
                 AND(KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_BASE', 'y'),
                     VersionCheck((5, 9))))] # HARDEN_EL2_VECTORS was included in RANDOMIZE_BASE in v5.9
        l += [OR(KconfigCheck('self_protection', 'defconfig', 'HARDEN_BRANCH_PREDICTOR', 'y'),
                 VersionCheck((5, 10)))] # HARDEN_BRANCH_PREDICTOR is enabled by default since v5.10
    if arch == 'ARM':
        l += [KconfigCheck('self_protection', 'defconfig', 'CPU_SW_DOMAIN_PAN', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'HARDEN_BRANCH_PREDICTOR', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'HARDEN_BRANCH_HISTORY', 'y')]
        l += [KconfigCheck('self_protection', 'defconfig', 'DEBUG_ALIGN_RODATA', 'y')]

    # 'self_protection', 'kspp'
    l += [KconfigCheck('self_protection', 'kspp', 'BUG_ON_DATA_CORRUPTION', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'SCHED_STACK_END_CHECK', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'SLAB_FREELIST_HARDENED', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'SLAB_FREELIST_RANDOM', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'SHUFFLE_PAGE_ALLOCATOR', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'FORTIFY_SOURCE', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_LIST', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_VIRTUAL', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_SG', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_CREDENTIALS', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_NOTIFIERS', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'INIT_ON_ALLOC_DEFAULT_ON', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'HW_RANDOM_TPM', 'y')]
    l += [KconfigCheck('self_protection', 'kspp', 'STATIC_USERMODEHELPER', 'y')] # needs userspace support
    kfence_is_set = KconfigCheck('self_protection', 'kspp', 'KFENCE', 'y')
    l += [kfence_is_set]
    l += [AND(KconfigCheck('self_protection', 'my', 'KFENCE_SAMPLE_INTERVAL', 'is not off'),
              kfence_is_set)]
    randstruct_is_set = OR(KconfigCheck('self_protection', 'kspp', 'RANDSTRUCT_FULL', 'y'),
                           KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_RANDSTRUCT', 'y'))
    l += [randstruct_is_set]
    l += [AND(KconfigCheck('self_protection', 'kspp', 'RANDSTRUCT_PERFORMANCE', 'is not set'),
              KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_RANDSTRUCT_PERFORMANCE', 'is not set'),
              randstruct_is_set)]
    hardened_usercopy_is_set = KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY', 'y')
    l += [hardened_usercopy_is_set]
    l += [AND(KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY_FALLBACK', 'is not set'),
              hardened_usercopy_is_set)] # usercopy whitelist violations should be prohibited
    l += [AND(KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY_PAGESPAN', 'is not set'),
              hardened_usercopy_is_set)] # this debugging for HARDENED_USERCOPY is not needed for security
    l += [AND(KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_LATENT_ENTROPY', 'y'),
              gcc_plugins_support_is_set)]
    l += [OR(KconfigCheck('self_protection', 'kspp', 'MODULE_SIG', 'y'),
             modules_not_set)]
    l += [OR(KconfigCheck('self_protection', 'kspp', 'MODULE_SIG_ALL', 'y'),
             modules_not_set)]
    l += [OR(KconfigCheck('self_protection', 'kspp', 'MODULE_SIG_SHA512', 'y'),
             modules_not_set)]
    l += [OR(KconfigCheck('self_protection', 'kspp', 'MODULE_SIG_FORCE', 'y'),
             modules_not_set)] # refers to LOCKDOWN
    l += [OR(KconfigCheck('self_protection', 'kspp', 'INIT_STACK_ALL_ZERO', 'y'),
             KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_STRUCTLEAK_BYREF_ALL', 'y'))]
    l += [OR(KconfigCheck('self_protection', 'kspp', 'INIT_ON_FREE_DEFAULT_ON', 'y'),
             KconfigCheck('self_protection', 'kspp', 'PAGE_POISONING_ZERO', 'y'))]
             # CONFIG_INIT_ON_FREE_DEFAULT_ON was added in v5.3.
             # CONFIG_PAGE_POISONING_ZERO was removed in v5.11.
             # Starting from v5.11 CONFIG_PAGE_POISONING unconditionally checks
             # the 0xAA poison pattern on allocation.
             # That brings higher performance penalty.
    l += [OR(KconfigCheck('self_protection', 'kspp', 'EFI_DISABLE_PCI_DMA', 'y'),
             efi_not_set)]
    l += [OR(KconfigCheck('self_protection', 'kspp', 'RESET_ATTACK_MITIGATION', 'y'),
             efi_not_set)] # needs userspace support (systemd)
    ubsan_bounds_is_set = KconfigCheck('self_protection', 'kspp', 'UBSAN_BOUNDS', 'y')
    l += [ubsan_bounds_is_set]
    l += [OR(KconfigCheck('self_protection', 'kspp', 'UBSAN_LOCAL_BOUNDS', 'y'),
             AND(ubsan_bounds_is_set,
                 cc_is_gcc))] # UBSAN_LOCAL_BOUNDS exists only for Clang builds
    l += [AND(KconfigCheck('self_protection', 'kspp', 'UBSAN_TRAP', 'y'),
              ubsan_bounds_is_set,
              KconfigCheck('self_protection', 'kspp', 'UBSAN_SHIFT', 'is not set'),
              KconfigCheck('self_protection', 'kspp', 'UBSAN_DIV_ZERO', 'is not set'),
              KconfigCheck('self_protection', 'kspp', 'UBSAN_UNREACHABLE', 'is not set'),
              KconfigCheck('self_protection', 'kspp', 'UBSAN_BOOL', 'is not set'),
              KconfigCheck('self_protection', 'kspp', 'UBSAN_ENUM', 'is not set'),
              KconfigCheck('self_protection', 'kspp', 'UBSAN_ALIGNMENT', 'is not set'))] # only array index bounds checking with traps
    if arch in ('X86_64', 'ARM64', 'X86_32'):
        l += [AND(KconfigCheck('self_protection', 'kspp', 'UBSAN_SANITIZE_ALL', 'y'),
                  ubsan_bounds_is_set)] # ARCH_HAS_UBSAN_SANITIZE_ALL is not enabled for ARM
        stackleak_is_set = KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_STACKLEAK', 'y')
        l += [AND(stackleak_is_set, gcc_plugins_support_is_set)]
        l += [AND(KconfigCheck('self_protection', 'kspp', 'STACKLEAK_METRICS', 'is not set'),
                  stackleak_is_set,
                  gcc_plugins_support_is_set)]
        l += [AND(KconfigCheck('self_protection', 'kspp', 'STACKLEAK_RUNTIME_DISABLE', 'is not set'),
                  stackleak_is_set,
                  gcc_plugins_support_is_set)]
        l += [KconfigCheck('self_protection', 'kspp', 'RANDOMIZE_KSTACK_OFFSET_DEFAULT', 'y')]
    if arch in ('X86_64', 'ARM64'):
        cfi_clang_is_set = KconfigCheck('self_protection', 'kspp', 'CFI_CLANG', 'y')
        l += [cfi_clang_is_set]
        l += [AND(KconfigCheck('self_protection', 'kspp', 'CFI_PERMISSIVE', 'is not set'),
                  cfi_clang_is_set)]
    if arch in ('X86_64', 'X86_32'):
        l += [KconfigCheck('self_protection', 'kspp', 'SCHED_CORE', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'DEFAULT_MMAP_MIN_ADDR', '65536')]
        l += [KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_DMA_STRICT', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_PASSTHROUGH', 'is not set')] # true if IOMMU_DEFAULT_DMA_STRICT is set
        l += [AND(KconfigCheck('self_protection', 'kspp', 'INTEL_IOMMU_DEFAULT_ON', 'y'),
                  iommu_support_is_set)]
    if arch in ('ARM64', 'ARM'):
        l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_WX', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'WERROR', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'DEFAULT_MMAP_MIN_ADDR', '32768')]
        l += [KconfigCheck('self_protection', 'kspp', 'SYN_COOKIES', 'y')] # another reason?
    if arch == 'X86_64':
        l += [KconfigCheck('self_protection', 'kspp', 'SLS', 'y')] # vs CVE-2021-26341 in Straight-Line-Speculation
        l += [AND(KconfigCheck('self_protection', 'kspp', 'INTEL_IOMMU_SVM', 'y'),
                  iommu_support_is_set)]
        l += [AND(KconfigCheck('self_protection', 'kspp', 'AMD_IOMMU_V2', 'y'),
                  iommu_support_is_set)]
    if arch == 'ARM64':
        l += [KconfigCheck('self_protection', 'kspp', 'ARM64_SW_TTBR0_PAN', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'SHADOW_CALL_STACK', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'KASAN_HW_TAGS', 'y')] # see also: kasan=on, kasan.stacktrace=off, kasan.fault=panic
    if arch == 'X86_32':
        l += [KconfigCheck('self_protection', 'kspp', 'PAGE_TABLE_ISOLATION', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'HIGHMEM64G', 'y')]
        l += [KconfigCheck('self_protection', 'kspp', 'X86_PAE', 'y')]
        l += [AND(KconfigCheck('self_protection', 'kspp', 'INTEL_IOMMU', 'y'),
                  iommu_support_is_set)]

    # 'self_protection', 'clipos'
    l += [KconfigCheck('self_protection', 'clipos', 'SLAB_MERGE_DEFAULT', 'is not set')]

    # 'self_protection', 'my'
    l += [KconfigCheck('self_protection', 'my', 'LIST_HARDENED', 'y')]

    # 'security_policy'
    if arch in ('X86_64', 'ARM64', 'X86_32'):
        l += [KconfigCheck('security_policy', 'defconfig', 'SECURITY', 'y')]
    if arch == 'ARM':
        l += [KconfigCheck('security_policy', 'kspp', 'SECURITY', 'y')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_YAMA', 'y')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_LANDLOCK', 'y')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_SELINUX_DISABLE', 'is not set')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_SELINUX_BOOTPARAM', 'is not set')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_SELINUX_DEVELOP', 'is not set')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_LOCKDOWN_LSM', 'y')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_LOCKDOWN_LSM_EARLY', 'y')]
    l += [KconfigCheck('security_policy', 'kspp', 'LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY', 'y')]
    l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_WRITABLE_HOOKS', 'is not set')] # refers to SECURITY_SELINUX_DISABLE
    l += [KconfigCheck('security_policy', 'my', 'SECURITY_SELINUX_DEBUG', 'is not set')]
    l += [OR(KconfigCheck('security_policy', 'my', 'SECURITY_SELINUX', 'y'),
             KconfigCheck('security_policy', 'my', 'SECURITY_APPARMOR', 'y'),
             KconfigCheck('security_policy', 'my', 'SECURITY_SMACK', 'y'),
             KconfigCheck('security_policy', 'my', 'SECURITY_TOMOYO', 'y'))] # one of major LSMs implementing MAC

    # 'cut_attack_surface', 'defconfig'
    l += [KconfigCheck('cut_attack_surface', 'defconfig', 'SECCOMP', 'y')]
    l += [KconfigCheck('cut_attack_surface', 'defconfig', 'SECCOMP_FILTER', 'y')]
    l += [OR(KconfigCheck('cut_attack_surface', 'defconfig', 'BPF_UNPRIV_DEFAULT_OFF', 'y'),
             bpf_syscall_not_set)] # see unprivileged_bpf_disabled
    if arch in ('X86_64', 'ARM64', 'X86_32'):
        l += [OR(KconfigCheck('cut_attack_surface', 'defconfig', 'STRICT_DEVMEM', 'y'),
                 devmem_not_set)] # refers to LOCKDOWN
    if arch in ('X86_64', 'X86_32'):
        l += [KconfigCheck('cut_attack_surface', 'defconfig', 'X86_INTEL_TSX_MODE_OFF', 'y')] # tsx=off

    # 'cut_attack_surface', 'kspp'
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'SECURITY_DMESG_RESTRICT', 'y')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'ACPI_CUSTOM_METHOD', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'COMPAT_BRK', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'DEVKMEM', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'BINFMT_MISC', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'INET_DIAG', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'KEXEC', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'PROC_KCORE', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'LEGACY_PTYS', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'HIBERNATION', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'COMPAT', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'IA32_EMULATION', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'X86_X32', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'X86_X32_ABI', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'MODIFY_LDT_SYSCALL', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'OABI_COMPAT', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'X86_MSR', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'kspp', 'LEGACY_TIOCSTI', 'is not set')]
    l += [modules_not_set]
    l += [devmem_not_set]
    l += [OR(KconfigCheck('cut_attack_surface', 'kspp', 'IO_STRICT_DEVMEM', 'y'),
             devmem_not_set)] # refers to LOCKDOWN
    l += [AND(KconfigCheck('cut_attack_surface', 'kspp', 'LDISC_AUTOLOAD', 'is not set'),
              KconfigCheck('cut_attack_surface', 'kspp', 'LDISC_AUTOLOAD', 'is present'))]
    if arch in ('X86_64', 'X86_32'):
        l += [KconfigCheck('cut_attack_surface', 'kspp', 'COMPAT_VDSO', 'is not set')]
        # CONFIG_COMPAT_VDSO disabled ASLR of vDSO only on X86_64 and X86_32;
        # on ARM64 this option has different meaning
    if arch == 'X86_64':
        l += [OR(KconfigCheck('cut_attack_surface', 'kspp', 'X86_VSYSCALL_EMULATION', 'is not set'),
                 KconfigCheck('cut_attack_surface', 'kspp', 'LEGACY_VSYSCALL_NONE', 'y'))]
        # disabling X86_VSYSCALL_EMULATION turns vsyscall off completely,
        # and LEGACY_VSYSCALL_NONE can be changed at boot time via the cmdline parameter
    if arch == 'ARM':
        l += [OR(KconfigCheck('cut_attack_surface', 'kspp', 'STRICT_DEVMEM', 'y'),
                 devmem_not_set)] # refers to LOCKDOWN

    # 'cut_attack_surface', 'grsec'
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'ZSMALLOC_STAT', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'PAGE_OWNER', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'DEBUG_KMEMLEAK', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'BINFMT_AOUT', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'KPROBE_EVENTS', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'UPROBE_EVENTS', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'GENERIC_TRACER', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'FUNCTION_TRACER', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'STACK_TRACER', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'HIST_TRIGGERS', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'BLK_DEV_IO_TRACE', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'PROC_VMCORE', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'PROC_PAGE_MONITOR', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'USELIB', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'CHECKPOINT_RESTORE', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'USERFAULTFD', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'HWPOISON_INJECT', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'MEM_SOFT_DIRTY', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'DEVPORT', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'DEBUG_FS', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'NOTIFIER_ERROR_INJECTION', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'FAIL_FUTEX', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'PUNIT_ATOM_DEBUG', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'ACPI_CONFIGFS', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'EDAC_DEBUG', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'DRM_I915_DEBUG', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'BCACHE_CLOSURES_DEBUG', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'DVB_C8SECTPFE', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'MTD_SLRAM', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'MTD_PHRAM', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'IO_URING', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'KCMP', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'RSEQ', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'LATENCYTOP', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'KCOV', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'PROVIDE_OHCI1394_DMA_INIT', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'grsec', 'SUNRPC_DEBUG', 'is not set')]
    l += [AND(KconfigCheck('cut_attack_surface', 'grsec', 'PTDUMP_DEBUGFS', 'is not set'),
              KconfigCheck('cut_attack_surface', 'grsec', 'X86_PTDUMP', 'is not set'))]

    # 'cut_attack_surface', 'maintainer'
    l += [KconfigCheck('cut_attack_surface', 'maintainer', 'DRM_LEGACY', 'is not set')] # recommended by Daniel Vetter in /issues/38
    l += [KconfigCheck('cut_attack_surface', 'maintainer', 'FB', 'is not set')] # recommended by Daniel Vetter in /issues/38
    l += [KconfigCheck('cut_attack_surface', 'maintainer', 'VT', 'is not set')] # recommended by Daniel Vetter in /issues/38
    l += [KconfigCheck('cut_attack_surface', 'maintainer', 'BLK_DEV_FD', 'is not set')] # recommended by Denis Efremov in /pull/54
    l += [KconfigCheck('cut_attack_surface', 'maintainer', 'BLK_DEV_FD_RAWCMD', 'is not set')] # recommended by Denis Efremov in /pull/62
    l += [KconfigCheck('cut_attack_surface', 'maintainer', 'NOUVEAU_LEGACY_CTX_SUPPORT', 'is not set')]
          # recommended by Dave Airlie in kernel commit b30a43ac7132cdda

    # 'cut_attack_surface', 'clipos'
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'STAGING', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'KSM', 'is not set')] # to prevent FLUSH+RELOAD attack
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'KALLSYMS', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'MAGIC_SYSRQ', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'KEXEC_FILE', 'is not set')] # refers to LOCKDOWN (permissive)
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'USER_NS', 'is not set')] # user.max_user_namespaces=0
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'X86_CPUID', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'X86_IOPL_IOPERM', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'ACPI_TABLE_UPGRADE', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'clipos', 'EFI_CUSTOM_SSDT_OVERLAYS', 'is not set')]
    # l += [KconfigCheck('cut_attack_surface', 'clipos', 'IKCONFIG', 'is not set')] # no, IKCONFIG is needed for this check :)

    # 'cut_attack_surface', 'lockdown'
    l += [KconfigCheck('cut_attack_surface', 'lockdown', 'EFI_TEST', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'lockdown', 'MMIOTRACE_TEST', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'lockdown', 'KPROBES', 'is not set')] # refers to LOCKDOWN
    l += [bpf_syscall_not_set] # refers to LOCKDOWN

    # 'cut_attack_surface', 'my'
    l += [KconfigCheck('cut_attack_surface', 'my', 'MMIOTRACE', 'is not set')] # refers to LOCKDOWN (permissive)
    l += [KconfigCheck('cut_attack_surface', 'my', 'LIVEPATCH', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'IP_DCCP', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'IP_SCTP', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'FTRACE', 'is not set')] # refers to LOCKDOWN
    l += [KconfigCheck('cut_attack_surface', 'my', 'VIDEO_VIVID', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'INPUT_EVBUG', 'is not set')] # Can be used as a keylogger
    l += [KconfigCheck('cut_attack_surface', 'my', 'KGDB', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'AIO', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'CORESIGHT', 'is not set')]
    l += [KconfigCheck('cut_attack_surface', 'my', 'XFS_SUPPORT_V4', 'is not set')]
    l += [OR(KconfigCheck('cut_attack_surface', 'my', 'TRIM_UNUSED_KSYMS', 'y'),
             modules_not_set)]
    l += [KconfigCheck('cut_attack_surface', 'my', 'MODULE_FORCE_LOAD', 'is not set')]

    # 'harden_userspace'
    if arch == 'ARM64':
        l += [KconfigCheck('harden_userspace', 'defconfig', 'ARM64_PTR_AUTH', 'y')]
        l += [KconfigCheck('harden_userspace', 'defconfig', 'ARM64_BTI', 'y')]
    if arch in ('ARM', 'X86_32'):
        l += [KconfigCheck('harden_userspace', 'defconfig', 'VMSPLIT_3G', 'y')]
    l += [KconfigCheck('harden_userspace', 'clipos', 'COREDUMP', 'is not set')]
    l += [KconfigCheck('harden_userspace', 'my', 'ARCH_MMAP_RND_BITS', 'MAX')] # 'MAX' value is refined using ARCH_MMAP_RND_BITS_MAX
399 def add_cmdline_checks(l, arch):
400 assert(arch), 'empty arch'
402 # Calling the CmdlineCheck class constructor:
403 # CmdlineCheck(reason, decision, name, expected)
405 # [!] Don't add CmdlineChecks in add_kconfig_checks() to avoid wrong results
406 # when the tool doesn't check the cmdline.
408 # [!] Make sure that values of the options in CmdlineChecks need normalization.
409 # For more info see normalize_cmdline_options().
411 # A common pattern for checking the 'param_x' cmdline parameter
412 # that __overrides__ the 'PARAM_X_DEFAULT' kconfig option:
413 # l += [OR(CmdlineCheck(reason, decision, 'param_x', '1'),
414 # AND(KconfigCheck(reason, decision, 'PARAM_X_DEFAULT_ON', 'y'),
415 # CmdlineCheck(reason, decision, 'param_x, 'is not set')))]
417 # Here we don't check the kconfig options or minimal kernel version
418 # required for the cmdline parameters. That would make the checks
419 # very complex and not give a 100% guarantee anyway.
421 # 'self_protection', 'defconfig'
422 l += [CmdlineCheck('self_protection', 'defconfig', 'nosmep', 'is not set')]
423 l += [CmdlineCheck('self_protection', 'defconfig', 'nosmap', 'is not set')]
424 l += [CmdlineCheck('self_protection', 'defconfig', 'nokaslr', 'is not set')]
425 l += [CmdlineCheck('self_protection', 'defconfig', 'nopti', 'is not set')]
426 l += [CmdlineCheck('self_protection', 'defconfig', 'nospectre_v1', 'is not set')]
427 l += [CmdlineCheck('self_protection', 'defconfig', 'nospectre_v2', 'is not set')]
428 l += [CmdlineCheck('self_protection', 'defconfig', 'nospectre_bhb', 'is not set')]
429 l += [CmdlineCheck('self_protection', 'defconfig', 'nospec_store_bypass_disable', 'is not set')]
430 l += [CmdlineCheck('self_protection', 'defconfig', 'dis_ucode_ldr', 'is not set')]
431 l += [CmdlineCheck('self_protection', 'defconfig', 'arm64.nobti', 'is not set')]
432 l += [CmdlineCheck('self_protection', 'defconfig', 'arm64.nopauth', 'is not set')]
433 l += [CmdlineCheck('self_protection', 'defconfig', 'arm64.nomte', 'is not set')]
434 if arch in ('X86_64', 'X86_32'):
435 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spectre_v2', 'is not off'),
436 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
437 CmdlineCheck('self_protection', 'defconfig', 'spectre_v2', 'is not set')))]
438 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spectre_v2_user', 'is not off'),
439 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
440 CmdlineCheck('self_protection', 'defconfig', 'spectre_v2_user', 'is not set')))]
441 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spec_store_bypass_disable', 'is not off'),
442 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
443 CmdlineCheck('self_protection', 'defconfig', 'spec_store_bypass_disable', 'is not set')))]
444 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'l1tf', 'is not off'),
445 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
446 CmdlineCheck('self_protection', 'defconfig', 'l1tf', 'is not set')))]
447 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'mds', 'is not off'),
448 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
449 CmdlineCheck('self_protection', 'defconfig', 'mds', 'is not set')))]
450 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'tsx_async_abort', 'is not off'),
451 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
452 CmdlineCheck('self_protection', 'defconfig', 'tsx_async_abort', 'is not set')))]
453 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'srbds', 'is not off'),
454 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
455 CmdlineCheck('self_protection', 'defconfig', 'srbds', 'is not set')))]
456 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'mmio_stale_data', 'is not off'),
457 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
458 CmdlineCheck('self_protection', 'defconfig', 'mmio_stale_data', 'is not set')))]
459 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'retbleed', 'is not off'),
460 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
461 CmdlineCheck('self_protection', 'defconfig', 'retbleed', 'is not set')))]
462 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spec_rstack_overflow', 'is not off'),
463 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
464 CmdlineCheck('self_protection', 'defconfig', 'spec_rstack_overflow', 'is not set')))]
465 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'gather_data_sampling', 'is not off'),
466 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
467 CmdlineCheck('self_protection', 'defconfig', 'gather_data_sampling', 'is not set')))]
469 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'kpti', 'is not off'),
470 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
471 CmdlineCheck('self_protection', 'defconfig', 'kpti', 'is not set')))]
472 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'ssbd', 'kernel'),
473 CmdlineCheck('self_protection', 'my', 'ssbd', 'force-on'),
474 AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'),
475 CmdlineCheck('self_protection', 'defconfig', 'ssbd', 'is not set')))]
476 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'rodata', 'full'),
477 AND(KconfigCheck('self_protection', 'defconfig', 'RODATA_FULL_DEFAULT_ENABLED', 'y'),
478 CmdlineCheck('self_protection', 'defconfig', 'rodata', 'is not set')))]
480 l += [OR(CmdlineCheck('self_protection', 'defconfig', 'rodata', 'on'),
481 CmdlineCheck('self_protection', 'defconfig', 'rodata', 'is not set'))]
    # 'self_protection', 'kspp'
    l += [CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt')]
    l += [CmdlineCheck('self_protection', 'kspp', 'slab_merge', 'is not set')] # consequence of 'slab_nomerge' by kspp
    l += [CmdlineCheck('self_protection', 'kspp', 'slub_merge', 'is not set')] # consequence of 'slab_nomerge' by kspp
    l += [CmdlineCheck('self_protection', 'kspp', 'page_alloc.shuffle', '1')]
    # 'slab_nomerge' on the cmdline is also satisfied by disabling merging in Kconfig,
    # as long as it is not re-enabled via the 'slab_merge'/'slub_merge' parameters
    l += [OR(CmdlineCheck('self_protection', 'kspp', 'slab_nomerge', 'is present'),
             AND(KconfigCheck('self_protection', 'clipos', 'SLAB_MERGE_DEFAULT', 'is not set'),
                 CmdlineCheck('self_protection', 'kspp', 'slab_merge', 'is not set'),
                 CmdlineCheck('self_protection', 'kspp', 'slub_merge', 'is not set')))]
    # leaving the cmdline option unset is fine if the corresponding default is enabled in Kconfig
    l += [OR(CmdlineCheck('self_protection', 'kspp', 'init_on_alloc', '1'),
             AND(KconfigCheck('self_protection', 'kspp', 'INIT_ON_ALLOC_DEFAULT_ON', 'y'),
                 CmdlineCheck('self_protection', 'kspp', 'init_on_alloc', 'is not set')))]
    # the third alternative: zeroing on free via page poisoning
    # (PAGE_POISONING_ZERO=y + page_poison=1 + slub_debug=P)
    l += [OR(CmdlineCheck('self_protection', 'kspp', 'init_on_free', '1'),
             AND(KconfigCheck('self_protection', 'kspp', 'INIT_ON_FREE_DEFAULT_ON', 'y'),
                 CmdlineCheck('self_protection', 'kspp', 'init_on_free', 'is not set')),
             AND(CmdlineCheck('self_protection', 'kspp', 'page_poison', '1'),
                 KconfigCheck('self_protection', 'kspp', 'PAGE_POISONING_ZERO', 'y'),
                 CmdlineCheck('self_protection', 'kspp', 'slub_debug', 'P')))]
    l += [OR(CmdlineCheck('self_protection', 'kspp', 'hardened_usercopy', '1'),
             AND(KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY', 'y'),
                 CmdlineCheck('self_protection', 'kspp', 'hardened_usercopy', 'is not set')))]
    l += [AND(CmdlineCheck('self_protection', 'kspp', 'slab_common.usercopy_fallback', 'is not set'),
              KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY_FALLBACK', 'is not set'))]
              # don't require slab_common.usercopy_fallback=0,
              # since HARDENED_USERCOPY_FALLBACK was removed in Linux v5.16
    if arch in ('X86_64', 'ARM64', 'X86_32'):
        # IOMMU checks only make sense on arches with IOMMU support
        l += [OR(CmdlineCheck('self_protection', 'kspp', 'iommu.strict', '1'),
                 AND(KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_DMA_STRICT', 'y'),
                     CmdlineCheck('self_protection', 'kspp', 'iommu.strict', 'is not set')))]
        l += [OR(CmdlineCheck('self_protection', 'kspp', 'iommu.passthrough', '0'),
                 AND(KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_PASSTHROUGH', 'is not set'),
                     CmdlineCheck('self_protection', 'kspp', 'iommu.passthrough', 'is not set')))]
        l += [OR(CmdlineCheck('self_protection', 'kspp', 'randomize_kstack_offset', '1'),
                 AND(KconfigCheck('self_protection', 'kspp', 'RANDOMIZE_KSTACK_OFFSET_DEFAULT', 'y'),
                     CmdlineCheck('self_protection', 'kspp', 'randomize_kstack_offset', 'is not set')))]
    if arch in ('X86_64', 'X86_32'):
        # kernel page-table isolation must be on and not disabled via 'nopti'
        l += [AND(CmdlineCheck('self_protection', 'kspp', 'pti', 'on'),
                  CmdlineCheck('self_protection', 'defconfig', 'nopti', 'is not set'))]
522 # 'self_protection', 'clipos'
523 if arch in ('X86_64', 'X86_32'):
524 l += [CmdlineCheck('self_protection', 'clipos', 'iommu', 'force')]
526 # 'self_protection', 'my'
527 l += [OR(CmdlineCheck('self_protection', 'my', 'kfence.sample_interval', 'is not off'),
528 AND(KconfigCheck('self_protection', 'my', 'KFENCE_SAMPLE_INTERVAL', 'is not off'),
529 CmdlineCheck('self_protection', 'my', 'kfence.sample_interval', 'is not set')))]
531 # 'cut_attack_surface', 'defconfig'
532 if arch in ('X86_64', 'X86_32'):
533 l += [OR(CmdlineCheck('cut_attack_surface', 'defconfig', 'tsx', 'off'),
534 AND(KconfigCheck('cut_attack_surface', 'defconfig', 'X86_INTEL_TSX_MODE_OFF', 'y'),
535 CmdlineCheck('cut_attack_surface', 'defconfig', 'tsx', 'is not set')))]
537 # 'cut_attack_surface', 'kspp'
538 l += [CmdlineCheck('cut_attack_surface', 'kspp', 'nosmt', 'is present')] # slow (high performance penalty)
540 l += [OR(CmdlineCheck('cut_attack_surface', 'kspp', 'vsyscall', 'none'),
541 KconfigCheck('cut_attack_surface', 'kspp', 'X86_VSYSCALL_EMULATION', 'is not set'),
542 AND(KconfigCheck('cut_attack_surface', 'kspp', 'LEGACY_VSYSCALL_NONE', 'y'),
543 CmdlineCheck('cut_attack_surface', 'kspp', 'vsyscall', 'is not set')))]
544 l += [OR(CmdlineCheck('cut_attack_surface', 'my', 'vdso32', '1'),
545 CmdlineCheck('cut_attack_surface', 'my', 'vdso32', '0'),
546 AND(KconfigCheck('cut_attack_surface', 'kspp', 'COMPAT_VDSO', 'is not set'),
547 CmdlineCheck('cut_attack_surface', 'my', 'vdso32', 'is not set')))] # the vdso32 parameter must not be 2
549 l += [OR(CmdlineCheck('cut_attack_surface', 'my', 'vdso32', '1'),
550 CmdlineCheck('cut_attack_surface', 'my', 'vdso', '1'),
551 CmdlineCheck('cut_attack_surface', 'my', 'vdso32', '0'),
552 CmdlineCheck('cut_attack_surface', 'my', 'vdso', '0'),
553 AND(KconfigCheck('cut_attack_surface', 'kspp', 'COMPAT_VDSO', 'is not set'),
554 CmdlineCheck('cut_attack_surface', 'my', 'vdso32', 'is not set'),
555 CmdlineCheck('cut_attack_surface', 'my', 'vdso', 'is not set')))] # the vdso and vdso32 parameters must not be 2
557 # 'cut_attack_surface', 'grsec'
558 # The cmdline checks compatible with the kconfig options disabled by grsecurity...
559 l += [OR(CmdlineCheck('cut_attack_surface', 'grsec', 'debugfs', 'off'),
560 KconfigCheck('cut_attack_surface', 'grsec', 'DEBUG_FS', 'is not set'))] # ... the end
562 # 'cut_attack_surface', 'my'
563 l += [CmdlineCheck('cut_attack_surface', 'my', 'sysrq_always_enabled', 'is not set')]
566 l += [CmdlineCheck('harden_userspace', 'defconfig', 'norandmaps', 'is not set')]
# Cmdline options that the Linux kernel does NOT parse with kstrtobool();
# their values must be preserved as-is by normalize_cmdline_options() below.
no_kstrtobool_options = [
    'debugfs',                # See debugfs_kernel() in fs/debugfs/inode.c
    'mitigations',            # See mitigations_parse_cmdline() in kernel/cpu.c
    'pti',                    # See pti_check_boottime_disable() in arch/x86/mm/pti.c
    'spectre_v2',             # See spectre_v2_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
    'spectre_v2_user',        # See spectre_v2_parse_user_cmdline() in arch/x86/kernel/cpu/bugs.c
    'spec_store_bypass_disable', # See ssb_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
    'l1tf',                   # See l1tf_cmdline() in arch/x86/kernel/cpu/bugs.c
    'mds',                    # See mds_cmdline() in arch/x86/kernel/cpu/bugs.c
    'tsx_async_abort',        # See tsx_async_abort_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
    'srbds',                  # See srbds_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
    'mmio_stale_data',        # See mmio_stale_data_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
    'retbleed',               # See retbleed_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
    'rodata',                 # See set_debug_rodata() in init/main.c
    'ssbd',                   # See parse_spectre_v4_param() in arch/arm64/kernel/proton-pack.c
    'spec_rstack_overflow',   # See srso_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
    'gather_data_sampling',   # See gds_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
    'slub_debug',             # See setup_slub_debug() in mm/slub.c
    'iommu',                  # See iommu_setup() in arch/x86/kernel/pci-dma.c
    'vsyscall',               # See vsyscall_setup() in arch/x86/entry/vsyscall/vsyscall_64.c
    'vdso32',                 # See vdso32_setup() in arch/x86/entry/vdso/vdso32-setup.c
    'vdso',                   # See vdso32_setup() in arch/x86/entry/vdso/vdso32-setup.c
    'tsx'                     # See tsx_init() in arch/x86/kernel/cpu/tsx.c
] # the end of no_kstrtobool_options


def normalize_cmdline_options(option, value):
    """Normalize a cmdline option value for comparison.

    Returns '1' or '0' for values that the kernel's kstrtobool() would
    accept as true/false, and returns the value unchanged otherwise.
    """
    # Don't normalize the cmdline option values if
    # the Linux kernel doesn't use kstrtobool() for them
    if option in no_kstrtobool_options:
        return value

    # Implement a limited part of the kstrtobool() logic
    if value.lower() in ('1', 'on', 'y', 'yes', 't', 'true'):
        return '1'
    if value.lower() in ('0', 'off', 'n', 'no', 'f', 'false'):
        return '0'

    # Preserve unique values
    return value
611 # TODO: draft of security hardening sysctls:
612 # what about bpf_jit_enable?
613 # vm.mmap_min_addr has a good value
614 # nosmt sysfs control file
615 # vm.mmap_rnd_bits=max (?)
617 # abi.vsyscall32 (any value except 2)
618 # kernel.oops_limit (think about a proper value)
619 # kernel.warn_limit (think about a proper value)
620 # net.ipv4.tcp_syncookies=1 (?)
def add_sysctl_checks(l, arch):
    """Append the sysctl hardening checks to the list *l*.

    This function may be called with arch=None.

    Each check is built as SysctlCheck(reason, decision, name, expected).
    """
    l += [SysctlCheck('self_protection', 'kspp', 'net.core.bpf_jit_harden', '2')]

    # Attack-surface reduction recommended by KSPP
    kspp_cut_attack_surface = (
        ('kernel.dmesg_restrict', '1'),
        ('kernel.perf_event_paranoid', '3'), # with a custom patch, see https://lwn.net/Articles/696216/
        ('kernel.kexec_load_disabled', '1'),
        ('user.max_user_namespaces', '0'),
        ('dev.tty.ldisc_autoload', '0'),
        ('kernel.unprivileged_bpf_disabled', '1'),
        ('kernel.kptr_restrict', '2'),
        ('dev.tty.legacy_tiocsti', '0'),
        # At first, vm.unprivileged_userfaultfd disabled unprivileged userfaultfd,
        # and since v5.11 it enables unprivileged userfaultfd for user-mode only.
        ('vm.unprivileged_userfaultfd', '0'),
    )
    l += [SysctlCheck('cut_attack_surface', 'kspp', name, expected)
          for name, expected in kspp_cut_attack_surface]

    l += [SysctlCheck('cut_attack_surface', 'clipos', 'kernel.modules_disabled', '1')] # radical, but may be useful in some cases

    # Userspace hardening recommended by KSPP
    kspp_harden_userspace = (
        ('fs.protected_symlinks', '1'),
        ('fs.protected_hardlinks', '1'),
        ('fs.protected_fifos', '2'),
        ('fs.protected_regular', '2'),
        ('fs.suid_dumpable', '0'),
        ('kernel.randomize_va_space', '2'),
        ('kernel.yama.ptrace_scope', '3'),
    )
    l += [SysctlCheck('harden_userspace', 'kspp', name, expected)
          for name, expected in kspp_harden_userspace]