-# against my hardening preferences for X86_64, ARM64, X86_32, and ARM.
+# against my security hardening preferences for X86_64, ARM64, X86_32, and ARM.
# kernel.perf_event_paranoid=3
# kernel.kexec_load_disabled=1
# kernel.yama.ptrace_scope=3
            if not ret:
                # This FAIL is caused by additional checks,
                # and not by the main option that this AND-check is about.
             VerCheck((5, 5)))] # REFCOUNT_FULL is enabled by default since v5.5
    iommu_support_is_set = OptCheck('self_protection', 'defconfig', 'IOMMU_SUPPORT', 'y')
    l += [iommu_support_is_set] # is needed for mitigating DMA attacks
+ if arch in ('X86_64', 'ARM64', 'X86_32'):
+ l += [OptCheck('self_protection', 'defconfig', 'RANDOMIZE_BASE', 'y')]
+ l += [OptCheck('self_protection', 'defconfig', 'THREAD_INFO_IN_TASK', 'y')]
+ if arch in ('X86_64', 'ARM64'):
+ l += [OptCheck('self_protection', 'defconfig', 'VMAP_STACK', 'y')]
    if arch in ('X86_64', 'X86_32'):
        l += [OptCheck('self_protection', 'defconfig', 'MICROCODE', 'y')] # is needed for mitigating CPU bugs
        l += [OptCheck('self_protection', 'defconfig', 'RETPOLINE', 'y')]
        l += [OptCheck('self_protection', 'defconfig', 'SYN_COOKIES', 'y')] # another reason?
        l += [OR(OptCheck('self_protection', 'defconfig', 'X86_UMIP', 'y'),
                 OptCheck('self_protection', 'defconfig', 'X86_INTEL_UMIP', 'y'))]
    if arch == 'X86_64':
        l += [OptCheck('self_protection', 'defconfig', 'PAGE_TABLE_ISOLATION', 'y')]
        l += [OptCheck('self_protection', 'defconfig', 'RANDOMIZE_MEMORY', 'y')]
                  iommu_support_is_set)]
    if arch == 'ARM64':
        l += [OptCheck('self_protection', 'defconfig', 'ARM64_PAN', 'y')]
        l += [OptCheck('self_protection', 'defconfig', 'UNMAP_KERNEL_AT_EL0', 'y')]
        l += [OR(OptCheck('self_protection', 'defconfig', 'HARDEN_EL2_VECTORS', 'y'),
- VerCheck((5,9)))] # HARDEN_EL2_VECTORS was removed in v5.9
+ AND(OptCheck('self_protection', 'defconfig', 'RANDOMIZE_BASE', 'y'),
+ VerCheck((5, 9))))] # HARDEN_EL2_VECTORS was included in RANDOMIZE_BASE in v5.9
        l += [OptCheck('self_protection', 'defconfig', 'RODATA_FULL_DEFAULT_ENABLED', 'y')]
        l += [OptCheck('self_protection', 'defconfig', 'ARM64_PTR_AUTH', 'y')]
- if arch in ('X86_64', 'ARM64'):
- l += [OptCheck('self_protection', 'defconfig', 'VMAP_STACK', 'y')]
- if arch in ('X86_64', 'ARM64', 'X86_32'):
- l += [OptCheck('self_protection', 'defconfig', 'RANDOMIZE_BASE', 'y')]
- l += [OptCheck('self_protection', 'defconfig', 'THREAD_INFO_IN_TASK', 'y')]
+ l += [OptCheck('self_protection', 'defconfig', 'ARM64_BTI_KERNEL', 'y')]
+ l += [OR(OptCheck('self_protection', 'defconfig', 'HARDEN_BRANCH_PREDICTOR', 'y'),
+ VerCheck((5, 10)))] # HARDEN_BRANCH_PREDICTOR is enabled by default since v5.10
+ l += [OptCheck('self_protection', 'defconfig', 'ARM64_MTE', 'y')]
    if arch == 'ARM':
        l += [OptCheck('self_protection', 'defconfig', 'CPU_SW_DOMAIN_PAN', 'y')]
        l += [OptCheck('self_protection', 'defconfig', 'HARDEN_BRANCH_PREDICTOR', 'y')]
    # 'self_protection', 'kspp'
    l += [OptCheck('self_protection', 'kspp', 'BUG_ON_DATA_CORRUPTION', 'y')]
    l += [OptCheck('self_protection', 'kspp', 'DEBUG_WX', 'y')]
    l += [OptCheck('self_protection', 'kspp', 'SCHED_STACK_END_CHECK', 'y')]
              modules_not_set)]
    l += [OR(OptCheck('self_protection', 'kspp', 'MODULE_SIG_FORCE', 'y'),
             modules_not_set)] # refers to LOCKDOWN
- l += [OR(OptCheck('self_protection', 'kspp', 'INIT_STACK_ALL', 'y'),
+ l += [OR(OptCheck('self_protection', 'kspp', 'INIT_STACK_ALL_ZERO', 'y'),
             OptCheck('self_protection', 'kspp', 'GCC_PLUGIN_STRUCTLEAK_BYREF_ALL', 'y'))]
    l += [OR(OptCheck('self_protection', 'kspp', 'INIT_ON_FREE_DEFAULT_ON', 'y'),
- OptCheck('self_protection', 'kspp', 'PAGE_POISONING', 'y'))] # before v5.3
+ OptCheck('self_protection', 'kspp', 'PAGE_POISONING_ZERO', 'y'))]
+ # CONFIG_INIT_ON_FREE_DEFAULT_ON was added in v5.3.
+ # CONFIG_PAGE_POISONING_ZERO was removed in v5.11.
+ # Starting from v5.11 CONFIG_PAGE_POISONING unconditionally checks
+ # the 0xAA poison pattern on allocation.
+ # That brings higher performance penalty.
    if arch in ('X86_64', 'ARM64', 'X86_32'):
        stackleak_is_set = OptCheck('self_protection', 'kspp', 'GCC_PLUGIN_STACKLEAK', 'y')
        l += [stackleak_is_set]
    if arch in ('X86_64', 'X86_32'):
        l += [OptCheck('self_protection', 'kspp', 'DEFAULT_MMAP_MIN_ADDR', '65536')]
+ if arch in ('ARM64', 'ARM'):
+ l += [OptCheck('self_protection', 'kspp', 'DEFAULT_MMAP_MIN_ADDR', '32768')]
+ l += [OptCheck('self_protection', 'kspp', 'SYN_COOKIES', 'y')] # another reason?
+ if arch == 'ARM64':
+ l += [OptCheck('self_protection', 'kspp', 'ARM64_SW_TTBR0_PAN', 'y')]
    if arch == 'X86_32':
        l += [OptCheck('self_protection', 'kspp', 'PAGE_TABLE_ISOLATION', 'y')]
        l += [OptCheck('self_protection', 'kspp', 'HIGHMEM64G', 'y')]
        l += [OptCheck('self_protection', 'kspp', 'X86_PAE', 'y')]
- if arch == 'ARM64':
- l += [OptCheck('self_protection', 'kspp', 'ARM64_SW_TTBR0_PAN', 'y')]
- if arch in ('ARM64', 'ARM'):
- l += [OptCheck('self_protection', 'kspp', 'SYN_COOKIES', 'y')] # another reason?
- l += [OptCheck('self_protection', 'kspp', 'DEFAULT_MMAP_MIN_ADDR', '32768')]
    l += [OptCheck('self_protection', 'clipos', 'DEBUG_VIRTUAL', 'y')]
    l += [OptCheck('self_protection', 'clipos', 'STATIC_USERMODEHELPER', 'y')] # needs userspace support
    l += [OptCheck('self_protection', 'clipos', 'EFI_DISABLE_PCI_DMA', 'y')]
    l += [AND(OptCheck('self_protection', 'clipos', 'STACKLEAK_RUNTIME_DISABLE', 'is not set'),
              stackleak_is_set)]
    if arch in ('X86_64', 'X86_32'):
        l += [AND(OptCheck('self_protection', 'clipos', 'INTEL_IOMMU_DEFAULT_ON', 'y'),
                  iommu_support_is_set)]
    if arch == 'X86_32':
        l += [AND(OptCheck('self_protection', 'clipos', 'INTEL_IOMMU', 'y'),
                  iommu_support_is_set)]
    # 'self_protection', 'my'
- l += [OptCheck('self_protection', 'my', 'SLUB_DEBUG_ON', 'y')] # TODO: is it better to set that via kernel cmd?
+ l += [AND(OptCheck('self_protection', 'my', 'UBSAN_BOUNDS', 'y'),
+ OptCheck('self_protection', 'my', 'UBSAN_MISC', 'is not set'),
+ OptCheck('self_protection', 'my', 'UBSAN_TRAP', 'y'))]
    l += [OptCheck('self_protection', 'my', 'RESET_ATTACK_MITIGATION', 'y')] # needs userspace support (systemd)
+ if arch in ('X86_64', 'ARM64', 'X86_32'):
+ l += [OptCheck('self_protection', 'my', 'RANDOMIZE_KSTACK_OFFSET_DEFAULT', 'y')]
    if arch == 'X86_64':
        l += [AND(OptCheck('self_protection', 'my', 'AMD_IOMMU_V2', 'y'),
                  iommu_support_is_set)]
+ if arch == 'ARM64':
+ l += [OptCheck('self_protection', 'my', 'SHADOW_CALL_STACK', 'y')] # depends on clang, maybe it's alternative to STACKPROTECTOR_STRONG
+ l += [OptCheck('self_protection', 'my', 'KASAN_HW_TAGS', 'y')]
+ cfi_clang_is_set = OptCheck('self_protection', 'my', 'CFI_CLANG', 'y')
+ l += [cfi_clang_is_set]
+ l += [AND(OptCheck('self_protection', 'my', 'CFI_PERMISSIVE', 'is not set'),
+ cfi_clang_is_set)]
    l += [OptCheck('cut_attack_surface', 'maintainer', 'DRM_LEGACY', 'is not set')]
    l += [OptCheck('cut_attack_surface', 'maintainer', 'FB', 'is not set')]
    l += [OptCheck('cut_attack_surface', 'maintainer', 'VT', 'is not set')]
    # 'cut_attack_surface', 'grapheneos'
    l += [OptCheck('cut_attack_surface', 'grapheneos', 'AIO', 'is not set')]
    l += [OptCheck('cut_attack_surface', 'lockdown', 'MMIOTRACE_TEST', 'is not set')] # refers to LOCKDOWN
    # 'cut_attack_surface', 'my'
    l += [OptCheck('cut_attack_surface', 'my', 'MMIOTRACE', 'is not set')] # refers to LOCKDOWN (permissive)
    l += [OptCheck('cut_attack_surface', 'my', 'LIVEPATCH', 'is not set')]
    l += [OptCheck('cut_attack_surface', 'my', 'IP_DCCP', 'is not set')]
    l += [OptCheck('userspace_hardening', 'defconfig', 'INTEGRITY', 'y')]
    if arch == 'ARM':
        l += [OptCheck('userspace_hardening', 'my', 'INTEGRITY', 'y')]
    if arch in ('ARM', 'X86_32'):
        l += [OptCheck('userspace_hardening', 'defconfig', 'VMSPLIT_3G', 'y')]
    if arch in ('X86_64', 'ARM64'):
- if hasattr(opt, 'opts'):
- # prepare ComplexOptCheck
- for o in opt.opts:
- if hasattr(o, 'opts'):
- # Recursion for nested ComplexOptChecks
- perform_check(o, parsed_options, kernel_version)
- if hasattr(o, 'state'):
- o.state = parsed_options.get(o.name, None)
- if hasattr(o, 'ver'):
- o.ver = kernel_version
- else:
- # prepare simple check, opt.state is mandatory
- if not hasattr(opt, 'state'):
- sys.exit('[!] ERROR: bad simple check {}'.format(vars(opt)))
- opt.state = parsed_options.get(opt.name, None)
- opt.check()
+ if hasattr(opt, 'opts'):
+ # prepare ComplexOptCheck
+ for o in opt.opts:
+ if hasattr(o, 'opts'):
+ # Recursion for nested ComplexOptChecks
+ perform_check(o, parsed_options, kernel_version)
+ if hasattr(o, 'state'):
+ o.state = parsed_options.get(o.name, None)
+ if hasattr(o, 'ver'):
+ o.ver = kernel_version
+ else:
+ # prepare simple check, opt.state is mandatory
+ if not hasattr(opt, 'state'):
+ sys.exit('[!] ERROR: bad simple check {}'.format(vars(opt)))
+ opt.state = parsed_options.get(opt.name, None)
+ opt.check()
    report_modes = ['verbose', 'json', 'show_ok', 'show_fail']
    supported_archs = ['X86_64', 'X86_32', 'ARM64', 'ARM']
    parser = ArgumentParser(prog='kconfig-hardened-check',
    parser.add_argument('--version', action='version', version='%(prog)s ' + __version__)
    parser.add_argument('-p', '--print', choices=supported_archs,
    parser.add_argument('-c', '--config',
                        help='check the kernel config file against these preferences')
    parser.add_argument('-m', '--mode', choices=report_modes,