X-Git-Url: https://jxself.org/git/?a=blobdiff_plain;f=kconfig_hardened_check%2F__init__.py;h=8e08fd2da4df97b95b3cd3a4ae668e77d88e2616;hb=15c16ac1a4aa7c620471af04ab786f1edf7ce28c;hp=64731e5d534bddf73ee2d44c5b36b5d140d9898d;hpb=b9ce5ca8e4652e9741cbbeaf8a168e2d7fc6e3fb;p=kconfig-hardened-check.git

diff --git a/kconfig_hardened_check/__init__.py b/kconfig_hardened_check/__init__.py
index 64731e5..8e08fd2 100644
--- a/kconfig_hardened_check/__init__.py
+++ b/kconfig_hardened_check/__init__.py
@@ -20,13 +20,6 @@
 # tsx=off
 # ARM64:
 # kpti=on
-# ssbd=force-on
-#
-# Should NOT be set:
-# sysrq_always_enabled
-# arm64.nobti
-# arm64.nopauth
-# arm64.nomte
 #
 # Hardware tag-based KASAN with arm64 Memory Tagging Extension (MTE):
 # kasan=on
@@ -369,8 +362,11 @@ def add_kconfig_checks(l, arch):
     # 'self_protection', 'defconfig'
     l += [KconfigCheck('self_protection', 'defconfig', 'BUG', 'y')]
     l += [KconfigCheck('self_protection', 'defconfig', 'SLUB_DEBUG', 'y')]
+    l += [KconfigCheck('self_protection', 'defconfig', 'THREAD_INFO_IN_TASK', 'y')]
     gcc_plugins_support_is_set = KconfigCheck('self_protection', 'defconfig', 'GCC_PLUGINS', 'y')
     l += [gcc_plugins_support_is_set]
+    iommu_support_is_set = KconfigCheck('self_protection', 'defconfig', 'IOMMU_SUPPORT', 'y')
+    l += [iommu_support_is_set] # is needed for mitigating DMA attacks
     l += [OR(KconfigCheck('self_protection', 'defconfig', 'STACKPROTECTOR', 'y'),
              KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR', 'y'),
              KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR_REGULAR', 'y'),
@@ -385,9 +381,6 @@ def add_kconfig_checks(l, arch):
              modules_not_set)] # DEBUG_SET_MODULE_RONX was before v4.11
     l += [OR(KconfigCheck('self_protection', 'defconfig', 'REFCOUNT_FULL', 'y'),
              VersionCheck((5, 5)))] # REFCOUNT_FULL is enabled by default since v5.5
-    l += [KconfigCheck('self_protection', 'defconfig', 'THREAD_INFO_IN_TASK', 'y')]
-    iommu_support_is_set = KconfigCheck('self_protection', 'defconfig', 'IOMMU_SUPPORT', 'y')
-    l += [iommu_support_is_set] # is needed for mitigating DMA attacks
     if arch in ('X86_64', 'ARM64', 'X86_32'):
         l += [KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_BASE', 'y')]
     if arch in ('X86_64', 'ARM64'):
@@ -398,9 +391,9 @@ def add_kconfig_checks(l, arch):
         l += [KconfigCheck('self_protection', 'defconfig', 'X86_MCE_AMD', 'y')]
         l += [KconfigCheck('self_protection', 'defconfig', 'MICROCODE', 'y')] # is needed for mitigating CPU bugs
         l += [KconfigCheck('self_protection', 'defconfig', 'RETPOLINE', 'y')]
+        l += [KconfigCheck('self_protection', 'defconfig', 'SYN_COOKIES', 'y')] # another reason?
         l += [OR(KconfigCheck('self_protection', 'defconfig', 'X86_SMAP', 'y'),
                  VersionCheck((5, 19)))] # X86_SMAP is enabled by default since v5.19
-        l += [KconfigCheck('self_protection', 'defconfig', 'SYN_COOKIES', 'y')] # another reason?
         l += [OR(KconfigCheck('self_protection', 'defconfig', 'X86_UMIP', 'y'),
                  KconfigCheck('self_protection', 'defconfig', 'X86_INTEL_UMIP', 'y'))]
     if arch in ('ARM64', 'ARM'):
@@ -417,17 +410,17 @@ def add_kconfig_checks(l, arch):
         l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_EPAN', 'y')]
         l += [KconfigCheck('self_protection', 'defconfig', 'UNMAP_KERNEL_AT_EL0', 'y')]
         l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_E0PD', 'y')]
-        l += [OR(KconfigCheck('self_protection', 'defconfig', 'HARDEN_EL2_VECTORS', 'y'),
-                 AND(KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_BASE', 'y'),
-                     VersionCheck((5, 9))))] # HARDEN_EL2_VECTORS was included in RANDOMIZE_BASE in v5.9
         l += [KconfigCheck('self_protection', 'defconfig', 'RODATA_FULL_DEFAULT_ENABLED', 'y')]
         l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_PTR_AUTH_KERNEL', 'y')]
         l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_BTI_KERNEL', 'y')]
-        l += [OR(KconfigCheck('self_protection', 'defconfig', 'HARDEN_BRANCH_PREDICTOR', 'y'),
-                 VersionCheck((5, 10)))] # HARDEN_BRANCH_PREDICTOR is enabled by default since v5.10
         l += [KconfigCheck('self_protection', 'defconfig', 'MITIGATE_SPECTRE_BRANCH_HISTORY', 'y')]
         l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_MTE', 'y')]
         l += [KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_MODULE_REGION_FULL', 'y')]
+        l += [OR(KconfigCheck('self_protection', 'defconfig', 'HARDEN_EL2_VECTORS', 'y'),
+                 AND(KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_BASE', 'y'),
+                     VersionCheck((5, 9))))] # HARDEN_EL2_VECTORS was included in RANDOMIZE_BASE in v5.9
+        l += [OR(KconfigCheck('self_protection', 'defconfig', 'HARDEN_BRANCH_PREDICTOR', 'y'),
+                 VersionCheck((5, 10)))] # HARDEN_BRANCH_PREDICTOR is enabled by default since v5.10
     if arch == 'ARM':
         l += [KconfigCheck('self_protection', 'defconfig', 'CPU_SW_DOMAIN_PAN', 'y')]
         l += [KconfigCheck('self_protection', 'defconfig', 'HARDEN_BRANCH_PREDICTOR', 'y')]
@@ -447,8 +440,6 @@ def add_kconfig_checks(l, arch):
     l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_CREDENTIALS', 'y')]
     l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_NOTIFIERS', 'y')]
     l += [KconfigCheck('self_protection', 'kspp', 'INIT_ON_ALLOC_DEFAULT_ON', 'y')]
-    l += [AND(KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_LATENT_ENTROPY', 'y'),
-              gcc_plugins_support_is_set)]
     l += [KconfigCheck('self_protection', 'kspp', 'KFENCE', 'y')]
     l += [KconfigCheck('self_protection', 'kspp', 'WERROR', 'y')]
     l += [KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_DMA_STRICT', 'y')]
@@ -469,6 +460,8 @@ def add_kconfig_checks(l, arch):
               hardened_usercopy_is_set)]
     l += [AND(KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY_PAGESPAN', 'is not set'),
               hardened_usercopy_is_set)]
+    l += [AND(KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_LATENT_ENTROPY', 'y'),
+              gcc_plugins_support_is_set)]
     l += [OR(KconfigCheck('self_protection', 'kspp', 'MODULE_SIG', 'y'),
              modules_not_set)]
     l += [OR(KconfigCheck('self_protection', 'kspp', 'MODULE_SIG_ALL', 'y'),
@@ -563,13 +556,15 @@ def add_kconfig_checks(l, arch):
     l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_WRITABLE_HOOKS', 'is not set')] # refers to SECURITY_SELINUX_DISABLE

     # 'cut_attack_surface', 'defconfig'
-    l += [OR(KconfigCheck('cut_attack_surface', 'defconfig', 'BPF_UNPRIV_DEFAULT_OFF', 'y'),
-             bpf_syscall_not_set)] # see unprivileged_bpf_disabled
     l += [KconfigCheck('cut_attack_surface', 'defconfig', 'SECCOMP', 'y')]
     l += [KconfigCheck('cut_attack_surface', 'defconfig', 'SECCOMP_FILTER', 'y')]
+    l += [OR(KconfigCheck('cut_attack_surface', 'defconfig', 'BPF_UNPRIV_DEFAULT_OFF', 'y'),
+             bpf_syscall_not_set)] # see unprivileged_bpf_disabled
     if arch in ('X86_64', 'ARM64', 'X86_32'):
         l += [OR(KconfigCheck('cut_attack_surface', 'defconfig', 'STRICT_DEVMEM', 'y'),
                  devmem_not_set)] # refers to LOCKDOWN
+    if arch in ('X86_64', 'X86_32'):
+        l += [KconfigCheck('cut_attack_surface', 'defconfig', 'X86_INTEL_TSX_MODE_OFF', 'y')] # tsx=off

     # 'cut_attack_surface', 'kspp'
     l += [KconfigCheck('cut_attack_surface', 'kspp', 'SECURITY_DMESG_RESTRICT', 'y')]
@@ -594,11 +589,11 @@ def add_kconfig_checks(l, arch):
                  devmem_not_set)] # refers to LOCKDOWN
     l += [AND(KconfigCheck('cut_attack_surface', 'kspp', 'LDISC_AUTOLOAD', 'is not set'),
               KconfigCheck('cut_attack_surface', 'kspp', 'LDISC_AUTOLOAD', 'is present'))]
+    if arch == 'X86_64':
+        l += [KconfigCheck('cut_attack_surface', 'kspp', 'LEGACY_VSYSCALL_NONE', 'y')] # 'vsyscall=none'
     if arch == 'ARM':
         l += [OR(KconfigCheck('cut_attack_surface', 'kspp', 'STRICT_DEVMEM', 'y'),
                  devmem_not_set)] # refers to LOCKDOWN
-    if arch == 'X86_64':
-        l += [KconfigCheck('cut_attack_surface', 'kspp', 'LEGACY_VSYSCALL_NONE', 'y')] # 'vsyscall=none'

     # 'cut_attack_surface', 'grsec'
     l += [KconfigCheck('cut_attack_surface', 'grsec', 'ZSMALLOC_STAT', 'is not set')]
@@ -654,7 +649,6 @@ def add_kconfig_checks(l, arch):
     # 'cut_attack_surface', 'clipos'
     l += [KconfigCheck('cut_attack_surface', 'clipos', 'STAGING', 'is not set')]
     l += [KconfigCheck('cut_attack_surface', 'clipos', 'KSM', 'is not set')] # to prevent FLUSH+RELOAD attack
-#   l += [KconfigCheck('cut_attack_surface', 'clipos', 'IKCONFIG', 'is not set')] # no, IKCONFIG is needed for this check :)
     l += [KconfigCheck('cut_attack_surface', 'clipos', 'KALLSYMS', 'is not set')]
     l += [KconfigCheck('cut_attack_surface', 'clipos', 'X86_VSYSCALL_EMULATION', 'is not set')]
     l += [KconfigCheck('cut_attack_surface', 'clipos', 'MAGIC_SYSRQ', 'is not set')]
@@ -665,18 +659,15 @@ def add_kconfig_checks(l, arch):
     l += [KconfigCheck('cut_attack_surface', 'clipos', 'ACPI_TABLE_UPGRADE', 'is not set')] # refers to LOCKDOWN
     l += [KconfigCheck('cut_attack_surface', 'clipos', 'EFI_CUSTOM_SSDT_OVERLAYS', 'is not set')]
     l += [KconfigCheck('cut_attack_surface', 'clipos', 'COREDUMP', 'is not set')] # cut userspace attack surface
-    if arch in ('X86_64', 'X86_32'):
-        l += [KconfigCheck('cut_attack_surface', 'clipos', 'X86_INTEL_TSX_MODE_OFF', 'y')] # tsx=off
+#   l += [KconfigCheck('cut_attack_surface', 'clipos', 'IKCONFIG', 'is not set')] # no, IKCONFIG is needed for this check :)

     # 'cut_attack_surface', 'lockdown'
-    l += [bpf_syscall_not_set] # refers to LOCKDOWN
     l += [KconfigCheck('cut_attack_surface', 'lockdown', 'EFI_TEST', 'is not set')] # refers to LOCKDOWN
     l += [KconfigCheck('cut_attack_surface', 'lockdown', 'MMIOTRACE_TEST', 'is not set')] # refers to LOCKDOWN
     l += [KconfigCheck('cut_attack_surface', 'lockdown', 'KPROBES', 'is not set')] # refers to LOCKDOWN
+    l += [bpf_syscall_not_set] # refers to LOCKDOWN

     # 'cut_attack_surface', 'my'
-    l += [OR(KconfigCheck('cut_attack_surface', 'my', 'TRIM_UNUSED_KSYMS', 'y'),
-             modules_not_set)]
     l += [KconfigCheck('cut_attack_surface', 'my', 'MMIOTRACE', 'is not set')] # refers to LOCKDOWN (permissive)
     l += [KconfigCheck('cut_attack_surface', 'my', 'LIVEPATCH', 'is not set')]
     l += [KconfigCheck('cut_attack_surface', 'my', 'IP_DCCP', 'is not set')]
@@ -685,6 +676,8 @@ def add_kconfig_checks(l, arch):
     l += [KconfigCheck('cut_attack_surface', 'my', 'VIDEO_VIVID', 'is not set')]
     l += [KconfigCheck('cut_attack_surface', 'my', 'INPUT_EVBUG', 'is not set')] # Can be used as a keylogger
     l += [KconfigCheck('cut_attack_surface', 'my', 'KGDB', 'is not set')]
+    l += [OR(KconfigCheck('cut_attack_surface', 'my', 'TRIM_UNUSED_KSYMS', 'y'),
+             modules_not_set)]

     # 'harden_userspace'
     if arch in ('X86_64', 'ARM64', 'X86_32'):
@@ -730,6 +723,9 @@ def add_cmdline_checks(l, arch):
     l += [CmdlineCheck('self_protection', 'defconfig', 'nospectre_v1', 'is not set')]
     l += [CmdlineCheck('self_protection', 'defconfig', 'nospectre_v2', 'is not set')]
     l += [CmdlineCheck('self_protection', 'defconfig', 'nospec_store_bypass_disable', 'is not set')]
+    l += [CmdlineCheck('self_protection', 'defconfig', 'arm64.nobti', 'is not set')]
+    l += [CmdlineCheck('self_protection', 'defconfig', 'arm64.nopauth', 'is not set')]
+    l += [CmdlineCheck('self_protection', 'defconfig', 'arm64.nomte', 'is not set')]
     l += [OR(CmdlineCheck('self_protection', 'defconfig', 'mitigations', 'is not off'),
              CmdlineCheck('self_protection', 'defconfig', 'mitigations', 'is not set'))]
     l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spectre_v2', 'is not off'),
@@ -744,9 +740,16 @@ def add_cmdline_checks(l, arch):
              CmdlineCheck('self_protection', 'defconfig', 'mds', 'is not set'))]
     l += [OR(CmdlineCheck('self_protection', 'defconfig', 'tsx_async_abort', 'is not off'),
              CmdlineCheck('self_protection', 'defconfig', 'tsx_async_abort', 'is not set'))]
+    l += [OR(CmdlineCheck('self_protection', 'defconfig', 'srbds', 'is not off'),
+             CmdlineCheck('self_protection', 'defconfig', 'srbds', 'is not set'))]
     l += [OR(CmdlineCheck('self_protection', 'defconfig', 'mmio_stale_data', 'is not off'),
              CmdlineCheck('self_protection', 'defconfig', 'mmio_stale_data', 'is not set'))]
+    l += [OR(CmdlineCheck('self_protection', 'defconfig', 'retbleed', 'is not off'),
+             CmdlineCheck('self_protection', 'defconfig', 'retbleed', 'is not set'))]
     if arch == 'ARM64':
+        l += [OR(CmdlineCheck('self_protection', 'defconfig', 'ssbd', 'kernel'),
+                 CmdlineCheck('self_protection', 'my', 'ssbd', 'force-on'),
+                 CmdlineCheck('self_protection', 'defconfig', 'ssbd', 'is not set'))]
         l += [OR(CmdlineCheck('self_protection', 'defconfig', 'rodata', 'full'),
                  AND(KconfigCheck('self_protection', 'defconfig', 'RODATA_FULL_DEFAULT_ENABLED', 'y'),
                      CmdlineCheck('self_protection', 'defconfig', 'rodata', 'is not set')))]
@@ -804,6 +807,8 @@ def add_cmdline_checks(l, arch):
     l += [OR(CmdlineCheck('cut_attack_surface', 'grsec', 'debugfs', 'off'),
              KconfigCheck('cut_attack_surface', 'grsec', 'DEBUG_FS', 'is not set'))] # ... the end

+    # 'cut_attack_surface', 'my'
+    l += [CmdlineCheck('cut_attack_surface', 'my', 'sysrq_always_enabled', 'is not set')]

 def print_unknown_options(checklist, parsed_options):
     known_options = []
@@ -975,9 +980,15 @@ def normalize_cmdline_options(option, value):
     if option == 'tsx_async_abort':
         # See tsx_async_abort_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
         return value
+    if option == 'srbds':
+        # See srbds_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
+        return value
     if option == 'mmio_stale_data':
         # See mmio_stale_data_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
         return value
+    if option == 'retbleed':
+        # See retbleed_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
+        return value

     # Implement a limited part of the kstrtobool() logic
     if value in ('1', 'on', 'On', 'ON', 'y', 'Y', 'yes', 'Yes', 'YES'):
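
A note on the behaviour the last hunk relies on (not part of the diff itself): normalize_cmdline_options() passes the newly handled 'srbds' and 'retbleed' values through untouched, because the kernel interprets them in srbds_parse_cmdline() and retbleed_parse_cmdline(), while ordinary boolean-like values still go through the limited kstrtobool()-style mapping whose true-value tuple is visible above. The minimal sketch below only illustrates that split; the helper name, the tuple of false-like values and the 'on'/'off' return strings are illustrative assumptions, not code taken from this patch.

# Minimal sketch, assuming the normalization split described above.
KSTRTOBOOL_TRUE = ('1', 'on', 'On', 'ON', 'y', 'Y', 'yes', 'Yes', 'YES')   # tuple shown in the hunk above
KSTRTOBOOL_FALSE = ('0', 'off', 'Off', 'OFF', 'n', 'N', 'no', 'No', 'NO')  # assumed counterpart, not in the patch

def normalize_cmdline_value_sketch(option, value):
    # Options parsed by dedicated kernel code (see the *_parse_cmdline()
    # functions in arch/x86/kernel/cpu/bugs.c) keep their raw value.
    if option in ('tsx_async_abort', 'srbds', 'mmio_stale_data', 'retbleed'):
        return value
    # Limited kstrtobool()-like normalization for everything else.
    if value in KSTRTOBOOL_TRUE:
        return 'on'
    if value in KSTRTOBOOL_FALSE:
        return 'off'
    return value

# Example: a 'retbleed=stuff,nosmt' value stays raw, while 'sysrq_always_enabled=Y' normalizes.
assert normalize_cmdline_value_sketch('retbleed', 'stuff,nosmt') == 'stuff,nosmt'
assert normalize_cmdline_value_sketch('sysrq_always_enabled', 'Y') == 'on'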