X-Git-Url: https://jxself.org/git/?a=blobdiff_plain;f=kconfig_hardened_check%2F__init__.py;h=f2170a5a2ae3c32163c47266ef1daa0b8c48d53c;hb=6f7e9c10f051e9b7120891403026cb927864cac0;hp=a926402426c3eba4ba100a9fb0f17b4a81d4bcfa;hpb=3f57717b4ec311b04fbc658616f835f1f3abbf51;p=kconfig-hardened-check.git

diff --git a/kconfig_hardened_check/__init__.py b/kconfig_hardened_check/__init__.py
index a926402..f2170a5 100644
--- a/kconfig_hardened_check/__init__.py
+++ b/kconfig_hardened_check/__init__.py
@@ -17,16 +17,10 @@
 # Аrch-independent:
 # X86:
 #     l1d_flush=on (a part of the l1tf option)
-#     mds=full,nosmt
 #     tsx=off
 # ARM64:
 #     kpti=on
-#     ssbd=force-on
 #
-# Should NOT be set:
-#     sysrq_always_enabled
-#     arm64.nobti
-#     arm64.nopauth
 #     arm64.nomte
 #
 # Hardware tag-based KASAN with arm64 Memory Tagging Extension (MTE):
@@ -370,8 +364,11 @@ def add_kconfig_checks(l, arch):
     # 'self_protection', 'defconfig'
     l += [KconfigCheck('self_protection', 'defconfig', 'BUG', 'y')]
     l += [KconfigCheck('self_protection', 'defconfig', 'SLUB_DEBUG', 'y')]
+    l += [KconfigCheck('self_protection', 'defconfig', 'THREAD_INFO_IN_TASK', 'y')]
     gcc_plugins_support_is_set = KconfigCheck('self_protection', 'defconfig', 'GCC_PLUGINS', 'y')
     l += [gcc_plugins_support_is_set]
+    iommu_support_is_set = KconfigCheck('self_protection', 'defconfig', 'IOMMU_SUPPORT', 'y')
+    l += [iommu_support_is_set] # is needed for mitigating DMA attacks
     l += [OR(KconfigCheck('self_protection', 'defconfig', 'STACKPROTECTOR', 'y'),
              KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR', 'y'),
              KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR_REGULAR', 'y'),
@@ -386,9 +383,6 @@ def add_kconfig_checks(l, arch):
              modules_not_set)] # DEBUG_SET_MODULE_RONX was before v4.11
     l += [OR(KconfigCheck('self_protection', 'defconfig', 'REFCOUNT_FULL', 'y'),
              VersionCheck((5, 5)))] # REFCOUNT_FULL is enabled by default since v5.5
-    l += [KconfigCheck('self_protection', 'defconfig', 'THREAD_INFO_IN_TASK', 'y')]
-    iommu_support_is_set = KconfigCheck('self_protection', 'defconfig', 'IOMMU_SUPPORT', 'y')
-    l += [iommu_support_is_set] # is needed for mitigating DMA attacks
     if arch in ('X86_64', 'ARM64', 'X86_32'):
         l += [KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_BASE', 'y')]
     if arch in ('X86_64', 'ARM64'):
@@ -399,9 +393,9 @@ def add_kconfig_checks(l, arch):
         l += [KconfigCheck('self_protection', 'defconfig', 'X86_MCE_AMD', 'y')]
         l += [KconfigCheck('self_protection', 'defconfig', 'MICROCODE', 'y')] # is needed for mitigating CPU bugs
         l += [KconfigCheck('self_protection', 'defconfig', 'RETPOLINE', 'y')]
+        l += [KconfigCheck('self_protection', 'defconfig', 'SYN_COOKIES', 'y')] # another reason?
         l += [OR(KconfigCheck('self_protection', 'defconfig', 'X86_SMAP', 'y'),
                  VersionCheck((5, 19)))] # X86_SMAP is enabled by default since v5.19
-        l += [KconfigCheck('self_protection', 'defconfig', 'SYN_COOKIES', 'y')] # another reason?
         l += [OR(KconfigCheck('self_protection', 'defconfig', 'X86_UMIP', 'y'),
                  KconfigCheck('self_protection', 'defconfig', 'X86_INTEL_UMIP', 'y'))]
     if arch in ('ARM64', 'ARM'):
@@ -418,17 +412,17 @@ def add_kconfig_checks(l, arch):
         l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_EPAN', 'y')]
         l += [KconfigCheck('self_protection', 'defconfig', 'UNMAP_KERNEL_AT_EL0', 'y')]
         l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_E0PD', 'y')]
-        l += [OR(KconfigCheck('self_protection', 'defconfig', 'HARDEN_EL2_VECTORS', 'y'),
-                 AND(KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_BASE', 'y'),
-                     VersionCheck((5, 9))))] # HARDEN_EL2_VECTORS was included in RANDOMIZE_BASE in v5.9
         l += [KconfigCheck('self_protection', 'defconfig', 'RODATA_FULL_DEFAULT_ENABLED', 'y')]
         l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_PTR_AUTH_KERNEL', 'y')]
         l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_BTI_KERNEL', 'y')]
-        l += [OR(KconfigCheck('self_protection', 'defconfig', 'HARDEN_BRANCH_PREDICTOR', 'y'),
-                 VersionCheck((5, 10)))] # HARDEN_BRANCH_PREDICTOR is enabled by default since v5.10
         l += [KconfigCheck('self_protection', 'defconfig', 'MITIGATE_SPECTRE_BRANCH_HISTORY', 'y')]
         l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_MTE', 'y')]
         l += [KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_MODULE_REGION_FULL', 'y')]
+        l += [OR(KconfigCheck('self_protection', 'defconfig', 'HARDEN_EL2_VECTORS', 'y'),
+                 AND(KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_BASE', 'y'),
+                     VersionCheck((5, 9))))] # HARDEN_EL2_VECTORS was included in RANDOMIZE_BASE in v5.9
+        l += [OR(KconfigCheck('self_protection', 'defconfig', 'HARDEN_BRANCH_PREDICTOR', 'y'),
+                 VersionCheck((5, 10)))] # HARDEN_BRANCH_PREDICTOR is enabled by default since v5.10
     if arch == 'ARM':
         l += [KconfigCheck('self_protection', 'defconfig', 'CPU_SW_DOMAIN_PAN', 'y')]
         l += [KconfigCheck('self_protection', 'defconfig', 'HARDEN_BRANCH_PREDICTOR', 'y')]
@@ -448,8 +442,6 @@ def add_kconfig_checks(l, arch):
     l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_CREDENTIALS', 'y')]
     l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_NOTIFIERS', 'y')]
     l += [KconfigCheck('self_protection', 'kspp', 'INIT_ON_ALLOC_DEFAULT_ON', 'y')]
-    l += [AND(KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_LATENT_ENTROPY', 'y'),
-              gcc_plugins_support_is_set)]
     l += [KconfigCheck('self_protection', 'kspp', 'KFENCE', 'y')]
     l += [KconfigCheck('self_protection', 'kspp', 'WERROR', 'y')]
     l += [KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_DMA_STRICT', 'y')]
@@ -470,6 +462,8 @@ def add_kconfig_checks(l, arch):
               hardened_usercopy_is_set)]
     l += [AND(KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY_PAGESPAN', 'is not set'),
               hardened_usercopy_is_set)]
+    l += [AND(KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_LATENT_ENTROPY', 'y'),
+              gcc_plugins_support_is_set)]
     l += [OR(KconfigCheck('self_protection', 'kspp', 'MODULE_SIG', 'y'),
              modules_not_set)]
     l += [OR(KconfigCheck('self_protection', 'kspp', 'MODULE_SIG_ALL', 'y'),
@@ -564,10 +558,10 @@ def add_kconfig_checks(l, arch):
     l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_WRITABLE_HOOKS', 'is not set')] # refers to SECURITY_SELINUX_DISABLE
 
     # 'cut_attack_surface', 'defconfig'
-    l += [OR(KconfigCheck('cut_attack_surface', 'defconfig', 'BPF_UNPRIV_DEFAULT_OFF', 'y'),
-             bpf_syscall_not_set)] # see unprivileged_bpf_disabled
     l += [KconfigCheck('cut_attack_surface', 'defconfig', 'SECCOMP', 'y')]
     l += [KconfigCheck('cut_attack_surface', 'defconfig', 'SECCOMP_FILTER', 'y')]
+    l += [OR(KconfigCheck('cut_attack_surface', 'defconfig', 'BPF_UNPRIV_DEFAULT_OFF', 'y'),
+             bpf_syscall_not_set)] # see unprivileged_bpf_disabled
     if arch in ('X86_64', 'ARM64', 'X86_32'):
         l += [OR(KconfigCheck('cut_attack_surface', 'defconfig', 'STRICT_DEVMEM', 'y'),
                  devmem_not_set)] # refers to LOCKDOWN
@@ -595,11 +589,11 @@ def add_kconfig_checks(l, arch):
                  devmem_not_set)] # refers to LOCKDOWN
     l += [AND(KconfigCheck('cut_attack_surface', 'kspp', 'LDISC_AUTOLOAD', 'is not set'),
               KconfigCheck('cut_attack_surface', 'kspp', 'LDISC_AUTOLOAD', 'is present'))]
+    if arch == 'X86_64':
+        l += [KconfigCheck('cut_attack_surface', 'kspp', 'LEGACY_VSYSCALL_NONE', 'y')] # 'vsyscall=none'
     if arch == 'ARM':
         l += [OR(KconfigCheck('cut_attack_surface', 'kspp', 'STRICT_DEVMEM', 'y'),
                  devmem_not_set)] # refers to LOCKDOWN
-    if arch == 'X86_64':
-        l += [KconfigCheck('cut_attack_surface', 'kspp', 'LEGACY_VSYSCALL_NONE', 'y')] # 'vsyscall=none'
 
     # 'cut_attack_surface', 'grsec'
     l += [KconfigCheck('cut_attack_surface', 'grsec', 'ZSMALLOC_STAT', 'is not set')]
@@ -655,7 +649,6 @@ def add_kconfig_checks(l, arch):
     # 'cut_attack_surface', 'clipos'
     l += [KconfigCheck('cut_attack_surface', 'clipos', 'STAGING', 'is not set')]
     l += [KconfigCheck('cut_attack_surface', 'clipos', 'KSM', 'is not set')] # to prevent FLUSH+RELOAD attack
-#   l += [KconfigCheck('cut_attack_surface', 'clipos', 'IKCONFIG', 'is not set')] # no, IKCONFIG is needed for this check :)
     l += [KconfigCheck('cut_attack_surface', 'clipos', 'KALLSYMS', 'is not set')]
     l += [KconfigCheck('cut_attack_surface', 'clipos', 'X86_VSYSCALL_EMULATION', 'is not set')]
     l += [KconfigCheck('cut_attack_surface', 'clipos', 'MAGIC_SYSRQ', 'is not set')]
@@ -666,18 +659,17 @@ def add_kconfig_checks(l, arch):
     l += [KconfigCheck('cut_attack_surface', 'clipos', 'ACPI_TABLE_UPGRADE', 'is not set')] # refers to LOCKDOWN
     l += [KconfigCheck('cut_attack_surface', 'clipos', 'EFI_CUSTOM_SSDT_OVERLAYS', 'is not set')]
     l += [KconfigCheck('cut_attack_surface', 'clipos', 'COREDUMP', 'is not set')] # cut userspace attack surface
+#   l += [KconfigCheck('cut_attack_surface', 'clipos', 'IKCONFIG', 'is not set')] # no, IKCONFIG is needed for this check :)
     if arch in ('X86_64', 'X86_32'):
         l += [KconfigCheck('cut_attack_surface', 'clipos', 'X86_INTEL_TSX_MODE_OFF', 'y')] # tsx=off
 
     # 'cut_attack_surface', 'lockdown'
-    l += [bpf_syscall_not_set] # refers to LOCKDOWN
     l += [KconfigCheck('cut_attack_surface', 'lockdown', 'EFI_TEST', 'is not set')] # refers to LOCKDOWN
     l += [KconfigCheck('cut_attack_surface', 'lockdown', 'MMIOTRACE_TEST', 'is not set')] # refers to LOCKDOWN
     l += [KconfigCheck('cut_attack_surface', 'lockdown', 'KPROBES', 'is not set')] # refers to LOCKDOWN
+    l += [bpf_syscall_not_set] # refers to LOCKDOWN
 
     # 'cut_attack_surface', 'my'
-    l += [OR(KconfigCheck('cut_attack_surface', 'my', 'TRIM_UNUSED_KSYMS', 'y'),
-             modules_not_set)]
     l += [KconfigCheck('cut_attack_surface', 'my', 'MMIOTRACE', 'is not set')] # refers to LOCKDOWN (permissive)
     l += [KconfigCheck('cut_attack_surface', 'my', 'LIVEPATCH', 'is not set')]
     l += [KconfigCheck('cut_attack_surface', 'my', 'IP_DCCP', 'is not set')]
@@ -686,6 +678,8 @@ def add_kconfig_checks(l, arch):
     l += [KconfigCheck('cut_attack_surface', 'my', 'VIDEO_VIVID', 'is not set')]
     l += [KconfigCheck('cut_attack_surface', 'my', 'INPUT_EVBUG', 'is not set')] # Can be used as a keylogger
     l += [KconfigCheck('cut_attack_surface', 'my', 'KGDB', 'is not set')]
+    l += [OR(KconfigCheck('cut_attack_surface', 'my', 'TRIM_UNUSED_KSYMS', 'y'),
+             modules_not_set)]
 
     # 'harden_userspace'
     if arch in ('X86_64', 'ARM64', 'X86_32'):
@@ -731,6 +725,8 @@ def add_cmdline_checks(l, arch):
     l += [CmdlineCheck('self_protection', 'defconfig', 'nospectre_v1', 'is not set')]
     l += [CmdlineCheck('self_protection', 'defconfig', 'nospectre_v2', 'is not set')]
     l += [CmdlineCheck('self_protection', 'defconfig', 'nospec_store_bypass_disable', 'is not set')]
+    l += [CmdlineCheck('self_protection', 'defconfig', 'arm64.nobti', 'is not set')]
+    l += [CmdlineCheck('self_protection', 'defconfig', 'arm64.nopauth', 'is not set')]
     l += [OR(CmdlineCheck('self_protection', 'defconfig', 'mitigations', 'is not off'),
              CmdlineCheck('self_protection', 'defconfig', 'mitigations', 'is not set'))]
     l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spectre_v2', 'is not off'),
@@ -741,7 +737,20 @@ def add_cmdline_checks(l, arch):
              CmdlineCheck('self_protection', 'defconfig', 'spec_store_bypass_disable', 'is not set'))]
     l += [OR(CmdlineCheck('self_protection', 'defconfig', 'l1tf', 'is not off'),
              CmdlineCheck('self_protection', 'defconfig', 'l1tf', 'is not set'))]
+    l += [OR(CmdlineCheck('self_protection', 'defconfig', 'mds', 'is not off'),
+             CmdlineCheck('self_protection', 'defconfig', 'mds', 'is not set'))]
+    l += [OR(CmdlineCheck('self_protection', 'defconfig', 'tsx_async_abort', 'is not off'),
+             CmdlineCheck('self_protection', 'defconfig', 'tsx_async_abort', 'is not set'))]
+    l += [OR(CmdlineCheck('self_protection', 'defconfig', 'srbds', 'is not off'),
+             CmdlineCheck('self_protection', 'defconfig', 'srbds', 'is not set'))]
+    l += [OR(CmdlineCheck('self_protection', 'defconfig', 'mmio_stale_data', 'is not off'),
+             CmdlineCheck('self_protection', 'defconfig', 'mmio_stale_data', 'is not set'))]
+    l += [OR(CmdlineCheck('self_protection', 'defconfig', 'retbleed', 'is not off'),
+             CmdlineCheck('self_protection', 'defconfig', 'retbleed', 'is not set'))]
     if arch == 'ARM64':
+        l += [OR(CmdlineCheck('self_protection', 'defconfig', 'ssbd', 'kernel'),
+                 CmdlineCheck('self_protection', 'my', 'ssbd', 'force-on'),
+                 CmdlineCheck('self_protection', 'defconfig', 'ssbd', 'is not set'))]
         l += [OR(CmdlineCheck('self_protection', 'defconfig', 'rodata', 'full'),
                  AND(KconfigCheck('self_protection', 'defconfig', 'RODATA_FULL_DEFAULT_ENABLED', 'y'),
                      CmdlineCheck('self_protection', 'defconfig', 'rodata', 'is not set')))]
@@ -799,6 +808,8 @@ def add_cmdline_checks(l, arch):
     l += [OR(CmdlineCheck('cut_attack_surface', 'grsec', 'debugfs', 'off'),
              KconfigCheck('cut_attack_surface', 'grsec', 'DEBUG_FS', 'is not set'))] # ... the end
 
+    # 'cut_attack_surface', 'my'
+    l += [CmdlineCheck('cut_attack_surface', 'my', 'sysrq_always_enabled', 'is not set')]
 
 def print_unknown_options(checklist, parsed_options):
     known_options = []
@@ -964,6 +975,21 @@ def normalize_cmdline_options(option, value):
     if option == 'l1tf':
         # See l1tf_cmdline() in arch/x86/kernel/cpu/bugs.c
         return value
+    if option == 'mds':
+        # See mds_cmdline() in arch/x86/kernel/cpu/bugs.c
+        return value
+    if option == 'tsx_async_abort':
+        # See tsx_async_abort_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
+        return value
+    if option == 'srbds':
+        # See srbds_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
+        return value
+    if option == 'mmio_stale_data':
+        # See mmio_stale_data_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
+        return value
+    if option == 'retbleed':
+        # See retbleed_parse_cmdline() in arch/x86/kernel/cpu/bugs.c
+        return value
     # Implement a limited part of the kstrtobool() logic
     if value in ('1', 'on', 'On', 'ON', 'y', 'Y', 'yes', 'Yes', 'YES'):