X-Git-Url: https://jxself.org/git/?a=blobdiff_plain;f=kconfig_hardened_check%2Fchecks.py;h=de63c3e44ff3ac6de575ac45f07028498345dc75;hb=108eb7374967b0f66e70b68cca60a0548f12844c;hp=cc71ef1dcaf030bae7f64f5135a459e084862fcd;hpb=7bb6a185ab3e68b25d24279466727c7ac69bd059;p=kconfig-hardened-check.git diff --git a/kconfig_hardened_check/checks.py b/kconfig_hardened_check/checks.py index cc71ef1..de63c3e 100644 --- a/kconfig_hardened_check/checks.py +++ b/kconfig_hardened_check/checks.py @@ -1,58 +1,22 @@ #!/usr/bin/python3 """ -This tool helps me to check Linux kernel options against -my security hardening preferences for X86_64, ARM64, X86_32, and ARM. -Let the computers do their job! +This tool is for checking the security hardening options of the Linux kernel. Author: Alexander Popov This module contains knowledge for checks. """ -# N.B. Hardening sysctls: -# kernel.kptr_restrict=2 (or 1?) -# kernel.dmesg_restrict=1 (also see the kconfig option) -# kernel.perf_event_paranoid=2 (or 3 with a custom patch, see https://lwn.net/Articles/696216/) -# kernel.kexec_load_disabled=1 -# kernel.yama.ptrace_scope=3 -# user.max_user_namespaces=0 -# what about bpf_jit_enable? -# kernel.unprivileged_bpf_disabled=1 -# net.core.bpf_jit_harden=2 -# vm.unprivileged_userfaultfd=0 -# (at first, it disabled unprivileged userfaultfd, -# and since v5.11 it enables unprivileged userfaultfd for user-mode only) -# vm.mmap_min_addr has a good value -# dev.tty.ldisc_autoload=0 -# fs.protected_symlinks=1 -# fs.protected_hardlinks=1 -# fs.protected_fifos=2 -# fs.protected_regular=2 -# fs.suid_dumpable=0 -# kernel.modules_disabled=1 -# kernel.randomize_va_space=2 -# nosmt sysfs control file -# dev.tty.legacy_tiocsti=0 -# vm.mmap_rnd_bits=max (?) -# kernel.sysrq=0 -# -# Think of these boot params: -# module.sig_enforce=1 -# lockdown=confidentiality -# mce=0 -# nosmt=force -# intel_iommu=on -# amd_iommu=on -# efi=disable_early_pci_dma - # pylint: disable=missing-function-docstring,line-too-long,invalid-name # pylint: disable=too-many-branches,too-many-statements -from .engine import KconfigCheck, CmdlineCheck, VersionCheck, OR, AND +from .engine import KconfigCheck, CmdlineCheck, SysctlCheck, VersionCheck, OR, AND def add_kconfig_checks(l, arch): + assert(arch), 'empty arch' + # Calling the KconfigCheck class constructor: # KconfigCheck(reason, decision, name, expected) # @@ -113,6 +77,7 @@ def add_kconfig_checks(l, arch): if arch == 'X86_64': l += [KconfigCheck('self_protection', 'defconfig', 'PAGE_TABLE_ISOLATION', 'y')] l += [KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_MEMORY', 'y')] + l += [KconfigCheck('self_protection', 'defconfig', 'X86_KERNEL_IBT', 'y')] l += [AND(KconfigCheck('self_protection', 'defconfig', 'INTEL_IOMMU', 'y'), iommu_support_is_set)] l += [AND(KconfigCheck('self_protection', 'defconfig', 'AMD_IOMMU', 'y'), @@ -165,9 +130,9 @@ def add_kconfig_checks(l, arch): hardened_usercopy_is_set = KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY', 'y') l += [hardened_usercopy_is_set] l += [AND(KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY_FALLBACK', 'is not set'), - hardened_usercopy_is_set)] + hardened_usercopy_is_set)] # usercopy whitelist violations should be prohibited l += [AND(KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY_PAGESPAN', 'is not set'), - hardened_usercopy_is_set)] + hardened_usercopy_is_set)] # this debugging for HARDENED_USERCOPY is not needed for security l += [AND(KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_LATENT_ENTROPY', 'y'), 
gcc_plugins_support_is_set)] l += [OR(KconfigCheck('self_protection', 'kspp', 'MODULE_SIG', 'y'), @@ -288,7 +253,6 @@ def add_kconfig_checks(l, arch): l += [KconfigCheck('cut_attack_surface', 'kspp', 'ACPI_CUSTOM_METHOD', 'is not set')] # refers to LOCKDOWN l += [KconfigCheck('cut_attack_surface', 'kspp', 'COMPAT_BRK', 'is not set')] l += [KconfigCheck('cut_attack_surface', 'kspp', 'DEVKMEM', 'is not set')] # refers to LOCKDOWN - l += [KconfigCheck('cut_attack_surface', 'kspp', 'COMPAT_VDSO', 'is not set')] l += [KconfigCheck('cut_attack_surface', 'kspp', 'BINFMT_MISC', 'is not set')] l += [KconfigCheck('cut_attack_surface', 'kspp', 'INET_DIAG', 'is not set')] l += [KconfigCheck('cut_attack_surface', 'kspp', 'KEXEC', 'is not set')] # refers to LOCKDOWN @@ -308,6 +272,10 @@ def add_kconfig_checks(l, arch): devmem_not_set)] # refers to LOCKDOWN l += [AND(KconfigCheck('cut_attack_surface', 'kspp', 'LDISC_AUTOLOAD', 'is not set'), KconfigCheck('cut_attack_surface', 'kspp', 'LDISC_AUTOLOAD', 'is present'))] + if arch in ('X86_64', 'X86_32'): + l += [KconfigCheck('cut_attack_surface', 'kspp', 'COMPAT_VDSO', 'is not set')] + # CONFIG_COMPAT_VDSO disabled ASLR of vDSO only on X86_64 and X86_32; + # on ARM64 this option has different meaning if arch == 'ARM': l += [OR(KconfigCheck('cut_attack_surface', 'kspp', 'STRICT_DEVMEM', 'y'), devmem_not_set)] # refers to LOCKDOWN @@ -399,6 +367,7 @@ def add_kconfig_checks(l, arch): l += [KconfigCheck('cut_attack_surface', 'my', 'KGDB', 'is not set')] l += [KconfigCheck('cut_attack_surface', 'my', 'AIO', 'is not set')] l += [KconfigCheck('cut_attack_surface', 'my', 'CORESIGHT', 'is not set')] + l += [KconfigCheck('cut_attack_surface', 'my', 'XFS_SUPPORT_V4', 'is not set')] l += [OR(KconfigCheck('cut_attack_surface', 'my', 'TRIM_UNUSED_KSYMS', 'y'), modules_not_set)] @@ -412,6 +381,8 @@ def add_kconfig_checks(l, arch): def add_cmdline_checks(l, arch): + assert(arch), 'empty arch' + # Calling the CmdlineCheck class constructor: # CmdlineCheck(reason, decision, name, expected) # @@ -443,37 +414,38 @@ def add_cmdline_checks(l, arch): l += [CmdlineCheck('self_protection', 'defconfig', 'arm64.nobti', 'is not set')] l += [CmdlineCheck('self_protection', 'defconfig', 'arm64.nopauth', 'is not set')] l += [CmdlineCheck('self_protection', 'defconfig', 'arm64.nomte', 'is not set')] - l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spectre_v2', 'is not off'), - AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'), - CmdlineCheck('self_protection', 'defconfig', 'spectre_v2', 'is not set')))] - l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spectre_v2_user', 'is not off'), - AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'), - CmdlineCheck('self_protection', 'defconfig', 'spectre_v2_user', 'is not set')))] - l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spec_store_bypass_disable', 'is not off'), - AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'), - CmdlineCheck('self_protection', 'defconfig', 'spec_store_bypass_disable', 'is not set')))] - l += [OR(CmdlineCheck('self_protection', 'defconfig', 'l1tf', 'is not off'), - AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'), - CmdlineCheck('self_protection', 'defconfig', 'l1tf', 'is not set')))] - l += [OR(CmdlineCheck('self_protection', 'defconfig', 'mds', 'is not off'), - AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'), - CmdlineCheck('self_protection', 'defconfig', 'mds', 'is not set')))] - l 
+= [OR(CmdlineCheck('self_protection', 'defconfig', 'tsx_async_abort', 'is not off'), - AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'), - CmdlineCheck('self_protection', 'defconfig', 'tsx_async_abort', 'is not set')))] - l += [OR(CmdlineCheck('self_protection', 'defconfig', 'srbds', 'is not off'), - AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'), - CmdlineCheck('self_protection', 'defconfig', 'srbds', 'is not set')))] - l += [OR(CmdlineCheck('self_protection', 'defconfig', 'mmio_stale_data', 'is not off'), - AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'), - CmdlineCheck('self_protection', 'defconfig', 'mmio_stale_data', 'is not set')))] - l += [OR(CmdlineCheck('self_protection', 'defconfig', 'retbleed', 'is not off'), - AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'), - CmdlineCheck('self_protection', 'defconfig', 'retbleed', 'is not set')))] - l += [OR(CmdlineCheck('self_protection', 'defconfig', 'kpti', 'is not off'), - AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'), - CmdlineCheck('self_protection', 'defconfig', 'kpti', 'is not set')))] + if arch in ('X86_64', 'X86_32'): + l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spectre_v2', 'is not off'), + AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'), + CmdlineCheck('self_protection', 'defconfig', 'spectre_v2', 'is not set')))] + l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spectre_v2_user', 'is not off'), + AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'), + CmdlineCheck('self_protection', 'defconfig', 'spectre_v2_user', 'is not set')))] + l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spec_store_bypass_disable', 'is not off'), + AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'), + CmdlineCheck('self_protection', 'defconfig', 'spec_store_bypass_disable', 'is not set')))] + l += [OR(CmdlineCheck('self_protection', 'defconfig', 'l1tf', 'is not off'), + AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'), + CmdlineCheck('self_protection', 'defconfig', 'l1tf', 'is not set')))] + l += [OR(CmdlineCheck('self_protection', 'defconfig', 'mds', 'is not off'), + AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'), + CmdlineCheck('self_protection', 'defconfig', 'mds', 'is not set')))] + l += [OR(CmdlineCheck('self_protection', 'defconfig', 'tsx_async_abort', 'is not off'), + AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'), + CmdlineCheck('self_protection', 'defconfig', 'tsx_async_abort', 'is not set')))] + l += [OR(CmdlineCheck('self_protection', 'defconfig', 'srbds', 'is not off'), + AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'), + CmdlineCheck('self_protection', 'defconfig', 'srbds', 'is not set')))] + l += [OR(CmdlineCheck('self_protection', 'defconfig', 'mmio_stale_data', 'is not off'), + AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'), + CmdlineCheck('self_protection', 'defconfig', 'mmio_stale_data', 'is not set')))] + l += [OR(CmdlineCheck('self_protection', 'defconfig', 'retbleed', 'is not off'), + AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'), + CmdlineCheck('self_protection', 'defconfig', 'retbleed', 'is not set')))] if arch == 'ARM64': + l += [OR(CmdlineCheck('self_protection', 'defconfig', 'kpti', 'is not off'), + AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 
'auto,nosmt'), + CmdlineCheck('self_protection', 'defconfig', 'kpti', 'is not set')))] l += [OR(CmdlineCheck('self_protection', 'defconfig', 'ssbd', 'kernel'), CmdlineCheck('self_protection', 'my', 'ssbd', 'force-on'), AND(CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt'), @@ -482,7 +454,7 @@ def add_cmdline_checks(l, arch): AND(KconfigCheck('self_protection', 'defconfig', 'RODATA_FULL_DEFAULT_ENABLED', 'y'), CmdlineCheck('self_protection', 'defconfig', 'rodata', 'is not set')))] else: - l += [OR(CmdlineCheck('self_protection', 'defconfig', 'rodata', '1'), + l += [OR(CmdlineCheck('self_protection', 'defconfig', 'rodata', 'on'), CmdlineCheck('self_protection', 'defconfig', 'rodata', 'is not set'))] # 'self_protection', 'kspp' @@ -503,12 +475,6 @@ def add_cmdline_checks(l, arch): AND(CmdlineCheck('self_protection', 'kspp', 'page_poison', '1'), KconfigCheck('self_protection', 'kspp', 'PAGE_POISONING_ZERO', 'y'), CmdlineCheck('self_protection', 'kspp', 'slub_debug', 'P')))] - l += [OR(CmdlineCheck('self_protection', 'kspp', 'iommu.strict', '1'), - AND(KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_DMA_STRICT', 'y'), - CmdlineCheck('self_protection', 'kspp', 'iommu.strict', 'is not set')))] - l += [OR(CmdlineCheck('self_protection', 'kspp', 'iommu.passthrough', '0'), - AND(KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_PASSTHROUGH', 'is not set'), - CmdlineCheck('self_protection', 'kspp', 'iommu.passthrough', 'is not set')))] # The cmdline checks compatible with the kconfig recommendations of the KSPP project... l += [OR(CmdlineCheck('self_protection', 'kspp', 'hardened_usercopy', '1'), AND(KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY', 'y'), @@ -518,6 +484,12 @@ def add_cmdline_checks(l, arch): CmdlineCheck('self_protection', 'kspp', 'slab_common.usercopy_fallback', 'is not set')))] # ... 
the end if arch in ('X86_64', 'ARM64', 'X86_32'): + l += [OR(CmdlineCheck('self_protection', 'kspp', 'iommu.strict', '1'), + AND(KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_DMA_STRICT', 'y'), + CmdlineCheck('self_protection', 'kspp', 'iommu.strict', 'is not set')))] + l += [OR(CmdlineCheck('self_protection', 'kspp', 'iommu.passthrough', '0'), + AND(KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_PASSTHROUGH', 'is not set'), + CmdlineCheck('self_protection', 'kspp', 'iommu.passthrough', 'is not set')))] l += [OR(CmdlineCheck('self_protection', 'kspp', 'randomize_kstack_offset', '1'), AND(KconfigCheck('self_protection', 'kspp', 'RANDOMIZE_KSTACK_OFFSET_DEFAULT', 'y'), CmdlineCheck('self_protection', 'kspp', 'randomize_kstack_offset', 'is not set')))] @@ -542,6 +514,18 @@ def add_cmdline_checks(l, arch): KconfigCheck('cut_attack_surface', 'clipos', 'X86_VSYSCALL_EMULATION', 'is not set'), AND(KconfigCheck('cut_attack_surface', 'kspp', 'LEGACY_VSYSCALL_NONE', 'y'), CmdlineCheck('cut_attack_surface', 'kspp', 'vsyscall', 'is not set')))] + l += [OR(CmdlineCheck('cut_attack_surface', 'my', 'vdso32', '1'), + CmdlineCheck('cut_attack_surface', 'my', 'vdso32', '0'), + AND(KconfigCheck('cut_attack_surface', 'kspp', 'COMPAT_VDSO', 'is not set'), + CmdlineCheck('cut_attack_surface', 'my', 'vdso32', 'is not set')))] # the vdso32 parameter must not be 2 + if arch == 'X86_32': + l += [OR(CmdlineCheck('cut_attack_surface', 'my', 'vdso32', '1'), + CmdlineCheck('cut_attack_surface', 'my', 'vdso', '1'), + CmdlineCheck('cut_attack_surface', 'my', 'vdso32', '0'), + CmdlineCheck('cut_attack_surface', 'my', 'vdso', '0'), + AND(KconfigCheck('cut_attack_surface', 'kspp', 'COMPAT_VDSO', 'is not set'), + CmdlineCheck('cut_attack_surface', 'my', 'vdso32', 'is not set'), + CmdlineCheck('cut_attack_surface', 'my', 'vdso', 'is not set')))] # the vdso and vdso32 parameters must not be 2 # 'cut_attack_surface', 'grsec' # The cmdline checks compatible with the kconfig options disabled by grsecurity... @@ -568,6 +552,13 @@ no_kstrtobool_options = [ 'srbds', # See srbds_parse_cmdline() in arch/x86/kernel/cpu/bugs.c 'mmio_stale_data', # See mmio_stale_data_parse_cmdline() in arch/x86/kernel/cpu/bugs.c 'retbleed', # See retbleed_parse_cmdline() in arch/x86/kernel/cpu/bugs.c + 'rodata', # See set_debug_rodata() in init/main.c + 'ssbd', # See parse_spectre_v4_param() in arch/arm64/kernel/proton-pack.c + 'slub_debug', # See setup_slub_debug() in mm/slub.c + 'iommu', # See iommu_setup() in arch/x86/kernel/pci-dma.c + 'vsyscall', # See vsyscall_setup() in arch/x86/entry/vsyscall/vsyscall_64.c + 'vdso32', # See vdso32_setup() in arch/x86/entry/vdso/vdso32-setup.c + 'vdso', # See vdso32_setup() in arch/x86/entry/vdso/vdso32-setup.c 'tsx' # See tsx_init() in arch/x86/kernel/cpu/tsx.c ] @@ -579,10 +570,50 @@ def normalize_cmdline_options(option, value): return value # Implement a limited part of the kstrtobool() logic - if value in ('1', 'on', 'On', 'ON', 'y', 'Y', 'yes', 'Yes', 'YES'): + if value.lower() in ('1', 'on', 'y', 'yes', 't', 'true'): return '1' - if value in ('0', 'off', 'Off', 'OFF', 'n', 'N', 'no', 'No', 'NO'): + if value.lower() in ('0', 'off', 'n', 'no', 'f', 'false'): return '0' # Preserve unique values return value + + +# TODO: draft of security hardening sysctls: +# kernel.kptr_restrict=2 (or 1?) +# kernel.yama.ptrace_scope=3 +# what about bpf_jit_enable? 
+# vm.unprivileged_userfaultfd=0 +# (at first, it disabled unprivileged userfaultfd, +# and since v5.11 it enables unprivileged userfaultfd for user-mode only) +# vm.mmap_min_addr has a good value +# fs.protected_symlinks=1 +# fs.protected_hardlinks=1 +# fs.protected_fifos=2 +# fs.protected_regular=2 +# fs.suid_dumpable=0 +# kernel.modules_disabled=1 +# kernel.randomize_va_space=2 +# nosmt sysfs control file +# dev.tty.legacy_tiocsti=0 +# vm.mmap_rnd_bits=max (?) +# kernel.sysrq=0 +# abi.vsyscall32 (any value except 2) +# kernel.oops_limit (think about a proper value) +# kernel.warn_limit (think about a proper value) +# net.ipv4.tcp_syncookies=1 (?) + +def add_sysctl_checks(l, arch): +# This function may be called with arch=None + +# Calling the SysctlCheck class constructor: +# SysctlCheck(reason, decision, name, expected) + + l += [SysctlCheck('self_protection', 'kspp', 'net.core.bpf_jit_harden', '2')] + + l += [SysctlCheck('cut_attack_surface', 'kspp', 'kernel.dmesg_restrict', '1')] + l += [SysctlCheck('cut_attack_surface', 'kspp', 'kernel.perf_event_paranoid', '3')] # with a custom patch, see https://lwn.net/Articles/696216/ + l += [SysctlCheck('cut_attack_surface', 'kspp', 'kernel.kexec_load_disabled', '1')] + l += [SysctlCheck('cut_attack_surface', 'kspp', 'user.max_user_namespaces', '0')] + l += [SysctlCheck('cut_attack_surface', 'kspp', 'dev.tty.ldisc_autoload', '0')] + l += [SysctlCheck('cut_attack_surface', 'kspp', 'kernel.unprivileged_bpf_disabled', '1')]
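
Below is a minimal usage sketch, not part of the diff above: it assumes only the function signatures visible in this change (add_kconfig_checks(l, arch), add_cmdline_checks(l, arch), add_sysctl_checks(l, arch)) and shows how a caller might collect every check for one architecture. Populating these checks with parsed Kconfig, cmdline, and sysctl data and printing the verdicts is handled by the engine module, which this diff does not show.

#!/usr/bin/python3
# Usage sketch (assumption for illustration, not part of the commit):
# gather all checks defined in checks.py for a single architecture.
from kconfig_hardened_check.checks import (
    add_kconfig_checks, add_cmdline_checks, add_sysctl_checks)

checklist = []
add_kconfig_checks(checklist, 'X86_64')   # asserts if arch is empty
add_cmdline_checks(checklist, 'X86_64')   # asserts if arch is empty
add_sysctl_checks(checklist, None)        # may be called with arch=None

# Evaluating these checks against real kconfig/cmdline/sysctl values is
# the job of the engine module (not shown in this diff).
print(len(checklist), 'checks collected for X86_64')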