X-Git-Url: https://jxself.org/git/?a=blobdiff_plain;f=kconfig_hardened_check%2F__init__.py;h=45e51fa8079f5a2c398d150ba17df9a757dbcea5;hb=cd559d31f74be75a4a000c6113079cdb43ffaa7c;hp=58d82f2b015f51dd4d54871907154f63351ee1ca;hpb=ba678bf39a67ccf857727f034ab21ee4e380f0df;p=kconfig-hardened-check.git

diff --git a/kconfig_hardened_check/__init__.py b/kconfig_hardened_check/__init__.py
index 58d82f2..45e51fa 100644
--- a/kconfig_hardened_check/__init__.py
+++ b/kconfig_hardened_check/__init__.py
@@ -13,10 +13,7 @@
 # N.B Hardening command line parameters:
 #    page_alloc.shuffle=1
 #    iommu=force (does it help against DMA attacks?)
-#    iommu.passthrough=0
-#    iommu.strict=1
 #    slub_debug=FZ (slow)
-#    init_on_free=1 (since v5.3, otherwise slub_debug=P and page_poison=1)
 #    loadpin.enforce=1
 #    debugfs=no-mount (or off if possible)
 #
@@ -35,9 +32,7 @@
 #    ssbd=force-on
 #
 # Should NOT be set:
-#    slab_merge
 #    nokaslr
-#    rodata=off
 #    sysrq_always_enabled
 #    arm64.nobti
 #    arm64.nopauth
@@ -326,7 +321,10 @@ def add_kconfig_checks(l, arch):
     l += [KconfigCheck('self_protection', 'defconfig', 'SLUB_DEBUG', 'y')]
     l += [KconfigCheck('self_protection', 'defconfig', 'GCC_PLUGINS', 'y')]
     l += [OR(KconfigCheck('self_protection', 'defconfig', 'STACKPROTECTOR', 'y'),
-             KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR', 'y'))]
+             KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR', 'y'),
+             KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR_REGULAR', 'y'),
+             KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR_AUTO', 'y'),
+             KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR_STRONG', 'y'))]
     l += [OR(KconfigCheck('self_protection', 'defconfig', 'STACKPROTECTOR_STRONG', 'y'),
              KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR_STRONG', 'y'))]
     l += [OR(KconfigCheck('self_protection', 'defconfig', 'STRICT_KERNEL_RWX', 'y'),
@@ -397,6 +395,7 @@ def add_kconfig_checks(l, arch):
     l += [KconfigCheck('self_protection', 'kspp', 'KFENCE', 'y')]
     l += [KconfigCheck('self_protection', 'kspp', 'WERROR', 'y')]
     l += [KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_DMA_STRICT', 'y')]
+    l += [KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_PASSTHROUGH', 'is not set')] # true if IOMMU_DEFAULT_DMA_STRICT is set
     l += [KconfigCheck('self_protection', 'kspp', 'ZERO_CALL_USED_REGS', 'y')]
     randstruct_is_set = KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_RANDSTRUCT', 'y')
     l += [randstruct_is_set]
@@ -426,8 +425,7 @@ def add_kconfig_checks(l, arch):
     if arch in ('X86_64', 'ARM64', 'X86_32'):
         stackleak_is_set = KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_STACKLEAK', 'y')
         l += [stackleak_is_set]
-        l += [OR(KconfigCheck('self_protection', 'kspp', 'RANDOMIZE_KSTACK_OFFSET_DEFAULT', 'y'),
-                 CmdlineCheck('self_protection', 'kspp', 'randomize_kstack_offset', '1'))]
+        l += [KconfigCheck('self_protection', 'kspp', 'RANDOMIZE_KSTACK_OFFSET_DEFAULT', 'y')]
     if arch in ('X86_64', 'X86_32'):
         l += [KconfigCheck('self_protection', 'kspp', 'SCHED_CORE', 'y')]
         l += [KconfigCheck('self_protection', 'kspp', 'DEFAULT_MMAP_MIN_ADDR', '65536')]
@@ -455,8 +453,7 @@ def add_kconfig_checks(l, arch):
     l += [KconfigCheck('self_protection', 'clipos', 'STATIC_USERMODEHELPER', 'y')] # needs userspace support
     l += [OR(KconfigCheck('self_protection', 'clipos', 'EFI_DISABLE_PCI_DMA', 'y'),
              efi_not_set)]
-    l += [OR(KconfigCheck('self_protection', 'clipos', 'SLAB_MERGE_DEFAULT', 'is not set'),
-             CmdlineCheck('self_protection', 'kspp', 'slab_nomerge'))] # option presence check
+    l += [KconfigCheck('self_protection', 'clipos', 'SLAB_MERGE_DEFAULT', 'is not set')]
     l += [KconfigCheck('self_protection', 'clipos', 'RANDOM_TRUST_BOOTLOADER', 'is not set')]
     l += [KconfigCheck('self_protection', 'clipos', 'RANDOM_TRUST_CPU', 'is not set')]
     l += [AND(KconfigCheck('self_protection', 'clipos', 'GCC_PLUGIN_RANDSTRUCT_PERFORMANCE', 'is not set'),
@@ -635,6 +632,9 @@ def add_kconfig_checks(l, arch):
         l += [KconfigCheck('harden_userspace', 'defconfig', 'INTEGRITY', 'y')]
     if arch == 'ARM':
         l += [KconfigCheck('harden_userspace', 'my', 'INTEGRITY', 'y')]
+    if arch == 'ARM64':
+        l += [KconfigCheck('harden_userspace', 'defconfig', 'ARM64_PTR_AUTH', 'y')]
+        l += [KconfigCheck('harden_userspace', 'defconfig', 'ARM64_BTI', 'y')]
     if arch in ('ARM', 'X86_32'):
         l += [KconfigCheck('harden_userspace', 'defconfig', 'VMSPLIT_3G', 'y')]
     if arch in ('X86_64', 'ARM64'):
@@ -642,19 +642,50 @@ def add_kconfig_checks(l, arch):
     if arch in ('X86_32', 'ARM'):
         l += [KconfigCheck('harden_userspace', 'my', 'ARCH_MMAP_RND_BITS', '16')]
 
-#   l += [KconfigCheck('feature_test', 'my', 'LKDTM', 'm')] # only for debugging!
-
 
 
 def add_cmdline_checks(l, arch):
     # Calling the CmdlineCheck class constructor:
     #     CmdlineCheck(reason, decision, name, expected)
+    # Don't add CmdlineChecks in add_kconfig_checks() to avoid wrong results
+    # when the tool doesn't check the cmdline.
+
+    if arch == 'ARM64':
+        l += [OR(CmdlineCheck('self_protection', 'defconfig', 'rodata', 'full'),
+                 AND(KconfigCheck('self_protection', 'defconfig', 'RODATA_FULL_DEFAULT_ENABLED', 'y'),
+                     CmdlineCheck('self_protection', 'defconfig', 'rodata', 'is not set')))]
     l += [OR(CmdlineCheck('self_protection', 'kspp', 'init_on_alloc', '1'),
              AND(KconfigCheck('self_protection', 'kspp', 'INIT_ON_ALLOC_DEFAULT_ON', 'y'),
                  CmdlineCheck('self_protection', 'kspp', 'init_on_alloc', 'is not set')))]
-
+    l += [OR(CmdlineCheck('self_protection', 'kspp', 'init_on_free', '1'),
+             AND(KconfigCheck('self_protection', 'kspp', 'INIT_ON_FREE_DEFAULT_ON', 'y'),
+                 CmdlineCheck('self_protection', 'kspp', 'init_on_free', 'is not set')),
+             AND(CmdlineCheck('self_protection', 'kspp', 'page_poison', '1'),
+                 KconfigCheck('self_protection', 'kspp', 'PAGE_POISONING_ZERO', 'y'),
+                 CmdlineCheck('self_protection', 'kspp', 'slub_debug', 'P')))]
+    l += [OR(CmdlineCheck('self_protection', 'kspp', 'slab_nomerge'),
+             AND(KconfigCheck('self_protection', 'clipos', 'SLAB_MERGE_DEFAULT', 'is not set'),
+                 CmdlineCheck('self_protection', 'kspp', 'slab_merge', 'is not set')))] # option presence check
+    l += [OR(CmdlineCheck('self_protection', 'kspp', 'iommu.strict', '1'),
+             AND(KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_DMA_STRICT', 'y'),
+                 CmdlineCheck('self_protection', 'kspp', 'iommu.strict', 'is not set')))]
+    l += [OR(CmdlineCheck('self_protection', 'kspp', 'iommu.passthrough', '0'),
+             AND(KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_PASSTHROUGH', 'is not set'),
+                 CmdlineCheck('self_protection', 'kspp', 'iommu.passthrough', 'is not set')))]
+    l += [OR(CmdlineCheck('self_protection', 'kspp', 'slab_common.usercopy_fallback', '0'),
+             KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY_FALLBACK', 'is not set'))]
+    if arch in ('X86_64', 'ARM64', 'X86_32'):
+        l += [OR(CmdlineCheck('self_protection', 'kspp', 'randomize_kstack_offset', '1'),
+                 AND(KconfigCheck('self_protection', 'kspp', 'RANDOMIZE_KSTACK_OFFSET_DEFAULT', 'y'),
+                     CmdlineCheck('self_protection', 'kspp', 'randomize_kstack_offset', 'is not set')))]
     if arch in ('X86_64', 'X86_32'):
         l += [CmdlineCheck('self_protection', 'kspp', 'pti', 'on')]
+
+    if arch == 'X86_64':
+        l += [OR(CmdlineCheck('cut_attack_surface', 'kspp', 'vsyscall', 'none'),
+                 AND(KconfigCheck('cut_attack_surface', 'kspp', 'LEGACY_VSYSCALL_NONE', 'y'),
+                     CmdlineCheck('cut_attack_surface', 'kspp', 'vsyscall', 'is not set')))]
+    # TODO: add other
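
The add_cmdline_checks() entries introduced in this diff all follow one pattern: a boot parameter passes either because it is set to the hardened value explicitly, or because the corresponding Kconfig default already provides that behaviour and the parameter is absent from the command line. Below is a minimal standalone sketch of that OR/AND logic for the new ARM64 rodata check; the helper names and the plain-dict representation of the parsed config and cmdline are illustrative assumptions, not the project's actual KconfigCheck/CmdlineCheck/OR/AND classes.

# Illustrative sketch only: simplified stand-ins for the real check classes.

def kconfig_ok(kconfig, option, expected):
    # 'is not set' means the option must be absent from the parsed Kconfig.
    if expected == 'is not set':
        return option not in kconfig
    return kconfig.get(option) == expected

def cmdline_ok(cmdline, option, expected):
    # 'is not set' means the parameter must be absent from the kernel cmdline.
    if expected == 'is not set':
        return option not in cmdline
    return cmdline.get(option) == expected

def check_rodata_full(kconfig, cmdline):
    # Mirrors: OR(rodata=full,
    #             AND(RODATA_FULL_DEFAULT_ENABLED=y, rodata is not set))
    return cmdline_ok(cmdline, 'rodata', 'full') or \
           (kconfig_ok(kconfig, 'CONFIG_RODATA_FULL_DEFAULT_ENABLED', 'y') and
            cmdline_ok(cmdline, 'rodata', 'is not set'))

if __name__ == '__main__':
    kconfig = {'CONFIG_RODATA_FULL_DEFAULT_ENABLED': 'y'}
    print(check_rodata_full(kconfig, {}))                # True: Kconfig default applies
    print(check_rodata_full(kconfig, {'rodata': 'on'}))  # False: weaker cmdline value overrides the default
    print(check_rodata_full({}, {'rodata': 'full'}))     # True: set explicitly on the cmdline

The same shape covers the init_on_free, slab_nomerge, iommu.strict, iommu.passthrough, randomize_kstack_offset and vsyscall entries above, which is also why the commit moves these checks out of add_kconfig_checks(): evaluating them requires the cmdline, so keeping them there would, as the added comment notes, report wrong results when only a .config is checked.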