X-Git-Url: https://jxself.org/git/?a=blobdiff_plain;f=kconfig_hardened_check%2F__init__.py;h=ebd0e0517b5b389a2df6fcbf94d71e5f0b07869c;hb=68622ad2d4c9ff86e2084a8e0bcf93e28043847c;hp=48f40196f082fedac3175318986fb6d866d209ce;hpb=4dcb0cdd1c84d90f268c9f970ff5b0093b12fcd8;p=kconfig-hardened-check.git

diff --git a/kconfig_hardened_check/__init__.py b/kconfig_hardened_check/__init__.py
index 48f4019..ebd0e05 100644
--- a/kconfig_hardened_check/__init__.py
+++ b/kconfig_hardened_check/__init__.py
@@ -11,18 +11,18 @@
 #
 #
 # N.B Hardening command line parameters:
-#    slub_debug=FZP
 #    slab_nomerge
 #    page_alloc.shuffle=1
 #    iommu=force (does it help against DMA attacks?)
-#    page_poison=1 (if enabled)
-#    init_on_alloc=1
-#    init_on_free=1
+#    slub_debug=FZ (slow)
+#    init_on_alloc=1 (since v5.3)
+#    init_on_free=1 (since v5.3, otherwise slub_debug=P and page_poison=1)
 #    loadpin.enforce=1
+#    debugfs=no-mount (or off if possible)
 #
 # Mitigations of CPU vulnerabilities:
 #       Аrch-independent:
-#           mitigations=auto,nosmt
+#           mitigations=auto,nosmt (nosmt is slow)
 #       X86:
 #           spectre_v2=on
 #           pti=on
@@ -35,8 +35,8 @@
 #           ssbd=force-on
 #
 # N.B. Hardening sysctls:
-#    kernel.kptr_restrict=2
-#    kernel.dmesg_restrict=1
+#    kernel.kptr_restrict=2 (or 1?)
+#    kernel.dmesg_restrict=1 (also see the kconfig option)
 #    kernel.perf_event_paranoid=3
 #    kernel.kexec_load_disabled=1
 #    kernel.yama.ptrace_scope=3
@@ -290,6 +290,11 @@ def construct_checklist(l, arch):
              VerCheck((5, 5)))] # REFCOUNT_FULL is enabled by default since v5.5
     iommu_support_is_set = OptCheck('self_protection', 'defconfig', 'IOMMU_SUPPORT', 'y')
     l += [iommu_support_is_set] # is needed for mitigating DMA attacks
+    if arch in ('X86_64', 'ARM64', 'X86_32'):
+        l += [OptCheck('self_protection', 'defconfig', 'RANDOMIZE_BASE', 'y')]
+        l += [OptCheck('self_protection', 'defconfig', 'THREAD_INFO_IN_TASK', 'y')]
+    if arch in ('X86_64', 'ARM64'):
+        l += [OptCheck('self_protection', 'defconfig', 'VMAP_STACK', 'y')]
     if arch in ('X86_64', 'X86_32'):
         l += [OptCheck('self_protection', 'defconfig', 'MICROCODE', 'y')] # is needed for mitigating CPU bugs
         l += [OptCheck('self_protection', 'defconfig', 'RETPOLINE', 'y')]
@@ -297,6 +302,8 @@ def construct_checklist(l, arch):
         l += [OptCheck('self_protection', 'defconfig', 'SYN_COOKIES', 'y')] # another reason?
         l += [OR(OptCheck('self_protection', 'defconfig', 'X86_UMIP', 'y'),
                  OptCheck('self_protection', 'defconfig', 'X86_INTEL_UMIP', 'y'))]
+    if arch in ('ARM64', 'ARM'):
+        l += [OptCheck('self_protection', 'defconfig', 'STACKPROTECTOR_PER_TASK', 'y')]
     if arch == 'X86_64':
         l += [OptCheck('self_protection', 'defconfig', 'PAGE_TABLE_ISOLATION', 'y')]
         l += [OptCheck('self_protection', 'defconfig', 'RANDOMIZE_MEMORY', 'y')]
@@ -312,18 +319,15 @@ def construct_checklist(l, arch):
                      VerCheck((5, 9))))] # HARDEN_EL2_VECTORS was included in RANDOMIZE_BASE in v5.9
         l += [OptCheck('self_protection', 'defconfig', 'RODATA_FULL_DEFAULT_ENABLED', 'y')]
         l += [OptCheck('self_protection', 'defconfig', 'ARM64_PTR_AUTH', 'y')]
-    if arch in ('X86_64', 'ARM64'):
-        l += [OptCheck('self_protection', 'defconfig', 'VMAP_STACK', 'y')]
-    if arch in ('X86_64', 'ARM64', 'X86_32'):
-        l += [OptCheck('self_protection', 'defconfig', 'RANDOMIZE_BASE', 'y')]
-        l += [OptCheck('self_protection', 'defconfig', 'THREAD_INFO_IN_TASK', 'y')]
+        l += [OptCheck('self_protection', 'defconfig', 'ARM64_BTI_KERNEL', 'y')]
+        l += [OR(OptCheck('self_protection', 'defconfig', 'HARDEN_BRANCH_PREDICTOR', 'y'),
+                 VerCheck((5, 10)))] # HARDEN_BRANCH_PREDICTOR is enabled by default since v5.10
     if arch == 'ARM':
         l += [OptCheck('self_protection', 'defconfig', 'CPU_SW_DOMAIN_PAN', 'y')]
-        l += [OptCheck('self_protection', 'defconfig', 'STACKPROTECTOR_PER_TASK', 'y')]
-    if arch in ('ARM64', 'ARM'):
         l += [OptCheck('self_protection', 'defconfig', 'HARDEN_BRANCH_PREDICTOR', 'y')]
 
     # 'self_protection', 'kspp'
+    l += [OptCheck('self_protection', 'kspp', 'SECURITY_DMESG_RESTRICT', 'y')]
     l += [OptCheck('self_protection', 'kspp', 'BUG_ON_DATA_CORRUPTION', 'y')]
     l += [OptCheck('self_protection', 'kspp', 'DEBUG_WX', 'y')]
     l += [OptCheck('self_protection', 'kspp', 'SCHED_STACK_END_CHECK', 'y')]
@@ -351,7 +355,7 @@ def construct_checklist(l, arch):
              modules_not_set)]
     l += [OR(OptCheck('self_protection', 'kspp', 'MODULE_SIG_FORCE', 'y'),
              modules_not_set)] # refers to LOCKDOWN
-    l += [OR(OptCheck('self_protection', 'kspp', 'INIT_STACK_ALL', 'y'),
+    l += [OR(OptCheck('self_protection', 'kspp', 'INIT_STACK_ALL_ZERO', 'y'),
              OptCheck('self_protection', 'kspp', 'GCC_PLUGIN_STRUCTLEAK_BYREF_ALL', 'y'))]
     l += [OR(OptCheck('self_protection', 'kspp', 'INIT_ON_FREE_DEFAULT_ON', 'y'),
              OptCheck('self_protection', 'kspp', 'PAGE_POISONING_ZERO', 'y'))] # before v5.3
@@ -360,18 +364,17 @@ def construct_checklist(l, arch):
     l += [stackleak_is_set]
     if arch in ('X86_64', 'X86_32'):
         l += [OptCheck('self_protection', 'kspp', 'DEFAULT_MMAP_MIN_ADDR', '65536')]
+    if arch in ('ARM64', 'ARM'):
+        l += [OptCheck('self_protection', 'kspp', 'DEFAULT_MMAP_MIN_ADDR', '32768')]
+        l += [OptCheck('self_protection', 'kspp', 'SYN_COOKIES', 'y')] # another reason?
+    if arch == 'ARM64':
+        l += [OptCheck('self_protection', 'kspp', 'ARM64_SW_TTBR0_PAN', 'y')]
     if arch == 'X86_32':
         l += [OptCheck('self_protection', 'kspp', 'PAGE_TABLE_ISOLATION', 'y')]
         l += [OptCheck('self_protection', 'kspp', 'HIGHMEM64G', 'y')]
         l += [OptCheck('self_protection', 'kspp', 'X86_PAE', 'y')]
-    if arch == 'ARM64':
-        l += [OptCheck('self_protection', 'kspp', 'ARM64_SW_TTBR0_PAN', 'y')]
-    if arch in ('ARM64', 'ARM'):
-        l += [OptCheck('self_protection', 'kspp', 'SYN_COOKIES', 'y')] # another reason?
-        l += [OptCheck('self_protection', 'kspp', 'DEFAULT_MMAP_MIN_ADDR', '32768')]
 
     # 'self_protection', 'clipos'
-    l += [OptCheck('self_protection', 'clipos', 'SECURITY_DMESG_RESTRICT', 'y')]
     l += [OptCheck('self_protection', 'clipos', 'DEBUG_VIRTUAL', 'y')]
     l += [OptCheck('self_protection', 'clipos', 'STATIC_USERMODEHELPER', 'y')] # needs userspace support
     l += [OptCheck('self_protection', 'clipos', 'EFI_DISABLE_PCI_DMA', 'y')]
@@ -386,20 +389,25 @@ def construct_checklist(l, arch):
     l += [AND(OptCheck('self_protection', 'clipos', 'STACKLEAK_RUNTIME_DISABLE', 'is not set'),
               stackleak_is_set)]
     if arch in ('X86_64', 'X86_32'):
-        l += [AND(OptCheck('self_protection', 'clipos', 'INTEL_IOMMU_SVM', 'y'),
-                  iommu_support_is_set)]
         l += [AND(OptCheck('self_protection', 'clipos', 'INTEL_IOMMU_DEFAULT_ON', 'y'),
                   iommu_support_is_set)]
+    if arch == 'X86_64':
+        l += [AND(OptCheck('self_protection', 'clipos', 'INTEL_IOMMU_SVM', 'y'),
+                  iommu_support_is_set)]
     if arch == 'X86_32':
         l += [AND(OptCheck('self_protection', 'clipos', 'INTEL_IOMMU', 'y'),
                   iommu_support_is_set)]
 
     # 'self_protection', 'my'
-    l += [OptCheck('self_protection', 'my', 'SLUB_DEBUG_ON', 'y')] # TODO: is it better to set that via kernel cmd?
+    l += [AND(OptCheck('self_protection', 'my', 'UBSAN_BOUNDS', 'y'),
+              OptCheck('self_protection', 'my', 'UBSAN_MISC', 'is not set'),
+              OptCheck('self_protection', 'my', 'UBSAN_TRAP', 'y'))]
     l += [OptCheck('self_protection', 'my', 'RESET_ATTACK_MITIGATION', 'y')] # needs userspace support (systemd)
     if arch == 'X86_64':
         l += [AND(OptCheck('self_protection', 'my', 'AMD_IOMMU_V2', 'y'),
                   iommu_support_is_set)]
+    if arch == 'ARM64':
+        l += [OptCheck('self_protection', 'my', 'SHADOW_CALL_STACK', 'y')] # depends on clang, maybe it's alternative to STACKPROTECTOR_STRONG
 
     # 'security_policy'
     if arch in ('X86_64', 'ARM64', 'X86_32'):
@@ -505,6 +513,7 @@ def construct_checklist(l, arch):
     l += [OptCheck('cut_attack_surface', 'lockdown', 'MMIOTRACE_TEST', 'is not set')] # refers to LOCKDOWN
 
     # 'cut_attack_surface', 'my'
+    l += [OptCheck('cut_attack_surface', 'my', 'TRIM_UNUSED_KSYMS', 'y')]
     l += [OptCheck('cut_attack_surface', 'my', 'MMIOTRACE', 'is not set')] # refers to LOCKDOWN (permissive)
     l += [OptCheck('cut_attack_surface', 'my', 'LIVEPATCH', 'is not set')]
     l += [OptCheck('cut_attack_surface', 'my', 'IP_DCCP', 'is not set')]
@@ -518,6 +527,8 @@ def construct_checklist(l, arch):
         l += [OptCheck('userspace_hardening', 'defconfig', 'INTEGRITY', 'y')]
     if arch == 'ARM':
         l += [OptCheck('userspace_hardening', 'my', 'INTEGRITY', 'y')]
+    if arch == 'ARM64':
+        l += [OptCheck('userspace_hardening', 'defconfig', 'ARM64_MTE', 'y')]
    if arch in ('ARM', 'X86_32'):
         l += [OptCheck('userspace_hardening', 'defconfig', 'VMSPLIT_3G', 'y')]
     if arch in ('X86_64', 'ARM64'):
@@ -594,22 +605,22 @@ def print_checklist(mode, checklist, with_results):
 
 
 def perform_check(opt, parsed_options, kernel_version):
-    if hasattr(opt, 'opts'):
-        # prepare ComplexOptCheck
-        for o in opt.opts:
-            if hasattr(o, 'opts'):
-                # Recursion for nested ComplexOptChecks
-                perform_check(o, parsed_options, kernel_version)
-            if hasattr(o, 'state'):
-                o.state = parsed_options.get(o.name, None)
-            if hasattr(o, 'ver'):
-                o.ver = kernel_version
-    else:
-        # prepare simple check, opt.state is mandatory
-        if not hasattr(opt, 'state'):
-            sys.exit('[!] ERROR: bad simple check {}'.format(vars(opt)))
-        opt.state = parsed_options.get(opt.name, None)
-    opt.check()
+    if hasattr(opt, 'opts'):
+        # prepare ComplexOptCheck
+        for o in opt.opts:
+            if hasattr(o, 'opts'):
+                # Recursion for nested ComplexOptChecks
+                perform_check(o, parsed_options, kernel_version)
+            if hasattr(o, 'state'):
+                o.state = parsed_options.get(o.name, None)
+            if hasattr(o, 'ver'):
+                o.ver = kernel_version
+    else:
+        # prepare simple check, opt.state is mandatory
+        if not hasattr(opt, 'state'):
+            sys.exit('[!] ERROR: bad simple check {}'.format(vars(opt)))
+        opt.state = parsed_options.get(opt.name, None)
+    opt.check()
 
 
 def perform_checks(checklist, parsed_options, kernel_version):
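
Note on the perform_check() hunk above: the function prepares a check tree before evaluating it, recursing into nested ComplexOptChecks and filling in each simple check's state from the parsed kconfig options, then calling check() on the whole node. The following is a minimal, self-contained sketch of that flow; the OptCheck and OR classes here are simplified stand-ins for the project's real classes (kernel version handling and result reporting are omitted), and the parsed_options sample is made up for illustration.

class OptCheck:
    # Simplified stand-in for a simple kconfig check: one option, one expected value.
    def __init__(self, name, expected):
        self.name = name
        self.expected = expected
        self.state = None    # filled in by perform_check() from the parsed config
        self.result = None

    def check(self):
        self.result = (self.state == self.expected)


class OR:
    # Simplified complex check: passes if any nested check passes.
    # The presence of the 'opts' attribute is what perform_check() keys on.
    def __init__(self, *opts):
        self.opts = opts
        self.result = None

    def check(self):
        for o in self.opts:
            o.check()
        self.result = any(o.result for o in self.opts)


def perform_check(opt, parsed_options):
    # Mirrors the control flow of the hunk above, minus kernel_version handling.
    if hasattr(opt, 'opts'):
        for o in opt.opts:
            if hasattr(o, 'opts'):
                perform_check(o, parsed_options)   # nested complex check
            if hasattr(o, 'state'):
                o.state = parsed_options.get(o.name, None)
    else:
        opt.state = parsed_options.get(opt.name, None)
    opt.check()


# Hypothetical parsed kconfig fragment, just for illustration.
parsed_options = {'X86_INTEL_UMIP': 'y', 'RANDOMIZE_BASE': 'y'}
umip = OR(OptCheck('X86_UMIP', 'y'), OptCheck('X86_INTEL_UMIP', 'y'))
perform_check(umip, parsed_options)
print(umip.result)   # True: the second alternative is satisfied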