X-Git-Url: https://jxself.org/git/?a=blobdiff_plain;f=kconfig_hardened_check%2F__init__.py;h=fa0b9e031075398236b779ab5cf150c9bbed3cd8;hb=1d27115f1c05297bdd64869cbedd960db863e8b4;hp=0367d88094b22ea8e779e2146841ed7dd18270b0;hpb=e9fa43330ce69e7b532c845b01ec61d626ed206a;p=kconfig-hardened-check.git diff --git a/kconfig_hardened_check/__init__.py b/kconfig_hardened_check/__init__.py index 0367d88..fa0b9e0 100644 --- a/kconfig_hardened_check/__init__.py +++ b/kconfig_hardened_check/__init__.py @@ -10,43 +10,10 @@ # Please don't cry if my Python code looks like C. # # -# N.B Hardening command line parameters: -# page_alloc.shuffle=1 -# iommu=force (does it help against DMA attacks?) -# slub_debug=FZ (slow) -# loadpin.enforce=1 -# debugfs=no-mount (or off if possible) -# -# Mitigations of CPU vulnerabilities: -# Аrch-independent: -# mitigations=auto,nosmt (nosmt is slow) -# X86: -# spectre_v2=on -# spec_store_bypass_disable=on -# l1tf=full,force -# l1d_flush=on (a part of the l1tf option) -# mds=full,nosmt -# tsx=off -# ARM64: -# kpti=on -# ssbd=force-on -# -# Should NOT be set: -# nokaslr -# sysrq_always_enabled -# arm64.nobti -# arm64.nopauth -# arm64.nomte -# -# Hardware tag-based KASAN with arm64 Memory Tagging Extension (MTE): -# kasan=on -# kasan.stacktrace=off -# kasan.fault=panic -# # N.B. Hardening sysctls: # kernel.kptr_restrict=2 (or 1?) # kernel.dmesg_restrict=1 (also see the kconfig option) -# kernel.perf_event_paranoid=3 +# kernel.perf_event_paranoid=2 (or 3 with a custom patch, see https://lwn.net/Articles/696216/) # kernel.kexec_load_disabled=1 # kernel.yama.ptrace_scope=3 # user.max_user_namespaces=0 @@ -65,6 +32,7 @@ # fs.suid_dumpable=0 # kernel.modules_disabled=1 # kernel.randomize_va_space = 2 +# nosmt sysfs control file # pylint: disable=missing-module-docstring,missing-class-docstring,missing-function-docstring @@ -81,14 +49,33 @@ from .__about__ import __version__ SIMPLE_OPTION_TYPES = ('kconfig', 'version', 'cmdline') class OptCheck: - # Constructor without the 'expected' parameter is for option presence checks (any value is OK) - def __init__(self, reason, decision, name, expected=None): - assert(reason and decision and name), \ - 'invalid {} check for "{}"'.format(self.__class__.__name__, name) + def __init__(self, reason, decision, name, expected): + assert(name and name == name.strip() and len(name.split()) == 1), \ + 'invalid name "{}" for {}'.format(name, self.__class__.__name__) self.name = name - self.expected = expected + + assert(decision and decision == decision.strip() and len(decision.split()) == 1), \ + 'invalid decision "{}" for "{}" check'.format(decision, name) self.decision = decision + + assert(reason and reason == reason.strip() and len(reason.split()) == 1), \ + 'invalid reason "{}" for "{}" check'.format(reason, name) self.reason = reason + + assert(expected and expected == expected.strip()), \ + 'invalid expected value "{}" for "{}" check (1)'.format(expected, name) + val_len = len(expected.split()) + if val_len == 3: + assert(expected in ('is not set', 'is not off')), \ + 'invalid expected value "{}" for "{}" check (2)'.format(expected, name) + elif val_len == 2: + assert(expected == 'is present'), \ + 'invalid expected value "{}" for "{}" check (3)'.format(expected, name) + else: + assert(val_len == 1), \ + 'invalid expected value "{}" for "{}" check (4)'.format(expected, name) + self.expected = expected + self.state = None self.result = None @@ -97,31 +84,39 @@ class OptCheck: return None def check(self): - # handle the option presence check - if 
self.expected is None: + # handle the 'is present' check + if self.expected == 'is present': if self.state is None: - self.result = 'FAIL: not present' + self.result = 'FAIL: is not present' else: self.result = 'OK: is present' return + # handle the 'is not off' option check + if self.expected == 'is not off': + if self.state == 'off': + self.result = 'FAIL: is off' + if self.state == '0': + self.result = 'FAIL: is off, "0"' + elif self.state is None: + self.result = 'FAIL: is off, not found' + else: + self.result = 'OK: is not off, "' + self.state + '"' + return + # handle the option value check if self.expected == self.state: self.result = 'OK' elif self.state is None: if self.expected == 'is not set': - self.result = 'OK: not found' + self.result = 'OK: is not found' else: - self.result = 'FAIL: not found' + self.result = 'FAIL: is not found' else: self.result = 'FAIL: "' + self.state + '"' def table_print(self, _mode, with_results): - if self.expected is None: - expected = '' - else: - expected = self.expected - print('{:<40}|{:^7}|{:^12}|{:^10}|{:^18}'.format(self.name, self.type, expected, self.decision, self.reason), end='') + print('{:<40}|{:^7}|{:^12}|{:^10}|{:^18}'.format(self.name, self.type, self.expected, self.decision, self.reason), end='') if with_results: print('| {}'.format(self.result), end='') @@ -150,6 +145,8 @@ class CmdlineCheck(OptCheck): class VersionCheck: def __init__(self, ver_expected): + assert(ver_expected and isinstance(ver_expected, tuple) and len(ver_expected) == 2), \ + 'invalid version "{}" for VersionCheck'.format(ver_expected) self.ver_expected = ver_expected self.ver = () self.result = None @@ -234,11 +231,13 @@ class OR(ComplexOptCheck): # Add more info for additional checks: if i != 0: if opt.result == 'OK': - self.result = 'OK: {} "{}"'.format(opt.name, opt.expected) - elif opt.result == 'OK: not found': - self.result = 'OK: {} not found'.format(opt.name) + self.result = 'OK: {} is "{}"'.format(opt.name, opt.expected) + elif opt.result == 'OK: is not found': + self.result = 'OK: {} is not found'.format(opt.name) elif opt.result == 'OK: is present': self.result = 'OK: {} is present'.format(opt.name) + elif opt.result.startswith('OK: is not off'): + self.result = 'OK: {} is not off'.format(opt.name) else: # VersionCheck provides enough info assert(opt.result.startswith('OK: version')), \ @@ -263,10 +262,14 @@ class AND(ComplexOptCheck): # This FAIL is caused by additional checks, # and not by the main option that this AND-check is about. # Describe the reason of the FAIL. 
- if opt.result.startswith('FAIL: \"') or opt.result == 'FAIL: not found': - self.result = 'FAIL: {} not "{}"'.format(opt.name, opt.expected) - elif opt.result == 'FAIL: not present': - self.result = 'FAIL: {} not present'.format(opt.name) + if opt.result.startswith('FAIL: \"') or opt.result == 'FAIL: is not found': + self.result = 'FAIL: {} is not "{}"'.format(opt.name, opt.expected) + elif opt.result == 'FAIL: is not present': + self.result = 'FAIL: {} is not present'.format(opt.name) + elif opt.result in ('FAIL: is off', 'FAIL: is off, "0"'): + self.result = 'FAIL: {} is off'.format(opt.name) + elif opt.result == 'FAIL: is off, not found': + self.result = 'FAIL: {} is off, not found'.format(opt.name) else: # VersionCheck provides enough info self.result = opt.result @@ -292,7 +295,7 @@ def detect_arch(fname, archs): return arch, 'OK' -def detect_version(fname): +def detect_kernel_version(fname): with open(fname, 'r') as f: ver_pattern = re.compile("# Linux/.* Kernel Configuration") for line in f.readlines(): @@ -308,6 +311,26 @@ def detect_version(fname): return None, 'no kernel version detected' +def detect_compiler(fname): + gcc_version = None + clang_version = None + with open(fname, 'r') as f: + gcc_version_pattern = re.compile("CONFIG_GCC_VERSION=[0-9]*") + clang_version_pattern = re.compile("CONFIG_CLANG_VERSION=[0-9]*") + for line in f.readlines(): + if gcc_version_pattern.match(line): + gcc_version = line[19:-1] + if clang_version_pattern.match(line): + clang_version = line[21:-1] + if not gcc_version or not clang_version: + return None, 'no CONFIG_GCC_VERSION or CONFIG_CLANG_VERSION' + if gcc_version == '0' and clang_version != '0': + return 'CLANG ' + clang_version, 'OK' + if gcc_version != '0' and clang_version == '0': + return 'GCC ' + gcc_version, 'OK' + sys.exit('[!] ERROR: invalid GCC_VERSION and CLANG_VERSION: {} {}'.format(gcc_version, clang_version)) + + def add_kconfig_checks(l, arch): # Calling the KconfigCheck class constructor: # KconfigCheck(reason, decision, name, expected) @@ -315,15 +338,22 @@ def add_kconfig_checks(l, arch): # [!] Don't add CmdlineChecks in add_kconfig_checks() to avoid wrong results # when the tool doesn't check the cmdline. 
+ efi_not_set = KconfigCheck('-', '-', 'EFI', 'is not set') + cc_is_gcc = KconfigCheck('-', '-', 'CC_IS_GCC', 'y') # exists since v4.18 + cc_is_clang = KconfigCheck('-', '-', 'CC_IS_CLANG', 'y') # exists since v4.18 + modules_not_set = KconfigCheck('cut_attack_surface', 'kspp', 'MODULES', 'is not set') devmem_not_set = KconfigCheck('cut_attack_surface', 'kspp', 'DEVMEM', 'is not set') # refers to LOCKDOWN bpf_syscall_not_set = KconfigCheck('cut_attack_surface', 'lockdown', 'BPF_SYSCALL', 'is not set') # refers to LOCKDOWN - efi_not_set = KconfigCheck('cut_attack_surface', 'my', 'EFI', 'is not set') # 'self_protection', 'defconfig' l += [KconfigCheck('self_protection', 'defconfig', 'BUG', 'y')] l += [KconfigCheck('self_protection', 'defconfig', 'SLUB_DEBUG', 'y')] - l += [KconfigCheck('self_protection', 'defconfig', 'GCC_PLUGINS', 'y')] + l += [KconfigCheck('self_protection', 'defconfig', 'THREAD_INFO_IN_TASK', 'y')] + gcc_plugins_support_is_set = KconfigCheck('self_protection', 'defconfig', 'GCC_PLUGINS', 'y') + l += [gcc_plugins_support_is_set] + iommu_support_is_set = KconfigCheck('self_protection', 'defconfig', 'IOMMU_SUPPORT', 'y') + l += [iommu_support_is_set] # is needed for mitigating DMA attacks l += [OR(KconfigCheck('self_protection', 'defconfig', 'STACKPROTECTOR', 'y'), KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR', 'y'), KconfigCheck('self_protection', 'defconfig', 'CC_STACKPROTECTOR_REGULAR', 'y'), @@ -338,21 +368,26 @@ def add_kconfig_checks(l, arch): modules_not_set)] # DEBUG_SET_MODULE_RONX was before v4.11 l += [OR(KconfigCheck('self_protection', 'defconfig', 'REFCOUNT_FULL', 'y'), VersionCheck((5, 5)))] # REFCOUNT_FULL is enabled by default since v5.5 - l += [KconfigCheck('self_protection', 'defconfig', 'THREAD_INFO_IN_TASK', 'y')] - iommu_support_is_set = KconfigCheck('self_protection', 'defconfig', 'IOMMU_SUPPORT', 'y') - l += [iommu_support_is_set] # is needed for mitigating DMA attacks if arch in ('X86_64', 'ARM64', 'X86_32'): l += [KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_BASE', 'y')] - if arch in ('X86_64', 'ARM64'): + if arch in ('X86_64', 'ARM64', 'ARM'): l += [KconfigCheck('self_protection', 'defconfig', 'VMAP_STACK', 'y')] if arch in ('X86_64', 'X86_32'): + l += [KconfigCheck('self_protection', 'defconfig', 'DEBUG_WX', 'y')] + l += [KconfigCheck('self_protection', 'defconfig', 'WERROR', 'y')] + l += [KconfigCheck('self_protection', 'defconfig', 'X86_MCE', 'y')] + l += [KconfigCheck('self_protection', 'defconfig', 'X86_MCE_INTEL', 'y')] + l += [KconfigCheck('self_protection', 'defconfig', 'X86_MCE_AMD', 'y')] l += [KconfigCheck('self_protection', 'defconfig', 'MICROCODE', 'y')] # is needed for mitigating CPU bugs l += [KconfigCheck('self_protection', 'defconfig', 'RETPOLINE', 'y')] - l += [KconfigCheck('self_protection', 'defconfig', 'X86_SMAP', 'y')] l += [KconfigCheck('self_protection', 'defconfig', 'SYN_COOKIES', 'y')] # another reason? 
+ l += [OR(KconfigCheck('self_protection', 'defconfig', 'X86_SMAP', 'y'), + VersionCheck((5, 19)))] # X86_SMAP is enabled by default since v5.19 l += [OR(KconfigCheck('self_protection', 'defconfig', 'X86_UMIP', 'y'), KconfigCheck('self_protection', 'defconfig', 'X86_INTEL_UMIP', 'y'))] if arch in ('ARM64', 'ARM'): + l += [KconfigCheck('self_protection', 'defconfig', 'IOMMU_DEFAULT_DMA_STRICT', 'y')] + l += [KconfigCheck('self_protection', 'defconfig', 'IOMMU_DEFAULT_PASSTHROUGH', 'is not set')] # true if IOMMU_DEFAULT_DMA_STRICT is set l += [KconfigCheck('self_protection', 'defconfig', 'STACKPROTECTOR_PER_TASK', 'y')] if arch == 'X86_64': l += [KconfigCheck('self_protection', 'defconfig', 'PAGE_TABLE_ISOLATION', 'y')] @@ -365,17 +400,18 @@ def add_kconfig_checks(l, arch): l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_PAN', 'y')] l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_EPAN', 'y')] l += [KconfigCheck('self_protection', 'defconfig', 'UNMAP_KERNEL_AT_EL0', 'y')] - l += [OR(KconfigCheck('self_protection', 'defconfig', 'HARDEN_EL2_VECTORS', 'y'), - AND(KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_BASE', 'y'), - VersionCheck((5, 9))))] # HARDEN_EL2_VECTORS was included in RANDOMIZE_BASE in v5.9 + l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_E0PD', 'y')] l += [KconfigCheck('self_protection', 'defconfig', 'RODATA_FULL_DEFAULT_ENABLED', 'y')] l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_PTR_AUTH_KERNEL', 'y')] l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_BTI_KERNEL', 'y')] - l += [OR(KconfigCheck('self_protection', 'defconfig', 'HARDEN_BRANCH_PREDICTOR', 'y'), - VersionCheck((5, 10)))] # HARDEN_BRANCH_PREDICTOR is enabled by default since v5.10 l += [KconfigCheck('self_protection', 'defconfig', 'MITIGATE_SPECTRE_BRANCH_HISTORY', 'y')] l += [KconfigCheck('self_protection', 'defconfig', 'ARM64_MTE', 'y')] l += [KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_MODULE_REGION_FULL', 'y')] + l += [OR(KconfigCheck('self_protection', 'defconfig', 'HARDEN_EL2_VECTORS', 'y'), + AND(KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_BASE', 'y'), + VersionCheck((5, 9))))] # HARDEN_EL2_VECTORS was included in RANDOMIZE_BASE in v5.9 + l += [OR(KconfigCheck('self_protection', 'defconfig', 'HARDEN_BRANCH_PREDICTOR', 'y'), + VersionCheck((5, 10)))] # HARDEN_BRANCH_PREDICTOR is enabled by default since v5.10 if arch == 'ARM': l += [KconfigCheck('self_protection', 'defconfig', 'CPU_SW_DOMAIN_PAN', 'y')] l += [KconfigCheck('self_protection', 'defconfig', 'HARDEN_BRANCH_PREDICTOR', 'y')] @@ -383,31 +419,35 @@ def add_kconfig_checks(l, arch): # 'self_protection', 'kspp' l += [KconfigCheck('self_protection', 'kspp', 'BUG_ON_DATA_CORRUPTION', 'y')] - l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_WX', 'y')] l += [KconfigCheck('self_protection', 'kspp', 'SCHED_STACK_END_CHECK', 'y')] l += [KconfigCheck('self_protection', 'kspp', 'SLAB_FREELIST_HARDENED', 'y')] l += [KconfigCheck('self_protection', 'kspp', 'SLAB_FREELIST_RANDOM', 'y')] l += [KconfigCheck('self_protection', 'kspp', 'SHUFFLE_PAGE_ALLOCATOR', 'y')] l += [KconfigCheck('self_protection', 'kspp', 'FORTIFY_SOURCE', 'y')] l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_LIST', 'y')] + l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_VIRTUAL', 'y')] l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_SG', 'y')] l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_CREDENTIALS', 'y')] l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_NOTIFIERS', 'y')] l += 
[KconfigCheck('self_protection', 'kspp', 'INIT_ON_ALLOC_DEFAULT_ON', 'y')] - l += [KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_LATENT_ENTROPY', 'y')] l += [KconfigCheck('self_protection', 'kspp', 'KFENCE', 'y')] - l += [KconfigCheck('self_protection', 'kspp', 'WERROR', 'y')] - l += [KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_DMA_STRICT', 'y')] - l += [KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_PASSTHROUGH', 'is not set')] # true if IOMMU_DEFAULT_DMA_STRICT is set l += [KconfigCheck('self_protection', 'kspp', 'ZERO_CALL_USED_REGS', 'y')] - randstruct_is_set = KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_RANDSTRUCT', 'y') + l += [KconfigCheck('self_protection', 'kspp', 'HW_RANDOM_TPM', 'y')] + l += [KconfigCheck('self_protection', 'kspp', 'STATIC_USERMODEHELPER', 'y')] # needs userspace support + randstruct_is_set = OR(KconfigCheck('self_protection', 'kspp', 'RANDSTRUCT_FULL', 'y'), + KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_RANDSTRUCT', 'y')) l += [randstruct_is_set] + l += [AND(KconfigCheck('self_protection', 'kspp', 'RANDSTRUCT_PERFORMANCE', 'is not set'), + KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_RANDSTRUCT_PERFORMANCE', 'is not set'), + randstruct_is_set)] hardened_usercopy_is_set = KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY', 'y') l += [hardened_usercopy_is_set] l += [AND(KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY_FALLBACK', 'is not set'), hardened_usercopy_is_set)] l += [AND(KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY_PAGESPAN', 'is not set'), hardened_usercopy_is_set)] + l += [AND(KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_LATENT_ENTROPY', 'y'), + gcc_plugins_support_is_set)] l += [OR(KconfigCheck('self_protection', 'kspp', 'MODULE_SIG', 'y'), modules_not_set)] l += [OR(KconfigCheck('self_protection', 'kspp', 'MODULE_SIG_ALL', 'y'), @@ -425,71 +465,71 @@ def add_kconfig_checks(l, arch): # Starting from v5.11 CONFIG_PAGE_POISONING unconditionally checks # the 0xAA poison pattern on allocation. # That brings higher performance penalty. 
+ l += [OR(KconfigCheck('self_protection', 'kspp', 'EFI_DISABLE_PCI_DMA', 'y'), + efi_not_set)] + l += [OR(KconfigCheck('self_protection', 'kspp', 'RESET_ATTACK_MITIGATION', 'y'), + efi_not_set)] # needs userspace support (systemd) + ubsan_bounds_is_set = KconfigCheck('self_protection', 'kspp', 'UBSAN_BOUNDS', 'y') + l += [ubsan_bounds_is_set] + l += [OR(KconfigCheck('self_protection', 'kspp', 'UBSAN_LOCAL_BOUNDS', 'y'), + AND(ubsan_bounds_is_set, + cc_is_gcc))] + l += [AND(KconfigCheck('self_protection', 'kspp', 'UBSAN_TRAP', 'y'), + ubsan_bounds_is_set, + KconfigCheck('self_protection', 'kspp', 'UBSAN_SHIFT', 'is not set'), + KconfigCheck('self_protection', 'kspp', 'UBSAN_DIV_ZERO', 'is not set'), + KconfigCheck('self_protection', 'kspp', 'UBSAN_UNREACHABLE', 'is not set'), + KconfigCheck('self_protection', 'kspp', 'UBSAN_BOOL', 'is not set'), + KconfigCheck('self_protection', 'kspp', 'UBSAN_ENUM', 'is not set'), + KconfigCheck('self_protection', 'kspp', 'UBSAN_ALIGNMENT', 'is not set'))] # only array index bounds checking with traps if arch in ('X86_64', 'ARM64', 'X86_32'): + l += [AND(KconfigCheck('self_protection', 'kspp', 'UBSAN_SANITIZE_ALL', 'y'), + ubsan_bounds_is_set)] # ARCH_HAS_UBSAN_SANITIZE_ALL is not enabled for ARM stackleak_is_set = KconfigCheck('self_protection', 'kspp', 'GCC_PLUGIN_STACKLEAK', 'y') - l += [stackleak_is_set] + l += [AND(stackleak_is_set, gcc_plugins_support_is_set)] + l += [AND(KconfigCheck('self_protection', 'kspp', 'STACKLEAK_METRICS', 'is not set'), + stackleak_is_set, + gcc_plugins_support_is_set)] + l += [AND(KconfigCheck('self_protection', 'kspp', 'STACKLEAK_RUNTIME_DISABLE', 'is not set'), + stackleak_is_set, + gcc_plugins_support_is_set)] l += [KconfigCheck('self_protection', 'kspp', 'RANDOMIZE_KSTACK_OFFSET_DEFAULT', 'y')] + if arch in ('X86_64', 'ARM64'): + cfi_clang_is_set = KconfigCheck('self_protection', 'kspp', 'CFI_CLANG', 'y') + l += [cfi_clang_is_set] + l += [AND(KconfigCheck('self_protection', 'kspp', 'CFI_PERMISSIVE', 'is not set'), + cfi_clang_is_set)] if arch in ('X86_64', 'X86_32'): l += [KconfigCheck('self_protection', 'kspp', 'SCHED_CORE', 'y')] l += [KconfigCheck('self_protection', 'kspp', 'DEFAULT_MMAP_MIN_ADDR', '65536')] + l += [KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_DMA_STRICT', 'y')] + l += [KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_PASSTHROUGH', 'is not set')] # true if IOMMU_DEFAULT_DMA_STRICT is set + l += [AND(KconfigCheck('self_protection', 'kspp', 'INTEL_IOMMU_DEFAULT_ON', 'y'), + iommu_support_is_set)] if arch in ('ARM64', 'ARM'): + l += [KconfigCheck('self_protection', 'kspp', 'DEBUG_WX', 'y')] + l += [KconfigCheck('self_protection', 'kspp', 'WERROR', 'y')] l += [KconfigCheck('self_protection', 'kspp', 'DEFAULT_MMAP_MIN_ADDR', '32768')] l += [KconfigCheck('self_protection', 'kspp', 'SYN_COOKIES', 'y')] # another reason? 
+ if arch == 'X86_64': + l += [KconfigCheck('self_protection', 'kspp', 'SLS', 'y')] # vs CVE-2021-26341 in Straight-Line-Speculation + l += [AND(KconfigCheck('self_protection', 'kspp', 'INTEL_IOMMU_SVM', 'y'), + iommu_support_is_set)] + l += [AND(KconfigCheck('self_protection', 'kspp', 'AMD_IOMMU_V2', 'y'), + iommu_support_is_set)] if arch == 'ARM64': l += [KconfigCheck('self_protection', 'kspp', 'ARM64_SW_TTBR0_PAN', 'y')] + l += [KconfigCheck('self_protection', 'kspp', 'SHADOW_CALL_STACK', 'y')] + l += [KconfigCheck('self_protection', 'kspp', 'KASAN_HW_TAGS', 'y')] # see also: kasan=on, kasan.stacktrace=off, kasan.fault=panic if arch == 'X86_32': l += [KconfigCheck('self_protection', 'kspp', 'PAGE_TABLE_ISOLATION', 'y')] l += [KconfigCheck('self_protection', 'kspp', 'HIGHMEM64G', 'y')] l += [KconfigCheck('self_protection', 'kspp', 'X86_PAE', 'y')] - - # 'self_protection', 'maintainer' - ubsan_bounds_is_set = KconfigCheck('self_protection', 'maintainer', 'UBSAN_BOUNDS', 'y') # only array index bounds checking - l += [ubsan_bounds_is_set] # recommended by Kees Cook in /issues/53 - if arch in ('X86_64', 'ARM64', 'X86_32'): # ARCH_HAS_UBSAN_SANITIZE_ALL is not enabled for ARM - l += [AND(KconfigCheck('self_protection', 'maintainer', 'UBSAN_SANITIZE_ALL', 'y'), - ubsan_bounds_is_set)] # recommended by Kees Cook in /issues/53 - l += [AND(KconfigCheck('self_protection', 'maintainer', 'UBSAN_TRAP', 'y'), - ubsan_bounds_is_set)] # recommended by Kees Cook in /issues/53 + l += [AND(KconfigCheck('self_protection', 'kspp', 'INTEL_IOMMU', 'y'), + iommu_support_is_set)] # 'self_protection', 'clipos' - l += [KconfigCheck('self_protection', 'clipos', 'DEBUG_VIRTUAL', 'y')] - l += [KconfigCheck('self_protection', 'clipos', 'STATIC_USERMODEHELPER', 'y')] # needs userspace support - l += [OR(KconfigCheck('self_protection', 'clipos', 'EFI_DISABLE_PCI_DMA', 'y'), - efi_not_set)] l += [KconfigCheck('self_protection', 'clipos', 'SLAB_MERGE_DEFAULT', 'is not set')] - l += [KconfigCheck('self_protection', 'clipos', 'RANDOM_TRUST_BOOTLOADER', 'is not set')] - l += [KconfigCheck('self_protection', 'clipos', 'RANDOM_TRUST_CPU', 'is not set')] - l += [AND(KconfigCheck('self_protection', 'clipos', 'GCC_PLUGIN_RANDSTRUCT_PERFORMANCE', 'is not set'), - randstruct_is_set)] - if arch in ('X86_64', 'ARM64', 'X86_32'): - l += [AND(KconfigCheck('self_protection', 'clipos', 'STACKLEAK_METRICS', 'is not set'), - stackleak_is_set)] - l += [AND(KconfigCheck('self_protection', 'clipos', 'STACKLEAK_RUNTIME_DISABLE', 'is not set'), - stackleak_is_set)] - if arch in ('X86_64', 'X86_32'): - l += [AND(KconfigCheck('self_protection', 'clipos', 'INTEL_IOMMU_DEFAULT_ON', 'y'), - iommu_support_is_set)] - if arch == 'X86_64': - l += [AND(KconfigCheck('self_protection', 'clipos', 'INTEL_IOMMU_SVM', 'y'), - iommu_support_is_set)] - if arch == 'X86_32': - l += [AND(KconfigCheck('self_protection', 'clipos', 'INTEL_IOMMU', 'y'), - iommu_support_is_set)] - - # 'self_protection', 'my' - l += [OR(KconfigCheck('self_protection', 'my', 'RESET_ATTACK_MITIGATION', 'y'), - efi_not_set)] # needs userspace support (systemd) - if arch == 'X86_64': - l += [KconfigCheck('self_protection', 'my', 'SLS', 'y')] # vs CVE-2021-26341 in Straight-Line-Speculation - l += [AND(KconfigCheck('self_protection', 'my', 'AMD_IOMMU_V2', 'y'), - iommu_support_is_set)] - if arch == 'ARM64': - l += [KconfigCheck('self_protection', 'my', 'SHADOW_CALL_STACK', 'y')] # depends on clang, maybe it's alternative to STACKPROTECTOR_STRONG - l += [KconfigCheck('self_protection', 'my', 
'KASAN_HW_TAGS', 'y')] - cfi_clang_is_set = KconfigCheck('self_protection', 'my', 'CFI_CLANG', 'y') - l += [cfi_clang_is_set] - l += [AND(KconfigCheck('self_protection', 'my', 'CFI_PERMISSIVE', 'is not set'), - cfi_clang_is_set)] # 'security_policy' if arch in ('X86_64', 'ARM64', 'X86_32'): @@ -497,25 +537,25 @@ def add_kconfig_checks(l, arch): if arch == 'ARM': l += [KconfigCheck('security_policy', 'kspp', 'SECURITY', 'y')] # and choose your favourite LSM l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_YAMA', 'y')] + l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_LANDLOCK', 'y')] l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_SELINUX_DISABLE', 'is not set')] - l += [KconfigCheck('security_policy', 'clipos', 'SECURITY_LOCKDOWN_LSM', 'y')] - l += [KconfigCheck('security_policy', 'clipos', 'SECURITY_LOCKDOWN_LSM_EARLY', 'y')] - l += [KconfigCheck('security_policy', 'clipos', 'LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY', 'y')] - l += [KconfigCheck('security_policy', 'my', 'SECURITY_WRITABLE_HOOKS', 'is not set')] # refers to SECURITY_SELINUX_DISABLE - l += [KconfigCheck('security_policy', 'my', 'SECURITY_SAFESETID', 'y')] - loadpin_is_set = KconfigCheck('security_policy', 'my', 'SECURITY_LOADPIN', 'y') - l += [loadpin_is_set] # needs userspace support - l += [AND(KconfigCheck('security_policy', 'my', 'SECURITY_LOADPIN_ENFORCE', 'y'), - loadpin_is_set)] + l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_SELINUX_BOOTPARAM', 'is not set')] + l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_SELINUX_DEVELOP', 'is not set')] + l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_LOCKDOWN_LSM', 'y')] + l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_LOCKDOWN_LSM_EARLY', 'y')] + l += [KconfigCheck('security_policy', 'kspp', 'LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY', 'y')] + l += [KconfigCheck('security_policy', 'kspp', 'SECURITY_WRITABLE_HOOKS', 'is not set')] # refers to SECURITY_SELINUX_DISABLE # 'cut_attack_surface', 'defconfig' - l += [OR(KconfigCheck('cut_attack_surface', 'defconfig', 'BPF_UNPRIV_DEFAULT_OFF', 'y'), - bpf_syscall_not_set)] # see unprivileged_bpf_disabled l += [KconfigCheck('cut_attack_surface', 'defconfig', 'SECCOMP', 'y')] l += [KconfigCheck('cut_attack_surface', 'defconfig', 'SECCOMP_FILTER', 'y')] + l += [OR(KconfigCheck('cut_attack_surface', 'defconfig', 'BPF_UNPRIV_DEFAULT_OFF', 'y'), + bpf_syscall_not_set)] # see unprivileged_bpf_disabled if arch in ('X86_64', 'ARM64', 'X86_32'): l += [OR(KconfigCheck('cut_attack_surface', 'defconfig', 'STRICT_DEVMEM', 'y'), devmem_not_set)] # refers to LOCKDOWN + if arch in ('X86_64', 'X86_32'): + l += [KconfigCheck('cut_attack_surface', 'defconfig', 'X86_INTEL_TSX_MODE_OFF', 'y')] # tsx=off # 'cut_attack_surface', 'kspp' l += [KconfigCheck('cut_attack_surface', 'kspp', 'SECURITY_DMESG_RESTRICT', 'y')] @@ -529,8 +569,10 @@ def add_kconfig_checks(l, arch): l += [KconfigCheck('cut_attack_surface', 'kspp', 'PROC_KCORE', 'is not set')] # refers to LOCKDOWN l += [KconfigCheck('cut_attack_surface', 'kspp', 'LEGACY_PTYS', 'is not set')] l += [KconfigCheck('cut_attack_surface', 'kspp', 'HIBERNATION', 'is not set')] # refers to LOCKDOWN + l += [KconfigCheck('cut_attack_surface', 'kspp', 'COMPAT', 'is not set')] l += [KconfigCheck('cut_attack_surface', 'kspp', 'IA32_EMULATION', 'is not set')] l += [KconfigCheck('cut_attack_surface', 'kspp', 'X86_X32', 'is not set')] + l += [KconfigCheck('cut_attack_surface', 'kspp', 'X86_X32_ABI', 'is not set')] l += [KconfigCheck('cut_attack_surface', 'kspp', 
'MODIFY_LDT_SYSCALL', 'is not set')] l += [KconfigCheck('cut_attack_surface', 'kspp', 'OABI_COMPAT', 'is not set')] l += [KconfigCheck('cut_attack_surface', 'kspp', 'X86_MSR', 'is not set')] # refers to LOCKDOWN @@ -538,11 +580,13 @@ def add_kconfig_checks(l, arch): l += [devmem_not_set] l += [OR(KconfigCheck('cut_attack_surface', 'kspp', 'IO_STRICT_DEVMEM', 'y'), devmem_not_set)] # refers to LOCKDOWN + l += [AND(KconfigCheck('cut_attack_surface', 'kspp', 'LDISC_AUTOLOAD', 'is not set'), + KconfigCheck('cut_attack_surface', 'kspp', 'LDISC_AUTOLOAD', 'is present'))] + if arch == 'X86_64': + l += [KconfigCheck('cut_attack_surface', 'kspp', 'LEGACY_VSYSCALL_NONE', 'y')] # 'vsyscall=none' if arch == 'ARM': l += [OR(KconfigCheck('cut_attack_surface', 'kspp', 'STRICT_DEVMEM', 'y'), devmem_not_set)] # refers to LOCKDOWN - if arch == 'X86_64': - l += [KconfigCheck('cut_attack_surface', 'kspp', 'LEGACY_VSYSCALL_NONE', 'y')] # 'vsyscall=none' # 'cut_attack_surface', 'grsec' l += [KconfigCheck('cut_attack_surface', 'grsec', 'ZSMALLOC_STAT', 'is not set')] @@ -591,14 +635,12 @@ def add_kconfig_checks(l, arch): l += [KconfigCheck('cut_attack_surface', 'maintainer', 'VT', 'is not set')] # recommended by Daniel Vetter in /issues/38 l += [KconfigCheck('cut_attack_surface', 'maintainer', 'BLK_DEV_FD', 'is not set')] # recommended by Denis Efremov in /pull/54 l += [KconfigCheck('cut_attack_surface', 'maintainer', 'BLK_DEV_FD_RAWCMD', 'is not set')] # recommended by Denis Efremov in /pull/62 - - # 'cut_attack_surface', 'grapheneos' - l += [KconfigCheck('cut_attack_surface', 'grapheneos', 'AIO', 'is not set')] + l += [KconfigCheck('cut_attack_surface', 'maintainer', 'NOUVEAU_LEGACY_CTX_SUPPORT', 'is not set')] + # recommended by Dave Airlie in kernel commit b30a43ac7132cdda # 'cut_attack_surface', 'clipos' l += [KconfigCheck('cut_attack_surface', 'clipos', 'STAGING', 'is not set')] l += [KconfigCheck('cut_attack_surface', 'clipos', 'KSM', 'is not set')] # to prevent FLUSH+RELOAD attack -# l += [KconfigCheck('cut_attack_surface', 'clipos', 'IKCONFIG', 'is not set')] # no, IKCONFIG is needed for this check :) l += [KconfigCheck('cut_attack_surface', 'clipos', 'KALLSYMS', 'is not set')] l += [KconfigCheck('cut_attack_surface', 'clipos', 'X86_VSYSCALL_EMULATION', 'is not set')] l += [KconfigCheck('cut_attack_surface', 'clipos', 'MAGIC_SYSRQ', 'is not set')] @@ -608,20 +650,16 @@ def add_kconfig_checks(l, arch): l += [KconfigCheck('cut_attack_surface', 'clipos', 'X86_IOPL_IOPERM', 'is not set')] # refers to LOCKDOWN l += [KconfigCheck('cut_attack_surface', 'clipos', 'ACPI_TABLE_UPGRADE', 'is not set')] # refers to LOCKDOWN l += [KconfigCheck('cut_attack_surface', 'clipos', 'EFI_CUSTOM_SSDT_OVERLAYS', 'is not set')] - l += [AND(KconfigCheck('cut_attack_surface', 'clipos', 'LDISC_AUTOLOAD', 'is not set'), - KconfigCheck('cut_attack_surface', 'clipos', 'LDISC_AUTOLOAD'))] # option presence check - if arch in ('X86_64', 'X86_32'): - l += [KconfigCheck('cut_attack_surface', 'clipos', 'X86_INTEL_TSX_MODE_OFF', 'y')] # tsx=off + l += [KconfigCheck('cut_attack_surface', 'clipos', 'COREDUMP', 'is not set')] # cut userspace attack surface +# l += [KconfigCheck('cut_attack_surface', 'clipos', 'IKCONFIG', 'is not set')] # no, IKCONFIG is needed for this check :) # 'cut_attack_surface', 'lockdown' - l += [bpf_syscall_not_set] # refers to LOCKDOWN l += [KconfigCheck('cut_attack_surface', 'lockdown', 'EFI_TEST', 'is not set')] # refers to LOCKDOWN l += [KconfigCheck('cut_attack_surface', 'lockdown', 'MMIOTRACE_TEST', 'is not 
set')] # refers to LOCKDOWN l += [KconfigCheck('cut_attack_surface', 'lockdown', 'KPROBES', 'is not set')] # refers to LOCKDOWN + l += [bpf_syscall_not_set] # refers to LOCKDOWN # 'cut_attack_surface', 'my' - l += [OR(KconfigCheck('cut_attack_surface', 'my', 'TRIM_UNUSED_KSYMS', 'y'), - modules_not_set)] l += [KconfigCheck('cut_attack_surface', 'my', 'MMIOTRACE', 'is not set')] # refers to LOCKDOWN (permissive) l += [KconfigCheck('cut_attack_surface', 'my', 'LIVEPATCH', 'is not set')] l += [KconfigCheck('cut_attack_surface', 'my', 'IP_DCCP', 'is not set')] @@ -630,6 +668,9 @@ def add_kconfig_checks(l, arch): l += [KconfigCheck('cut_attack_surface', 'my', 'VIDEO_VIVID', 'is not set')] l += [KconfigCheck('cut_attack_surface', 'my', 'INPUT_EVBUG', 'is not set')] # Can be used as a keylogger l += [KconfigCheck('cut_attack_surface', 'my', 'KGDB', 'is not set')] + l += [KconfigCheck('cut_attack_surface', 'my', 'AIO', 'is not set')] + l += [OR(KconfigCheck('cut_attack_surface', 'my', 'TRIM_UNUSED_KSYMS', 'y'), + modules_not_set)] # 'harden_userspace' if arch in ('X86_64', 'ARM64', 'X86_32'): @@ -667,7 +708,44 @@ def add_cmdline_checks(l, arch): # required for the cmdline parameters. That would make the checks # very complex and not give a 100% guarantee anyway. + # 'self_protection', 'defconfig' + l += [CmdlineCheck('self_protection', 'defconfig', 'nosmep', 'is not set')] + l += [CmdlineCheck('self_protection', 'defconfig', 'nosmap', 'is not set')] + l += [CmdlineCheck('self_protection', 'defconfig', 'nokaslr', 'is not set')] + l += [CmdlineCheck('self_protection', 'defconfig', 'nopti', 'is not set')] + l += [CmdlineCheck('self_protection', 'defconfig', 'nospectre_v1', 'is not set')] + l += [CmdlineCheck('self_protection', 'defconfig', 'nospectre_v2', 'is not set')] + l += [CmdlineCheck('self_protection', 'defconfig', 'nospectre_bhb', 'is not set')] + l += [CmdlineCheck('self_protection', 'defconfig', 'nospec_store_bypass_disable', 'is not set')] + l += [CmdlineCheck('self_protection', 'defconfig', 'arm64.nobti', 'is not set')] + l += [CmdlineCheck('self_protection', 'defconfig', 'arm64.nopauth', 'is not set')] + l += [CmdlineCheck('self_protection', 'defconfig', 'arm64.nomte', 'is not set')] + l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spectre_v2', 'is not off'), + CmdlineCheck('self_protection', 'defconfig', 'spectre_v2', 'is not set'))] + l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spectre_v2_user', 'is not off'), + CmdlineCheck('self_protection', 'defconfig', 'spectre_v2_user', 'is not set'))] + l += [OR(CmdlineCheck('self_protection', 'defconfig', 'spec_store_bypass_disable', 'is not off'), + CmdlineCheck('self_protection', 'defconfig', 'spec_store_bypass_disable', 'is not set'))] + l += [OR(CmdlineCheck('self_protection', 'defconfig', 'l1tf', 'is not off'), + CmdlineCheck('self_protection', 'defconfig', 'l1tf', 'is not set'))] + l += [OR(CmdlineCheck('self_protection', 'defconfig', 'mds', 'is not off'), + CmdlineCheck('self_protection', 'defconfig', 'mds', 'is not set'))] + l += [OR(CmdlineCheck('self_protection', 'defconfig', 'tsx_async_abort', 'is not off'), + CmdlineCheck('self_protection', 'defconfig', 'tsx_async_abort', 'is not set'))] + l += [OR(CmdlineCheck('self_protection', 'defconfig', 'srbds', 'is not off'), + CmdlineCheck('self_protection', 'defconfig', 'srbds', 'is not set'))] + l += [OR(CmdlineCheck('self_protection', 'defconfig', 'mmio_stale_data', 'is not off'), + CmdlineCheck('self_protection', 'defconfig', 'mmio_stale_data', 'is not set'))] + l += 
[OR(CmdlineCheck('self_protection', 'defconfig', 'retbleed', 'is not off'), + CmdlineCheck('self_protection', 'defconfig', 'retbleed', 'is not set'))] + l += [OR(CmdlineCheck('self_protection', 'defconfig', 'kpti', 'is not off'), + CmdlineCheck('self_protection', 'defconfig', 'kpti', 'is not set'))] + l += [OR(CmdlineCheck('self_protection', 'defconfig', 'kvm.nx_huge_pages', 'is not off'), + CmdlineCheck('self_protection', 'defconfig', 'kvm.nx_huge_pages', 'is not set'))] if arch == 'ARM64': + l += [OR(CmdlineCheck('self_protection', 'defconfig', 'ssbd', 'kernel'), + CmdlineCheck('self_protection', 'my', 'ssbd', 'force-on'), + CmdlineCheck('self_protection', 'defconfig', 'ssbd', 'is not set'))] l += [OR(CmdlineCheck('self_protection', 'defconfig', 'rodata', 'full'), AND(KconfigCheck('self_protection', 'defconfig', 'RODATA_FULL_DEFAULT_ENABLED', 'y'), CmdlineCheck('self_protection', 'defconfig', 'rodata', 'is not set')))] @@ -675,6 +753,9 @@ def add_cmdline_checks(l, arch): l += [OR(CmdlineCheck('self_protection', 'defconfig', 'rodata', '1'), CmdlineCheck('self_protection', 'defconfig', 'rodata', 'is not set'))] + # 'self_protection', 'kspp' + l += [CmdlineCheck('self_protection', 'kspp', 'nosmt', 'is present')] + l += [CmdlineCheck('self_protection', 'kspp', 'mitigations', 'auto,nosmt')] # 'nosmt' by kspp + 'auto' by defconfig l += [OR(CmdlineCheck('self_protection', 'kspp', 'init_on_alloc', '1'), AND(KconfigCheck('self_protection', 'kspp', 'INIT_ON_ALLOC_DEFAULT_ON', 'y'), CmdlineCheck('self_protection', 'kspp', 'init_on_alloc', 'is not set')))] @@ -684,35 +765,56 @@ def add_cmdline_checks(l, arch): AND(CmdlineCheck('self_protection', 'kspp', 'page_poison', '1'), KconfigCheck('self_protection', 'kspp', 'PAGE_POISONING_ZERO', 'y'), CmdlineCheck('self_protection', 'kspp', 'slub_debug', 'P')))] - l += [OR(CmdlineCheck('self_protection', 'kspp', 'slab_nomerge'), + l += [OR(CmdlineCheck('self_protection', 'kspp', 'slab_nomerge', 'is present'), AND(KconfigCheck('self_protection', 'clipos', 'SLAB_MERGE_DEFAULT', 'is not set'), - CmdlineCheck('self_protection', 'kspp', 'slab_merge', 'is not set')))] # option presence check + CmdlineCheck('self_protection', 'kspp', 'slab_merge', 'is not set'), + CmdlineCheck('self_protection', 'clipos', 'slub_merge', 'is not set')))] l += [OR(CmdlineCheck('self_protection', 'kspp', 'iommu.strict', '1'), AND(KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_DMA_STRICT', 'y'), CmdlineCheck('self_protection', 'kspp', 'iommu.strict', 'is not set')))] l += [OR(CmdlineCheck('self_protection', 'kspp', 'iommu.passthrough', '0'), AND(KconfigCheck('self_protection', 'kspp', 'IOMMU_DEFAULT_PASSTHROUGH', 'is not set'), CmdlineCheck('self_protection', 'kspp', 'iommu.passthrough', 'is not set')))] + # The cmdline checks compatible with the kconfig recommendations of the KSPP project... l += [OR(CmdlineCheck('self_protection', 'kspp', 'hardened_usercopy', '1'), AND(KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY', 'y'), CmdlineCheck('self_protection', 'kspp', 'hardened_usercopy', 'is not set')))] l += [OR(CmdlineCheck('self_protection', 'kspp', 'slab_common.usercopy_fallback', '0'), AND(KconfigCheck('self_protection', 'kspp', 'HARDENED_USERCOPY_FALLBACK', 'is not set'), CmdlineCheck('self_protection', 'kspp', 'slab_common.usercopy_fallback', 'is not set')))] + # ... 
the end if arch in ('X86_64', 'ARM64', 'X86_32'): l += [OR(CmdlineCheck('self_protection', 'kspp', 'randomize_kstack_offset', '1'), AND(KconfigCheck('self_protection', 'kspp', 'RANDOMIZE_KSTACK_OFFSET_DEFAULT', 'y'), CmdlineCheck('self_protection', 'kspp', 'randomize_kstack_offset', 'is not set')))] if arch in ('X86_64', 'X86_32'): - l += [CmdlineCheck('self_protection', 'kspp', 'pti', 'on')] + l += [AND(CmdlineCheck('self_protection', 'kspp', 'pti', 'on'), + CmdlineCheck('self_protection', 'defconfig', 'nopti', 'is not set'))] + # 'self_protection', 'clipos' + l += [CmdlineCheck('self_protection', 'clipos', 'page_alloc.shuffle', '1')] + if arch in ('X86_64', 'X86_32'): + l += [CmdlineCheck('self_protection', 'clipos', 'iommu', 'force')] + + # 'cut_attack_surface', 'defconfig' + if arch in ('X86_64', 'X86_32'): + l += [OR(CmdlineCheck('cut_attack_surface', 'defconfig', 'tsx', 'off'), + AND(KconfigCheck('cut_attack_surface', 'defconfig', 'X86_INTEL_TSX_MODE_OFF', 'y'), + CmdlineCheck('cut_attack_surface', 'defconfig', 'tsx', 'is not set')))] + + # 'cut_attack_surface', 'kspp' if arch == 'X86_64': l += [OR(CmdlineCheck('cut_attack_surface', 'kspp', 'vsyscall', 'none'), AND(KconfigCheck('cut_attack_surface', 'kspp', 'LEGACY_VSYSCALL_NONE', 'y'), CmdlineCheck('cut_attack_surface', 'kspp', 'vsyscall', 'is not set')))] - # TODO: add other + # 'cut_attack_surface', 'grsec' + # The cmdline checks compatible with the kconfig options disabled by grsecurity... + l += [OR(CmdlineCheck('cut_attack_surface', 'grsec', 'debugfs', 'off'), + KconfigCheck('cut_attack_surface', 'grsec', 'DEBUG_FS', 'is not set'))] # ... the end + # 'cut_attack_surface', 'my' + l += [CmdlineCheck('cut_attack_surface', 'my', 'sysrq_always_enabled', 'is not set')] def print_unknown_options(checklist, parsed_options): known_options = [] @@ -855,11 +957,46 @@ def parse_kconfig_file(parsed_options, fname): def normalize_cmdline_options(option, value): - # Handle special cases + # Don't normalize the cmdline option values if + # the Linux kernel doesn't use kstrtobool() for them + if option == 'debugfs': + # See debugfs_kernel() in fs/debugfs/inode.c + return value + if option == 'mitigations': + # See mitigations_parse_cmdline() in kernel/cpu.c + return value if option == 'pti': - # Don't normalize the pti value since - # the Linux kernel doesn't use kstrtobool() for pti. 
- # See pti_check_boottime_disable() in linux/arch/x86/mm/pti.c + # See pti_check_boottime_disable() in arch/x86/mm/pti.c + return value + if option == 'spectre_v2': + # See spectre_v2_parse_cmdline() in arch/x86/kernel/cpu/bugs.c + return value + if option == 'spectre_v2_user': + # See spectre_v2_parse_user_cmdline() in arch/x86/kernel/cpu/bugs.c + return value + if option == 'spec_store_bypass_disable': + # See ssb_parse_cmdline() in arch/x86/kernel/cpu/bugs.c + return value + if option == 'l1tf': + # See l1tf_cmdline() in arch/x86/kernel/cpu/bugs.c + return value + if option == 'mds': + # See mds_cmdline() in arch/x86/kernel/cpu/bugs.c + return value + if option == 'tsx_async_abort': + # See tsx_async_abort_parse_cmdline() in arch/x86/kernel/cpu/bugs.c + return value + if option == 'srbds': + # See srbds_parse_cmdline() in arch/x86/kernel/cpu/bugs.c + return value + if option == 'mmio_stale_data': + # See mmio_stale_data_parse_cmdline() in arch/x86/kernel/cpu/bugs.c + return value + if option == 'retbleed': + # See retbleed_parse_cmdline() in arch/x86/kernel/cpu/bugs.c + return value + if option == 'tsx': + # See tsx_init() in arch/x86/kernel/cpu/tsx.c return value # Implement a limited part of the kstrtobool() logic @@ -935,12 +1072,19 @@ def main(): if mode != 'json': print('[+] Detected architecture: {}'.format(arch)) - kernel_version, msg = detect_version(args.config) + kernel_version, msg = detect_kernel_version(args.config) if not kernel_version: sys.exit('[!] ERROR: {}'.format(msg)) if mode != 'json': print('[+] Detected kernel version: {}.{}'.format(kernel_version[0], kernel_version[1])) + compiler, msg = detect_compiler(args.config) + if mode != 'json': + if compiler: + print('[+] Detected compiler: {}'.format(compiler)) + else: + print('[-] Can\'t detect the compiler: {}'.format(msg)) + # add relevant kconfig checks to the checklist add_kconfig_checks(config_checklist, arch) @@ -960,12 +1104,17 @@ def main(): parse_cmdline_file(parsed_cmdline_options, args.cmdline) populate_with_data(config_checklist, parsed_cmdline_options, 'cmdline') - # now everything is ready for performing the checks + # now everything is ready, perform the checks perform_checks(config_checklist) - # finally print the results if mode == 'verbose': - print_unknown_options(config_checklist, parsed_kconfig_options) + # print the parsed options without the checks (for debugging) + all_parsed_options = parsed_kconfig_options # assignment does not copy + if args.cmdline: + all_parsed_options.update(parsed_cmdline_options) + print_unknown_options(config_checklist, all_parsed_options) + + # finally print the results print_checklist(mode, config_checklist, True) sys.exit(0)
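
The header comment near the top of the patched file keeps a list of recommended hardening sysctls (kernel.kptr_restrict, kernel.dmesg_restrict, kernel.perf_event_paranoid, kernel.yama.ptrace_scope and so on). The tool itself only parses a kconfig file and a cmdline file, so the snippet below is purely an illustrative, hypothetical checker for a running system; the expected values are taken from that comment, everything else is an assumption.

def read_sysctl(name):
    # sysctl names map onto /proc/sys paths by replacing dots with slashes
    path = '/proc/sys/' + name.replace('.', '/')
    with open(path) as f:
        return f.read().strip()

WANTED = {
    'kernel.dmesg_restrict': '1',
    'kernel.kptr_restrict': '2',
    'kernel.perf_event_paranoid': '2',
    'kernel.yama.ptrace_scope': '3',
}

for name, expected in WANTED.items():
    try:
        actual = read_sysctl(name)
    except OSError:
        actual = None  # sysctl not available on this kernel
    verdict = 'OK' if actual == expected else 'FAIL'
    print('{}: want {}, got {} ({})'.format(name, expected, actual, verdict))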
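
The reworked OptCheck.check() in this diff distinguishes two special expected values on top of the plain value comparison: 'is present' (any value is accepted) and 'is not off' (any value except an off-like one). The sketch below is a minimal standalone stand-in for that decision logic, written only against the hunks shown here, not the full class; evaluate() is a hypothetical helper and the two off-like FAIL messages are merged for brevity.

# Simplified stand-in for OptCheck.check(); 'state' is the parsed option value,
# or None when the option was not found in the kconfig file or cmdline at all.
def evaluate(expected, state):
    if expected == 'is present':
        return 'OK: is present' if state is not None else 'FAIL: is not present'
    if expected == 'is not off':
        if state in ('off', '0'):
            return 'FAIL: is off'
        if state is None:
            return 'FAIL: is off, not found'
        return 'OK: is not off, "{}"'.format(state)
    # plain value check, including the 'is not set' expectation
    if expected == state:
        return 'OK'
    if state is None:
        return 'OK: is not found' if expected == 'is not set' else 'FAIL: is not found'
    return 'FAIL: "{}"'.format(state)

assert evaluate('is present', 'm') == 'OK: is present'
assert evaluate('is not off', '0') == 'FAIL: is off'
assert evaluate('is not off', 'auto') == 'OK: is not off, "auto"'
assert evaluate('is not set', None) == 'OK: is not found'
assert evaluate('y', 'n') == 'FAIL: "n"'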
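
The new detect_compiler() added by this diff relies on a generated kernel config carrying both CONFIG_GCC_VERSION and CONFIG_CLANG_VERSION, with exactly one of them non-zero. The self-contained sketch below shows the same idea against a list of lines instead of a file; detect_compiler_from_lines() and the sample config values are illustrative only.

import re

def detect_compiler_from_lines(lines):
    gcc_version = None
    clang_version = None
    gcc_re = re.compile("CONFIG_GCC_VERSION=[0-9]*")
    clang_re = re.compile("CONFIG_CLANG_VERSION=[0-9]*")
    for line in lines:
        if gcc_re.match(line):
            gcc_version = line[19:].strip()    # text after 'CONFIG_GCC_VERSION='
        if clang_re.match(line):
            clang_version = line[21:].strip()  # text after 'CONFIG_CLANG_VERSION='
    if not gcc_version or not clang_version:
        return None, 'no CONFIG_GCC_VERSION or CONFIG_CLANG_VERSION'
    if gcc_version == '0' and clang_version != '0':
        return 'CLANG ' + clang_version, 'OK'
    if gcc_version != '0' and clang_version == '0':
        return 'GCC ' + gcc_version, 'OK'
    return None, 'invalid GCC_VERSION and CLANG_VERSION'

sample_config = ['CONFIG_CC_IS_GCC=y\n',
                 'CONFIG_GCC_VERSION=110300\n',
                 'CONFIG_CLANG_VERSION=0\n']
print(detect_compiler_from_lines(sample_config))  # ('GCC 110300', 'OK')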
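
Many of the new add_cmdline_checks() entries follow one recurring pattern: OR(CmdlineCheck(..., 'is not off'), CmdlineCheck(..., 'is not set')), meaning a mitigation parameter may be omitted (the kernel default is kept) but must not be switched off. The helper below is a purely illustrative decision table for that pattern, not part of the tool.

def passes_not_off_or_not_set(cmdline, param):
    value = cmdline.get(param)          # None means the parameter is not set
    if value is None:
        return True                     # absent: the kernel default is kept
    return value not in ('off', '0')    # present: any value except an off-like one

cmdline = {'spectre_v2': 'on', 'mds': 'off'}
assert passes_not_off_or_not_set(cmdline, 'spectre_v2') is True
assert passes_not_off_or_not_set(cmdline, 'mds') is False
assert passes_not_off_or_not_set(cmdline, 'l1tf') is True    # not set at all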
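
normalize_cmdline_options() grows a long list of exceptions in this diff: parameters that the kernel parses with dedicated handlers rather than kstrtobool(), so their values must be compared verbatim. The sketch below shows why that split matters; NO_FOLD, fold_bool() and normalize() are hypothetical names, and the folding rules are only an approximation of the tool's limited kstrtobool() logic.

NO_FOLD = ('debugfs', 'mitigations', 'pti', 'spectre_v2', 'spectre_v2_user',
           'spec_store_bypass_disable', 'l1tf', 'mds', 'tsx_async_abort',
           'srbds', 'mmio_stale_data', 'retbleed', 'tsx')

def fold_bool(value):
    # rough stand-in for the kernel's kstrtobool(): fold common boolean spellings
    if value.lower() in ('1', 'on', 'y', 'yes', 't', 'true'):
        return '1'
    if value.lower() in ('0', 'off', 'n', 'no', 'f', 'false'):
        return '0'
    return value

def normalize(option, value):
    return value if option in NO_FOLD else fold_bool(value)

assert normalize('hardened_usercopy', 'on') == '1'             # folded, matches expected '1'
assert normalize('mitigations', 'auto,nosmt') == 'auto,nosmt'  # kept verbatim
assert normalize('pti', 'auto') == 'auto'                      # kept verbatim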