From: Alexander Popov
Date: Wed, 20 Apr 2022 14:59:38 +0000 (+0300)
Subject: Update the KSPP recommendations in the config_files
X-Git-Tag: v0.5.17~12
X-Git-Url: https://jxself.org/git/?a=commitdiff_plain;h=ea880f61ef5e65dae9beb09beb6cdfca669af9cc;p=kconfig-hardened-check.git

Update the KSPP recommendations in the config_files
---

diff --git a/kconfig_hardened_check/config_files/kspp-recommendations/kspp-recommendations-arm.config b/kconfig_hardened_check/config_files/kspp-recommendations/kspp-recommendations-arm.config
index 3bba331..57ff9d7 100644
--- a/kconfig_hardened_check/config_files/kspp-recommendations/kspp-recommendations-arm.config
+++ b/kconfig_hardened_check/config_files/kspp-recommendations/kspp-recommendations-arm.config
@@ -68,8 +68,8 @@ CONFIG_PAGE_POISONING_ZERO=y
 CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
 CONFIG_INIT_ON_FREE_DEFAULT_ON=y
 
-# Initialize all stack variables on function entry. (Clang builds only. For GCC, see CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL=y below)
-CONFIG_INIT_STACK_ALL=y
+# Initialize all stack variables on function entry. (Clang and GCC 12+ builds only. For earlier GCC, see CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL=y below)
+CONFIG_INIT_STACK_ALL_ZERO=y
 
 # Adds guard pages to kernel stacks (not all architectures support this yet).
 CONFIG_VMAP_STACK=y
@@ -83,6 +83,24 @@ CONFIG_FORTIFY_SOURCE=y
 # Avoid kernel memory address exposures via dmesg (sets sysctl kernel.dmesg_restrict initial value to 1)
 CONFIG_SECURITY_DMESG_RESTRICT=y
 
+# Randomize kernel stack offset on syscall entry (since v5.13).
+CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y
+
+# Enable sampling-based overflow detection. This is similar to KASAN coverage, but with almost zero runtime overhead.
+CONFIG_KFENCE=y
+
+# Do not ignore compile-time warnings (since v5.15)
+CONFIG_WERROR=y
+
+# Force IOMMU TLB invalidation so devices will never be able to access stale data contents (or set "iommu.passthrough=0 iommu.strict=1" at boot)
+CONFIG_IOMMU_DEFAULT_DMA_STRICT=y
+
+# Make scheduler aware of SMT Cores. Program needs to opt-in to using this feature with prctl(PR_SCHED_CORE).
+CONFIG_SCHED_CORE=y
+
+# Wipe all caller-used registers on exit from the function (reduces available ROP gadgets and minimizes stale data in registers)
+CONFIG_ZERO_CALL_USED_REGS=y
+
 # Dangerous; enabling this allows direct physical memory writing.
 # CONFIG_ACPI_CUSTOM_METHOD is not set
 
diff --git a/kconfig_hardened_check/config_files/kspp-recommendations/kspp-recommendations-arm64.config b/kconfig_hardened_check/config_files/kspp-recommendations/kspp-recommendations-arm64.config
index 6a24c42..c166290 100644
--- a/kconfig_hardened_check/config_files/kspp-recommendations/kspp-recommendations-arm64.config
+++ b/kconfig_hardened_check/config_files/kspp-recommendations/kspp-recommendations-arm64.config
@@ -68,8 +68,8 @@ CONFIG_PAGE_POISONING_ZERO=y
 CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
 CONFIG_INIT_ON_FREE_DEFAULT_ON=y
 
-# Initialize all stack variables on function entry. (Clang builds only. For GCC, see CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL=y below)
-CONFIG_INIT_STACK_ALL=y
+# Initialize all stack variables on function entry. (Clang and GCC 12+ builds only. For earlier GCC, see CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL=y below)
+CONFIG_INIT_STACK_ALL_ZERO=y
 
 # Adds guard pages to kernel stacks (not all architectures support this yet).
 CONFIG_VMAP_STACK=y
@@ -83,6 +83,24 @@ CONFIG_FORTIFY_SOURCE=y
 # Avoid kernel memory address exposures via dmesg (sets sysctl kernel.dmesg_restrict initial value to 1)
 CONFIG_SECURITY_DMESG_RESTRICT=y
 
+# Randomize kernel stack offset on syscall entry (since v5.13).
+CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y
+
+# Enable sampling-based overflow detection. This is similar to KASAN coverage, but with almost zero runtime overhead.
+CONFIG_KFENCE=y
+
+# Do not ignore compile-time warnings (since v5.15)
+CONFIG_WERROR=y
+
+# Force IOMMU TLB invalidation so devices will never be able to access stale data contents (or set "iommu.passthrough=0 iommu.strict=1" at boot)
+CONFIG_IOMMU_DEFAULT_DMA_STRICT=y
+
+# Make scheduler aware of SMT Cores. Program needs to opt-in to using this feature with prctl(PR_SCHED_CORE).
+CONFIG_SCHED_CORE=y
+
+# Wipe all caller-used registers on exit from the function (reduces available ROP gadgets and minimizes stale data in registers)
+CONFIG_ZERO_CALL_USED_REGS=y
+
 # Dangerous; enabling this allows direct physical memory writing.
 # CONFIG_ACPI_CUSTOM_METHOD is not set
 
@@ -162,9 +180,6 @@ CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
 # Randomize position of kernel (requires UEFI RNG or bootloader support for /chosen/kaslr-seed DT property).
 CONFIG_RANDOMIZE_BASE=y
 
-# Randomize kernel stack offset on syscall entry (since v5.13).
-CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y
-
 # Make sure PAN emulation is enabled.
 CONFIG_ARM64_SW_TTBR0_PAN=y
 
diff --git a/kconfig_hardened_check/config_files/kspp-recommendations/kspp-recommendations-x86-32.config b/kconfig_hardened_check/config_files/kspp-recommendations/kspp-recommendations-x86-32.config
index a382f41..ca92998 100644
--- a/kconfig_hardened_check/config_files/kspp-recommendations/kspp-recommendations-x86-32.config
+++ b/kconfig_hardened_check/config_files/kspp-recommendations/kspp-recommendations-x86-32.config
@@ -68,8 +68,8 @@ CONFIG_PAGE_POISONING_ZERO=y
 CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
 CONFIG_INIT_ON_FREE_DEFAULT_ON=y
 
-# Initialize all stack variables on function entry. (Clang builds only. For GCC, see CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL=y below)
-CONFIG_INIT_STACK_ALL=y
+# Initialize all stack variables on function entry. (Clang and GCC 12+ builds only. For earlier GCC, see CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL=y below)
+CONFIG_INIT_STACK_ALL_ZERO=y
 
 # Adds guard pages to kernel stacks (not all architectures support this yet).
 CONFIG_VMAP_STACK=y
@@ -83,6 +83,24 @@ CONFIG_FORTIFY_SOURCE=y
 # Avoid kernel memory address exposures via dmesg (sets sysctl kernel.dmesg_restrict initial value to 1)
 CONFIG_SECURITY_DMESG_RESTRICT=y
 
+# Randomize kernel stack offset on syscall entry (since v5.13).
+CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y
+
+# Enable sampling-based overflow detection. This is similar to KASAN coverage, but with almost zero runtime overhead.
+CONFIG_KFENCE=y
+
+# Do not ignore compile-time warnings (since v5.15)
+CONFIG_WERROR=y
+
+# Force IOMMU TLB invalidation so devices will never be able to access stale data contents (or set "iommu.passthrough=0 iommu.strict=1" at boot)
+CONFIG_IOMMU_DEFAULT_DMA_STRICT=y
+
+# Make scheduler aware of SMT Cores. Program needs to opt-in to using this feature with prctl(PR_SCHED_CORE).
+CONFIG_SCHED_CORE=y
+
+# Wipe all caller-used registers on exit from the function (reduces available ROP gadgets and minimizes stale data in registers)
+CONFIG_ZERO_CALL_USED_REGS=y
+
 # Dangerous; enabling this allows direct physical memory writing.
 # CONFIG_ACPI_CUSTOM_METHOD is not set
 
@@ -171,9 +189,6 @@ CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
 # Randomize position of kernel.
 CONFIG_RANDOMIZE_BASE=y
 
-# Randomize kernel stack offset on syscall entry (since v5.13).
-CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y
-
 # Enable Kernel Page Table Isolation to remove an entire class of cache timing side-channels.
 CONFIG_PAGE_TABLE_ISOLATION=y
 
diff --git a/kconfig_hardened_check/config_files/kspp-recommendations/kspp-recommendations-x86-64.config b/kconfig_hardened_check/config_files/kspp-recommendations/kspp-recommendations-x86-64.config
index c6b0820..b8097c6 100644
--- a/kconfig_hardened_check/config_files/kspp-recommendations/kspp-recommendations-x86-64.config
+++ b/kconfig_hardened_check/config_files/kspp-recommendations/kspp-recommendations-x86-64.config
@@ -68,8 +68,8 @@ CONFIG_PAGE_POISONING_ZERO=y
 CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
 CONFIG_INIT_ON_FREE_DEFAULT_ON=y
 
-# Initialize all stack variables on function entry. (Clang builds only. For GCC, see CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL=y below)
-CONFIG_INIT_STACK_ALL=y
+# Initialize all stack variables on function entry. (Clang and GCC 12+ builds only. For earlier GCC, see CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL=y below)
+CONFIG_INIT_STACK_ALL_ZERO=y
 
 # Adds guard pages to kernel stacks (not all architectures support this yet).
 CONFIG_VMAP_STACK=y
@@ -83,6 +83,24 @@ CONFIG_FORTIFY_SOURCE=y
 # Avoid kernel memory address exposures via dmesg (sets sysctl kernel.dmesg_restrict initial value to 1)
 CONFIG_SECURITY_DMESG_RESTRICT=y
 
+# Randomize kernel stack offset on syscall entry (since v5.13).
+CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y
+
+# Enable sampling-based overflow detection. This is similar to KASAN coverage, but with almost zero runtime overhead.
+CONFIG_KFENCE=y
+
+# Do not ignore compile-time warnings (since v5.15)
+CONFIG_WERROR=y
+
+# Force IOMMU TLB invalidation so devices will never be able to access stale data contents (or set "iommu.passthrough=0 iommu.strict=1" at boot)
+CONFIG_IOMMU_DEFAULT_DMA_STRICT=y
+
+# Make scheduler aware of SMT Cores. Program needs to opt-in to using this feature with prctl(PR_SCHED_CORE).
+CONFIG_SCHED_CORE=y
+
+# Wipe all caller-used registers on exit from the function (reduces available ROP gadgets and minimizes stale data in registers)
+CONFIG_ZERO_CALL_USED_REGS=y
+
 # Dangerous; enabling this allows direct physical memory writing.
 # CONFIG_ACPI_CUSTOM_METHOD is not set
 
@@ -167,9 +185,6 @@ CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
 CONFIG_RANDOMIZE_BASE=y
 CONFIG_RANDOMIZE_MEMORY=y
 
-# Randomize kernel stack offset on syscall entry (since v5.13).
-CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT=y
-
 # Modern libc no longer needs a fixed-position mapping in userspace, remove it as a possible target.
 CONFIG_LEGACY_VSYSCALL_NONE=y
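For reference, the CONFIG_SCHED_CORE comment added above notes that userspace must opt in via prctl(PR_SCHED_CORE). The following is only a minimal illustrative sketch of that opt-in, not part of the patch; it assumes a kernel built with CONFIG_SCHED_CORE=y (v5.14+), and the fallback constant definitions are provided only in case older uapi headers do not define them.

/* Sketch (assumption): create a core-scheduling cookie for the current
 * thread group, so its tasks only share an SMT core with tasks that hold
 * the same cookie. Constants mirror include/uapi/linux/prctl.h (v5.14+). */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SCHED_CORE
#define PR_SCHED_CORE                    62
#define PR_SCHED_CORE_CREATE             1  /* allocate a new unique cookie */
#define PR_SCHED_CORE_SCOPE_THREAD_GROUP 1  /* apply to the whole process  */
#endif

int main(void)
{
	/* pid 0 means the calling task; the last argument is unused here. */
	if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_CREATE, 0,
		  PR_SCHED_CORE_SCOPE_THREAD_GROUP, 0) != 0) {
		/* e.g. EINVAL if the running kernel lacks core scheduling */
		perror("prctl(PR_SCHED_CORE)");
		return 1;
	}
	puts("core-scheduling cookie created for this process");
	return 0;
}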