if arch in ('X86_64', 'ARM64', 'ARM'):
l += [vmap_stack_is_set]
if arch in ('X86_64', 'X86_32'):
+ cpu_sup_amd_not_set = KconfigCheck('-', '-', 'CPU_SUP_AMD', 'is not set')
+ cpu_sup_intel_not_set = KconfigCheck('-', '-', 'CPU_SUP_INTEL', 'is not set')
l += [KconfigCheck('self_protection', 'defconfig', 'SPECULATION_MITIGATIONS', 'y')]
l += [KconfigCheck('self_protection', 'defconfig', 'DEBUG_WX', 'y')]
l += [KconfigCheck('self_protection', 'defconfig', 'WERROR', 'y')]
l += [KconfigCheck('self_protection', 'defconfig', 'X86_MCE', 'y')]
- l += [KconfigCheck('self_protection', 'defconfig', 'X86_MCE_INTEL', 'y')]
- l += [KconfigCheck('self_protection', 'defconfig', 'X86_MCE_AMD', 'y')]
+ l += [OR(KconfigCheck('self_protection', 'defconfig', 'X86_MCE_INTEL', 'y'),
+ cpu_sup_intel_not_set)]
+ l += [OR(KconfigCheck('self_protection', 'defconfig', 'X86_MCE_AMD', 'y'),
+ cpu_sup_amd_not_set)]
l += [KconfigCheck('self_protection', 'defconfig', 'RETPOLINE', 'y')]
l += [KconfigCheck('self_protection', 'defconfig', 'SYN_COOKIES', 'y')] # another reason?
microcode_is_set = KconfigCheck('self_protection', 'defconfig', 'MICROCODE', 'y')
l += [microcode_is_set] # is needed for mitigating CPU bugs
l += [OR(KconfigCheck('self_protection', 'defconfig', 'MICROCODE_INTEL', 'y'),
+ cpu_sup_intel_not_set,
AND(microcode_is_set,
VersionCheck((6, 6, 0))))] # MICROCODE_INTEL was included in MICROCODE since v6.6
l += [OR(KconfigCheck('self_protection', 'defconfig', 'MICROCODE_AMD', 'y'),
+ cpu_sup_amd_not_set,
AND(microcode_is_set,
VersionCheck((6, 6, 0))))] # MICROCODE_AMD was included in MICROCODE since v6.6
l += [OR(KconfigCheck('self_protection', 'defconfig', 'X86_SMAP', 'y'),
         VersionCheck((5, 19, 0)))] # X86_SMAP is enabled unconditionally since v5.19
l += [KconfigCheck('self_protection', 'defconfig', 'PAGE_TABLE_ISOLATION', 'y')]
l += [KconfigCheck('self_protection', 'defconfig', 'RANDOMIZE_MEMORY', 'y')]
l += [KconfigCheck('self_protection', 'defconfig', 'X86_KERNEL_IBT', 'y')]
- l += [KconfigCheck('self_protection', 'defconfig', 'CPU_SRSO', 'y')]
+ l += [OR(KconfigCheck('self_protection', 'defconfig', 'CPU_SRSO', 'y'),
+ cpu_sup_amd_not_set)]
l += [AND(KconfigCheck('self_protection', 'defconfig', 'INTEL_IOMMU', 'y'),
iommu_support_is_set)]
l += [AND(KconfigCheck('self_protection', 'defconfig', 'AMD_IOMMU', 'y'),
          iommu_support_is_set)]
l += [OR(KconfigCheck('cut_attack_surface', 'defconfig', 'STRICT_DEVMEM', 'y'),
devmem_not_set)] # refers to LOCKDOWN
if arch in ('X86_64', 'X86_32'):
- l += [KconfigCheck('cut_attack_surface', 'defconfig', 'X86_INTEL_TSX_MODE_OFF', 'y')] # tsx=off
+ l += [OR(KconfigCheck('cut_attack_surface', 'defconfig', 'X86_INTEL_TSX_MODE_OFF', 'y'), # tsx=off
+ cpu_sup_intel_not_set)]
# 'cut_attack_surface', 'kspp'
l += [KconfigCheck('cut_attack_surface', 'kspp', 'SECURITY_DMESG_RESTRICT', 'y')]
# 'cut_attack_surface', 'defconfig'
if arch in ('X86_64', 'X86_32'):
+ tsx_not_set = CmdlineCheck('cut_attack_surface', 'defconfig', 'tsx', 'is not set')
l += [OR(CmdlineCheck('cut_attack_surface', 'defconfig', 'tsx', 'off'),
AND(KconfigCheck('cut_attack_surface', 'defconfig', 'X86_INTEL_TSX_MODE_OFF', 'y'),
- CmdlineCheck('cut_attack_surface', 'defconfig', 'tsx', 'is not set')))]
+ tsx_not_set),
+ AND(KconfigCheck('cut_attack_surface', 'defconfig', 'CPU_SUP_INTEL', 'is not set'),
+ tsx_not_set))]
# 'cut_attack_surface', 'kspp'
l += [CmdlineCheck('cut_attack_surface', 'kspp', 'nosmt', 'is present')] # slow (high performance penalty)
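
For reference, below is a minimal sketch of how OR/AND combinators like the ones added in this patch can waive a vendor-specific check when the corresponding CPU vendor support is not built in. KconfigCheck, OR and AND are the names used in the patch; the evaluation logic and the parsed_kconfig dict are illustrative assumptions only, not the project's actual implementation.

    class KconfigCheck:
        # Illustrative stand-in: only the option name and expected value matter here.
        def __init__(self, reason, decision, name, expected):
            self.name, self.expected = name, expected
        def check(self, parsed_kconfig):
            if self.expected == 'is not set':
                return self.name not in parsed_kconfig
            return parsed_kconfig.get(self.name) == self.expected

    class OR:
        # Passes if any sub-check passes.
        def __init__(self, *checks):
            self.checks = checks
        def check(self, parsed_kconfig):
            return any(c.check(parsed_kconfig) for c in self.checks)

    class AND:
        # Passes only if every sub-check passes.
        def __init__(self, *checks):
            self.checks = checks
        def check(self, parsed_kconfig):
            return all(c.check(parsed_kconfig) for c in self.checks)

    # Example: X86_MCE_AMD=y is only demanded when AMD CPU support is built in.
    # Option names are given without the CONFIG_ prefix for brevity.
    parsed = {'X86_MCE': 'y', 'CPU_SUP_INTEL': 'y', 'X86_MCE_INTEL': 'y'}  # no CPU_SUP_AMD
    rule = OR(KconfigCheck('self_protection', 'defconfig', 'X86_MCE_AMD', 'y'),
              KconfigCheck('-', '-', 'CPU_SUP_AMD', 'is not set'))
    print(rule.check(parsed))  # True: AMD MCE is not required on an Intel-only kernel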