From a46e3e766cbac907a8b8f1bb4384bb1f063061ee Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tomasz=20=C5=BByjewski?=
Date: Tue, 25 Apr 2023 16:56:57 +0200
Subject: [PATCH] Add patches for Intel TXT support
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Tomasz Żyjewski
---
 ...intel_txt.h-constants-and-accessors-.patch | 304 ++++++++++
 ...t-add-MLE-header-and-new-entry-point.patch | 107 ++++
 ...ly-add-early-TXT-tests-and-restore-M.patch | 268 +++++++++
 1304-xen-arch-x86-reserve-TXT-memory.patch    | 193 +++++++
 1305-x86-intel_txt.c-restore-boot-MTRRs.patch | 117 ++++
 1306-x86-sha1.c-add-file.patch                | 231 ++++++++
 ...-for-early-hashing-and-extending-PCR.patch | 530 ++++++++++++++++++
 ...-measure-kernel-and-initrd-before-do.patch |  68 +++
 ...oot-choose-AP-stack-based-on-APIC-ID.patch | 113 ++++
 1310-x86-smpboot.c-TXT-AP-bringup.patch       | 177 ++++++
 ...t-use-XSM-policy-and-ucode-updates-w.patch |  60 ++
 xen.spec.in                                   |  14 +
 12 files changed, 2182 insertions(+)
 create mode 100644 1301-x86-include-asm-intel_txt.h-constants-and-accessors-.patch
 create mode 100644 1302-x86-boot-add-MLE-header-and-new-entry-point.patch
 create mode 100644 1303-x86-boot-txt_early-add-early-TXT-tests-and-restore-M.patch
 create mode 100644 1304-xen-arch-x86-reserve-TXT-memory.patch
 create mode 100644 1305-x86-intel_txt.c-restore-boot-MTRRs.patch
 create mode 100644 1306-x86-sha1.c-add-file.patch
 create mode 100644 1307-x86-tpm.c-code-for-early-hashing-and-extending-PCR.patch
 create mode 100644 1308-x86-dom0_build.c-measure-kernel-and-initrd-before-do.patch
 create mode 100644 1309-x86-boot-choose-AP-stack-based-on-APIC-ID.patch
 create mode 100644 1310-x86-smpboot.c-TXT-AP-bringup.patch
 create mode 100644 1311-x86-setup.c-don-t-use-XSM-policy-and-ucode-updates-w.patch

diff --git a/1301-x86-include-asm-intel_txt.h-constants-and-accessors-.patch b/1301-x86-include-asm-intel_txt.h-constants-and-accessors-.patch
new file mode 100644
index 00000000..10526eed
--- /dev/null
+++ b/1301-x86-include-asm-intel_txt.h-constants-and-accessors-.patch
@@ -0,0 +1,304 @@
+From 668ddbf4f8e7cc0fe61784bcce45201670437428 Mon Sep 17 00:00:00 2001
+From: Krystian Hebel
+Date: Mon, 17 Apr 2023 20:10:13 +0200
+Subject: [PATCH 01/11] x86/include/asm/intel_txt.h: constants and accessors
+ for TXT registers and heap
+
+This file contains the TXT register space base addresses, register
+offsets, error codes and inline functions for accessing structures
+stored on the TXT heap.
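+
+As a rough usage sketch (using only names defined in this header; error
+handling omitted), reading the heap location and walking to the OS-MLE
+table looks like:
+
+    void *heap = _txt(read_txt_reg(TXTCR_HEAP_BASE));
+    struct txt_os_mle_data *os_mle = txt_os_mle_data_start(heap);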
+ +Signed-off-by: Krystian Hebel +--- + xen/arch/x86/include/asm/intel_txt.h | 279 +++++++++++++++++++++++++++ + 1 file changed, 279 insertions(+) + create mode 100644 xen/arch/x86/include/asm/intel_txt.h + +diff --git a/xen/arch/x86/include/asm/intel_txt.h b/xen/arch/x86/include/asm/intel_txt.h +new file mode 100644 +index 000000000000..6ecc21508a77 +--- /dev/null ++++ b/xen/arch/x86/include/asm/intel_txt.h +@@ -0,0 +1,279 @@ ++/* ++ * TXT configuration registers (offsets from TXT_{PUB, PRIV}_CONFIG_REGS_BASE) ++ */ ++ ++#define TXT_PUB_CONFIG_REGS_BASE 0xfed30000 ++#define TXT_PRIV_CONFIG_REGS_BASE 0xfed20000 ++ ++#define NR_TXT_CONFIG_PAGES ((TXT_PUB_CONFIG_REGS_BASE - \ ++ TXT_PRIV_CONFIG_REGS_BASE) >> PAGE_SHIFT) ++ ++#define TXTCR_STS 0x0000 ++#define TXTCR_ESTS 0x0008 ++#define TXTCR_ERRORCODE 0x0030 ++#define TXTCR_CMD_RESET 0x0038 ++#define TXTCR_CMD_CLOSE_PRIVATE 0x0048 ++#define TXTCR_DIDVID 0x0110 ++#define TXTCR_VER_EMIF 0x0200 ++#define TXTCR_CMD_UNLOCK_MEM_CONFIG 0x0218 ++#define TXTCR_SINIT_BASE 0x0270 ++#define TXTCR_SINIT_SIZE 0x0278 ++#define TXTCR_MLE_JOIN 0x0290 ++#define TXTCR_HEAP_BASE 0x0300 ++#define TXTCR_HEAP_SIZE 0x0308 ++#define TXTCR_SCRATCHPAD 0x0378 ++#define TXTCR_CMD_OPEN_LOCALITY1 0x0380 ++#define TXTCR_CMD_CLOSE_LOCALITY1 0x0388 ++#define TXTCR_CMD_OPEN_LOCALITY2 0x0390 ++#define TXTCR_CMD_CLOSE_LOCALITY2 0x0398 ++#define TXTCR_CMD_SECRETS 0x08e0 ++#define TXTCR_CMD_NO_SECRETS 0x08e8 ++#define TXTCR_E2STS 0x08f0 ++ ++/* ++ * Secure Launch Defined Error Codes used in MLE-initiated TXT resets. ++ * ++ * TXT Specification ++ * Appendix I ACM Error Codes ++ */ ++#define SL_ERROR_GENERIC 0xc0008001 ++#define SL_ERROR_TPM_INIT 0xc0008002 ++#define SL_ERROR_TPM_INVALID_LOG20 0xc0008003 ++#define SL_ERROR_TPM_LOGGING_FAILED 0xc0008004 ++#define SL_ERROR_REGION_STRADDLE_4GB 0xc0008005 ++#define SL_ERROR_TPM_EXTEND 0xc0008006 ++#define SL_ERROR_MTRR_INV_VCNT 0xc0008007 ++#define SL_ERROR_MTRR_INV_DEF_TYPE 0xc0008008 ++#define SL_ERROR_MTRR_INV_BASE 0xc0008009 ++#define SL_ERROR_MTRR_INV_MASK 0xc000800a ++#define SL_ERROR_MSR_INV_MISC_EN 0xc000800b ++#define SL_ERROR_INV_AP_INTERRUPT 0xc000800c ++#define SL_ERROR_INTEGER_OVERFLOW 0xc000800d ++#define SL_ERROR_HEAP_WALK 0xc000800e ++#define SL_ERROR_HEAP_MAP 0xc000800f ++#define SL_ERROR_REGION_ABOVE_4GB 0xc0008010 ++#define SL_ERROR_HEAP_INVALID_DMAR 0xc0008011 ++#define SL_ERROR_HEAP_DMAR_SIZE 0xc0008012 ++#define SL_ERROR_HEAP_DMAR_MAP 0xc0008013 ++#define SL_ERROR_HI_PMR_BASE 0xc0008014 ++#define SL_ERROR_HI_PMR_SIZE 0xc0008015 ++#define SL_ERROR_LO_PMR_BASE 0xc0008016 ++#define SL_ERROR_LO_PMR_MLE 0xc0008017 ++#define SL_ERROR_INITRD_TOO_BIG 0xc0008018 ++#define SL_ERROR_HEAP_ZERO_OFFSET 0xc0008019 ++#define SL_ERROR_WAKE_BLOCK_TOO_SMALL 0xc000801a ++#define SL_ERROR_MLE_BUFFER_OVERLAP 0xc000801b ++#define SL_ERROR_BUFFER_BEYOND_PMR 0xc000801c ++#define SL_ERROR_OS_SINIT_BAD_VERSION 0xc000801d ++#define SL_ERROR_EVENTLOG_MAP 0xc000801e ++#define SL_ERROR_TPM_NUMBER_ALGS 0xc000801f ++#define SL_ERROR_TPM_UNKNOWN_DIGEST 0xc0008020 ++#define SL_ERROR_TPM_INVALID_EVENT 0xc0008021 ++ ++#define TXT_OS_MLE_MAX_VARIABLE_MTRRS 32 ++ ++#define SLAUNCH_BOOTLOADER_MAGIC 0x4c534254 ++ ++#ifndef __ASSEMBLY__ ++ ++/* We need to differentiate between pre- and post paging enabled. */ ++#ifdef __BOOT_DEFS_H__ ++#define _txt(x) _p(x) ++#else ++#include ++#include // __va() ++#define _txt(x) __va(x) ++#endif ++ ++/* ++ * Always use private space as some of registers are either read-only or not ++ * present in public space. 
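++ * The public space is generally a read-only mirror of a subset of these
++ * registers; the TXTCR_CMD_* registers in particular are only usable
++ * through the private space.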
++ */ ++static inline volatile uint64_t read_txt_reg(int reg_no) ++{ ++ volatile uint64_t *reg = _txt(TXT_PRIV_CONFIG_REGS_BASE + reg_no); ++ return *reg; ++} ++ ++static inline void write_txt_reg(int reg_no, uint64_t val) ++{ ++ volatile uint64_t *reg = _txt(TXT_PRIV_CONFIG_REGS_BASE + reg_no); ++ *reg = val; ++ /* This serves as TXT register barrier */ ++ (void)read_txt_reg(TXTCR_ESTS); ++} ++ ++static inline void txt_reset(uint32_t error) ++{ ++ write_txt_reg(TXTCR_ERRORCODE, error); ++ write_txt_reg(TXTCR_CMD_NO_SECRETS, 1); ++ write_txt_reg(TXTCR_CMD_UNLOCK_MEM_CONFIG, 1); ++ write_txt_reg(TXTCR_CMD_RESET, 1); ++ while (1); ++} ++ ++/* ++ * Secure Launch defined MTRR saving structures ++ */ ++struct txt_mtrr_pair { ++ uint64_t mtrr_physbase; ++ uint64_t mtrr_physmask; ++} __packed; ++ ++struct txt_mtrr_state { ++ uint64_t default_mem_type; ++ uint64_t mtrr_vcnt; ++ struct txt_mtrr_pair mtrr_pair[TXT_OS_MLE_MAX_VARIABLE_MTRRS]; ++} __packed; ++ ++/* ++ * Secure Launch defined OS/MLE TXT Heap table ++ */ ++struct txt_os_mle_data { ++ uint32_t version; ++ uint32_t boot_params_addr; ++ uint64_t saved_misc_enable_msr; ++ struct txt_mtrr_state saved_bsp_mtrrs; ++ uint32_t ap_wake_block; ++ uint32_t ap_wake_block_size; ++ uint64_t evtlog_addr; ++ uint32_t evtlog_size; ++ uint8_t mle_scratch[64]; ++} __packed; ++ ++/* ++ * TXT specification defined BIOS data TXT Heap table ++ */ ++struct txt_bios_data { ++ uint32_t version; /* Currently 5 for TPM 1.2 and 6 for TPM 2.0 */ ++ uint32_t bios_sinit_size; ++ uint64_t reserved1; ++ uint64_t reserved2; ++ uint32_t num_logical_procs; ++ /* Versions >= 3 && < 5 */ ++ uint32_t sinit_flags; ++ /* Versions >= 5 with updates in version 6 */ ++ uint32_t mle_flags; ++ /* Versions >= 4 */ ++ /* Ext Data Elements */ ++} __packed; ++ ++/* ++ * TXT specification defined OS/SINIT TXT Heap table ++ */ ++struct txt_os_sinit_data { ++ uint32_t version; /* Currently 6 for TPM 1.2 and 7 for TPM 2.0 */ ++ uint32_t flags; /* Reserved in version 6 */ ++ uint64_t mle_ptab; ++ uint64_t mle_size; ++ uint64_t mle_hdr_base; ++ uint64_t vtd_pmr_lo_base; ++ uint64_t vtd_pmr_lo_size; ++ uint64_t vtd_pmr_hi_base; ++ uint64_t vtd_pmr_hi_size; ++ uint64_t lcp_po_base; ++ uint64_t lcp_po_size; ++ uint32_t capabilities; ++ /* Version = 5 */ ++ uint64_t efi_rsdt_ptr; /* RSD*P* in versions >= 6 */ ++ /* Versions >= 6 */ ++ /* Ext Data Elements */ ++} __packed; ++ ++/* ++ * TXT specification defined SINIT/MLE TXT Heap table ++ */ ++struct txt_sinit_mle_data { ++ uint32_t version; /* Current values are 6 through 9 */ ++ /* Versions <= 8, fields until lcp_policy_control must be 0 for >= 9 */ ++ uint8_t bios_acm_id[20]; ++ uint32_t edx_senter_flags; ++ uint64_t mseg_valid; ++ uint8_t sinit_hash[20]; ++ uint8_t mle_hash[20]; ++ uint8_t stm_hash[20]; ++ uint8_t lcp_policy_hash[20]; ++ uint32_t lcp_policy_control; ++ /* Versions >= 7 */ ++ uint32_t rlp_wakeup_addr; ++ uint32_t reserved; ++ uint32_t num_of_sinit_mdrs; ++ uint32_t sinit_mdrs_table_offset; ++ uint32_t sinit_vtd_dmar_table_size; ++ uint32_t sinit_vtd_dmar_table_offset; ++ /* Versions >= 8 */ ++ uint32_t processor_scrtm_status; ++ /* Versions >= 9 */ ++ /* Ext Data Elements */ ++} __packed; ++ ++/* ++ * Functions to extract data from the Intel TXT Heap Memory. 
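All of these helpers take the heap's virtual base address and locate
++ * tables purely by chaining the per-table size fields.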
The layout ++ * of the heap is as follows: ++ * +---------------------------------+ ++ * | Size Bios Data table (uint64_t) | ++ * +---------------------------------+ ++ * | Bios Data table | ++ * +---------------------------------+ ++ * | Size OS MLE table (uint64_t) | ++ * +---------------------------------+ ++ * | OS MLE table | ++ * +-------------------------------- + ++ * | Size OS SINIT table (uint64_t) | ++ * +---------------------------------+ ++ * | OS SINIT table | ++ * +---------------------------------+ ++ * | Size SINIT MLE table (uint64_t) | ++ * +---------------------------------+ ++ * | SINIT MLE table | ++ * +---------------------------------+ ++ * ++ * NOTE: the table size fields include the 8 byte size field itself. ++ */ ++static inline uint64_t txt_bios_data_size(void *heap) ++{ ++ return *((uint64_t *)heap); ++} ++ ++static inline void *txt_bios_data_start(void *heap) ++{ ++ return heap + sizeof(uint64_t); ++} ++ ++static inline uint64_t txt_os_mle_data_size(void *heap) ++{ ++ return *((uint64_t *)(heap + txt_bios_data_size(heap))); ++} ++ ++static inline void *txt_os_mle_data_start(void *heap) ++{ ++ return heap + txt_bios_data_size(heap) + sizeof(uint64_t); ++} ++ ++static inline uint64_t txt_os_sinit_data_size(void *heap) ++{ ++ return *((uint64_t *)(heap + txt_bios_data_size(heap) + ++ txt_os_mle_data_size(heap))); ++} ++ ++static inline void *txt_os_sinit_data_start(void *heap) ++{ ++ return heap + txt_bios_data_size(heap) + ++ txt_os_mle_data_size(heap) + sizeof(uint64_t); ++} ++ ++static inline uint64_t txt_sinit_mle_data_size(void *heap) ++{ ++ return *((uint64_t *)(heap + txt_bios_data_size(heap) + ++ txt_os_mle_data_size(heap) + ++ txt_os_sinit_data_size(heap))); ++} ++ ++static inline void *txt_sinit_mle_data_start(void *heap) ++{ ++ return heap + txt_bios_data_size(heap) + ++ txt_os_mle_data_size(heap) + ++ txt_os_sinit_data_size(heap) + sizeof(uint64_t); ++} ++ ++#endif /* __ASSEMBLY__ */ +-- +2.34.1 + diff --git a/1302-x86-boot-add-MLE-header-and-new-entry-point.patch b/1302-x86-boot-add-MLE-header-and-new-entry-point.patch new file mode 100644 index 00000000..8cd47e18 --- /dev/null +++ b/1302-x86-boot-add-MLE-header-and-new-entry-point.patch @@ -0,0 +1,107 @@ +From 971f427a46b46e761239fd8200dad70ceffdba05 Mon Sep 17 00:00:00 2001 +From: Kacper Stojek +Date: Wed, 31 Aug 2022 15:03:51 +0200 +Subject: [PATCH 02/11] x86/boot: add MLE header and new entry point + +MLE header is used with Intel TXT, together with MB2 headers. +Entrypoint is different, but it is used just to differentiate +from other entries by moving a magic number to EAX. Execution +environment is similar to that of Multiboot 2 and code falls +through to MB2's entry point. + +Signed-off-by: Kacper Stojek +Signed-off-by: Krystian Hebel +--- + docs/hypervisor-guide/x86/how-xen-boots.rst | 5 +++ + xen/arch/x86/boot/head.S | 45 +++++++++++++++++++++ + 2 files changed, 50 insertions(+) + +diff --git a/docs/hypervisor-guide/x86/how-xen-boots.rst b/docs/hypervisor-guide/x86/how-xen-boots.rst +index ca77d7c8a333..eb60a1cd8031 100644 +--- a/docs/hypervisor-guide/x86/how-xen-boots.rst ++++ b/docs/hypervisor-guide/x86/how-xen-boots.rst +@@ -55,6 +55,11 @@ If ``CONFIG_PVH_GUEST`` was selected at build time, an Elf note is included + which indicates the ability to use the PVH boot protocol, and registers + ``__pvh_start`` as the entrypoint, entered in 32bit mode. + ++MLE header is used with Intel TXT, together with MB2 headers. 
The entry point
++is different, but is used only to distinguish this path from the other
++entries by moving a magic number to EAX. The execution environment is
++similar to that of Multiboot 2, and the code falls through to ``start``.
++
+ 
+ xen.gz
+ ~~~~~~
+diff --git a/xen/arch/x86/boot/head.S b/xen/arch/x86/boot/head.S
+index 0fb7dd3029f2..b2a1e0de8faf 100644
+--- a/xen/arch/x86/boot/head.S
++++ b/xen/arch/x86/boot/head.S
+@@ -3,6 +3,7 @@
+ #include
+ #include
+ #include
++#include
+ #include
+ #include
+ #include
+@@ -113,6 +114,25 @@ multiboot2_header:
+ .size multiboot2_header, . - multiboot2_header
+ .type multiboot2_header, @object
+ 
++ .balign 16
++mle_header:
++ .long 0x9082ac5a /* UUID0 */
++ .long 0x74a7476f /* UUID1 */
++ .long 0xa2555c0f /* UUID2 */
++ .long 0x42b651cb /* UUID3 */
++ .long 0x00000034 /* MLE header size */
++ .long 0x00020002 /* MLE version 2.2 */
++ .long (sl_stub_entry - start) /* Linear entry point of MLE (SINIT virt. address) */
++ .long 0x00000000 /* First valid page of MLE */
++ .long 0x00000000 /* Offset within binary of first byte of MLE */
++ .long 0x00000000 /* Offset within binary of last byte + 1 of MLE */
++ .long 0x00000223 /* Bit vector of MLE-supported capabilities */
++ .long 0x00000000 /* Starting linear address of command line (unused) */
++ .long 0x00000000 /* Ending linear address of command line (unused) */
++
++ .size mle_header, .-mle_header
++ .type mle_header, @object
++
+ .section .init.rodata, "a", @progbits
+ 
+ .Lbad_cpu_msg: .asciz "ERR: Not a 64-bit CPU!"
+@@ -425,6 +445,31 @@ __pvh_start:
+ 
+ #endif /* CONFIG_PVH_GUEST */
+ 
++ /*
++ * Entry point for TrenchBoot Secure Launch on Intel TXT platforms.
++ *
++ * CPU is in 32b protected mode with paging disabled. On entry:
++ * - %ebx = %ebp = SINIT physical base address
++ * - %edx = SENTER control flags
++ * - stack pointer is undefined
++ * - CS is flat 4GB code segment
++ * - DS, ES and SS are flat 4GB data segments
++ *
++ * Additional restrictions:
++ * - some MSRs are partially cleared, among them IA32_MISC_ENABLE, so
++ *   some capabilities might be reported as disabled even if they are
++ *   supported by CPU
++ * - interrupts (including NMIs and SMIs) are disabled and must be
++ *   enabled later
++ * - trying to enter real mode results in reset
++ * - APs must be brought up by MONITOR or GETSEC[WAKEUP], depending on
++ *   which is supported by a given SINIT ACM
++ */
++sl_stub_entry:
++ movl $SLAUNCH_BOOTLOADER_MAGIC,%eax
++
++ /* Fall through to Multiboot entry point. */
++
+ __start:
+ cld
+ cli
+-- 
+2.34.1
+
diff --git a/1303-x86-boot-txt_early-add-early-TXT-tests-and-restore-M.patch b/1303-x86-boot-txt_early-add-early-TXT-tests-and-restore-M.patch
new file mode 100644
index 00000000..45268537
--- /dev/null
+++ b/1303-x86-boot-txt_early-add-early-TXT-tests-and-restore-M.patch
@@ -0,0 +1,268 @@
+From 67dc7a18a02417de737314db61fa90f0498beb61 Mon Sep 17 00:00:00 2001
+From: Krystian Hebel
+Date: Mon, 17 Apr 2023 20:09:54 +0200
+Subject: [PATCH 03/11] x86/boot/txt_early: add early TXT tests and restore MBI
+ pointer
+
+These tests validate that the important parts of memory, including Xen
+itself and the MBI, are protected against DMA attacks. Modules can be
+tested later, when it is possible to report issues to the user before
+invoking a TXT reset.
+
+TPM event log validation is temporarily disabled due to an issue with its
+allocation by the bootloader. Ultimately the event log will also have to
+be validated early, as it is used immediately after these tests to hold
+MBI measurements.
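+
+The core containment check is plain interval arithmetic; a sketch of the
+low-range case (is_in_pmr() in this patch is the complete version):
+
+    /* The low PMR starts at 0, so its size is also its end address. */
+    if ( base >= os_sinit->vtd_pmr_lo_base &&
+         base + size <= os_sinit->vtd_pmr_lo_size )
+        return 1; /* region is DMA-protected */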
+ +Signed-off-by: Krystian Hebel +--- + xen/arch/x86/boot/Makefile | 2 +- + xen/arch/x86/boot/head.S | 25 +++++ + xen/arch/x86/boot/txt_early.c | 131 +++++++++++++++++++++++++++ + xen/arch/x86/include/asm/intel_txt.h | 26 ++++++ + 4 files changed, 183 insertions(+), 1 deletion(-) + create mode 100644 xen/arch/x86/boot/txt_early.c + +diff --git a/xen/arch/x86/boot/Makefile b/xen/arch/x86/boot/Makefile +index d6bc8fc084ae..34df17664aed 100644 +--- a/xen/arch/x86/boot/Makefile ++++ b/xen/arch/x86/boot/Makefile +@@ -1,6 +1,6 @@ + obj-bin-y += head.o + +-head-bin-objs := cmdline.o reloc.o ++head-bin-objs := cmdline.o reloc.o txt_early.o + + nocov-y += $(head-bin-objs) + noubsan-y += $(head-bin-objs) +diff --git a/xen/arch/x86/boot/head.S b/xen/arch/x86/boot/head.S +index b2a1e0de8faf..57d2aa67da0a 100644 +--- a/xen/arch/x86/boot/head.S ++++ b/xen/arch/x86/boot/head.S +@@ -498,6 +498,10 @@ __start: + /* Bootloaders may set multiboot{1,2}.mem_lower to a nonzero value. */ + xor %edx,%edx + ++ /* Check for TrenchBoot slaunch bootloader. */ ++ cmp $SLAUNCH_BOOTLOADER_MAGIC,%eax ++ je .Lslaunch_proto ++ + /* Check for Multiboot2 bootloader. */ + cmp $MULTIBOOT2_BOOTLOADER_MAGIC,%eax + je .Lmultiboot2_proto +@@ -513,6 +517,23 @@ __start: + cmovnz MB_mem_lower(%ebx),%edx + jmp trampoline_bios_setup + ++.Lslaunch_proto: ++ /* Save information that TrenchBoot slaunch was used. */ ++ movl $1, sym_esi(sl_status) ++ ++ /* Push arguments to stack and call txt_early_tests(). */ ++ push $sym_offs(__2M_rwdata_end) /* end of target image */ ++ push $sym_offs(_start) /* target base address */ ++ push %esi /* load base address */ ++ call txt_early_tests ++ ++ /* ++ * txt_early_tests() returns MBI address, move it to EBX, move magic ++ * number expected by Multiboot 2 to EAX and fall through. ++ */ ++ movl %eax,%ebx ++ movl $MULTIBOOT2_BOOTLOADER_MAGIC,%eax ++ + .Lmultiboot2_proto: + /* Skip Multiboot2 information fixed part. */ + lea (MB2_fixed_sizeof+MULTIBOOT2_TAG_ALIGN-1)(%ebx),%ecx +@@ -834,6 +855,10 @@ cmdline_parse_early: + reloc: + .incbin "reloc.bin" + ++ ALIGN ++txt_early_tests: ++ .incbin "txt_early.bin" ++ + ENTRY(trampoline_start) + #include "trampoline.S" + ENTRY(trampoline_end) +diff --git a/xen/arch/x86/boot/txt_early.c b/xen/arch/x86/boot/txt_early.c +new file mode 100644 +index 000000000000..f94b2c6cc05c +--- /dev/null ++++ b/xen/arch/x86/boot/txt_early.c +@@ -0,0 +1,131 @@ ++/* ++ * Copyright (c) 2022-2023 3mdeb Sp. z o.o. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program. If not, see . ++ */ ++ ++/* ++ * This entry point is entered from xen/arch/x86/boot/head.S with Xen base at ++ * 0x4(%esp). A pointer to MBI is returned in %eax. 
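++ * The target image base and end addresses follow at 0x8(%esp) and
++ * 0xc(%esp); see the .Lslaunch_proto call site in head.S.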
++ */ ++asm ( ++ " .text \n" ++ " .globl _start \n" ++ "_start: \n" ++ " jmp txt_early_tests \n" ++ ); ++ ++#include "defs.h" ++#include "../include/asm/intel_txt.h" ++ ++static void verify_pmr_ranges(struct txt_os_mle_data *os_mle, ++ struct txt_os_sinit_data *os_sinit, ++ uint32_t load_base_addr, uint32_t tgt_base_addr, ++ uint32_t xen_size) ++{ ++ int check_high_pmr = 0; ++ ++ /* Verify the value of the low PMR base. It should always be 0. */ ++ if (os_sinit->vtd_pmr_lo_base != 0) ++ txt_reset(SL_ERROR_LO_PMR_BASE); ++ ++ /* ++ * Low PMR size should not be 0 on current platforms. There is an ongoing ++ * transition to TPR-based DMA protection instead of PMR-based; this is not ++ * yet supported by the code. ++ */ ++ if (os_sinit->vtd_pmr_lo_size == 0) ++ txt_reset(SL_ERROR_LO_PMR_BASE); ++ ++ /* Check if regions overlap. Treat regions with no hole between as error. */ ++ if (os_sinit->vtd_pmr_hi_size != 0 && ++ os_sinit->vtd_pmr_hi_base <= os_sinit->vtd_pmr_lo_size) ++ txt_reset(SL_ERROR_HI_PMR_BASE); ++ ++ /* All regions accessed by 32b code must be below 4G. */ ++ if (os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <= 0x100000000ull) ++ check_high_pmr = 1; ++ ++ /* ++ * ACM checks that TXT heap and MLE memory is protected against DMA. We have ++ * to check if MBI and whole Xen memory is protected. The latter is done in ++ * case bootloader failed to set whole image as MLE and to make sure that ++ * both pre- and post-relocation code is protected. ++ */ ++ ++ /* Check if all of Xen before relocation is covered by PMR. */ ++ if (!is_in_pmr(os_sinit, load_base_addr, xen_size, check_high_pmr)) ++ txt_reset(SL_ERROR_LO_PMR_MLE); ++ ++ /* Check if all of Xen after relocation is covered by PMR. */ ++ if (load_base_addr != tgt_base_addr && ++ !is_in_pmr(os_sinit, tgt_base_addr, xen_size, check_high_pmr)) ++ txt_reset(SL_ERROR_LO_PMR_MLE); ++ ++ /* Check if MBI is covered by PMR. MBI starts with 'uint32_t total_size'. */ ++ if (!is_in_pmr(os_sinit, os_mle->boot_params_addr, ++ *(uint32_t *)os_mle->boot_params_addr, check_high_pmr)) ++ txt_reset(SL_ERROR_BUFFER_BEYOND_PMR); ++ ++ /* Check if TPM event log (if present) is covered by PMR. */ ++ /* ++ * FIXME: currently commented out as GRUB allocates it in a hole between ++ * PMR and reserved RAM, due to 2MB resolution of PMR. There are no other ++ * easy-to-use DMA protection mechanisms that would allow to protect that ++ * part of memory. TPR (TXT DMA Protection Range) gives 1MB resolution, but ++ * it still wouldn't be enough. ++ * ++ * One possible solution would be for GRUB to allocate log at lower address, ++ * but this would further increase memory space fragmentation. Another ++ * option is to align PMR up instead of down, making PMR cover part of ++ * reserved region, but it is unclear what the consequences may be. ++ * ++ * In tboot this issue was resolved by reserving leftover chunks of memory ++ * in e820 and/or UEFI memory map. This is also a valid solution, but would ++ * require more changes to GRUB than the ones listed above, as event log is ++ * allocated much earlier than PMRs. 
++ */ ++ /* ++ if (os_mle->evtlog_addr != 0 && os_mle->evtlog_size != 0 && ++ !is_in_pmr(os_sinit, os_mle->evtlog_addr, os_mle->evtlog_size, ++ check_high_pmr)) ++ txt_reset(SL_ERROR_BUFFER_BEYOND_PMR); ++ */ ++} ++ ++uint32_t __stdcall txt_early_tests(uint32_t load_base_addr, ++ uint32_t tgt_base_addr, ++ uint32_t tgt_end_addr) ++{ ++ void *txt_heap; ++ struct txt_os_mle_data *os_mle; ++ struct txt_os_sinit_data *os_sinit; ++ uint32_t size = tgt_end_addr - tgt_base_addr; ++ ++ /* Clear the TXT error registers for a clean start of day */ ++ write_txt_reg(TXTCR_ERRORCODE, 0); ++ ++ txt_heap = _p(read_txt_reg(TXTCR_HEAP_BASE)); ++ ++ if (txt_os_mle_data_size(txt_heap) < sizeof(*os_mle) || ++ txt_os_sinit_data_size(txt_heap) < sizeof(*os_sinit)) ++ txt_reset(SL_ERROR_GENERIC); ++ ++ os_mle = txt_os_mle_data_start(txt_heap); ++ os_sinit = txt_os_sinit_data_start(txt_heap); ++ ++ verify_pmr_ranges(os_mle, os_sinit, load_base_addr, tgt_base_addr, size); ++ ++ return os_mle->boot_params_addr; ++} +diff --git a/xen/arch/x86/include/asm/intel_txt.h b/xen/arch/x86/include/asm/intel_txt.h +index 6ecc21508a77..aba544e2e694 100644 +--- a/xen/arch/x86/include/asm/intel_txt.h ++++ b/xen/arch/x86/include/asm/intel_txt.h +@@ -76,6 +76,8 @@ + + #ifndef __ASSEMBLY__ + ++extern unsigned long sl_status; ++ + /* We need to differentiate between pre- and post paging enabled. */ + #ifdef __BOOT_DEFS_H__ + #define _txt(x) _p(x) +@@ -276,4 +278,28 @@ static inline void *txt_sinit_mle_data_start(void *heap) + txt_os_sinit_data_size(heap) + sizeof(uint64_t); + } + ++static inline int is_in_pmr(struct txt_os_sinit_data *os_sinit, uint64_t base, ++ uint32_t size, int check_high) ++{ ++ /* Check for size overflow. */ ++ if (base + size < base) ++ txt_reset(SL_ERROR_INTEGER_OVERFLOW); ++ ++ /* Low range always starts at 0, so its size is also end address. */ ++ if (base >= os_sinit->vtd_pmr_lo_base && ++ base + size <= os_sinit->vtd_pmr_lo_size) ++ return 1; ++ ++ if (check_high && os_sinit->vtd_pmr_hi_size != 0) { ++ if (os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size < ++ os_sinit->vtd_pmr_hi_size) ++ txt_reset(SL_ERROR_INTEGER_OVERFLOW); ++ if (base >= os_sinit->vtd_pmr_hi_base && ++ base + size <= os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size) ++ return 1; ++ } ++ ++ return 0; ++} ++ + #endif /* __ASSEMBLY__ */ +-- +2.34.1 + diff --git a/1304-xen-arch-x86-reserve-TXT-memory.patch b/1304-xen-arch-x86-reserve-TXT-memory.patch new file mode 100644 index 00000000..796a1448 --- /dev/null +++ b/1304-xen-arch-x86-reserve-TXT-memory.patch @@ -0,0 +1,193 @@ +From 1d338620a88d01e0529c5dc1d78cd2ebe5d4c481 Mon Sep 17 00:00:00 2001 +From: Kacper Stojek +Date: Fri, 2 Sep 2022 08:11:43 +0200 +Subject: [PATCH 04/11] xen/arch/x86: reserve TXT memory + +TXT heap is marked as reserved in e820 to protect against being allocated +and overwritten. 
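+
+The reservation itself is a plain e820 type change, made before the raw
+map is sanitised (as done in protect_txt_mem_regions() below):
+
+    e820_change_range_type(&e820_raw, txt_heap_base,
+                           txt_heap_base + txt_heap_size,
+                           E820_RAM, E820_RESERVED);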
+ +Signed-off-by: Krystian Hebel +--- + xen/arch/x86/Makefile | 1 + + xen/arch/x86/include/asm/intel_txt.h | 2 + + xen/arch/x86/intel_txt.c | 121 +++++++++++++++++++++++++++ + xen/arch/x86/setup.c | 5 ++ + 4 files changed, 129 insertions(+) + create mode 100644 xen/arch/x86/intel_txt.c + +diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile +index 177a2ff74272..312932d50f02 100644 +--- a/xen/arch/x86/Makefile ++++ b/xen/arch/x86/Makefile +@@ -55,6 +55,7 @@ obj-y += percpu.o + obj-y += physdev.o + obj-$(CONFIG_COMPAT) += x86_64/physdev.o + obj-y += psr.o ++obj-y += intel_txt.o + obj-y += setup.o + obj-y += shutdown.o + obj-y += smp.o +diff --git a/xen/arch/x86/include/asm/intel_txt.h b/xen/arch/x86/include/asm/intel_txt.h +index aba544e2e694..46532329c205 100644 +--- a/xen/arch/x86/include/asm/intel_txt.h ++++ b/xen/arch/x86/include/asm/intel_txt.h +@@ -302,4 +302,6 @@ static inline int is_in_pmr(struct txt_os_sinit_data *os_sinit, uint64_t base, + return 0; + } + ++extern void protect_txt_mem_regions(void); ++ + #endif /* __ASSEMBLY__ */ +diff --git a/xen/arch/x86/intel_txt.c b/xen/arch/x86/intel_txt.c +new file mode 100644 +index 000000000000..85ce934d198d +--- /dev/null ++++ b/xen/arch/x86/intel_txt.c +@@ -0,0 +1,121 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static uint64_t __initdata txt_heap_base, txt_heap_size; ++ ++unsigned long __initdata sl_status; ++ ++#define PREBUILT_MAP_LIMIT (1 << L2_PAGETABLE_SHIFT) ++ ++/* ++ * These helper functions are used to (un)map memory using L2 page tables by ++ * aligning mapped regions to 2MB. This way page allocator (which at this point ++ * isn't yet initialized) isn't needed for creating new L1 mappings. Functions ++ * also check and skip memory already mapped by the prebuilt tables. ++ * ++ * There are no tests against multiple mappings in the same superpage, in such ++ * case first call to unmap_l2() destroys all mappings to given memory range. 
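++ *
++ * For example, a request for [0x5ff000, 0x601000) is widened to the two
++ * superpages covering [0x400000, 0x800000) before being mapped.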
++ */ ++static int map_l2(unsigned long paddr, unsigned long size) ++{ ++ unsigned long aligned_paddr = paddr & ~((1ULL << L2_PAGETABLE_SHIFT) - 1); ++ unsigned long pages = ((paddr + size) - aligned_paddr); ++ pages += (1ULL << L2_PAGETABLE_SHIFT) - 1; ++ pages &= ~((1ULL << L2_PAGETABLE_SHIFT) - 1); ++ pages >>= PAGE_SHIFT; ++ ++ if ( (aligned_paddr + pages * PAGE_SIZE) <= PREBUILT_MAP_LIMIT ) ++ return 0; ++ ++ if ( aligned_paddr < PREBUILT_MAP_LIMIT ) { ++ pages -= (PREBUILT_MAP_LIMIT - aligned_paddr) >> PAGE_SHIFT; ++ aligned_paddr = PREBUILT_MAP_LIMIT; ++ } ++ ++ return map_pages_to_xen((unsigned long)__va(aligned_paddr), ++ maddr_to_mfn(aligned_paddr), ++ pages, PAGE_HYPERVISOR); ++} ++ ++static int unmap_l2(unsigned long paddr, unsigned long size) ++{ ++ unsigned long aligned_paddr = paddr & ~((1ULL << L2_PAGETABLE_SHIFT) - 1); ++ unsigned long pages = ((paddr + size) - aligned_paddr); ++ pages += (1ULL << L2_PAGETABLE_SHIFT) - 1; ++ pages &= ~((1ULL << L2_PAGETABLE_SHIFT) - 1); ++ pages >>= PAGE_SHIFT; ++ ++ if ( (aligned_paddr + pages * PAGE_SIZE) <= PREBUILT_MAP_LIMIT ) ++ return 0; ++ ++ if ( aligned_paddr < PREBUILT_MAP_LIMIT ) { ++ pages -= (PREBUILT_MAP_LIMIT - aligned_paddr) >> PAGE_SHIFT; ++ aligned_paddr = PREBUILT_MAP_LIMIT; ++ } ++ ++ return destroy_xen_mappings(aligned_paddr, ++ aligned_paddr + pages * PAGE_SIZE); ++} ++ ++void __init protect_txt_mem_regions(void) ++{ ++ uint64_t sinit_base, sinit_size; ++ ++ map_l2(TXT_PUB_CONFIG_REGS_BASE, NR_TXT_CONFIG_PAGES * PAGE_SIZE); ++ ++ txt_heap_base = txt_heap_size = sinit_base = sinit_size = 0; ++ ++ /* TXT Heap */ ++ txt_heap_base = read_txt_reg(TXTCR_HEAP_BASE); ++ txt_heap_size = read_txt_reg(TXTCR_HEAP_SIZE); ++ /* SINIT */ ++ sinit_base = read_txt_reg(TXTCR_SINIT_BASE); ++ sinit_size = read_txt_reg(TXTCR_SINIT_SIZE); ++ ++ /* Remove mapping of TXT register space. */ ++ unmap_l2(TXT_PUB_CONFIG_REGS_BASE, NR_TXT_CONFIG_PAGES * PAGE_SIZE); ++ ++ /* TXT Heap */ ++ if ( txt_heap_base != 0 ) { ++ struct txt_os_mle_data *os_mle; ++ ++ printk("SLAUNCH: reserving TXT heap (%#lx - %#lx)\n", txt_heap_base, ++ txt_heap_base + txt_heap_size); ++ e820_change_range_type(&e820_raw, txt_heap_base, ++ txt_heap_base + txt_heap_size, ++ E820_RAM, E820_RESERVED); ++ ++ /* TXT TPM Event Log */ ++ map_l2(txt_heap_base, txt_heap_size); ++ os_mle = txt_os_mle_data_start(__va(txt_heap_base)); ++ ++ if ( os_mle->evtlog_addr != 0 ) { ++ printk("SLAUNCH: reserving event log (%#lx - %#lx)\n", os_mle->evtlog_addr, ++ os_mle->evtlog_addr + os_mle->evtlog_size); ++ e820_change_range_type(&e820_raw, os_mle->evtlog_addr, ++ os_mle->evtlog_addr + os_mle->evtlog_size, ++ E820_RAM, E820_RESERVED); ++ } ++ ++ unmap_l2(txt_heap_base, txt_heap_size); ++ } ++ ++ /* SINIT */ ++ if ( sinit_base != 0 ) { ++ printk("SLAUNCH: reserving SINIT memory (%#lx - %#lx)\n", sinit_base, ++ sinit_base + sinit_size); ++ e820_change_range_type(&e820_raw, sinit_base, ++ sinit_base + sinit_size, ++ E820_RAM, E820_RESERVED); ++ } ++ ++ /* TXT Private Space */ ++ e820_change_range_type(&e820_raw, TXT_PRIV_CONFIG_REGS_BASE, ++ TXT_PRIV_CONFIG_REGS_BASE + NR_TXT_CONFIG_PAGES * PAGE_SIZE, ++ E820_RAM, E820_UNUSABLE); ++} +diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c +index e05189f64997..a8816a93f601 100644 +--- a/xen/arch/x86/setup.c ++++ b/xen/arch/x86/setup.c +@@ -55,6 +55,7 @@ + #include + #include + #include ++#include + + /* opt_nosmp: If true, secondary processors are ignored. 
*/ + static bool __initdata opt_nosmp; +@@ -1140,6 +1141,10 @@ void __init noreturn __start_xen(unsigned long mbi_p) + #endif + } + ++ /* Reserve TXT heap and SINIT for Secure Launch path. */ ++ if ( sl_status ) ++ protect_txt_mem_regions(); ++ + /* Sanitise the raw E820 map to produce a final clean version. */ + max_page = raw_max_page = init_e820(memmap_type, &e820_raw); + +-- +2.34.1 + diff --git a/1305-x86-intel_txt.c-restore-boot-MTRRs.patch b/1305-x86-intel_txt.c-restore-boot-MTRRs.patch new file mode 100644 index 00000000..e7f60852 --- /dev/null +++ b/1305-x86-intel_txt.c-restore-boot-MTRRs.patch @@ -0,0 +1,117 @@ +From de5a0dc58253bacc4bc89bf4a9d71ecc7185081a Mon Sep 17 00:00:00 2001 +From: Krystian Hebel +Date: Wed, 19 Oct 2022 19:52:24 +0200 +Subject: [PATCH 05/11] x86/intel_txt.c: restore boot MTRRs + +In preparation for TXT SENTER call, GRUB had to modify MTRR settings +to be UC for everything except SINIT ACM. Old values are restored +from TXT heap where they were saved by the bootloader. + +Signed-off-by: Krystian Hebel +--- + xen/arch/x86/e820.c | 4 ++ + xen/arch/x86/include/asm/intel_txt.h | 1 + + xen/arch/x86/intel_txt.c | 57 ++++++++++++++++++++++++++++ + 3 files changed, 62 insertions(+) + +diff --git a/xen/arch/x86/e820.c b/xen/arch/x86/e820.c +index b653a19c93af..03a0a86902e2 100644 +--- a/xen/arch/x86/e820.c ++++ b/xen/arch/x86/e820.c +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + + /* + * opt_mem: Limit maximum address of physical RAM. +@@ -455,6 +456,9 @@ static uint64_t __init mtrr_top_of_ram(void) + rdmsrl(MSR_MTRRcap, mtrr_cap); + rdmsrl(MSR_MTRRdefType, mtrr_def); + ++ if ( sl_status ) ++ txt_restore_mtrrs(e820_verbose); ++ + if ( e820_verbose ) + printk(" MTRR cap: %"PRIx64" type: %"PRIx64"\n", mtrr_cap, mtrr_def); + +diff --git a/xen/arch/x86/include/asm/intel_txt.h b/xen/arch/x86/include/asm/intel_txt.h +index 46532329c205..09cafbc803d4 100644 +--- a/xen/arch/x86/include/asm/intel_txt.h ++++ b/xen/arch/x86/include/asm/intel_txt.h +@@ -303,5 +303,6 @@ static inline int is_in_pmr(struct txt_os_sinit_data *os_sinit, uint64_t base, + } + + extern void protect_txt_mem_regions(void); ++extern void txt_restore_mtrrs(bool e820_verbose); + + #endif /* __ASSEMBLY__ */ +diff --git a/xen/arch/x86/intel_txt.c b/xen/arch/x86/intel_txt.c +index 85ce934d198d..0edb8a6a85a3 100644 +--- a/xen/arch/x86/intel_txt.c ++++ b/xen/arch/x86/intel_txt.c +@@ -119,3 +119,60 @@ void __init protect_txt_mem_regions(void) + TXT_PRIV_CONFIG_REGS_BASE + NR_TXT_CONFIG_PAGES * PAGE_SIZE, + E820_RAM, E820_UNUSABLE); + } ++ ++void __init txt_restore_mtrrs(bool e820_verbose) ++{ ++ struct txt_os_mle_data *os_mle; ++ int os_mle_size; ++ uint64_t mtrr_cap, mtrr_def, base, mask; ++ unsigned int i; ++ ++ map_l2(txt_heap_base, txt_heap_size); ++ ++ os_mle_size = txt_os_mle_data_size(__va(txt_heap_base)); ++ os_mle = txt_os_mle_data_start(__va(txt_heap_base)); ++ ++ if ( os_mle_size < sizeof(*os_mle) ) ++ panic("OS-MLE too small\n"); ++ ++ rdmsrl(MSR_MTRRcap, mtrr_cap); ++ rdmsrl(MSR_MTRRdefType, mtrr_def); ++ ++ if ( e820_verbose ) { ++ printk("MTRRs set previously for SINIT ACM:\n"); ++ printk(" MTRR cap: %"PRIx64" type: %"PRIx64"\n", mtrr_cap, mtrr_def); ++ ++ for ( i = 0; i < (uint8_t)mtrr_cap; i++ ) ++ { ++ rdmsrl(MSR_IA32_MTRR_PHYSBASE(i), base); ++ rdmsrl(MSR_IA32_MTRR_PHYSMASK(i), mask); ++ ++ printk(" MTRR[%d]: base %"PRIx64" mask %"PRIx64"\n", ++ i, base, mask); ++ } ++ } ++ ++ if ( (mtrr_cap & 0xFF) != os_mle->saved_bsp_mtrrs.mtrr_vcnt ) { ++ printk("Bootloader saved %ld MTRR 
values, but there should be %ld\n", ++ os_mle->saved_bsp_mtrrs.mtrr_vcnt, mtrr_cap & 0xFF); ++ /* Choose the smaller one to be on the safe side. */ ++ mtrr_cap = (mtrr_cap & 0xFF) > os_mle->saved_bsp_mtrrs.mtrr_vcnt ? ++ os_mle->saved_bsp_mtrrs.mtrr_vcnt : mtrr_cap; ++ } ++ ++ /* Restore MTRRs saved by bootloader. */ ++ wrmsrl(MSR_MTRRdefType, os_mle->saved_bsp_mtrrs.default_mem_type); ++ ++ for ( i = 0; i < (uint8_t)mtrr_cap; i++ ) ++ { ++ base = os_mle->saved_bsp_mtrrs.mtrr_pair[i].mtrr_physbase; ++ mask = os_mle->saved_bsp_mtrrs.mtrr_pair[i].mtrr_physmask; ++ wrmsrl(MSR_IA32_MTRR_PHYSBASE(i), base); ++ wrmsrl(MSR_IA32_MTRR_PHYSMASK(i), mask); ++ } ++ ++ unmap_l2(txt_heap_base, txt_heap_size); ++ ++ if ( e820_verbose ) ++ printk("Restored MTRRs:\n"); /* Printed by caller, mtrr_top_of_ram(). */ ++} +-- +2.34.1 + diff --git a/1306-x86-sha1.c-add-file.patch b/1306-x86-sha1.c-add-file.patch new file mode 100644 index 00000000..2b6c93b8 --- /dev/null +++ b/1306-x86-sha1.c-add-file.patch @@ -0,0 +1,231 @@ +From c90bc4b8f65f70b5ab72dbca5846c8b3dbd94234 Mon Sep 17 00:00:00 2001 +From: Krystian Hebel +Date: Tue, 25 Oct 2022 16:04:17 +0200 +Subject: [PATCH 06/11] x86/sha1.c: add file + +File comes from [1] and is licensed under MIT License. Only enough +changes to make it compile under Xen and to swap endianness of result +were made to the original file. + +[1] https://www.nayuki.io/page/fast-sha1-hash-implementation-in-x86-assembly + +Signed-off-by: Krystian Hebel +--- + xen/arch/x86/Makefile | 1 + + xen/arch/x86/sha1.c | 192 ++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 193 insertions(+) + create mode 100644 xen/arch/x86/sha1.c + +diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile +index 312932d50f02..bc302ff0d0a6 100644 +--- a/xen/arch/x86/Makefile ++++ b/xen/arch/x86/Makefile +@@ -57,6 +57,7 @@ obj-$(CONFIG_COMPAT) += x86_64/physdev.o + obj-y += psr.o + obj-y += intel_txt.o + obj-y += setup.o ++obj-y += sha1.o + obj-y += shutdown.o + obj-y += smp.o + obj-y += smpboot.o +diff --git a/xen/arch/x86/sha1.c b/xen/arch/x86/sha1.c +new file mode 100644 +index 000000000000..f62305b423f1 +--- /dev/null ++++ b/xen/arch/x86/sha1.c +@@ -0,0 +1,192 @@ ++/* ++ * SHA-1 hash in C ++ * ++ * Copyright (c) 2021 Project Nayuki. (MIT License) ++ * https://www.nayuki.io/page/fast-sha1-hash-implementation-in-x86-assembly ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a copy of ++ * this software and associated documentation files (the "Software"), to deal in ++ * the Software without restriction, including without limitation the rights to ++ * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of ++ * the Software, and to permit persons to whom the Software is furnished to do so, ++ * subject to the following conditions: ++ * - The above copyright notice and this permission notice shall be included in ++ * all copies or substantial portions of the Software. ++ * - The Software is provided "as is", without warranty of any kind, express or ++ * implied, including but not limited to the warranties of merchantability, ++ * fitness for a particular purpose and noninfringement. In no event shall the ++ * authors or copyright holders be liable for any claim, damages or other ++ * liability, whether in an action of contract, tort or otherwise, arising from, ++ * out of or in connection with the Software or the use or other dealings in the ++ * Software. 
++ */ ++ ++#include ++#include ++ ++#define UINT32_C(v) v ## U ++ ++#define BLOCK_LEN 64 // In bytes ++#define STATE_LEN 5 // In words ++ ++static void sha1_compress(const uint8_t block[static 64], uint32_t state[static 5]) { ++ #define ROTL32(x, n) (((0U + (x)) << (n)) | ((x) >> (32 - (n)))) // Assumes that x is uint32_t and 0 < n < 32 ++ ++ #define LOADSCHEDULE(i) \ ++ schedule[i] = (uint32_t)block[i * 4 + 0] << 24 \ ++ | (uint32_t)block[i * 4 + 1] << 16 \ ++ | (uint32_t)block[i * 4 + 2] << 8 \ ++ | (uint32_t)block[i * 4 + 3] << 0; ++ ++ #define SCHEDULE(i) \ ++ temp = schedule[(i - 3) & 0xF] ^ schedule[(i - 8) & 0xF] ^ schedule[(i - 14) & 0xF] ^ schedule[(i - 16) & 0xF]; \ ++ schedule[i & 0xF] = ROTL32(temp, 1); ++ ++ #define ROUND0a(a, b, c, d, e, i) LOADSCHEDULE(i) ROUNDTAIL(a, b, e, ((b & c) | (~b & d)) , i, 0x5A827999) ++ #define ROUND0b(a, b, c, d, e, i) SCHEDULE(i) ROUNDTAIL(a, b, e, ((b & c) | (~b & d)) , i, 0x5A827999) ++ #define ROUND1(a, b, c, d, e, i) SCHEDULE(i) ROUNDTAIL(a, b, e, (b ^ c ^ d) , i, 0x6ED9EBA1) ++ #define ROUND2(a, b, c, d, e, i) SCHEDULE(i) ROUNDTAIL(a, b, e, ((b & c) ^ (b & d) ^ (c & d)), i, 0x8F1BBCDC) ++ #define ROUND3(a, b, c, d, e, i) SCHEDULE(i) ROUNDTAIL(a, b, e, (b ^ c ^ d) , i, 0xCA62C1D6) ++ ++ #define ROUNDTAIL(a, b, e, f, i, k) \ ++ e = 0U + e + ROTL32(a, 5) + f + UINT32_C(k) + schedule[i & 0xF]; \ ++ b = ROTL32(b, 30); ++ ++ uint32_t a = state[0]; ++ uint32_t b = state[1]; ++ uint32_t c = state[2]; ++ uint32_t d = state[3]; ++ uint32_t e = state[4]; ++ ++ uint32_t schedule[16]; ++ uint32_t temp; ++ ROUND0a(a, b, c, d, e, 0) ++ ROUND0a(e, a, b, c, d, 1) ++ ROUND0a(d, e, a, b, c, 2) ++ ROUND0a(c, d, e, a, b, 3) ++ ROUND0a(b, c, d, e, a, 4) ++ ROUND0a(a, b, c, d, e, 5) ++ ROUND0a(e, a, b, c, d, 6) ++ ROUND0a(d, e, a, b, c, 7) ++ ROUND0a(c, d, e, a, b, 8) ++ ROUND0a(b, c, d, e, a, 9) ++ ROUND0a(a, b, c, d, e, 10) ++ ROUND0a(e, a, b, c, d, 11) ++ ROUND0a(d, e, a, b, c, 12) ++ ROUND0a(c, d, e, a, b, 13) ++ ROUND0a(b, c, d, e, a, 14) ++ ROUND0a(a, b, c, d, e, 15) ++ ROUND0b(e, a, b, c, d, 16) ++ ROUND0b(d, e, a, b, c, 17) ++ ROUND0b(c, d, e, a, b, 18) ++ ROUND0b(b, c, d, e, a, 19) ++ ROUND1(a, b, c, d, e, 20) ++ ROUND1(e, a, b, c, d, 21) ++ ROUND1(d, e, a, b, c, 22) ++ ROUND1(c, d, e, a, b, 23) ++ ROUND1(b, c, d, e, a, 24) ++ ROUND1(a, b, c, d, e, 25) ++ ROUND1(e, a, b, c, d, 26) ++ ROUND1(d, e, a, b, c, 27) ++ ROUND1(c, d, e, a, b, 28) ++ ROUND1(b, c, d, e, a, 29) ++ ROUND1(a, b, c, d, e, 30) ++ ROUND1(e, a, b, c, d, 31) ++ ROUND1(d, e, a, b, c, 32) ++ ROUND1(c, d, e, a, b, 33) ++ ROUND1(b, c, d, e, a, 34) ++ ROUND1(a, b, c, d, e, 35) ++ ROUND1(e, a, b, c, d, 36) ++ ROUND1(d, e, a, b, c, 37) ++ ROUND1(c, d, e, a, b, 38) ++ ROUND1(b, c, d, e, a, 39) ++ ROUND2(a, b, c, d, e, 40) ++ ROUND2(e, a, b, c, d, 41) ++ ROUND2(d, e, a, b, c, 42) ++ ROUND2(c, d, e, a, b, 43) ++ ROUND2(b, c, d, e, a, 44) ++ ROUND2(a, b, c, d, e, 45) ++ ROUND2(e, a, b, c, d, 46) ++ ROUND2(d, e, a, b, c, 47) ++ ROUND2(c, d, e, a, b, 48) ++ ROUND2(b, c, d, e, a, 49) ++ ROUND2(a, b, c, d, e, 50) ++ ROUND2(e, a, b, c, d, 51) ++ ROUND2(d, e, a, b, c, 52) ++ ROUND2(c, d, e, a, b, 53) ++ ROUND2(b, c, d, e, a, 54) ++ ROUND2(a, b, c, d, e, 55) ++ ROUND2(e, a, b, c, d, 56) ++ ROUND2(d, e, a, b, c, 57) ++ ROUND2(c, d, e, a, b, 58) ++ ROUND2(b, c, d, e, a, 59) ++ ROUND3(a, b, c, d, e, 60) ++ ROUND3(e, a, b, c, d, 61) ++ ROUND3(d, e, a, b, c, 62) ++ ROUND3(c, d, e, a, b, 63) ++ ROUND3(b, c, d, e, a, 64) ++ ROUND3(a, b, c, d, e, 65) ++ ROUND3(e, a, b, c, d, 66) ++ ROUND3(d, e, a, b, c, 67) ++ 
ROUND3(c, d, e, a, b, 68)
++ ROUND3(b, c, d, e, a, 69)
++ ROUND3(a, b, c, d, e, 70)
++ ROUND3(e, a, b, c, d, 71)
++ ROUND3(d, e, a, b, c, 72)
++ ROUND3(c, d, e, a, b, 73)
++ ROUND3(b, c, d, e, a, 74)
++ ROUND3(a, b, c, d, e, 75)
++ ROUND3(e, a, b, c, d, 76)
++ ROUND3(d, e, a, b, c, 77)
++ ROUND3(c, d, e, a, b, 78)
++ ROUND3(b, c, d, e, a, 79)
++
++ state[0] = 0U + state[0] + a;
++ state[1] = 0U + state[1] + b;
++ state[2] = 0U + state[2] + c;
++ state[3] = 0U + state[3] + d;
++ state[4] = 0U + state[4] + e;
++}
++
++/* Full message hasher */
++
++void sha1_hash(const uint8_t message[], size_t len, uint32_t hash[static STATE_LEN]) {
++ uint8_t block[BLOCK_LEN] = {0};
++ size_t rem;
++ size_t off;
++
++ hash[0] = UINT32_C(0x67452301);
++ hash[1] = UINT32_C(0xEFCDAB89);
++ hash[2] = UINT32_C(0x98BADCFE);
++ hash[3] = UINT32_C(0x10325476);
++ hash[4] = UINT32_C(0xC3D2E1F0);
++
++ #define LENGTH_SIZE 8 // In bytes
++
++ for (off = 0; len - off >= BLOCK_LEN; off += BLOCK_LEN)
++ sha1_compress(&message[off], hash);
++
++ rem = len - off;
++
++ if (rem > 0)
++ memcpy(block, &message[off], rem);
++
++ block[rem] = 0x80;
++ rem++;
++ if (BLOCK_LEN - rem < LENGTH_SIZE) {
++ sha1_compress(block, hash);
++ memset(block, 0, sizeof(block));
++ }
++
++ block[BLOCK_LEN - 1] = (uint8_t)((len & 0x1FU) << 3);
++ len >>= 5;
++ for (int i = 1; i < LENGTH_SIZE; i++, len >>= 8)
++ block[BLOCK_LEN - 1 - i] = (uint8_t)(len & 0xFFU);
++ sha1_compress(block, hash);
++
++ hash[0] = __builtin_bswap32(hash[0]);
++ hash[1] = __builtin_bswap32(hash[1]);
++ hash[2] = __builtin_bswap32(hash[2]);
++ hash[3] = __builtin_bswap32(hash[3]);
++ hash[4] = __builtin_bswap32(hash[4]);
++}
+-- 
+2.34.1
+
diff --git a/1307-x86-tpm.c-code-for-early-hashing-and-extending-PCR.patch b/1307-x86-tpm.c-code-for-early-hashing-and-extending-PCR.patch
new file mode 100644
index 00000000..cc078732
--- /dev/null
+++ b/1307-x86-tpm.c-code-for-early-hashing-and-extending-PCR.patch
@@ -0,0 +1,530 @@
+From 829bcbf6656d91f6690328425f63b9c7b2ba8f45 Mon Sep 17 00:00:00 2001
+From: Krystian Hebel
+Date: Fri, 21 Oct 2022 18:46:33 +0200
+Subject: [PATCH 07/11] x86/tpm.c: code for early hashing and extending PCR
+
+This file is built twice: for early 32b mode without paging to measure
+the MBI, and for 64b code to measure the dom0 kernel and initramfs.
+Since the MBI is small, the first case uses the TPM to do the hashing.
+The kernel and initramfs, on the other hand, are too big; sending them
+to the TPM would take multiple minutes.
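+
+Concretely, the 64b build hashes in software and sends only the 20-byte
+digest to the TPM, while the early build streams the data itself; compare
+the two tpm_hash_extend12() variants in the patch:
+
+    /* 64b build: one software hash, one TPM_ORD_Extend with the digest. */
+    sha1_hash(buf, size, (uint32_t *)out_digest);
+    /* Early build: TPM_ORD_SHA1Start/Update/CompleteExtend instead. */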
+ +Signed-off-by: Krystian Hebel +--- + xen/arch/x86/boot/Makefile | 6 +- + xen/arch/x86/boot/head.S | 12 +- + xen/arch/x86/include/asm/intel_txt.h | 14 + + xen/arch/x86/tpm.c | 423 +++++++++++++++++++++++++++ + 4 files changed, 452 insertions(+), 3 deletions(-) + create mode 100644 xen/arch/x86/tpm.c + +diff --git a/xen/arch/x86/boot/Makefile b/xen/arch/x86/boot/Makefile +index 34df17664aed..913fa9d2c6ca 100644 +--- a/xen/arch/x86/boot/Makefile ++++ b/xen/arch/x86/boot/Makefile +@@ -1,6 +1,6 @@ + obj-bin-y += head.o + +-head-bin-objs := cmdline.o reloc.o txt_early.o ++head-bin-objs := cmdline.o reloc.o txt_early.o tpm_early.o + + nocov-y += $(head-bin-objs) + noubsan-y += $(head-bin-objs) +@@ -29,6 +29,10 @@ LDFLAGS_DIRECT += $(LDFLAGS_DIRECT-y) + %.bin: %.lnk + $(OBJCOPY) -j .text -O binary $< $@ + ++$(obj)/tpm_early.o: XEN_CFLAGS += -D__EARLY_TPM__ ++$(obj)/tpm_early.o: $(src)/../tpm.c FORCE ++ $(call if_changed_rule,cc_o_c) ++ + %.lnk: %.o $(src)/build32.lds + $(LD) $(subst x86_64,i386,$(LDFLAGS_DIRECT)) -N -T $(filter %.lds,$^) -o $@ $< + +diff --git a/xen/arch/x86/boot/head.S b/xen/arch/x86/boot/head.S +index 57d2aa67da0a..32f99dfc52f9 100644 +--- a/xen/arch/x86/boot/head.S ++++ b/xen/arch/x86/boot/head.S +@@ -528,10 +528,14 @@ __start: + call txt_early_tests + + /* +- * txt_early_tests() returns MBI address, move it to EBX, move magic +- * number expected by Multiboot 2 to EAX and fall through. ++ * txt_early_tests() returns MBI address, pass it to tpm_extend_mbi() ++ * and store for later in EBX. + */ ++ push %eax + movl %eax,%ebx ++ call tpm_extend_mbi ++ ++ /* Move magic number expected by Multiboot 2 to EAX and fall through. */ + movl $MULTIBOOT2_BOOTLOADER_MAGIC,%eax + + .Lmultiboot2_proto: +@@ -859,6 +863,10 @@ reloc: + txt_early_tests: + .incbin "txt_early.bin" + ++ ALIGN ++tpm_extend_mbi: ++ .incbin "tpm_early.bin" ++ + ENTRY(trampoline_start) + #include "trampoline.S" + ENTRY(trampoline_end) +diff --git a/xen/arch/x86/include/asm/intel_txt.h b/xen/arch/x86/include/asm/intel_txt.h +index 09cafbc803d4..42b6ef21744c 100644 +--- a/xen/arch/x86/include/asm/intel_txt.h ++++ b/xen/arch/x86/include/asm/intel_txt.h +@@ -305,4 +305,18 @@ static inline int is_in_pmr(struct txt_os_sinit_data *os_sinit, uint64_t base, + extern void protect_txt_mem_regions(void); + extern void txt_restore_mtrrs(bool e820_verbose); + ++#define DRTM_LOC 2 ++#define DRTM_CODE_PCR 17 ++#define DRTM_DATA_PCR 18 ++ ++/* TXT-defined use 0x4xx, TrenchBoot in Linux uses 0x5xx, use 0x6xx here. */ ++#define TXT_EVTYPE_MBI 0x600 ++#define TXT_EVTYPE_KERNEL 0x601 ++#define TXT_EVTYPE_INITRD 0x602 ++ ++#define SHA1_DIGEST_SIZE 20 ++ ++void tpm_hash_extend(unsigned loc, unsigned pcr, uint8_t *buf, unsigned size, ++ uint32_t type, uint8_t *log_data, unsigned log_data_size); ++ + #endif /* __ASSEMBLY__ */ +diff --git a/xen/arch/x86/tpm.c b/xen/arch/x86/tpm.c +new file mode 100644 +index 000000000000..bae3e4634d3b +--- /dev/null ++++ b/xen/arch/x86/tpm.c +@@ -0,0 +1,423 @@ ++/* ++ * Copyright (c) 2022 3mdeb Sp. z o.o. All rights reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program. If not, see . ++ */ ++ ++#ifdef __EARLY_TPM__ ++/* ++ * This entry point is entered from xen/arch/x86/boot/head.S with MBI base at ++ * 0x4(%esp). ++ */ ++asm ( ++ " .text \n" ++ " .globl _start \n" ++ "_start: \n" ++ " jmp tpm_extend_mbi \n" ++ ); ++ ++#include "boot/defs.h" ++#include "include/asm/intel_txt.h" ++#ifdef __va ++#error "__va defined in non-paged mode!" ++#endif ++#define __va(x) _p(x) ++ ++void *memcpy(void *dest, const void *src, size_t n) ++{ ++ const uint8_t *s = src; ++ uint8_t *d = dest; ++ ++ while (n--) ++ *d++ = *s++; ++ ++ return dest; ++} ++ ++#else /* __EARLY_TPM__ */ ++ ++#include ++#include ++#include ++#include ++ ++void sha1_hash(const uint8_t message[], size_t len, ++ uint32_t hash[static SHA1_DIGEST_SIZE/sizeof(uint32_t)]); ++ ++#endif /* __EARLY_TPM__ */ ++ ++#define TPM_TIS_BASE 0xFED40000 ++#define TPM_LOC_REG(loc, reg) (0x1000 * (loc) + (reg)) ++ ++#define TPM_ACCESS_(x) TPM_LOC_REG(x, 0x00) ++#define ACCESS_REQUEST_USE (1 << 1) ++#define ACCESS_ACTIVE_LOCALITY (1 << 5) ++#define TPM_INTF_CAPABILITY_(x) TPM_LOC_REG(x, 0x14) ++#define INTF_VERSION_MASK 0x70000000 ++#define TPM_STS_(x) TPM_LOC_REG(x, 0x18) ++#define TPM_FAMILY_MASK 0x0C000000 ++#define STS_DATA_AVAIL (1 << 4) ++#define STS_TPM_GO (1 << 5) ++#define STS_COMMAND_READY (1 << 6) ++#define STS_VALID (1 << 7) ++#define TPM_DATA_FIFO_(x) TPM_LOC_REG(x, 0x24) ++ ++#define swap16(x) __builtin_bswap16(x) ++#define swap32(x) __builtin_bswap32(x) ++#define memcpy(d, s, n) __builtin_memcpy(d, s, n) ++ ++static inline volatile uint32_t tis_read32(unsigned reg) ++{ ++ return *(volatile uint32_t *)__va(TPM_TIS_BASE + reg); ++} ++ ++static inline volatile uint8_t tis_read8(unsigned reg) ++{ ++ return *(volatile uint8_t *)__va(TPM_TIS_BASE + reg); ++} ++ ++static inline void tis_write8(unsigned reg, uint8_t val) ++{ ++ *(volatile uint8_t *)__va(TPM_TIS_BASE + reg) = val; ++} ++ ++/* TODO: check if locality was actually activated. */ ++static inline void request_locality(unsigned loc) ++{ ++ tis_write8(TPM_ACCESS_(loc), ACCESS_REQUEST_USE); ++} ++ ++static inline void relinquish_locality(unsigned loc) ++{ ++ tis_write8(TPM_ACCESS_(loc), ACCESS_ACTIVE_LOCALITY); ++} ++ ++static void send_cmd(unsigned loc, uint8_t *buf, unsigned i_size, ++ unsigned *o_size) ++{ ++ unsigned i; ++ ++ tis_write8(TPM_STS_(loc), STS_COMMAND_READY); ++ ++ for ( i = 0; i < i_size; i++ ) ++ tis_write8(TPM_DATA_FIFO_(loc), buf[i]); ++ ++ tis_write8(TPM_STS_(loc), STS_TPM_GO); ++ ++ while ( (tis_read8(TPM_STS_(loc)) & STS_DATA_AVAIL) == 0 ); ++ ++ for ( i = 0; i < *o_size && tis_read8(TPM_STS_(loc)) & STS_DATA_AVAIL; i++ ) ++ buf[i] = tis_read8(TPM_DATA_FIFO_(loc)); ++ ++ if ( i < *o_size ) ++ *o_size = i; ++ ++ tis_write8(TPM_STS_(loc), STS_COMMAND_READY); ++} ++ ++static inline bool is_tpm12(void) ++{ ++ /* ++ * If either INTF_CAPABILITY_x.interfaceVersion is 0 (TIS <= 1.21) or ++ * STS_x.tpmFamily is 0 we're dealing with TPM1.2. 
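++ * Note: this assumes some TPM is present at all; with none, TIS reads
++ * typically return all-ones, which these checks would classify as TPM2.0.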
++ */ ++ return ((tis_read32(TPM_INTF_CAPABILITY_(0)) & INTF_VERSION_MASK) == 0 || ++ (tis_read32(TPM_STS_(0)) & TPM_FAMILY_MASK) == 0); ++} ++ ++static inline void find_evt_log(void **evt_log, uint32_t *evt_log_size) ++{ ++ struct txt_os_mle_data *os_mle; ++ os_mle = txt_os_mle_data_start(__va(read_txt_reg(TXTCR_HEAP_BASE))); ++ ++ *evt_log = __va(os_mle->evtlog_addr); ++ *evt_log_size = os_mle->evtlog_size; ++} ++ ++/****************************** TPM1.2 specific *******************************/ ++#define TPM_ORD_Extend 0x00000014 ++#define TPM_ORD_SHA1Start 0x000000A0 ++#define TPM_ORD_SHA1Update 0x000000A1 ++#define TPM_ORD_SHA1CompleteExtend 0x000000A3 ++ ++#define TPM_TAG_RQU_COMMAND 0x00C1 ++#define TPM_TAG_RSP_COMMAND 0x00C4 ++ ++/* All fields of following structs are big endian. */ ++struct tpm_cmd_hdr { ++ uint16_t tag; ++ uint32_t paramSize; ++ uint32_t ordinal; ++} __packed; ++ ++struct tpm_rsp_hdr { ++ uint16_t tag; ++ uint32_t paramSize; ++ uint32_t returnCode; ++} __packed; ++ ++struct extend_cmd { ++ struct tpm_cmd_hdr h; ++ uint32_t pcrNum; ++ uint8_t inDigest[SHA1_DIGEST_SIZE]; ++} __packed; ++ ++struct extend_rsp { ++ struct tpm_rsp_hdr h; ++ uint8_t outDigest[SHA1_DIGEST_SIZE]; ++} __packed; ++ ++struct sha1_start_cmd { ++ struct tpm_cmd_hdr h; ++} __packed; ++ ++struct sha1_start_rsp { ++ struct tpm_rsp_hdr h; ++ uint32_t maxNumBytes; ++} __packed; ++ ++struct sha1_update_cmd { ++ struct tpm_cmd_hdr h; ++ uint32_t numBytes; /* Must be a multiple of 64 */ ++ uint8_t hashData[]; ++} __packed; ++ ++struct sha1_update_rsp { ++ struct tpm_rsp_hdr h; ++} __packed; ++ ++struct sha1_complete_extend_cmd { ++ struct tpm_cmd_hdr h; ++ uint32_t pcrNum; ++ uint32_t hashDataSize; /* 0-64, inclusive */ ++ uint8_t hashData[]; ++} __packed; ++ ++struct sha1_complete_extend_rsp { ++ struct tpm_rsp_hdr h; ++ uint8_t hashValue[SHA1_DIGEST_SIZE]; ++ uint8_t outDigest[SHA1_DIGEST_SIZE]; ++} __packed; ++ ++struct TPM12_PCREvent { ++ uint32_t PCRIndex; ++ uint32_t Type; ++ uint8_t Digest[SHA1_DIGEST_SIZE]; ++ uint32_t Size; ++ uint8_t Data[]; ++}; ++ ++struct txt_ev_log_container_12 { ++ char Signature[20]; /* "TXT Event Container", null-terminated */ ++ uint8_t Reserved[12]; ++ uint8_t ContainerVerMajor; ++ uint8_t ContainerVerMinor; ++ uint8_t PCREventVerMajor; ++ uint8_t PCREventVerMinor; ++ uint32_t ContainerSize; /* Allocated size */ ++ uint32_t PCREventsOffset; ++ uint32_t NextEventOffset; ++ struct TPM12_PCREvent PCREvents[]; ++}; ++ ++#ifdef __EARLY_TPM__ ++/* ++ * TPM1.2 is required to support commands of up to 1101 bytes, vendors rarely ++ * go above that. Limit maximum size of block of data to be hashed to 1024. 
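++ * 1024 is a multiple of 64, as SHA1Update requires, and together with the
++ * 14-byte command header gives a 1038-byte command, safely below 1101.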
++ */ ++#define MAX_HASH_BLOCK 1024 ++#define CMD_RSP_BUF_SIZE (sizeof(struct sha1_update_cmd) + MAX_HASH_BLOCK) ++ ++union cmd_rsp { ++ struct sha1_start_cmd start_c; ++ struct sha1_start_rsp start_r; ++ struct sha1_update_cmd update_c; ++ struct sha1_update_rsp update_r; ++ struct sha1_complete_extend_cmd finish_c; ++ struct sha1_complete_extend_rsp finish_r; ++ uint8_t buf[CMD_RSP_BUF_SIZE]; ++}; ++ ++static void tpm_hash_extend12(unsigned loc, uint8_t *buf, unsigned size, ++ unsigned pcr, uint8_t *out_digest) ++{ ++ union cmd_rsp cmd_rsp; ++ unsigned max_bytes = MAX_HASH_BLOCK; ++ unsigned o_size = sizeof(cmd_rsp); ++ ++ request_locality(loc); ++ ++ cmd_rsp.start_c = (struct sha1_start_cmd) { ++ .h.tag = swap16(TPM_TAG_RQU_COMMAND), ++ .h.paramSize = swap32(sizeof(struct sha1_start_cmd)), ++ .h.ordinal = swap32(TPM_ORD_SHA1Start), ++ }; ++ ++ send_cmd(loc, cmd_rsp.buf, sizeof(struct sha1_start_cmd), &o_size); ++ ++ // assert (o_size >= sizeof(struct sha1_start_rsp)); ++ ++ if ( max_bytes > swap32(cmd_rsp.start_r.maxNumBytes) ) ++ max_bytes = swap32(cmd_rsp.start_r.maxNumBytes); ++ ++ while ( size > 64 ) { ++ if ( size < max_bytes ) ++ max_bytes = size & ~(64 - 1); ++ ++ o_size = sizeof(cmd_rsp); ++ ++ cmd_rsp.update_c = (struct sha1_update_cmd){ ++ .h.tag = swap16(TPM_TAG_RQU_COMMAND), ++ .h.paramSize = swap32(sizeof(struct sha1_update_cmd) + max_bytes), ++ .h.ordinal = swap32(TPM_ORD_SHA1Update), ++ .numBytes = swap32(max_bytes), ++ }; ++ memcpy(cmd_rsp.update_c.hashData, buf, max_bytes); ++ ++ send_cmd(loc, cmd_rsp.buf, sizeof(struct sha1_update_cmd) + max_bytes, ++ &o_size); ++ ++ // assert (o_size >= sizeof(struct sha1_update_rsp)); ++ ++ size -= max_bytes; ++ buf += max_bytes; ++ } ++ ++ o_size = sizeof(cmd_rsp); ++ ++ cmd_rsp.finish_c = (struct sha1_complete_extend_cmd) { ++ .h.tag = swap16(TPM_TAG_RQU_COMMAND), ++ .h.paramSize = swap32(sizeof(struct sha1_complete_extend_cmd) + size), ++ .h.ordinal = swap32(TPM_ORD_SHA1CompleteExtend), ++ .pcrNum = swap32(pcr), ++ .hashDataSize = swap32(size), ++ }; ++ memcpy(cmd_rsp.finish_c.hashData, buf, size); ++ ++ send_cmd(loc, cmd_rsp.buf, sizeof(struct sha1_complete_extend_cmd) + size, ++ &o_size); ++ ++ // assert (o_size >= sizeof(struct sha1_complete_extend_rsp)); ++ ++ relinquish_locality(loc); ++ ++ if ( out_digest != NULL ) ++ memcpy(out_digest, cmd_rsp.finish_r.hashValue, SHA1_DIGEST_SIZE); ++} ++ ++#else ++ ++union cmd_rsp { ++ struct extend_cmd extend_c; ++ struct extend_rsp extend_r; ++}; ++ ++static void tpm_hash_extend12(unsigned loc, uint8_t *buf, unsigned size, ++ unsigned pcr, uint8_t *out_digest) ++{ ++ union cmd_rsp cmd_rsp; ++ unsigned o_size = sizeof(cmd_rsp); ++ ++ sha1_hash(buf, size, (uint32_t *)out_digest); ++ ++ request_locality(loc); ++ ++ cmd_rsp.extend_c = (struct extend_cmd) { ++ .h.tag = swap16(TPM_TAG_RQU_COMMAND), ++ .h.paramSize = swap32(sizeof(struct extend_cmd)), ++ .h.ordinal = swap32(TPM_ORD_Extend), ++ .pcrNum = swap32(pcr), ++ }; ++ ++ if ( out_digest != NULL ) ++ memcpy(cmd_rsp.extend_c.inDigest, out_digest, SHA1_DIGEST_SIZE); ++ ++ send_cmd(loc, (uint8_t *)&cmd_rsp, sizeof(struct extend_cmd), &o_size); ++ ++ relinquish_locality(loc); ++} ++ ++#endif /* __EARLY_TPM__ */ ++ ++static void *create_log_event12(struct txt_ev_log_container_12 *evt_log, ++ uint32_t evt_log_size, uint32_t pcr, ++ uint32_t type, uint8_t *data, ++ unsigned data_size) ++{ ++ struct TPM12_PCREvent *new_entry; ++ ++ new_entry = (void *)(((uint8_t *)evt_log) + evt_log->NextEventOffset); ++ ++ /* ++ * Check if there is enough space 
left for a new entry.
++ * Note: it is possible to introduce a gap in the event log if an entry
++ * with a big data_size is followed by an entry with smaller data. Maybe
++ * we should cap the event log size in such a case?
++ */
++ if ( evt_log->NextEventOffset + sizeof(struct TPM12_PCREvent) + data_size
++ > evt_log_size )
++ return NULL;
++
++ evt_log->NextEventOffset += sizeof(struct TPM12_PCREvent) + data_size;
++
++ new_entry->PCRIndex = pcr;
++ new_entry->Type = type;
++ new_entry->Size = data_size;
++
++ if ( data && data_size > 0 )
++ memcpy(new_entry->Data, data, data_size);
++
++ return new_entry->Digest;
++}
++
++/************************** end of TPM1.2 specific ****************************/
++
++void tpm_hash_extend(unsigned loc, unsigned pcr, uint8_t *buf, unsigned size,
++ uint32_t type, uint8_t *log_data, unsigned log_data_size)
++{
++ void *evt_log_addr;
++ uint32_t evt_log_size;
++
++ find_evt_log(&evt_log_addr, &evt_log_size);
++
++#ifndef __EARLY_TPM__
++ /*
++ * Low mappings are destroyed at some point during boot; this includes the
++ * event log (marked as reserved by protect_txt_mem_regions()).
++ */
++ map_pages_to_xen((unsigned long)evt_log_addr, maddr_to_mfn(__pa(evt_log_addr)),
++ PFN_UP(evt_log_size), __PAGE_HYPERVISOR_RW);
++#endif
++
++ if ( is_tpm12() ) {
++ struct txt_ev_log_container_12 *evt_log = evt_log_addr;
++ void *entry_digest = create_log_event12(evt_log, evt_log_size, pcr,
++ type, log_data, log_data_size);
++ tpm_hash_extend12(loc, buf, size, pcr, entry_digest);
++ }
++
++#ifndef __EARLY_TPM__
++ destroy_xen_mappings((unsigned long)evt_log_addr,
++ (unsigned long)evt_log_addr + PFN_UP(evt_log_size) * PAGE_SIZE);
++#endif
++}
++
++#ifdef __EARLY_TPM__
++void __stdcall tpm_extend_mbi(uint32_t *mbi)
++{
++ /* MBI starts with uint32_t total_size. */
++ tpm_hash_extend(DRTM_LOC, DRTM_DATA_PCR, (uint8_t *)mbi, *mbi,
++ TXT_EVTYPE_MBI, NULL, 0);
++}
++#endif
+--
+2.34.1
+
diff --git a/1308-x86-dom0_build.c-measure-kernel-and-initrd-before-do.patch b/1308-x86-dom0_build.c-measure-kernel-and-initrd-before-do.patch
new file mode 100644
index 00000000..740f9128
--- /dev/null
+++ b/1308-x86-dom0_build.c-measure-kernel-and-initrd-before-do.patch
@@ -0,0 +1,68 @@
+From 7962cb8b862beabd9de2c78f9dae5b02530e7f07 Mon Sep 17 00:00:00 2001
+From: Krystian Hebel
+Date: Mon, 24 Oct 2022 19:52:49 +0200
+Subject: [PATCH 08/11] x86/dom0_build.c: measure kernel and initrd before dom0
+ is constructed
+
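+The kernel and initrd are hashed and extended into a DRTM PCR before any
+of their bytes are consumed, so that the TPM event log records what was
+actually used to construct dom0. As a rough sketch, each module is
+measured with the tpm_hash_extend() helper introduced by the previous
+patch; buf and size here stand for the module's mapped address and
+length:
+
+    /* Hash the module, extend the digest into the DRTM code PCR and
+       log an event of the given type in the TXT event log. */
+    tpm_hash_extend(DRTM_LOC, DRTM_CODE_PCR, buf, size,
+                    TXT_EVTYPE_KERNEL, NULL, 0);
+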
+Signed-off-by: Krystian Hebel
+---
+ xen/arch/x86/Makefile | 1 +
+ xen/arch/x86/dom0_build.c | 23 +++++++++++++++++++++++
+ 2 files changed, 24 insertions(+)
+
+diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile
+index bc302ff0d0a6..b0b534513909 100644
+--- a/xen/arch/x86/Makefile
++++ b/xen/arch/x86/Makefile
+@@ -65,6 +65,7 @@ obj-y += spec_ctrl.o
+ obj-y += srat.o
+ obj-y += string.o
+ obj-y += time.o
++obj-y += tpm.o
+ obj-y += traps.o
+ obj-y += tsx.o
+ obj-y += usercopy.o
+diff --git a/xen/arch/x86/dom0_build.c b/xen/arch/x86/dom0_build.c
+index 79234f18ff01..115e95d3c626 100644
+--- a/xen/arch/x86/dom0_build.c
++++ b/xen/arch/x86/dom0_build.c
+@@ -17,6 +17,7 @@
+ #include
+ #include
+ #include
++#include
+ #include
+ #include
+ #include
+@@ -585,6 +586,28 @@ int __init construct_dom0(struct domain *d, const module_t *image,
+ BUG_ON(d->vcpu[0] == NULL);
+ BUG_ON(d->vcpu[0]->is_initialised);
+
++ if ( sl_status ) {
++ /*
++ * Note: __start_xen() changed the meaning of the mod_start and mod_end
++ * fields: they are now the MFN and module length in bytes, respectively.
++ * For the kernel, image_headroom was added to both mod_end and mod_start.
++ */
++ printk("Measuring dom0 kernel...\n");
++ tpm_hash_extend(DRTM_LOC, DRTM_CODE_PCR,
++ __va(image->mod_start * PAGE_SIZE + image_headroom),
++ image->mod_end - image_headroom, TXT_EVTYPE_KERNEL,
++ NULL, 0);
++
++ if ( initrd != NULL ) {
++ process_pending_softirqs();
++
++ printk("Measuring dom0 initrd...\n");
++ tpm_hash_extend(DRTM_LOC, DRTM_CODE_PCR,
++ __va(initrd->mod_start * PAGE_SIZE),
++ initrd->mod_end, TXT_EVTYPE_INITRD, NULL, 0);
++ }
++ }
++
+ process_pending_softirqs();
+
+ if ( is_hvm_domain(d) )
+--
+2.34.1
+
diff --git a/1309-x86-boot-choose-AP-stack-based-on-APIC-ID.patch b/1309-x86-boot-choose-AP-stack-based-on-APIC-ID.patch
new file mode 100644
index 00000000..499aa5ed
--- /dev/null
+++ b/1309-x86-boot-choose-AP-stack-based-on-APIC-ID.patch
@@ -0,0 +1,113 @@
+From f0976407926ad1d3004e3a175998b452072ab7fe Mon Sep 17 00:00:00 2001
+From: Krystian Hebel
+Date: Wed, 16 Nov 2022 15:03:07 +0100
+Subject: [PATCH 09/11] x86/boot: choose AP stack based on APIC ID
+
+This is the first step towards making parallel AP bring-up possible. It
+should be enough for the pre-C code.
+
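+To illustrate: the assembly added to x86_64.S below performs, in
+effect, the following lookup (a C sketch only; the real code runs
+before the AP has a usable stack, hence assembly):
+
+    /* apic_id is what the trampoline saved in %esp. */
+    unsigned int i;
+
+    for ( i = 0; i < NR_CPUS; i++ )
+        if ( x86_cpu_to_apicid[i] == apic_id )
+            break;              /* the assembly halts if nothing matches */
+
+    stack = stack_base[i] + STACK_SIZE - CPUINFO_sizeof;
+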
+Signed-off-by: Krystian Hebel
+---
+ xen/arch/x86/boot/trampoline.S | 20 ++++++++++++++++++++
+ xen/arch/x86/boot/x86_64.S | 28 +++++++++++++++++++++++++++-
+ xen/arch/x86/setup.c | 7 +++++++
+ 3 files changed, 54 insertions(+), 1 deletion(-)
+
+diff --git a/xen/arch/x86/boot/trampoline.S b/xen/arch/x86/boot/trampoline.S
+index cdecf949b410..3c393c9e3050 100644
+--- a/xen/arch/x86/boot/trampoline.S
++++ b/xen/arch/x86/boot/trampoline.S
+@@ -72,6 +72,26 @@ trampoline_protmode_entry:
+ mov $X86_CR4_PAE,%ecx
+ mov %ecx,%cr4
+
++ /*
++ * Get APIC ID while we're in non-paged mode. Start by checking if
++ * x2APIC is enabled.
++ */
++ mov $MSR_APIC_BASE, %ecx
++ rdmsr
++ and $APIC_BASE_EXTD, %eax
++ jnz .Lx2apic
++
++ /* Not x2APIC, read the APIC ID (bits 31:24) from MMIO. */
++ mov 0xfee00020, %esp
++ shr $24, %esp
++ jmp 1f
++
++.Lx2apic:
++ mov $(MSR_X2APIC_FIRST + (0x20 >> 4)), %ecx
++ rdmsr
++ mov %eax, %esp
++1:
++
+ /* Load pagetable base register. */
+ mov $sym_offs(idle_pg_table),%eax
+ add bootsym_rel(trampoline_xen_phys_start,4,%eax)
+diff --git a/xen/arch/x86/boot/x86_64.S b/xen/arch/x86/boot/x86_64.S
+index 5d12937a0e40..ad2f5058f008 100644
+--- a/xen/arch/x86/boot/x86_64.S
++++ b/xen/arch/x86/boot/x86_64.S
+@@ -15,7 +15,33 @@ ENTRY(__high_start)
+ mov $XEN_MINIMAL_CR4,%rcx
+ mov %rcx,%cr4
+
+- mov stack_start(%rip),%rsp
++ test %ebx,%ebx
++ cmovz stack_start(%rip), %rsp
++ jz .L_stack_set
++
++ /* APs only: get stack base from APIC ID saved in %esp. */
++ mov $-1, %rax
++ lea x86_cpu_to_apicid(%rip), %rcx
++1:
++ add $1, %rax
++ cmp $NR_CPUS, %eax
++ jb 2f
++ hlt
++2:
++ cmp %esp, (%rcx, %rax, 4)
++ jne 1b
++
++ /* %eax is now the Xen CPU index. */
++ lea stack_base(%rip), %rcx
++ mov (%rcx, %rax, 8), %rsp
++
++ test %rsp,%rsp
++ jnz 1f
++ hlt
++1:
++ add $(STACK_SIZE - CPUINFO_sizeof), %rsp
++
++.L_stack_set:
+
+ /* Reset EFLAGS (subsumes CLI and CLD). */
+ pushq $0
+diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
+index a8816a93f601..a2a2b2f98455 100644
+--- a/xen/arch/x86/setup.c
++++ b/xen/arch/x86/setup.c
+@@ -1876,6 +1876,7 @@ void __init noreturn __start_xen(unsigned long mbi_p)
+ */
+ if ( !pv_shim )
+ {
++ /* Separate loop to make parallel AP bring-up possible. */
+ for_each_present_cpu ( i )
+ {
+ /* Set up cpu_to_node[]. */
+@@ -1883,6 +1884,12 @@ void __init noreturn __start_xen(unsigned long mbi_p)
+ /* Set up node_to_cpumask based on cpu_to_node[]. */
+ numa_add_cpu(i);
+
++ if ( stack_base[i] == NULL )
++ stack_base[i] = cpu_alloc_stack(i);
++ }
++
++ for_each_present_cpu ( i )
++ {
+ if ( (park_offline_cpus || num_online_cpus() < max_cpus) &&
+ !cpu_online(i) )
+ {
+--
+2.34.1
+
diff --git a/1310-x86-smpboot.c-TXT-AP-bringup.patch b/1310-x86-smpboot.c-TXT-AP-bringup.patch
new file mode 100644
index 00000000..df2fdef0
--- /dev/null
+++ b/1310-x86-smpboot.c-TXT-AP-bringup.patch
@@ -0,0 +1,177 @@
+From 23804dc59d572ccd5e1d61afa9f24111da4f3ace Mon Sep 17 00:00:00 2001
+From: Krystian Hebel
+Date: Wed, 16 Nov 2022 15:06:18 +0100
+Subject: [PATCH 10/11] x86/smpboot.c: TXT AP bringup
+
+On Intel TXT, APs are started in one of two ways, depending on the ACM,
+which reports the supported method in its information table. In both
+cases, all APs are started simultaneously after the BSP requests them to
+do so. The two possible methods are:
+- the GETSEC[WAKEUP] instruction,
+- a MONITOR address.
+
+This patch implements just the latter; GETSEC[WAKEUP] support will be
+added later.
+
+With this patch, every AP goes through the assembly part, and only once
+in start_secondary() in C does an AP re-enter MONITOR/MWAIT, iff it is
+not the AP that was asked to boot. The same address is reused for
+simplicity, and on the next wakeup call APs don't have to go through the
+assembly part again (GDT, paging, stack setup).
+
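+For reference, the MLE JOIN structure consumed by the ACM is four
+32-bit words, filled in by sl_wake_aps() below (field names here are
+only descriptive):
+
+    uint32_t join[4] = {
+        gdt_limit,    /* GDT limit */
+        gdt_base,     /* GDT base address */
+        cs_sel,       /* CS selector to load; DS = CS + 8 */
+        entry_point   /* 32-bit protected mode entry point (EIP) */
+    };
+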
+Signed-off-by: Krystian Hebel
+---
+ xen/arch/x86/boot/trampoline.S | 19 ++++++++++-
+ xen/arch/x86/include/asm/intel_txt.h | 6 ++++
+ xen/arch/x86/smpboot.c | 49 ++++++++++++++++++++++++++++
+ 3 files changed, 73 insertions(+), 1 deletion(-)
+
+diff --git a/xen/arch/x86/boot/trampoline.S b/xen/arch/x86/boot/trampoline.S
+index 3c393c9e3050..4c792fa2b002 100644
+--- a/xen/arch/x86/boot/trampoline.S
++++ b/xen/arch/x86/boot/trampoline.S
+@@ -59,6 +59,16 @@ GLOBAL(trampoline_realmode_entry)
+ ljmpl $BOOT_CS32,$bootsym_rel(trampoline_protmode_entry,6)
+
+ .code32
++GLOBAL(txt_ap_entry)
++ /*
++ * APs enter here in protected mode without paging. The GDT is set in
++ * the JOIN structure, which points to trampoline_gdt. Interrupts are
++ * disabled by TXT (including NMI and SMI), so the IDT doesn't matter
++ * at this point. All that is left is to mark that we are an AP by
++ * saving a non-zero value in EBX.
++ */
++ mov $1, %ebx
++
+ trampoline_protmode_entry:
+ /* Set up a few descriptors: on entry only CS is guaranteed good. */
+ mov $BOOT_DS,%eax
+@@ -143,7 +153,7 @@ start64:
+ .word 0
+ idt_48: .word 0, 0, 0 # base = limit = 0
+
+-trampoline_gdt:
++GLOBAL(trampoline_gdt)
+ .word 0 /* 0x0000: unused (reused for GDTR) */
+gdt_48:
+ .word .Ltrampoline_gdt_end - trampoline_gdt - 1
+ .long bootsym_rel(trampoline_gdt,4)
+ .quad 0x00cf9b000000ffff /* 0x0008: ring 0 code, 32-bit mode */
+ .quad 0x00af9b000000ffff /* 0x0010: ring 0 code, 64-bit mode */
+ .quad 0x00cf93000000ffff /* 0x0018: ring 0 data */
+ .quad 0x00009b000000ffff /* 0x0020: real-mode code @ BOOT_TRAMPOLINE */
+ .quad 0x000093000000ffff /* 0x0028: real-mode data @ BOOT_TRAMPOLINE */
++ /*
++ * Intel TXT requires these two in exactly this order. It isn't compatible
++ * with the order required by syscall, so we have duplicated entries...
++ * If the order ever changes, update the selector numbers in asm/intel_txt.h.
++ */
++ .quad 0x00cf9b000000ffff /* 0x0030: ring 0 code, 32-bit mode */
++ .quad 0x00cf93000000ffff /* 0x0038: ring 0 data */
+ .Ltrampoline_gdt_end:
+
+ /* Relocations for trampoline Real Mode segments. */
+diff --git a/xen/arch/x86/include/asm/intel_txt.h b/xen/arch/x86/include/asm/intel_txt.h
+index 42b6ef21744c..d1ed04159054 100644
+--- a/xen/arch/x86/include/asm/intel_txt.h
++++ b/xen/arch/x86/include/asm/intel_txt.h
+@@ -74,10 +74,16 @@
+
+ #define SLAUNCH_BOOTLOADER_MAGIC 0x4c534254
+
++#define TXT_AP_BOOT_CS 0x0030
++#define TXT_AP_BOOT_DS 0x0038
++
+ #ifndef __ASSEMBLY__
+
+ extern unsigned long sl_status;
+
++extern char txt_ap_entry[];
++extern uint32_t trampoline_gdt[];
++
+ /* We need to differentiate between pre- and post paging enabled. */
+ #ifdef __BOOT_DEFS_H__
+ #define _txt(x) _p(x)
+diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
+index b46fd9ab18e4..c2fb1a22c2ee 100644
+--- a/xen/arch/x86/smpboot.c
++++ b/xen/arch/x86/smpboot.c
+@@ -39,6 +39,7 @@
+ #include
+ #include
+ #include
++#include
+ #include
+ #include
+ #include
+@@ -331,6 +332,29 @@ void start_secondary(void *unused)
+ */
+ unsigned int cpu = booting_cpu;
+
++ if ( sl_status ) {
++ uint64_t misc_enable;
++ uint32_t my_apicid;
++ struct txt_sinit_mle_data *sinit_mle =
++ txt_sinit_mle_data_start(__va(read_txt_reg(TXTCR_HEAP_BASE)));
++
++ /* TXT released us with MONITOR disabled in IA32_MISC_ENABLE. */
++ rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
++ wrmsrl(MSR_IA32_MISC_ENABLE,
++ misc_enable | MSR_IA32_MISC_ENABLE_MONITOR_ENABLE);
++
++ /* get_apic_id() reads from x2APIC if it thinks it is enabled. */
++ x2apic_ap_setup();
++ my_apicid = get_apic_id();
++
++ while ( my_apicid != x86_cpu_to_apicid[cpu] ) {
++ asm volatile ("monitor; xor %0,%0; mwait"
++ :: "a"(__va(sinit_mle->rlp_wakeup_addr)), "c"(0),
++ "d"(0) : "memory");
++ cpu = booting_cpu;
++ }
++ }
++
+ /* Critical region without IDT or TSS. Any fault is deadly! */
+
+ set_current(idle_vcpu[cpu]);
+@@ -424,6 +448,28 @@ void start_secondary(void *unused)
+ startup_cpu_idle_loop();
+ }
+
++static int sl_wake_aps(unsigned long trampoline_rm)
++{
++ struct txt_sinit_mle_data *sinit_mle =
++ txt_sinit_mle_data_start(__va(read_txt_reg(TXTCR_HEAP_BASE)));
++ uint32_t *wakeup_addr = __va(sinit_mle->rlp_wakeup_addr);
++#define trampoline_relative(x) (trampoline_rm + ((char *)(x) - trampoline_realmode_entry))
++ uint32_t join[4] = {
++ trampoline_gdt[1], /* GDT limit */
++ trampoline_relative(trampoline_gdt), /* GDT base */
++ TXT_AP_BOOT_CS, /* CS selector, DS = CS+8 */
++ trampoline_relative(txt_ap_entry) /* EIP */
++ };
++
++ write_txt_reg(TXTCR_MLE_JOIN, __pa(join));
++
++ smp_mb();
++
++ *wakeup_addr = 1;
++
++ return 0;
++}
++
+ static int wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
+ {
+ unsigned long send_status = 0, accept_status = 0;
+@@ -446,6 +492,9 @@ static int wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
+ if ( tboot_in_measured_env() && !tboot_wake_ap(phys_apicid, start_eip) )
+ return 0;
+
++ if ( sl_status )
++ return sl_wake_aps(start_eip);
++
+ /*
+ * Be paranoid about clearing APIC errors.
+ */
+--
+2.34.1
+
diff --git a/1311-x86-setup.c-don-t-use-XSM-policy-and-ucode-updates-w.patch b/1311-x86-setup.c-don-t-use-XSM-policy-and-ucode-updates-w.patch
new file mode 100644
index 00000000..7b283899
--- /dev/null
+++ b/1311-x86-setup.c-don-t-use-XSM-policy-and-ucode-updates-w.patch
@@ -0,0 +1,60 @@
+From 8a496bec884af2843e937b197e846ce65ecd6cd6 Mon Sep 17 00:00:00 2001
+From: Krystian Hebel
+Date: Thu, 20 Apr 2023 16:21:03 +0200
+Subject: [PATCH 11/11] x86/setup.c: don't use XSM policy and ucode updates
+ with slaunch
+
+Both must be measured before use, and the code that chooses the proper
+module does partial reads of all modules until it finds a matching one.
+This may introduce TOCTOU issues, so until the secdev driver is
+introduced, these functionalities are temporarily disabled when Xen is
+started with slaunch enabled.
+
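+As a purely hypothetical sketch of the pattern being avoided (helper
+names are made up for illustration):
+
+    /* 1st read: peek at the module (hypothetical helper). */
+    if ( looks_like_xsm_policy(module) )
+        /* 2nd read: the real consumer parses the module again later;
+           its contents may have changed in between (TOCTOU). */
+        load_xsm_policy(module);
+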
+Signed-off-by: Krystian Hebel
+---
+ xen/arch/x86/setup.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
+index a2a2b2f98455..a4f86417cd92 100644
+--- a/xen/arch/x86/setup.c
++++ b/xen/arch/x86/setup.c
+@@ -1029,6 +1029,13 @@ void __init noreturn __start_xen(unsigned long mbi_p)
+ mbi->mods_count);
+ }
+
++ if ( sl_status &&
++ mbi->mods_count > 2 )
++ {
++ mbi->mods_count = 2;
++ printk("Excessive multiboot modules for slaunch - limiting to 2\n");
++ }
++
+ bitmap_fill(module_map, mbi->mods_count);
+ __clear_bit(0, module_map); /* Dom0 kernel is always first */
+
+@@ -1710,7 +1717,9 @@ void __init noreturn __start_xen(unsigned long mbi_p)
+ mmio_ro_ranges = rangeset_new(NULL, "r/o mmio ranges",
+ RANGESETF_prettyprint_hex);
+
+- xsm_multiboot_init(module_map, mbi);
++ /* TODO: XSM policies are not supported by slaunch yet. */
++ if ( sl_status == 0 )
++ xsm_multiboot_init(module_map, mbi);
+
+ /*
+ * IOMMU-related ACPI table parsing may require some of the system domains
+@@ -1779,7 +1788,9 @@ void __init noreturn __start_xen(unsigned long mbi_p)
+
+ init_IRQ();
+
+- microcode_grab_module(module_map, mbi);
++ /* TODO: Microcode updates are not supported by slaunch yet. */
++ if ( sl_status == 0 )
++ microcode_grab_module(module_map, mbi);
+
+ timer_init();
+
+--
+2.34.1
+
diff --git a/xen.spec.in b/xen.spec.in
index 1051fe55..f9748ed2 100644
--- a/xen.spec.in
+++ b/xen.spec.in
@@ -190,6 +190,20 @@ Patch1200: 1200-hypercall-XENMEM_get_mfn_from_pfn.patch
Patch1201: 1201-patch-gvt-hvmloader.patch.patch
Patch1202: 1202-libxl-Add-partially-Intel-GVT-g-support-xengt-device.patch

+# Intel TXT support patches
+
+Patch1301: 1301-x86-include-asm-intel_txt.h-constants-and-accessors-.patch
+Patch1302: 1302-x86-boot-add-MLE-header-and-new-entry-point.patch
+Patch1303: 1303-x86-boot-txt_early-add-early-TXT-tests-and-restore-M.patch
+Patch1304: 1304-xen-arch-x86-reserve-TXT-memory.patch
+Patch1305: 1305-x86-intel_txt.c-restore-boot-MTRRs.patch
+Patch1306: 1306-x86-sha1.c-add-file.patch
+Patch1307: 1307-x86-tpm.c-code-for-early-hashing-and-extending-PCR.patch
+Patch1308: 1308-x86-dom0_build.c-measure-kernel-and-initrd-before-do.patch
+Patch1309: 1309-x86-boot-choose-AP-stack-based-on-APIC-ID.patch
+Patch1310: 1310-x86-smpboot.c-TXT-AP-bringup.patch
+Patch1311: 1311-x86-setup.c-don-t-use-XSM-policy-and-ucode-updates-w.patch
+
%if %build_qemutrad
BuildRequires: libidn-devel zlib-devel SDL-devel curl-devel
BuildRequires: libX11-devel gtk2-devel libaio-devel