Author | Tokens | Token Proportion | Commits | Commit Proportion |
---|---|---|---|---|
Reinette Chatre | 5394 | 55.26% | 14 | 50.00% |
Jarkko Sakkinen | 4357 | 44.64% | 13 | 46.43% |
Tianjia Zhang | 10 | 0.10% | 1 | 3.57% |
Total | 9761 | 100.00% | 28 | 100.00% |
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993
// SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2016-20 Intel Corporation. */ #include <cpuid.h> #include <elf.h> #include <errno.h> #include <fcntl.h> #include <stdbool.h> #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <sys/ioctl.h> #include <sys/mman.h> #include <sys/stat.h> #include <sys/time.h> #include <sys/types.h> #include <sys/auxv.h> #include "defines.h" #include "../kselftest_harness.h" #include "main.h" static const uint64_t MAGIC = 0x1122334455667788ULL; static const uint64_t MAGIC2 = 0x8877665544332211ULL; vdso_sgx_enter_enclave_t vdso_sgx_enter_enclave; /* * Security Information (SECINFO) data structure needed by a few SGX * instructions (eg. ENCLU[EACCEPT] and ENCLU[EMODPE]) holds meta-data * about an enclave page. &enum sgx_secinfo_page_state specifies the * secinfo flags used for page state. */ enum sgx_secinfo_page_state { SGX_SECINFO_PENDING = (1 << 3), SGX_SECINFO_MODIFIED = (1 << 4), SGX_SECINFO_PR = (1 << 5), }; struct vdso_symtab { Elf64_Sym *elf_symtab; const char *elf_symstrtab; Elf64_Word *elf_hashtab; }; static Elf64_Dyn *vdso_get_dyntab(void *addr) { Elf64_Ehdr *ehdr = addr; Elf64_Phdr *phdrtab = addr + ehdr->e_phoff; int i; for (i = 0; i < ehdr->e_phnum; i++) if (phdrtab[i].p_type == PT_DYNAMIC) return addr + phdrtab[i].p_offset; return NULL; } static void *vdso_get_dyn(void *addr, Elf64_Dyn *dyntab, Elf64_Sxword tag) { int i; for (i = 0; dyntab[i].d_tag != DT_NULL; i++) if (dyntab[i].d_tag == tag) return addr + dyntab[i].d_un.d_ptr; return NULL; } static bool vdso_get_symtab(void *addr, struct vdso_symtab *symtab) { Elf64_Dyn *dyntab = vdso_get_dyntab(addr); symtab->elf_symtab = vdso_get_dyn(addr, dyntab, DT_SYMTAB); if (!symtab->elf_symtab) return false; symtab->elf_symstrtab = vdso_get_dyn(addr, dyntab, DT_STRTAB); if (!symtab->elf_symstrtab) return false; symtab->elf_hashtab = vdso_get_dyn(addr, dyntab, DT_HASH); if (!symtab->elf_hashtab) return false; return true; } 
static inline int sgx2_supported(void) { unsigned int eax, ebx, ecx, edx; __cpuid_count(SGX_CPUID, 0x0, eax, ebx, ecx, edx); return eax & 0x2; } static unsigned long elf_sym_hash(const char *name) { unsigned long h = 0, high; while (*name) { h = (h << 4) + *name++; high = h & 0xf0000000; if (high) h ^= high >> 24; h &= ~high; } return h; } static Elf64_Sym *vdso_symtab_get(struct vdso_symtab *symtab, const char *name) { Elf64_Word bucketnum = symtab->elf_hashtab[0]; Elf64_Word *buckettab = &symtab->elf_hashtab[2]; Elf64_Word *chaintab = &symtab->elf_hashtab[2 + bucketnum]; Elf64_Sym *sym; Elf64_Word i; for (i = buckettab[elf_sym_hash(name) % bucketnum]; i != STN_UNDEF; i = chaintab[i]) { sym = &symtab->elf_symtab[i]; if (!strcmp(name, &symtab->elf_symstrtab[sym->st_name])) return sym; } return NULL; } /* * Return the offset in the enclave where the TCS segment can be found. * The first RW segment loaded is the TCS. */ static off_t encl_get_tcs_offset(struct encl *encl) { int i; for (i = 0; i < encl->nr_segments; i++) { struct encl_segment *seg = &encl->segment_tbl[i]; if (i == 0 && seg->prot == (PROT_READ | PROT_WRITE)) return seg->offset; } return -1; } /* * Return the offset in the enclave where the data segment can be found. * The first RW segment loaded is the TCS, skip that to get info on the * data segment. 
*/ static off_t encl_get_data_offset(struct encl *encl) { int i; for (i = 1; i < encl->nr_segments; i++) { struct encl_segment *seg = &encl->segment_tbl[i]; if (seg->prot == (PROT_READ | PROT_WRITE)) return seg->offset; } return -1; } FIXTURE(enclave) { struct encl encl; struct sgx_enclave_run run; }; static bool setup_test_encl(unsigned long heap_size, struct encl *encl, struct __test_metadata *_metadata) { Elf64_Sym *sgx_enter_enclave_sym = NULL; struct vdso_symtab symtab; struct encl_segment *seg; char maps_line[256]; FILE *maps_file; unsigned int i; void *addr; if (!encl_load("test_encl.elf", encl, heap_size)) { encl_delete(encl); TH_LOG("Failed to load the test enclave."); return false; } if (!encl_measure(encl)) goto err; if (!encl_build(encl)) goto err; /* * An enclave consumer only must do this. */ for (i = 0; i < encl->nr_segments; i++) { struct encl_segment *seg = &encl->segment_tbl[i]; addr = mmap((void *)encl->encl_base + seg->offset, seg->size, seg->prot, MAP_SHARED | MAP_FIXED, encl->fd, 0); EXPECT_NE(addr, MAP_FAILED); if (addr == MAP_FAILED) goto err; } /* Get vDSO base address */ addr = (void *)getauxval(AT_SYSINFO_EHDR); if (!addr) goto err; if (!vdso_get_symtab(addr, &symtab)) goto err; sgx_enter_enclave_sym = vdso_symtab_get(&symtab, "__vdso_sgx_enter_enclave"); if (!sgx_enter_enclave_sym) goto err; vdso_sgx_enter_enclave = addr + sgx_enter_enclave_sym->st_value; return true; err: for (i = 0; i < encl->nr_segments; i++) { seg = &encl->segment_tbl[i]; TH_LOG("0x%016lx 0x%016lx 0x%02x", seg->offset, seg->size, seg->prot); } maps_file = fopen("/proc/self/maps", "r"); if (maps_file != NULL) { while (fgets(maps_line, sizeof(maps_line), maps_file) != NULL) { maps_line[strlen(maps_line) - 1] = '\0'; if (strstr(maps_line, "/dev/sgx_enclave")) TH_LOG("%s", maps_line); } fclose(maps_file); } TH_LOG("Failed to initialize the test enclave."); encl_delete(encl); return false; } FIXTURE_SETUP(enclave) { } FIXTURE_TEARDOWN(enclave) { encl_delete(&self->encl); 
} #define ENCL_CALL(op, run, clobbered) \ ({ \ int ret; \ if ((clobbered)) \ ret = vdso_sgx_enter_enclave((unsigned long)(op), 0, 0, \ EENTER, 0, 0, (run)); \ else \ ret = sgx_enter_enclave((void *)(op), NULL, 0, EENTER, NULL, NULL, \ (run)); \ ret; \ }) #define EXPECT_EEXIT(run) \ do { \ EXPECT_EQ((run)->function, EEXIT); \ if ((run)->function != EEXIT) \ TH_LOG("0x%02x 0x%02x 0x%016llx", (run)->exception_vector, \ (run)->exception_error_code, (run)->exception_addr); \ } while (0) TEST_F(enclave, unclobbered_vdso) { struct encl_op_get_from_buf get_op; struct encl_op_put_to_buf put_op; ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata)); memset(&self->run, 0, sizeof(self->run)); self->run.tcs = self->encl.encl_base; put_op.header.type = ENCL_OP_PUT_TO_BUFFER; put_op.value = MAGIC; EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.user_data, 0); get_op.header.type = ENCL_OP_GET_FROM_BUFFER; get_op.value = 0; EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0); EXPECT_EQ(get_op.value, MAGIC); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.user_data, 0); } /* * A section metric is concatenated in a way that @low bits 12-31 define the * bits 12-31 of the metric and @high bits 0-19 define the bits 32-51 of the * metric. 
 */
static unsigned long sgx_calc_section_metric(unsigned int low,
					     unsigned int high)
{
	return (low & GENMASK_ULL(31, 12)) +
	       ((high & GENMASK_ULL(19, 0)) << 32);
}

/*
 * Sum total available physical SGX memory across all EPC sections
 *
 * Return: total available physical SGX memory available on system
 */
static unsigned long get_total_epc_mem(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned long total_size = 0;
	unsigned int type;
	int section = 0;

	/* Walk CPUID EPC sub-leaves until an invalid section terminates the list. */
	while (true) {
		__cpuid_count(SGX_CPUID, section + SGX_CPUID_EPC,
			      eax, ebx, ecx, edx);

		type = eax & SGX_CPUID_EPC_MASK;
		if (type == SGX_CPUID_EPC_INVALID)
			break;

		if (type != SGX_CPUID_EPC_SECTION)
			break;

		total_size += sgx_calc_section_metric(ecx, edx);

		section++;
	}

	return total_size;
}

/*
 * Same MAGIC round-trip as unclobbered_vdso, but with a heap sized to all
 * available EPC so the enclave oversubscribes physical SGX memory.
 */
TEST_F(enclave, unclobbered_vdso_oversubscribed)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;
	unsigned long total_mem;

	total_mem = get_total_epc_mem();
	ASSERT_NE(total_mem, 0);
	ASSERT_TRUE(setup_test_encl(total_mem, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}

/*
 * Exercise SGX2 page removal (trim, EACCEPT, remove) on an oversubscribed
 * heap. Long timeout: every heap page is touched several times.
 */
TEST_F_TIMEOUT(enclave, unclobbered_vdso_oversubscribed_remove, 900)
{
	struct sgx_enclave_remove_pages remove_ioc;
	struct sgx_enclave_modify_types modt_ioc;
	struct encl_op_get_from_buf get_op;
	struct encl_op_eaccept eaccept_op;
	struct encl_op_put_to_buf put_op;
	struct encl_segment *heap;
	unsigned long total_mem;
	int ret, errno_save;
	unsigned long addr;
	unsigned long i;

	/*
	 * Create enclave with additional heap that is as big as all
	 * available physical SGX memory.
	 */
	total_mem = get_total_epc_mem();
	ASSERT_NE(total_mem, 0);
	TH_LOG("Creating an enclave with %lu bytes heap may take a while ...",
	       total_mem);
	ASSERT_TRUE(setup_test_encl(total_mem, &self->encl, _metadata));

	/*
	 * Hardware (SGX2) and kernel support is needed for this test. Start
	 * with check that test has a chance of succeeding.
	 */
	memset(&modt_ioc, 0, sizeof(modt_ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return,
			     "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * Invalid parameters were provided during sanity check,
	 * expect command to fail.
	 */
	EXPECT_EQ(ret, -1);

	/* SGX2 is supported by kernel and hardware, test can proceed. */
	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/* The heap is the last segment loaded by encl_load(). */
	heap = &self->encl.segment_tbl[self->encl.nr_segments - 1];

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	/* Trim entire heap. */
	memset(&modt_ioc, 0, sizeof(modt_ioc));

	modt_ioc.offset = heap->offset;
	modt_ioc.length = heap->size;
	modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;

	TH_LOG("Changing type of %zd bytes to trimmed may take a while ...",
	       heap->size);
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(modt_ioc.result, 0);
	EXPECT_EQ(modt_ioc.count, heap->size);

	/* EACCEPT all removed pages. */
	addr = self->encl.encl_base + heap->offset;

	eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	TH_LOG("Entering enclave to run EACCEPT for each page of %zd bytes may take a while ...",
	       heap->size);
	for (i = 0; i < heap->size; i += 4096) {
		eaccept_op.epc_addr = addr + i;
		eaccept_op.ret = 0;

		EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

		EXPECT_EQ(self->run.exception_vector, 0);
		EXPECT_EQ(self->run.exception_error_code, 0);
		EXPECT_EQ(self->run.exception_addr, 0);
		ASSERT_EQ(eaccept_op.ret, 0);
		ASSERT_EQ(self->run.function, EEXIT);
	}

	/* Complete page removal. */
	memset(&remove_ioc, 0, sizeof(remove_ioc));

	remove_ioc.offset = heap->offset;
	remove_ioc.length = heap->size;

	TH_LOG("Removing %zd bytes from enclave may take a while ...",
	       heap->size);
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(remove_ioc.count, heap->size);
}

/* MAGIC round-trip through the register-clobbering vDSO entry path. */
TEST_F(enclave, clobbered_vdso)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}

/* User exit handler: clears run->user_data so the test can observe it ran. */
static int test_handler(long rdi, long rsi, long rdx, long ursp, long r8, long r9,
			struct sgx_enclave_run *run)
{
	run->user_data = 0;

	return 0;
}

/* Same as clobbered_vdso, but with a user exit handler installed. */
TEST_F(enclave, clobbered_vdso_and_user_function)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl,
				    _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/* test_handler() must reset user_data back to 0 before EEXIT. */
	self->run.user_handler = (__u64)test_handler;
	self->run.user_data = 0xdeadbeef;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}

/*
 * Sanity check that it is possible to enter either of the two hardcoded TCS
 */
TEST_F(enclave, tcs_entry)
{
	struct encl_op_header op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	op.type = ENCL_OP_NOP;

	EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Move to the next TCS. */
	self->run.tcs = self->encl.encl_base + PAGE_SIZE;

	EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
}

/*
 * Second page of .data segment is used to test changing PTE permissions.
 * This spans the local encl_buffer within the test enclave.
 *
 * 1) Start with a sanity check: a value is written to the target page within
 * the enclave and read back to ensure target page can be written to.
 * 2) Change PTE permissions (RW -> RO) of target page within enclave.
 * 3) Repeat (1) - this time expecting a regular #PF communicated via the
 * vDSO.
 * 4) Change PTE permissions of target page within enclave back to be RW.
 * 5) Repeat (1) by resuming enclave, now expected to be possible to write to
 * and read from target page within enclave.
 */
TEST_F(enclave, pte_permissions)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	unsigned long data_start;
	int ret;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl,
				    _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/* Second page of the .data segment (the local encl_buffer). */
	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) + PAGE_SIZE;

	/*
	 * Sanity check to ensure it is possible to write to page that will
	 * have its permissions manipulated.
	 */

	/* Write MAGIC to page */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read memory that was just written to, confirming that it is the
	 * value previously written (MAGIC).
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Change PTE permissions of target page within the enclave */
	ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ);
	if (ret)
		perror("mprotect");

	/*
	 * PTE permissions of target page changed to read-only, EPCM
	 * permissions unchanged (EPCM permissions are RW), attempt to
	 * write to the page, expecting a regular #PF.
	 */
	put_addr_op.value = MAGIC2;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	/* Vector 14 == #PF; error code 0x7 == present|write|user (no SGX bit). */
	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_error_code, 0x7);
	EXPECT_EQ(self->run.exception_addr, data_start);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/*
	 * Change PTE permissions back to enable enclave to write to the
	 * target page and resume enclave - do not expect any exceptions this
	 * time.
	 */
	ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ | PROT_WRITE);
	if (ret)
		perror("mprotect");

	EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0,
					 ERESUME, 0, 0, &self->run), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	get_addr_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC2);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
}

/*
 * Modifying permissions of TCS page should not be possible.
 */
TEST_F(enclave, tcs_permissions)
{
	struct sgx_enclave_restrict_permissions ioc;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	memset(&ioc, 0, sizeof(ioc));

	/*
	 * Ensure kernel supports needed ioctl() and system supports needed
	 * commands.
	 */
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc);
	errno_save = ret == -1 ? errno : 0;

	/*
	 * Invalid parameters were provided during sanity check,
	 * expect command to fail.
	 */
	ASSERT_EQ(ret, -1);

	if (errno_save == ENOTTY)
		SKIP(return,
		     "Kernel does not support SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl()");
	else if (errno_save == ENODEV)
		SKIP(return, "System does not support SGX2");

	/*
	 * Attempt to make TCS page read-only. This is not allowed and
	 * should be prevented by the kernel.
	 */
	ioc.offset = encl_get_tcs_offset(&self->encl);
	ioc.length = PAGE_SIZE;
	ioc.permissions = SGX_SECINFO_R;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, -1);
	EXPECT_EQ(errno_save, EINVAL);
	EXPECT_EQ(ioc.result, 0);
	EXPECT_EQ(ioc.count, 0);
}

/*
 * Enclave page permission test.
 *
 * Modify and restore enclave page's EPCM (enclave) permissions from
 * outside enclave (ENCLS[EMODPR] via kernel) as well as from within
 * enclave (via ENCLU[EMODPE]). Check for page fault if
 * VMA allows access but EPCM permissions do not.
 */
TEST_F(enclave, epcm_permissions)
{
	struct sgx_enclave_restrict_permissions restrict_ioc;
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct encl_op_eaccept eaccept_op;
	struct encl_op_emodpe emodpe_op;
	unsigned long data_start;
	int ret, errno_save;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl,
				    _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/*
	 * Ensure kernel supports needed ioctl() and system supports needed
	 * commands.
	 */
	memset(&restrict_ioc, 0, sizeof(restrict_ioc));

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS,
		    &restrict_ioc);
	errno_save = ret == -1 ? errno : 0;

	/*
	 * Invalid parameters were provided during sanity check,
	 * expect command to fail.
	 */
	ASSERT_EQ(ret, -1);

	if (errno_save == ENOTTY)
		SKIP(return,
		     "Kernel does not support SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl()");
	else if (errno_save == ENODEV)
		SKIP(return, "System does not support SGX2");

	/*
	 * Page that will have its permissions changed is the second data
	 * page in the .data segment. This forms part of the local encl_buffer
	 * within the enclave.
	 *
	 * At start of test @data_start should have EPCM as well as PTE and
	 * VMA permissions of RW.
	 */
	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) + PAGE_SIZE;

	/*
	 * Sanity check that page at @data_start is writable before making
	 * any changes to page permissions.
	 *
	 * Start by writing MAGIC to test page.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read memory that was just written to, confirming that
	 * page is writable.
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Change EPCM permissions to read-only. Kernel still considers
	 * the page writable.
	 */
	memset(&restrict_ioc, 0, sizeof(restrict_ioc));

	restrict_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
	restrict_ioc.length = PAGE_SIZE;
	restrict_ioc.permissions = SGX_SECINFO_R;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS,
		    &restrict_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(restrict_ioc.result, 0);
	EXPECT_EQ(restrict_ioc.count, 4096);

	/*
	 * EPCM permissions changed from kernel, need to EACCEPT from enclave.
	 */
	eaccept_op.epc_addr = data_start;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_REG | SGX_SECINFO_PR;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/*
	 * EPCM permissions of page is now read-only, expect #PF
	 * on EPCM when attempting to write to page from within enclave.
	 */
	put_addr_op.value = MAGIC2;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	/* Error code 0x8007: SGX bit set - fault originated from EPCM. */
	EXPECT_EQ(self->run.function, ERESUME);
	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_error_code, 0x8007);
	EXPECT_EQ(self->run.exception_addr, data_start);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/*
	 * Received AEX but cannot return to enclave at same entrypoint,
	 * need different TCS from where EPCM permission can be made writable
	 * again.
	 */
	self->run.tcs = self->encl.encl_base + PAGE_SIZE;

	/*
	 * Enter enclave at new TCS to change EPCM permissions to be
	 * writable again and thus fix the page fault that triggered the
	 * AEX.
	 */
	emodpe_op.epc_addr = data_start;
	emodpe_op.flags = SGX_SECINFO_R | SGX_SECINFO_W;
	emodpe_op.header.type = ENCL_OP_EMODPE;

	EXPECT_EQ(ENCL_CALL(&emodpe_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Attempt to return to main TCS to resume execution at faulting
	 * instruction, PTE should continue to allow writing to the page.
	 */
	self->run.tcs = self->encl.encl_base;

	/*
	 * Wrong page permissions that caused original fault has
	 * now been fixed via EPCM permissions.
	 * Resume execution in main TCS to re-attempt the memory access.
	 *
	 * NOTE(review): tcs is assigned the same value twice in a row here;
	 * the second assignment appears redundant - confirm and fold into one.
	 */
	self->run.tcs = self->encl.encl_base;

	EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0,
					 ERESUME, 0, 0, &self->run), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	get_addr_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC2);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
}

/*
 * Test the addition of pages to an initialized enclave via writing to
 * a page belonging to the enclave's address space but was not added
 * during enclave creation.
 */
TEST_F(enclave, augment)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct encl_op_eaccept eaccept_op;
	size_t total_size = 0;
	void *addr;
	int i;

	if (!sgx2_supported())
		SKIP(return, "SGX2 not supported");

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl,
				    _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	for (i = 0; i < self->encl.nr_segments; i++) {
		struct encl_segment *seg = &self->encl.segment_tbl[i];

		total_size += seg->size;
	}

	/*
	 * Actual enclave size is expected to be larger than the loaded
	 * test enclave since enclave size must be a power of 2 in bytes
	 * and test_encl does not consume it all.
	 */
	EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);

	/*
	 * Create memory mapping for the page that will be added. New
	 * memory mapping is for one page right after all existing
	 * mappings.
* Kernel will allow new mapping using any permissions if it * falls into the enclave's address range but not backed * by existing enclave pages. */ addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED | MAP_FIXED, self->encl.fd, 0); EXPECT_NE(addr, MAP_FAILED); self->run.exception_vector = 0; self->run.exception_error_code = 0; self->run.exception_addr = 0; /* * Attempt to write to the new page from within enclave. * Expected to fail since page is not (yet) part of the enclave. * The first #PF will trigger the addition of the page to the * enclave, but since the new page needs an EACCEPT from within the * enclave before it can be used it would not be possible * to successfully return to the failing instruction. This is the * cause of the second #PF captured here having the SGX bit set, * it is from hardware preventing the page from being used. */ put_addr_op.value = MAGIC; put_addr_op.addr = (unsigned long)addr; put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS; EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0); EXPECT_EQ(self->run.function, ERESUME); EXPECT_EQ(self->run.exception_vector, 14); EXPECT_EQ(self->run.exception_addr, (unsigned long)addr); if (self->run.exception_error_code == 0x6) { munmap(addr, PAGE_SIZE); SKIP(return, "Kernel does not support adding pages to initialized enclave"); } EXPECT_EQ(self->run.exception_error_code, 0x8007); self->run.exception_vector = 0; self->run.exception_error_code = 0; self->run.exception_addr = 0; /* Handle AEX by running EACCEPT from new entry point. 
*/ self->run.tcs = self->encl.encl_base + PAGE_SIZE; eaccept_op.epc_addr = self->encl.encl_base + total_size; eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING; eaccept_op.ret = 0; eaccept_op.header.type = ENCL_OP_EACCEPT; EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); EXPECT_EQ(eaccept_op.ret, 0); /* Can now return to main TCS to resume execution. */ self->run.tcs = self->encl.encl_base; EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0, ERESUME, 0, 0, &self->run), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* * Read memory from newly added page that was just written to, * confirming that data previously written (MAGIC) is present. */ get_addr_op.value = 0; get_addr_op.addr = (unsigned long)addr; get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS; EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0); EXPECT_EQ(get_addr_op.value, MAGIC); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); munmap(addr, PAGE_SIZE); } /* * Test for the addition of pages to an initialized enclave via a * pre-emptive run of EACCEPT on page to be added. 
*/
TEST_F(enclave, augment_via_eaccept)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	struct encl_op_eaccept eaccept_op;
	size_t total_size = 0;
	void *addr;
	int i;

	if (!sgx2_supported())
		SKIP(return, "SGX2 not supported");

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/* Sum of all segment sizes == size of the loaded enclave image. */
	for (i = 0; i < self->encl.nr_segments; i++) {
		struct encl_segment *seg = &self->encl.segment_tbl[i];

		total_size += seg->size;
	}

	/*
	 * Actual enclave size is expected to be larger than the loaded
	 * test enclave since enclave size must be a power of 2 in bytes while
	 * test_encl does not consume it all.
	 */
	EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);

	/*
	 * mmap() a page at end of existing enclave to be used for dynamic
	 * EPC page.
	 *
	 * Kernel will allow new mapping using any permissions if it
	 * falls into the enclave's address range but not backed
	 * by existing enclave pages.
	 */
	addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
		    PROT_READ | PROT_WRITE | PROT_EXEC,
		    MAP_SHARED | MAP_FIXED, self->encl.fd, 0);
	EXPECT_NE(addr, MAP_FAILED);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/*
	 * Run EACCEPT on new page to trigger the #PF->EAUG->EACCEPT(again
	 * without a #PF). All should be transparent to userspace.
	 */
	eaccept_op.epc_addr = self->encl.encl_base + total_size;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	/* Unresolved "not present" #PF on the new page means no EAUG support. */
	if (self->run.exception_vector == 14 &&
	    self->run.exception_error_code == 4 &&
	    self->run.exception_addr == self->encl.encl_base + total_size) {
		munmap(addr, PAGE_SIZE);
		SKIP(return, "Kernel does not support adding pages to initialized enclave");
	}

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/*
	 * New page should be accessible from within enclave - attempt to
	 * write to it.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = (unsigned long)addr;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read memory from newly added page that was just written to,
	 * confirming that data previously written (MAGIC) is present.
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = (unsigned long)addr;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	munmap(addr, PAGE_SIZE);
}

/*
 * SGX2 page type modification test in two phases:
 * Phase 1:
 * Create a new TCS, consisting out of three new pages (stack page with regular
 * page type, SSA page with regular page type, and TCS page with TCS page
 * type) in an initialized enclave and run a simple workload within it.
* Phase 2:
 * Remove the three pages added in phase 1, add a new regular page at the
 * same address that previously hosted the TCS page and verify that it can
 * be modified.
 */
TEST_F(enclave, tcs_create)
{
	struct encl_op_init_tcs_page init_tcs_page_op;
	struct sgx_enclave_remove_pages remove_ioc;
	struct encl_op_get_from_addr get_addr_op;
	struct sgx_enclave_modify_types modt_ioc;
	struct encl_op_put_to_addr put_addr_op;
	struct encl_op_get_from_buf get_buf_op;
	struct encl_op_put_to_buf put_buf_op;
	void *addr, *tcs, *stack_end, *ssa;
	struct encl_op_eaccept eaccept_op;
	size_t total_size = 0;
	uint64_t val_64;
	int errno_save;
	int ret, i;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl,
				    _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	/*
	 * Hardware (SGX2) and kernel support is needed for this test. Start
	 * with check that test has a chance of succeeding.
	 */
	memset(&modt_ioc, 0, sizeof(modt_ioc));
	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);

	if (ret == -1) {
		if (errno == ENOTTY)
			SKIP(return, "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
		else if (errno == ENODEV)
			SKIP(return, "System does not support SGX2");
	}

	/*
	 * Invalid parameters were provided during sanity check,
	 * expect command to fail.
	 */
	EXPECT_EQ(ret, -1);

	/*
	 * Add three regular pages via EAUG: one will be the TCS stack, one
	 * will be the TCS SSA, and one will be the new TCS. The stack and
	 * SSA will remain as regular pages, the TCS page will need its
	 * type changed after populated with needed data.
	 */
	for (i = 0; i < self->encl.nr_segments; i++) {
		struct encl_segment *seg = &self->encl.segment_tbl[i];

		total_size += seg->size;
	}

	/*
	 * Actual enclave size is expected to be larger than the loaded
	 * test enclave since enclave size must be a power of 2 in bytes while
	 * test_encl does not consume it all.
	 */
	EXPECT_LT(total_size + 3 * PAGE_SIZE, self->encl.encl_size);

	/*
	 * mmap() three pages at end of existing enclave to be used for the
	 * three new pages.
	 */
	addr = mmap((void *)self->encl.encl_base + total_size, 3 * PAGE_SIZE,
		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED,
		    self->encl.fd, 0);
	EXPECT_NE(addr, MAP_FAILED);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	stack_end = (void *)self->encl.encl_base + total_size;
	tcs = (void *)self->encl.encl_base + total_size + PAGE_SIZE;
	ssa = (void *)self->encl.encl_base + total_size + 2 * PAGE_SIZE;

	/*
	 * Run EACCEPT on each new page to trigger the
	 * EACCEPT->(#PF)->EAUG->EACCEPT(again without a #PF) flow.
	 */
	eaccept_op.epc_addr = (unsigned long)stack_end;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	/* Unresolved "not present" #PF on the new page means no EAUG support. */
	if (self->run.exception_vector == 14 &&
	    self->run.exception_error_code == 4 &&
	    self->run.exception_addr == (unsigned long)stack_end) {
		munmap(addr, 3 * PAGE_SIZE);
		SKIP(return, "Kernel does not support adding pages to initialized enclave");
	}

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* eaccept_op.flags/header still valid, only the address changes. */
	eaccept_op.epc_addr = (unsigned long)ssa;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	eaccept_op.epc_addr = (unsigned long)tcs;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/*
	 * Three new pages added to enclave. Now populate the TCS page with
	 * needed data. This should be done from within enclave. Provide
	 * the function that will do the actual data population with needed
	 * data.
	 */

	/*
	 * New TCS will use the "encl_dyn_entry" entrypoint that expects
	 * stack to begin in page before TCS page.
	 */
	val_64 = encl_get_entry(&self->encl, "encl_dyn_entry");
	EXPECT_NE(val_64, 0);

	/* SSA field of a TCS is an offset from the enclave base. */
	init_tcs_page_op.tcs_page = (unsigned long)tcs;
	init_tcs_page_op.ssa = (unsigned long)total_size + 2 * PAGE_SIZE;
	init_tcs_page_op.entry = val_64;
	init_tcs_page_op.header.type = ENCL_OP_INIT_TCS_PAGE;

	EXPECT_EQ(ENCL_CALL(&init_tcs_page_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Change TCS page type to TCS. */
	memset(&modt_ioc, 0, sizeof(modt_ioc));

	modt_ioc.offset = total_size + PAGE_SIZE;
	modt_ioc.length = PAGE_SIZE;
	modt_ioc.page_type = SGX_PAGE_TYPE_TCS;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(modt_ioc.result, 0);
	/* One page's type changed (was magic number 4096). */
	EXPECT_EQ(modt_ioc.count, PAGE_SIZE);

	/* EACCEPT new TCS page from enclave. */
	eaccept_op.epc_addr = (unsigned long)tcs;
	eaccept_op.flags = SGX_SECINFO_TCS | SGX_SECINFO_MODIFIED;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* Run workload from new TCS. */
	self->run.tcs = (unsigned long)tcs;

	/*
	 * Simple workload to write to data buffer and read value back.
	 */
	put_buf_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_buf_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_buf_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	get_buf_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_buf_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_buf_op, &self->run, true), 0);

	EXPECT_EQ(get_buf_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Phase 2 of test:
	 * Remove pages associated with new TCS, create a regular page
	 * where TCS page used to be and verify it can be used as a regular
	 * page.
	 */

	/* Start page removal by requesting change of page type to PT_TRIM. */
	memset(&modt_ioc, 0, sizeof(modt_ioc));

	modt_ioc.offset = total_size;
	modt_ioc.length = 3 * PAGE_SIZE;
	modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(modt_ioc.result, 0);
	EXPECT_EQ(modt_ioc.count, 3 * PAGE_SIZE);

	/*
	 * Enter enclave via TCS #1 and approve page removal by sending
	 * EACCEPT for each of three removed pages.
	 */
	self->run.tcs = self->encl.encl_base;

	eaccept_op.epc_addr = (unsigned long)stack_end;
	eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	eaccept_op.epc_addr = (unsigned long)tcs;
	eaccept_op.ret = 0;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	eaccept_op.epc_addr = (unsigned long)ssa;
	eaccept_op.ret = 0;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/* Send final ioctl() to complete page removal. */
	memset(&remove_ioc, 0, sizeof(remove_ioc));

	remove_ioc.offset = total_size;
	remove_ioc.length = 3 * PAGE_SIZE;

	ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
	errno_save = ret == -1 ? errno : 0;

	EXPECT_EQ(ret, 0);
	EXPECT_EQ(errno_save, 0);
	EXPECT_EQ(remove_ioc.count, 3 * PAGE_SIZE);

	/*
	 * Enter enclave via TCS #1 and access location where TCS #3 was to
	 * trigger dynamic add of regular page at that location.
	 */
	eaccept_op.epc_addr = (unsigned long)tcs;
	eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
	eaccept_op.ret = 0;
	eaccept_op.header.type = ENCL_OP_EACCEPT;

	EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
	EXPECT_EQ(eaccept_op.ret, 0);

	/*
	 * New page should be accessible from within enclave - write to it.
	 */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = (unsigned long)tcs;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read memory from newly added page that was just written to,
	 * confirming that data previously written (MAGIC) is present.
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = (unsigned long)tcs;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	munmap(addr, 3 * PAGE_SIZE);
}

/*
 * Ensure sane behavior if user requests page removal, does not run
 * EACCEPT from within enclave but still attempts to finalize page removal
 * with the SGX_IOC_ENCLAVE_REMOVE_PAGES ioctl(). The latter should fail
 * because the removal was not EACCEPTed from within the enclave.
*/ TEST_F(enclave, remove_added_page_no_eaccept) { struct sgx_enclave_remove_pages remove_ioc; struct encl_op_get_from_addr get_addr_op; struct sgx_enclave_modify_types modt_ioc; struct encl_op_put_to_addr put_addr_op; unsigned long data_start; int ret, errno_save; ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata)); memset(&self->run, 0, sizeof(self->run)); self->run.tcs = self->encl.encl_base; /* * Hardware (SGX2) and kernel support is needed for this test. Start * with check that test has a chance of succeeding. */ memset(&modt_ioc, 0, sizeof(modt_ioc)); ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc); if (ret == -1) { if (errno == ENOTTY) SKIP(return, "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()"); else if (errno == ENODEV) SKIP(return, "System does not support SGX2"); } /* * Invalid parameters were provided during sanity check, * expect command to fail. */ EXPECT_EQ(ret, -1); /* * Page that will be removed is the second data page in the .data * segment. This forms part of the local encl_buffer within the * enclave. */ data_start = self->encl.encl_base + encl_get_data_offset(&self->encl) + PAGE_SIZE; /* * Sanity check that page at @data_start is writable before * removing it. * * Start by writing MAGIC to test page. */ put_addr_op.value = MAGIC; put_addr_op.addr = data_start; put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS; EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* * Read memory that was just written to, confirming that data * previously written (MAGIC) is present. 
*/ get_addr_op.value = 0; get_addr_op.addr = data_start; get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS; EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0); EXPECT_EQ(get_addr_op.value, MAGIC); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* Start page removal by requesting change of page type to PT_TRIM */ memset(&modt_ioc, 0, sizeof(modt_ioc)); modt_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE; modt_ioc.length = PAGE_SIZE; modt_ioc.page_type = SGX_PAGE_TYPE_TRIM; ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc); errno_save = ret == -1 ? errno : 0; EXPECT_EQ(ret, 0); EXPECT_EQ(errno_save, 0); EXPECT_EQ(modt_ioc.result, 0); EXPECT_EQ(modt_ioc.count, 4096); /* Skip EACCEPT */ /* Send final ioctl() to complete page removal */ memset(&remove_ioc, 0, sizeof(remove_ioc)); remove_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE; remove_ioc.length = PAGE_SIZE; ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc); errno_save = ret == -1 ? errno : 0; /* Operation not permitted since EACCEPT was omitted. */ EXPECT_EQ(ret, -1); EXPECT_EQ(errno_save, EPERM); EXPECT_EQ(remove_ioc.count, 0); } /* * Request enclave page removal but instead of correctly following with * EACCEPT a read attempt to page is made from within the enclave. */ TEST_F(enclave, remove_added_page_invalid_access) { struct encl_op_get_from_addr get_addr_op; struct encl_op_put_to_addr put_addr_op; struct sgx_enclave_modify_types ioc; unsigned long data_start; int ret, errno_save; ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata)); memset(&self->run, 0, sizeof(self->run)); self->run.tcs = self->encl.encl_base; /* * Hardware (SGX2) and kernel support is needed for this test. Start * with check that test has a chance of succeeding. 
*/ memset(&ioc, 0, sizeof(ioc)); ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc); if (ret == -1) { if (errno == ENOTTY) SKIP(return, "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()"); else if (errno == ENODEV) SKIP(return, "System does not support SGX2"); } /* * Invalid parameters were provided during sanity check, * expect command to fail. */ EXPECT_EQ(ret, -1); /* * Page that will be removed is the second data page in the .data * segment. This forms part of the local encl_buffer within the * enclave. */ data_start = self->encl.encl_base + encl_get_data_offset(&self->encl) + PAGE_SIZE; /* * Sanity check that page at @data_start is writable before * removing it. * * Start by writing MAGIC to test page. */ put_addr_op.value = MAGIC; put_addr_op.addr = data_start; put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS; EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* * Read memory that was just written to, confirming that data * previously written (MAGIC) is present. */ get_addr_op.value = 0; get_addr_op.addr = data_start; get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS; EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0); EXPECT_EQ(get_addr_op.value, MAGIC); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* Start page removal by requesting change of page type to PT_TRIM. */ memset(&ioc, 0, sizeof(ioc)); ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE; ioc.length = PAGE_SIZE; ioc.page_type = SGX_PAGE_TYPE_TRIM; ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc); errno_save = ret == -1 ? errno : 0; EXPECT_EQ(ret, 0); EXPECT_EQ(errno_save, 0); EXPECT_EQ(ioc.result, 0); EXPECT_EQ(ioc.count, 4096); /* * Read from page that was just removed. 
*/ get_addr_op.value = 0; EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0); /* * From kernel perspective the page is present but according to SGX the * page should not be accessible so a #PF with SGX bit set is * expected. */ EXPECT_EQ(self->run.function, ERESUME); EXPECT_EQ(self->run.exception_vector, 14); EXPECT_EQ(self->run.exception_error_code, 0x8005); EXPECT_EQ(self->run.exception_addr, data_start); } /* * Request enclave page removal and correctly follow with * EACCEPT but do not follow with removal ioctl() but instead a read attempt * to removed page is made from within the enclave. */ TEST_F(enclave, remove_added_page_invalid_access_after_eaccept) { struct encl_op_get_from_addr get_addr_op; struct encl_op_put_to_addr put_addr_op; struct sgx_enclave_modify_types ioc; struct encl_op_eaccept eaccept_op; unsigned long data_start; int ret, errno_save; ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata)); memset(&self->run, 0, sizeof(self->run)); self->run.tcs = self->encl.encl_base; /* * Hardware (SGX2) and kernel support is needed for this test. Start * with check that test has a chance of succeeding. */ memset(&ioc, 0, sizeof(ioc)); ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc); if (ret == -1) { if (errno == ENOTTY) SKIP(return, "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()"); else if (errno == ENODEV) SKIP(return, "System does not support SGX2"); } /* * Invalid parameters were provided during sanity check, * expect command to fail. */ EXPECT_EQ(ret, -1); /* * Page that will be removed is the second data page in the .data * segment. This forms part of the local encl_buffer within the * enclave. */ data_start = self->encl.encl_base + encl_get_data_offset(&self->encl) + PAGE_SIZE; /* * Sanity check that page at @data_start is writable before * removing it. * * Start by writing MAGIC to test page. 
*/ put_addr_op.value = MAGIC; put_addr_op.addr = data_start; put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS; EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* * Read memory that was just written to, confirming that data * previously written (MAGIC) is present. */ get_addr_op.value = 0; get_addr_op.addr = data_start; get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS; EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0); EXPECT_EQ(get_addr_op.value, MAGIC); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); /* Start page removal by requesting change of page type to PT_TRIM. */ memset(&ioc, 0, sizeof(ioc)); ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE; ioc.length = PAGE_SIZE; ioc.page_type = SGX_PAGE_TYPE_TRIM; ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc); errno_save = ret == -1 ? errno : 0; EXPECT_EQ(ret, 0); EXPECT_EQ(errno_save, 0); EXPECT_EQ(ioc.result, 0); EXPECT_EQ(ioc.count, 4096); eaccept_op.epc_addr = (unsigned long)data_start; eaccept_op.ret = 0; eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED; eaccept_op.header.type = ENCL_OP_EACCEPT; EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); EXPECT_EQ(eaccept_op.ret, 0); /* Skip ioctl() to remove page. */ /* * Read from page that was just removed. */ get_addr_op.value = 0; EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0); /* * From kernel perspective the page is present but according to SGX the * page should not be accessible so a #PF with SGX bit set is * expected. 
*/ EXPECT_EQ(self->run.function, ERESUME); EXPECT_EQ(self->run.exception_vector, 14); EXPECT_EQ(self->run.exception_error_code, 0x8005); EXPECT_EQ(self->run.exception_addr, data_start); } TEST_F(enclave, remove_untouched_page) { struct sgx_enclave_remove_pages remove_ioc; struct sgx_enclave_modify_types modt_ioc; struct encl_op_eaccept eaccept_op; unsigned long data_start; int ret, errno_save; ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata)); /* * Hardware (SGX2) and kernel support is needed for this test. Start * with check that test has a chance of succeeding. */ memset(&modt_ioc, 0, sizeof(modt_ioc)); ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc); if (ret == -1) { if (errno == ENOTTY) SKIP(return, "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()"); else if (errno == ENODEV) SKIP(return, "System does not support SGX2"); } /* * Invalid parameters were provided during sanity check, * expect command to fail. */ EXPECT_EQ(ret, -1); /* SGX2 is supported by kernel and hardware, test can proceed. */ memset(&self->run, 0, sizeof(self->run)); self->run.tcs = self->encl.encl_base; data_start = self->encl.encl_base + encl_get_data_offset(&self->encl) + PAGE_SIZE; memset(&modt_ioc, 0, sizeof(modt_ioc)); modt_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE; modt_ioc.length = PAGE_SIZE; modt_ioc.page_type = SGX_PAGE_TYPE_TRIM; ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc); errno_save = ret == -1 ? errno : 0; EXPECT_EQ(ret, 0); EXPECT_EQ(errno_save, 0); EXPECT_EQ(modt_ioc.result, 0); EXPECT_EQ(modt_ioc.count, 4096); /* * Enter enclave via TCS #1 and approve page removal by sending * EACCEPT for removed page. 
*/ eaccept_op.epc_addr = data_start; eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED; eaccept_op.ret = 0; eaccept_op.header.type = ENCL_OP_EACCEPT; EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.exception_vector, 0); EXPECT_EQ(self->run.exception_error_code, 0); EXPECT_EQ(self->run.exception_addr, 0); EXPECT_EQ(eaccept_op.ret, 0); memset(&remove_ioc, 0, sizeof(remove_ioc)); remove_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE; remove_ioc.length = PAGE_SIZE; ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc); errno_save = ret == -1 ? errno : 0; EXPECT_EQ(ret, 0); EXPECT_EQ(errno_save, 0); EXPECT_EQ(remove_ioc.count, 4096); } TEST_HARNESS_MAIN
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with Cregit http://github.com/cregit/cregit
Version 2.0-RC1