1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2016-20 Intel Corporation. */
14 #include <sys/ioctl.h>
18 #include <sys/types.h>
/*
 * encl_delete() - Tear down an enclave built by this loader.
 *
 * Unmaps the enclave address window and the mapped enclave binary,
 * frees the segment table, and zeroes the descriptor so stale
 * pointers/sizes cannot be reused.
 *
 * NOTE(review): excerpted view — guards around the munmap() calls
 * (e.g. skipping unmapped ranges) are not visible here; confirm
 * against the full file before relying on this being safe for a
 * partially-initialized @encl.
 */
22 void encl_delete(struct encl *encl)
/* Release the reserved enclave virtual-address window. */
25 munmap((void *)encl->encl_base, encl->encl_size);
/* Release the read-only mapping of the enclave binary file. */
28 munmap(encl->bin, encl->bin_size);
/* NOTE(review): the NULL check is redundant — free(NULL) is a no-op. */
33 if (encl->segment_tbl)
34 free(encl->segment_tbl);
/* Wipe the descriptor so a double encl_delete() sees zeroed state. */
36 memset(encl, 0, sizeof(*encl));
/*
 * encl_map_bin() - Map the enclave binary at @path read-only into memory.
 *
 * Opens @path, determines its size via stat(), and mmap()s the whole
 * image MAP_PRIVATE/PROT_READ. On success the size is recorded in
 * encl->bin_size (the encl->bin assignment is elided in this excerpt).
 * Returns true on success, false on failure (error-path returns are
 * elided in this excerpt).
 */
39 static bool encl_map_bin(const char *path, struct encl *encl)
46 fd = open(path, O_RDONLY);
48 perror("enclave executable open()");
/* Need the file size to map the entire image in one mmap() call. */
52 ret = stat(path, &sb);
54 perror("enclave executable stat()");
58 bin = mmap(NULL, sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
59 if (bin == MAP_FAILED) {
60 perror("enclave executable mmap()");
/*
 * NOTE(review): close(fd) is not visible in this excerpt; the mapping
 * stays valid after close, so confirm the full file closes the fd on
 * both success and error paths.
 */
65 encl->bin_size = sb.st_size;
/*
 * encl_ioc_create() - Issue SGX_IOC_ENCLAVE_CREATE for @encl.
 *
 * Fills in the SECS (SGX Enclave Control Structure) from the
 * already-reserved base/size and passes it to the driver. On ioctl
 * failure the reserved address window is unmapped. Returns true on
 * success, false on failure (returns elided in this excerpt).
 */
75 static bool encl_ioc_create(struct encl *encl)
77 struct sgx_secs *secs = &encl->secs;
78 struct sgx_enclave_create ioc;
/* encl_map_area() must have reserved the enclave window already. */
81 assert(encl->encl_base != 0);
83 memset(secs, 0, sizeof(*secs));
/* Presumably one page per state-save-area frame — TODO confirm units
 * against the SDM / driver UAPI. */
84 secs->ssa_frame_size = 1;
/* 64-bit enclave; no other attribute bits requested. */
85 secs->attributes = SGX_ATTR_MODE64BIT;
87 secs->base = encl->encl_base;
88 secs->size = encl->encl_size;
90 ioc.src = (unsigned long)secs;
91 rc = ioctl(encl->fd, SGX_IOC_ENCLAVE_CREATE, &ioc);
93 perror("SGX_IOC_ENCLAVE_CREATE failed");
/* Creation failed: give back the reserved enclave window. */
94 munmap((void *)secs->base, encl->encl_size);
/*
 * encl_ioc_add_pages() - Add one loadable segment to the enclave via
 * SGX_IOC_ENCLAVE_ADD_PAGES.
 *
 * Source bytes are taken from the mapped binary (encl->src plus the
 * segment's offset); the page permissions/type come from seg->flags,
 * computed earlier in encl_load(). Pages are added with
 * SGX_PAGE_MEASURE so they are included in the enclave measurement.
 * Returns true on success, false on failure (returns elided in this
 * excerpt).
 */
101 static bool encl_ioc_add_pages(struct encl *encl, struct encl_segment *seg)
103 struct sgx_enclave_add_pages ioc;
104 struct sgx_secinfo secinfo;
107 memset(&secinfo, 0, sizeof(secinfo));
/* seg->flags already encodes page type and EPCM permissions. */
108 secinfo.flags = seg->flags;
110 ioc.src = (uint64_t)encl->src + seg->offset;
111 ioc.offset = seg->offset;
112 ioc.length = seg->size;
113 ioc.secinfo = (unsigned long)&secinfo;
/* Extend the enclave measurement with these pages' contents. */
114 ioc.flags = SGX_PAGE_MEASURE;
116 rc = ioctl(encl->fd, SGX_IOC_ENCLAVE_ADD_PAGES, &ioc);
118 perror("SGX_IOC_ENCLAVE_ADD_PAGES failed");
/*
 * encl_load() - Open the SGX device, map the enclave ELF binary at
 * @path, and build @encl's segment table from its PT_LOAD headers.
 *
 * Sanity-checks that the device node is usable (execute permission
 * bits, PROT_READ and PROT_EXEC mmap()s succeed), then walks the ELF
 * program headers: segment 0 must be the TCS (flags exactly RW), the
 * rest become regular pages with protections derived from p_flags.
 * Finally rounds the enclave size up to the next power of two, as SGX
 * requires. Returns true on success, false on failure (most error
 * returns are elided in this excerpt).
 */
127 bool encl_load(const char *path, struct encl *encl)
129 const char device_path[] = "/dev/sgx_enclave";
130 Elf64_Phdr *phdr_tbl;
/* Start from a clean descriptor; encl_delete() relies on this too. */
139 memset(encl, 0, sizeof(*encl));
141 fd = open(device_path, O_RDWR);
143 perror("Unable to open /dev/sgx_enclave");
147 ret = stat(device_path, &sb);
149 perror("device file stat()");
154 * This just checks if the /dev file has these permission
155 * bits set. It does not check that the current user is
156 * the owner or in the owning group.
158 if (!(sb.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) {
159 fprintf(stderr, "no execute permissions on device file %s\n", device_path);
/* Probe that the device can be mapped readable at all. */
163 ptr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
164 if (ptr == (void *)-1) {
165 perror("mmap for read");
168 munmap(ptr, PAGE_SIZE);
171 "mmap() succeeded for PROT_READ, but failed for PROT_EXEC.\n" \
172 " Check that current user has execute permissions on %s and \n" \
173 " that /dev does not have noexec set: mount | grep \"/dev .*noexec\"\n" \
174 " If so, remount it executable: mount -o remount,exec /dev\n\n"
/* PROT_EXEC probe: catches a noexec /dev mount, a common setup issue. */
176 ptr = mmap(NULL, PAGE_SIZE, PROT_EXEC, MAP_SHARED, fd, 0);
177 if (ptr == (void *)-1) {
178 fprintf(stderr, ERR_MSG, device_path);
181 munmap(ptr, PAGE_SIZE);
185 if (!encl_map_bin(path, encl))
/* Program header table lives at e_phoff inside the mapped image. */
189 phdr_tbl = encl->bin + ehdr->e_phoff;
/* First pass: count PT_LOAD segments to size the segment table. */
191 for (i = 0; i < ehdr->e_phnum; i++) {
192 Elf64_Phdr *phdr = &phdr_tbl[i];
194 if (phdr->p_type == PT_LOAD)
198 encl->segment_tbl = calloc(encl->nr_segments,
199 sizeof(struct encl_segment));
200 if (!encl->segment_tbl)
/* Second pass: fill in one encl_segment per PT_LOAD header. */
203 for (i = 0, j = 0; i < ehdr->e_phnum; i++) {
204 Elf64_Phdr *phdr = &phdr_tbl[i];
205 unsigned int flags = phdr->p_flags;
206 struct encl_segment *seg;
208 if (phdr->p_type != PT_LOAD)
211 seg = &encl->segment_tbl[j];
/* Reject any flag bits beyond R/W/X. */
213 if (!!(flags & ~(PF_R | PF_W | PF_X))) {
215 "%d has invalid segment flags 0x%02x.\n", i,
/* The first loadable segment is the TCS and must be exactly RW. */
220 if (j == 0 && flags != (PF_R | PF_W)) {
222 "TCS has invalid segment flags 0x%02x.\n",
/* Presumably captured once, for the first segment, as the base file
 * offset all segment offsets are made relative to — the enclosing
 * condition is elided here; confirm against the full file. */
228 src_offset = phdr->p_offset & PAGE_MASK;
/* TCS pages: driver-mandated RW protection, TCS page type. */
230 seg->prot = PROT_READ | PROT_WRITE;
231 seg->flags = SGX_PAGE_TYPE_TCS << 8;
/* Regular pages: translate ELF p_flags into mmap protections. */
233 seg->prot = (phdr->p_flags & PF_R) ? PROT_READ : 0;
234 seg->prot |= (phdr->p_flags & PF_W) ? PROT_WRITE : 0;
235 seg->prot |= (phdr->p_flags & PF_X) ? PROT_EXEC : 0;
/* secinfo layout: page type in bits 15:8, permissions in the low byte. */
236 seg->flags = (SGX_PAGE_TYPE_REG << 8) | seg->prot;
/* Offsets are page-aligned and relative to the first segment. */
239 seg->offset = (phdr->p_offset & PAGE_MASK) - src_offset;
/* Round the file size of the segment up to a whole page. */
240 seg->size = (phdr->p_filesz + PAGE_SIZE - 1) & PAGE_MASK;
242 printf("0x%016lx 0x%016lx 0x%02x\n", seg->offset, seg->size,
248 assert(j == encl->nr_segments);
250 encl->src = encl->bin + src_offset;
/* Total source span: end of the last (highest-offset) segment. */
251 encl->src_size = encl->segment_tbl[j - 1].offset +
252 encl->segment_tbl[j - 1].size;
/* SGX requires a power-of-two enclave size; round up from one page. */
254 for (encl->encl_size = 4096; encl->encl_size < encl->src_size; )
255 encl->encl_size <<= 1;
/*
 * encl_map_area() - Reserve a naturally aligned virtual window for the
 * enclave.
 *
 * SGX requires the enclave base to be aligned to its power-of-two
 * size. Reserve twice the size with PROT_NONE so an aligned base is
 * guaranteed to exist inside the reservation, pick that base, then
 * trim the unaligned slack below and above it.
 */
266 static bool encl_map_area(struct encl *encl)
268 size_t encl_size = encl->encl_size;
/* Over-reserve: a 2x window always contains an encl_size-aligned base. */
271 area = mmap(NULL, encl_size * 2, PROT_NONE,
272 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
273 if (area == MAP_FAILED) {
274 perror("reservation mmap()");
/* Round up to the next encl_size boundary (encl_size is a power of two,
 * see encl_load()). */
278 encl->encl_base = ((uint64_t)area + encl_size - 1) & ~(encl_size - 1);
/* Give back the slack below the aligned base... */
280 munmap(area, encl->encl_base - (uint64_t)area);
/* ...and the slack above base + encl_size. */
281 munmap((void *)(encl->encl_base + encl_size),
282 (uint64_t)area + encl_size - encl->encl_base);
/*
 * encl_build() - Drive the full enclave build sequence.
 *
 * Steps: reserve the aligned address window (encl_map_area), create
 * the enclave (SGX_IOC_ENCLAVE_CREATE), add every segment's pages
 * (SGX_IOC_ENCLAVE_ADD_PAGES), then initialize with the sigstruct
 * (SGX_IOC_ENCLAVE_INIT). Returns true on success, false on failure
 * (returns and the tail of the function are elided in this excerpt).
 */
287 bool encl_build(struct encl *encl)
289 struct sgx_enclave_init ioc;
293 if (!encl_map_area(encl))
296 if (!encl_ioc_create(encl))
300 * Pages must be added before mapping VMAs because their permissions
301 * cap the VMA permissions.
303 for (i = 0; i < encl->nr_segments; i++) {
304 struct encl_segment *seg = &encl->segment_tbl[i];
306 if (!encl_ioc_add_pages(encl, seg))
/* EINIT: the sigstruct must match the measurement built up above. */
310 ioc.sigstruct = (uint64_t)&encl->sigstruct;
311 ret = ioctl(encl->fd, SGX_IOC_ENCLAVE_INIT, &ioc);
313 perror("SGX_IOC_ENCLAVE_INIT failed");