From: Reinette Chatre <reinette.chatre(a)intel.com>
Removing a page from an initialized enclave involves three steps:
first the user requests changing the page type to SGX_PAGE_TYPE_TRIM
via an ioctl(); on success the ENCLU[EACCEPT] instruction is run from
within the enclave to accept the page removal; finally the user
requests that the page removal be completed via a second ioctl().
Only after acceptance (ENCLU[EACCEPT]) from within the enclave can
the kernel remove the page from a running enclave.

Test the behavior when the request to change the page type succeeds
but ENCLU[EACCEPT] is not run before the ioctl() requesting page
removal is issued. This should not be permitted and the final
ioctl() is expected to fail.

Signed-off-by: Reinette Chatre <reinette.chatre(a)intel.com>
---
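For reference, below is a minimal sketch of the complete (successful)
three-step flow as seen from user space, using the same ioctl()s and
types as the new test. The enclave fd and the in-enclave EACCEPT step
(the selftest's ENCL_OP_EACCEPT operation) are assumptions here; the
new test deliberately skips step (2):

static int remove_one_page(int encl_fd, unsigned long offset)
{
        struct sgx_enclave_remove_pages remove_ioc = { 0 };
        struct sgx_enclave_modt modt_ioc = { 0 };
        struct sgx_secinfo secinfo = { 0 };

        /* (1) Request the page type change to SGX_PAGE_TYPE_TRIM. */
        secinfo.flags = SGX_PAGE_TYPE_TRIM << 8;
        modt_ioc.offset = offset;
        modt_ioc.length = PAGE_SIZE;
        modt_ioc.secinfo = (unsigned long)&secinfo;
        if (ioctl(encl_fd, SGX_IOC_ENCLAVE_MODIFY_TYPE, &modt_ioc))
                return -1;

        /* (2) Run ENCLU[EACCEPT] on the page from within the enclave. */

        /* (3) Ask the kernel to complete the removal. */
        remove_ioc.offset = offset;
        remove_ioc.length = PAGE_SIZE;
        return ioctl(encl_fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
}
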
tools/testing/selftests/sgx/main.c | 116 +++++++++++++++++++++++++++++
1 file changed, 116 insertions(+)
diff --git a/tools/testing/selftests/sgx/main.c b/tools/testing/selftests/sgx/main.c
index f9872c6746a3..82902dab96bc 100644
--- a/tools/testing/selftests/sgx/main.c
+++ b/tools/testing/selftests/sgx/main.c
@@ -1453,4 +1453,120 @@ TEST_F(enclave, tcs_create)
munmap(addr, 3 * PAGE_SIZE);
}
+/*
+ * Ensure sane behavior if user requests page removal, does not run
+ * EACCEPT from within enclave but still attempts to finalize page removal
+ * with the SGX_IOC_ENCLAVE_REMOVE_PAGES ioctl(). The latter should fail
+ * because the removal was not EACCEPTed from within the enclave.
+ */
+TEST_F(enclave, remove_added_page_no_eaccept)
+{
+ struct sgx_enclave_remove_pages remove_ioc;
+ struct encl_op_get_from_addr get_addr_op;
+ struct encl_op_put_to_addr put_addr_op;
+ struct sgx_enclave_modt modt_ioc;
+ struct sgx_secinfo secinfo;
+ unsigned long data_start;
+ int ret, errno_save;
+
+ ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
+
+ memset(&self->run, 0, sizeof(self->run));
+ self->run.tcs = self->encl.encl_base;
+
+ /*
+ * Hardware (SGX2) and kernel support is needed for this test. Start
+ * by checking that the test has a chance of succeeding.
+ */
+ memset(&modt_ioc, 0, sizeof(modt_ioc));
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPE, &modt_ioc);
+
+ if (ret == -1) {
+ if (errno == ENOTTY)
+ SKIP(return, "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPE ioctl()");
+ else if (errno == ENODEV)
+ SKIP(return, "System does not support SGX2");
+ }
+
+ /*
+ * Invalid parameters were provided during the sanity check;
+ * expect the command to fail.
+ */
+ EXPECT_EQ(ret, -1);
+
+ /*
+ * Page that will be removed is the second data page in the .data
+ * segment. This forms part of the local encl_buffer within the
+ * enclave.
+ */
+ data_start = self->encl.encl_base +
+ encl_get_data_offset(&self->encl) + PAGE_SIZE;
+
+ /*
+ * Sanity check that page at @data_start is writable before
+ * removing it.
+ *
+ * Start by writing MAGIC to test page.
+ */
+ put_addr_op.value = MAGIC;
+ put_addr_op.addr = data_start;
+ put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ /*
+ * Read memory that was just written to, confirming that data
+ * previously written (MAGIC) is present.
+ */
+ get_addr_op.value = 0;
+ get_addr_op.addr = data_start;
+ get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
+
+ EXPECT_EQ(get_addr_op.value, MAGIC);
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ /* Start page removal by requesting change of page type to PT_TRIM */
+ memset(&modt_ioc, 0, sizeof(modt_ioc));
+ memset(&secinfo, 0, sizeof(secinfo));
+
+ secinfo.flags = SGX_PAGE_TYPE_TRIM << 8;
+ modt_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
+ modt_ioc.length = PAGE_SIZE;
+ modt_ioc.secinfo = (unsigned long)&secinfo;
+
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPE, &modt_ioc);
+ errno_save = ret == -1 ? errno : 0;
+
+ EXPECT_EQ(ret, 0);
+ EXPECT_EQ(errno_save, 0);
+ EXPECT_EQ(modt_ioc.result, 0);
+ EXPECT_EQ(modt_ioc.count, 4096);
+
+ /* Skip EACCEPT */
+
+ /* Send final ioctl() to complete page removal */
+ memset(&remove_ioc, 0, sizeof(remove_ioc));
+
+ remove_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
+ remove_ioc.length = PAGE_SIZE;
+
+ ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
+ errno_save = ret == -1 ? errno : 0;
+
+ /* Operation not permitted since EACCEPT was omitted. */
+ EXPECT_EQ(ret, -1);
+ EXPECT_EQ(errno_save, EPERM);
+ EXPECT_EQ(remove_ioc.count, 0);
+}
+
TEST_HARNESS_MAIN
--
2.35.1
From: Reinette Chatre <reinette.chatre(a)intel.com>
The Thread Control Structure (TCS) contains metadata used by the
hardware to save and restore thread-specific information when
entering/exiting the enclave. A TCS can be added to an initialized
enclave by first adding a new regular enclave page, initializing the
content of the new page from within the enclave, and then changing
that page's type to a TCS.

Support the initialization of a TCS from within the enclave. The
variable information that needs to be provided from outside the
enclave is the address of the TCS, the address of the State Save
Area (SSA), and the entry point that the thread should use to enter
the enclave. With this information all needed fields of a TCS can be
initialized.

Signed-off-by: Reinette Chatre <reinette.chatre(a)intel.com>
---
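For readability, the raw offsets used by do_encl_init_tcs_page() below
correspond to the following TCS layout. This is an illustrative sketch
only; the field names follow the comments in the code and nothing in
the patch depends on this struct:

struct tcs_layout {                     /* illustration only */
        uint64_t state;                 /* offset  0, cleared */
        uint64_t flags;                 /* offset  8, cleared */
        uint64_t ossa;                  /* offset 16, from op->ssa */
        uint32_t cssa;                  /* offset 24, cleared */
        uint32_t nssa;                  /* offset 28, set to 1 */
        uint64_t oentry;                /* offset 32, from op->entry */
        uint64_t aep;                   /* offset 40, cleared */
        uint64_t ofsbase;               /* offset 48, cleared */
        uint64_t ogsbase;               /* offset 56, cleared */
        uint32_t fslimit;               /* offset 64, 0xFFFFFFFF */
        uint32_t gslimit;               /* offset 68, 0xFFFFFFFF */
        uint8_t  reserved[4024];        /* offset 72, cleared */
};                                      /* total: 4096 bytes (one page) */
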
tools/testing/selftests/sgx/defines.h | 8 +++++++
tools/testing/selftests/sgx/test_encl.c | 30 +++++++++++++++++++++++++
2 files changed, 38 insertions(+)
diff --git a/tools/testing/selftests/sgx/defines.h b/tools/testing/selftests/sgx/defines.h
index b638eb98c80c..d8587c971941 100644
--- a/tools/testing/selftests/sgx/defines.h
+++ b/tools/testing/selftests/sgx/defines.h
@@ -26,6 +26,7 @@ enum encl_op_type {
ENCL_OP_NOP,
ENCL_OP_EACCEPT,
ENCL_OP_EMODPE,
+ ENCL_OP_INIT_TCS_PAGE,
ENCL_OP_MAX,
};
@@ -68,4 +69,11 @@ struct encl_op_emodpe {
uint64_t flags;
};
+struct encl_op_init_tcs_page {
+ struct encl_op_header header;
+ uint64_t tcs_page;
+ uint64_t ssa;
+ uint64_t entry;
+};
+
#endif /* DEFINES_H */
diff --git a/tools/testing/selftests/sgx/test_encl.c b/tools/testing/selftests/sgx/test_encl.c
index 5b6c65331527..c0d6397295e3 100644
--- a/tools/testing/selftests/sgx/test_encl.c
+++ b/tools/testing/selftests/sgx/test_encl.c
@@ -57,6 +57,35 @@ static void *memcpy(void *dest, const void *src, size_t n)
return dest;
}
+static void *memset(void *dest, int c, size_t n)
+{
+ size_t i;
+
+ for (i = 0; i < n; i++)
+ ((char *)dest)[i] = c;
+
+ return dest;
+}
+
+static void do_encl_init_tcs_page(void *_op)
+{
+ struct encl_op_init_tcs_page *op = _op;
+ void *tcs = (void *)op->tcs_page;
+ uint32_t val_32;
+
+ memset(tcs, 0, 16); /* STATE and FLAGS */
+ memcpy(tcs + 16, &op->ssa, 8); /* OSSA */
+ memset(tcs + 24, 0, 4); /* CSSA */
+ val_32 = 1;
+ memcpy(tcs + 28, &val_32, 4); /* NSSA */
+ memcpy(tcs + 32, &op->entry, 8); /* OENTRY */
+ memset(tcs + 40, 0, 24); /* AEP, OFSBASE, OGSBASE */
+ val_32 = 0xFFFFFFFF;
+ memcpy(tcs + 64, &val_32, 4); /* FSLIMIT */
+ memcpy(tcs + 68, &val_32, 4); /* GSLIMIT */
+ memset(tcs + 72, 0, 4024); /* Reserved */
+}
+
static void do_encl_op_put_to_buf(void *op)
{
struct encl_op_put_to_buf *op2 = op;
@@ -100,6 +129,7 @@ void encl_body(void *rdi, void *rsi)
do_encl_op_nop,
do_encl_eaccept,
do_encl_emodpe,
+ do_encl_init_tcs_page,
};
struct encl_op_header *op = (struct encl_op_header *)rdi;
--
2.35.1
From: Reinette Chatre <reinette.chatre(a)intel.com>
The test enclave (test_encl.elf) is built with two initialized
Thread Control Structures (TCS) included in the binary. Both TCS are
initialized with the same entry point, encl_entry, which correctly
computes the absolute address of the stack based on the per-TCS
stacks that are also built into the binary.

A new TCS can be added dynamically to the enclave and needs to be
initialized with an entry point used to enter the enclave. Since the
existing entry point, encl_entry, assumes that the TCS and its stack
exist at particular offsets within the binary, it cannot handle a
dynamically added TCS and its stack.

Introduce a new entry point, encl_dyn_entry, that initializes the
absolute address of that thread's stack to the address immediately
preceding the TCS itself. This makes it possible to dynamically add
a contiguous memory region to the enclave with the new stack
preceding the new TCS. With the new TCS initialized with
encl_dyn_entry as its entry point, the absolute address of the stack
is computed correctly on entry.

Signed-off-by: Reinette Chatre <reinette.chatre(a)intel.com>
---
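As an illustration (not part of this patch), the layout that
encl_dyn_entry expects for a dynamically added thread looks roughly as
sketched below. RBX holds the TCS address on enclave entry, so the
stack pointer is initialized to the byte just below the new TCS; sizes
and any additional pages (e.g. an SSA) are chosen by the test that
creates the region:

/*
 * Rough layout of a dynamically added thread region:
 *
 *   +--------------------+  <- lowest address of the new region
 *   |  stack page(s)     |     stack grows down from just below
 *   +--------------------+     the TCS
 *   |  TCS page          |  <- RBX on entry, OENTRY = encl_dyn_entry
 *   +--------------------+
 */
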
tools/testing/selftests/sgx/test_encl_bootstrap.S | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/tools/testing/selftests/sgx/test_encl_bootstrap.S b/tools/testing/selftests/sgx/test_encl_bootstrap.S
index 82fb0dfcbd23..03ae0f57e29d 100644
--- a/tools/testing/selftests/sgx/test_encl_bootstrap.S
+++ b/tools/testing/selftests/sgx/test_encl_bootstrap.S
@@ -45,6 +45,12 @@ encl_entry:
# TCS #2. By adding the value of encl_stack to it, we get
# the absolute address for the stack.
lea (encl_stack)(%rbx), %rax
+ jmp encl_entry_core
+encl_dyn_entry:
+ # Entry point for dynamically created TCS page expected to follow
+ # its stack directly.
+ lea -1(%rbx), %rax
+encl_entry_core:
xchg %rsp, %rax
push %rax
--
2.35.1
From: Reinette Chatre <reinette.chatre(a)intel.com>
Enclave pages can be added to an initialized enclave when an address
belonging to the enclave but without a backing page is accessed from
within the enclave.

Memory without a backing enclave page can be accessed from within an
enclave in different ways:

1) Pre-emptively run ENCLU[EACCEPT]. Since the addition of a page
   always needs to be accepted by the enclave via ENCLU[EACCEPT],
   this flow is efficient: the first execution of ENCLU[EACCEPT]
   triggers the addition of the page, and when execution returns to
   the same instruction the second execution succeeds as the
   acceptance of the page.

2) A direct read or write. When a direct read or write triggers the
   page addition, execution cannot resume from the instruction
   (read/write) that triggered the fault. Instead, the enclave needs
   to be entered at a different entry point to run the needed
   ENCLU[EACCEPT] before execution can return to the original entry
   point and the read/write instruction that faulted.

Add tests for both flows.

Signed-off-by: Reinette Chatre <reinette.chatre(a)intel.com>
---
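A condensed sketch of flow 1), using the same operation type the tests
below use (ENCL_OP_EACCEPT). Here 'addr' is an assumption: an address
mapped into the enclave range that has no backing EPC page yet. The
enclave entry itself (the selftest's ENCL_CALL()) is elided:

/*
 * Flow 1) in a nutshell: ENCLU[EACCEPT], run from within the enclave
 * on 'addr', faults; the kernel adds (EAUGs) the backing page; the
 * re-executed EACCEPT then accepts it.
 */
static void eaccept_new_page(void *addr)
{
        struct encl_op_eaccept eaccept_op = {
                .header.type = ENCL_OP_EACCEPT,
                .epc_addr = (unsigned long)addr,
                .flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG |
                         SGX_SECINFO_PENDING,
        };

        /*
         * The test enters the enclave with ENCL_CALL(&eaccept_op, ...)
         * here; eaccept_op.ret is 0 on success.
         */
}
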
tools/testing/selftests/sgx/main.c | 243 +++++++++++++++++++++++++++++
1 file changed, 243 insertions(+)
diff --git a/tools/testing/selftests/sgx/main.c b/tools/testing/selftests/sgx/main.c
index ea5f2e064687..13542c5de66f 100644
--- a/tools/testing/selftests/sgx/main.c
+++ b/tools/testing/selftests/sgx/main.c
@@ -86,6 +86,15 @@ static bool vdso_get_symtab(void *addr, struct vdso_symtab *symtab)
return true;
}
+static inline int sgx2_supported(void)
+{
+ unsigned int eax, ebx, ecx, edx;
+
+ __cpuid_count(SGX_CPUID, 0x0, eax, ebx, ecx, edx);
+
+ return eax & 0x2;
+}
+
static unsigned long elf_sym_hash(const char *name)
{
unsigned long h = 0, high;
@@ -863,4 +872,238 @@ TEST_F(enclave, epcm_permissions)
EXPECT_EQ(self->run.exception_addr, 0);
}
+/*
+ * Test the addition of pages to an initialized enclave via writing to
+ * a page that belongs to the enclave's address space but was not
+ * added during enclave creation.
+ */
+TEST_F(enclave, augment)
+{
+ struct encl_op_get_from_addr get_addr_op;
+ struct encl_op_put_to_addr put_addr_op;
+ struct encl_op_eaccept eaccept_op;
+ size_t total_size = 0;
+ void *addr;
+ int i;
+
+ if (!sgx2_supported())
+ SKIP(return, "SGX2 not supported");
+
+ ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
+
+ memset(&self->run, 0, sizeof(self->run));
+ self->run.tcs = self->encl.encl_base;
+
+ for (i = 0; i < self->encl.nr_segments; i++) {
+ struct encl_segment *seg = &self->encl.segment_tbl[i];
+
+ total_size += seg->size;
+ }
+
+ /*
+ * Actual enclave size is expected to be larger than the loaded
+ * test enclave since enclave size must be a power of 2 in bytes
+ * and test_encl does not consume it all.
+ */
+ EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);
+
+ /*
+ * Create memory mapping for the page that will be added. New
+ * memory mapping is for one page right after all existing
+ * mappings.
+ */
+ addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_SHARED | MAP_FIXED, self->encl.fd, 0);
+ EXPECT_NE(addr, MAP_FAILED);
+
+ self->run.exception_vector = 0;
+ self->run.exception_error_code = 0;
+ self->run.exception_addr = 0;
+
+ /*
+ * Attempt to write to the new page from within enclave.
+ * Expected to fail since page is not (yet) part of the enclave.
+ * The first #PF will trigger the addition of the page to the
+ * enclave, but since the new page needs an EACCEPT from within the
+ * enclave before it can be used it would not be possible
+ * to successfully return to the failing instruction. This is the
+ * cause of the second #PF captured here having the SGX bit set:
+ * it is the hardware preventing the page from being used.
+ */
+ put_addr_op.value = MAGIC;
+ put_addr_op.addr = (unsigned long)addr;
+ put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
+
+ EXPECT_EQ(self->run.function, ERESUME);
+ EXPECT_EQ(self->run.exception_vector, 14);
+ EXPECT_EQ(self->run.exception_addr, (unsigned long)addr);
+
+ if (self->run.exception_error_code == 0x6) {
+ munmap(addr, PAGE_SIZE);
+ SKIP(return, "Kernel does not support adding pages to initialized enclave");
+ }
+
+ EXPECT_EQ(self->run.exception_error_code, 0x8007);
+
+ self->run.exception_vector = 0;
+ self->run.exception_error_code = 0;
+ self->run.exception_addr = 0;
+
+ /* Handle AEX by running EACCEPT from new entry point. */
+ self->run.tcs = self->encl.encl_base + PAGE_SIZE;
+
+ eaccept_op.epc_addr = self->encl.encl_base + total_size;
+ eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
+ eaccept_op.ret = 0;
+ eaccept_op.header.type = ENCL_OP_EACCEPT;
+
+ EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+ EXPECT_EQ(eaccept_op.ret, 0);
+
+ /* Can now return to main TCS to resume execution. */
+ self->run.tcs = self->encl.encl_base;
+
+ EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0,
+ ERESUME, 0, 0,
+ &self->run),
+ 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ /*
+ * Read memory from newly added page that was just written to,
+ * confirming that data previously written (MAGIC) is present.
+ */
+ get_addr_op.value = 0;
+ get_addr_op.addr = (unsigned long)addr;
+ get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
+
+ EXPECT_EQ(get_addr_op.value, MAGIC);
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ munmap(addr, PAGE_SIZE);
+}
+
+/*
+ * Test for the addition of pages to an initialized enclave via a
+ * pre-emptive run of EACCEPT on the page to be added.
+ */
+TEST_F(enclave, augment_via_eaccept)
+{
+ struct encl_op_get_from_addr get_addr_op;
+ struct encl_op_put_to_addr put_addr_op;
+ struct encl_op_eaccept eaccept_op;
+ size_t total_size = 0;
+ void *addr;
+ int i;
+
+ if (!sgx2_supported())
+ SKIP(return, "SGX2 not supported");
+
+ ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
+
+ memset(&self->run, 0, sizeof(self->run));
+ self->run.tcs = self->encl.encl_base;
+
+ for (i = 0; i < self->encl.nr_segments; i++) {
+ struct encl_segment *seg = &self->encl.segment_tbl[i];
+
+ total_size += seg->size;
+ }
+
+ /*
+ * Actual enclave size is expected to be larger than the loaded
+ * test enclave since enclave size must be a power of 2 in bytes while
+ * test_encl does not consume it all.
+ */
+ EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);
+
+ /*
+ * mmap() a page at the end of the existing enclave to be used for
+ * the dynamically added EPC page.
+ */
+
+ addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED | MAP_FIXED,
+ self->encl.fd, 0);
+ EXPECT_NE(addr, MAP_FAILED);
+
+ self->run.exception_vector = 0;
+ self->run.exception_error_code = 0;
+ self->run.exception_addr = 0;
+
+ /*
+ * Run EACCEPT on the new page to trigger the #PF->EAUG->EACCEPT
+ * (again, without a #PF) flow. All should be transparent to userspace.
+ */
+ eaccept_op.epc_addr = self->encl.encl_base + total_size;
+ eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
+ eaccept_op.ret = 0;
+ eaccept_op.header.type = ENCL_OP_EACCEPT;
+
+ EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
+
+ if (self->run.exception_vector == 14 &&
+ self->run.exception_error_code == 4 &&
+ self->run.exception_addr == self->encl.encl_base + total_size) {
+ munmap(addr, PAGE_SIZE);
+ SKIP(return, "Kernel does not support adding pages to initialized enclave");
+ }
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+ EXPECT_EQ(eaccept_op.ret, 0);
+
+ /*
+ * New page should be accessible from within enclave - attempt to
+ * write to it.
+ */
+ put_addr_op.value = MAGIC;
+ put_addr_op.addr = (unsigned long)addr;
+ put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
+
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ /*
+ * Read memory from newly added page that was just written to,
+ * confirming that data previously written (MAGIC) is present.
+ */
+ get_addr_op.value = 0;
+ get_addr_op.addr = (unsigned long)addr;
+ get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
+
+ EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
+
+ EXPECT_EQ(get_addr_op.value, MAGIC);
+ EXPECT_EEXIT(&self->run);
+ EXPECT_EQ(self->run.exception_vector, 0);
+ EXPECT_EQ(self->run.exception_error_code, 0);
+ EXPECT_EQ(self->run.exception_addr, 0);
+
+ munmap(addr, PAGE_SIZE);
+}
+
TEST_HARNESS_MAIN
--
2.35.1
Fix the following coccicheck warning:

tools/testing/selftests/vm/userfaultfd.c:556:23-24:
WARNING this kind of initialization is deprecated

`unsigned long page_nr = *(&page_nr)` has the same form as the
uninitialized_var() macro. I remove the redundant assignment. It has
been tested with gcc (Debian 8.3.0-6) 8.3.0.

The patch which removed uninitialized_var() is:
https://lore.kernel.org/all/20121028102007.GA7547@gmail.com/

And there are very few "/* GCC */" comments in the Linux kernel code
now.

Signed-off-by: Guo Zhengkui <guozhengkui(a)vivo.com>
---
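For context, a rough sketch of the removed idiom (not an exact copy of
any old kernel definition); the self-initialization only silenced
-Wmaybe-uninitialized and never actually initialized the variable:

/* Rough sketch, for illustration only. */
#define uninitialized_var(x) x = *(&(x))

static void example(void)
{
        unsigned long uninitialized_var(page_nr);
        /* expands to: unsigned long page_nr = *(&(page_nr)); */
        (void)page_nr;
}
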
tools/testing/selftests/vm/userfaultfd.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index fe404398c65a..203c4a2c2109 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -553,7 +553,7 @@ static void continue_range(int ufd, __u64 start, __u64 len)
static void *locking_thread(void *arg)
{
unsigned long cpu = (unsigned long) arg;
- unsigned long page_nr = *(&(page_nr)); /* uninitialized warning */
+ unsigned long page_nr;
unsigned long long count;
if (!(bounces & BOUNCE_RANDOM)) {
--
2.20.1
When building the vm selftests with clang, errors are seen because
header files end up in the compilation command:

clang -Wall -I ../../../../usr/include -no-pie gup_test.c ../../../../mm/gup_test.h -lrt -lpthread -o .../tools/testing/selftests/vm/gup_test
clang: error: cannot specify -o when generating multiple output files
make[1]: *** [../lib.mk:146: .../tools/testing/selftests/vm/gup_test] Error 1

Rework to add the header files to LOCAL_HDRS before including
../lib.mk, since that dependency is evaluated by the
'$(OUTPUT)/%:%.c $(LOCAL_HDRS)' rule in lib.mk.

Signed-off-by: Yosry Ahmed <yosryahmed(a)google.com>
---
This patch was inspired by:
https://lore.kernel.org/lkml/20211105162530.3307666-1-anders.roxell@linaro.…
---
tools/testing/selftests/vm/Makefile | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 1607322a112c..a14b5b800897 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for vm selftests
+LOCAL_HDRS += $(selfdir)/vm/local_config.h $(top_srcdir)/mm/gup_test.h
+
include local_config.mk
uname_M := $(shell uname -m 2>/dev/null || echo not)
@@ -140,10 +142,6 @@ endif
$(OUTPUT)/mlock-random-test $(OUTPUT)/memfd_secret: LDLIBS += -lcap
-$(OUTPUT)/gup_test: ../../../../mm/gup_test.h
-
-$(OUTPUT)/hmm-tests: local_config.h
-
# HMM_EXTRA_LIBS may get set in local_config.mk, or it may be left empty.
$(OUTPUT)/hmm-tests: LDLIBS += $(HMM_EXTRA_LIBS)
--
2.35.1.616.g0bdcbb4464-goog