
[v3] Hugetlb: Migrating libhugetlbfs shm-perms

Message ID 20240913134036.12078-1-spoorthy@linux.ibm.com
State Changes Requested
Headers show
Series [v3] Hugetlb: Migrating libhugetlbfs shm-perms

Commit Message

Spoorthy Sept. 13, 2024, 1:40 p.m. UTC
Test Description: Tests the behavior of shared memory when
multiple processes attach to a segment with different permissions.

During these memory operations, the reservation accounting of free
hugepages between the parent and child processes may become inconsistent.
The parent creates 4 hugepages and a shared memory segment
(size segment_size, permission 0640), attaches it, and initializes
four parts with a pattern (0x55), then detaches it. Child processes
are created in a loop, each reattaching the segment in read-only mode
with SHM_RDONLY, detaching, and exiting. If attach/detach fails or
if the reservation accounting of free hugepages doesn't match
between parent and child, the test fails. If all child processes exit
successfully and accounting matches, the test passes.

Tested and verified the success of the test case.

Signed-off-by: Spoorthy <spoorthy@linux.ibm.com>
---
 runtest/hugetlb                               |  1 +
 testcases/kernel/mem/.gitignore               |  1 +
 .../mem/hugetlb/hugeshmat/hugeshmat06.c       | 97 +++++++++++++++++++
 3 files changed, 99 insertions(+)
 create mode 100644 testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat06.c

Comments

Li Wang Oct. 18, 2024, 9:31 a.m. UTC | #1
Hi Spoorthy,

On Fri, Sep 13, 2024 at 9:41 PM Spoorthy <spoorthy@linux.ibm.com> wrote:

> Test Description: Tests the behavior of shared memory when
> multiple processes attach to a segment with different permissions.
>
> During these memory operations, the reservation accounting of free
> hugepages between the parent and child processes may become inconsistent.
> The parent creates 4 hugepages and a shared memory segment
> (size segment_size, permission 0640), attaches it, and initializes
> four parts with a pattern (0x55), then detaches it. Child processes
> are created in a loop, each reattaching the segment in read-only mode
> with SHM_RDONLY, detaching, and exiting. If attach/detach fails or
> if the reservation accounting of free hugepages doesn't match
> between parent and child, the test fails. If all child processes exit
> successfully and accounting matches, the test passes.
>
> Tested and verified the success of the test case.
>
> Signed-off-by: Spoorthy <spoorthy@linux.ibm.com>
> ---
>  runtest/hugetlb                               |  1 +
>  testcases/kernel/mem/.gitignore               |  1 +
>  .../mem/hugetlb/hugeshmat/hugeshmat06.c       | 97 +++++++++++++++++++
>  3 files changed, 99 insertions(+)
>  create mode 100644 testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat06.c
>
> diff --git a/runtest/hugetlb b/runtest/hugetlb
> index 299c07ac9..240701b2b 100644
> --- a/runtest/hugetlb
> +++ b/runtest/hugetlb
> @@ -44,6 +44,7 @@ hugeshmat02 hugeshmat02 -i 5
>  hugeshmat03 hugeshmat03 -i 5
>  hugeshmat04 hugeshmat04 -i 5
>  hugeshmat05 hugeshmat05 -i 5
> +hugeshmat06 hugeshmat06
>
>  hugeshmctl01 hugeshmctl01 -i 5
>  hugeshmctl02 hugeshmctl02 -i 5
> diff --git a/testcases/kernel/mem/.gitignore b/testcases/kernel/mem/.gitignore
> index c96fe8bfc..4ad1dc313 100644
> --- a/testcases/kernel/mem/.gitignore
> +++ b/testcases/kernel/mem/.gitignore
> @@ -39,6 +39,7 @@
>  /hugetlb/hugeshmat/hugeshmat03
>  /hugetlb/hugeshmat/hugeshmat04
>  /hugetlb/hugeshmat/hugeshmat05
> +/hugetlb/hugeshmat/hugeshmat06
>  /hugetlb/hugeshmctl/hugeshmctl01
>  /hugetlb/hugeshmctl/hugeshmctl02
>  /hugetlb/hugeshmctl/hugeshmctl03
> diff --git a/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat06.c b/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat06.c
> new file mode 100644
> index 000000000..bcb31b1d4
> --- /dev/null
> +++ b/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat06.c
> @@ -0,0 +1,97 @@
> +// SPDX-License-Identifier: GPL-2.0-or-later
> +/*
> + * Copyright (C) 2005-2006, IBM Corporation.
> + * Author: David Gibson & Adam Litke
> + */
> +/*\
> + * [DESCRIPTION]
> + * Test shared memory behavior when multiple threads are attached
> + * to a segment with different permissions.  A segment is created
> + * and children attach read-only to check reservation accounting.
> + */
> +
> +#include "hugetlb.h"
> +#include "tst_safe_sysv_ipc.h"
> +
> +#define SEGMENT_KEY (0x82ba15ff)
> +#define MNTPOINT "hugetlbfs/"
> +#define HPAGES_IN_SEGMENT 4
> +
> +static int global_shmid = -1;
> +static void *shmaddr;
> +static long segment_size, hpage_size, stride;
> +
> +static int attach_segment(size_t segsize, int shmflags, int shmperms)
> +{
> +       int shmid;
> +
> +       shmid = SAFE_SHMGET(SEGMENT_KEY, segsize, shmflags);
> +       shmaddr = SAFE_SHMAT(shmid, shmaddr, shmperms);
> +       global_shmid = shmid;
> +       return shmid;
> +}
> +
> +static void setup(void)
> +{
> +       hpage_size = tst_get_hugepage_size();
> +       segment_size = 4 * hpage_size;
> +       stride = hpage_size;
> +}
> +
> +static void compare_free_hugepage_memory(long free_end, long free_start)
> +{
> +       if (free_end != free_start)
> +               tst_res(TFAIL, "Free hugepages after attaching multiple threads differ from initial allocation");
>

We cannot make such an assumption for TFAIL, because other processes
may be using hugepages in the background; this test is not the only one
occupying huge pages, so it would be possible to get false positives in
real testing.




> +}
> +
> +static void run_test(void)
> +{
> +       char *p;
> +       int i, iterations;
> +       long total_hpages, free_start, free_end, val;
> +
> +       total_hpages = SAFE_READ_MEMINFO(MEMINFO_HPAGE_TOTAL);
>

If we set the number in tst_test->hugepages at the beginning,
'tst_hugepages' can be used directly, since the LTP library already
saves the reserved hugepage number.
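
For illustration, a minimal sketch of what that could look like, reusing
the variables from the patch (assuming tst_test->hugepages has been set):

  /* tst_hugepages already holds the number of hugepages the LTP library
   * reserved for the test, so no extra read of /proc/meminfo is needed.
   */
  iterations = (tst_hugepages * hpage_size) / segment_size + 1;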



> +       iterations = (total_hpages * hpage_size) / segment_size+1;
>

This is not a good way to count the iterations, because reserving
32 hugepages is excessive when we only need 4 in the test.

Maybe we can count the iterations according to the number of system CPUs?
If the system is very large, just cap it at an acceptable upper limit
(e.g. 128).
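
Something along these lines, for example (a rough sketch using
tst_ncpus() from the LTP library; the 128 cap is just the suggested
upper limit, not a hard requirement):

  /* Scale the number of child processes with the CPU count, capped. */
  iterations = tst_ncpus();
  if (iterations > 128)
          iterations = 128;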



> +       attach_segment(segment_size, IPC_CREAT | SHM_HUGETLB | 0640, 0);
> +       p = (char *)shmaddr;
> +       for (i = 0; i < 4; i++, p += stride)
> +               memset(p, 0x55, stride);
> +       free_start = SAFE_READ_MEMINFO(MEMINFO_HPAGE_FREE);
> +       SAFE_SHMDT((const void *)shmaddr);
> +       for (i = 0; i < iterations; i++) {
>

Move the 'val' declaration here, rather than declaring it in the parent.

> +               pid_t pid;
> +
> +               pid = SAFE_FORK();
> +               if (!pid) {
> +                       attach_segment(0, 0, SHM_RDONLY);
> +                       for (i = 0; i < HPAGES_IN_SEGMENT; i++)
> +                               val = *((char *)shmaddr + (i * hpage_size));
>

I would suggest printing something here to show the child's behavior:

  tst_res(TINFO, "Child %d attaches the segment successfully", getpid());



> +                       SAFE_SHMDT(((const void *)shmaddr));
> +                       free_end = SAFE_READ_MEMINFO(MEMINFO_HPAGE_FREE);
>


> +                       compare_free_hugepage_memory(free_end, free_start);
>

Probably a safer way is to check that the contents of the shared memory
are correct, since the children only have read permission.
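
A rough sketch of such a check in the child, reusing the 0x55 pattern
and the variables from the patch:

  /* Verify the pattern the parent wrote is visible through the
   * read-only attachment.
   */
  for (i = 0; i < HPAGES_IN_SEGMENT; i++) {
          if (*((char *)shmaddr + (i * hpage_size)) != 0x55)
                  tst_res(TFAIL, "Wrong data in hugepage %d of read-only segment", i);
  }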


> +                       exit(EXIT_SUCCESS);
> +               }
> +       }
> +       free_end = SAFE_READ_MEMINFO(MEMINFO_HPAGE_FREE);
> +       compare_free_hugepage_memory(free_end, free_start);
> +       tst_reap_children();
> +       tst_res(TPASS, "Successfully tested shared memory behavior when multiple threads are attached");
> +}
> +
> +static void cleanup(void)
> +{
> +       if (global_shmid >= 0)
> +               SAFE_SHMCTL(global_shmid, IPC_RMID, NULL);
> +}
> +
> +static struct tst_test test = {
> +       .needs_root = 1,
> +       .mntpoint = MNTPOINT,
> +       .needs_hugetlbfs = 1,
> +       .needs_tmpdir = 1,
> +       .forks_child = 1,
> +       .setup = setup,
> +       .cleanup = cleanup,
> +       .test_all = run_test,
> +       .hugepages = {32, TST_NEEDS},
>

Why request 32 hpages? In the above test, as far as I can see, only 4
are enough.

> +};
> --
> 2.43.0
>
>
> --
> Mailing list info: https://lists.linux.it/listinfo/ltp
>
>
Cyril Hrubis Nov. 6, 2024, 9:44 a.m. UTC | #2
Hi!
> > +static void compare_free_hugepage_memory(long free_end, long free_start)
> > +{
> > +       if (free_end != free_start)
> > +               tst_res(TFAIL, "Free hugepages after attaching multiple threads differ from initial allocation");
> >
> 
> We cannot make such an assumption for TFAIL, because other processes
> may be using hugepages in the background; this test is not the only one
> occupying huge pages, so it would be possible to get false positives in
> real testing.

We already have similar assumptions in the rest of the hugepage tests,
however at least it's clear from the message that is printed that it may
fail if we run more than one test in parallel or if the system allocates
hugepages for something else.

But I guess we can do better: we can look at the HugetlbPages: field
in "/proc/self/status" instead and check that it's incremented and
decremented properly as the test runs.
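
For example, a minimal sketch of reading that field (assuming the
SAFE_FILE_LINES_SCANF() helper from tst_safe_file_ops.h is usable here;
the value in /proc/self/status is reported in kB):

  static long get_hugetlb_kb(void)
  {
          long val;

          /* Hugepages accounted to this process only, in kB. */
          SAFE_FILE_LINES_SCANF("/proc/self/status", "HugetlbPages: %ld", &val);
          return val;
  }

The test could then compare the value before and after each attach/detach
in the process that performed the operation, independently of what the
rest of the system is doing with hugepages.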

Patch

diff --git a/runtest/hugetlb b/runtest/hugetlb
index 299c07ac9..240701b2b 100644
--- a/runtest/hugetlb
+++ b/runtest/hugetlb
@@ -44,6 +44,7 @@  hugeshmat02 hugeshmat02 -i 5
 hugeshmat03 hugeshmat03 -i 5
 hugeshmat04 hugeshmat04 -i 5
 hugeshmat05 hugeshmat05 -i 5
+hugeshmat06 hugeshmat06
 
 hugeshmctl01 hugeshmctl01 -i 5
 hugeshmctl02 hugeshmctl02 -i 5
diff --git a/testcases/kernel/mem/.gitignore b/testcases/kernel/mem/.gitignore
index c96fe8bfc..4ad1dc313 100644
--- a/testcases/kernel/mem/.gitignore
+++ b/testcases/kernel/mem/.gitignore
@@ -39,6 +39,7 @@ 
 /hugetlb/hugeshmat/hugeshmat03
 /hugetlb/hugeshmat/hugeshmat04
 /hugetlb/hugeshmat/hugeshmat05
+/hugetlb/hugeshmat/hugeshmat06
 /hugetlb/hugeshmctl/hugeshmctl01
 /hugetlb/hugeshmctl/hugeshmctl02
 /hugetlb/hugeshmctl/hugeshmctl03
diff --git a/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat06.c b/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat06.c
new file mode 100644
index 000000000..bcb31b1d4
--- /dev/null
+++ b/testcases/kernel/mem/hugetlb/hugeshmat/hugeshmat06.c
@@ -0,0 +1,97 @@ 
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2005-2006, IBM Corporation.
+ * Author: David Gibson & Adam Litke
+ */
+/*\
+ * [DESCRIPTION]
+ * Test shared memory behavior when multiple threads are attached
+ * to a segment with different permissions.  A segment is created
+ * and children attach read-only to check reservation accounting.
+ */
+
+#include "hugetlb.h"
+#include "tst_safe_sysv_ipc.h"
+
+#define SEGMENT_KEY (0x82ba15ff)
+#define MNTPOINT "hugetlbfs/"
+#define HPAGES_IN_SEGMENT 4
+
+static int global_shmid = -1;
+static void *shmaddr;
+static long segment_size, hpage_size, stride;
+
+static int attach_segment(size_t segsize, int shmflags, int shmperms)
+{
+	int shmid;
+
+	shmid = SAFE_SHMGET(SEGMENT_KEY, segsize, shmflags);
+	shmaddr = SAFE_SHMAT(shmid, shmaddr, shmperms);
+	global_shmid = shmid;
+	return shmid;
+}
+
+static void setup(void)
+{
+	hpage_size = tst_get_hugepage_size();
+	segment_size = 4 * hpage_size;
+	stride = hpage_size;
+}
+
+static void compare_free_hugepage_memory(long free_end, long free_start)
+{
+	if (free_end != free_start)
+		tst_res(TFAIL, "Free hugepages after attaching multiple threads differ from initial allocation");
+}
+
+static void run_test(void)
+{
+	char *p;
+	int i, iterations;
+	long total_hpages, free_start, free_end, val;
+
+	total_hpages = SAFE_READ_MEMINFO(MEMINFO_HPAGE_TOTAL);
+	iterations = (total_hpages * hpage_size) / segment_size+1;
+	attach_segment(segment_size, IPC_CREAT | SHM_HUGETLB | 0640, 0);
+	p = (char *)shmaddr;
+	for (i = 0; i < 4; i++, p += stride)
+		memset(p, 0x55, stride);
+	free_start = SAFE_READ_MEMINFO(MEMINFO_HPAGE_FREE);
+	SAFE_SHMDT((const void *)shmaddr);
+	for (i = 0; i < iterations; i++) {
+		pid_t pid;
+
+		pid = SAFE_FORK();
+		if (!pid) {
+			attach_segment(0, 0, SHM_RDONLY);
+			for (i = 0; i < HPAGES_IN_SEGMENT; i++)
+				val = *((char *)shmaddr + (i * hpage_size));
+			SAFE_SHMDT(((const void *)shmaddr));
+			free_end = SAFE_READ_MEMINFO(MEMINFO_HPAGE_FREE);
+			compare_free_hugepage_memory(free_end, free_start);
+			exit(EXIT_SUCCESS);
+		}
+	}
+	free_end = SAFE_READ_MEMINFO(MEMINFO_HPAGE_FREE);
+	compare_free_hugepage_memory(free_end, free_start);
+	tst_reap_children();
+	tst_res(TPASS, "Successfully tested shared memory behavior when multiple threads are attached");
+}
+
+static void cleanup(void)
+{
+	if (global_shmid >= 0)
+		SAFE_SHMCTL(global_shmid, IPC_RMID, NULL);
+}
+
+static struct tst_test test = {
+	.needs_root = 1,
+	.mntpoint = MNTPOINT,
+	.needs_hugetlbfs = 1,
+	.needs_tmpdir = 1,
+	.forks_child = 1,
+	.setup = setup,
+	.cleanup = cleanup,
+	.test_all = run_test,
+	.hugepages = {32, TST_NEEDS},
+};