[PATCH 06/12] selftests/bpf: Add tests for exclusive maps
KP Singh
kpsingh at kernel.org
Fri Jun 6 23:29:08 UTC 2025
Add tests for exclusive maps, verifying that:

* maps of maps currently cannot be exclusive.
* inner maps cannot be exclusive.
* access to an exclusive map is denied to any program other than the one
  it was made exclusive to (see the sketch below).
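A minimal sketch of the flow the tests exercise, using the skeleton added by
this patch (error handling elided):

    struct map_excl *skel = map_excl__open();
    int err;

    /* Bind the map to one program between open and load; after load,
     * only that program may reference excl_map.
     */
    err = bpf_map__make_exclusive(skel->maps.excl_map,
                                  skel->progs.should_have_access);
    if (!err)
            err = map_excl__load(skel);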
Signed-off-by: KP Singh <kpsingh at kernel.org>
---
.../selftests/bpf/prog_tests/map_excl.c | 130 ++++++++++++++++++
tools/testing/selftests/bpf/progs/map_excl.c | 65 +++++++++
2 files changed, 195 insertions(+)
create mode 100644 tools/testing/selftests/bpf/prog_tests/map_excl.c
create mode 100644 tools/testing/selftests/bpf/progs/map_excl.c
diff --git a/tools/testing/selftests/bpf/prog_tests/map_excl.c b/tools/testing/selftests/bpf/prog_tests/map_excl.c
new file mode 100644
index 000000000000..2f6f81ef7ae2
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/map_excl.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+#include "map_excl.skel.h"
+
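+/* An inner map of a map-in-map cannot be made exclusive: loading must fail. */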
+static void test_map_exclusive_inner(void)
+{
+ struct map_excl *skel;
+ int err;
+
+ skel = map_excl__open();
+ if (!ASSERT_OK_PTR(skel, "map_excl open"))
+ return;
+
+ err = bpf_map__make_exclusive(skel->maps.inner_map,
+ skel->progs.should_have_access);
+ if (!ASSERT_OK(err, "bpf_map__make_exclusive"))
+ goto out;
+
+ err = map_excl__load(skel);
+ ASSERT_EQ(err, -EOPNOTSUPP, "map_excl__load");
+
+out:
+ map_excl__destroy(skel);
+}
+
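+/* An outer array-of-maps cannot be made exclusive; expect -EOPNOTSUPP on load. */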
+static void test_map_exclusive_outer_array(void)
+{
+ struct map_excl *skel;
+ int err;
+
+ skel = map_excl__open();
+ if (!ASSERT_OK_PTR(skel, "map_excl open"))
+ return;
+
+ err = bpf_map__make_exclusive(skel->maps.outer_array_map,
+ skel->progs.should_have_access);
+ if (!ASSERT_OK(err, "bpf_map__make_exclusive"))
+ goto out;
+
+ bpf_program__set_autoload(skel->progs.should_have_access, true);
+ bpf_program__set_autoload(skel->progs.should_not_have_access, false);
+
+ err = map_excl__load(skel);
+ ASSERT_EQ(err, -EOPNOTSUPP, "exclusive maps of maps are not supported\n");
+out:
+ map_excl__destroy(skel);
+}
+
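+/* Same check as above, for a hash-of-maps outer map. */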
+static void test_map_exclusive_outer_htab(void)
+{
+ struct map_excl *skel;
+ int err;
+
+ skel = map_excl__open();
+ if (!ASSERT_OK_PTR(skel, "map_excl open"))
+ return;
+
+ err = bpf_map__make_exclusive(skel->maps.outer_htab_map,
+ skel->progs.should_have_access);
+ if (!ASSERT_OK(err, "bpf_map__make_exclusive"))
+ goto out;
+
+ bpf_program__set_autoload(skel->progs.should_have_access, true);
+ bpf_program__set_autoload(skel->progs.should_not_have_access, false);
+
+ err = map_excl__load(skel);
+ ASSERT_EQ(err, -EOPNOTSUPP, "exclusive maps of maps are not supported\n");
+
+out:
+ map_excl__destroy(skel);
+}
+
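+/* The program the map was made exclusive to must be able to load and use it. */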
+static void test_map_excl_allowed(void)
+{
+ struct map_excl *skel = map_excl__open();
+ int err;
+
+ if (!ASSERT_OK_PTR(skel, "map_excl open"))
+ return;
+
+ err = bpf_map__make_exclusive(skel->maps.excl_map, skel->progs.should_have_access);
+ if (!ASSERT_OK(err, "bpf_map__make_exclusive"))
+ goto out;
+
+ bpf_program__set_autoload(skel->progs.should_have_access, true);
+ bpf_program__set_autoload(skel->progs.should_not_have_access, false);
+
+ err = map_excl__load(skel);
+ ASSERT_OK(err, "map_excl__load");
+out:
+ map_excl__destroy(skel);
+}
+
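+/* Loading any other program that references the exclusive map must fail. */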
+static void test_map_excl_denied(void)
+{
+ struct map_excl *skel = map_excl__open();
+ int err;
+
+ if (!ASSERT_OK_PTR(skel, "map_excl open"))
+ return;
+
+ err = bpf_map__make_exclusive(skel->maps.excl_map, skel->progs.should_have_access);
+ if (!ASSERT_OK(err, "bpf_map__make_exclusive"))
+ goto out;
+
+ bpf_program__set_autoload(skel->progs.should_have_access, false);
+ bpf_program__set_autoload(skel->progs.should_not_have_access, true);
+
+ err = map_excl__load(skel);
+ ASSERT_EQ(err, -EACCES, "exclusive map access not denied");
+out:
+ map_excl__destroy(skel);
+}
+
+void test_map_excl(void)
+{
+ start_libbpf_log_capture();
+ if (test__start_subtest("map_excl_allowed"))
+ test_map_excl_allowed();
+ stop_libbpf_log_capture();
+ if (test__start_subtest("map_excl_denied"))
+ test_map_excl_denied();
+ if (test__start_subtest("map_exclusive_outer_array"))
+ test_map_exclusive_outer_array();
+ if (test__start_subtest("map_exclusive_outer_htab"))
+ test_map_exclusive_outer_htab();
+ if (test__start_subtest("map_exclusive_inner"))
+ test_map_exclusive_inner();
+}
diff --git a/tools/testing/selftests/bpf/progs/map_excl.c b/tools/testing/selftests/bpf/progs/map_excl.c
new file mode 100644
index 000000000000..9543aa3ab484
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/map_excl.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
+#include <linux/bpf.h>
+#include <time.h>
+#include <bpf/bpf_helpers.h>
+
+#include "bpf_misc.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 1);
+} excl_map SEC(".maps");
+
+struct inner_map_type {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(key_size, 4);
+ __uint(value_size, 4);
+ __uint(max_entries, 1);
+} inner_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 1);
+ __array(values, struct inner_map_type);
+} outer_array_map SEC(".maps") = {
+ .values = {
+ [0] = &inner_map,
+ },
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __type(key, int);
+ __type(value, int);
+ __uint(max_entries, 1);
+ __array(values, struct inner_map_type);
+} outer_htab_map SEC(".maps") = {
+ .values = {
+ [0] = &inner_map,
+ },
+};
+
+char _license[] SEC("license") = "GPL";
+
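+/* Two identical programs that update excl_map; the test binds the map to one
+ * of them before load to check that only that program is granted access.
+ */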
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int should_have_access(void *ctx)
+{
+ int key = 0, value = 0xdeadbeef;
+
+ bpf_map_update_elem(&excl_map, &key, &value, 0);
+ return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int should_not_have_access(void *ctx)
+{
+ int key = 0, value = 0xdeadbeef;
+
+ bpf_map_update_elem(&excl_map, &key, &value, 0);
+ return 0;
+}
--
2.43.0