[PATCH bpf-next 22/29] libbpf: further decouple feature checking logic from bpf_object
Andrii Nakryiko
andrii at kernel.org
Wed Jan 3 22:20:27 UTC 2024
Add a feat_supported() helper that accepts a feature cache instead of
a bpf_object. This allows low-level code in bpf.c to not know or care
about the higher-level concept of bpf_object, while still being able to
utilize custom feature checking in cases where a BPF token might
influence the outcome.
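
For illustration, a minimal sketch of how a caller with its own cache
could use the new helper (the fill_map_name() wrapper and its call site
are hypothetical; feat_supported(), FEAT_PROG_NAME, and the opaque
struct kern_feature_cache are the libbpf internals this patch touches,
and libbpf_strlcpy() is a pre-existing internal helper):

    #include <linux/bpf.h>
    #include "libbpf_internal.h"

    /* Hypothetical wrapper: a caller that probed kernel features
     * through a BPF token can pass its own cache; passing NULL falls
     * back to the global feature_cache, matching the pre-patch
     * behavior of kernel_supports(NULL, ...).
     */
    static void fill_map_name(union bpf_attr *attr, const char *name,
    			  struct kern_feature_cache *cache)
    {
    	if (name && feat_supported(cache, FEAT_PROG_NAME))
    		libbpf_strlcpy(attr->map_name, name,
    			       sizeof(attr->map_name));
    }

Since struct kern_feature_cache is only forward-declared in
libbpf_internal.h, callers treat it as an opaque handle; only the
feature-probing code needs its layout.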
Acked-by: John Fastabend <john.fastabend at gmail.com>
Signed-off-by: Andrii Nakryiko <andrii at kernel.org>
---
tools/lib/bpf/bpf.c | 6 +++---
tools/lib/bpf/libbpf.c | 22 +++++++++++++++-------
tools/lib/bpf/libbpf_internal.h | 5 ++++-
3 files changed, 22 insertions(+), 11 deletions(-)
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index d69137459abf..10bf11a758bf 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -146,7 +146,7 @@ int bump_rlimit_memlock(void)
struct rlimit rlim;
/* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
- if (memlock_bumped || kernel_supports(NULL, FEAT_MEMCG_ACCOUNT))
+ if (memlock_bumped || feat_supported(NULL, FEAT_MEMCG_ACCOUNT))
return 0;
memlock_bumped = true;
@@ -181,7 +181,7 @@ int bpf_map_create(enum bpf_map_type map_type,
return libbpf_err(-EINVAL);
attr.map_type = map_type;
- if (map_name && kernel_supports(NULL, FEAT_PROG_NAME))
+ if (map_name && feat_supported(NULL, FEAT_PROG_NAME))
libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
attr.key_size = key_size;
attr.value_size = value_size;
@@ -265,7 +265,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
attr.kern_version = OPTS_GET(opts, kern_version, 0);
attr.prog_token_fd = OPTS_GET(opts, token_fd, 0);
- if (prog_name && kernel_supports(NULL, FEAT_PROG_NAME))
+ if (prog_name && feat_supported(NULL, FEAT_PROG_NAME))
libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
attr.license = ptr_to_u64(license);
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 95a7d459b842..aea40c42b90e 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -5076,17 +5076,14 @@ static struct kern_feature_desc {
},
};
-bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
+bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id)
{
struct kern_feature_desc *feat = &feature_probes[feat_id];
- struct kern_feature_cache *cache = &feature_cache;
int ret;
- if (obj && obj->gen_loader)
- /* To generate loader program assume the latest kernel
- * to avoid doing extra prog_load, map_create syscalls.
- */
- return true;
+ /* assume global feature cache, unless custom one is provided */
+ if (!cache)
+ cache = &feature_cache;
if (READ_ONCE(cache->res[feat_id]) == FEAT_UNKNOWN) {
ret = feat->probe();
@@ -5103,6 +5100,17 @@ bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
return READ_ONCE(cache->res[feat_id]) == FEAT_SUPPORTED;
}
+bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
+{
+ if (obj && obj->gen_loader)
+ /* To generate loader program assume the latest kernel
+ * to avoid doing extra prog_load, map_create syscalls.
+ */
+ return true;
+
+ return feat_supported(NULL, feat_id);
+}
+
static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
{
struct bpf_map_info map_info;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index b5d334754e5d..754a432335e4 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -360,8 +360,11 @@ enum kern_feature_id {
__FEAT_CNT,
};
-int probe_memcg_account(void);
+struct kern_feature_cache;
+bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id);
bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);
+
+int probe_memcg_account(void);
int bump_rlimit_memlock(void);
int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
--
2.34.1