[PATCH v5 bpf-next 4/5] bpf: fs/xattr: Add BPF kfuncs to set and remove xattrs
Alexei Starovoitov
alexei.starovoitov at gmail.com
Wed Dec 18 21:20:40 UTC 2024
On Tue, Dec 17, 2024 at 8:48 PM Song Liu <song at kernel.org> wrote:
>
>
> BTF_KFUNCS_START(bpf_fs_kfunc_set_ids)
> @@ -170,6 +330,10 @@ BTF_ID_FLAGS(func, bpf_put_file, KF_RELEASE)
> BTF_ID_FLAGS(func, bpf_path_d_path, KF_TRUSTED_ARGS)
> BTF_ID_FLAGS(func, bpf_get_dentry_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
> BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
> +BTF_ID_FLAGS(func, bpf_set_dentry_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
> +BTF_ID_FLAGS(func, bpf_remove_dentry_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
> +BTF_ID_FLAGS(func, bpf_set_dentry_xattr_locked, KF_SLEEPABLE | KF_TRUSTED_ARGS)
> +BTF_ID_FLAGS(func, bpf_remove_dentry_xattr_locked, KF_SLEEPABLE | KF_TRUSTED_ARGS)
> BTF_KFUNCS_END(bpf_fs_kfunc_set_ids)
The _locked() versions shouldn't be exposed to bpf progs.
Don't add them to the above set.
We also need to somehow exclude them from being dumped into vmlinux.h.
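Concretely, a sketch of what that would look like (not the actual patch):
keep only the unlocked kfuncs in the prog-visible set, e.g.

BTF_KFUNCS_START(bpf_fs_kfunc_set_ids)
/* ... existing entries unchanged ... */
BTF_ID_FLAGS(func, bpf_get_dentry_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_set_dentry_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_remove_dentry_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
BTF_KFUNCS_END(bpf_fs_kfunc_set_ids)

If I read the tooling right, bpftool emits kfunc prototypes into vmlinux.h
from the "bpf_kfunc" decl tags that resolve_btfids attaches to BTF_KFUNCS
sets, so dropping the _locked entries from the set should also keep them
out of the dump; whether the _locked variants still need to be registered
somewhere so the remapped calls resolve is the open "somehow" part.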
> static int bpf_fs_kfuncs_filter(const struct bpf_prog *prog, u32 kfunc_id)
> @@ -186,6 +350,37 @@ static const struct btf_kfunc_id_set bpf_fs_kfunc_set = {
> .filter = bpf_fs_kfuncs_filter,
> };
>
> +/* bpf_[set|remove]_dentry_xattr.* hooks have KF_TRUSTED_ARGS and
> + * KF_SLEEPABLE, so they are only available to sleepable hooks with
> + * dentry arguments.
> + *
> + * Setting and removing xattr requires exclusive lock on dentry->d_inode.
> + * Some hooks already locked d_inode, while some hooks have not locked
> + * d_inode. Therefore, we need different kfuncs for different hooks.
> + * Specifically, hooks in the following list (d_inode_locked_hooks)
> + * should call bpf_[set|remove]_dentry_xattr_locked; while other hooks
> + * should call bpf_[set|remove]_dentry_xattr.
> + */
> +BTF_SET_START(d_inode_locked_hooks)
> +BTF_ID(func, bpf_lsm_inode_post_removexattr)
> +BTF_ID(func, bpf_lsm_inode_post_setattr)
> +BTF_ID(func, bpf_lsm_inode_post_setxattr)
> +BTF_ID(func, bpf_lsm_inode_removexattr)
> +BTF_ID(func, bpf_lsm_inode_rmdir)
> +BTF_ID(func, bpf_lsm_inode_setattr)
> +BTF_ID(func, bpf_lsm_inode_setxattr)
> +BTF_ID(func, bpf_lsm_inode_unlink)
> +#ifdef CONFIG_SECURITY_PATH
> +BTF_ID(func, bpf_lsm_path_unlink)
> +BTF_ID(func, bpf_lsm_path_rmdir)
> +#endif /* CONFIG_SECURITY_PATH */
> +BTF_SET_END(d_inode_locked_hooks)
> +
> +bool bpf_lsm_has_d_inode_locked(const struct bpf_prog *prog)
> +{
> + return btf_id_set_contains(&d_inode_locked_hooks, prog->aux->attach_btf_id);
> +}
> +
> static int __init bpf_fs_kfuncs_init(void)
> {
> return register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set);
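For reference, a consumer would look roughly like the sketch below. This is
a hedged example, not code quoted from the series: the hook prototype
(recent kernels pass struct mnt_idmap first), the dynptr-based kfunc
signature mirroring bpf_get_dentry_xattr(), and the security.bpf.* name
restriction are my reading of the rest of the patchset.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

/* Assumed prototype, mirroring bpf_get_dentry_xattr(). */
int bpf_set_dentry_xattr(struct dentry *dentry, const char *name__str,
			 const struct bpf_dynptr *value_p, int flags) __ksym;

/* inode_setxattr is in d_inode_locked_hooks above, so the verifier is
 * expected to route the call below to the _locked implementation.
 */
SEC("lsm.s/inode_setxattr")
int BPF_PROG(tag_xattr_write, struct mnt_idmap *idmap, struct dentry *dentry,
	     const char *name, const void *value, size_t size, int flags)
{
	struct bpf_dynptr value_ptr;
	char buf[] = "1";

	/* Build a dynptr over the value buffer and tag the inode. */
	bpf_dynptr_from_mem(buf, sizeof(buf), 0, &value_ptr);
	bpf_set_dentry_xattr(dentry, "security.bpf.tagged", &value_ptr, 0);
	return 0;
}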
> diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h
> index aefcd6564251..5147b10e16a2 100644
> --- a/include/linux/bpf_lsm.h
> +++ b/include/linux/bpf_lsm.h
> @@ -48,6 +48,9 @@ void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func)
>
> int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
> struct bpf_retval_range *range);
> +
> +bool bpf_lsm_has_d_inode_locked(const struct bpf_prog *prog);
> +
> #else /* !CONFIG_BPF_LSM */
>
> static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id)
> @@ -86,6 +89,11 @@ static inline int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
> {
> return -EOPNOTSUPP;
> }
> +
> +static inline bool bpf_lsm_has_d_inode_locked(const struct bpf_prog *prog)
> +{
> + return false;
> +}
> #endif /* CONFIG_BPF_LSM */
>
> #endif /* _LINUX_BPF_LSM_H */
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index f27274e933e5..f0d240d46e54 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -206,6 +206,7 @@ static int ref_set_non_owning(struct bpf_verifier_env *env,
> static void specialize_kfunc(struct bpf_verifier_env *env,
> u32 func_id, u16 offset, unsigned long *addr);
> static bool is_trusted_reg(const struct bpf_reg_state *reg);
> +static void remap_kfunc_locked_func_id(struct bpf_verifier_env *env, struct bpf_insn *insn);
>
> static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
> {
> @@ -3224,10 +3225,12 @@ static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
> return -EPERM;
> }
>
> - if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn))
> + if (bpf_pseudo_func(insn) || bpf_pseudo_call(insn)) {
> ret = add_subprog(env, i + insn->imm + 1);
> - else
> + } else {
> + remap_kfunc_locked_func_id(env, insn);
> ret = add_kfunc_call(env, insn->imm, insn->off);
> + }
>
> if (ret < 0)
> return ret;
> @@ -11690,6 +11693,10 @@ enum special_kfunc_type {
> KF_bpf_get_kmem_cache,
> KF_bpf_local_irq_save,
> KF_bpf_local_irq_restore,
> + KF_bpf_set_dentry_xattr,
> + KF_bpf_remove_dentry_xattr,
> + KF_bpf_set_dentry_xattr_locked,
> + KF_bpf_remove_dentry_xattr_locked,
> };
>
> BTF_SET_START(special_kfunc_set)
> @@ -11719,6 +11726,12 @@ BTF_ID(func, bpf_wq_set_callback_impl)
> #ifdef CONFIG_CGROUPS
> BTF_ID(func, bpf_iter_css_task_new)
> #endif
> +#ifdef CONFIG_BPF_LSM
> +BTF_ID(func, bpf_set_dentry_xattr)
> +BTF_ID(func, bpf_remove_dentry_xattr)
> +BTF_ID(func, bpf_set_dentry_xattr_locked)
> +BTF_ID(func, bpf_remove_dentry_xattr_locked)
> +#endif
> BTF_SET_END(special_kfunc_set)
Do they need to be part of special_kfunc_set?
Where is the code that uses that set?
>
> BTF_ID_LIST(special_kfunc_list)
> @@ -11762,6 +11775,44 @@ BTF_ID_UNUSED
> BTF_ID(func, bpf_get_kmem_cache)
> BTF_ID(func, bpf_local_irq_save)
> BTF_ID(func, bpf_local_irq_restore)
> +#ifdef CONFIG_BPF_LSM
> +BTF_ID(func, bpf_set_dentry_xattr)
> +BTF_ID(func, bpf_remove_dentry_xattr)
> +BTF_ID(func, bpf_set_dentry_xattr_locked)
> +BTF_ID(func, bpf_remove_dentry_xattr_locked)
> +#else
> +BTF_ID_UNUSED
> +BTF_ID_UNUSED
> +BTF_ID_UNUSED
> +BTF_ID_UNUSED
> +#endif
> +
> +/* Sometimes, we need slightly different verions of a kfunc for different
versions
> + * contexts/hooks, for example, bpf_set_dentry_xattr vs.
> + * bpf_set_dentry_xattr_locked. The former kfunc need to lock the inode
> + * rwsem, while the latter is called with the inode rwsem held (by the
> + * caller).
> + *
> + * To avoid burden on the users, we allow either version of the kfunc in
> + * either context. Then the verifier will remap the kfunc to the proper
> + * version based the context.
> + */
> +static void remap_kfunc_locked_func_id(struct bpf_verifier_env *env, struct bpf_insn *insn)
> +{
> + u32 func_id = insn->imm;
> +
> + if (bpf_lsm_has_d_inode_locked(env->prog)) {
> + if (func_id == special_kfunc_list[KF_bpf_set_dentry_xattr])
> + insn->imm = special_kfunc_list[KF_bpf_set_dentry_xattr_locked];
> + else if (func_id == special_kfunc_list[KF_bpf_remove_dentry_xattr])
> + insn->imm = special_kfunc_list[KF_bpf_remove_dentry_xattr_locked];
> + } else {
> + if (func_id == special_kfunc_list[KF_bpf_set_dentry_xattr_locked])
> + insn->imm = special_kfunc_list[KF_bpf_set_dentry_xattr];
This part is not necessary.
_locked() shouldn't be exposed, and it should be an error
if a bpf prog attempts to use an invalid kfunc.
> + else if (func_id == special_kfunc_list[KF_bpf_remove_dentry_xattr_locked])
> + insn->imm = special_kfunc_list[KF_bpf_remove_dentry_xattr];
> + }
> +}
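With that feedback folded in, the remap becomes one-way; roughly (a sketch,
assuming direct use of the _locked ids is rejected by not exposing them to
programs, per the comments above):

static void remap_kfunc_locked_func_id(struct bpf_verifier_env *env,
				       struct bpf_insn *insn)
{
	u32 func_id = insn->imm;

	/* Programs only ever name the unlocked kfuncs; switch to the
	 * _locked implementation when the attach hook already holds
	 * the inode lock.
	 */
	if (!bpf_lsm_has_d_inode_locked(env->prog))
		return;

	if (func_id == special_kfunc_list[KF_bpf_set_dentry_xattr])
		insn->imm = special_kfunc_list[KF_bpf_set_dentry_xattr_locked];
	else if (func_id == special_kfunc_list[KF_bpf_remove_dentry_xattr])
		insn->imm = special_kfunc_list[KF_bpf_remove_dentry_xattr_locked];
}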
>
> static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
> {
> --
> 2.43.5
>