[PATCH bpf-next v6 4/8] bpf: lsm: Implement attach, detach and execution

Andrii Nakryiko andrii.nakryiko at gmail.com
Thu Mar 26 01:49:09 UTC 2020


On Wed, Mar 25, 2020 at 8:27 AM KP Singh <kpsingh at chromium.org> wrote:
>
> From: KP Singh <kpsingh at google.com>
>
> JITed BPF programs are dynamically attached to the LSM hooks
> using BPF trampolines. The trampoline prologue generates code to handle
> conversion of the signature of the hook to the appropriate BPF context.
>
> The allocated trampoline programs are attached to the nop functions
> initialized as LSM hooks.
>
> BPF_PROG_TYPE_LSM programs must have a GPL-compatible license and
> need CAP_SYS_ADMIN (required for loading eBPF programs).
>
> Upon attachment:
>
> * A BPF fexit trampoline is used for LSM hooks with a void return type.
> * A BPF fmod_ret trampoline is used for LSM hooks which return an
>   int. The attached programs can override the return value of the
>   BPF LSM hook to indicate a MAC policy decision.
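
For context, the BPF side of the fmod_ret case ends up looking roughly
like this (a minimal sketch in the style of the selftests later in this
series; the hook, section name and symbols are illustrative):

  /* Minimal BPF_PROG_TYPE_LSM program for an int-returning hook.
   * Assumes vmlinux.h and the SEC("lsm/...") convention added by the
   * libbpf patch in this series.
   */
  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  char LICENSE[] SEC("license") = "GPL"; /* GPL-compatible license required */

  SEC("lsm/file_mprotect")
  int BPF_PROG(mprotect_check, struct vm_area_struct *vma,
               unsigned long reqprot, unsigned long prot, int ret)
  {
          /* ret is the return value of the previous program (or 0);
           * keep an earlier denial instead of overriding it.
           */
          if (ret != 0)
                  return ret;

          /* Returning a negative errno here denies the operation,
           * i.e. the MAC policy decision mentioned above.
           */
          return 0;
  }
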
>
> Signed-off-by: KP Singh <kpsingh at google.com>
> Reviewed-by: Brendan Jackman <jackmanb at google.com>
> Reviewed-by: Florent Revest <revest at google.com>
> ---
>  include/linux/bpf_lsm.h | 11 ++++++++
>  kernel/bpf/bpf_lsm.c    | 28 +++++++++++++++++++++
>  kernel/bpf/btf.c        |  9 ++++++-
>  kernel/bpf/syscall.c    | 56 ++++++++++++++++++++++++++++-------------
>  kernel/bpf/trampoline.c | 17 ++++++++++---
>  kernel/bpf/verifier.c   | 19 +++++++++++---
>  6 files changed, 113 insertions(+), 27 deletions(-)
>

[...]

> diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
> index 85567a6ea5f9..3ba30fd6101e 100644
> --- a/kernel/bpf/syscall.c
> +++ b/kernel/bpf/syscall.c
> @@ -25,6 +25,7 @@
>  #include <linux/nospec.h>
>  #include <linux/audit.h>
>  #include <uapi/linux/btf.h>
> +#include <linux/bpf_lsm.h>
>
>  #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
>                           (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
> @@ -1935,6 +1936,7 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
>
>                 switch (prog_type) {
>                 case BPF_PROG_TYPE_TRACING:
> +               case BPF_PROG_TYPE_LSM:
>                 case BPF_PROG_TYPE_STRUCT_OPS:
>                 case BPF_PROG_TYPE_EXT:
>                         break;
> @@ -2367,10 +2369,28 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog)
>         struct file *link_file;
>         int link_fd, err;
>
> -       if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
> -           prog->expected_attach_type != BPF_TRACE_FEXIT &&
> -           prog->expected_attach_type != BPF_MODIFY_RETURN &&
> -           prog->type != BPF_PROG_TYPE_EXT) {
> +       switch (prog->type) {
> +       case BPF_PROG_TYPE_TRACING:
> +               if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
> +                   prog->expected_attach_type != BPF_TRACE_FEXIT &&
> +                   prog->expected_attach_type != BPF_MODIFY_RETURN) {
> +                       err = -EINVAL;
> +                       goto out_put_prog;
> +               }
> +               break;
> +       case BPF_PROG_TYPE_EXT:
> +               if (prog->expected_attach_type != 0) {
> +                       err = -EINVAL;
> +                       goto out_put_prog;
> +               }
> +               break;
> +       case BPF_PROG_TYPE_LSM:
> +               if (prog->expected_attach_type != BPF_LSM_MAC) {
> +                       err = -EINVAL;
> +                       goto out_put_prog;
> +               }
> +               break;
> +       default:

thanks, this is much more "scalable" in terms of maintenance!

>                 err = -EINVAL;
>                 goto out_put_prog;
>         }
> @@ -2449,16 +2469,10 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
>         if (IS_ERR(prog))
>                 return PTR_ERR(prog);
>
> -       if (prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT &&
> -           prog->type != BPF_PROG_TYPE_TRACING &&
> -           prog->type != BPF_PROG_TYPE_EXT &&
> -           prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) {
> -               err = -EINVAL;
> -               goto out_put_prog;
> -       }
> -
> -       if (prog->type == BPF_PROG_TYPE_TRACING ||
> -           prog->type == BPF_PROG_TYPE_EXT) {
> +       switch (prog->type) {
> +       case BPF_PROG_TYPE_TRACING:
> +       case BPF_PROG_TYPE_EXT:
> +       case BPF_PROG_TYPE_LSM:
>                 if (attr->raw_tracepoint.name) {
>                         /* The attach point for this category of programs
>                          * should be specified via btf_id during program load.
> @@ -2466,11 +2480,13 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
>                         err = -EINVAL;
>                         goto out_put_prog;
>                 }
> -               if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
> +               if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {

this should probably also ensure prog->type == BPF_PROG_TYPE_TRACING?
Otherwise you can trick the kernel with BPF_PROG_TYPE_LSM and
expected_attach_type == BPF_TRACE_RAW_TP, no?
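
i.e. something along these lines (untested sketch, just to spell out
the check being suggested):

                if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
                        /* BPF_TRACE_RAW_TP is only valid for TRACING progs */
                        if (prog->type != BPF_PROG_TYPE_TRACING) {
                                err = -EINVAL;
                                goto out_put_prog;
                        }
                        tp_name = prog->aux->attach_func_name;
                        break;
                }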

>                         tp_name = prog->aux->attach_func_name;
> -               else
> -                       return bpf_tracing_prog_attach(prog);
> -       } else {
> +                       break;
> +               }
> +               return bpf_tracing_prog_attach(prog);
> +       case BPF_PROG_TYPE_RAW_TRACEPOINT:
> +       case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
>                 if (strncpy_from_user(buf,
>                                       u64_to_user_ptr(attr->raw_tracepoint.name),
>                                       sizeof(buf) - 1) < 0) {
> @@ -2479,6 +2495,10 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
>                }
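
For completeness, the user-space flow that the load- and attach-time
checks above enforce ends up looking roughly like the sketch below
(illustrative only; it uses the existing libbpf syscall wrappers,
assumes BPF_PROG_TYPE_LSM / BPF_LSM_MAC from the UAPI patch earlier in
this series, and leaves preparing the instructions and the hook's BTF
id to the caller):

  #include <bpf/bpf.h>

  static int load_and_attach_lsm(const struct bpf_insn *insns,
                                 size_t insn_cnt, __u32 hook_btf_id)
  {
          struct bpf_load_program_attr load_attr = {
                  .prog_type            = BPF_PROG_TYPE_LSM,
                  .expected_attach_type = BPF_LSM_MAC,
                  .license              = "GPL",
                  .insns                = insns,
                  .insns_cnt            = insn_cnt,
                  /* attach point is fixed at load time via BTF id */
                  .attach_btf_id        = hook_btf_id,
          };
          int prog_fd;

          prog_fd = bpf_load_program_xattr(&load_attr, NULL, 0);
          if (prog_fd < 0)
                  return prog_fd;

          /* name must be NULL: the attach point already came from
           * attach_btf_id, per the check above.
           */
          return bpf_raw_tracepoint_open(NULL, prog_fd);
  }
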

[...]


