[PATCH v3 26/57] perf: Simplify event_function*()
Namhyung Kim
namhyung at kernel.org
Tue Jun 13 05:56:06 UTC 2023
Hi Peter,
On Mon, Jun 12, 2023 at 2:39 AM Peter Zijlstra <peterz at infradead.org> wrote:
>
> Use guards to reduce gotos and simplify control flow.
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz at infradead.org>
> ---
> kernel/events/core.c | 39 ++++++++++++++++++++++++++-------------
> 1 file changed, 26 insertions(+), 13 deletions(-)
>
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -214,6 +214,25 @@ struct event_function_struct {
> void *data;
> };
>
> +typedef struct {
> + struct perf_cpu_context *cpuctx;
> + struct perf_event_context *ctx;
> +} class_perf_ctx_lock_t;
> +
> +static inline void class_perf_ctx_lock_destructor(class_perf_ctx_lock_t *_T)
> +{
> + if (_T->cpuctx)
> + perf_ctx_unlock(_T->cpuctx, _T->ctx);
Shouldn't perf_ctx_unlock() be called unconditionally here?  The
constructor always takes the lock and returns a non-NULL cpuctx, so I
don't see how _T->cpuctx could be NULL when the destructor runs.
Thanks,
Namhyung
> +}
> +
> +static inline class_perf_ctx_lock_t
> +class_perf_ctx_lock_constructor(struct perf_cpu_context *cpuctx,
> + struct perf_event_context *ctx)
> +{
> + perf_ctx_lock(cpuctx, ctx);
> + return (class_perf_ctx_lock_t){ cpuctx, ctx };
> +}
> +
> static int event_function(void *info)
> {
> struct event_function_struct *efs = info;
> @@ -224,17 +243,15 @@ static int event_function(void *info)
> int ret = 0;
>
> lockdep_assert_irqs_disabled();
> + guard(perf_ctx_lock)(cpuctx, task_ctx);
>
> - perf_ctx_lock(cpuctx, task_ctx);
> /*
> * Since we do the IPI call without holding ctx->lock things can have
> * changed, double check we hit the task we set out to hit.
> */
> if (ctx->task) {
> - if (ctx->task != current) {
> - ret = -ESRCH;
> - goto unlock;
> - }
> + if (ctx->task != current)
> + return -ESRCH;
>
> /*
> * We only use event_function_call() on established contexts,
> @@ -254,8 +271,6 @@ static int event_function(void *info)
> }
>
> efs->func(event, cpuctx, ctx, efs->data);
> -unlock:
> - perf_ctx_unlock(cpuctx, task_ctx);
>
> return ret;
> }
> @@ -329,11 +344,11 @@ static void event_function_local(struct
> task_ctx = ctx;
> }
>
> - perf_ctx_lock(cpuctx, task_ctx);
> + guard(perf_ctx_lock)(cpuctx, task_ctx);
>
> task = ctx->task;
> if (task == TASK_TOMBSTONE)
> - goto unlock;
> + return;
>
> if (task) {
> /*
> @@ -343,18 +358,16 @@ static void event_function_local(struct
> */
> if (ctx->is_active) {
> if (WARN_ON_ONCE(task != current))
> - goto unlock;
> + return;
>
> if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
> - goto unlock;
> + return;
> }
> } else {
> WARN_ON_ONCE(&cpuctx->ctx != ctx);
> }
>
> func(event, cpuctx, ctx, data);
> -unlock:
> - perf_ctx_unlock(cpuctx, task_ctx);
> }
>
> #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
>
>
More information about the Linux-security-module-archive
mailing list