[PATCH bpf-next v8 1/7] A purely mechanical change to split the renaming from the actual generalization.

Martin KaFai Lau kafai at fb.com
Mon Aug 17 23:56:21 UTC 2020


On Mon, Aug 03, 2020 at 06:46:49PM +0200, KP Singh wrote:
> From: KP Singh <kpsingh at google.com>
> 
> Flags/consts:
> 
>   SK_STORAGE_CREATE_FLAG_MASK	BPF_LOCAL_STORAGE_CREATE_FLAG_MASK
>   BPF_SK_STORAGE_CACHE_SIZE	BPF_LOCAL_STORAGE_CACHE_SIZE
>   MAX_VALUE_SIZE		BPF_LOCAL_STORAGE_MAX_VALUE_SIZE
> 
> Structs:
> 
>   bucket			bpf_local_storage_map_bucket
>   bpf_sk_storage_map		bpf_local_storage_map
>   bpf_sk_storage_data		bpf_local_storage_data
>   bpf_sk_storage_elem		bpf_local_storage_elem
>   bpf_sk_storage		bpf_local_storage
> 
> The "sk" member in bpf_local_storage is also updated to "owner"
> in preparation for changing the type to void * in a subsequent patch.
> 
> Functions:
> 
>   selem_linked_to_sk			selem_linked_to_storage
>   selem_alloc				bpf_selem_alloc
>   __selem_unlink_sk			bpf_selem_unlink_storage
>   __selem_link_sk			bpf_selem_link_storage
>   selem_unlink_sk			__bpf_selem_unlink_storage
>   sk_storage_update			bpf_local_storage_update
>   __sk_storage_lookup			bpf_local_storage_lookup
>   bpf_sk_storage_map_free		bpf_local_storage_map_free
>   bpf_sk_storage_map_alloc		bpf_local_storage_map_alloc
>   bpf_sk_storage_map_alloc_check	bpf_local_storage_map_alloc_check
>   bpf_sk_storage_map_check_btf		bpf_local_storage_map_check_btf
> 

[ ... ]

> @@ -147,85 +148,86 @@ static struct bpf_sk_storage_elem *selem_alloc(struct bpf_sk_storage_map *smap,
>   * The caller must ensure selem->smap is still valid to be
>   * dereferenced for its smap->elem_size and smap->cache_idx.
>   */
> -static bool __selem_unlink_sk(struct bpf_sk_storage *sk_storage,
> -			      struct bpf_sk_storage_elem *selem,
> -			      bool uncharge_omem)
> +static bool bpf_selem_unlink_storage(struct bpf_local_storage *local_storage,
> +				     struct bpf_local_storage_elem *selem,
> +				     bool uncharge_omem)
Please add a "_nolock()" suffix, just to be clear that the unlink_map()
counterpart is locked.  It could be a follow up later.

>  {
> -	struct bpf_sk_storage_map *smap;
> -	bool free_sk_storage;
> +	struct bpf_local_storage_map *smap;
> +	bool free_local_storage;
>  	struct sock *sk;
>  
>  	smap = rcu_dereference(SDATA(selem)->smap);
> -	sk = sk_storage->sk;
> +	sk = local_storage->owner;
>  
>  	/* All uncharging on sk->sk_omem_alloc must be done first.
> -	 * sk may be freed once the last selem is unlinked from sk_storage.
> +	 * sk may be freed once the last selem is unlinked from local_storage.
>  	 */
>  	if (uncharge_omem)
>  		atomic_sub(smap->elem_size, &sk->sk_omem_alloc);
>  
> -	free_sk_storage = hlist_is_singular_node(&selem->snode,
> -						 &sk_storage->list);
> -	if (free_sk_storage) {
> -		atomic_sub(sizeof(struct bpf_sk_storage), &sk->sk_omem_alloc);
> -		sk_storage->sk = NULL;
> +	free_local_storage = hlist_is_singular_node(&selem->snode,
> +						    &local_storage->list);
> +	if (free_local_storage) {
> +		atomic_sub(sizeof(struct bpf_local_storage), &sk->sk_omem_alloc);
> +		local_storage->owner = NULL;
>  		/* After this RCU_INIT, sk may be freed and cannot be used */
>  		RCU_INIT_POINTER(sk->sk_bpf_storage, NULL);
>  
> -		/* sk_storage is not freed now.  sk_storage->lock is
> -		 * still held and raw_spin_unlock_bh(&sk_storage->lock)
> +		/* local_storage is not freed now.  local_storage->lock is
> +		 * still held and raw_spin_unlock_bh(&local_storage->lock)
>  		 * will be done by the caller.
>  		 *
>  		 * Although the unlock will be done under
> 		 * rcu_read_lock(),  it is more intuitive to
> -		 * read if kfree_rcu(sk_storage, rcu) is done
> -		 * after the raw_spin_unlock_bh(&sk_storage->lock).
> +		 * read if kfree_rcu(local_storage, rcu) is done
> +		 * after the raw_spin_unlock_bh(&local_storage->lock).
>  		 *
> -		 * Hence, a "bool free_sk_storage" is returned
> +		 * Hence, a "bool free_local_storage" is returned
>  		 * to the caller which then calls the kfree_rcu()
>  		 * after unlock.
>  		 */
>  	}
>  	hlist_del_init_rcu(&selem->snode);
> -	if (rcu_access_pointer(sk_storage->cache[smap->cache_idx]) ==
> +	if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
>  	    SDATA(selem))
> -		RCU_INIT_POINTER(sk_storage->cache[smap->cache_idx], NULL);
> +		RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);
>  
>  	kfree_rcu(selem, rcu);
>  
> -	return free_sk_storage;
> +	return free_local_storage;
>  }
>  
> -static void selem_unlink_sk(struct bpf_sk_storage_elem *selem)
> +static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem)
>  {
> -	struct bpf_sk_storage *sk_storage;
> -	bool free_sk_storage = false;
> +	struct bpf_local_storage *local_storage;
> +	bool free_local_storage = false;
>  
> -	if (unlikely(!selem_linked_to_sk(selem)))
> +	if (unlikely(!selem_linked_to_storage(selem)))
>  		/* selem has already been unlinked from sk */
>  		return;
>  
> -	sk_storage = rcu_dereference(selem->sk_storage);
> -	raw_spin_lock_bh(&sk_storage->lock);
> -	if (likely(selem_linked_to_sk(selem)))
> -		free_sk_storage = __selem_unlink_sk(sk_storage, selem, true);
> -	raw_spin_unlock_bh(&sk_storage->lock);
> +	local_storage = rcu_dereference(selem->local_storage);
> +	raw_spin_lock_bh(&local_storage->lock);
> +	if (likely(selem_linked_to_storage(selem)))
> +		free_local_storage =
> +			bpf_selem_unlink_storage(local_storage, selem, true);
> +	raw_spin_unlock_bh(&local_storage->lock);
>  
> -	if (free_sk_storage)
> -		kfree_rcu(sk_storage, rcu);
> +	if (free_local_storage)
> +		kfree_rcu(local_storage, rcu);
>  }
>  
> -static void __selem_link_sk(struct bpf_sk_storage *sk_storage,
> -			    struct bpf_sk_storage_elem *selem)
> +static void bpf_selem_link_storage(struct bpf_local_storage *local_storage,
> +				   struct bpf_local_storage_elem *selem)
Same here, i.e. bpf_selem_link_storage_nolock().

Please tag the Subject line with "bpf:".

Acked-by: Martin KaFai Lau <kafai at fb.com>



More information about the Linux-security-module-archive mailing list