[PATCH RFC 4/9] net: optimize GRO for the common case.
Paolo Abeni
pabeni at redhat.com
Wed Jul 21 16:44:36 UTC 2021
After the previous patches, at GRO time, skb->_state is
usually 0, unless the packet comes from some H/W offload
slow path or from a tunnel without rx checksum offload.

We can optimize the GRO code assuming !skb->_state is likely.
This removes multiple conditionals in the fast path, at the
price of an additional one when we hit the above slow paths.
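
For illustration only, not part of the patch: the struct, field and
helper names below are hypothetical, but they sketch the technique the
series relies on, i.e. keeping a single word non-zero whenever any
rarely-used field is set, so the GRO fast path can skip all the related
checks with one likely-false test.

#include <stdbool.h>
#include <stdint.h>

#define unlikely(x)	__builtin_expect(!!(x), 0)

struct pkt {
	uintptr_t nfct;		/* conntrack info, usually 0 */
	uintptr_t dst;		/* metadata dst, usually 0 */
	uintptr_t ext;		/* extensions, usually 0 */
	uintptr_t state;	/* non-zero iff any field above is set */
};

static inline void pkt_set_nfct(struct pkt *p, uintptr_t nfct)
{
	p->nfct = nfct;
	p->state |= nfct;	/* analogous setters keep state in sync for dst/ext */
}

static bool pkt_same_flow(const struct pkt *a, const struct pkt *b)
{
	/* one cheap, likely-false test guards all the infrequent checks */
	if (unlikely(a->state | b->state))
		return a->nfct == b->nfct && a->dst == b->dst &&
		       a->ext == b->ext;
	return true;
}
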
Signed-off-by: Paolo Abeni <pabeni at redhat.com>
---
net/core/dev.c | 29 +++++++++++++++++++++--------
net/core/skbuff.c | 8 +++++---
2 files changed, 26 insertions(+), 11 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 3ee58876e8f5..70c24ed9ca67 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6002,7 +6002,6 @@ static void gro_list_prepare(const struct list_head *head,
diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
if (skb_vlan_tag_present(p))
diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb);
- diffs |= skb_metadata_dst_cmp(p, skb);
diffs |= skb_metadata_differs(p, skb);
if (maclen == ETH_HLEN)
diffs |= compare_ether_header(skb_mac_header(p),
@@ -6012,17 +6011,29 @@ static void gro_list_prepare(const struct list_head *head,
skb_mac_header(skb),
maclen);
- diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);
+ /* in most common scenarios _state is 0;
+ * otherwise we are already on some slower paths:
+ * either skip all the infrequent tests altogether or
+ * avoid trying too hard to skip each of them individually
+ */
+ if (!diffs && unlikely(skb->_state | p->_state)) {
+#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ struct tc_skb_ext *skb_ext;
+ struct tc_skb_ext *p_ext;
+#endif
+
+ diffs |= skb_metadata_dst_cmp(p, skb);
+ diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);
+
#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- if (!diffs) {
- struct tc_skb_ext *skb_ext = skb_ext_find(skb, TC_SKB_EXT);
- struct tc_skb_ext *p_ext = skb_ext_find(p, TC_SKB_EXT);
+ skb_ext = skb_ext_find(skb, TC_SKB_EXT);
+ p_ext = skb_ext_find(p, TC_SKB_EXT);
diffs |= (!!p_ext) ^ (!!skb_ext);
if (!diffs && unlikely(skb_ext))
diffs |= p_ext->chain ^ skb_ext->chain;
- }
#endif
+ }
NAPI_GRO_CB(p)->same_flow = !diffs;
}
@@ -6287,8 +6298,10 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
skb->encapsulation = 0;
skb_shinfo(skb)->gso_type = 0;
skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
- skb_ext_reset(skb);
- nf_reset_ct(skb);
+ if (unlikely(skb->_state)) {
+ skb_ext_reset(skb);
+ nf_reset_ct(skb);
+ }
napi->skb = skb;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2ffe18595635..befb49d1a756 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -943,9 +943,11 @@ void __kfree_skb_defer(struct sk_buff *skb)
void napi_skb_free_stolen_head(struct sk_buff *skb)
{
- nf_reset_ct(skb);
- skb_dst_drop(skb);
- skb_ext_put(skb);
+ if (unlikely(skb->_state)) {
+ nf_reset_ct(skb);
+ skb_dst_drop(skb);
+ skb_ext_put(skb);
+ }
napi_skb_cache_put(skb);
}
--
2.26.3