net_sched: sch_fq: prefetch one skb ahead in dequeue()

Prefetch the skb that we are likely to dequeue at the next dequeue().
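
For illustration, this is the classic "prefetch the next node while the
current one is consumed" pattern. A minimal userspace sketch of the same
idea, using GCC/Clang's __builtin_prefetch() in place of the kernel's
prefetch() helper (the list layout and names here are illustrative, not
the sch_fq structures):

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int payload;
};

/* Pop the head of a singly linked list, hinting the CPU to start
 * fetching the next node so its cache line is hopefully warm by the
 * time the next pop runs -- the same shape as the fq_erase_head()
 * change below.
 */
static struct node *pop_head(struct node **head)
{
	struct node *cur = *head;
	struct node *next;

	if (!cur)
		return NULL;
	next = cur->next;
	__builtin_prefetch(next);	/* the kernel code uses prefetch(next) */
	*head = next;
	return cur;
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 5; i++) {	/* build 4 -> 3 -> 2 -> 1 -> 0 */
		struct node *n = malloc(sizeof(*n));

		n->payload = i;
		n->next = head;
		head = n;
	}

	for (struct node *n; (n = pop_head(&head)) != NULL; free(n))
		printf("%d\n", n->payload);
	return 0;
}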

Also call fq_dequeue_skb() a bit sooner in fq_dequeue().

This reduces the window between the read of q.qlen and the
updates of fields in its cache line, which another CPU trying
to enqueue a packet could dirty in the meantime.
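
Concretely, fq_dequeue() samples queue state and then stores to fields
sharing that cache line; the sooner the unlink store is issued, the
smaller the window in which an enqueuing CPU can pull the line away. A
toy, single-threaded sketch of the reordered bookkeeping (stand-in
types and names, not the real sch_fq API):

#include <stdio.h>

/* Toy stand-ins; only the ordering of the unlink versus the
 * per-packet stats is meant to mirror the patch.
 */
struct pkt  { struct pkt *next; int band; };
struct flow { struct pkt *head; int qlen; };

static struct pkt *dequeue_one(struct flow *f, int band_count[])
{
	struct pkt *p = f->head;

	if (!p)
		return NULL;
	/* Unlink first -- the analogue of calling fq_dequeue_skb()
	 * sooner: this is the store that dirties the cache line an
	 * enqueuing CPU also touches, so issue it as early as
	 * possible after the queue state was read.
	 */
	f->head = p->next;
	/* qlen and per-band accounting now run after the unlink,
	 * not before it.
	 */
	f->qlen--;
	band_count[p->band]--;
	return p;
}

int main(void)
{
	int band_count[1] = { 2 };
	struct pkt b = { NULL, 0 };
	struct pkt a = { &b, 0 };
	struct flow f = { &a, 2 };
	struct pkt *p;

	while ((p = dequeue_one(&f, band_count)))
		printf("qlen=%d band0=%d\n", f.qlen, band_count[p->band]);
	return 0;
}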

Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20251121083256.674562-10-edumazet@google.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Eric Dumazet 2025-11-21 08:32:51 +00:00 committed by Paolo Abeni
parent 3c1100f042
commit 2f9babc04d
1 changed file with 5 additions and 2 deletions

@@ -480,7 +480,10 @@ static void fq_erase_head(struct Qdisc *sch, struct fq_flow *flow,
 			  struct sk_buff *skb)
 {
 	if (skb == flow->head) {
-		flow->head = skb->next;
+		struct sk_buff *next = skb->next;
+
+		prefetch(next);
+		flow->head = next;
 	} else {
 		rb_erase(&skb->rbnode, &flow->t_root);
 		skb->dev = qdisc_dev(sch);
@@ -712,6 +715,7 @@ begin:
 			goto begin;
 		}
 		prefetch(&skb->end);
+		fq_dequeue_skb(sch, f, skb);
 		if ((s64)(now - time_next_packet - q->ce_threshold) > 0) {
 			INET_ECN_set_ce(skb);
 			q->stat_ce_mark++;
@@ -719,7 +723,6 @@ begin:
 		if (--f->qlen == 0)
 			q->inactive_flows++;
 		q->band_pkt_count[fq_skb_cb(skb)->band]--;
-		fq_dequeue_skb(sch, f, skb);
 	} else {
 		head->first = f->next;
 		/* force a pass through old_flows to prevent starvation */