tcp: move tcp_rate_skb_sent() to tcp_output.c
tcp_rate_skb_sent() is only called from __tcp_transmit_skb() and
__tcp_retransmit_skb(). Move it to tcp_output.c and make it static.
The clang compiler is then able to inline it into __tcp_transmit_skb();
gcc inlines it into both callers, which is also fine.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Neal Cardwell <ncardwell@google.com>
Link: https://patch.msgid.link/20260114165109.1747722-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 799759e610
commit f10ab9d3a7
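The win here is the classic single-translation-unit pattern: once a function is defined static in the only file that calls it, the compiler sees every call site and can inline the body instead of emitting an out-of-line call, with no LTO needed. A minimal sketch of that pattern in plain C, using made-up names rather than the kernel's:

/* sketch.c -- not kernel code; illustrative names only. */
#include <stdio.h>

struct tx_state {
	unsigned int inflight;		/* packets sent but not yet acked */
	unsigned long first_tx;		/* start of the current flight */
};

/* Static helper defined in the only file that uses it: both gcc and
 * clang can inline it into transmit_one() below.
 */
static void rate_on_send(struct tx_state *s, unsigned long now_us)
{
	if (!s->inflight)
		s->first_tx = now_us;	/* new flight: restart the clock */
	s->inflight++;
}

void transmit_one(struct tx_state *s, unsigned long now_us)
{
	rate_on_send(s, now_us);	/* likely inlined: static, same TU */
}

int main(void)
{
	struct tx_state s = { 0, 0 };

	transmit_one(&s, 100);
	transmit_one(&s, 250);
	printf("inflight=%u first_tx=%lu\n", s.inflight, s.first_tx);
	return 0;
}

Had rate_on_send() lived in another .c file with external linkage, neither compiler could inline the call without link-time optimization, which is the situation this commit removes for tcp_rate_skb_sent().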
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1356,7 +1356,6 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
 void tcp_set_ca_state(struct sock *sk, const u8 ca_state);
 
 /* From tcp_rate.c */
-void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
 void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
 			    struct rate_sample *rs);
 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1432,6 +1432,41 @@ static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
 	list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
 }
 
+/* Snapshot the current delivery information in the skb, to generate
+ * a rate sample later when the skb is (s)acked in tcp_rate_skb_delivered().
+ */
+static void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	/* In general we need to start delivery rate samples from the
+	 * time we received the most recent ACK, to ensure we include
+	 * the full time the network needs to deliver all in-flight
+	 * packets. If there are no packets in flight yet, then we
+	 * know that any ACKs after now indicate that the network was
+	 * able to deliver those packets completely in the sampling
+	 * interval between now and the next ACK.
+	 *
+	 * Note that we use packets_out instead of tcp_packets_in_flight(tp)
+	 * because the latter is a guess based on RTO and loss-marking
+	 * heuristics. We don't want spurious RTOs or loss markings to cause
+	 * a spuriously small time interval, causing a spuriously high
+	 * bandwidth estimate.
+	 */
+	if (!tp->packets_out) {
+		u64 tstamp_us = tcp_skb_timestamp_us(skb);
+
+		tp->first_tx_mstamp = tstamp_us;
+		tp->delivered_mstamp = tstamp_us;
+	}
+
+	TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp;
+	TCP_SKB_CB(skb)->tx.delivered_mstamp = tp->delivered_mstamp;
+	TCP_SKB_CB(skb)->tx.delivered = tp->delivered;
+	TCP_SKB_CB(skb)->tx.delivered_ce = tp->delivered_ce;
+	TCP_SKB_CB(skb)->tx.is_app_limited = tp->app_limited ? 1 : 0;
+}
+
 INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
 INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
 INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb));
--- a/net/ipv4/tcp_rate.c
+++ b/net/ipv4/tcp_rate.c
@@ -34,41 +34,6 @@
  * ready to send in the write queue.
  */
 
-/* Snapshot the current delivery information in the skb, to generate
- * a rate sample later when the skb is (s)acked in tcp_rate_skb_delivered().
- */
-void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	/* In general we need to start delivery rate samples from the
-	 * time we received the most recent ACK, to ensure we include
-	 * the full time the network needs to deliver all in-flight
-	 * packets. If there are no packets in flight yet, then we
-	 * know that any ACKs after now indicate that the network was
-	 * able to deliver those packets completely in the sampling
-	 * interval between now and the next ACK.
-	 *
-	 * Note that we use packets_out instead of tcp_packets_in_flight(tp)
-	 * because the latter is a guess based on RTO and loss-marking
-	 * heuristics. We don't want spurious RTOs or loss markings to cause
-	 * a spuriously small time interval, causing a spuriously high
-	 * bandwidth estimate.
-	 */
-	if (!tp->packets_out) {
-		u64 tstamp_us = tcp_skb_timestamp_us(skb);
-
-		tp->first_tx_mstamp = tstamp_us;
-		tp->delivered_mstamp = tstamp_us;
-	}
-
-	TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp;
-	TCP_SKB_CB(skb)->tx.delivered_mstamp = tp->delivered_mstamp;
-	TCP_SKB_CB(skb)->tx.delivered = tp->delivered;
-	TCP_SKB_CB(skb)->tx.delivered_ce = tp->delivered_ce;
-	TCP_SKB_CB(skb)->tx.is_app_limited = tp->app_limited ? 1 : 0;
-}
-
 /* When an skb is sacked or acked, we fill in the rate sample with the (prior)
  * delivery information when the skb was last transmitted.
  *
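For context on what the moved function does: the fields it snapshots into TCP_SKB_CB(skb)->tx (delivered, delivered_mstamp, first_tx_mstamp, ...) let the stack compute, when the skb is later (s)acked, how many packets were delivered over how long an interval. A rough sketch of that arithmetic with invented names (the kernel's actual consumers are tcp_rate_skb_delivered() and tcp_rate_gen(), which are more involved):

/* rate_sketch.c -- illustration only, not the kernel implementation. */
#include <stdint.h>
#include <stdio.h>

struct tx_snapshot {			/* taken at transmit time */
	uint64_t delivered_mstamp_us;	/* time of the most recent prior ACK */
	uint32_t delivered;		/* cumulative delivered count back then */
};

/* On ACK: delivered delta over the elapsed interval gives a rate sample. */
static double rate_sample(const struct tx_snapshot *snap,
			  uint64_t ack_mstamp_us, uint32_t delivered_now)
{
	uint64_t interval_us = ack_mstamp_us - snap->delivered_mstamp_us;
	uint32_t delivered = delivered_now - snap->delivered;

	return interval_us ? delivered * 1e6 / (double)interval_us : 0.0;
}

int main(void)
{
	struct tx_snapshot snap = { .delivered_mstamp_us = 1000000, .delivered = 40 };

	/* ACK arrives 20ms later; 20 more packets were delivered meanwhile. */
	printf("~%.0f pkts/sec\n", rate_sample(&snap, 1020000, 60));
	return 0;
}

This is also why the function keys off packets_out rather than tcp_packets_in_flight(), as its comment explains: if spurious RTOs or loss markings were allowed to reset the sampling start time, the same delivered count divided by a spuriously small interval would yield a spuriously high bandwidth estimate.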