usbnet: Add support for Byte Queue Limits (BQL)
In the current implementation, usbnet uses a fixed tx_qlen of: USB2: 60 * 1518 bytes = 91.08 KB USB3: 60 * 5 * 1518 bytes = 454.80 KB Such large transmit queues can be problematic, especially for cellular modems. For example, with a typical cellular link speed of 10 Mbit/s, a fully occupied USB3 transmit queue results in: 454.80 KB / (10 Mbit/s / 8 bit/byte) = 363.84 ms of additional latency. This patch adds support for Byte Queue Limits (BQL) [1] to dynamically manage the transmit queue size and reduce latency without sacrificing throughput. Testing was performed on various devices using the usbnet driver for packet transmission: - DELOCK 66045: USB3 to 2.5 GbE adapter (ax88179_178a) - DELOCK 61969: USB2 to 1 GbE adapter (asix) - Quectel RM520: 5G modem (qmi_wwan) - USB2 Android tethering (cdc_ncm) No performance degradation was observed for iperf3 TCP or UDP traffic, while latency for a prioritized ping application was significantly reduced. For example, using the USB3 to 2.5 GbE adapter, which was fully utilized by iperf3 UDP traffic, the prioritized ping was improved from 1.6 ms to 0.6 ms. With the same setup but with a 100 Mbit/s Ethernet connection, the prioritized ping was improved from 35 ms to 5 ms. [1] https://lwn.net/Articles/469652/ Signed-off-by: Simon Schippers <simon.schippers@tu-dortmund.de> Reviewed-by: Eric Dumazet <edumazet@google.com> Link: https://patch.msgid.link/20251106175615.26948-1-simon.schippers@tu-dortmund.de Signed-off-by: Jakub Kicinski <kuba@kernel.org>
pull/1354/merge
parent
23c52b58cc
commit
7ff14c5204
|
|
@ -831,6 +831,7 @@ int usbnet_stop(struct net_device *net)
|
||||||
|
|
||||||
clear_bit(EVENT_DEV_OPEN, &dev->flags);
|
clear_bit(EVENT_DEV_OPEN, &dev->flags);
|
||||||
netif_stop_queue (net);
|
netif_stop_queue (net);
|
||||||
|
netdev_reset_queue(net);
|
||||||
|
|
||||||
netif_info(dev, ifdown, dev->net,
|
netif_info(dev, ifdown, dev->net,
|
||||||
"stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
|
"stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
|
||||||
|
|
@ -939,6 +940,7 @@ int usbnet_open(struct net_device *net)
|
||||||
}
|
}
|
||||||
|
|
||||||
set_bit(EVENT_DEV_OPEN, &dev->flags);
|
set_bit(EVENT_DEV_OPEN, &dev->flags);
|
||||||
|
netdev_reset_queue(net);
|
||||||
netif_start_queue (net);
|
netif_start_queue (net);
|
||||||
netif_info(dev, ifup, dev->net,
|
netif_info(dev, ifup, dev->net,
|
||||||
"open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
|
"open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
|
||||||
|
|
@ -1500,6 +1502,7 @@ netdev_tx_t usbnet_start_xmit(struct sk_buff *skb, struct net_device *net)
|
||||||
case 0:
|
case 0:
|
||||||
netif_trans_update(net);
|
netif_trans_update(net);
|
||||||
__usbnet_queue_skb(&dev->txq, skb, tx_start);
|
__usbnet_queue_skb(&dev->txq, skb, tx_start);
|
||||||
|
netdev_sent_queue(net, skb->len);
|
||||||
if (dev->txq.qlen >= TX_QLEN (dev))
|
if (dev->txq.qlen >= TX_QLEN (dev))
|
||||||
netif_stop_queue (net);
|
netif_stop_queue (net);
|
||||||
}
|
}
|
||||||
|
|
@ -1563,6 +1566,7 @@ static inline void usb_free_skb(struct sk_buff *skb)
|
||||||
static void usbnet_bh(struct timer_list *t)
|
static void usbnet_bh(struct timer_list *t)
|
||||||
{
|
{
|
||||||
struct usbnet *dev = timer_container_of(dev, t, delay);
|
struct usbnet *dev = timer_container_of(dev, t, delay);
|
||||||
|
unsigned int bytes_compl = 0, pkts_compl = 0;
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
struct skb_data *entry;
|
struct skb_data *entry;
|
||||||
|
|
||||||
|
|
@ -1574,6 +1578,8 @@ static void usbnet_bh(struct timer_list *t)
|
||||||
usb_free_skb(skb);
|
usb_free_skb(skb);
|
||||||
continue;
|
continue;
|
||||||
case tx_done:
|
case tx_done:
|
||||||
|
bytes_compl += skb->len;
|
||||||
|
pkts_compl++;
|
||||||
kfree(entry->urb->sg);
|
kfree(entry->urb->sg);
|
||||||
fallthrough;
|
fallthrough;
|
||||||
case rx_cleanup:
|
case rx_cleanup:
|
||||||
|
|
@ -1584,6 +1590,10 @@ static void usbnet_bh(struct timer_list *t)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
spin_lock_bh(&dev->bql_spinlock);
|
||||||
|
netdev_completed_queue(dev->net, pkts_compl, bytes_compl);
|
||||||
|
spin_unlock_bh(&dev->bql_spinlock);
|
||||||
|
|
||||||
/* restart RX again after disabling due to high error rate */
|
/* restart RX again after disabling due to high error rate */
|
||||||
clear_bit(EVENT_RX_KILL, &dev->flags);
|
clear_bit(EVENT_RX_KILL, &dev->flags);
|
||||||
|
|
||||||
|
|
@ -1755,6 +1765,7 @@ usbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod)
|
||||||
skb_queue_head_init (&dev->txq);
|
skb_queue_head_init (&dev->txq);
|
||||||
skb_queue_head_init (&dev->done);
|
skb_queue_head_init (&dev->done);
|
||||||
skb_queue_head_init(&dev->rxq_pause);
|
skb_queue_head_init(&dev->rxq_pause);
|
||||||
|
spin_lock_init(&dev->bql_spinlock);
|
||||||
INIT_WORK(&dev->bh_work, usbnet_bh_work);
|
INIT_WORK(&dev->bh_work, usbnet_bh_work);
|
||||||
INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
|
INIT_WORK (&dev->kevent, usbnet_deferred_kevent);
|
||||||
init_usb_anchor(&dev->deferred);
|
init_usb_anchor(&dev->deferred);
|
||||||
|
|
|
||||||
|
|
@ -14,6 +14,7 @@
|
||||||
#include <linux/skbuff.h>
|
#include <linux/skbuff.h>
|
||||||
#include <linux/types.h>
|
#include <linux/types.h>
|
||||||
#include <linux/usb.h>
|
#include <linux/usb.h>
|
||||||
|
#include <linux/spinlock.h>
|
||||||
|
|
||||||
/* interface from usbnet core to each USB networking link we handle */
|
/* interface from usbnet core to each USB networking link we handle */
|
||||||
struct usbnet {
|
struct usbnet {
|
||||||
|
|
@ -59,6 +60,7 @@ struct usbnet {
|
||||||
struct mutex interrupt_mutex;
|
struct mutex interrupt_mutex;
|
||||||
struct usb_anchor deferred;
|
struct usb_anchor deferred;
|
||||||
struct work_struct bh_work;
|
struct work_struct bh_work;
|
||||||
|
spinlock_t bql_spinlock;
|
||||||
|
|
||||||
struct work_struct kevent;
|
struct work_struct kevent;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue