net: s/dev_set_threaded/netif_set_threaded/
Commit cc34acd577 ("docs: net: document new locking reality")
introduced netif_ vs dev_ function semantics: the former expects a locked
netdev, the latter takes care of the locking. We don't strictly
follow these semantics on either side, but there are more dev_xxx handlers
now that don't fit. Rename them to netif_xxx where appropriate.
Note that one dev_set_threaded call still remains in mt76 for a debugfs file.
Signed-off-by: Stanislav Fomichev <sdf@fomichev.me>
Link: https://patch.msgid.link/20250717172333.1288349-7-sdf@fomichev.me
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 93893a57ef
commit 5d4d84618e
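For context, the netif_/dev_ split the commit message refers to works as in the net/core/dev_api.c hunk further below: netif_xxx() assumes the caller already holds the per-netdev instance lock, while dev_xxx() is a thin wrapper that acquires and releases it. The following is a minimal sketch of that convention, not the full implementation: the dev_set_threaded() wrapper body is taken from this patch, while the netdev_assert_locked() check and the stubbed-out function body are assumptions added purely for illustration.

#include <linux/netdevice.h>

/* Locked variant: the caller is expected to hold the instance lock.
 * The assertion and the elided body are illustrative, not part of this patch.
 */
int netif_set_threaded(struct net_device *dev, bool threaded)
{
	netdev_assert_locked(dev);

	/* ... walk the device's NAPI instances and flip threaded polling ... */
	return 0;
}

/* Unlocked convenience variant: takes care of the locking itself
 * (this wrapper is added by the patch in net/core/dev_api.c).
 */
int dev_set_threaded(struct net_device *dev, bool threaded)
{
	int ret;

	netdev_lock(dev);
	ret = netif_set_threaded(dev, threaded);
	netdev_unlock(dev);

	return ret;
}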
@@ -165,7 +165,7 @@ struct sfp_bus* sfp_bus
 struct lock_class_key* qdisc_tx_busylock
 bool proto_down
 unsigned:1 wol_enabled
-unsigned:1 threaded napi_poll(napi_enable,dev_set_threaded)
+unsigned:1 threaded napi_poll(napi_enable,netif_set_threaded)
 unsigned_long:1 see_all_hwtstamp_requests
 unsigned_long:1 change_proto_down
 unsigned_long:1 netns_immutable
@@ -2688,7 +2688,7 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	adapter->mii.mdio_write = atl1c_mdio_write;
 	adapter->mii.phy_id_mask = 0x1f;
 	adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK;
-	dev_set_threaded(netdev, true);
+	netif_set_threaded(netdev, true);
 	for (i = 0; i < adapter->rx_queue_count; ++i)
 		netif_napi_add(netdev, &adapter->rrd_ring[i].napi,
 			       atl1c_clean_rx);
@@ -156,7 +156,7 @@ static int mlxsw_pci_napi_devs_init(struct mlxsw_pci *mlxsw_pci)
 	}
 	strscpy(mlxsw_pci->napi_dev_rx->name, "mlxsw_rx",
 		sizeof(mlxsw_pci->napi_dev_rx->name));
-	dev_set_threaded(mlxsw_pci->napi_dev_rx, true);
+	netif_set_threaded(mlxsw_pci->napi_dev_rx, true);
 
 	return 0;
 
@@ -3075,7 +3075,7 @@ static int ravb_probe(struct platform_device *pdev)
 	if (info->coalesce_irqs) {
 		netdev_sw_irq_coalesce_default_on(ndev);
 		if (num_present_cpus() == 1)
-			dev_set_threaded(ndev, true);
+			netif_set_threaded(ndev, true);
 	}
 
 	/* Network device register */
@@ -366,7 +366,7 @@ static int wg_newlink(struct net_device *dev,
 	if (ret < 0)
 		goto err_free_handshake_queue;
 
-	dev_set_threaded(dev, true);
+	netif_set_threaded(dev, true);
 	ret = register_netdevice(dev);
 	if (ret < 0)
 		goto err_uninit_ratelimiter;
@@ -936,7 +936,7 @@ static int ath10k_snoc_hif_start(struct ath10k *ar)
 
 	bitmap_clear(ar_snoc->pending_ce_irqs, 0, CE_COUNT_MAX);
 
-	dev_set_threaded(ar->napi_dev, true);
+	netif_set_threaded(ar->napi_dev, true);
 	ath10k_core_napi_enable(ar);
 	/* IRQs are left enabled when we restart due to a firmware crash */
 	if (!test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
@@ -589,6 +589,7 @@ static inline bool napi_complete(struct napi_struct *n)
 	return napi_complete_done(n, 0);
 }
 
+int netif_set_threaded(struct net_device *dev, bool threaded);
 int dev_set_threaded(struct net_device *dev, bool threaded);
 
 void napi_disable(struct napi_struct *n);
@@ -4798,7 +4798,7 @@ static inline void ____napi_schedule(struct softnet_data *sd,
 
 	if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
 		/* Paired with smp_mb__before_atomic() in
-		 * napi_enable()/dev_set_threaded().
+		 * napi_enable()/netif_set_threaded().
 		 * Use READ_ONCE() to guarantee a complete
 		 * read on napi->thread. Only call
 		 * wake_up_process() when it's not NULL.
@@ -6990,7 +6990,7 @@ int napi_set_threaded(struct napi_struct *napi, bool threaded)
 	return 0;
 }
 
-int dev_set_threaded(struct net_device *dev, bool threaded)
+int netif_set_threaded(struct net_device *dev, bool threaded)
 {
 	struct napi_struct *napi;
 	int err = 0;
@@ -7031,7 +7031,7 @@ int dev_set_threaded(struct net_device *dev, bool threaded)
 
 	return err;
 }
-EXPORT_SYMBOL(dev_set_threaded);
+EXPORT_SYMBOL(netif_set_threaded);
 
 /**
  * netif_queue_set_napi - Associate queue with the napi
@@ -367,3 +367,15 @@ void netdev_state_change(struct net_device *dev)
 	netdev_unlock_ops(dev);
 }
 EXPORT_SYMBOL(netdev_state_change);
+
+int dev_set_threaded(struct net_device *dev, bool threaded)
+{
+	int ret;
+
+	netdev_lock(dev);
+	ret = netif_set_threaded(dev, threaded);
+	netdev_unlock(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL(dev_set_threaded);
@@ -757,7 +757,7 @@ static int modify_napi_threaded(struct net_device *dev, unsigned long val)
 	if (val != 0 && val != 1)
 		return -EOPNOTSUPP;
 
-	ret = dev_set_threaded(dev, val);
+	ret = netif_set_threaded(dev, val);
 
 	return ret;
 }