A set of updates for SMP function calls:
- Improve locality of smp_call_function_any() by utilizing
sched_numa_find_nth_cpu() instead of picking a random CPU
- Wait for work completion in smp_call_function_many_cond() only when
there was actually work enqueued
- Simplify functions by utilizing the appropriate cpumask_*()
interfaces
- Trivial cleanups
-----BEGIN PGP SIGNATURE-----
iQJHBAABCgAxFiEEQp8+kY+LLUocC4bMphj1TA10mKEFAmiGkfQTHHRnbHhAbGlu
dXRyb25peC5kZQAKCRCmGPVMDXSYoVFFD/9OyKVhAlk3fP4PJG3VBZs/8IDp52Wo
vXHZPAyjRm0mtgonmRKQfNh9Xow6/ISiSxoE6yy98aEXRnzPgygHpwZfVwpEP5Q+
Ys0Y6DpaDW2Uw+a9qfBvpnEawmWK+b5N58ApLSMbabv6MdZhElI2SEjZKtqTda0j
161nRGADXPYm6uIw2kbAGseHpTslKCqTLdMHvvCnSx2Qa6Otw3VMWlYBpsOoqf7n
9+OA7rwpSArjgjGHJJKgwtdRfvobIYReEWUXOP6QF7Vgm4H5i9kgvD7NuFCa9Ykv
2kZnknuIplp9V+AvSsFjMu+RdxpktlL348Pnl6tZdjYrHQrgCWjhb11aD8gi8pb5
sdqAupJ2+N7woqfwuKFuzcEBjnjSbV0Jeks8GDQzuWOiniMn4BCj3qWPtIszZ80z
YddgGXf4RNJjytWjMyohh472YBQ+O3rlvVDmR011GnNdIphl8ovrtI9r+Ra6FwVg
eHmjr8yGjzmntay6KjbP+iQVjzqCFz6Lz7kTQBXGP3MPcd7du9R7KBGY6rm1+FJ5
3D4yIxIgK9sWg5GEr//1fdoi9wIrxsAfvgIsqpliwpHZ7wScyG98Iq74QsPGoimP
LgTHkHsxcMnsaHM8lLTo4mArbunQJTFtx/lYRk++lj1jfqxlLNUXmH6mQmKC+fla
Jz6duXcmFOoI3A==
=dFmz
-----END PGP SIGNATURE-----
Merge tag 'smp-core-2025-07-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull smp updates from Thomas Gleixner:
"A set of updates for SMP function calls:
- Improve locality of smp_call_function_any() by utilizing
sched_numa_find_nth_cpu() instead of picking a random CPU
- Wait for work completion in smp_call_function_many_cond() only when
there was actually work enqueued
- Simplify functions by utilizing the appropriate cpumask_*()
interfaces
- Trivial cleanups"
* tag 'smp-core-2025-07-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
smp: Wait only if work was enqueued
smp: Defer check for local execution in smp_call_function_many_cond()
smp: Use cpumask_any_but() in smp_call_function_many_cond()
smp: Improve locality in smp_call_function_any()
smp: Fix typo in comment for raw_smp_processor_id()
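For context, smp_call_function_any() runs func on one arbitrary CPU out of mask, so callers only require that it runs somewhere; that freedom is what the locality change exploits. A minimal usage sketch, with a hypothetical callback and helper (not part of this series):

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <asm/timex.h>

/* Illustrative callback: runs on whichever CPU the API selects. */
static void sample_cycles(void *info)
{
	*(u64 *)info = get_cycles();
}

static int sample_cycles_on(const struct cpumask *mask, u64 *val)
{
	/*
	 * Prefers the current CPU if it is in @mask; otherwise, after
	 * this series, the CPU in @mask nearest to the current NUMA
	 * node. wait=1 blocks until the callback has completed.
	 */
	return smp_call_function_any(mask, sample_cycles, val, 1);
}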
commit b34111a89f
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -234,7 +234,7 @@ static inline int get_boot_cpu_id(void)
 #endif /* !SMP */
 
 /**
- * raw_processor_id() - get the current (unstable) CPU id
+ * raw_smp_processor_id() - get the current (unstable) CPU id
  *
  * For then you know what you are doing and need an unstable
  * CPU id.
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -741,32 +741,19 @@ EXPORT_SYMBOL_GPL(smp_call_function_single_async);
  *
  * Selection preference:
  *	1) current cpu if in @mask
- *	2) any cpu of current node if in @mask
- *	3) any other online cpu in @mask
+ *	2) nearest cpu in @mask, based on NUMA topology
  */
 int smp_call_function_any(const struct cpumask *mask,
 			  smp_call_func_t func, void *info, int wait)
 {
 	unsigned int cpu;
-	const struct cpumask *nodemask;
 	int ret;
 
 	/* Try for same CPU (cheapest) */
 	cpu = get_cpu();
-	if (cpumask_test_cpu(cpu, mask))
-		goto call;
-
-	/* Try for same node. */
-	nodemask = cpumask_of_node(cpu_to_node(cpu));
-	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
-	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
-		if (cpu_online(cpu))
-			goto call;
-	}
-
-	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
-	cpu = cpumask_any_and(mask, cpu_online_mask);
-call:
+	if (!cpumask_test_cpu(cpu, mask))
+		cpu = sched_numa_find_nth_cpu(mask, 0, cpu_to_node(cpu));
+
 	ret = smp_call_function_single(cpu, func, info, wait);
 	put_cpu();
 	return ret;
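The locality gain hinges on sched_numa_find_nth_cpu(), which returns the Nth CPU of a mask in order of increasing NUMA distance from a given node, so N = 0 is the nearest eligible CPU. A standalone restatement of the new selection logic above (sketch only; the !CONFIG_NUMA stub reducing to a plain mask lookup is my reading of include/linux/topology.h):

static unsigned int pick_call_cpu(const struct cpumask *mask)
{
	unsigned int cpu = smp_processor_id();	/* caller did get_cpu() */

	/* Cheapest: stay on the current CPU if the mask allows it. */
	if (cpumask_test_cpu(cpu, mask))
		return cpu;

	/* Otherwise take the 0th-nearest CPU of @mask from our node. */
	return sched_numa_find_nth_cpu(mask, 0, cpu_to_node(cpu));
}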
@@ -792,7 +779,6 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 	bool wait = scf_flags & SCF_WAIT;
 	int nr_cpus = 0;
 	bool run_remote = false;
-	bool run_local = false;
 
 	lockdep_assert_preemption_disabled();
 
@@ -814,19 +800,8 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 	 */
 	WARN_ON_ONCE(!in_task());
 
-	/* Check if we need local execution. */
-	if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask) &&
-	    (!cond_func || cond_func(this_cpu, info)))
-		run_local = true;
-
 	/* Check if we need remote execution, i.e., any CPU excluding this one. */
-	cpu = cpumask_first_and(mask, cpu_online_mask);
-	if (cpu == this_cpu)
-		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
-	if (cpu < nr_cpu_ids)
-		run_remote = true;
-
-	if (run_remote) {
+	if (cpumask_any_and_but(mask, cpu_online_mask, this_cpu) < nr_cpu_ids) {
 		cfd = this_cpu_ptr(&cfd_data);
 		cpumask_and(cfd->cpumask, mask, cpu_online_mask);
 		__cpumask_clear_cpu(this_cpu, cfd->cpumask);
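cpumask_any_and_but(m1, m2, cpu) returns an arbitrary CPU in m1 & m2 other than cpu, or >= nr_cpu_ids if the intersection is empty, which is why the former first/next probing collapses into a single call. An open-coded equivalent of the predicate, for illustration only:

/* Roughly what the one-liner above tests: any online remote target? */
static bool have_remote_target(const struct cpumask *mask,
			       unsigned int this_cpu)
{
	unsigned int cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (cpu != this_cpu)
			return true;
	}
	return false;
}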
@@ -840,6 +815,9 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 				continue;
 			}
 
+			/* Work is enqueued on a remote CPU. */
+			run_remote = true;
+
 			csd_lock(csd);
 			if (wait)
 				csd->node.u_flags |= CSD_TYPE_SYNC;
@@ -851,6 +829,10 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 #endif
 			trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);
 
+			/*
+			 * Kick the remote CPU if this is the first work
+			 * item enqueued.
+			 */
 			if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
 				__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
 				nr_cpus++;
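The kick relies on llist_add() returning true only when the list was previously empty: the first csd queued for a CPU earns it an IPI via cfd->cpumask_ipi, and later items ride along with the flush that is already pending. A toy illustration of that contract (struct and names hypothetical):

#include <linux/llist.h>

struct demo_item {
	struct llist_node llnode;
};

/* Returns true iff @item is the first pending entry, i.e. a kick is due. */
static bool demo_enqueue(struct llist_head *queue, struct demo_item *item)
{
	return llist_add(&item->llnode, queue);
}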
@@ -869,7 +851,9 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 		send_call_function_ipi_mask(cfd->cpumask_ipi);
 	}
 
-	if (run_local) {
+	/* Check if we need local execution. */
+	if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask) &&
+	    (!cond_func || cond_func(this_cpu, info))) {
 		unsigned long flags;
 
 		local_irq_save(flags);
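Taken together, the last two hunks restructure the tail of smp_call_function_many_cond(): run_remote is now set only when a csd was actually queued, and the local-execution test happens in place rather than being precomputed into a run_local flag. An abridged control-flow sketch of the resulting function (the trailing wait loop sits outside the hunks shown and is paraphrased from the surrounding code):

	if (cpumask_any_and_but(mask, cpu_online_mask, this_cpu) < nr_cpu_ids) {
		/* ... queue csd's; run_remote = true per enqueued item ... */
		/* ... IPI each CPU whose queue went non-empty ... */
	}

	/* Check if we need local execution. */
	if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask) &&
	    (!cond_func || cond_func(this_cpu, info))) {
		/* ... run func(info) locally with IRQs disabled ... */
	}

	/* Wait only if remote work was actually enqueued. */
	if (run_remote && wait) {
		/* ... csd_lock_wait() on each queued csd ... */
	}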