fs: replace use of system_unbound_wq with system_dfl_wq

Currently, if a user enqueues a work item using schedule_delayed_work(),
the workqueue used is system_wq (a per-CPU wq), while queue_delayed_work()
uses WORK_CPU_UNBOUND (used when no CPU is specified). The same applies to
schedule_work(), which uses system_wq, and queue_work(), which again uses
WORK_CPU_UNBOUND.

This lack of consistency cannot be addressed without refactoring the API.

system_unbound_wq should be the default workqueue, so that locality
constraints are not enforced on random work that does not require them.

Add system_dfl_wq to encourage its use when unbound work should be queued.

The old system_unbound_wq will be kept for a few release cycles.
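
To illustrate the intended change at call sites, here is a minimal sketch
(the work item and handler names are made up for the example; in practice
a caller would pick only one of the queueing calls):

	#include <linux/workqueue.h>

	static void example_fn(struct work_struct *work)
	{
		/* CPU-locality-agnostic deferred work (hypothetical handler) */
	}

	static DECLARE_WORK(example_work, example_fn);

	static void example_queue(void)
	{
		/*
		 * schedule_work() expands to queue_work(system_wq, ...), i.e.
		 * the per-CPU queue, even when locality is irrelevant.
		 */
		schedule_work(&example_work);

		/* Old spelling for explicitly unbound work. */
		queue_work(system_unbound_wq, &example_work);

		/* New spelling introduced here: the default (unbound) queue. */
		queue_work(system_dfl_wq, &example_work);
	}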

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Link: https://lore.kernel.org/20250916082906.77439-2-marco.crivellari@suse.com
Signed-off-by: Christian Brauner <brauner@kernel.org>
commit 7a4f92d39f (parent 8f5ae30d69)
Author: Marco Crivellari, 2025-09-16 10:29:04 +02:00
Committer: Christian Brauner
16 changed files with 22 additions and 22 deletions

@@ -42,7 +42,7 @@ static void afs_volume_init_callback(struct afs_volume *volume)
 	list_for_each_entry(vnode, &volume->open_mmaps, cb_mmap_link) {
 		if (vnode->cb_v_check != atomic_read(&volume->cb_v_break)) {
 			afs_clear_cb_promise(vnode, afs_cb_promise_clear_vol_init_cb);
-			queue_work(system_unbound_wq, &vnode->cb_work);
+			queue_work(system_dfl_wq, &vnode->cb_work);
 		}
 	}
@@ -90,7 +90,7 @@ void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reas
 		if (reason != afs_cb_break_for_deleted &&
 		    vnode->status.type == AFS_FTYPE_FILE &&
 		    atomic_read(&vnode->cb_nr_mmap))
-			queue_work(system_unbound_wq, &vnode->cb_work);
+			queue_work(system_dfl_wq, &vnode->cb_work);
 		trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, true);
 	} else {

@@ -172,7 +172,7 @@ static void afs_issue_write_worker(struct work_struct *work)
 void afs_issue_write(struct netfs_io_subrequest *subreq)
 {
 	subreq->work.func = afs_issue_write_worker;
-	if (!queue_work(system_unbound_wq, &subreq->work))
+	if (!queue_work(system_dfl_wq, &subreq->work))
 		WARN_ON_ONCE(1);
 }

@@ -827,7 +827,7 @@ int bch2_journal_keys_to_write_buffer_end(struct bch_fs *c, struct journal_keys_
 	if (bch2_btree_write_buffer_should_flush(c) &&
 	    __enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_btree_write_buffer) &&
-	    !queue_work(system_unbound_wq, &c->btree_write_buffer.flush_work))
+	    !queue_work(system_dfl_wq, &c->btree_write_buffer.flush_work))
 		enumerated_ref_put(&c->writes, BCH_WRITE_REF_btree_write_buffer);
 	if (dst->wb == &wb->flushing)

@@ -684,7 +684,7 @@ static void bch2_rbio_error(struct bch_read_bio *rbio,
 	if (bch2_err_matches(ret, BCH_ERR_data_read_retry)) {
 		bch2_rbio_punt(rbio, bch2_rbio_retry,
-			       RBIO_CONTEXT_UNBOUND, system_unbound_wq);
+			       RBIO_CONTEXT_UNBOUND, system_dfl_wq);
 	} else {
 		rbio = bch2_rbio_free(rbio);
@@ -921,10 +921,10 @@ csum_err:
 	bch2_rbio_error(rbio, -BCH_ERR_data_read_retry_csum_err, BLK_STS_IOERR);
 	goto out;
 decompression_err:
-	bch2_rbio_punt(rbio, bch2_read_decompress_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq);
+	bch2_rbio_punt(rbio, bch2_read_decompress_err, RBIO_CONTEXT_UNBOUND, system_dfl_wq);
 	goto out;
 decrypt_err:
-	bch2_rbio_punt(rbio, bch2_read_decrypt_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq);
+	bch2_rbio_punt(rbio, bch2_read_decrypt_err, RBIO_CONTEXT_UNBOUND, system_dfl_wq);
 	goto out;
 }
@@ -963,7 +963,7 @@ static void bch2_read_endio(struct bio *bio)
 	    rbio->promote ||
 	    crc_is_compressed(rbio->pick.crc) ||
 	    bch2_csum_type_is_encryption(rbio->pick.crc.csum_type))
-		context = RBIO_CONTEXT_UNBOUND, wq = system_unbound_wq;
+		context = RBIO_CONTEXT_UNBOUND, wq = system_dfl_wq;
 	else if (rbio->pick.crc.csum_type)
 		context = RBIO_CONTEXT_HIGHPRI, wq = system_highpri_wq;

@@ -1362,7 +1362,7 @@ int bch2_journal_read(struct bch_fs *c,
 					       BCH_DEV_READ_REF_journal_read))
 			closure_call(&ca->journal.read,
 				     bch2_journal_read_device,
-				     system_unbound_wq,
+				     system_dfl_wq,
 				     &jlist.cl);
 		else
 			degraded = true;

@@ -2031,7 +2031,7 @@ void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info)
 	btrfs_reclaim_sweep(fs_info);
 	spin_lock(&fs_info->unused_bgs_lock);
 	if (!list_empty(&fs_info->reclaim_bgs))
-		queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work);
+		queue_work(system_dfl_wq, &fs_info->reclaim_bgs_work);
 	spin_unlock(&fs_info->unused_bgs_lock);
 }

@@ -1372,7 +1372,7 @@ void btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
 	if (atomic64_cmpxchg(&fs_info->em_shrinker_nr_to_scan, 0, nr_to_scan) != 0)
 		return;

-	queue_work(system_unbound_wq, &fs_info->em_shrinker_work);
+	queue_work(system_dfl_wq, &fs_info->em_shrinker_work);
 }

 void btrfs_init_extent_map_shrinker_work(struct btrfs_fs_info *fs_info)

@@ -1830,7 +1830,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
 						  space_info->flags,
 						  orig_bytes, flush,
 						  "enospc");
-			queue_work(system_unbound_wq, async_work);
+			queue_work(system_dfl_wq, async_work);
 		}
 	} else {
 		list_add_tail(&ticket.list,
@@ -1847,7 +1847,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
 		    need_preemptive_reclaim(fs_info, space_info)) {
 			trace_btrfs_trigger_flush(fs_info, space_info->flags,
 						  orig_bytes, flush, "preempt");
-			queue_work(system_unbound_wq,
+			queue_work(system_dfl_wq,
 				   &fs_info->preempt_reclaim_work);
 		}
 	}

@@ -2488,7 +2488,7 @@ void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
 	refcount_inc(&eb->refs);
 	bg->last_eb = eb;
 	INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn);
-	queue_work(system_unbound_wq, &bg->zone_finish_work);
+	queue_work(system_dfl_wq, &bg->zone_finish_work);
 }

 void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)

@@ -635,7 +635,7 @@ static int umh_coredump_setup(struct subprocess_info *info, struct cred *new)
 	/*
 	 * Usermode helpers are childen of either
-	 * system_unbound_wq or of kthreadd. So we know that
+	 * system_dfl_wq or of kthreadd. So we know that
 	 * we're starting off with a clean file descriptor
 	 * table. So we should always be able to use
 	 * COREDUMP_PIDFD_NUMBER as our file descriptor value.

@@ -3995,7 +3995,7 @@ void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
 		list_splice_tail(&freed_data_list, &sbi->s_discard_list);
 		spin_unlock(&sbi->s_md_lock);
 		if (wake)
-			queue_work(system_unbound_wq, &sbi->s_discard_work);
+			queue_work(system_dfl_wq, &sbi->s_discard_work);
 	} else {
 		list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
 			kmem_cache_free(ext4_free_data_cachep, entry);

@@ -321,7 +321,7 @@ void netfs_wake_collector(struct netfs_io_request *rreq)
 {
 	if (test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags) &&
 	    !test_bit(NETFS_RREQ_RETRYING, &rreq->flags)) {
-		queue_work(system_unbound_wq, &rreq->work);
+		queue_work(system_dfl_wq, &rreq->work);
 	} else {
 		trace_netfs_rreq(rreq, netfs_rreq_trace_wake_queue);
 		wake_up(&rreq->waitq);

@@ -163,7 +163,7 @@ void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace
 		dead = __refcount_dec_and_test(&rreq->ref, &r);
 		trace_netfs_rreq_ref(debug_id, r - 1, what);
 		if (dead)
-			WARN_ON(!queue_work(system_unbound_wq, &rreq->cleanup_work));
+			WARN_ON(!queue_work(system_dfl_wq, &rreq->cleanup_work));
 	}
 }

@@ -113,7 +113,7 @@ static void
 nfsd_file_schedule_laundrette(void)
 {
 	if (test_bit(NFSD_FILE_CACHE_UP, &nfsd_file_flags))
-		queue_delayed_work(system_unbound_wq, &nfsd_filecache_laundrette,
+		queue_delayed_work(system_dfl_wq, &nfsd_filecache_laundrette,
 				   NFSD_LAUNDRETTE_DELAY);
 }

@@ -428,7 +428,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
 		conn->destroy_next = connector_destroy_list;
 		connector_destroy_list = conn;
 		spin_unlock(&destroy_lock);
-		queue_work(system_unbound_wq, &connector_reaper_work);
+		queue_work(system_dfl_wq, &connector_reaper_work);
 	}
 	/*
 	 * Note that we didn't update flags telling whether inode cares about
@@ -439,7 +439,7 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)
 	spin_lock(&destroy_lock);
 	list_add(&mark->g_list, &destroy_list);
 	spin_unlock(&destroy_lock);
-	queue_delayed_work(system_unbound_wq, &reaper_work,
+	queue_delayed_work(system_dfl_wq, &reaper_work,
 			   FSNOTIFY_REAPER_DELAY);
 }
 EXPORT_SYMBOL_GPL(fsnotify_put_mark);

@@ -881,7 +881,7 @@ void dqput(struct dquot *dquot)
 	put_releasing_dquots(dquot);
 	atomic_dec(&dquot->dq_count);
 	spin_unlock(&dq_list_lock);
-	queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
+	queue_delayed_work(system_dfl_wq, &quota_release_work, 1);
 }
 EXPORT_SYMBOL(dqput);