fs: fall back to file_ref_put() for non-last reference

This reduces the slowdown in the face of multiple callers issuing close on
what turns out not to be the last reference.

Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
Link: https://lore.kernel.org/20250418125756.59677-1-mjguzik@gmail.com
Reviewed-by: Jan Kara <jack@suse.cz>
Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202504171513.6d6f8a16-lkp@intel.com
Signed-off-by: Christian Brauner <brauner@kernel.org>
pull/1210/head
Mateusz Guzik 2025-04-18 14:57:56 +02:00 committed by Christian Brauner
parent 53f7eedd88
commit d1f7256a5a
No known key found for this signature in database
GPG Key ID: 91C61BC06578DCA2
2 changed files with 7 additions and 14 deletions

View File

@ -26,7 +26,7 @@
#include "internal.h"
bool __file_ref_put_badval(file_ref_t *ref, unsigned long cnt)
static noinline bool __file_ref_put_badval(file_ref_t *ref, unsigned long cnt)
{
/*
* If the reference count was already in the dead zone, then this

View File

@ -61,7 +61,6 @@ static inline void file_ref_init(file_ref_t *ref, unsigned long cnt)
atomic_long_set(&ref->refcnt, cnt - 1);
}
bool __file_ref_put_badval(file_ref_t *ref, unsigned long cnt);
bool __file_ref_put(file_ref_t *ref, unsigned long cnt);
/**
@ -178,20 +177,14 @@ static __always_inline __must_check bool file_ref_put(file_ref_t *ref)
*/
static __always_inline __must_check bool file_ref_put_close(file_ref_t *ref)
{
	long old;

	/*
	 * Fast path: the caller is likely holding the last reference, so
	 * try to flip ONEREF -> DEAD with a single cmpxchg and avoid the
	 * full decrement machinery in file_ref_put().
	 */
	old = atomic_long_read(&ref->refcnt);
	if (likely(old == FILE_REF_ONEREF)) {
		if (likely(atomic_long_try_cmpxchg(&ref->refcnt, &old, FILE_REF_DEAD)))
			return true;
	}
	/*
	 * Not the last reference (or we raced with another put): fall back
	 * to the generic slow path, which also handles the dead-zone and
	 * saturation checks via __file_ref_put().
	 */
	return file_ref_put(ref);
}
/**