mm: shmem: use SHMEM_F_* flags instead of VM_* flags
shmem_inode_info::flags can have the VM flags VM_NORESERVE and VM_LOCKED. These are used to suppress pre-accounting or to lock the pages in the inode respectively. Using the VM flags directly makes it difficult to add shmem-specific flags that are unrelated to VM behavior since one would need to find a VM flag not used by shmem and re-purpose it. Introduce SHMEM_F_NORESERVE and SHMEM_F_LOCKED which represent the same information, but their bits are independent of the VM flags. Callers can still pass VM_NORESERVE to shmem_get_inode(), but it gets transformed to the shmem-specific flag internally. No functional changes intended. Link: https://lkml.kernel.org/r/20251125165850.3389713-11-pasha.tatashin@soleen.com Signed-off-by: Pratyush Yadav <ptyadav@amazon.de> Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com> Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org> Tested-by: David Matlack <dmatlack@google.com> Cc: Aleksander Lobakin <aleksander.lobakin@intel.com> Cc: Alexander Graf <graf@amazon.com> Cc: Alice Ryhl <aliceryhl@google.com> Cc: Andriy Shevchenko <andriy.shevchenko@linux.intel.com> Cc: anish kumar <yesanishhere@gmail.com> Cc: Anna Schumaker <anna.schumaker@oracle.com> Cc: Bartosz Golaszewski <bartosz.golaszewski@linaro.org> Cc: Bjorn Helgaas <bhelgaas@google.com> Cc: Borislav Betkov <bp@alien8.de> Cc: Chanwoo Choi <cw00.choi@samsung.com> Cc: Chen Ridong <chenridong@huawei.com> Cc: Chris Li <chrisl@kernel.org> Cc: Christian Brauner <brauner@kernel.org> Cc: Daniel Wagner <wagi@kernel.org> Cc: Danilo Krummrich <dakr@kernel.org> Cc: Dan Williams <dan.j.williams@intel.com> Cc: David Hildenbrand <david@redhat.com> Cc: David Jeffery <djeffery@redhat.com> Cc: David Rientjes <rientjes@google.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Guixin Liu <kanie@linux.alibaba.com> Cc: "H. 
Peter Anvin" <hpa@zytor.com> Cc: Hugh Dickins <hughd@google.com> Cc: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Ira Weiny <ira.weiny@intel.com> Cc: Jann Horn <jannh@google.com> Cc: Jason Gunthorpe <jgg@nvidia.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Joanthan Cameron <Jonathan.Cameron@huawei.com> Cc: Joel Granados <joel.granados@kernel.org> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Jonathan Corbet <corbet@lwn.net> Cc: Lennart Poettering <lennart@poettering.net> Cc: Leon Romanovsky <leon@kernel.org> Cc: Leon Romanovsky <leonro@nvidia.com> Cc: Lukas Wunner <lukas@wunner.de> Cc: Marc Rutland <mark.rutland@arm.com> Cc: Masahiro Yamada <masahiroy@kernel.org> Cc: Matthew Maurer <mmaurer@google.com> Cc: Miguel Ojeda <ojeda@kernel.org> Cc: Myugnjoo Ham <myungjoo.ham@samsung.com> Cc: Parav Pandit <parav@nvidia.com> Cc: Pratyush Yadav <pratyush@kernel.org> Cc: Randy Dunlap <rdunlap@infradead.org> Cc: Roman Gushchin <roman.gushchin@linux.dev> Cc: Saeed Mahameed <saeedm@nvidia.com> Cc: Samiullah Khawaja <skhawaja@google.com> Cc: Song Liu <song@kernel.org> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Stuart Hayes <stuart.w.hayes@gmail.com> Cc: Tejun Heo <tj@kernel.org> Cc: Thomas Gleinxer <tglx@linutronix.de> Cc: Thomas Weißschuh <linux@weissschuh.net> Cc: Vincent Guittot <vincent.guittot@linaro.org> Cc: William Tu <witu@nvidia.com> Cc: Yoann Congal <yoann.congal@smile.fr> Cc: Zhu Yanjun <yanjun.zhu@linux.dev> Cc: Zijun Hu <quic_zijuhu@quicinc.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>pull/1354/merge
parent
7a5afa7ea8
commit
6ff1610ced
|
|
@@ -10,6 +10,7 @@
|
|||
#include <linux/xattr.h>
|
||||
#include <linux/fs_parser.h>
|
||||
#include <linux/userfaultfd_k.h>
|
||||
#include <linux/bits.h>
|
||||
|
||||
struct swap_iocb;
|
||||
|
||||
|
|
@@ -19,6 +20,11 @@ struct swap_iocb;
|
|||
#define SHMEM_MAXQUOTAS 2
|
||||
#endif
|
||||
|
||||
/* Suppress pre-accounting of the entire object size. */
|
||||
#define SHMEM_F_NORESERVE BIT(0)
|
||||
/* Disallow swapping. */
|
||||
#define SHMEM_F_LOCKED BIT(1)
|
||||
|
||||
struct shmem_inode_info {
|
||||
spinlock_t lock;
|
||||
unsigned int seals; /* shmem seals */
|
||||
|
|
|
|||
28
mm/shmem.c
28
mm/shmem.c
|
|
@@ -175,20 +175,20 @@ static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
|
|||
*/
|
||||
static inline int shmem_acct_size(unsigned long flags, loff_t size)
|
||||
{
|
||||
return (flags & VM_NORESERVE) ?
|
||||
return (flags & SHMEM_F_NORESERVE) ?
|
||||
0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
|
||||
}
|
||||
|
||||
static inline void shmem_unacct_size(unsigned long flags, loff_t size)
|
||||
{
|
||||
if (!(flags & VM_NORESERVE))
|
||||
if (!(flags & SHMEM_F_NORESERVE))
|
||||
vm_unacct_memory(VM_ACCT(size));
|
||||
}
|
||||
|
||||
static inline int shmem_reacct_size(unsigned long flags,
|
||||
loff_t oldsize, loff_t newsize)
|
||||
{
|
||||
if (!(flags & VM_NORESERVE)) {
|
||||
if (!(flags & SHMEM_F_NORESERVE)) {
|
||||
if (VM_ACCT(newsize) > VM_ACCT(oldsize))
|
||||
return security_vm_enough_memory_mm(current->mm,
|
||||
VM_ACCT(newsize) - VM_ACCT(oldsize));
|
||||
|
|
@@ -206,7 +206,7 @@ static inline int shmem_reacct_size(unsigned long flags,
|
|||
*/
|
||||
static inline int shmem_acct_blocks(unsigned long flags, long pages)
|
||||
{
|
||||
if (!(flags & VM_NORESERVE))
|
||||
if (!(flags & SHMEM_F_NORESERVE))
|
||||
return 0;
|
||||
|
||||
return security_vm_enough_memory_mm(current->mm,
|
||||
|
|
@@ -215,7 +215,7 @@ static inline int shmem_acct_blocks(unsigned long flags, long pages)
|
|||
|
||||
static inline void shmem_unacct_blocks(unsigned long flags, long pages)
|
||||
{
|
||||
if (flags & VM_NORESERVE)
|
||||
if (flags & SHMEM_F_NORESERVE)
|
||||
vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
|
||||
}
|
||||
|
||||
|
|
@@ -1551,7 +1551,7 @@ int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
|
|||
int nr_pages;
|
||||
bool split = false;
|
||||
|
||||
if ((info->flags & VM_LOCKED) || sbinfo->noswap)
|
||||
if ((info->flags & SHMEM_F_LOCKED) || sbinfo->noswap)
|
||||
goto redirty;
|
||||
|
||||
if (!total_swap_pages)
|
||||
|
|
@@ -2910,15 +2910,15 @@ int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
|
|||
* ipc_lock_object() when called from shmctl_do_lock(),
|
||||
* no serialization needed when called from shm_destroy().
|
||||
*/
|
||||
if (lock && !(info->flags & VM_LOCKED)) {
|
||||
if (lock && !(info->flags & SHMEM_F_LOCKED)) {
|
||||
if (!user_shm_lock(inode->i_size, ucounts))
|
||||
goto out_nomem;
|
||||
info->flags |= VM_LOCKED;
|
||||
info->flags |= SHMEM_F_LOCKED;
|
||||
mapping_set_unevictable(file->f_mapping);
|
||||
}
|
||||
if (!lock && (info->flags & VM_LOCKED) && ucounts) {
|
||||
if (!lock && (info->flags & SHMEM_F_LOCKED) && ucounts) {
|
||||
user_shm_unlock(inode->i_size, ucounts);
|
||||
info->flags &= ~VM_LOCKED;
|
||||
info->flags &= ~SHMEM_F_LOCKED;
|
||||
mapping_clear_unevictable(file->f_mapping);
|
||||
}
|
||||
retval = 0;
|
||||
|
|
@@ -3062,7 +3062,7 @@ static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
|
|||
spin_lock_init(&info->lock);
|
||||
atomic_set(&info->stop_eviction, 0);
|
||||
info->seals = F_SEAL_SEAL;
|
||||
info->flags = flags & VM_NORESERVE;
|
||||
info->flags = (flags & VM_NORESERVE) ? SHMEM_F_NORESERVE : 0;
|
||||
info->i_crtime = inode_get_mtime(inode);
|
||||
info->fsflags = (dir == NULL) ? 0 :
|
||||
SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED;
|
||||
|
|
@@ -5804,8 +5804,10 @@ static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
|
|||
/* common code */
|
||||
|
||||
static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name,
|
||||
loff_t size, unsigned long flags, unsigned int i_flags)
|
||||
loff_t size, unsigned long vm_flags,
|
||||
unsigned int i_flags)
|
||||
{
|
||||
unsigned long flags = (vm_flags & VM_NORESERVE) ? SHMEM_F_NORESERVE : 0;
|
||||
struct inode *inode;
|
||||
struct file *res;
|
||||
|
||||
|
|
@@ -5822,7 +5824,7 @@ static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name,
|
|||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
|
||||
S_IFREG | S_IRWXUGO, 0, flags);
|
||||
S_IFREG | S_IRWXUGO, 0, vm_flags);
|
||||
if (IS_ERR(inode)) {
|
||||
shmem_unacct_size(flags, size);
|
||||
return ERR_CAST(inode);
|
||||
|
|
|
|||
Loading…
Reference in New Issue