mm: swap_cgroup: get rid of __lookup_swap_cgroup()

Because the swap_cgroup map is now virtually contiguous, swap_cgroup_record()
can be simplified, which eliminates the need to use __lookup_swap_cgroup().

Now as __lookup_swap_cgroup() is really trivial and is used only once, it
can be inlined.

Link: https://lkml.kernel.org/r/20241115190229.676440-2-roman.gushchin@linux.dev
Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
pull/1134/merge
Roman Gushchin 2024-11-15 19:02:29 +00:00 committed by Andrew Morton
parent 8eb92ed254
commit 146ca40193
1 changed file with 2 additions and 16 deletions

View File

@ -33,13 +33,6 @@ static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];
*
* TODO: we can push these buffers out to HIGHMEM.
*/
/*
 * Return a pointer to the swap_cgroup entry at @offset within @ctrl's map.
 * Trivial index into the (now virtually contiguous) map array; this commit
 * removes the helper and inlines the expression at its single call site.
 */
static struct swap_cgroup *__lookup_swap_cgroup(struct swap_cgroup_ctrl *ctrl,
pgoff_t offset)
{
return &ctrl->map[offset];
}
static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
struct swap_cgroup_ctrl **ctrlp)
{
@ -49,7 +42,7 @@ static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
ctrl = &swap_cgroup_ctrl[swp_type(ent)];
if (ctrlp)
*ctrlp = ctrl;
return __lookup_swap_cgroup(ctrl, offset);
return &ctrl->map[offset];
}
/**
@ -104,16 +97,9 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id,
spin_lock_irqsave(&ctrl->lock, flags);
old = sc->id;
for (;;) {
for (; offset < end; offset++, sc++) {
VM_BUG_ON(sc->id != old);
sc->id = id;
offset++;
if (offset == end)
break;
if (offset % SC_PER_PAGE)
sc++;
else
sc = __lookup_swap_cgroup(ctrl, offset);
}
spin_unlock_irqrestore(&ctrl->lock, flags);