mm: make vmg->target consistent and further simplify commit_merge()
It is confusing for vmg->target to sometimes be the target merged VMA and in one case not. Fix this by having commit_merge() use its awareness of the vmg->_adjust_next_start case to know that it is manipulating a separate vma, abstracted in the 'vma' local variable.

Place removal and adjust VMA determination logic into init_multi_vma_prep(), as the flags give us enough information to do so, and since this is the function that sets up the vma_prepare struct it makes sense to do so here.

Doing this significantly simplifies commit_merge(), allowing us to eliminate the 'merge_target' handling, initialise the VMA iterator in a more sensible place and simply return vmg->target consistently.

This also allows us to simplify setting vmg->target in vma_merge_existing_range() since we are then left only with two cases - merge left (or both) where the target is vmg->prev, or merge right in which the target is vmg->next. This makes it easy for somebody reading the code to know what VMA will actually be the one returned and merged into, and removes a great deal of the confusing 'adjust' handling.

This patch has no change in functional behaviour.

Link: https://lkml.kernel.org/r/50f96e31ab1980eaaf1006e34a4f6e6dad9320b8.1738326519.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent
fe3e9cf0d7
commit
0e5ffe9b2b
117
mm/vma.c
117
mm/vma.c
|
|
@ -106,24 +106,40 @@ static inline bool are_anon_vmas_compatible(struct vm_area_struct *vma1,
|
||||||
* init_multi_vma_prep() - Initializer for struct vma_prepare
|
* init_multi_vma_prep() - Initializer for struct vma_prepare
|
||||||
* @vp: The vma_prepare struct
|
* @vp: The vma_prepare struct
|
||||||
* @vma: The vma that will be altered once locked
|
* @vma: The vma that will be altered once locked
|
||||||
* @next: The next vma if it is to be adjusted
|
* @vmg: The merge state that will be used to determine adjustment and VMA
|
||||||
* @remove: The first vma to be removed
|
* removal.
|
||||||
* @remove2: The second vma to be removed
|
|
||||||
*/
|
*/
|
||||||
static void init_multi_vma_prep(struct vma_prepare *vp,
|
static void init_multi_vma_prep(struct vma_prepare *vp,
|
||||||
struct vm_area_struct *vma,
|
struct vm_area_struct *vma,
|
||||||
struct vm_area_struct *next,
|
struct vma_merge_struct *vmg)
|
||||||
struct vm_area_struct *remove,
|
|
||||||
struct vm_area_struct *remove2)
|
|
||||||
{
|
{
|
||||||
|
struct vm_area_struct *adjust;
|
||||||
|
struct vm_area_struct **remove = &vp->remove;
|
||||||
|
|
||||||
memset(vp, 0, sizeof(struct vma_prepare));
|
memset(vp, 0, sizeof(struct vma_prepare));
|
||||||
vp->vma = vma;
|
vp->vma = vma;
|
||||||
vp->anon_vma = vma->anon_vma;
|
vp->anon_vma = vma->anon_vma;
|
||||||
vp->remove = remove ? remove : remove2;
|
|
||||||
vp->remove2 = remove ? remove2 : NULL;
|
if (vmg && vmg->__remove_middle) {
|
||||||
vp->adj_next = next;
|
*remove = vmg->middle;
|
||||||
if (!vp->anon_vma && next)
|
remove = &vp->remove2;
|
||||||
vp->anon_vma = next->anon_vma;
|
}
|
||||||
|
if (vmg && vmg->__remove_next)
|
||||||
|
*remove = vmg->next;
|
||||||
|
|
||||||
|
if (vmg && vmg->__adjust_middle_start)
|
||||||
|
adjust = vmg->middle;
|
||||||
|
else if (vmg && vmg->__adjust_next_start)
|
||||||
|
adjust = vmg->next;
|
||||||
|
else
|
||||||
|
adjust = NULL;
|
||||||
|
|
||||||
|
vp->adj_next = adjust;
|
||||||
|
if (!vp->anon_vma && adjust)
|
||||||
|
vp->anon_vma = adjust->anon_vma;
|
||||||
|
|
||||||
|
VM_WARN_ON(vp->anon_vma && adjust && adjust->anon_vma &&
|
||||||
|
vp->anon_vma != adjust->anon_vma);
|
||||||
|
|
||||||
vp->file = vma->vm_file;
|
vp->file = vma->vm_file;
|
||||||
if (vp->file)
|
if (vp->file)
|
||||||
|
|
@ -360,7 +376,7 @@ again:
|
||||||
*/
|
*/
|
||||||
static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma)
|
static void init_vma_prep(struct vma_prepare *vp, struct vm_area_struct *vma)
|
||||||
{
|
{
|
||||||
init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
|
init_multi_vma_prep(vp, vma, NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
@ -634,76 +650,63 @@ void validate_mm(struct mm_struct *mm)
|
||||||
*/
|
*/
|
||||||
static struct vm_area_struct *commit_merge(struct vma_merge_struct *vmg)
|
static struct vm_area_struct *commit_merge(struct vma_merge_struct *vmg)
|
||||||
{
|
{
|
||||||
struct vm_area_struct *remove = NULL;
|
struct vm_area_struct *vma;
|
||||||
struct vm_area_struct *remove2 = NULL;
|
|
||||||
struct vma_prepare vp;
|
struct vma_prepare vp;
|
||||||
struct vm_area_struct *adjust = NULL;
|
struct vm_area_struct *adjust;
|
||||||
long adj_start;
|
long adj_start;
|
||||||
bool merge_target;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If modifying an existing VMA and we don't remove vmg->middle, then we
|
* If modifying an existing VMA and we don't remove vmg->middle, then we
|
||||||
* shrink the adjacent VMA.
|
* shrink the adjacent VMA.
|
||||||
*/
|
*/
|
||||||
if (vmg->__adjust_middle_start) {
|
if (vmg->__adjust_middle_start) {
|
||||||
|
vma = vmg->target;
|
||||||
adjust = vmg->middle;
|
adjust = vmg->middle;
|
||||||
/* The POSITIVE value by which we offset vmg->middle->vm_start. */
|
/* The POSITIVE value by which we offset vmg->middle->vm_start. */
|
||||||
adj_start = vmg->end - vmg->middle->vm_start;
|
adj_start = vmg->end - vmg->middle->vm_start;
|
||||||
merge_target = true;
|
|
||||||
|
/* Note: vma iterator must be pointing to 'start'. */
|
||||||
|
vma_iter_config(vmg->vmi, vmg->start, vmg->end);
|
||||||
} else if (vmg->__adjust_next_start) {
|
} else if (vmg->__adjust_next_start) {
|
||||||
|
/*
|
||||||
|
* In this case alone, the VMA we manipulate is vmg->middle, but
|
||||||
|
* we ultimately return vmg->next.
|
||||||
|
*/
|
||||||
|
vma = vmg->middle;
|
||||||
adjust = vmg->next;
|
adjust = vmg->next;
|
||||||
/* The NEGATIVE value by which we offset vmg->next->vm_start. */
|
/* The NEGATIVE value by which we offset vmg->next->vm_start. */
|
||||||
adj_start = -(vmg->middle->vm_end - vmg->end);
|
adj_start = -(vmg->middle->vm_end - vmg->end);
|
||||||
/*
|
|
||||||
* In all cases but this - merge right, shrink next - we write
|
vma_iter_config(vmg->vmi, vmg->next->vm_start + adj_start,
|
||||||
* vmg->target to the maple tree and return this as the merged VMA.
|
vmg->next->vm_end);
|
||||||
*/
|
|
||||||
merge_target = false;
|
|
||||||
} else {
|
} else {
|
||||||
|
vma = vmg->target;
|
||||||
adjust = NULL;
|
adjust = NULL;
|
||||||
adj_start = 0;
|
adj_start = 0;
|
||||||
merge_target = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (vmg->__remove_middle)
|
|
||||||
remove = vmg->middle;
|
|
||||||
if (vmg->__remove_next)
|
|
||||||
remove2 = vmg->next;
|
|
||||||
|
|
||||||
init_multi_vma_prep(&vp, vmg->target, adjust, remove, remove2);
|
|
||||||
|
|
||||||
VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
|
|
||||||
vp.anon_vma != adjust->anon_vma);
|
|
||||||
|
|
||||||
if (merge_target) {
|
|
||||||
/* Note: vma iterator must be pointing to 'start'. */
|
/* Note: vma iterator must be pointing to 'start'. */
|
||||||
vma_iter_config(vmg->vmi, vmg->start, vmg->end);
|
vma_iter_config(vmg->vmi, vmg->start, vmg->end);
|
||||||
} else {
|
|
||||||
vma_iter_config(vmg->vmi, adjust->vm_start + adj_start,
|
|
||||||
adjust->vm_end);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (vma_iter_prealloc(vmg->vmi, vmg->target))
|
init_multi_vma_prep(&vp, vma, vmg);
|
||||||
|
|
||||||
|
if (vma_iter_prealloc(vmg->vmi, vma))
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
vma_prepare(&vp);
|
vma_prepare(&vp);
|
||||||
vma_adjust_trans_huge(vmg->target, vmg->start, vmg->end, adj_start);
|
vma_adjust_trans_huge(vma, vmg->start, vmg->end, adj_start);
|
||||||
vma_set_range(vmg->target, vmg->start, vmg->end, vmg->pgoff);
|
vma_set_range(vma, vmg->start, vmg->end, vmg->pgoff);
|
||||||
|
|
||||||
if (merge_target)
|
|
||||||
vma_iter_store(vmg->vmi, vmg->target);
|
|
||||||
|
|
||||||
if (adj_start) {
|
if (adj_start) {
|
||||||
adjust->vm_start += adj_start;
|
adjust->vm_start += adj_start;
|
||||||
adjust->vm_pgoff += PHYS_PFN(adj_start);
|
adjust->vm_pgoff += PHYS_PFN(adj_start);
|
||||||
|
|
||||||
if (!merge_target)
|
|
||||||
vma_iter_store(vmg->vmi, adjust);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
vma_complete(&vp, vmg->vmi, vmg->target->vm_mm);
|
vma_iter_store(vmg->vmi, vmg->target);
|
||||||
|
|
||||||
return merge_target ? vmg->target : vmg->next;
|
vma_complete(&vp, vmg->vmi, vma->vm_mm);
|
||||||
|
|
||||||
|
return vmg->target;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* We can only remove VMAs when merging if they do not have a close hook. */
|
/* We can only remove VMAs when merging if they do not have a close hook. */
|
||||||
|
|
@ -833,11 +836,15 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
|
||||||
/* No matter what happens, we will be adjusting middle. */
|
/* No matter what happens, we will be adjusting middle. */
|
||||||
vma_start_write(middle);
|
vma_start_write(middle);
|
||||||
|
|
||||||
if (merge_left)
|
if (merge_right) {
|
||||||
vma_start_write(prev);
|
|
||||||
|
|
||||||
if (merge_right)
|
|
||||||
vma_start_write(next);
|
vma_start_write(next);
|
||||||
|
vmg->target = next;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (merge_left) {
|
||||||
|
vma_start_write(prev);
|
||||||
|
vmg->target = prev;
|
||||||
|
}
|
||||||
|
|
||||||
if (merge_both) {
|
if (merge_both) {
|
||||||
/*
|
/*
|
||||||
|
|
@ -847,7 +854,6 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
|
||||||
* extend delete delete
|
* extend delete delete
|
||||||
*/
|
*/
|
||||||
|
|
||||||
vmg->target = prev;
|
|
||||||
vmg->start = prev->vm_start;
|
vmg->start = prev->vm_start;
|
||||||
vmg->end = next->vm_end;
|
vmg->end = next->vm_end;
|
||||||
vmg->pgoff = prev->vm_pgoff;
|
vmg->pgoff = prev->vm_pgoff;
|
||||||
|
|
@ -868,7 +874,6 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
|
||||||
* extend shrink/delete
|
* extend shrink/delete
|
||||||
*/
|
*/
|
||||||
|
|
||||||
vmg->target = prev;
|
|
||||||
vmg->start = prev->vm_start;
|
vmg->start = prev->vm_start;
|
||||||
vmg->pgoff = prev->vm_pgoff;
|
vmg->pgoff = prev->vm_pgoff;
|
||||||
|
|
||||||
|
|
@ -892,7 +897,6 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
|
||||||
VM_WARN_ON_VMG(vmg->start > middle->vm_start && prev && middle != prev, vmg);
|
VM_WARN_ON_VMG(vmg->start > middle->vm_start && prev && middle != prev, vmg);
|
||||||
|
|
||||||
if (vmg->__remove_middle) {
|
if (vmg->__remove_middle) {
|
||||||
vmg->target = next;
|
|
||||||
vmg->end = next->vm_end;
|
vmg->end = next->vm_end;
|
||||||
vmg->pgoff = next->vm_pgoff - pglen;
|
vmg->pgoff = next->vm_pgoff - pglen;
|
||||||
} else {
|
} else {
|
||||||
|
|
@ -903,7 +907,6 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
|
||||||
* merged VMA is NOT vmg->target, but rather vmg->next.
|
* merged VMA is NOT vmg->target, but rather vmg->next.
|
||||||
*/
|
*/
|
||||||
vmg->__adjust_next_start = true;
|
vmg->__adjust_next_start = true;
|
||||||
vmg->target = middle;
|
|
||||||
vmg->start = middle->vm_start;
|
vmg->start = middle->vm_start;
|
||||||
vmg->end = start;
|
vmg->end = start;
|
||||||
vmg->pgoff = middle->vm_pgoff;
|
vmg->pgoff = middle->vm_pgoff;
|
||||||
|
|
|
||||||
6
mm/vma.h
6
mm/vma.h
|
|
@ -82,11 +82,7 @@ struct vma_merge_struct {
|
||||||
struct vm_area_struct *prev;
|
struct vm_area_struct *prev;
|
||||||
struct vm_area_struct *middle;
|
struct vm_area_struct *middle;
|
||||||
struct vm_area_struct *next;
|
struct vm_area_struct *next;
|
||||||
/*
|
/* This is the VMA we ultimately target to become the merged VMA. */
|
||||||
* This is the VMA we ultimately target to become the merged VMA, except
|
|
||||||
* for the one exception of merge right, shrink next (for details of
|
|
||||||
* this scenario see vma_merge_existing_range()).
|
|
||||||
*/
|
|
||||||
struct vm_area_struct *target;
|
struct vm_area_struct *target;
|
||||||
/*
|
/*
|
||||||
* Initially, the start, end, pgoff fields are provided by the caller
|
* Initially, the start, end, pgoff fields are provided by the caller
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue