
Commit 4fbc764

Yang Shi authored and gregkh committed
mm: mempolicy: keep VMA walk if both MPOL_MF_STRICT and MPOL_MF_MOVE are specified
commit 2452626 upstream.

When calling mbind() with MPOL_MF_{MOVE|MOVEALL} | MPOL_MF_STRICT, the kernel should attempt to migrate all existing pages, and return -EIO if there is a misplaced or unmovable page. Then commit 6f4576e ("mempolicy: apply page table walker on queue_pages_range()") messed up the return value and no longer broke the VMA scan early when MPOL_MF_STRICT alone was specified. The return value problem was fixed by commit a7f40cf ("mm: mempolicy: make mbind() return -EIO when MPOL_MF_STRICT is specified"), but that commit made the VMA walk break early when an unmovable page was met, which may cause some pages not to be migrated as expected.

The code should conceptually do:

 if (MPOL_MF_MOVE|MOVEALL)
     scan all vmas
     try to migrate the existing pages
     return success
 else if (MPOL_MF_MOVE* | MPOL_MF_STRICT)
     scan all vmas
     try to migrate the existing pages
     return -EIO if unmovable or migration failed
 else /* MPOL_MF_STRICT alone */
     break early if meets unmovable and don't call mbind_range() at all
 else /* none of those flags */
     check the ranges in test_walk, EFAULT without mbind_range() if discontig.

Fixed the behavior.

Link: https://lkml.kernel.org/r/[email protected]
Fixes: a7f40cf ("mm: mempolicy: make mbind() return -EIO when MPOL_MF_STRICT is specified")
Signed-off-by: Yang Shi <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Suren Baghdasaryan <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Oscar Salvador <[email protected]>
Cc: Rafael Aquini <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: <[email protected]> [4.9+]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
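To make the flag semantics concrete, here is a minimal userspace sketch of the kind of mbind() call this fix concerns. It is not part of the commit; the mapping size and target node are illustrative assumptions, and it builds against the libnuma headers (link with -lnuma):

#include <numaif.h>		/* mbind(), MPOL_BIND, MPOL_MF_* */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4UL << 20;	/* 4 MiB range, illustrative */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(buf, 0, len);	/* fault the pages in somewhere first */

	unsigned long nodemask = 1UL << 0;	/* bind to node 0 (assumed) */

	/*
	 * MPOL_MF_MOVE | MPOL_MF_STRICT: the kernel must try to migrate
	 * every existing page in [buf, buf + len) and return -EIO if any
	 * page is misplaced or unmovable.  The bug fixed here made the
	 * walk stop at the first unmovable page, leaving later pages
	 * unmigrated while still returning -EIO.
	 */
	if (mbind(buf, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8,
		  MPOL_MF_MOVE | MPOL_MF_STRICT))
		perror("mbind");	/* EIO: misplaced/unmovable page */

	return 0;
}

Before this fix, such a call could return -EIO after migrating only part of the range; with it, the whole range is walked and migration is attempted everywhere before the error is reported.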
1 parent 23264bd commit 4fbc764

File tree: 1 file changed, +19 −20 lines changed

Diff for: mm/mempolicy.c

@@ -426,6 +426,7 @@ struct queue_pages {
 	unsigned long start;
 	unsigned long end;
 	struct vm_area_struct *first;
+	bool has_unmovable;
 };
 
 /*
@@ -446,9 +447,8 @@ static inline bool queue_folio_required(struct folio *folio,
 /*
  * queue_folios_pmd() has three possible return values:
  * 0 - folios are placed on the right node or queued successfully, or
- *        special page is met, i.e. huge zero page.
- * 1 - there is unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
- *        specified.
+ *        special page is met, i.e. zero page, or unmovable page is found
+ *        but continue walking (indicated by queue_pages.has_unmovable).
  * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
  *        existing folio was already on a node that does not follow the
  *        policy.
@@ -479,7 +479,7 @@ static int queue_folios_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
 		if (!vma_migratable(walk->vma) ||
 		    migrate_folio_add(folio, qp->pagelist, flags)) {
-			ret = 1;
+			qp->has_unmovable = true;
 			goto unlock;
 		}
 	} else
@@ -495,9 +495,8 @@ static int queue_folios_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
  *
  * queue_folios_pte_range() has three possible return values:
  * 0 - folios are placed on the right node or queued successfully, or
- *        special page is met, i.e. zero page.
- * 1 - there is unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
- *        specified.
+ *        special page is met, i.e. zero page, or unmovable page is found
+ *        but continue walking (indicated by queue_pages.has_unmovable).
  * -EIO - only MPOL_MF_STRICT was specified and an existing folio was already
  *        on a node that does not follow the policy.
  */
@@ -508,7 +507,6 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
 	struct folio *folio;
 	struct queue_pages *qp = walk->private;
 	unsigned long flags = qp->flags;
-	bool has_unmovable = false;
 	pte_t *pte, *mapped_pte;
 	pte_t ptent;
 	spinlock_t *ptl;
@@ -538,28 +536,26 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
 		if (!queue_folio_required(folio, qp))
 			continue;
 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
-			/* MPOL_MF_STRICT must be specified if we get here */
-			if (!vma_migratable(vma)) {
-				has_unmovable = true;
-				break;
-			}
+			/*
+			 * MPOL_MF_STRICT must be specified if we get here.
+			 * Continue walking vmas due to MPOL_MF_MOVE* flags.
+			 */
+			if (!vma_migratable(vma))
+				qp->has_unmovable = true;
 
 			/*
 			 * Do not abort immediately since there may be
 			 * temporary off LRU pages in the range.  Still
 			 * need migrate other LRU pages.
 			 */
 			if (migrate_folio_add(folio, qp->pagelist, flags))
-				has_unmovable = true;
+				qp->has_unmovable = true;
 		} else
 			break;
 	}
 	pte_unmap_unlock(mapped_pte, ptl);
 	cond_resched();
 
-	if (has_unmovable)
-		return 1;
-
 	return addr != end ? -EIO : 0;
 }
 
@@ -599,7 +595,7 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
 		 * Detecting misplaced folio but allow migrating folios which
 		 * have been queued.
 		 */
-		ret = 1;
+		qp->has_unmovable = true;
 		goto unlock;
 	}
 
@@ -620,7 +616,7 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
 		 * Failed to isolate folio but allow migrating pages
 		 * which have been queued.
 		 */
-		ret = 1;
+		qp->has_unmovable = true;
 	}
 unlock:
 	spin_unlock(ptl);
@@ -756,12 +752,15 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 		.start = start,
 		.end = end,
 		.first = NULL,
+		.has_unmovable = false,
 	};
 	const struct mm_walk_ops *ops = lock_vma ?
 			&queue_pages_lock_vma_walk_ops : &queue_pages_walk_ops;
 
 	err = walk_page_range(mm, start, end, ops, &qp);
 
+	if (qp.has_unmovable)
+		err = 1;
 	if (!qp.first)
 		/* whole range in hole */
 		err = -EFAULT;
@@ -1358,7 +1357,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 			putback_movable_pages(&pagelist);
 		}
 
-		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
+		if (((ret > 0) || nr_failed) && (flags & MPOL_MF_STRICT))
 			err = -EIO;
 	} else {
 up_out:
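For readers tracing the new plumbing end to end, below is a small standalone toy model (userspace C, not kernel code) of the return-value flow the diff establishes. The flag values, the fake page list, and the helper names are assumptions made purely for illustration:

/*
 * Toy model of the fixed plumbing: the walk records unmovable pages in
 * qp->has_unmovable instead of returning 1 and terminating early, so
 * every "VMA" is still scanned; -EIO is produced only at the end, and
 * only when MPOL_MF_STRICT was requested.
 */
#include <stdbool.h>
#include <stdio.h>

#define MPOL_MF_STRICT	(1 << 0)
#define MPOL_MF_MOVE	(1 << 1)
#define EIO		5

struct queue_pages {
	unsigned long flags;
	bool has_unmovable;	/* set during the walk, read afterwards */
};

/* Stand-in for the page walk: visit every page, never break early. */
static int walk_all_pages(struct queue_pages *qp, const bool *movable,
			  int npages)
{
	for (int i = 0; i < npages; i++)
		if (!movable[i])
			qp->has_unmovable = true;	/* note it, keep going */
	return 0;
}

static int queue_pages_range(struct queue_pages *qp, const bool *movable,
			     int npages)
{
	int err = walk_all_pages(qp, movable, npages);

	if (qp->has_unmovable)
		err = 1;	/* positive: walk finished, pages left behind */
	return err;
}

static long do_mbind(unsigned long flags, const bool *movable, int npages)
{
	struct queue_pages qp = { .flags = flags, .has_unmovable = false };
	int ret = queue_pages_range(&qp, movable, npages);
	int nr_failed = 0;	/* pretend queued migrations all succeeded */

	/* The fixed condition: -EIO only under MPOL_MF_STRICT. */
	if (((ret > 0) || nr_failed) && (qp.flags & MPOL_MF_STRICT))
		return -EIO;
	return 0;
}

int main(void)
{
	const bool pages[] = { true, false, true };	/* one unmovable page */

	printf("MOVE alone:  %ld\n", do_mbind(MPOL_MF_MOVE, pages, 3));
	printf("MOVE|STRICT: %ld\n",
	       do_mbind(MPOL_MF_MOVE | MPOL_MF_STRICT, pages, 3));
	return 0;
}

The MOVE-alone call reports success even with an unmovable page in the range, while MOVE|STRICT reports -EIO, and in both cases the whole range was scanned: exactly the conceptual table from the commit message.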
