[PATCH 1/2] mm: Allow architectures to request 'old' entries when prefaulting
Kirill A. Shutemov
kirill at shutemov.name
Sat Dec 26 17:40:16 EST 2020
On Sat, Dec 26, 2020 at 01:16:09PM -0800, Linus Torvalds wrote:
> On Sat, Dec 26, 2020 at 1:04 PM Hugh Dickins <hughd at google.com> wrote:
> >
> > Hold on. I guess this one will suffer from the same bug as the previous.
> > I was about to report back, after satisfactory overnight testing of that
> > version - provided that one big little bug is fixed:
> >
> > --- a/mm/filemap.c
> > +++ b/mm/filemap.c
> > @@ -2919,7 +2919,7 @@ static bool filemap_map_pmd(struct vm_fa
> >
> >  	if (pmd_none(*vmf->pmd) &&
> >  	    PageTransHuge(page) &&
> > -	    do_set_pmd(vmf, page)) {
> > +	    do_set_pmd(vmf, page) == 0) {
> >  		unlock_page(page);
> >  		return true;
> >  	}
>
> I missed that entirely: just reading the patch it looks fine, and I
> didn't look at what the do_set_pmd() function returns outside the
> patch.
>
> And maybe it would be better to write it as
>
>     if (pmd_none(*vmf->pmd) && PageTransHuge(page)) {
>             vm_fault_t ret = do_set_pmd(vmf, page);
>             if (!ret) {
>                     ...
>
> instead to make it a bit more explicit about how that return value is
> a vm_fault_t there...
>
> And see my other email about how I suspect there is still a leak in
> that patch for the previous test-case.
Ughh...
Here's the fixup I have so far. It doesn't blow up immediately, but please
take a closer look. Who knows what stupid mistake I made this time. :/
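
To recap the two problems: do_set_pmd() returns a vm_fault_t, where 0 means
success, so the old "do_set_pmd(vmf, page)" condition took the "page is
mapped, we're done" branch only on failure; and the pmd_trans_huge() fast
path returned true without dropping the page lock and the page reference
the caller still holds. A minimal userspace sketch of the first pitfall,
with fake_do_set_pmd() as a stand-in (VM_FAULT_FALLBACK's value is copied
from the kernel headers; everything else is illustration only, not kernel
code):

#include <stdio.h>

typedef unsigned int vm_fault_t;	/* 0 == success, as in the kernel */
#define VM_FAULT_FALLBACK 0x000800

/* Stand-in for do_set_pmd(): 0 on success, an error code otherwise. */
static vm_fault_t fake_do_set_pmd(int succeed)
{
	return succeed ? 0 : VM_FAULT_FALLBACK;
}

int main(void)
{
	/*
	 * Buggy test, as in the earlier patch: the branch is taken when
	 * do_set_pmd() *fails*, and a successful (0) return falls through.
	 */
	if (fake_do_set_pmd(1))
		printf("buggy check: branch taken, i.e. do_set_pmd() failed\n");
	else
		printf("buggy check: success fell through, skipping the unlock/return\n");

	/* Correct test: compare the vm_fault_t against 0 explicitly. */
	if (fake_do_set_pmd(1) == 0)
		printf("correct check: success takes the 'page is mapped' branch\n");
	return 0;
}
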
diff --git a/mm/filemap.c b/mm/filemap.c
index 3a92aaa59b9b..c4b374678e7d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2837,16 +2837,21 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page)
 	struct mm_struct *mm = vmf->vma->vm_mm;
 
 	/* Huge page is mapped? No need to proceed. */
-	if (pmd_trans_huge(*vmf->pmd))
-		return true;
-
-	if (pmd_none(*vmf->pmd) &&
-	    PageTransHuge(page) &&
-	    do_set_pmd(vmf, page)) {
+	if (pmd_trans_huge(*vmf->pmd)) {
 		unlock_page(page);
+		put_page(page);
 		return true;
 	}
 
+	if (pmd_none(*vmf->pmd) && PageTransHuge(page)) {
+		vm_fault_t ret = do_set_pmd(vmf, page);
+		if (!ret) {
+			/* The page is mapped successfully, reference consumed. */
+			unlock_page(page);
+			return true;
+		}
+	}
+
 	if (pmd_none(*vmf->pmd)) {
 		vmf->ptl = pmd_lock(mm, vmf->pmd);
 		if (likely(pmd_none(*vmf->pmd))) {
@@ -2867,7 +2872,7 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page)
 	return false;
 }
 
-static struct page *next_stable_page(struct page *page, struct vm_fault *vmf,
+static struct page *next_uptodate_page(struct page *page, struct vm_fault *vmf,
 				     struct xa_state *xas, pgoff_t end_pgoff)
 {
 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
@@ -2914,15 +2919,16 @@ static inline struct page *first_map_page(struct vm_fault *vmf,
 					  struct xa_state *xas,
 					  pgoff_t end_pgoff)
 {
-	return next_stable_page(xas_find(xas, end_pgoff), vmf, xas, end_pgoff);
+	return next_uptodate_page(xas_find(xas, end_pgoff),
+				  vmf, xas, end_pgoff);
 }
 
 static inline struct page *next_map_page(struct vm_fault *vmf,
 					 struct xa_state *xas,
 					 pgoff_t end_pgoff)
 {
-	return next_stable_page(xas_next_entry(xas, end_pgoff),
-				vmf, xas, end_pgoff);
+	return next_uptodate_page(xas_next_entry(xas, end_pgoff),
+				  vmf, xas, end_pgoff);
 }
 
 void filemap_map_pages(struct vm_fault *vmf,
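
For reference, filemap_map_pmd() with the fixup applied should read roughly
as follows. This is reconstructed from the hunks above; the parts outside
the hunk context are elided, since they are unchanged here:

static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page)
{
	struct mm_struct *mm = vmf->vma->vm_mm;

	/* Huge page is mapped? No need to proceed. */
	if (pmd_trans_huge(*vmf->pmd)) {
		unlock_page(page);
		put_page(page);
		return true;
	}

	if (pmd_none(*vmf->pmd) && PageTransHuge(page)) {
		vm_fault_t ret = do_set_pmd(vmf, page);
		if (!ret) {
			/* The page is mapped successfully, reference consumed. */
			unlock_page(page);
			return true;
		}
	}

	if (pmd_none(*vmf->pmd)) {
		vmf->ptl = pmd_lock(mm, vmf->pmd);
		if (likely(pmd_none(*vmf->pmd))) {
			/* ... unchanged by this fixup ... */
		}
		/* ... */
	}

	/* ... unchanged tail ... */
	return false;
}

The point is the lock/reference discipline: both return-true paths shown
either drop the page lock and the reference themselves, or, when
do_set_pmd() succeeds and consumes the reference, only need the unlock.
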
--
Kirill A. Shutemov