@@ -842,20 +842,15 @@ EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);
 #endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
 
 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
-		pmd_t *pmd)
+		pmd_t *pmd, int flags)
 {
 	pmd_t _pmd;
 
-	/*
-	 * We should set the dirty bit only for FOLL_WRITE but for now
-	 * the dirty bit in the pmd is meaningless. And if the dirty
-	 * bit will become meaningful and we'll only set it with
-	 * FOLL_WRITE, an atomic set_bit will be required on the pmd to
-	 * set the young bit, instead of the current set_pmd_at.
-	 */
-	_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
+	_pmd = pmd_mkyoung(*pmd);
+	if (flags & FOLL_WRITE)
+		_pmd = pmd_mkdirty(_pmd);
 	if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
-			pmd, _pmd, 1))
+			pmd, _pmd, flags & FOLL_WRITE))
 		update_mmu_cache_pmd(vma, addr, pmd);
 }
@@ -884,7 +879,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 		return NULL;
 
 	if (flags & FOLL_TOUCH)
-		touch_pmd(vma, addr, pmd);
+		touch_pmd(vma, addr, pmd, flags);
 
 	/*
 	 * device mapped pages can only be returned if the
@@ -973,20 +968,15 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
-		pud_t *pud)
+		pud_t *pud, int flags)
 {
 	pud_t _pud;
 
-	/*
-	 * We should set the dirty bit only for FOLL_WRITE but for now
-	 * the dirty bit in the pud is meaningless. And if the dirty
-	 * bit will become meaningful and we'll only set it with
-	 * FOLL_WRITE, an atomic set_bit will be required on the pud to
-	 * set the young bit, instead of the current set_pud_at.
-	 */
-	_pud = pud_mkyoung(pud_mkdirty(*pud));
+	_pud = pud_mkyoung(*pud);
+	if (flags & FOLL_WRITE)
+		_pud = pud_mkdirty(_pud);
 	if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
-			pud, _pud, 1))
+			pud, _pud, flags & FOLL_WRITE))
 		update_mmu_cache_pud(vma, addr, pud);
 }
@@ -1009,7 +999,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
 		return NULL;
 
 	if (flags & FOLL_TOUCH)
-		touch_pud(vma, addr, pud);
+		touch_pud(vma, addr, pud, flags);
 
 	/*
 	 * device mapped pages can only be returned if the
@@ -1371,7 +1361,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 	page = pmd_page(*pmd);
 	VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
 	if (flags & FOLL_TOUCH)
-		touch_pmd(vma, addr, pmd);
+		touch_pmd(vma, addr, pmd, flags);
 	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
 		/*
 		 * We don't mlock() pte-mapped THPs. This way we can avoid
0 commit comments