powerpc/mm: Change function prototype
author    Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Tue, 29 May 2018 14:28:40 +0000 (19:58 +0530)
committer Michael Ellerman <mpe@ellerman.id.au>
Sun, 3 Jun 2018 10:40:34 +0000 (20:40 +1000)
In a later patch, we use the vma and psize to do the TLB flush. Do the
prototype update in a separate patch to make the review easier.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/book3s/32/pgtable.h
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/book3s/64/radix.h
arch/powerpc/include/asm/nohash/32/pgtable.h
arch/powerpc/include/asm/nohash/64/pgtable.h
arch/powerpc/mm/pgtable-book3s64.c
arch/powerpc/mm/pgtable-radix.c
arch/powerpc/mm/pgtable.c

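Before the per-file changes, a minimal sketch (not part of this commit) of what the new
arguments are for: with the vma and psize threaded through, the radix code in the later
patch can flush exactly the mapping whose access bits were relaxed. The helper
radix__flush_tlb_page_psize() and the flush placement below are assumptions based on the
surrounding radix TLB code, not something this commit adds.

/*
 * Sketch only -- not part of this commit.  With the vma and psize
 * available, the radix side can flush precisely the mapping whose
 * access bits were relaxed, instead of relying on the generic
 * flush_tlb_page() done by the caller.  radix__flush_tlb_page_psize()
 * is assumed to exist with this signature in the same tree.
 */
static void sketch_radix_relax_access(struct vm_area_struct *vma, pte_t *ptep,
				      pte_t entry, unsigned long address,
				      int psize)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long set = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

	/* Set the new dirty/accessed/permission bits in the PTE ... */
	__radix_pte_update(ptep, 0, set);

	/*
	 * ... then invalidate the stale translation for exactly this
	 * page size; this is what the vma/psize plumbing enables.
	 */
	radix__flush_tlb_page_psize(mm, address, psize);
}
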
arch/powerpc/include/asm/book3s/32/pgtable.h
index c615abdce119ea34ff6c33d02109cd700036db64..39d3a4245694a0d93d748e11e059edea1be87639 100644
@@ -235,9 +235,10 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 }
 
 
-static inline void __ptep_set_access_flags(struct mm_struct *mm,
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
                                           pte_t *ptep, pte_t entry,
-                                          unsigned long address)
+                                          unsigned long address,
+                                          int psize)
 {
        unsigned long set = pte_val(entry) &
                (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
arch/powerpc/include/asm/book3s/64/pgtable.h
index c233915abb68ea7698950cf17b14d9e31fc2ed06..42fe7c2ff2df9f8a86142b3b614f07113842becb 100644
@@ -767,12 +767,14 @@ static inline bool check_pte_access(unsigned long access, unsigned long ptev)
  * Generic functions with hash/radix callbacks
  */
 
-static inline void __ptep_set_access_flags(struct mm_struct *mm,
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
                                           pte_t *ptep, pte_t entry,
-                                          unsigned long address)
+                                          unsigned long address,
+                                          int psize)
 {
        if (radix_enabled())
-               return radix__ptep_set_access_flags(mm, ptep, entry, address);
+               return radix__ptep_set_access_flags(vma, ptep, entry,
+                                                   address, psize);
        return hash__ptep_set_access_flags(ptep, entry);
 }
 
arch/powerpc/include/asm/book3s/64/radix.h
index 36ed025b4e13930634435e4e183258da76f85444..62a73a7a78a40f43cb9f6d25cd9a25bfd20bbe30 100644
@@ -124,8 +124,9 @@ extern void radix__mark_rodata_ro(void);
 extern void radix__mark_initmem_nx(void);
 #endif
 
-extern void radix__ptep_set_access_flags(struct mm_struct *mm, pte_t *ptep,
-                                        pte_t entry, unsigned long address);
+extern void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
+                                        pte_t entry, unsigned long address,
+                                        int psize);
 
 static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
                                               unsigned long set)
arch/powerpc/include/asm/nohash/32/pgtable.h
index 987a658b18e1cdad60ba515284b173a25ad0684a..c2471bac86b9de46c4c3de287816d58c8ed60ab5 100644
@@ -256,9 +256,10 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 }
 
 
-static inline void __ptep_set_access_flags(struct mm_struct *mm,
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
                                           pte_t *ptep, pte_t entry,
-                                          unsigned long address)
+                                          unsigned long address,
+                                          int psize)
 {
        unsigned long set = pte_val(entry) &
                (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
arch/powerpc/include/asm/nohash/64/pgtable.h
index de78eda5f84143b7d55f977c31198d77569311d7..180161d714fb2f7eb7e92af14fc29a1a3e4e8cf3 100644
@@ -281,9 +281,10 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 /* Set the dirty and/or accessed bits atomically in a linux PTE, this
  * function doesn't need to flush the hash entry
  */
-static inline void __ptep_set_access_flags(struct mm_struct *mm,
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
                                           pte_t *ptep, pte_t entry,
-                                          unsigned long address)
+                                          unsigned long address,
+                                          int psize)
 {
        unsigned long bits = pte_val(entry) &
                (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
arch/powerpc/mm/pgtable-book3s64.c
index abda2b92f1baab4831d8f604cb9c0687371646c0..4a8150481a8893781478374fdad69c36a7bacf73 100644
@@ -46,8 +46,12 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 #endif
        changed = !pmd_same(*(pmdp), entry);
        if (changed) {
-               __ptep_set_access_flags(vma->vm_mm, pmdp_ptep(pmdp),
-                                       pmd_pte(entry), address);
+               /*
+                * We can use MMU_PAGE_2M here, because only the radix
+                * path looks at the psize.
+                */
+               __ptep_set_access_flags(vma, pmdp_ptep(pmdp),
+                                       pmd_pte(entry), address, MMU_PAGE_2M);
                flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }
        return changed;
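A short note on the MMU_PAGE_2M choice above: the hash implementation ignores the psize
argument, and on radix a transparent hugepage PMD always maps one 2M region, so
MMU_PAGE_2M indexes the right entry of the page-size table for the eventual flush. A tiny
illustration follows (assumption for clarity, not part of this diff; mmu_psize_defs and
HPAGE_PMD_SIZE are the existing kernel symbols).

/*
 * Illustration only: on radix, mmu_psize_defs[MMU_PAGE_2M].shift is 21,
 * so a flush keyed on MMU_PAGE_2M covers exactly one PMD-sized THP
 * mapping.
 */
static unsigned long sketch_thp_flush_size(void)
{
	return 1UL << mmu_psize_defs[MMU_PAGE_2M].shift;	/* == HPAGE_PMD_SIZE */
}
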
arch/powerpc/mm/pgtable-radix.c
index a6eec41dd3475f4b6308ab04d831f93b30b346ef..2034cbc9aa5605a22d56190531e0378b98fbff18 100644
@@ -1085,10 +1085,10 @@ int radix__has_transparent_hugepage(void)
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-void radix__ptep_set_access_flags(struct mm_struct *mm,
-                                 pte_t *ptep, pte_t entry,
-                                 unsigned long address)
+void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
+                                 pte_t entry, unsigned long address, int psize)
 {
+       struct mm_struct *mm = vma->vm_mm;
        unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
                                              _PAGE_RW | _PAGE_EXEC);
 
arch/powerpc/mm/pgtable.c
index e70af99393793e5fea8b6388393cf307e0d0d993..20cacd33e5be2bc133e73f27081bbf8d1d061ffc 100644
@@ -222,7 +222,8 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
        changed = !pte_same(*(ptep), entry);
        if (changed) {
                assert_pte_locked(vma->vm_mm, address);
-               __ptep_set_access_flags(vma->vm_mm, ptep, entry, address);
+               __ptep_set_access_flags(vma, ptep, entry,
+                                       address, mmu_virtual_psize);
                flush_tlb_page(vma, address);
        }
        return changed;
@@ -242,15 +243,26 @@ extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
        ptep_set_access_flags(vma, addr, ptep, pte, dirty);
        return 1;
 #else
-       int changed;
+       int changed, psize;
 
        pte = set_access_flags_filter(pte, vma, dirty);
        changed = !pte_same(*(ptep), pte);
        if (changed) {
+
+#ifdef CONFIG_PPC_BOOK3S_64
+               struct hstate *hstate = hstate_file(vma->vm_file);
+               psize = hstate_get_psize(hstate);
+#else
+               /*
+                * Not used on non-book3s64 platforms, but 8xx
+                * could possibly use a tsize derived from the hstate.
+                */
+               psize = 0;
+#endif
 #ifdef CONFIG_DEBUG_VM
                assert_spin_locked(&vma->vm_mm->page_table_lock);
 #endif
-               __ptep_set_access_flags(vma->vm_mm, ptep, pte, addr);
+               __ptep_set_access_flags(vma, ptep, pte, addr, psize);
                flush_hugetlb_page(vma, addr);
        }
        return changed;
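
For reference, a rough sketch of the book3s64 helper used above to turn the hugepage size
into an MMU page-size index; the body here is reconstructed for illustration and may not
match the actual header exactly.

/*
 * Approximate sketch of hstate_get_psize() from the book3s64 hugetlb
 * header: map the hstate's page shift onto one of the MMU_PAGE_*
 * indices that the TLB flush code understands.
 */
static inline unsigned int sketch_hstate_get_psize(struct hstate *hstate)
{
	unsigned long shift = huge_page_shift(hstate);

	if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
		return MMU_PAGE_2M;
	else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
		return MMU_PAGE_1G;
	else if (shift == mmu_psize_defs[MMU_PAGE_16M].shift)
		return MMU_PAGE_16M;
	else if (shift == mmu_psize_defs[MMU_PAGE_16G].shift)
		return MMU_PAGE_16G;

	/* Unexpected hugepage shift: fall back to the base page size. */
	WARN_ON(1);
	return mmu_virtual_psize;
}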