s390/mm: remove page table downgrade support
Author:     Alexander Gordeev <agordeev@linux.ibm.com>
AuthorDate: Fri, 28 Feb 2020 10:32:01 +0000 (11:32 +0100)
Commit:     Vasily Gorbik <gor@linux.ibm.com>
CommitDate: Sat, 28 Mar 2020 11:46:12 +0000 (12:46 +0100)
This update consolidates the page table handling code. Because
hardly any 31-bit binaries are left, we no longer need to optimize
for them.

No extra effort is needed to ensure that a compat task does not
map anything above 2GB: the TASK_SIZE limit for 31-bit tasks is
already 2GB, and the generic code checks that a resulting mapping
address does not exceed that limit.

Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
arch/s390/include/asm/mmu.h
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/pgalloc.h
arch/s390/include/asm/processor.h
arch/s390/mm/pgalloc.c

diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index bcfb6371086f2319f6901d2cc52a1d8c44fd0a1a..a8418e1379eb7ee08c92acd034eae000cb19c695 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -32,8 +32,6 @@ typedef struct {
        unsigned int uses_cmm:1;
        /* The gmaps associated with this context are allowed to use huge pages. */
        unsigned int allow_gmap_hpage_1m:1;
-       /* The mmu context is for compat task */
-       unsigned int compat_mm:1;
 } mm_context_t;
 
 #define INIT_MM_CONTEXT(name)                                             \
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 8d04e6f3f79649d460376f09217c9e8fe211a850..3763734965e4134d582e1cb60e5704aae1bf4a76 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -25,7 +25,6 @@ static inline int init_new_context(struct task_struct *tsk,
        atomic_set(&mm->context.flush_count, 0);
        mm->context.gmap_asce = 0;
        mm->context.flush_mm = 0;
-       mm->context.compat_mm = test_thread_flag(TIF_31BIT);
 #ifdef CONFIG_PGSTE
        mm->context.alloc_pgste = page_table_allocate_pgste ||
                test_thread_flag(TIF_PGSTE) ||
@@ -57,10 +56,6 @@ static inline int init_new_context(struct task_struct *tsk,
                mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                                   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
                break;
-       case _REGION3_SIZE:
-               /* forked 2-level compat task, set new asce with new mm->pgd */
-               mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
-                                  _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
        }
        crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
        return 0;
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index f0d7457fa1da58a113c8465798ff2f32dd6d4684..5e3ff9f7a58680175cc3a9752e73ad99655a2e20 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -46,7 +46,6 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 }
 
 int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
-void crst_table_downgrade(struct mm_struct *);
 
 static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long addr,
                                             unsigned long len)
@@ -130,24 +129,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-       unsigned long *table = crst_table_alloc(mm);
-
-       if (!table)
-               return NULL;
-       if (mm->context.asce_limit == _REGION3_SIZE) {
-               /* Forking a compat process with 2 page table levels */
-               if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
-                       crst_table_free(mm, table);
-                       return NULL;
-               }
-       }
-       return (pgd_t *) table;
+       return (pgd_t *) crst_table_alloc(mm);
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-       if (mm->context.asce_limit == _REGION3_SIZE)
-               pgtable_pmd_page_dtor(virt_to_page(pgd));
        crst_table_free(mm, (unsigned long *) pgd);
 }
 
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index c9522346799f4701dd9a42022ac2eb86fe47fac6..dee5e57da518d4bd9d6c1ee573f1f373e56a8d53 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -179,7 +179,6 @@ typedef struct thread_struct thread_struct;
        regs->psw.mask  = PSW_USER_BITS | PSW_MASK_BA;                  \
        regs->psw.addr  = new_psw;                                      \
        regs->gprs[15]  = new_stackp;                                   \
-       crst_table_downgrade(current->mm);                              \
        execve_tail();                                                  \
 } while (0)
 
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index af3bddd5e56801ccac6952420170b00c443fd089..4630fb7705ca9b644b91e6d12ef321c7436768df 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -138,30 +138,6 @@ err_p4d:
        return -ENOMEM;
 }
 
-void crst_table_downgrade(struct mm_struct *mm)
-{
-       pgd_t *pgd;
-
-       /* downgrade should only happen from 3 to 2 levels (compat only) */
-       VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);
-
-       if (current->active_mm == mm) {
-               clear_user_asce();
-               __tlb_flush_mm(mm);
-       }
-
-       pgd = mm->pgd;
-       mm_dec_nr_pmds(mm);
-       mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
-       mm->context.asce_limit = _REGION3_SIZE;
-       mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
-                          _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
-       crst_table_free(mm, (unsigned long *) pgd);
-
-       if (current->active_mm == mm)
-               set_user_asce(mm);
-}
-
 static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
 {
        unsigned int old, new;