s390/mm: move pfault code to own C file
authorHeiko Carstens <hca@linux.ibm.com>
Mon, 24 Jul 2023 15:20:26 +0000 (17:20 +0200)
committerHeiko Carstens <hca@linux.ibm.com>
Sat, 29 Jul 2023 12:57:18 +0000 (14:57 +0200)
The pfault code has nothing to do with regular fault handling.

Therefore move it to its own C file. Also add a separate pfault
header file. This way changes to setup.h don't cause a recompile of
the pfault code, and vice versa.

Reviewed-by: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
arch/s390/include/asm/pfault.h [new file with mode: 0644]
arch/s390/include/asm/setup.h
arch/s390/kernel/machine_kexec.c
arch/s390/kernel/smp.c
arch/s390/mm/Makefile
arch/s390/mm/fault.c
arch/s390/mm/pfault.c [new file with mode: 0644]

diff --git a/arch/s390/include/asm/pfault.h b/arch/s390/include/asm/pfault.h
new file mode 100644 (file)
index 0000000..beabeeb
--- /dev/null
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *    Copyright IBM Corp. 1999, 2023
+ */
+#ifndef _ASM_S390_PFAULT_H
+#define _ASM_S390_PFAULT_H
+
+int __pfault_init(void);
+void __pfault_fini(void);
+
+static inline int pfault_init(void)
+{
+       if (IS_ENABLED(CONFIG_PFAULT))
+               return __pfault_init();
+       return -1;
+}
+
+static inline void pfault_fini(void)
+{
+       if (IS_ENABLED(CONFIG_PFAULT))
+               __pfault_fini();
+}
+
+#endif /* _ASM_S390_PFAULT_H */
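Because IS_ENABLED(CONFIG_PFAULT) is a compile-time constant, the compiler
drops the __pfault_init()/__pfault_fini() calls entirely when the option is
off, so nothing references the unbuilt pfault.o while both branches still get
type-checked. A minimal caller sketch (start_secondary_example() is
hypothetical, for illustration only):

    #include <linux/printk.h>
    #include <asm/pfault.h>

    /* Try to enable page-fault handshaking for this cpu; a nonzero
     * return means pfault is unavailable and we run without it. */
    static void start_secondary_example(void)
    {
            if (pfault_init())
                    pr_info("pfault: handshaking not available\n");
    }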
index e795f425627a9c3ce4b739195727f5867d30525e..b30fe91166e3312c9c8579b12882e55a252b143e 100644 (file)
@@ -118,14 +118,6 @@ extern unsigned int console_irq;
 #define SET_CONSOLE_VT220      do { console_mode = 4; } while (0)
 #define SET_CONSOLE_HVC                do { console_mode = 5; } while (0)
 
-#ifdef CONFIG_PFAULT
-extern int pfault_init(void);
-extern void pfault_fini(void);
-#else /* CONFIG_PFAULT */
-#define pfault_init()          ({-1;})
-#define pfault_fini()          do { } while (0)
-#endif /* CONFIG_PFAULT */
-
 #ifdef CONFIG_VMCP
 void vmcp_cma_reserve(void);
 #else
index 6d9276c096a61115a93ad8f3116529758b242f9e..12a2bd4fc88cb99f9116176c0948fc04170a2f31 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/reboot.h>
 #include <linux/ftrace.h>
 #include <linux/debug_locks.h>
+#include <asm/pfault.h>
 #include <asm/cio.h>
 #include <asm/setup.h>
 #include <asm/smp.h>
index f9a2b755f510527c84e4d1c3a0ebe2d35bb4f01a..9244130721d6f3214b3e37b06d2b0e08ce4dcbe2 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/crash_dump.h>
 #include <linux/kprobes.h>
 #include <asm/asm-offsets.h>
+#include <asm/pfault.h>
 #include <asm/diag.h>
 #include <asm/switch_to.h>
 #include <asm/facility.h>
index d90db06a8af5776937060c94d17baa9b6392f05d..352ff520fd9430a1acffe262c2d7046ba7083890 100644 (file)
@@ -10,3 +10,4 @@ obj-$(CONFIG_CMM)             += cmm.o
 obj-$(CONFIG_HUGETLB_PAGE)     += hugetlbpage.o
 obj-$(CONFIG_PTDUMP_CORE)      += dump_pagetables.o
 obj-$(CONFIG_PGSTE)            += gmap.o
+obj-$(CONFIG_PFAULT)           += pfault.o
index 2f123429a291b7457561ee8e56eeaf39fbbda49c..b5e1bea9194c28d3db654da449ef2621c165621a 100644 (file)
@@ -43,8 +43,6 @@
 #include "../kernel/entry.h"
 
 #define __FAIL_ADDR_MASK -4096L
-#define __SUBCODE_MASK 0x0600
-#define __PF_RES_FIELD 0x8000000000000000ULL
 
 /*
  * Allocate private vm_fault_reason from top.  Please make sure it won't
@@ -583,232 +581,6 @@ void do_dat_exception(struct pt_regs *regs)
 }
 NOKPROBE_SYMBOL(do_dat_exception);
 
-#ifdef CONFIG_PFAULT
-/*
- * 'pfault' pseudo page faults routines.
- */
-static int pfault_disable;
-
-static int __init nopfault(char *str)
-{
-       pfault_disable = 1;
-       return 1;
-}
-
-__setup("nopfault", nopfault);
-
-struct pfault_refbk {
-       u16 refdiagc;
-       u16 reffcode;
-       u16 refdwlen;
-       u16 refversn;
-       u64 refgaddr;
-       u64 refselmk;
-       u64 refcmpmk;
-       u64 reserved;
-} __attribute__ ((packed, aligned(8)));
-
-static struct pfault_refbk pfault_init_refbk = {
-       .refdiagc = 0x258,
-       .reffcode = 0,
-       .refdwlen = 5,
-       .refversn = 2,
-       .refgaddr = __LC_LPP,
-       .refselmk = 1ULL << 48,
-       .refcmpmk = 1ULL << 48,
-       .reserved = __PF_RES_FIELD
-};
-
-int pfault_init(void)
-{
-        int rc;
-
-       if (pfault_disable)
-               return -1;
-       diag_stat_inc(DIAG_STAT_X258);
-       asm volatile(
-               "       diag    %1,%0,0x258\n"
-               "0:     j       2f\n"
-               "1:     la      %0,8\n"
-               "2:\n"
-               EX_TABLE(0b,1b)
-               : "=d" (rc)
-               : "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
-        return rc;
-}
-
-static struct pfault_refbk pfault_fini_refbk = {
-       .refdiagc = 0x258,
-       .reffcode = 1,
-       .refdwlen = 5,
-       .refversn = 2,
-};
-
-void pfault_fini(void)
-{
-
-       if (pfault_disable)
-               return;
-       diag_stat_inc(DIAG_STAT_X258);
-       asm volatile(
-               "       diag    %0,0,0x258\n"
-               "0:     nopr    %%r7\n"
-               EX_TABLE(0b,0b)
-               : : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
-}
-
-static DEFINE_SPINLOCK(pfault_lock);
-static LIST_HEAD(pfault_list);
-
-#define PF_COMPLETE    0x0080
-
-/*
- * The mechanism of our pfault code: if Linux is running as guest, runs a user
- * space process and the user space process accesses a page that the host has
- * paged out we get a pfault interrupt.
- *
- * This allows us, within the guest, to schedule a different process. Without
- * this mechanism the host would have to suspend the whole virtual cpu until
- * the page has been paged in.
- *
- * So when we get such an interrupt then we set the state of the current task
- * to uninterruptible and also set the need_resched flag. Both happens within
- * interrupt context(!). If we later on want to return to user space we
- * recognize the need_resched flag and then call schedule().  It's not very
- * obvious how this works...
- *
- * Of course we have a lot of additional fun with the completion interrupt (->
- * host signals that a page of a process has been paged in and the process can
- * continue to run). This interrupt can arrive on any cpu and, since we have
- * virtual cpus, actually appear before the interrupt that signals that a page
- * is missing.
- */
-static void pfault_interrupt(struct ext_code ext_code,
-                            unsigned int param32, unsigned long param64)
-{
-       struct task_struct *tsk;
-       __u16 subcode;
-       pid_t pid;
-
-       /*
-        * Get the external interruption subcode & pfault initial/completion
-        * signal bit. VM stores this in the 'cpu address' field associated
-        * with the external interrupt.
-        */
-       subcode = ext_code.subcode;
-       if ((subcode & 0xff00) != __SUBCODE_MASK)
-               return;
-       inc_irq_stat(IRQEXT_PFL);
-       /* Get the token (= pid of the affected task). */
-       pid = param64 & LPP_PID_MASK;
-       rcu_read_lock();
-       tsk = find_task_by_pid_ns(pid, &init_pid_ns);
-       if (tsk)
-               get_task_struct(tsk);
-       rcu_read_unlock();
-       if (!tsk)
-               return;
-       spin_lock(&pfault_lock);
-       if (subcode & PF_COMPLETE) {
-               /* signal bit is set -> a page has been swapped in by VM */
-               if (tsk->thread.pfault_wait == 1) {
-                       /* Initial interrupt was faster than the completion
-                        * interrupt. pfault_wait is valid. Set pfault_wait
-                        * back to zero and wake up the process. This can
-                        * safely be done because the task is still sleeping
-                        * and can't produce new pfaults. */
-                       tsk->thread.pfault_wait = 0;
-                       list_del(&tsk->thread.list);
-                       wake_up_process(tsk);
-                       put_task_struct(tsk);
-               } else {
-                       /* Completion interrupt was faster than initial
-                        * interrupt. Set pfault_wait to -1 so the initial
-                        * interrupt doesn't put the task to sleep.
-                        * If the task is not running, ignore the completion
-                        * interrupt since it must be a leftover of a PFAULT
-                        * CANCEL operation which didn't remove all pending
-                        * completion interrupts. */
-                       if (task_is_running(tsk))
-                               tsk->thread.pfault_wait = -1;
-               }
-       } else {
-               /* signal bit not set -> a real page is missing. */
-               if (WARN_ON_ONCE(tsk != current))
-                       goto out;
-               if (tsk->thread.pfault_wait == 1) {
-                       /* Already on the list with a reference: put to sleep */
-                       goto block;
-               } else if (tsk->thread.pfault_wait == -1) {
-                       /* Completion interrupt was faster than the initial
-                        * interrupt (pfault_wait == -1). Set pfault_wait
-                        * back to zero and exit. */
-                       tsk->thread.pfault_wait = 0;
-               } else {
-                       /* Initial interrupt arrived before completion
-                        * interrupt. Let the task sleep.
-                        * An extra task reference is needed since a different
-                        * cpu may set the task state to TASK_RUNNING again
-                        * before the scheduler is reached. */
-                       get_task_struct(tsk);
-                       tsk->thread.pfault_wait = 1;
-                       list_add(&tsk->thread.list, &pfault_list);
-block:
-                       /* Since this must be a userspace fault, there
-                        * is no kernel task state to trample. Rely on the
-                        * return to userspace schedule() to block. */
-                       __set_current_state(TASK_UNINTERRUPTIBLE);
-                       set_tsk_need_resched(tsk);
-                       set_preempt_need_resched();
-               }
-       }
-out:
-       spin_unlock(&pfault_lock);
-       put_task_struct(tsk);
-}
-
-static int pfault_cpu_dead(unsigned int cpu)
-{
-       struct thread_struct *thread, *next;
-       struct task_struct *tsk;
-
-       spin_lock_irq(&pfault_lock);
-       list_for_each_entry_safe(thread, next, &pfault_list, list) {
-               thread->pfault_wait = 0;
-               list_del(&thread->list);
-               tsk = container_of(thread, struct task_struct, thread);
-               wake_up_process(tsk);
-               put_task_struct(tsk);
-       }
-       spin_unlock_irq(&pfault_lock);
-       return 0;
-}
-
-static int __init pfault_irq_init(void)
-{
-       int rc;
-
-       rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
-       if (rc)
-               goto out_extint;
-       rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
-       if (rc)
-               goto out_pfault;
-       irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
-       cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
-                                 NULL, pfault_cpu_dead);
-       return 0;
-
-out_pfault:
-       unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
-out_extint:
-       pfault_disable = 1;
-       return rc;
-}
-early_initcall(pfault_irq_init);
-
-#endif /* CONFIG_PFAULT */
-
 #if IS_ENABLED(CONFIG_PGSTE)
 
 void do_secure_storage_access(struct pt_regs *regs)
diff --git a/arch/s390/mm/pfault.c b/arch/s390/mm/pfault.c
new file mode 100644 (file)
index 0000000..5c0547f
--- /dev/null
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 1999, 2023
+ */
+
+#include <linux/cpuhotplug.h>
+#include <linux/sched/task.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <asm/asm-extable.h>
+#include <asm/pfault.h>
+#include <asm/diag.h>
+
+#define __SUBCODE_MASK 0x0600
+#define __PF_RES_FIELD 0x8000000000000000ULL
+
+/*
+ * 'pfault' pseudo page faults routines.
+ */
+static int pfault_disable;
+
+static int __init nopfault(char *str)
+{
+       pfault_disable = 1;
+       return 1;
+}
+
+__setup("nopfault", nopfault);
+
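+/*
+ * Parameter block for DIAG 0x258 (page-fault handshaking). The 8-byte
+ * token reported with each pfault interrupt is read from the lowcore
+ * LPP field, which contains the pid of the current task (see the
+ * LPP_PID_MASK extraction in pfault_interrupt() below).
+ */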
+struct pfault_refbk {
+       u16 refdiagc;
+       u16 reffcode;
+       u16 refdwlen;
+       u16 refversn;
+       u64 refgaddr;
+       u64 refselmk;
+       u64 refcmpmk;
+       u64 reserved;
+} __attribute__ ((packed, aligned(8)));
+
+static struct pfault_refbk pfault_init_refbk = {
+       .refdiagc = 0x258,
+       .reffcode = 0,
+       .refdwlen = 5,
+       .refversn = 2,
+       .refgaddr = __LC_LPP,
+       .refselmk = 1ULL << 48,
+       .refcmpmk = 1ULL << 48,
+       .reserved = __PF_RES_FIELD
+};
+
+int __pfault_init(void)
+{
+       int rc;
+
+       if (pfault_disable)
+               return -1;
+       diag_stat_inc(DIAG_STAT_X258);
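+       /*
+        * If DIAG 0x258 generates a program check (e.g. the hypervisor
+        * does not support page-fault handshaking), the exception table
+        * entry resumes execution at label 1, which sets rc to 8.
+        */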
+       asm volatile(
+               "       diag    %1,%0,0x258\n"
+               "0:     j       2f\n"
+               "1:     la      %0,8\n"
+               "2:\n"
+               EX_TABLE(0b,1b)
+               : "=d" (rc)
+               : "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
+       return rc;
+}
+
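+/* reffcode 1 cancels page-fault handshaking (PFAULT CANCEL). */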
+static struct pfault_refbk pfault_fini_refbk = {
+       .refdiagc = 0x258,
+       .reffcode = 1,
+       .refdwlen = 5,
+       .refversn = 2,
+};
+
+void __pfault_fini(void)
+{
+       if (pfault_disable)
+               return;
+       diag_stat_inc(DIAG_STAT_X258);
+       asm volatile(
+               "       diag    %0,0,0x258\n"
+               "0:     nopr    %%r7\n"
+               EX_TABLE(0b,0b)
+               : : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
+}
+
+static DEFINE_SPINLOCK(pfault_lock);
+static LIST_HEAD(pfault_list);
+
+#define PF_COMPLETE    0x0080
+
+/*
+ * The mechanism of our pfault code: if Linux is running as a guest and a
+ * user space process accesses a page that the host has paged out, we get
+ * a pfault interrupt.
+ *
+ * This allows us, within the guest, to schedule a different process. Without
+ * this mechanism the host would have to suspend the whole virtual cpu until
+ * the page has been paged in.
+ *
+ * So when we get such an interrupt we set the state of the current task to
+ * uninterruptible and also set the need_resched flag. Both happen within
+ * interrupt context(!). If we later want to return to user space we
+ * recognize the need_resched flag and then call schedule(). It's not very
+ * obvious how this works...
+ *
+ * Of course we have a lot of additional fun with the completion interrupt (->
+ * host signals that a page of a process has been paged in and the process can
+ * continue to run). This interrupt can arrive on any cpu and, since we have
+ * virtual cpus, actually appear before the interrupt that signals that a page
+ * is missing.
+ */
+static void pfault_interrupt(struct ext_code ext_code,
+                            unsigned int param32, unsigned long param64)
+{
+       struct task_struct *tsk;
+       __u16 subcode;
+       pid_t pid;
+
+       /*
+        * Get the external interruption subcode & pfault initial/completion
+        * signal bit. VM stores this in the 'cpu address' field associated
+        * with the external interrupt.
+        */
+       subcode = ext_code.subcode;
+       if ((subcode & 0xff00) != __SUBCODE_MASK)
+               return;
+       inc_irq_stat(IRQEXT_PFL);
+       /* Get the token (= pid of the affected task). */
+       pid = param64 & LPP_PID_MASK;
+       rcu_read_lock();
+       tsk = find_task_by_pid_ns(pid, &init_pid_ns);
+       if (tsk)
+               get_task_struct(tsk);
+       rcu_read_unlock();
+       if (!tsk)
+               return;
+       spin_lock(&pfault_lock);
+       if (subcode & PF_COMPLETE) {
+               /* signal bit is set -> a page has been swapped in by VM */
+               if (tsk->thread.pfault_wait == 1) {
+                       /* Initial interrupt was faster than the completion
+                        * interrupt. pfault_wait is valid. Set pfault_wait
+                        * back to zero and wake up the process. This can
+                        * safely be done because the task is still sleeping
+                        * and can't produce new pfaults. */
+                       tsk->thread.pfault_wait = 0;
+                       list_del(&tsk->thread.list);
+                       wake_up_process(tsk);
+                       put_task_struct(tsk);
+               } else {
+                       /* Completion interrupt was faster than initial
+                        * interrupt. Set pfault_wait to -1 so the initial
+                        * interrupt doesn't put the task to sleep.
+                        * If the task is not running, ignore the completion
+                        * interrupt since it must be a leftover of a PFAULT
+                        * CANCEL operation which didn't remove all pending
+                        * completion interrupts. */
+                       if (task_is_running(tsk))
+                               tsk->thread.pfault_wait = -1;
+               }
+       } else {
+               /* signal bit not set -> a real page is missing. */
+               if (WARN_ON_ONCE(tsk != current))
+                       goto out;
+               if (tsk->thread.pfault_wait == 1) {
+                       /* Already on the list with a reference: put to sleep */
+                       goto block;
+               } else if (tsk->thread.pfault_wait == -1) {
+                       /* Completion interrupt was faster than the initial
+                        * interrupt (pfault_wait == -1). Set pfault_wait
+                        * back to zero and exit. */
+                       tsk->thread.pfault_wait = 0;
+               } else {
+                       /* Initial interrupt arrived before completion
+                        * interrupt. Let the task sleep.
+                        * An extra task reference is needed since a different
+                        * cpu may set the task state to TASK_RUNNING again
+                        * before the scheduler is reached. */
+                       get_task_struct(tsk);
+                       tsk->thread.pfault_wait = 1;
+                       list_add(&tsk->thread.list, &pfault_list);
+block:
+                       /* Since this must be a userspace fault, there
+                        * is no kernel task state to trample. Rely on the
+                        * return to userspace schedule() to block. */
+                       __set_current_state(TASK_UNINTERRUPTIBLE);
+                       set_tsk_need_resched(tsk);
+                       set_preempt_need_resched();
+               }
+       }
+out:
+       spin_unlock(&pfault_lock);
+       put_task_struct(tsk);
+}
+
+static int pfault_cpu_dead(unsigned int cpu)
+{
+       struct thread_struct *thread, *next;
+       struct task_struct *tsk;
+
+       spin_lock_irq(&pfault_lock);
+       list_for_each_entry_safe(thread, next, &pfault_list, list) {
+               thread->pfault_wait = 0;
+               list_del(&thread->list);
+               tsk = container_of(thread, struct task_struct, thread);
+               wake_up_process(tsk);
+               put_task_struct(tsk);
+       }
+       spin_unlock_irq(&pfault_lock);
+       return 0;
+}
+
+static int __init pfault_irq_init(void)
+{
+       int rc;
+
+       rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
+       if (rc)
+               goto out_extint;
+       rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
+       if (rc)
+               goto out_pfault;
+       irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
+       cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
+                                 NULL, pfault_cpu_dead);
+       return 0;
+
+out_pfault:
+       unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
+out_extint:
+       pfault_disable = 1;
+       return rc;
+}
+early_initcall(pfault_irq_init);