powerpc/bpf: Move bpf_jit64.h into bpf_jit_comp64.c
Author:     Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
            Mon, 14 Feb 2022 10:41:48 +0000 (16:11 +0530)
Committer:  Michael Ellerman <mpe@ellerman.id.au>
            Mon, 7 Mar 2022 13:04:59 +0000 (00:04 +1100)
There is no need for a separate header anymore. Move the contents of
bpf_jit64.h into bpf_jit_comp64.c

Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/b873a8e6eff7d91bf2a2cabdd53082aadfe20761.1644834730.git.naveen.n.rao@linux.vnet.ibm.com
arch/powerpc/net/bpf_jit64.h [deleted file]
arch/powerpc/net/bpf_jit_comp64.c

diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h
deleted file mode 100644 (file)
index 199348b..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * bpf_jit64.h: BPF JIT compiler for PPC64
- *
- * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
- *               IBM Corporation
- */
-#ifndef _BPF_JIT64_H
-#define _BPF_JIT64_H
-
-#include "bpf_jit.h"
-
-/*
- * Stack layout:
- * Ensure the top half (up to local_tmp_var) stays consistent
- * with our redzone usage.
- *
- *             [       prev sp         ] <-------------
- *             [   nv gpr save area    ] 5*8           |
- *             [    tail_call_cnt      ] 8             |
- *             [    local_tmp_var      ] 16            |
- * fp (r31) -->        [   ebpf stack space    ] up to 512     |
- *             [     frame header      ] 32/112        |
- * sp (r1) --->        [    stack pointer      ] --------------
- */
-
-/* for gpr non volatile registers BPF_REG_6 to 10 */
-#define BPF_PPC_STACK_SAVE     (5*8)
-/* for bpf JIT code internal usage */
-#define BPF_PPC_STACK_LOCALS   24
-/* stack frame excluding BPF stack, ensure this is quadword aligned */
-#define BPF_PPC_STACKFRAME     (STACK_FRAME_MIN_SIZE + \
-                                BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
-
-#ifndef __ASSEMBLY__
-
-/* BPF register usage */
-#define TMP_REG_1      (MAX_BPF_JIT_REG + 0)
-#define TMP_REG_2      (MAX_BPF_JIT_REG + 1)
-
-/* BPF to ppc register mappings */
-const int b2p[MAX_BPF_JIT_REG + 2] = {
-       /* function return value */
-       [BPF_REG_0] = 8,
-       /* function arguments */
-       [BPF_REG_1] = 3,
-       [BPF_REG_2] = 4,
-       [BPF_REG_3] = 5,
-       [BPF_REG_4] = 6,
-       [BPF_REG_5] = 7,
-       /* non volatile registers */
-       [BPF_REG_6] = 27,
-       [BPF_REG_7] = 28,
-       [BPF_REG_8] = 29,
-       [BPF_REG_9] = 30,
-       /* frame pointer aka BPF_REG_10 */
-       [BPF_REG_FP] = 31,
-       /* eBPF jit internal registers */
-       [BPF_REG_AX] = 12,
-       [TMP_REG_1] = 9,
-       [TMP_REG_2] = 10
-};
-
-/* PPC NVR range -- update this if we ever use NVRs below r27 */
-#define BPF_PPC_NVR_MIN                27
-
-#endif /* !__ASSEMBLY__ */
-
-#endif
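
The register map being removed above (and re-added to bpf_jit_comp64.c below) is the heart of the 64-bit JIT's register allocation: BPF arguments live in the volatile argument registers r3-r7, the return value in r8, the callee-saved BPF registers BPF_REG_6..BPF_REG_9 in the non-volatiles r27-r30 so they survive helper calls, the BPF frame pointer in r31, and r9/r10/r12 serve as JIT-internal temporaries. The standalone C sketch below only reconstructs and prints that table; the BPF register numbering it defines locally (BPF_REG_0..BPF_REG_FP, BPF_REG_AX, MAX_BPF_JIT_REG) is an assumption made so the snippet compiles outside the kernel, and is merely expected to mirror the enum in include/linux/filter.h.

/*
 * Standalone illustration of the b2p[] BPF -> powerpc GPR mapping shown in
 * the header above.  The register indices are defined locally as an
 * assumption (expected to mirror include/linux/filter.h); only the mapping
 * values themselves are taken from the patch.
 */
#include <stdio.h>

enum {
	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5,
	BPF_REG_6, BPF_REG_7, BPF_REG_8, BPF_REG_9,
	BPF_REG_FP,		/* aka BPF_REG_10 */
	BPF_REG_AX,		/* JIT-internal constant-blinding register */
	MAX_BPF_JIT_REG,	/* assumed to be 12, as in the kernel enum */
};

#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)
#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)

/* Same initialiser as the table being moved in this patch. */
static const int b2p[MAX_BPF_JIT_REG + 2] = {
	[BPF_REG_0] = 8,
	[BPF_REG_1] = 3, [BPF_REG_2] = 4, [BPF_REG_3] = 5,
	[BPF_REG_4] = 6, [BPF_REG_5] = 7,
	[BPF_REG_6] = 27, [BPF_REG_7] = 28,
	[BPF_REG_8] = 29, [BPF_REG_9] = 30,
	[BPF_REG_FP] = 31,
	[BPF_REG_AX] = 12,
	[TMP_REG_1] = 9,
	[TMP_REG_2] = 10,
};

/* r27..r31 are the only non-volatile GPRs the JIT ever uses. */
#define BPF_PPC_NVR_MIN	27

int main(void)
{
	for (int i = 0; i < MAX_BPF_JIT_REG + 2; i++)
		printf("bpf reg %2d -> r%-2d%s\n", i, b2p[i],
		       b2p[i] >= BPF_PPC_NVR_MIN ? "  (non-volatile)" : "");
	return 0;
}

Because every non-volatile register the JIT touches is r27 or above, BPF_PPC_NVR_MIN lets the prologue and epilogue save and restore only that narrow slice of the NVR file.
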
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index eeda636cd7be645ba0487ef2a9d90674e4897c23..3e4ed5560947702ae85e600ec331e622735adb23 100644 (file)
 #include <linux/bpf.h>
 #include <asm/security_features.h>
 
-#include "bpf_jit64.h"
+#include "bpf_jit.h"
+
+/*
+ * Stack layout:
+ * Ensure the top half (up to local_tmp_var) stays consistent
+ * with our redzone usage.
+ *
+ *             [       prev sp         ] <-------------
+ *             [   nv gpr save area    ] 5*8           |
+ *             [    tail_call_cnt      ] 8             |
+ *             [    local_tmp_var      ] 16            |
+ * fp (r31) -->        [   ebpf stack space    ] up to 512     |
+ *             [     frame header      ] 32/112        |
+ * sp (r1) --->        [    stack pointer      ] --------------
+ */
+
+/* for gpr non volatile registers BPF_REG_6 to 10 */
+#define BPF_PPC_STACK_SAVE     (5*8)
+/* for bpf JIT code internal usage */
+#define BPF_PPC_STACK_LOCALS   24
+/* stack frame excluding BPF stack, ensure this is quadword aligned */
+#define BPF_PPC_STACKFRAME     (STACK_FRAME_MIN_SIZE + \
+                                BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
+
+/* BPF register usage */
+#define TMP_REG_1      (MAX_BPF_JIT_REG + 0)
+#define TMP_REG_2      (MAX_BPF_JIT_REG + 1)
+
+/* BPF to ppc register mappings */
+const int b2p[MAX_BPF_JIT_REG + 2] = {
+       /* function return value */
+       [BPF_REG_0] = 8,
+       /* function arguments */
+       [BPF_REG_1] = 3,
+       [BPF_REG_2] = 4,
+       [BPF_REG_3] = 5,
+       [BPF_REG_4] = 6,
+       [BPF_REG_5] = 7,
+       /* non volatile registers */
+       [BPF_REG_6] = 27,
+       [BPF_REG_7] = 28,
+       [BPF_REG_8] = 29,
+       [BPF_REG_9] = 30,
+       /* frame pointer aka BPF_REG_10 */
+       [BPF_REG_FP] = 31,
+       /* eBPF jit internal registers */
+       [BPF_REG_AX] = 12,
+       [TMP_REG_1] = 9,
+       [TMP_REG_2] = 10
+};
+
+/* PPC NVR range -- update this if we ever use NVRs below r27 */
+#define BPF_PPC_NVR_MIN                27
 
 static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
 {
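
The macros that just landed in bpf_jit_comp64.c fix the shape of the frame the JIT prologue builds: STACK_FRAME_MIN_SIZE bytes of ABI frame header (32 on ELFv2, 112 on ELFv1, hence the "32/112" in the layout comment), the eBPF program's own stack, 24 bytes of locals and tail-call counter, and a 40-byte save area for r27-r31. The sketch below simply reads the offsets off that layout comment for a program using the full 512-byte eBPF stack; the 16-byte rounding of the stack depth and the ELFv2 value of STACK_FRAME_MIN_SIZE are assumptions, and the arithmetic is illustrative rather than the kernel's own helper functions.

/*
 * Standalone sketch of the frame arithmetic implied by the layout comment
 * and macros moved in this patch.  STACK_FRAME_MIN_SIZE is assumed to be 32
 * (ELFv2; 112 on ELFv1), and the 16-byte rounding of the program's stack
 * depth is an assumption about how sp alignment is kept.
 */
#include <stdio.h>

#define STACK_FRAME_MIN_SIZE	32	/* assumption: ELFv2 frame header */
#define BPF_PPC_STACK_SAVE	(5 * 8)	/* r27..r31 save area */
#define BPF_PPC_STACK_LOCALS	24	/* 16B local_tmp_var + 8B tail_call_cnt */
#define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)

int main(void)
{
	unsigned int depth = 512;			/* max eBPF stack size */
	unsigned int stack = (depth + 15) & ~15u;	/* keep sp 16-byte aligned */
	unsigned int frame = BPF_PPC_STACKFRAME + stack;

	/* Offsets from the new stack pointer (r1), reading the diagram bottom-up. */
	unsigned int fp     = STACK_FRAME_MIN_SIZE + stack;	/* where r31 points */
	unsigned int locals = fp;				/* 16-byte local_tmp_var */
	unsigned int tcc    = fp + 16;				/* tail_call_cnt */
	unsigned int nvsave = fp + BPF_PPC_STACK_LOCALS;	/* nv gpr save area */

	printf("fixed overhead   : %u bytes\n", (unsigned int)BPF_PPC_STACKFRAME);
	printf("total frame      : %u bytes\n", frame);
	printf("bpf frame ptr r31: r1 + %u\n", fp);
	printf("local_tmp_var    : r1 + %u\n", locals);
	printf("tail_call_cnt    : r1 + %u\n", tcc);
	printf("nv gpr save area : r1 + %u\n", nvsave);
	printf("previous sp      : r1 + %u\n", nvsave + BPF_PPC_STACK_SAVE);
	return 0;
}

bpf_has_stack_frame(), whose body is truncated above, is what decides whether this frame is created at all; a program that needs neither the eBPF stack nor any non-volatile register can presumably run out of the caller's redzone instead, which is why the layout comment asks that the top half stay consistent with the redzone usage.
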