platform/x86: intel_turbo_max_3: Move to intel sub-directory
authorKate Hsuan <hpa@redhat.com>
Fri, 20 Aug 2021 11:04:49 +0000 (14:04 +0300)
committerHans de Goede <hdegoede@redhat.com>
Fri, 20 Aug 2021 18:11:22 +0000 (20:11 +0200)
Move Intel Turbo Max 3 driver to intel sub-directory to improve readability
and rename it from intel_turbo_max_3.c to turbo_max_3.c.

Signed-off-by: Kate Hsuan <hpa@redhat.com>
Acked-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Reviewed-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Link: https://lore.kernel.org/r/20210820110458.73018-12-andriy.shevchenko@linux.intel.com
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
drivers/platform/x86/Kconfig
drivers/platform/x86/Makefile
drivers/platform/x86/intel/Kconfig
drivers/platform/x86/intel/Makefile
drivers/platform/x86/intel/turbo_max_3.c [new file with mode: 0644]
drivers/platform/x86/intel_turbo_max_3.c [deleted file]

index 7820f4e63d017137409f81a9d1318f3dd6f40c49..a9acd44c2076ce06ce0057363c3b5c193ef47b3f 100644 (file)
@@ -1112,16 +1112,6 @@ config INTEL_IPS
 
 source "drivers/platform/x86/intel_speed_select_if/Kconfig"
 
-config INTEL_TURBO_MAX_3
-       bool "Intel Turbo Boost Max Technology 3.0 enumeration driver"
-       depends on X86_64 && SCHED_MC_PRIO
-       help
-         This driver reads maximum performance ratio of each CPU and set up
-         the scheduler priority metrics. In this way scheduler can prefer
-         CPU with higher performance to schedule tasks.
-         This driver is only required when the system is not using Hardware
-         P-States (HWP). In HWP mode, priority can be read from ACPI tables.
-
 config INTEL_UNCORE_FREQ_CONTROL
        tristate "Intel Uncore frequency control driver"
        depends on X86_64
index 44f990f7c2c4c5dfccc2220d514ae8f6f489b831..15d0754363ea46e60e17db779ac9876181a2e543 100644 (file)
@@ -121,7 +121,6 @@ obj-$(CONFIG_WIRELESS_HOTKEY)               += wireless-hotkey.o
 # Intel uncore drivers
 obj-$(CONFIG_INTEL_IPS)                                += intel_ips.o
 obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE)     += intel_speed_select_if/
-obj-$(CONFIG_INTEL_TURBO_MAX_3)                        += intel_turbo_max_3.o
 obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL)                += intel-uncore-frequency.o
 
 # Intel PMIC / PMC / P-Unit devices
index 24cd26579d5245e6664b7eaecb6e26b57c3e0839..701479386305cf6f507bdf39eba0c4d34d235853 100644 (file)
@@ -87,4 +87,15 @@ config INTEL_SMARTCONNECT
          This driver checks to determine whether the device has Intel Smart
          Connect enabled, and if so disables it.
 
+config INTEL_TURBO_MAX_3
+       bool "Intel Turbo Boost Max Technology 3.0 enumeration driver"
+       depends on X86_64 && SCHED_MC_PRIO
+       help
+         This driver reads maximum performance ratio of each CPU and set up
+         the scheduler priority metrics. In this way scheduler can prefer
+         CPU with higher performance to schedule tasks.
+
+         This driver is only required when the system is not using Hardware
+         P-States (HWP). In HWP mode, priority can be read from ACPI tables.
+
 endif # X86_PLATFORM_DRIVERS_INTEL
index cefcc92d93f596ea85abfebfb4052e4414a29747..1ecdf774e4907be76bbaa37edff5b50cd23c869b 100644 (file)
@@ -26,3 +26,5 @@ intel-rst-y                           := rst.o
 obj-$(CONFIG_INTEL_RST)                        += intel-rst.o
 intel-smartconnect-y                   := smartconnect.o
 obj-$(CONFIG_INTEL_SMARTCONNECT)       += intel-smartconnect.o
+intel_turbo_max_3-y                    := turbo_max_3.o
+obj-$(CONFIG_INTEL_TURBO_MAX_3)                += intel_turbo_max_3.o
diff --git a/drivers/platform/x86/intel/turbo_max_3.c b/drivers/platform/x86/intel/turbo_max_3.c
new file mode 100644 (file)
index 0000000..892140b
--- /dev/null
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Turbo Boost Max Technology 3.0 legacy (non HWP) enumeration driver
+ * Copyright (c) 2017, Intel Corporation.
+ * All rights reserved.
+ *
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpufeature.h>
+#include <linux/cpuhotplug.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/topology.h>
+#include <linux/workqueue.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+#define MSR_OC_MAILBOX                 0x150
+#define MSR_OC_MAILBOX_CMD_OFFSET      32
+#define MSR_OC_MAILBOX_RSP_OFFSET      32
+#define MSR_OC_MAILBOX_BUSY_BIT                63
+#define OC_MAILBOX_FC_CONTROL_CMD      0x1C
+
+/*
+ * Typical latency to get a mailbox response is ~3 us. It takes a further ~3 us
+ * to process the mailbox read after issuing the mailbox write on a Broadwell
+ * 3.4 GHz system. So most of the time the first mailbox read should have the
+ * response, but to avoid some boundary cases retry twice.
+ */
+#define OC_MAILBOX_RETRY_COUNT         2
+
+static int get_oc_core_priority(unsigned int cpu)
+{
+       u64 value, cmd = OC_MAILBOX_FC_CONTROL_CMD;
+       int ret, i;
+
+       /* Issue favored core read command */
+       value = cmd << MSR_OC_MAILBOX_CMD_OFFSET;
+       /* Set the busy bit to indicate OS is trying to issue command */
+       value |=  BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT);
+       ret = wrmsrl_safe(MSR_OC_MAILBOX, value);
+       if (ret) {
+               pr_debug("cpu %d OC mailbox write failed\n", cpu);
+               return ret;
+       }
+
+       for (i = 0; i < OC_MAILBOX_RETRY_COUNT; ++i) {
+               ret = rdmsrl_safe(MSR_OC_MAILBOX, &value);
+               if (ret) {
+                       pr_debug("cpu %d OC mailbox read failed\n", cpu);
+                       break;
+               }
+
+               if (value & BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT)) {
+                       pr_debug("cpu %d OC mailbox still processing\n", cpu);
+                       ret = -EBUSY;
+                       continue;
+               }
+
+               if ((value >> MSR_OC_MAILBOX_RSP_OFFSET) & 0xff) {
+                       pr_debug("cpu %d OC mailbox cmd failed\n", cpu);
+                       ret = -ENXIO;
+                       break;
+               }
+
+               ret = value & 0xff;
+               pr_debug("cpu %d max_ratio %d\n", cpu, ret);
+               break;
+       }
+
+       return ret;
+}
+
+/*
+ * The work item is needed to avoid CPU hotplug locking issues. The function
+ * itmt_legacy_cpu_online() is called from the CPU online callback, so it
+ * can't call sched_set_itmt_support() directly as that function will acquire
+ * hotplug locks in its path.
+ */
+static void itmt_legacy_work_fn(struct work_struct *work)
+{
+       sched_set_itmt_support();
+}
+
+static DECLARE_WORK(sched_itmt_work, itmt_legacy_work_fn);
+
+static int itmt_legacy_cpu_online(unsigned int cpu)
+{
+       static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
+       int priority;
+
+       priority = get_oc_core_priority(cpu);
+       if (priority < 0)
+               return 0;
+
+       sched_set_itmt_core_prio(priority, cpu);
+
+       /* Enable ITMT feature when a core with different priority is found */
+       if (max_highest_perf <= min_highest_perf) {
+               if (priority > max_highest_perf)
+                       max_highest_perf = priority;
+
+               if (priority < min_highest_perf)
+                       min_highest_perf = priority;
+
+               if (max_highest_perf > min_highest_perf)
+                       schedule_work(&sched_itmt_work);
+       }
+
+       return 0;
+}
+
+static const struct x86_cpu_id itmt_legacy_cpu_ids[] = {
+       X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL),
+       X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,   NULL),
+       {}
+};
+
+static int __init itmt_legacy_init(void)
+{
+       const struct x86_cpu_id *id;
+       int ret;
+
+       id = x86_match_cpu(itmt_legacy_cpu_ids);
+       if (!id)
+               return -ENODEV;
+
+       ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+                               "platform/x86/turbo_max_3:online",
+                               itmt_legacy_cpu_online, NULL);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+late_initcall(itmt_legacy_init)
diff --git a/drivers/platform/x86/intel_turbo_max_3.c b/drivers/platform/x86/intel_turbo_max_3.c
deleted file mode 100644 (file)
index 892140b..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Intel Turbo Boost Max Technology 3.0 legacy (non HWP) enumeration driver
- * Copyright (c) 2017, Intel Corporation.
- * All rights reserved.
- *
- * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/cpufeature.h>
-#include <linux/cpuhotplug.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/topology.h>
-#include <linux/workqueue.h>
-
-#include <asm/cpu_device_id.h>
-#include <asm/intel-family.h>
-
-#define MSR_OC_MAILBOX                 0x150
-#define MSR_OC_MAILBOX_CMD_OFFSET      32
-#define MSR_OC_MAILBOX_RSP_OFFSET      32
-#define MSR_OC_MAILBOX_BUSY_BIT                63
-#define OC_MAILBOX_FC_CONTROL_CMD      0x1C
-
-/*
- * Typical latency to get mail box response is ~3us, It takes +3 us to
- * process reading mailbox after issuing mailbox write on a Broadwell 3.4 GHz
- * system. So for most of the time, the first mailbox read should have the
- * response, but to avoid some boundary cases retry twice.
- */
-#define OC_MAILBOX_RETRY_COUNT         2
-
-static int get_oc_core_priority(unsigned int cpu)
-{
-       u64 value, cmd = OC_MAILBOX_FC_CONTROL_CMD;
-       int ret, i;
-
-       /* Issue favored core read command */
-       value = cmd << MSR_OC_MAILBOX_CMD_OFFSET;
-       /* Set the busy bit to indicate OS is trying to issue command */
-       value |=  BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT);
-       ret = wrmsrl_safe(MSR_OC_MAILBOX, value);
-       if (ret) {
-               pr_debug("cpu %d OC mailbox write failed\n", cpu);
-               return ret;
-       }
-
-       for (i = 0; i < OC_MAILBOX_RETRY_COUNT; ++i) {
-               ret = rdmsrl_safe(MSR_OC_MAILBOX, &value);
-               if (ret) {
-                       pr_debug("cpu %d OC mailbox read failed\n", cpu);
-                       break;
-               }
-
-               if (value & BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT)) {
-                       pr_debug("cpu %d OC mailbox still processing\n", cpu);
-                       ret = -EBUSY;
-                       continue;
-               }
-
-               if ((value >> MSR_OC_MAILBOX_RSP_OFFSET) & 0xff) {
-                       pr_debug("cpu %d OC mailbox cmd failed\n", cpu);
-                       ret = -ENXIO;
-                       break;
-               }
-
-               ret = value & 0xff;
-               pr_debug("cpu %d max_ratio %d\n", cpu, ret);
-               break;
-       }
-
-       return ret;
-}
-
-/*
- * The work item is needed to avoid CPU hotplug locking issues. The function
- * itmt_legacy_set_priority() is called from CPU online callback, so can't
- * call sched_set_itmt_support() from there as this function will aquire
- * hotplug locks in its path.
- */
-static void itmt_legacy_work_fn(struct work_struct *work)
-{
-       sched_set_itmt_support();
-}
-
-static DECLARE_WORK(sched_itmt_work, itmt_legacy_work_fn);
-
-static int itmt_legacy_cpu_online(unsigned int cpu)
-{
-       static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
-       int priority;
-
-       priority = get_oc_core_priority(cpu);
-       if (priority < 0)
-               return 0;
-
-       sched_set_itmt_core_prio(priority, cpu);
-
-       /* Enable ITMT feature when a core with different priority is found */
-       if (max_highest_perf <= min_highest_perf) {
-               if (priority > max_highest_perf)
-                       max_highest_perf = priority;
-
-               if (priority < min_highest_perf)
-                       min_highest_perf = priority;
-
-               if (max_highest_perf > min_highest_perf)
-                       schedule_work(&sched_itmt_work);
-       }
-
-       return 0;
-}
-
-static const struct x86_cpu_id itmt_legacy_cpu_ids[] = {
-       X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL),
-       X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,   NULL),
-       {}
-};
-
-static int __init itmt_legacy_init(void)
-{
-       const struct x86_cpu_id *id;
-       int ret;
-
-       id = x86_match_cpu(itmt_legacy_cpu_ids);
-       if (!id)
-               return -ENODEV;
-
-       ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
-                               "platform/x86/turbo_max_3:online",
-                               itmt_legacy_cpu_online, NULL);
-       if (ret < 0)
-               return ret;
-
-       return 0;
-}
-late_initcall(itmt_legacy_init)