highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
 
        WRITE_ONCE(cpudata->highest_perf, highest_perf);
-
+       WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
        WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
        WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
        WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
-
+       WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1));
        return 0;
 }
 
                highest_perf = cppc_perf.highest_perf;
 
        WRITE_ONCE(cpudata->highest_perf, highest_perf);
-
+       WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
        WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
        WRITE_ONCE(cpudata->lowest_nonlinear_perf,
                   cppc_perf.lowest_nonlinear_perf);
        WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
+       WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf);
 
        if (cppc_state == AMD_PSTATE_ACTIVE)
                return 0;
        u64 prev = READ_ONCE(cpudata->cppc_req_cached);
        u64 value = prev;
 
+       min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
+                       cpudata->max_limit_perf);
+       max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
+                       cpudata->max_limit_perf);
        des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
 
        if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
        return 0;
 }
 
+/*
+ * amd_pstate_update_min_max_limit - refresh the cached policy limits
+ * @policy: cpufreq policy whose ->min/->max bounds (kHz) have changed
+ *
+ * Converts the policy frequency bounds into the CPPC performance scale
+ * by linear scaling against the highest_perf/max_freq ratio, then caches
+ * both the derived perf limits and the raw frequency limits in cpudata,
+ * so the hot update paths can cheaply detect "limits unchanged" by
+ * comparing policy->min/max against min_limit_freq/max_limit_freq.
+ *
+ * Returns 0 (no failure path).
+ */
+static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
+{
+       u32 max_limit_perf, min_limit_perf;
+       struct amd_cpudata *cpudata = policy->driver_data;
+
+       /*
+        * perf_limit = freq_limit * highest_perf / max_freq.
+        * NOTE(review): the multiplication is u32 * u32 and is evaluated
+        * before widening for div_u64 — confirm policy->max * highest_perf
+        * cannot exceed U32_MAX for the largest supported max_freq.
+        */
+       max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
+       min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
+
+       /* WRITE_ONCE pairs with lockless READ_ONCE in the update paths. */
+       WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
+       WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
+       WRITE_ONCE(cpudata->max_limit_freq, policy->max);
+       WRITE_ONCE(cpudata->min_limit_freq, policy->min);
+
+       return 0;
+}
+
 static int amd_pstate_update_freq(struct cpufreq_policy *policy,
                                  unsigned int target_freq, bool fast_switch)
 {
        if (!cpudata->max_freq)
                return -ENODEV;
 
+       if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
+               amd_pstate_update_min_max_limit(policy);
+
        cap_perf = READ_ONCE(cpudata->highest_perf);
        min_perf = READ_ONCE(cpudata->lowest_perf);
        max_perf = cap_perf;
        struct amd_cpudata *cpudata = policy->driver_data;
        unsigned int target_freq;
 
+       if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
+               amd_pstate_update_min_max_limit(policy);
+
+
        cap_perf = READ_ONCE(cpudata->highest_perf);
        lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
        max_freq = READ_ONCE(cpudata->max_freq);
        /* Initial processor data capability frequencies */
        cpudata->max_freq = max_freq;
        cpudata->min_freq = min_freq;
+       cpudata->max_limit_freq = max_freq;
+       cpudata->min_limit_freq = min_freq;
        cpudata->nominal_freq = nominal_freq;
        cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
 
        return 0;
 }
 
-static void amd_pstate_epp_init(unsigned int cpu)
+static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
 {
-       struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        struct amd_cpudata *cpudata = policy->driver_data;
-       u32 max_perf, min_perf;
+       u32 max_perf, min_perf, min_limit_perf, max_limit_perf;
        u64 value;
        s16 epp;
 
        max_perf = READ_ONCE(cpudata->highest_perf);
        min_perf = READ_ONCE(cpudata->lowest_perf);
+       max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
+       min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
+
+       max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
+                       cpudata->max_limit_perf);
+       min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
+                       cpudata->max_limit_perf);
+
+       WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
+       WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
 
        value = READ_ONCE(cpudata->cppc_req_cached);
 
        value &= ~AMD_CPPC_DES_PERF(~0L);
        value |= AMD_CPPC_DES_PERF(0);
 
-       if (cpudata->epp_policy == cpudata->policy)
-               goto skip_epp;
-
        cpudata->epp_policy = cpudata->policy;
 
        /* Get BIOS pre-defined epp value */
                 * This return value can only be negative for shared_memory
                 * systems where EPP register read/write not supported.
                 */
-               goto skip_epp;
+               return;
        }
 
        if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
 
        WRITE_ONCE(cpudata->cppc_req_cached, value);
        amd_pstate_set_epp(cpudata, epp);
-skip_epp:
-       cpufreq_cpu_put(policy);
 }
 
 static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
 
        cpudata->policy = policy->policy;
 
-       amd_pstate_epp_init(policy->cpu);
+       amd_pstate_epp_update_limit(policy);
 
        return 0;
 }