unsigned int evntsel;
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
 
-       perfctr_msr = MSR_K7_PERFCTR0;
-       evntsel_msr = MSR_K7_EVNTSEL0;
+       perfctr_msr = wd_ops->perfctr;
+       evntsel_msr = wd_ops->evntsel;
 
        wrmsrl(perfctr_msr, 0UL);
 
        unsigned int evntsel;
        struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
 
-       perfctr_msr = MSR_P6_PERFCTR0;
-       evntsel_msr = MSR_P6_EVNTSEL0;
+       perfctr_msr = wd_ops->perfctr;
+       evntsel_msr = wd_ops->evntsel;
 
        /* KVM doesn't implement this MSR */
        if (wrmsr_safe(perfctr_msr, 0, 0) < 0)
            (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
                return 0;
 
-       perfctr_msr = MSR_ARCH_PERFMON_PERFCTR1;
-       evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL1;
+       perfctr_msr = wd_ops->perfctr;
+       evntsel_msr = wd_ops->evntsel;
 
        wrmsrl(perfctr_msr, 0UL);
 
        .evntsel = MSR_ARCH_PERFMON_EVENTSEL1,
 };
 
+static struct wd_ops coreduo_wd_ops = {
+       .reserve = single_msr_reserve,
+       .unreserve = single_msr_unreserve,
+       .setup = setup_intel_arch_watchdog,
+       .rearm = p6_rearm,
+       .stop = single_msr_stop_watchdog,
+       .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
+       .evntsel = MSR_ARCH_PERFMON_EVENTSEL0,
+};
+
 static void probe_nmi_watchdog(void)
 {
        switch (boot_cpu_data.x86_vendor) {
                wd_ops = &k7_wd_ops;
                break;
        case X86_VENDOR_INTEL:
+               /* Work around Core Duo (Yonah) erratum AE49 where perfctr1
+                * doesn't have a working enable bit. */
+               if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) {
+                       wd_ops = &coreduo_wd_ops;
+                       break;
+               }
                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
                        wd_ops = &intel_arch_wd_ops;
                        break;