static uint32_t max_thread_ids_for_cache(X86CPUTopoInfo *topo_info,
- enum CPUTopoLevel share_level)
+ enum CpuTopologyLevel share_level)
{
uint32_t num_ids = 0;
switch (share_level) {
- case CPU_TOPO_LEVEL_CORE:
+ case CPU_TOPOLOGY_LEVEL_CORE:
num_ids = 1 << apicid_core_offset(topo_info);
break;
- case CPU_TOPO_LEVEL_DIE:
+ case CPU_TOPOLOGY_LEVEL_DIE:
num_ids = 1 << apicid_die_offset(topo_info);
break;
- case CPU_TOPO_LEVEL_PACKAGE:
+ case CPU_TOPOLOGY_LEVEL_SOCKET:
num_ids = 1 << apicid_pkg_offset(topo_info);
break;
default:
/*
- * Currently there is no use case for SMT and MODULE, so use
+ * Currently there is no use case for THREAD and MODULE, so use
* assert directly to facilitate debugging.
*/
g_assert_not_reached();
}
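For intuition, the shifts above can be traced against a concrete (hypothetical) topology; the field names are the X86CPUTopoInfo members used throughout this file:

/* Illustrative sketch, not part of the patch. */
X86CPUTopoInfo topo = {
    .dies_per_pkg = 1, .modules_per_die = 1,
    .cores_per_module = 4, .threads_per_core = 2,
};
/* apicid_core_offset(&topo) == ceil(log2(2)) == 1, so a CORE-scoped
 * cache spans num_ids = 1 << 1 = 2 APIC IDs (one core's two threads),
 * while a SOCKET-scoped one spans 1 << apicid_pkg_offset(&topo) == 8. */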
static uint32_t num_threads_by_topo_level(X86CPUTopoInfo *topo_info,
- enum CPUTopoLevel topo_level)
+ enum CpuTopologyLevel topo_level)
{
switch (topo_level) {
- case CPU_TOPO_LEVEL_SMT:
+ case CPU_TOPOLOGY_LEVEL_THREAD:
return 1;
- case CPU_TOPO_LEVEL_CORE:
+ case CPU_TOPOLOGY_LEVEL_CORE:
return topo_info->threads_per_core;
- case CPU_TOPO_LEVEL_MODULE:
+ case CPU_TOPOLOGY_LEVEL_MODULE:
return topo_info->threads_per_core * topo_info->cores_per_module;
- case CPU_TOPO_LEVEL_DIE:
+ case CPU_TOPOLOGY_LEVEL_DIE:
return topo_info->threads_per_core * topo_info->cores_per_module *
topo_info->modules_per_die;
- case CPU_TOPO_LEVEL_PACKAGE:
+ case CPU_TOPOLOGY_LEVEL_SOCKET:
return topo_info->threads_per_core * topo_info->cores_per_module *
topo_info->modules_per_die * topo_info->dies_per_pkg;
default:
    g_assert_not_reached();
    return 0;
}
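The per-level counts simply multiply up the topology hierarchy; with hypothetical -smp values:

/* threads_per_core = 2, cores_per_module = 4, modules_per_die = 2,
 * dies_per_pkg = 1:
 *   THREAD -> 1
 *   CORE   -> 2
 *   MODULE -> 2 * 4         = 8
 *   DIE    -> 2 * 4 * 2     = 16
 *   SOCKET -> 2 * 4 * 2 * 1 = 16
 */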
static uint32_t apicid_offset_by_topo_level(X86CPUTopoInfo *topo_info,
- enum CPUTopoLevel topo_level)
+ enum CpuTopologyLevel topo_level)
{
switch (topo_level) {
- case CPU_TOPO_LEVEL_SMT:
+ case CPU_TOPOLOGY_LEVEL_THREAD:
return 0;
- case CPU_TOPO_LEVEL_CORE:
+ case CPU_TOPOLOGY_LEVEL_CORE:
return apicid_core_offset(topo_info);
- case CPU_TOPO_LEVEL_MODULE:
+ case CPU_TOPOLOGY_LEVEL_MODULE:
return apicid_module_offset(topo_info);
- case CPU_TOPO_LEVEL_DIE:
+ case CPU_TOPOLOGY_LEVEL_DIE:
return apicid_die_offset(topo_info);
- case CPU_TOPO_LEVEL_PACKAGE:
+ case CPU_TOPOLOGY_LEVEL_SOCKET:
return apicid_pkg_offset(topo_info);
default:
g_assert_not_reached();
return 0;
}
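These offsets reflect how QEMU packs the x86 APIC ID, lowest field first:

/* APIC ID layout (low to high bits) as built by the apicid_*_offset()
 * helpers:
 *   [ package ][ die ][ module ][ core ][ thread/SMT ]
 * Each level's offset is the lowest bit of its field, so THREAD is
 * always 0 and every higher level starts where the previous field
 * ends. */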
-static uint32_t cpuid1f_topo_type(enum CPUTopoLevel topo_level)
+static uint32_t cpuid1f_topo_type(enum CpuTopologyLevel topo_level)
{
switch (topo_level) {
- case CPU_TOPO_LEVEL_INVALID:
+ case CPU_TOPOLOGY_LEVEL_INVALID:
return CPUID_1F_ECX_TOPO_LEVEL_INVALID;
- case CPU_TOPO_LEVEL_SMT:
+ case CPU_TOPOLOGY_LEVEL_THREAD:
return CPUID_1F_ECX_TOPO_LEVEL_SMT;
- case CPU_TOPO_LEVEL_CORE:
+ case CPU_TOPOLOGY_LEVEL_CORE:
return CPUID_1F_ECX_TOPO_LEVEL_CORE;
- case CPU_TOPO_LEVEL_MODULE:
+ case CPU_TOPOLOGY_LEVEL_MODULE:
return CPUID_1F_ECX_TOPO_LEVEL_MODULE;
- case CPU_TOPO_LEVEL_DIE:
+ case CPU_TOPOLOGY_LEVEL_DIE:
return CPUID_1F_ECX_TOPO_LEVEL_DIE;
default:
    /* Other types are not supported in QEMU. */
    g_assert_not_reached();
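/*
 * For reference, the CPUID_1F_ECX_TOPO_LEVEL_* constants follow the
 * level-type encoding of CPUID.1FH ECX[15:8] in the Intel SDM:
 * 0 invalid, 1 SMT, 2 core, 3 module, 4 tile, 5 die.  Tile (4) has
 * no QEMU counterpart, so it is never generated.
 */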
unsigned long level, base_level, next_level;
uint32_t num_threads_next_level, offset_next_level;
- assert(count <= CPU_TOPO_LEVEL_PACKAGE);
+ assert(count <= CPU_TOPOLOGY_LEVEL_SOCKET);
/*
* Find the (count + 1)-th topology level in the avail_cpu_topo bitmap.
- * The search starts from bit 0 (CPU_TOPO_LEVEL_SMT).
+ * The search starts from bit 0 (CPU_TOPOLOGY_LEVEL_THREAD).
*/
- level = CPU_TOPO_LEVEL_SMT;
+ level = CPU_TOPOLOGY_LEVEL_THREAD;
base_level = level;
for (int i = 0; i <= count; i++) {
level = find_next_bit(env->avail_cpu_topo,
- CPU_TOPO_LEVEL_PACKAGE,
+ CPU_TOPOLOGY_LEVEL_SOCKET,
base_level);
/*
* CPUID[0x1f] doesn't explicitly encode the package level,
* and it just encodes the invalid level (all fields are 0)
* into the last subleaf of 0x1f.
*/
- if (level == CPU_TOPO_LEVEL_PACKAGE) {
- level = CPU_TOPO_LEVEL_INVALID;
+ if (level == CPU_TOPOLOGY_LEVEL_SOCKET) {
+ level = CPU_TOPOLOGY_LEVEL_INVALID;
break;
}
/* Search the next level. */
base_level = level + 1;
}
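/*
 * Reminder of the bitops contract relied on here: find_next_bit(map,
 * size, start) returns the first set bit >= start, or `size` when no
 * bit is found.  Passing CPU_TOPOLOGY_LEVEL_SOCKET as the size is what
 * makes the "level == CPU_TOPOLOGY_LEVEL_SOCKET" check in the loop
 * above mean "no level left below the package".
 */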
- if (level == CPU_TOPO_LEVEL_INVALID) {
+ if (level == CPU_TOPOLOGY_LEVEL_INVALID) {
num_threads_next_level = 0;
offset_next_level = 0;
} else {
next_level = find_next_bit(env->avail_cpu_topo,
- CPU_TOPO_LEVEL_PACKAGE,
+ CPU_TOPOLOGY_LEVEL_SOCKET,
level + 1);
num_threads_next_level = num_threads_by_topo_level(topo_info,
next_level);
.sets = 64,
.partitions = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
};
/* FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
.partitions = 1,
.lines_per_tag = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
};
/* L1 instruction cache: */
.sets = 64,
.partitions = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
};
/* FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
.partitions = 1,
.lines_per_tag = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
};
/* Level 2 unified cache: */
.sets = 4096,
.partitions = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
};
/* FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
.size = 2 * MiB,
.line_size = 64,
.associativity = 8,
- .share_level = CPU_TOPO_LEVEL_INVALID,
+ .share_level = CPU_TOPOLOGY_LEVEL_INVALID,
};
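/*
 * Presumably why INVALID is safe here: CPUID leaf 2 descriptors don't
 * encode cache sharing at all, so this legacy struct never flows into
 * max_thread_ids_for_cache() and its share_level is never consulted.
 */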
.associativity = 16,
.sets = 512,
.partitions = 1,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
};
/* Level 3 unified cache: */
.self_init = true,
.inclusive = true,
.complex_indexing = true,
- .share_level = CPU_TOPO_LEVEL_DIE,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
};
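share_level is what ties these cache tables back to max_thread_ids_for_cache(); a sketch of the assumed wiring when a leaf-4-style subleaf is encoded, mirroring the package-level fallback later in this patch:

/* Hypothetical encode-helper fragment: report the cache as shared by
 * all APIC IDs in the domain named by share_level (EAX[25:14]). */
*eax |= max_thread_ids_for_cache(topo_info, cache->share_level) << 14;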
/* TLB definitions: */
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l1i_cache = &(CPUCacheInfo) {
.type = INSTRUCTION_CACHE,
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l2_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
.partitions = 1,
.sets = 1024,
.lines_per_tag = 1,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l3_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
.self_init = true,
.inclusive = true,
.complex_indexing = true,
- .share_level = CPU_TOPO_LEVEL_DIE,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
},
};
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l1i_cache = &(CPUCacheInfo) {
.type = INSTRUCTION_CACHE,
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l2_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
.partitions = 1,
.sets = 1024,
.lines_per_tag = 1,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l3_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
.self_init = true,
.inclusive = true,
.complex_indexing = false,
- .share_level = CPU_TOPO_LEVEL_DIE,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
},
};
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l1i_cache = &(CPUCacheInfo) {
.type = INSTRUCTION_CACHE,
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l2_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
.partitions = 1,
.sets = 1024,
.lines_per_tag = 1,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l3_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
.self_init = true,
.inclusive = true,
.complex_indexing = true,
- .share_level = CPU_TOPO_LEVEL_DIE,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
},
};
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l1i_cache = &(CPUCacheInfo) {
.type = INSTRUCTION_CACHE,
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l2_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
.partitions = 1,
.sets = 1024,
.lines_per_tag = 1,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l3_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
.self_init = true,
.inclusive = true,
.complex_indexing = false,
- .share_level = CPU_TOPO_LEVEL_DIE,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
},
};
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l1i_cache = &(CPUCacheInfo) {
.type = INSTRUCTION_CACHE,
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l2_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
.partitions = 1,
.sets = 1024,
.lines_per_tag = 1,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l3_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
.self_init = true,
.inclusive = true,
.complex_indexing = true,
- .share_level = CPU_TOPO_LEVEL_DIE,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
},
};
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l1i_cache = &(CPUCacheInfo) {
.type = INSTRUCTION_CACHE,
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l2_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
.partitions = 1,
.sets = 1024,
.lines_per_tag = 1,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l3_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
.self_init = true,
.inclusive = true,
.complex_indexing = false,
- .share_level = CPU_TOPO_LEVEL_DIE,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
},
};
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l1i_cache = &(CPUCacheInfo) {
.type = INSTRUCTION_CACHE,
.lines_per_tag = 1,
.self_init = 1,
.no_invd_sharing = true,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l2_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
.partitions = 1,
.sets = 2048,
.lines_per_tag = 1,
- .share_level = CPU_TOPO_LEVEL_CORE,
+ .share_level = CPU_TOPOLOGY_LEVEL_CORE,
},
.l3_cache = &(CPUCacheInfo) {
.type = UNIFIED_CACHE,
.self_init = true,
.inclusive = true,
.complex_indexing = false,
- .share_level = CPU_TOPO_LEVEL_DIE,
+ .share_level = CPU_TOPOLOGY_LEVEL_DIE,
},
};
/* Share the cache at package level. */
*eax |= max_thread_ids_for_cache(&topo_info,
- CPU_TOPO_LEVEL_PACKAGE) << 14;
+ CPU_TOPOLOGY_LEVEL_SOCKET) << 14;
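/*
 * EAX[25:14] is "maximum number of addressable IDs for logical
 * processors sharing this cache", stored minus one per the Intel SDM,
 * hence the << 14.  A guest recovers the count as
 * ((eax >> 14) & 0xfff) + 1.
 */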
}
}
} else if (cpu->vendor_cpuid_only && IS_AMD_CPU(env)) {
env->nr_modules = 1;
env->nr_dies = 1;
- /* SMT, core and package levels are set by default. */
- set_bit(CPU_TOPO_LEVEL_SMT, env->avail_cpu_topo);
- set_bit(CPU_TOPO_LEVEL_CORE, env->avail_cpu_topo);
- set_bit(CPU_TOPO_LEVEL_PACKAGE, env->avail_cpu_topo);
+ /* Thread, core and socket levels are set by default. */
+ set_bit(CPU_TOPOLOGY_LEVEL_THREAD, env->avail_cpu_topo);
+ set_bit(CPU_TOPOLOGY_LEVEL_CORE, env->avail_cpu_topo);
+ set_bit(CPU_TOPOLOGY_LEVEL_SOCKET, env->avail_cpu_topo);
}
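Module and die bits are deliberately not set here; presumably the machine code enables them only when -smp actually configures those levels, along these (hypothetical) lines:

/* Assumed wiring elsewhere (illustrative, not part of this patch): */
if (ms->smp.modules > 1) {
    env->nr_modules = ms->smp.modules;
    set_bit(CPU_TOPOLOGY_LEVEL_MODULE, env->avail_cpu_topo);
}
if (ms->smp.dies > 1) {
    env->nr_dies = ms->smp.dies;
    set_bit(CPU_TOPOLOGY_LEVEL_DIE, env->avail_cpu_topo);
}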
static void x86_cpu_initfn(Object *obj)