{
HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(backend);
#ifdef CONFIG_POSIX
- gchar *path;
+ gchar *name;
#endif
if (!backend->size) {
error_setg(errp, "-mem-path not supported on this host");
#else
backend->force_prealloc = mem_prealloc;
- path = object_get_canonical_path(OBJECT(backend));
+ name = host_memory_backend_get_name(backend);
memory_region_init_ram_from_file(&backend->mr, OBJECT(backend),
- path,
+ name,
backend->size, fb->align,
(backend->share ? RAM_SHARED : 0) |
(fb->is_pmem ? RAM_PMEM : 0),
fb->mem_path, errp);
- g_free(path);
+ g_free(name);
#endif
}
return;
}
- name = object_get_canonical_path(OBJECT(backend));
+ name = host_memory_backend_get_name(backend);
memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend),
name, backend->size,
backend->share, fd, errp);
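
The two hunks above (apparently backends/hostmem-file.c and backends/hostmem-memfd.c) only swap object_get_canonical_path() for the new host_memory_backend_get_name() helper; the name is still a freshly allocated string, which is why the g_free() calls stay in place. As a rough standalone illustration of what the two naming schemes produce for a backend created with something like -object memory-backend-memfd,id=mem0,size=1G, here is a sketch using plain glib and invented stand-ins for the QOM path helpers (not QEMU code):

    #include <glib.h>
    #include <stdio.h>

    /* Invented stand-in for object_get_canonical_path(): user-created
     * backends normally sit under /objects in the QOM composition tree. */
    static gchar *sketch_canonical_path(const char *id)
    {
        return g_strdup_printf("/objects/%s", id);
    }

    /* Invented stand-in for object_get_canonical_path_component(). */
    static gchar *sketch_path_component(const char *id)
    {
        return g_strdup(id);
    }

    int main(void)
    {
        gchar *old_name = sketch_canonical_path("mem0");  /* pre-patch name      */
        gchar *new_name = sketch_path_component("mem0");  /* new default (4.0+)  */

        printf("old RAMBlock name: %s\n", old_name);      /* /objects/mem0 */
        printf("new RAMBlock name: %s\n", new_name);      /* mem0          */

        g_free(old_name);
        g_free(new_name);
        return 0;
    }
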
#define TYPE_MEMORY_BACKEND_RAM "memory-backend-ram"
-
static void
ram_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
{
- char *path;
+ char *name;
if (!backend->size) {
error_setg(errp, "can't create backend with size 0");
return;
}
- path = object_get_canonical_path_component(OBJECT(backend));
- memory_region_init_ram_shared_nomigrate(&backend->mr, OBJECT(backend), path,
+ name = host_memory_backend_get_name(backend);
+ memory_region_init_ram_shared_nomigrate(&backend->mr, OBJECT(backend), name,
backend->size, backend->share, errp);
- g_free(path);
+ g_free(name);
}
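
Stitching the ram-backend hunk (apparently backends/hostmem-ram.c) back together, the patched function would read roughly as below; this is reconstructed from the context and '+' lines above, not copied from the tree. The removed line used object_get_canonical_path_component(), so with the new property left at its default the generated name should be exactly what it was before, which is presumably why no memory-backend-ram entry appears in the hw_compat_3_1 hunk further down.

    static void
    ram_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
    {
        char *name;

        if (!backend->size) {
            error_setg(errp, "can't create backend with size 0");
            return;
        }

        /* With use_canonical_path at its default this returns the same string
         * the old object_get_canonical_path_component() call produced. */
        name = host_memory_backend_get_name(backend);
        memory_region_init_ram_shared_nomigrate(&backend->mr, OBJECT(backend), name,
                                                backend->size, backend->share, errp);
        g_free(name);
    }
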
static void
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_INTERLEAVE != MPOL_INTERLEAVE);
#endif
+char *
+host_memory_backend_get_name(HostMemoryBackend *backend)
+{
+ if (!backend->use_canonical_path) {
+ return object_get_canonical_path_component(OBJECT(backend));
+ }
+
+ return object_get_canonical_path(OBJECT(backend));
+}
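
host_memory_backend_get_name() (apparently added to backends/hostmem.c) is the single point that chooses between the object's plain id and its full canonical path. A likely reason the choice is tied to a compat property at all: the migration stream identifies each RAMBlock by its name string, so source and destination must derive the same name for the same backend. A standalone sketch of the mismatch the property is there to avoid (plain C, invented values, not QEMU code):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* Names as they would be generated for -object ...,id=mem0 */
        const char *src_name = "/objects/mem0"; /* 3.1 source: canonical path        */
        const char *dst_name = "mem0";          /* 4.0 target without compat: the id */

        if (strcmp(src_name, dst_name) != 0) {
            printf("RAMBlock names differ; migrating this block would fail\n");
        }
        return 0;
    }

With the property forced back on for 3.1-era machine types (see the hw_compat_3_1 hunk below), a backend with id mem0 is named /objects/mem0 on both sides again.
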
+
static void
host_memory_backend_get_size(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
backend->prealloc = mem_prealloc;
}
+static void host_memory_backend_post_init(Object *obj)
+{
+ object_apply_compat_props(obj);
+}
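
host_memory_backend_post_init() becomes the type's instance_post_init hook (wired up in the TypeInfo change below). For objects created with -object, that hook runs right after instance_init and before the command-line properties are applied, so the machine type's compat globals sit between the built-in default and any explicit user setting. A standalone model of that precedence, with invented names and the machine-type check reduced to a hard-coded flag:

    #include <stdbool.h>
    #include <stdio.h>

    /* Invented stand-in for the backend: only the field we care about. */
    typedef struct {
        bool use_canonical_path;
    } SketchBackend;

    int main(void)
    {
        SketchBackend b = {0};            /* instance_init: bool starts out false  */

        bool machine_3_1_or_older = true; /* assumption for this run               */
        bool user_forced_off = false;     /* x-use-...=off given on -object?       */

        if (machine_3_1_or_older) {
            b.use_canonical_path = true;  /* post_init: compat global applied      */
        }
        if (user_forced_off) {
            b.use_canonical_path = false; /* explicit property is set last, wins   */
        }

        printf("use_canonical_path = %s\n", b.use_canonical_path ? "true" : "false");
        return 0;
    }
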
+
bool host_memory_backend_mr_inited(HostMemoryBackend *backend)
{
/*
backend->share = value;
}
+static bool
+host_memory_backend_get_use_canonical_path(Object *obj, Error **errp)
+{
+ HostMemoryBackend *backend = MEMORY_BACKEND(obj);
+
+ return backend->use_canonical_path;
+}
+
+static void
+host_memory_backend_set_use_canonical_path(Object *obj, bool value,
+ Error **errp)
+{
+ HostMemoryBackend *backend = MEMORY_BACKEND(obj);
+
+ backend->use_canonical_path = value;
+}
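
The new accessors are a plain pass-through to the use_canonical_path field added to the struct at the end of this patch; the Object * parameter, the MEMORY_BACKEND() downcast and the unused errp are just the usual QOM boilerplate. Stripped of that plumbing, the pattern is no more than this standalone sketch (invented type, not QEMU code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Invented stand-in type: only the field the accessors touch. */
    typedef struct {
        bool use_canonical_path;
    } SketchBackend;

    static bool sketch_get(SketchBackend *b)             { return b->use_canonical_path; }
    static void sketch_set(SketchBackend *b, bool value) { b->use_canonical_path = value; }

    int main(void)
    {
        SketchBackend b = {0};
        sketch_set(&b, true);
        printf("use_canonical_path=%d\n", sketch_get(&b));
        return 0;
    }
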
+
static void
host_memory_backend_class_init(ObjectClass *oc, void *data)
{
&error_abort);
object_class_property_set_description(oc, "share",
"Mark the memory as private to QEMU or shared", &error_abort);
+ object_class_property_add_bool(oc, "x-use-canonical-path-for-ramblock-id",
+ host_memory_backend_get_use_canonical_path,
+ host_memory_backend_set_use_canonical_path, &error_abort);
}
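
Registering the pair as a class property turns the flag into an ordinary per-object bool; the x- prefix follows QEMU's convention for internal or unstable knobs. It is normally driven by the compat machinery below rather than typed by users, although as a regular property it could presumably also be pinned per backend, for example by appending x-use-canonical-path-for-ramblock-id=on to a memory-backend-file's -object options.
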
static const TypeInfo host_memory_backend_info = {
.class_init = host_memory_backend_class_init,
.instance_size = sizeof(HostMemoryBackend),
.instance_init = host_memory_backend_init,
+ .instance_post_init = host_memory_backend_post_init,
.interfaces = (InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
.driver = "pcie-root-port",
.property = "x-width",
.value = "1",
+ },{
+ .driver = "memory-backend-file",
+ .property = "x-use-canonical-path-for-ramblock-id",
+ .value = "true",
+ },{
+ .driver = "memory-backend-memfd",
+ .property = "x-use-canonical-path-for-ramblock-id",
+ .value = "true",
},
};
const size_t hw_compat_3_1_len = G_N_ELEMENTS(hw_compat_3_1);
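
hw_compat_3_1 (in hw/core/machine.c, presumably) is the list of GlobalProperty records that machine types 3.1 and older apply to every instance of the named driver, so file and memfd backends keep their old canonical-path RAMBlock names under those machine types while 4.0+ machine types switch to the plain object id. A much-simplified standalone model of what applying such an entry to a newly created backend amounts to (invented types, driver-name match only; the real code also matches the property name and handles subclasses):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Invented mirror of the two entries added above. */
    typedef struct {
        const char *driver;
        const char *property;
        const char *value;
    } SketchGlobal;

    static const SketchGlobal sketch_compat_3_1[] = {
        { "memory-backend-file",  "x-use-canonical-path-for-ramblock-id", "true" },
        { "memory-backend-memfd", "x-use-canonical-path-for-ramblock-id", "true" },
    };

    int main(void)
    {
        const char *driver = "memory-backend-file"; /* type of the new backend */
        bool use_canonical_path = false;            /* 4.0+ built-in default   */

        for (size_t i = 0;
             i < sizeof(sketch_compat_3_1) / sizeof(sketch_compat_3_1[0]); i++) {
            if (strcmp(sketch_compat_3_1[i].driver, driver) == 0) {
                use_canonical_path = strcmp(sketch_compat_3_1[i].value, "true") == 0;
            }
        }

        printf("use_canonical_path = %s\n", use_canonical_path ? "true" : "false");
        return 0;
    }
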
/* protected */
uint64_t size;
- bool merge, dump;
+ bool merge, dump, use_canonical_path;
bool prealloc, force_prealloc, is_mapped, share;
DECLARE_BITMAP(host_nodes, MAX_NODES + 1);
HostMemPolicy policy;
void host_memory_backend_set_mapped(HostMemoryBackend *backend, bool mapped);
bool host_memory_backend_is_mapped(HostMemoryBackend *backend);
size_t host_memory_backend_pagesize(HostMemoryBackend *memdev);
+char *host_memory_backend_get_name(HostMemoryBackend *backend);
#endif
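
The header side (apparently include/sysemu/hostmem.h) just adds the bool to HostMemoryBackend and exports the helper. None of the hunks shown initialise use_canonical_path explicitly, so it presumably relies on the usual zero-initialisation of QOM instances and defaults to false: new machine types get the object id unless a compat global or an explicit property flips it. The effect should be easy to check at runtime, since the monitor's info ramblock command lists blocks by name, so the same backend would show up as mem0 under a 4.0 machine type and as /objects/mem0 under 3.1 and older ones.
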