/*
 * QEMU Host Memory Backend
 *
 * Copyright (C) 2013-2014 Red Hat Inc
 *
 * Authors:
 *   Igor Mammedov <imammedo@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "sysemu/hostmem.h"
#include "hw/boards.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "qapi-types.h"
#include "qapi-visit.h"
#include "qemu/config-file.h"
#include "qom/object_interfaces.h"

#ifdef CONFIG_NUMA
#include <numaif.h>
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_DEFAULT != MPOL_DEFAULT);
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_PREFERRED != MPOL_PREFERRED);
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_BIND != MPOL_BIND);
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_INTERLEAVE != MPOL_INTERLEAVE);
#endif

static void
host_memory_backend_get_size(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint64_t value = backend->size;

    visit_type_size(v, name, &value, errp);
}

static void
host_memory_backend_set_size(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    Error *local_err = NULL;
    uint64_t value;

    if (memory_region_size(&backend->mr)) {
        error_setg(&local_err, "cannot change property value");
        goto out;
    }

    visit_type_size(v, name, &value, &local_err);
    if (local_err) {
        goto out;
    }
    if (!value) {
        error_setg(&local_err, "Property '%s.%s' doesn't take value '%"
                   PRIu64 "'", object_get_typename(obj), name, value);
        goto out;
    }
    backend->size = value;
out:
    error_propagate(errp, local_err);
}

static uint16List **host_memory_append_node(uint16List **node,
                                            unsigned long value)
{
    *node = g_malloc0(sizeof(**node));
    (*node)->value = value;
    return &(*node)->next;
}

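/* Getter for the "host-nodes" property: walk the host_nodes bitmap and
 * return every set bit as an element of a uint16 list via the visitor. */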
static void
host_memory_backend_get_host_nodes(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint16List *host_nodes = NULL;
    uint16List **node = &host_nodes;
    unsigned long value;

    value = find_first_bit(backend->host_nodes, MAX_NODES);

    node = host_memory_append_node(node, value);

    if (value == MAX_NODES) {
        goto out;
    }

    do {
        value = find_next_bit(backend->host_nodes, MAX_NODES, value + 1);
        if (value == MAX_NODES) {
            break;
        }

        node = host_memory_append_node(node, value);
    } while (true);

out:
    visit_type_uint16List(v, name, &host_nodes, errp);
}

static void
host_memory_backend_set_host_nodes(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
#ifdef CONFIG_NUMA
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint16List *l = NULL;

    visit_type_uint16List(v, name, &l, errp);

    while (l) {
        bitmap_set(backend->host_nodes, l->value, 1);
        l = l->next;
    }
#else
    error_setg(errp, "NUMA node binding is not supported by this QEMU");
#endif
}

static int
host_memory_backend_get_policy(Object *obj, Error **errp G_GNUC_UNUSED)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    return backend->policy;
}

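/* Setter for the "policy" property; when QEMU is built without CONFIG_NUMA,
 * only the "default" policy is accepted. */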
static void
host_memory_backend_set_policy(Object *obj, int policy, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    backend->policy = policy;

#ifndef CONFIG_NUMA
    if (policy != HOST_MEM_POLICY_DEFAULT) {
        error_setg(errp, "NUMA policies are not supported by this QEMU");
    }
#endif
}

static bool host_memory_backend_get_merge(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->merge;
}

static void host_memory_backend_set_merge(Object *obj, bool value, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    if (!memory_region_size(&backend->mr)) {
        backend->merge = value;
        return;
    }

    if (value != backend->merge) {
        void *ptr = memory_region_get_ram_ptr(&backend->mr);
        uint64_t sz = memory_region_size(&backend->mr);

        qemu_madvise(ptr, sz,
                     value ? QEMU_MADV_MERGEABLE : QEMU_MADV_UNMERGEABLE);
        backend->merge = value;
    }
}

static bool host_memory_backend_get_dump(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->dump;
}

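/* Setter for the "dump" property: if the region is already allocated, apply
 * QEMU_MADV_DODUMP/QEMU_MADV_DONTDUMP to the existing mapping as well. */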
static void host_memory_backend_set_dump(Object *obj, bool value, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    if (!memory_region_size(&backend->mr)) {
        backend->dump = value;
        return;
    }

    if (value != backend->dump) {
        void *ptr = memory_region_get_ram_ptr(&backend->mr);
        uint64_t sz = memory_region_size(&backend->mr);

        qemu_madvise(ptr, sz,
                     value ? QEMU_MADV_DODUMP : QEMU_MADV_DONTDUMP);
        backend->dump = value;
    }
}

static bool host_memory_backend_get_prealloc(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->prealloc || backend->force_prealloc;
}

static void host_memory_backend_set_prealloc(Object *obj, bool value,
                                             Error **errp)
{
    Error *local_err = NULL;
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    if (backend->force_prealloc) {
        if (value) {
            error_setg(errp,
                       "remove -mem-prealloc to use the prealloc property");
            return;
        }
    }

    if (!memory_region_size(&backend->mr)) {
        backend->prealloc = value;
        return;
    }

    if (value && !backend->prealloc) {
        int fd = memory_region_get_fd(&backend->mr);
        void *ptr = memory_region_get_ram_ptr(&backend->mr);
        uint64_t sz = memory_region_size(&backend->mr);

        os_mem_prealloc(fd, ptr, sz, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        backend->prealloc = true;
    }
}

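/* Instance init: seed merge/dump/prealloc from the machine/global defaults
 * and register the user-visible properties.  TYPE_MEMORY_BACKEND itself is
 * abstract; users instantiate a concrete subclass (for example
 * memory-backend-ram, defined elsewhere), along the lines of:
 *
 *   -object memory-backend-ram,id=mem0,size=1G,policy=bind,host-nodes=0
 */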
static void host_memory_backend_init(Object *obj)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    MachineState *machine = MACHINE(qdev_get_machine());

    backend->merge = machine_mem_merge(machine);
    backend->dump = machine_dump_guest_core(machine);
    backend->prealloc = mem_prealloc;

    object_property_add_bool(obj, "merge",
                             host_memory_backend_get_merge,
                             host_memory_backend_set_merge, NULL);
    object_property_add_bool(obj, "dump",
                             host_memory_backend_get_dump,
                             host_memory_backend_set_dump, NULL);
    object_property_add_bool(obj, "prealloc",
                             host_memory_backend_get_prealloc,
                             host_memory_backend_set_prealloc, NULL);
    object_property_add(obj, "size", "int",
                        host_memory_backend_get_size,
                        host_memory_backend_set_size, NULL, NULL, NULL);
    object_property_add(obj, "host-nodes", "int",
                        host_memory_backend_get_host_nodes,
                        host_memory_backend_set_host_nodes, NULL, NULL, NULL);
    object_property_add_enum(obj, "policy", "HostMemPolicy",
                             HostMemPolicy_lookup,
                             host_memory_backend_get_policy,
                             host_memory_backend_set_policy, NULL);
}

MemoryRegion *
host_memory_backend_get_memory(HostMemoryBackend *backend, Error **errp)
{
    return memory_region_size(&backend->mr) ? &backend->mr : NULL;
}

void host_memory_backend_set_mapped(HostMemoryBackend *backend, bool mapped)
{
    backend->is_mapped = mapped;
}

bool host_memory_backend_is_mapped(HostMemoryBackend *backend)
{
    return backend->is_mapped;
}

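/* UserCreatable::complete: runs once all properties have been set.  The
 * subclass alloc() hook creates the region; merge/dump advice, the NUMA
 * binding and, last of all, preallocation are then applied to it. */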
static void
host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(uc);
    HostMemoryBackendClass *bc = MEMORY_BACKEND_GET_CLASS(uc);
    Error *local_err = NULL;
    void *ptr;
    uint64_t sz;

    if (bc->alloc) {
        bc->alloc(backend, &local_err);
        if (local_err) {
            goto out;
        }

        ptr = memory_region_get_ram_ptr(&backend->mr);
        sz = memory_region_size(&backend->mr);

        if (backend->merge) {
            qemu_madvise(ptr, sz, QEMU_MADV_MERGEABLE);
        }
        if (!backend->dump) {
            qemu_madvise(ptr, sz, QEMU_MADV_DONTDUMP);
        }
#ifdef CONFIG_NUMA
        unsigned long lastbit = find_last_bit(backend->host_nodes, MAX_NODES);
        /* lastbit == MAX_NODES means maxnode = 0 */
        unsigned long maxnode = (lastbit + 1) % (MAX_NODES + 1);
        /* ensure policy won't be ignored in case memory is preallocated
         * before mbind(). note: MPOL_MF_STRICT is ignored on hugepages so
         * this doesn't catch hugepage case. */
        unsigned flags = MPOL_MF_STRICT | MPOL_MF_MOVE;

        /* check for invalid host-nodes and policies and give more verbose
         * error messages than mbind(). */
        if (maxnode && backend->policy == MPOL_DEFAULT) {
            error_setg(errp, "host-nodes must be empty for policy default,"
                       " or you should explicitly specify a policy other"
                       " than default");
            return;
        } else if (maxnode == 0 && backend->policy != MPOL_DEFAULT) {
            error_setg(errp, "host-nodes must be set for policy %s",
                       HostMemPolicy_lookup[backend->policy]);
            return;
        }

        /* We can have up to MAX_NODES nodes, but we need to pass maxnode+1
         * as argument to mbind() due to an old Linux bug (feature?) which
         * cuts off the last specified node. This means backend->host_nodes
         * must have MAX_NODES+1 bits available.
         */
        assert(sizeof(backend->host_nodes) >=
               BITS_TO_LONGS(MAX_NODES + 1) * sizeof(unsigned long));
        assert(maxnode <= MAX_NODES);
        if (mbind(ptr, sz, backend->policy,
                  maxnode ? backend->host_nodes : NULL, maxnode + 1, flags)) {
            if (backend->policy != MPOL_DEFAULT || errno != ENOSYS) {
                error_setg_errno(errp, errno,
                                 "cannot bind memory to host NUMA nodes");
                return;
            }
        }
#endif
        /* Preallocate memory after the NUMA policy has been instantiated.
         * This is necessary to guarantee memory is allocated with
         * specified NUMA policy in place.
         */
        if (backend->prealloc) {
            os_mem_prealloc(memory_region_get_fd(&backend->mr), ptr, sz,
                            &local_err);
            if (local_err) {
                goto out;
            }
        }
    }
out:
    error_propagate(errp, local_err);
}

static bool
host_memory_backend_can_be_deleted(UserCreatable *uc, Error **errp)
{
    if (host_memory_backend_is_mapped(MEMORY_BACKEND(uc))) {
        return false;
    } else {
        return true;
    }
}

static void
host_memory_backend_class_init(ObjectClass *oc, void *data)
{
    UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);

    ucc->complete = host_memory_backend_memory_complete;
    ucc->can_be_deleted = host_memory_backend_can_be_deleted;
}

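/* TYPE_MEMORY_BACKEND is an abstract base type: concrete backends provide
 * the alloc() hook and are what users actually create via -object. */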
static const TypeInfo host_memory_backend_info = {
    .name = TYPE_MEMORY_BACKEND,
    .parent = TYPE_OBJECT,
    .abstract = true,
    .class_size = sizeof(HostMemoryBackendClass),
    .class_init = host_memory_backend_class_init,
    .instance_size = sizeof(HostMemoryBackend),
    .instance_init = host_memory_backend_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_USER_CREATABLE },
        { }
    }
};

static void register_types(void)
{
    type_register_static(&host_memory_backend_info);
}

type_init(register_types);