11f070489SIgor Mammedov /* 21f070489SIgor Mammedov * QEMU Host Memory Backend 31f070489SIgor Mammedov * 41f070489SIgor Mammedov * Copyright (C) 2013-2014 Red Hat Inc 51f070489SIgor Mammedov * 61f070489SIgor Mammedov * Authors: 71f070489SIgor Mammedov * Igor Mammedov <imammedo@redhat.com> 81f070489SIgor Mammedov * 91f070489SIgor Mammedov * This work is licensed under the terms of the GNU GPL, version 2 or later. 101f070489SIgor Mammedov * See the COPYING file in the top-level directory. 111f070489SIgor Mammedov */ 121f070489SIgor Mammedov #include "sysemu/hostmem.h" 131f070489SIgor Mammedov #include "qapi/visitor.h" 14*4cf1b76bSHu Tao #include "qapi-types.h" 15*4cf1b76bSHu Tao #include "qapi-visit.h" 161f070489SIgor Mammedov #include "qapi/qmp/qerror.h" 171f070489SIgor Mammedov #include "qemu/config-file.h" 181f070489SIgor Mammedov #include "qom/object_interfaces.h" 191f070489SIgor Mammedov 20*4cf1b76bSHu Tao #ifdef CONFIG_NUMA 21*4cf1b76bSHu Tao #include <numaif.h> 22*4cf1b76bSHu Tao QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_DEFAULT != MPOL_DEFAULT); 23*4cf1b76bSHu Tao QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_PREFERRED != MPOL_PREFERRED); 24*4cf1b76bSHu Tao QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_BIND != MPOL_BIND); 25*4cf1b76bSHu Tao QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_INTERLEAVE != MPOL_INTERLEAVE); 26*4cf1b76bSHu Tao #endif 27*4cf1b76bSHu Tao 281f070489SIgor Mammedov static void 2958f4662cSHu Tao host_memory_backend_get_size(Object *obj, Visitor *v, void *opaque, 301f070489SIgor Mammedov const char *name, Error **errp) 311f070489SIgor Mammedov { 321f070489SIgor Mammedov HostMemoryBackend *backend = MEMORY_BACKEND(obj); 331f070489SIgor Mammedov uint64_t value = backend->size; 341f070489SIgor Mammedov 351f070489SIgor Mammedov visit_type_size(v, &value, name, errp); 361f070489SIgor Mammedov } 371f070489SIgor Mammedov 381f070489SIgor Mammedov static void 3958f4662cSHu Tao host_memory_backend_set_size(Object *obj, Visitor *v, void *opaque, 401f070489SIgor Mammedov const char *name, Error 
**errp) 411f070489SIgor Mammedov { 421f070489SIgor Mammedov HostMemoryBackend *backend = MEMORY_BACKEND(obj); 431f070489SIgor Mammedov Error *local_err = NULL; 441f070489SIgor Mammedov uint64_t value; 451f070489SIgor Mammedov 461f070489SIgor Mammedov if (memory_region_size(&backend->mr)) { 471f070489SIgor Mammedov error_setg(&local_err, "cannot change property value"); 481f070489SIgor Mammedov goto out; 491f070489SIgor Mammedov } 501f070489SIgor Mammedov 511f070489SIgor Mammedov visit_type_size(v, &value, name, &local_err); 521f070489SIgor Mammedov if (local_err) { 531f070489SIgor Mammedov goto out; 541f070489SIgor Mammedov } 551f070489SIgor Mammedov if (!value) { 561f070489SIgor Mammedov error_setg(&local_err, "Property '%s.%s' doesn't take value '%" 571f070489SIgor Mammedov PRIu64 "'", object_get_typename(obj), name, value); 581f070489SIgor Mammedov goto out; 591f070489SIgor Mammedov } 601f070489SIgor Mammedov backend->size = value; 611f070489SIgor Mammedov out: 621f070489SIgor Mammedov error_propagate(errp, local_err); 631f070489SIgor Mammedov } 641f070489SIgor Mammedov 65*4cf1b76bSHu Tao static void 66*4cf1b76bSHu Tao host_memory_backend_get_host_nodes(Object *obj, Visitor *v, void *opaque, 67*4cf1b76bSHu Tao const char *name, Error **errp) 68*4cf1b76bSHu Tao { 69*4cf1b76bSHu Tao HostMemoryBackend *backend = MEMORY_BACKEND(obj); 70*4cf1b76bSHu Tao uint16List *host_nodes = NULL; 71*4cf1b76bSHu Tao uint16List **node = &host_nodes; 72*4cf1b76bSHu Tao unsigned long value; 73*4cf1b76bSHu Tao 74*4cf1b76bSHu Tao value = find_first_bit(backend->host_nodes, MAX_NODES); 75*4cf1b76bSHu Tao if (value == MAX_NODES) { 76*4cf1b76bSHu Tao return; 77*4cf1b76bSHu Tao } 78*4cf1b76bSHu Tao 79*4cf1b76bSHu Tao *node = g_malloc0(sizeof(**node)); 80*4cf1b76bSHu Tao (*node)->value = value; 81*4cf1b76bSHu Tao node = &(*node)->next; 82*4cf1b76bSHu Tao 83*4cf1b76bSHu Tao do { 84*4cf1b76bSHu Tao value = find_next_bit(backend->host_nodes, MAX_NODES, value + 1); 85*4cf1b76bSHu Tao if (value 
== MAX_NODES) { 86*4cf1b76bSHu Tao break; 87*4cf1b76bSHu Tao } 88*4cf1b76bSHu Tao 89*4cf1b76bSHu Tao *node = g_malloc0(sizeof(**node)); 90*4cf1b76bSHu Tao (*node)->value = value; 91*4cf1b76bSHu Tao node = &(*node)->next; 92*4cf1b76bSHu Tao } while (true); 93*4cf1b76bSHu Tao 94*4cf1b76bSHu Tao visit_type_uint16List(v, &host_nodes, name, errp); 95*4cf1b76bSHu Tao } 96*4cf1b76bSHu Tao 97*4cf1b76bSHu Tao static void 98*4cf1b76bSHu Tao host_memory_backend_set_host_nodes(Object *obj, Visitor *v, void *opaque, 99*4cf1b76bSHu Tao const char *name, Error **errp) 100*4cf1b76bSHu Tao { 101*4cf1b76bSHu Tao #ifdef CONFIG_NUMA 102*4cf1b76bSHu Tao HostMemoryBackend *backend = MEMORY_BACKEND(obj); 103*4cf1b76bSHu Tao uint16List *l = NULL; 104*4cf1b76bSHu Tao 105*4cf1b76bSHu Tao visit_type_uint16List(v, &l, name, errp); 106*4cf1b76bSHu Tao 107*4cf1b76bSHu Tao while (l) { 108*4cf1b76bSHu Tao bitmap_set(backend->host_nodes, l->value, 1); 109*4cf1b76bSHu Tao l = l->next; 110*4cf1b76bSHu Tao } 111*4cf1b76bSHu Tao #else 112*4cf1b76bSHu Tao error_setg(errp, "NUMA node binding are not supported by this QEMU"); 113*4cf1b76bSHu Tao #endif 114*4cf1b76bSHu Tao } 115*4cf1b76bSHu Tao 116*4cf1b76bSHu Tao static void 117*4cf1b76bSHu Tao host_memory_backend_get_policy(Object *obj, Visitor *v, void *opaque, 118*4cf1b76bSHu Tao const char *name, Error **errp) 119*4cf1b76bSHu Tao { 120*4cf1b76bSHu Tao HostMemoryBackend *backend = MEMORY_BACKEND(obj); 121*4cf1b76bSHu Tao int policy = backend->policy; 122*4cf1b76bSHu Tao 123*4cf1b76bSHu Tao visit_type_enum(v, &policy, HostMemPolicy_lookup, NULL, name, errp); 124*4cf1b76bSHu Tao } 125*4cf1b76bSHu Tao 126*4cf1b76bSHu Tao static void 127*4cf1b76bSHu Tao host_memory_backend_set_policy(Object *obj, Visitor *v, void *opaque, 128*4cf1b76bSHu Tao const char *name, Error **errp) 129*4cf1b76bSHu Tao { 130*4cf1b76bSHu Tao HostMemoryBackend *backend = MEMORY_BACKEND(obj); 131*4cf1b76bSHu Tao int policy; 132*4cf1b76bSHu Tao 133*4cf1b76bSHu Tao visit_type_enum(v, 
&policy, HostMemPolicy_lookup, NULL, name, errp); 134*4cf1b76bSHu Tao backend->policy = policy; 135*4cf1b76bSHu Tao 136*4cf1b76bSHu Tao #ifndef CONFIG_NUMA 137*4cf1b76bSHu Tao if (policy != HOST_MEM_POLICY_DEFAULT) { 138*4cf1b76bSHu Tao error_setg(errp, "NUMA policies are not supported by this QEMU"); 139*4cf1b76bSHu Tao } 140*4cf1b76bSHu Tao #endif 141*4cf1b76bSHu Tao } 142*4cf1b76bSHu Tao 143605d0a94SPaolo Bonzini static bool host_memory_backend_get_merge(Object *obj, Error **errp) 144605d0a94SPaolo Bonzini { 145605d0a94SPaolo Bonzini HostMemoryBackend *backend = MEMORY_BACKEND(obj); 146605d0a94SPaolo Bonzini 147605d0a94SPaolo Bonzini return backend->merge; 148605d0a94SPaolo Bonzini } 149605d0a94SPaolo Bonzini 150605d0a94SPaolo Bonzini static void host_memory_backend_set_merge(Object *obj, bool value, Error **errp) 151605d0a94SPaolo Bonzini { 152605d0a94SPaolo Bonzini HostMemoryBackend *backend = MEMORY_BACKEND(obj); 153605d0a94SPaolo Bonzini 154605d0a94SPaolo Bonzini if (!memory_region_size(&backend->mr)) { 155605d0a94SPaolo Bonzini backend->merge = value; 156605d0a94SPaolo Bonzini return; 157605d0a94SPaolo Bonzini } 158605d0a94SPaolo Bonzini 159605d0a94SPaolo Bonzini if (value != backend->merge) { 160605d0a94SPaolo Bonzini void *ptr = memory_region_get_ram_ptr(&backend->mr); 161605d0a94SPaolo Bonzini uint64_t sz = memory_region_size(&backend->mr); 162605d0a94SPaolo Bonzini 163605d0a94SPaolo Bonzini qemu_madvise(ptr, sz, 164605d0a94SPaolo Bonzini value ? 
QEMU_MADV_MERGEABLE : QEMU_MADV_UNMERGEABLE); 165605d0a94SPaolo Bonzini backend->merge = value; 166605d0a94SPaolo Bonzini } 167605d0a94SPaolo Bonzini } 168605d0a94SPaolo Bonzini 169605d0a94SPaolo Bonzini static bool host_memory_backend_get_dump(Object *obj, Error **errp) 170605d0a94SPaolo Bonzini { 171605d0a94SPaolo Bonzini HostMemoryBackend *backend = MEMORY_BACKEND(obj); 172605d0a94SPaolo Bonzini 173605d0a94SPaolo Bonzini return backend->dump; 174605d0a94SPaolo Bonzini } 175605d0a94SPaolo Bonzini 176605d0a94SPaolo Bonzini static void host_memory_backend_set_dump(Object *obj, bool value, Error **errp) 177605d0a94SPaolo Bonzini { 178605d0a94SPaolo Bonzini HostMemoryBackend *backend = MEMORY_BACKEND(obj); 179605d0a94SPaolo Bonzini 180605d0a94SPaolo Bonzini if (!memory_region_size(&backend->mr)) { 181605d0a94SPaolo Bonzini backend->dump = value; 182605d0a94SPaolo Bonzini return; 183605d0a94SPaolo Bonzini } 184605d0a94SPaolo Bonzini 185605d0a94SPaolo Bonzini if (value != backend->dump) { 186605d0a94SPaolo Bonzini void *ptr = memory_region_get_ram_ptr(&backend->mr); 187605d0a94SPaolo Bonzini uint64_t sz = memory_region_size(&backend->mr); 188605d0a94SPaolo Bonzini 189605d0a94SPaolo Bonzini qemu_madvise(ptr, sz, 190605d0a94SPaolo Bonzini value ? 
QEMU_MADV_DODUMP : QEMU_MADV_DONTDUMP); 191605d0a94SPaolo Bonzini backend->dump = value; 192605d0a94SPaolo Bonzini } 193605d0a94SPaolo Bonzini } 194605d0a94SPaolo Bonzini 195a35ba7beSPaolo Bonzini static bool host_memory_backend_get_prealloc(Object *obj, Error **errp) 196a35ba7beSPaolo Bonzini { 197a35ba7beSPaolo Bonzini HostMemoryBackend *backend = MEMORY_BACKEND(obj); 198a35ba7beSPaolo Bonzini 199a35ba7beSPaolo Bonzini return backend->prealloc || backend->force_prealloc; 200a35ba7beSPaolo Bonzini } 201a35ba7beSPaolo Bonzini 202a35ba7beSPaolo Bonzini static void host_memory_backend_set_prealloc(Object *obj, bool value, 203a35ba7beSPaolo Bonzini Error **errp) 204a35ba7beSPaolo Bonzini { 205a35ba7beSPaolo Bonzini HostMemoryBackend *backend = MEMORY_BACKEND(obj); 206a35ba7beSPaolo Bonzini 207a35ba7beSPaolo Bonzini if (backend->force_prealloc) { 208a35ba7beSPaolo Bonzini if (value) { 209a35ba7beSPaolo Bonzini error_setg(errp, 210a35ba7beSPaolo Bonzini "remove -mem-prealloc to use the prealloc property"); 211a35ba7beSPaolo Bonzini return; 212a35ba7beSPaolo Bonzini } 213a35ba7beSPaolo Bonzini } 214a35ba7beSPaolo Bonzini 215a35ba7beSPaolo Bonzini if (!memory_region_size(&backend->mr)) { 216a35ba7beSPaolo Bonzini backend->prealloc = value; 217a35ba7beSPaolo Bonzini return; 218a35ba7beSPaolo Bonzini } 219a35ba7beSPaolo Bonzini 220a35ba7beSPaolo Bonzini if (value && !backend->prealloc) { 221a35ba7beSPaolo Bonzini int fd = memory_region_get_fd(&backend->mr); 222a35ba7beSPaolo Bonzini void *ptr = memory_region_get_ram_ptr(&backend->mr); 223a35ba7beSPaolo Bonzini uint64_t sz = memory_region_size(&backend->mr); 224a35ba7beSPaolo Bonzini 225a35ba7beSPaolo Bonzini os_mem_prealloc(fd, ptr, sz); 226a35ba7beSPaolo Bonzini backend->prealloc = true; 227a35ba7beSPaolo Bonzini } 228a35ba7beSPaolo Bonzini } 229a35ba7beSPaolo Bonzini 23058f4662cSHu Tao static void host_memory_backend_init(Object *obj) 2311f070489SIgor Mammedov { 232605d0a94SPaolo Bonzini HostMemoryBackend *backend = 
MEMORY_BACKEND(obj); 233605d0a94SPaolo Bonzini 234605d0a94SPaolo Bonzini backend->merge = qemu_opt_get_bool(qemu_get_machine_opts(), 235605d0a94SPaolo Bonzini "mem-merge", true); 236605d0a94SPaolo Bonzini backend->dump = qemu_opt_get_bool(qemu_get_machine_opts(), 237605d0a94SPaolo Bonzini "dump-guest-core", true); 238a35ba7beSPaolo Bonzini backend->prealloc = mem_prealloc; 239605d0a94SPaolo Bonzini 240605d0a94SPaolo Bonzini object_property_add_bool(obj, "merge", 241605d0a94SPaolo Bonzini host_memory_backend_get_merge, 242605d0a94SPaolo Bonzini host_memory_backend_set_merge, NULL); 243605d0a94SPaolo Bonzini object_property_add_bool(obj, "dump", 244605d0a94SPaolo Bonzini host_memory_backend_get_dump, 245605d0a94SPaolo Bonzini host_memory_backend_set_dump, NULL); 246a35ba7beSPaolo Bonzini object_property_add_bool(obj, "prealloc", 247a35ba7beSPaolo Bonzini host_memory_backend_get_prealloc, 248a35ba7beSPaolo Bonzini host_memory_backend_set_prealloc, NULL); 2491f070489SIgor Mammedov object_property_add(obj, "size", "int", 25058f4662cSHu Tao host_memory_backend_get_size, 25158f4662cSHu Tao host_memory_backend_set_size, NULL, NULL, NULL); 252*4cf1b76bSHu Tao object_property_add(obj, "host-nodes", "int", 253*4cf1b76bSHu Tao host_memory_backend_get_host_nodes, 254*4cf1b76bSHu Tao host_memory_backend_set_host_nodes, NULL, NULL, NULL); 255*4cf1b76bSHu Tao object_property_add(obj, "policy", "str", 256*4cf1b76bSHu Tao host_memory_backend_get_policy, 257*4cf1b76bSHu Tao host_memory_backend_set_policy, NULL, NULL, NULL); 2581f070489SIgor Mammedov } 2591f070489SIgor Mammedov 26058f4662cSHu Tao static void host_memory_backend_finalize(Object *obj) 2611f070489SIgor Mammedov { 2621f070489SIgor Mammedov HostMemoryBackend *backend = MEMORY_BACKEND(obj); 2631f070489SIgor Mammedov 2641f070489SIgor Mammedov if (memory_region_size(&backend->mr)) { 2651f070489SIgor Mammedov memory_region_destroy(&backend->mr); 2661f070489SIgor Mammedov } 2671f070489SIgor Mammedov } 2681f070489SIgor Mammedov 
/*
 * Return the backend's MemoryRegion, or NULL if it has not been
 * allocated yet (i.e. the complete() hook has not run).
 */
MemoryRegion *
host_memory_backend_get_memory(HostMemoryBackend *backend, Error **errp)
{
    return memory_region_size(&backend->mr) ? &backend->mr : NULL;
}

/*
 * UserCreatable::complete implementation: allocate the backing memory
 * through the subclass alloc() hook, then apply the configured
 * merge/dump madvise flags, bind the memory to the requested host NUMA
 * nodes, and finally preallocate it if requested.
 */
static void
host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(uc);
    HostMemoryBackendClass *bc = MEMORY_BACKEND_GET_CLASS(uc);
    Error *local_err = NULL;
    void *ptr;
    uint64_t sz;

    if (bc->alloc) {
        bc->alloc(backend, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        ptr = memory_region_get_ram_ptr(&backend->mr);
        sz = memory_region_size(&backend->mr);

        if (backend->merge) {
            qemu_madvise(ptr, sz, QEMU_MADV_MERGEABLE);
        }
        if (!backend->dump) {
            qemu_madvise(ptr, sz, QEMU_MADV_DONTDUMP);
        }
#ifdef CONFIG_NUMA
        unsigned long lastbit = find_last_bit(backend->host_nodes, MAX_NODES);
        /* lastbit == MAX_NODES means maxnode = 0 */
        unsigned long maxnode = (lastbit + 1) % (MAX_NODES + 1);
        /* ensure policy won't be ignored in case memory is preallocated
         * before mbind(). note: MPOL_MF_STRICT is ignored on hugepages so
         * this doesn't catch hugepage case. */
        unsigned flags = MPOL_MF_STRICT;

        /* check for invalid host-nodes and policies and give more verbose
         * error messages than mbind(). */
        if (maxnode && backend->policy == MPOL_DEFAULT) {
            error_setg(errp, "host-nodes must be empty for policy default,"
                       " or you should explicitly specify a policy other"
                       " than default");
            return;
        } else if (maxnode == 0 && backend->policy != MPOL_DEFAULT) {
            error_setg(errp, "host-nodes must be set for policy %s",
                       HostMemPolicy_lookup[backend->policy]);
            return;
        }

        /* We can have up to MAX_NODES nodes, but we need to pass maxnode+1
         * as argument to mbind() due to an old Linux bug (feature?) which
         * cuts off the last specified node. This means backend->host_nodes
         * must have MAX_NODES+1 bits available.
         */
        assert(sizeof(backend->host_nodes) >=
               BITS_TO_LONGS(MAX_NODES + 1) * sizeof(unsigned long));
        assert(maxnode <= MAX_NODES);
        if (mbind(ptr, sz, backend->policy,
                  maxnode ? backend->host_nodes : NULL, maxnode + 1, flags)) {
            error_setg_errno(errp, errno,
                             "cannot bind memory to host NUMA nodes");
            return;
        }
#endif
        /* Preallocate memory after the NUMA policy has been instantiated.
         * This is necessary to guarantee memory is allocated with
         * specified NUMA policy in place.
         */
        if (backend->prealloc) {
            os_mem_prealloc(memory_region_get_fd(&backend->mr), ptr, sz);
        }
    }
}

/* Class init: hook memory allocation into user_creatable_complete(). */
static void
host_memory_backend_class_init(ObjectClass *oc, void *data)
{
    UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);

    ucc->complete = host_memory_backend_memory_complete;
}

/* Abstract base type; concrete backends derive from it and provide the
 * alloc() hook in their HostMemoryBackendClass. */
static const TypeInfo host_memory_backend_info = {
    .name = TYPE_MEMORY_BACKEND,
    .parent = TYPE_OBJECT,
    .abstract = true,
    .class_size = sizeof(HostMemoryBackendClass),
    .class_init = host_memory_backend_class_init,
    .instance_size = sizeof(HostMemoryBackend),
    .instance_init = host_memory_backend_init,
    .instance_finalize = host_memory_backend_finalize,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_USER_CREATABLE },
        { }
    }
};

static void register_types(void)
{
    type_register_static(&host_memory_backend_info);
}

type_init(register_types);