/*
 * QEMU Host Memory Backend
 *
 * Copyright (C) 2013-2014 Red Hat Inc
 *
 * Authors:
 *   Igor Mammedov <imammedo@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "sysemu/hostmem.h"
#include "hw/boards.h"
#include "qapi/visitor.h"
#include "qapi-types.h"
#include "qapi-visit.h"
#include "qemu/config-file.h"
#include "qom/object_interfaces.h"

#ifdef CONFIG_NUMA
#include <numaif.h>
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_DEFAULT != MPOL_DEFAULT);
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_PREFERRED != MPOL_PREFERRED);
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_BIND != MPOL_BIND);
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_INTERLEAVE != MPOL_INTERLEAVE);
#endif

static void
host_memory_backend_get_size(Object *obj, Visitor *v, void *opaque,
                             const char *name, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint64_t value = backend->size;

    visit_type_size(v, name, &value, errp);
}

static void
host_memory_backend_set_size(Object *obj, Visitor *v, void *opaque,
                             const char *name, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    Error *local_err = NULL;
    uint64_t value;

    if (memory_region_size(&backend->mr)) {
        error_setg(&local_err, "cannot change property value");
        goto out;
    }

    visit_type_size(v, name, &value, &local_err);
    if (local_err) {
        goto out;
    }
    if (!value) {
        error_setg(&local_err, "Property '%s.%s' doesn't take value '%"
                   PRIu64 "'", object_get_typename(obj), name, value);
        goto out;
    }
    backend->size = value;
out:
    error_propagate(errp, local_err);
}
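
/*
 * "host-nodes" getter: walk the backend's host_nodes bitmap and convert
 * every set bit into an element of a uint16List, which is then handed to
 * the visitor.  Nothing is emitted when no node has been selected.
 */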
static void
host_memory_backend_get_host_nodes(Object *obj, Visitor *v, void *opaque,
                                   const char *name, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint16List *host_nodes = NULL;
    uint16List **node = &host_nodes;
    unsigned long value;

    value = find_first_bit(backend->host_nodes, MAX_NODES);
    if (value == MAX_NODES) {
        return;
    }

    *node = g_malloc0(sizeof(**node));
    (*node)->value = value;
    node = &(*node)->next;

    do {
        value = find_next_bit(backend->host_nodes, MAX_NODES, value + 1);
        if (value == MAX_NODES) {
            break;
        }

        *node = g_malloc0(sizeof(**node));
        (*node)->value = value;
        node = &(*node)->next;
    } while (true);

    visit_type_uint16List(v, name, &host_nodes, errp);
}

static void
host_memory_backend_set_host_nodes(Object *obj, Visitor *v, void *opaque,
                                   const char *name, Error **errp)
{
#ifdef CONFIG_NUMA
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint16List *l = NULL;

    visit_type_uint16List(v, name, &l, errp);

    while (l) {
        bitmap_set(backend->host_nodes, l->value, 1);
        l = l->next;
    }
#else
    error_setg(errp, "NUMA node binding is not supported by this QEMU");
#endif
}

static int
host_memory_backend_get_policy(Object *obj, Error **errp G_GNUC_UNUSED)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    return backend->policy;
}
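
/*
 * "policy" setter: the property is a HostMemPolicy enum.  Without
 * CONFIG_NUMA only the default policy is meaningful, so any other value
 * raises an error.
 */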
static void
host_memory_backend_set_policy(Object *obj, int policy, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    backend->policy = policy;

#ifndef CONFIG_NUMA
    if (policy != HOST_MEM_POLICY_DEFAULT) {
        error_setg(errp, "NUMA policies are not supported by this QEMU");
    }
#endif
}

static bool host_memory_backend_get_merge(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->merge;
}

static void host_memory_backend_set_merge(Object *obj, bool value, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    if (!memory_region_size(&backend->mr)) {
        backend->merge = value;
        return;
    }

    if (value != backend->merge) {
        void *ptr = memory_region_get_ram_ptr(&backend->mr);
        uint64_t sz = memory_region_size(&backend->mr);

        qemu_madvise(ptr, sz,
                     value ? QEMU_MADV_MERGEABLE : QEMU_MADV_UNMERGEABLE);
        backend->merge = value;
    }
}

static bool host_memory_backend_get_dump(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->dump;
}

static void host_memory_backend_set_dump(Object *obj, bool value, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    if (!memory_region_size(&backend->mr)) {
        backend->dump = value;
        return;
    }

    if (value != backend->dump) {
        void *ptr = memory_region_get_ram_ptr(&backend->mr);
        uint64_t sz = memory_region_size(&backend->mr);

        qemu_madvise(ptr, sz,
                     value ? QEMU_MADV_DODUMP : QEMU_MADV_DONTDUMP);
        backend->dump = value;
    }
}
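
/*
 * "prealloc" reads as true whenever -mem-prealloc forces preallocation
 * globally; in that case an explicit prealloc=on is rejected.  Enabling
 * it on an already allocated backend touches the memory right away via
 * os_mem_prealloc().
 */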
static bool host_memory_backend_get_prealloc(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->prealloc || backend->force_prealloc;
}

static void host_memory_backend_set_prealloc(Object *obj, bool value,
                                             Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    if (backend->force_prealloc) {
        if (value) {
            error_setg(errp,
                       "remove -mem-prealloc to use the prealloc property");
            return;
        }
    }

    if (!memory_region_size(&backend->mr)) {
        backend->prealloc = value;
        return;
    }

    if (value && !backend->prealloc) {
        int fd = memory_region_get_fd(&backend->mr);
        void *ptr = memory_region_get_ram_ptr(&backend->mr);
        uint64_t sz = memory_region_size(&backend->mr);

        os_mem_prealloc(fd, ptr, sz);
        backend->prealloc = true;
    }
}

static void host_memory_backend_init(Object *obj)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    MachineState *machine = MACHINE(qdev_get_machine());

    backend->merge = machine_mem_merge(machine);
    backend->dump = machine_dump_guest_core(machine);
    backend->prealloc = mem_prealloc;

    object_property_add_bool(obj, "merge",
                             host_memory_backend_get_merge,
                             host_memory_backend_set_merge, NULL);
    object_property_add_bool(obj, "dump",
                             host_memory_backend_get_dump,
                             host_memory_backend_set_dump, NULL);
    object_property_add_bool(obj, "prealloc",
                             host_memory_backend_get_prealloc,
                             host_memory_backend_set_prealloc, NULL);
    object_property_add(obj, "size", "int",
                        host_memory_backend_get_size,
                        host_memory_backend_set_size, NULL, NULL, NULL);
    object_property_add(obj, "host-nodes", "int",
                        host_memory_backend_get_host_nodes,
                        host_memory_backend_set_host_nodes, NULL, NULL, NULL);
    object_property_add_enum(obj, "policy", "HostMemPolicy",
                             HostMemPolicy_lookup,
                             host_memory_backend_get_policy,
                             host_memory_backend_set_policy, NULL);
}
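
/*
 * Return the backend's MemoryRegion, or NULL if it has not been
 * allocated yet (allocation happens in the complete() hook below).
 */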
MemoryRegion *
host_memory_backend_get_memory(HostMemoryBackend *backend, Error **errp)
{
    return memory_region_size(&backend->mr) ? &backend->mr : NULL;
}

static void
host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(uc);
    HostMemoryBackendClass *bc = MEMORY_BACKEND_GET_CLASS(uc);
    Error *local_err = NULL;
    void *ptr;
    uint64_t sz;

    if (bc->alloc) {
        bc->alloc(backend, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }

        ptr = memory_region_get_ram_ptr(&backend->mr);
        sz = memory_region_size(&backend->mr);

        if (backend->merge) {
            qemu_madvise(ptr, sz, QEMU_MADV_MERGEABLE);
        }
        if (!backend->dump) {
            qemu_madvise(ptr, sz, QEMU_MADV_DONTDUMP);
        }
#ifdef CONFIG_NUMA
        unsigned long lastbit = find_last_bit(backend->host_nodes, MAX_NODES);
        /* lastbit == MAX_NODES means maxnode = 0 */
        unsigned long maxnode = (lastbit + 1) % (MAX_NODES + 1);
        /* ensure policy won't be ignored in case memory is preallocated
         * before mbind(). note: MPOL_MF_STRICT is ignored on hugepages so
         * this doesn't catch hugepage case. */
        unsigned flags = MPOL_MF_STRICT | MPOL_MF_MOVE;

        /* check for invalid host-nodes and policies and give more verbose
         * error messages than mbind(). */
        if (maxnode && backend->policy == MPOL_DEFAULT) {
            error_setg(errp, "host-nodes must be empty for policy default,"
                       " or you should explicitly specify a policy other"
                       " than default");
            return;
        } else if (maxnode == 0 && backend->policy != MPOL_DEFAULT) {
            error_setg(errp, "host-nodes must be set for policy %s",
                       HostMemPolicy_lookup[backend->policy]);
            return;
        }

        /* We can have up to MAX_NODES nodes, but we need to pass maxnode+1
         * as argument to mbind() due to an old Linux bug (feature?) which
         * cuts off the last specified node. This means backend->host_nodes
         * must have MAX_NODES+1 bits available.
         */
        assert(sizeof(backend->host_nodes) >=
               BITS_TO_LONGS(MAX_NODES + 1) * sizeof(unsigned long));
        assert(maxnode <= MAX_NODES);
        if (mbind(ptr, sz, backend->policy,
                  maxnode ? backend->host_nodes : NULL, maxnode + 1, flags)) {
            if (backend->policy != MPOL_DEFAULT || errno != ENOSYS) {
                error_setg_errno(errp, errno,
                                 "cannot bind memory to host NUMA nodes");
                return;
            }
        }
#endif
        /* Preallocate memory after the NUMA policy has been instantiated.
         * This is necessary to guarantee memory is allocated with
         * specified NUMA policy in place.
         */
        if (backend->prealloc) {
            os_mem_prealloc(memory_region_get_fd(&backend->mr), ptr, sz);
        }
    }
}
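
/* A backend may only be deleted while its memory region is not mapped. */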
static bool
host_memory_backend_can_be_deleted(UserCreatable *uc, Error **errp)
{
    MemoryRegion *mr;

    mr = host_memory_backend_get_memory(MEMORY_BACKEND(uc), errp);
    if (memory_region_is_mapped(mr)) {
        return false;
    } else {
        return true;
    }
}

static void
host_memory_backend_class_init(ObjectClass *oc, void *data)
{
    UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);

    ucc->complete = host_memory_backend_memory_complete;
    ucc->can_be_deleted = host_memory_backend_can_be_deleted;
}

static const TypeInfo host_memory_backend_info = {
    .name = TYPE_MEMORY_BACKEND,
    .parent = TYPE_OBJECT,
    .abstract = true,
    .class_size = sizeof(HostMemoryBackendClass),
    .class_init = host_memory_backend_class_init,
    .instance_size = sizeof(HostMemoryBackend),
    .instance_init = host_memory_backend_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_USER_CREATABLE },
        { }
    }
};

static void register_types(void)
{
    type_register_static(&host_memory_backend_info);
}

type_init(register_types);