/*
 * QEMU Host Memory Backend
 *
 * Copyright (C) 2013-2014 Red Hat Inc
 *
 * Authors:
 *   Igor Mammedov <imammedo@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "sysemu/hostmem.h"
#include "hw/boards.h"
#include "qapi/error.h"
#include "qapi/qapi-builtin-visit.h"
#include "qapi/visitor.h"
#include "qemu/config-file.h"
#include "qom/object_interfaces.h"
#include "qemu/mmap-alloc.h"
#include "qemu/madvise.h"
#include "hw/qdev-core.h"

#ifdef CONFIG_NUMA
#include <numaif.h>
#include <numa.h>
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_DEFAULT != MPOL_DEFAULT);
/*
 * HOST_MEM_POLICY_PREFERRED may either translate to MPOL_PREFERRED or
 * MPOL_PREFERRED_MANY, see comments further below.
 */
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_PREFERRED != MPOL_PREFERRED);
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_BIND != MPOL_BIND);
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_INTERLEAVE != MPOL_INTERLEAVE);
#endif

char *
host_memory_backend_get_name(HostMemoryBackend *backend)
{
    if (!backend->use_canonical_path) {
        return g_strdup(object_get_canonical_path_component(OBJECT(backend)));
    }

    return object_get_canonical_path(OBJECT(backend));
}

static void
host_memory_backend_get_size(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint64_t value = backend->size;

    visit_type_size(v, name, &value, errp);
}

static void
host_memory_backend_set_size(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint64_t value;

    if (host_memory_backend_mr_inited(backend)) {
        error_setg(errp, "cannot change property %s of %s", name,
                   object_get_typename(obj));
        return;
    }

    if (!visit_type_size(v, name, &value, errp)) {
        return;
    }
    if (!value) {
        error_setg(errp,
                   "property '%s' of %s doesn't take value '%" PRIu64 "'",
                   name, object_get_typename(obj), value);
        return;
    }
    backend->size = value;
}
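
/*
 * Illustrative usage note (not part of the original file): "size" is the one
 * mandatory property of every host memory backend, e.g.
 *
 *   -object memory-backend-ram,id=mem0,size=4G
 *
 * As the setter above enforces, the value must be non-zero and can no longer
 * be changed once the backing memory region has been initialized.
 */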

static void
host_memory_backend_get_host_nodes(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint16List *host_nodes = NULL;
    uint16List **tail = &host_nodes;
    unsigned long value;

    value = find_first_bit(backend->host_nodes, MAX_NODES);
    if (value == MAX_NODES) {
        goto ret;
    }

    QAPI_LIST_APPEND(tail, value);

    do {
        value = find_next_bit(backend->host_nodes, MAX_NODES, value + 1);
        if (value == MAX_NODES) {
            break;
        }

        QAPI_LIST_APPEND(tail, value);
    } while (true);

ret:
    visit_type_uint16List(v, name, &host_nodes, errp);
    qapi_free_uint16List(host_nodes);
}

static void
host_memory_backend_set_host_nodes(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
#ifdef CONFIG_NUMA
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint16List *l, *host_nodes = NULL;

    if (!visit_type_uint16List(v, name, &host_nodes, errp)) {
        return;
    }

    for (l = host_nodes; l; l = l->next) {
        if (l->value >= MAX_NODES) {
            error_setg(errp, "Invalid host-nodes value: %d", l->value);
            goto out;
        }
    }

    for (l = host_nodes; l; l = l->next) {
        bitmap_set(backend->host_nodes, l->value, 1);
    }

out:
    qapi_free_uint16List(host_nodes);
#else
    error_setg(errp, "NUMA node binding is not supported by this QEMU");
#endif
}

static int
host_memory_backend_get_policy(Object *obj, Error **errp G_GNUC_UNUSED)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    return backend->policy;
}

static void
host_memory_backend_set_policy(Object *obj, int policy, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    backend->policy = policy;

#ifndef CONFIG_NUMA
    if (policy != HOST_MEM_POLICY_DEFAULT) {
        error_setg(errp, "NUMA policies are not supported by this QEMU");
    }
#endif
}
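
/*
 * Illustrative usage note (not part of the original file): host-nodes and
 * policy are usually given together; in the JSON form of -object (accepted
 * since QEMU 6.0) a binding to host node 0 looks like
 *
 *   -object '{"qom-type": "memory-backend-ram", "id": "mem0",
 *             "size": 4294967296, "policy": "bind", "host-nodes": [0]}'
 *
 * The consistency of the pair is only checked later, in
 * host_memory_backend_memory_complete(), because either property may be
 * set first.
 */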

static bool host_memory_backend_get_merge(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->merge;
}

static void host_memory_backend_set_merge(Object *obj, bool value, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    if (!host_memory_backend_mr_inited(backend)) {
        backend->merge = value;
        return;
    }

    if (value != backend->merge) {
        void *ptr = memory_region_get_ram_ptr(&backend->mr);
        uint64_t sz = memory_region_size(&backend->mr);

        qemu_madvise(ptr, sz,
                     value ? QEMU_MADV_MERGEABLE : QEMU_MADV_UNMERGEABLE);
        backend->merge = value;
    }
}

static bool host_memory_backend_get_dump(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->dump;
}

static void host_memory_backend_set_dump(Object *obj, bool value, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    if (!host_memory_backend_mr_inited(backend)) {
        backend->dump = value;
        return;
    }

    if (value != backend->dump) {
        void *ptr = memory_region_get_ram_ptr(&backend->mr);
        uint64_t sz = memory_region_size(&backend->mr);

        qemu_madvise(ptr, sz,
                     value ? QEMU_MADV_DODUMP : QEMU_MADV_DONTDUMP);
        backend->dump = value;
    }
}
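
/*
 * Illustrative note (not part of the original file): both setters above share
 * one pattern: before the memory region exists the value is merely recorded,
 * afterwards it is applied immediately via qemu_madvise(). On Linux hosts,
 *
 *   -object memory-backend-ram,id=mem0,size=4G,merge=on,dump=off
 *
 * translates to MADV_MERGEABLE (KSM candidate) and MADV_DONTDUMP.
 */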

static bool host_memory_backend_get_prealloc(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->prealloc;
}

static void host_memory_backend_set_prealloc(Object *obj, bool value,
                                             Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    if (!backend->reserve && value) {
        error_setg(errp, "'prealloc=on' and 'reserve=off' are incompatible");
        return;
    }

    if (!host_memory_backend_mr_inited(backend)) {
        backend->prealloc = value;
        return;
    }

    if (value && !backend->prealloc) {
        int fd = memory_region_get_fd(&backend->mr);
        void *ptr = memory_region_get_ram_ptr(&backend->mr);
        uint64_t sz = memory_region_size(&backend->mr);

        if (!qemu_prealloc_mem(fd, ptr, sz, backend->prealloc_threads,
                               backend->prealloc_context, false, errp)) {
            return;
        }
        backend->prealloc = true;
    }
}

static void host_memory_backend_get_prealloc_threads(Object *obj, Visitor *v,
    const char *name, void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    visit_type_uint32(v, name, &backend->prealloc_threads, errp);
}

static void host_memory_backend_set_prealloc_threads(Object *obj, Visitor *v,
    const char *name, void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }
    if (value == 0) {
        error_setg(errp, "property '%s' of %s doesn't take value '%d'", name,
                   object_get_typename(obj), value);
        return;
    }
    backend->prealloc_threads = value;
}

static void host_memory_backend_init(Object *obj)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    MachineState *machine = MACHINE(qdev_get_machine());

    /* TODO: convert access to globals to compat properties */
    backend->merge = machine_mem_merge(machine);
    backend->dump = machine_dump_guest_core(machine);
    backend->reserve = true;
    backend->prealloc_threads = machine->smp.cpus;
}
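
/*
 * Illustrative usage note (not part of the original file): preallocation can
 * be parallelized, and the worker threads can be pinned via a thread-context
 * object, e.g.
 *
 *   -object thread-context,id=tc1,node-affinity=0
 *   -object memory-backend-ram,id=mem0,size=8G,prealloc=on,
 *           prealloc-threads=16,prealloc-context=tc1
 *
 * (one -object argument, wrapped here for readability). As the setter above
 * shows, enabling prealloc after the region already exists triggers an
 * immediate, synchronous preallocation.
 */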

static void host_memory_backend_post_init(Object *obj)
{
    object_apply_compat_props(obj);
}

bool host_memory_backend_mr_inited(HostMemoryBackend *backend)
{
    /*
     * NOTE: We forbid zero-length memory backend, so here zero means
     * "we haven't inited the backend memory region yet".
     */
    return memory_region_size(&backend->mr) != 0;
}

MemoryRegion *host_memory_backend_get_memory(HostMemoryBackend *backend)
{
    return host_memory_backend_mr_inited(backend) ? &backend->mr : NULL;
}

void host_memory_backend_set_mapped(HostMemoryBackend *backend, bool mapped)
{
    backend->is_mapped = mapped;
}

bool host_memory_backend_is_mapped(HostMemoryBackend *backend)
{
    return backend->is_mapped;
}

size_t host_memory_backend_pagesize(HostMemoryBackend *memdev)
{
    size_t pagesize = qemu_ram_pagesize(memdev->mr.ram_block);
    g_assert(pagesize >= qemu_real_host_page_size());
    return pagesize;
}
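
/*
 * Illustrative note (not part of the original file): for file-backed memory
 * on hugetlbfs, host_memory_backend_pagesize() reports the huge page size
 * (e.g. 2 MiB) rather than the base page size; callers use it to enforce
 * page-alignment constraints on sizes and addresses.
 */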

static void
host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(uc);
    HostMemoryBackendClass *bc = MEMORY_BACKEND_GET_CLASS(uc);
    void *ptr;
    uint64_t sz;
    bool async = !phase_check(PHASE_LATE_BACKENDS_CREATED);

    if (!bc->alloc) {
        return;
    }
    if (!bc->alloc(backend, errp)) {
        return;
    }

    ptr = memory_region_get_ram_ptr(&backend->mr);
    sz = memory_region_size(&backend->mr);

    if (backend->merge) {
        qemu_madvise(ptr, sz, QEMU_MADV_MERGEABLE);
    }
    if (!backend->dump) {
        qemu_madvise(ptr, sz, QEMU_MADV_DONTDUMP);
    }
#ifdef CONFIG_NUMA
    unsigned long lastbit = find_last_bit(backend->host_nodes, MAX_NODES);
    /* lastbit == MAX_NODES means maxnode = 0 */
    unsigned long maxnode = (lastbit + 1) % (MAX_NODES + 1);
    /*
     * Ensure policy won't be ignored in case memory is preallocated
     * before mbind(). Note: MPOL_MF_STRICT is ignored on hugepages, so
     * this doesn't catch the hugepage case.
     */
    unsigned flags = MPOL_MF_STRICT | MPOL_MF_MOVE;
    int mode = backend->policy;

    /*
     * Check for invalid host-nodes and policies and give more verbose
     * error messages than mbind().
     */
    if (maxnode && backend->policy == MPOL_DEFAULT) {
        error_setg(errp, "host-nodes must be empty for policy default,"
                   " or you should explicitly specify a policy other"
                   " than default");
        return;
    } else if (maxnode == 0 && backend->policy != MPOL_DEFAULT) {
        error_setg(errp, "host-nodes must be set for policy %s",
                   HostMemPolicy_str(backend->policy));
        return;
    }

    /*
     * We can have up to MAX_NODES nodes, but we need to pass maxnode+1
     * as argument to mbind() due to an old Linux bug (feature?) which
     * cuts off the last specified node. This means backend->host_nodes
     * must have MAX_NODES+1 bits available.
     */
    assert(sizeof(backend->host_nodes) >=
           BITS_TO_LONGS(MAX_NODES + 1) * sizeof(unsigned long));
    assert(maxnode <= MAX_NODES);

#ifdef HAVE_NUMA_HAS_PREFERRED_MANY
    if (mode == MPOL_PREFERRED && numa_has_preferred_many() > 0) {
        /*
         * Replace with MPOL_PREFERRED_MANY otherwise the mbind() below
         * silently picks the first node.
         */
        mode = MPOL_PREFERRED_MANY;
    }
#endif

    if (maxnode &&
        mbind(ptr, sz, mode, backend->host_nodes, maxnode + 1, flags)) {
        if (backend->policy != MPOL_DEFAULT || errno != ENOSYS) {
            error_setg_errno(errp, errno,
                             "cannot bind memory to host NUMA nodes");
            return;
        }
    }
#endif
    /*
     * Preallocate memory after the NUMA policy has been instantiated.
     * This is necessary to guarantee memory is allocated with
     * specified NUMA policy in place.
     */
    if (backend->prealloc && !qemu_prealloc_mem(memory_region_get_fd(&backend->mr),
                                                ptr, sz,
                                                backend->prealloc_threads,
                                                backend->prealloc_context,
                                                async, errp)) {
        return;
    }
}
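
/*
 * Note on ordering (added commentary, not in the original file): mbind()
 * applies the policy to pages faulted in afterwards and, with MPOL_MF_MOVE,
 * tries to migrate pages that are already present. Preallocating before
 * binding could therefore populate pages on the wrong host nodes, which is
 * why qemu_prealloc_mem() deliberately runs after the mbind() call above.
 */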

static bool
host_memory_backend_can_be_deleted(UserCreatable *uc)
{
    return !host_memory_backend_is_mapped(MEMORY_BACKEND(uc));
}

static bool host_memory_backend_get_share(Object *o, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(o);

    return backend->share;
}

static void host_memory_backend_set_share(Object *o, bool value, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(o);

    if (host_memory_backend_mr_inited(backend)) {
        error_setg(errp, "cannot change property value");
        return;
    }
    backend->share = value;
}

#ifdef CONFIG_LINUX
static bool host_memory_backend_get_reserve(Object *o, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(o);

    return backend->reserve;
}

static void host_memory_backend_set_reserve(Object *o, bool value, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(o);

    if (host_memory_backend_mr_inited(backend)) {
        error_setg(errp, "cannot change property value");
        return;
    }
    if (backend->prealloc && !value) {
        error_setg(errp, "'prealloc=on' and 'reserve=off' are incompatible");
        return;
    }
    backend->reserve = value;
}
#endif /* CONFIG_LINUX */

static bool
host_memory_backend_get_use_canonical_path(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->use_canonical_path;
}

static void
host_memory_backend_set_use_canonical_path(Object *obj, bool value,
                                           Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    backend->use_canonical_path = value;
}
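
/*
 * Illustrative usage note (not part of the original file): share=on makes
 * the region visible to other processes (MAP_SHARED), as required by
 * vhost-user backends, and reserve=off (Linux-only) skips swap or huge-page
 * reservation, which is why it is cross-checked against prealloc above:
 *
 *   -object memory-backend-memfd,id=mem0,size=4G,share=on
 */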

static void
host_memory_backend_class_init(ObjectClass *oc, void *data)
{
    UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);

    ucc->complete = host_memory_backend_memory_complete;
    ucc->can_be_deleted = host_memory_backend_can_be_deleted;

    object_class_property_add_bool(oc, "merge",
        host_memory_backend_get_merge,
        host_memory_backend_set_merge);
    object_class_property_set_description(oc, "merge",
        "Mark memory as mergeable");
    object_class_property_add_bool(oc, "dump",
        host_memory_backend_get_dump,
        host_memory_backend_set_dump);
    object_class_property_set_description(oc, "dump",
        "Set to 'off' to exclude from core dump");
    object_class_property_add_bool(oc, "prealloc",
        host_memory_backend_get_prealloc,
        host_memory_backend_set_prealloc);
    object_class_property_set_description(oc, "prealloc",
        "Preallocate memory");
    object_class_property_add(oc, "prealloc-threads", "int",
        host_memory_backend_get_prealloc_threads,
        host_memory_backend_set_prealloc_threads,
        NULL, NULL);
    object_class_property_set_description(oc, "prealloc-threads",
        "Number of CPU threads to use for prealloc");
    object_class_property_add_link(oc, "prealloc-context",
        TYPE_THREAD_CONTEXT, offsetof(HostMemoryBackend, prealloc_context),
        object_property_allow_set_link, OBJ_PROP_LINK_STRONG);
    object_class_property_set_description(oc, "prealloc-context",
        "Context to use for creating CPU threads for preallocation");
    object_class_property_add(oc, "size", "int",
        host_memory_backend_get_size,
        host_memory_backend_set_size,
        NULL, NULL);
    object_class_property_set_description(oc, "size",
        "Size of the memory region (ex: 500M)");
    object_class_property_add(oc, "host-nodes", "int",
        host_memory_backend_get_host_nodes,
        host_memory_backend_set_host_nodes,
        NULL, NULL);
    object_class_property_set_description(oc, "host-nodes",
        "Binds memory to the list of NUMA host nodes");
    object_class_property_add_enum(oc, "policy", "HostMemPolicy",
        &HostMemPolicy_lookup,
        host_memory_backend_get_policy,
        host_memory_backend_set_policy);
    object_class_property_set_description(oc, "policy",
        "Set the NUMA policy");
    object_class_property_add_bool(oc, "share",
        host_memory_backend_get_share, host_memory_backend_set_share);
    object_class_property_set_description(oc, "share",
        "Mark the memory as private to QEMU or shared");
#ifdef CONFIG_LINUX
    object_class_property_add_bool(oc, "reserve",
        host_memory_backend_get_reserve, host_memory_backend_set_reserve);
    object_class_property_set_description(oc, "reserve",
        "Reserve swap space (or huge pages) if applicable");
#endif /* CONFIG_LINUX */
    /*
     * Do not delete/rename option. This option must be considered stable
     * (as if it didn't have the 'x-' prefix including deprecation period) as
     * long as 4.0 and older machine types exist.
     * Option will be used by upper layers to override (disable) canonical path
     * for ramblock-id set by compat properties on old machine types (<= 4.0),
     * to keep migration working when backend is used for main RAM with
     * -machine memory-backend= option (main RAM historically used prefix-less
     * ramblock-id).
     */
    object_class_property_add_bool(oc, "x-use-canonical-path-for-ramblock-id",
        host_memory_backend_get_use_canonical_path,
        host_memory_backend_set_use_canonical_path);
}
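
/*
 * Note (added commentary, not in the original file): TYPE_MEMORY_BACKEND is
 * abstract, so the properties registered above are inherited by the concrete
 * backends such as memory-backend-ram, memory-backend-file and
 * memory-backend-memfd, each of which provides its own
 * HostMemoryBackendClass::alloc implementation.
 */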
"HostMemPolicy", 529f7abe0ecSMarc-André Lureau &HostMemPolicy_lookup, 530e62834caSEduardo Habkost host_memory_backend_get_policy, 531d2623129SMarkus Armbruster host_memory_backend_set_policy); 532033bfc5eSMarc-André Lureau object_class_property_set_description(oc, "policy", 5337eecec7dSMarkus Armbruster "Set the NUMA policy"); 53406329cceSMarcel Apfelbaum object_class_property_add_bool(oc, "share", 535d2623129SMarkus Armbruster host_memory_backend_get_share, host_memory_backend_set_share); 536033bfc5eSMarc-André Lureau object_class_property_set_description(oc, "share", 5377eecec7dSMarkus Armbruster "Mark the memory as private to QEMU or shared"); 5389181fb70SDavid Hildenbrand #ifdef CONFIG_LINUX 5399181fb70SDavid Hildenbrand object_class_property_add_bool(oc, "reserve", 5409181fb70SDavid Hildenbrand host_memory_backend_get_reserve, host_memory_backend_set_reserve); 5419181fb70SDavid Hildenbrand object_class_property_set_description(oc, "reserve", 5429181fb70SDavid Hildenbrand "Reserve swap space (or huge pages) if applicable"); 5439181fb70SDavid Hildenbrand #endif /* CONFIG_LINUX */ 5448db0b204SIgor Mammedov /* 5458db0b204SIgor Mammedov * Do not delete/rename option. This option must be considered stable 5468db0b204SIgor Mammedov * (as if it didn't have the 'x-' prefix including deprecation period) as 5478db0b204SIgor Mammedov * long as 4.0 and older machine types exists. 5488db0b204SIgor Mammedov * Option will be used by upper layers to override (disable) canonical path 5498db0b204SIgor Mammedov * for ramblock-id set by compat properties on old machine types ( <= 4.0), 5508db0b204SIgor Mammedov * to keep migration working when backend is used for main RAM with 5518db0b204SIgor Mammedov * -machine memory-backend= option (main RAM historically used prefix-less 5528db0b204SIgor Mammedov * ramblock-id). 5538db0b204SIgor Mammedov */ 554fa0cb34dSMarc-André Lureau object_class_property_add_bool(oc, "x-use-canonical-path-for-ramblock-id", 555fa0cb34dSMarc-André Lureau host_memory_backend_get_use_canonical_path, 556d2623129SMarkus Armbruster host_memory_backend_set_use_canonical_path); 557e1ff3c67SIgor Mammedov } 558e1ff3c67SIgor Mammedov 55958f4662cSHu Tao static const TypeInfo host_memory_backend_info = { 5601f070489SIgor Mammedov .name = TYPE_MEMORY_BACKEND, 5611f070489SIgor Mammedov .parent = TYPE_OBJECT, 5621f070489SIgor Mammedov .abstract = true, 5631f070489SIgor Mammedov .class_size = sizeof(HostMemoryBackendClass), 564bd9262d9SHu Tao .class_init = host_memory_backend_class_init, 5651f070489SIgor Mammedov .instance_size = sizeof(HostMemoryBackend), 56658f4662cSHu Tao .instance_init = host_memory_backend_init, 567fa0cb34dSMarc-André Lureau .instance_post_init = host_memory_backend_post_init, 5681f070489SIgor Mammedov .interfaces = (InterfaceInfo[]) { 5691f070489SIgor Mammedov { TYPE_USER_CREATABLE }, 5701f070489SIgor Mammedov { } 5711f070489SIgor Mammedov } 5721f070489SIgor Mammedov }; 5731f070489SIgor Mammedov 5741f070489SIgor Mammedov static void register_types(void) 5751f070489SIgor Mammedov { 57658f4662cSHu Tao type_register_static(&host_memory_backend_info); 5771f070489SIgor Mammedov } 5781f070489SIgor Mammedov 5791f070489SIgor Mammedov type_init(register_types); 580