xref: /qemu/backends/hostmem.c (revision 9c878ad6fbfc7934c9c4f6ba607c0842202afdbb)
/*
 * QEMU Host Memory Backend
 *
 * Copyright (C) 2013-2014 Red Hat Inc
 *
 * Authors:
 *   Igor Mammedov <imammedo@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "sysemu/hostmem.h"
#include "hw/boards.h"
#include "qapi/error.h"
#include "qapi/qapi-builtin-visit.h"
#include "qapi/visitor.h"
#include "qemu/config-file.h"
#include "qom/object_interfaces.h"
#include "qemu/mmap-alloc.h"
#include "qemu/madvise.h"

#ifdef CONFIG_NUMA
#include <numaif.h>
#include <numa.h>
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_DEFAULT != MPOL_DEFAULT);
/*
 * HOST_MEM_POLICY_PREFERRED may either translate to MPOL_PREFERRED or
 * MPOL_PREFERRED_MANY, see comments further below.
 */
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_PREFERRED != MPOL_PREFERRED);
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_BIND != MPOL_BIND);
QEMU_BUILD_BUG_ON(HOST_MEM_POLICY_INTERLEAVE != MPOL_INTERLEAVE);
#endif

char *
host_memory_backend_get_name(HostMemoryBackend *backend)
{
    if (!backend->use_canonical_path) {
        return g_strdup(object_get_canonical_path_component(OBJECT(backend)));
    }

    return object_get_canonical_path(OBJECT(backend));
}

static void
host_memory_backend_get_size(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint64_t value = backend->size;

    visit_type_size(v, name, &value, errp);
}

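/*
 * Example usage (illustrative): the size property is normally given on
 * the command line, e.g.
 *
 *   -object memory-backend-ram,id=mem0,size=512M
 *
 * As the setter below enforces, it becomes read-only once the backing
 * memory region has been initialized.
 */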
static void
host_memory_backend_set_size(Object *obj, Visitor *v, const char *name,
                             void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint64_t value;

    if (host_memory_backend_mr_inited(backend)) {
        error_setg(errp, "cannot change property %s of %s", name,
                   object_get_typename(obj));
        return;
    }

    if (!visit_type_size(v, name, &value, errp)) {
        return;
    }
    if (!value) {
        error_setg(errp,
                   "property '%s' of %s doesn't take value '%" PRIu64 "'",
                   name, object_get_typename(obj), value);
        return;
    }
    backend->size = value;
}

static void
host_memory_backend_get_host_nodes(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint16List *host_nodes = NULL;
    uint16List **tail = &host_nodes;
    unsigned long value;

    /* Walk the set bits of the host_nodes bitmap and collect them in order */
    value = find_first_bit(backend->host_nodes, MAX_NODES);
    while (value != MAX_NODES) {
        QAPI_LIST_APPEND(tail, value);
        value = find_next_bit(backend->host_nodes, MAX_NODES, value + 1);
    }

    visit_type_uint16List(v, name, &host_nodes, errp);
    qapi_free_uint16List(host_nodes);
}

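/*
 * Example usage (illustrative): host-nodes takes a list or range of host
 * NUMA node ids and is normally combined with a policy, e.g.
 *
 *   -object memory-backend-ram,id=mem0,size=1G,host-nodes=0-1,policy=bind
 *
 * The setter below only validates and accumulates node ids in
 * backend->host_nodes; the actual mbind() happens later in
 * host_memory_backend_memory_complete().
 */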
static void
host_memory_backend_set_host_nodes(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
#ifdef CONFIG_NUMA
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint16List *l, *host_nodes = NULL;

    visit_type_uint16List(v, name, &host_nodes, errp);

    for (l = host_nodes; l; l = l->next) {
        if (l->value >= MAX_NODES) {
            error_setg(errp, "Invalid host-nodes value: %d", l->value);
            goto out;
        }
    }

    for (l = host_nodes; l; l = l->next) {
        bitmap_set(backend->host_nodes, l->value, 1);
    }

out:
    qapi_free_uint16List(host_nodes);
#else
    error_setg(errp, "NUMA node binding is not supported by this QEMU");
#endif
}

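/*
 * The HostMemPolicy values ("default", "preferred", "bind", "interleave")
 * map 1:1 onto the kernel's MPOL_* constants, as guaranteed by the
 * QEMU_BUILD_BUG_ON() checks at the top of this file, so backend->policy
 * can be handed to mbind() unchanged (modulo the MPOL_PREFERRED_MANY
 * substitution in host_memory_backend_memory_complete()).
 */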
static int
host_memory_backend_get_policy(Object *obj, Error **errp G_GNUC_UNUSED)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    return backend->policy;
}

static void
host_memory_backend_set_policy(Object *obj, int policy, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    backend->policy = policy;

#ifndef CONFIG_NUMA
    if (policy != HOST_MEM_POLICY_DEFAULT) {
        error_setg(errp, "NUMA policies are not supported by this QEMU");
    }
#endif
}

static bool host_memory_backend_get_merge(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->merge;
}

static void host_memory_backend_set_merge(Object *obj, bool value, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    if (!host_memory_backend_mr_inited(backend)) {
        backend->merge = value;
        return;
    }

    if (value != backend->merge) {
        void *ptr = memory_region_get_ram_ptr(&backend->mr);
        uint64_t sz = memory_region_size(&backend->mr);

        qemu_madvise(ptr, sz,
                     value ? QEMU_MADV_MERGEABLE : QEMU_MADV_UNMERGEABLE);
        backend->merge = value;
    }
}

static bool host_memory_backend_get_dump(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->dump;
}

static void host_memory_backend_set_dump(Object *obj, bool value, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    if (!host_memory_backend_mr_inited(backend)) {
        backend->dump = value;
        return;
    }

    if (value != backend->dump) {
        void *ptr = memory_region_get_ram_ptr(&backend->mr);
        uint64_t sz = memory_region_size(&backend->mr);

        qemu_madvise(ptr, sz,
                     value ? QEMU_MADV_DODUMP : QEMU_MADV_DONTDUMP);
        backend->dump = value;
    }
}

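/*
 * Note on semantics: flipping prealloc from off to on after the region
 * exists immediately faults in the whole region via qemu_prealloc_mem();
 * there is deliberately no path back, i.e. already touched pages stay
 * allocated. The setter below also rejects prealloc=on together with
 * reserve=off, mirroring the check in host_memory_backend_set_reserve().
 */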
static bool host_memory_backend_get_prealloc(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->prealloc;
}

static void host_memory_backend_set_prealloc(Object *obj, bool value,
                                             Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    if (!backend->reserve && value) {
        error_setg(errp, "'prealloc=on' and 'reserve=off' are incompatible");
        return;
    }

    if (!host_memory_backend_mr_inited(backend)) {
        backend->prealloc = value;
        return;
    }

    if (value && !backend->prealloc) {
        int fd = memory_region_get_fd(&backend->mr);
        void *ptr = memory_region_get_ram_ptr(&backend->mr);
        uint64_t sz = memory_region_size(&backend->mr);

        if (!qemu_prealloc_mem(fd, ptr, sz, backend->prealloc_threads,
                               backend->prealloc_context, errp)) {
            return;
        }
        backend->prealloc = true;
    }
}

static void host_memory_backend_get_prealloc_threads(Object *obj, Visitor *v,
    const char *name, void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    visit_type_uint32(v, name, &backend->prealloc_threads, errp);
}

static void host_memory_backend_set_prealloc_threads(Object *obj, Visitor *v,
    const char *name, void *opaque, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }
    if (value == 0) {
        error_setg(errp,
                   "property '%s' of %s doesn't take value '%" PRIu32 "'",
                   name, object_get_typename(obj), value);
        return;
    }
    backend->prealloc_threads = value;
}

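/*
 * Note (illustrative): as initialized below, prealloc-threads defaults to
 * the number of configured vCPUs, so e.g. "-smp 8" yields eight
 * preallocation threads unless prealloc-threads= is given explicitly.
 */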
static void host_memory_backend_init(Object *obj)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    MachineState *machine = MACHINE(qdev_get_machine());

    /* TODO: convert access to globals to compat properties */
    backend->merge = machine_mem_merge(machine);
    backend->dump = machine_dump_guest_core(machine);
    backend->reserve = true;
    backend->prealloc_threads = machine->smp.cpus;
}

static void host_memory_backend_post_init(Object *obj)
{
    object_apply_compat_props(obj);
}

bool host_memory_backend_mr_inited(HostMemoryBackend *backend)
{
    /*
     * NOTE: We forbid zero-length memory backends, so here zero means
     * "we haven't inited the backend memory region yet".
     */
    return memory_region_size(&backend->mr) != 0;
}

MemoryRegion *host_memory_backend_get_memory(HostMemoryBackend *backend)
{
    return host_memory_backend_mr_inited(backend) ? &backend->mr : NULL;
}

void host_memory_backend_set_mapped(HostMemoryBackend *backend, bool mapped)
{
    backend->is_mapped = mapped;
}

bool host_memory_backend_is_mapped(HostMemoryBackend *backend)
{
    return backend->is_mapped;
}

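/*
 * Backends placed on hugetlbfs (or hugepage-backed memfds) have a
 * RAMBlock page size larger than the host's base page size, which is why
 * callers cannot simply use qemu_real_host_page_size() here.
 */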
size_t host_memory_backend_pagesize(HostMemoryBackend *memdev)
{
    size_t pagesize = qemu_ram_pagesize(memdev->mr.ram_block);
    g_assert(pagesize >= qemu_real_host_page_size());
    return pagesize;
}

static void
host_memory_backend_memory_complete(UserCreatable *uc, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(uc);
    HostMemoryBackendClass *bc = MEMORY_BACKEND_GET_CLASS(uc);
    void *ptr;
    uint64_t sz;

    if (!bc->alloc) {
        return;
    }
    if (!bc->alloc(backend, errp)) {
        return;
    }

    ptr = memory_region_get_ram_ptr(&backend->mr);
    sz = memory_region_size(&backend->mr);

    if (backend->merge) {
        qemu_madvise(ptr, sz, QEMU_MADV_MERGEABLE);
    }
    if (!backend->dump) {
        qemu_madvise(ptr, sz, QEMU_MADV_DONTDUMP);
    }
#ifdef CONFIG_NUMA
    unsigned long lastbit = find_last_bit(backend->host_nodes, MAX_NODES);
    /* lastbit == MAX_NODES means maxnode = 0 */
    unsigned long maxnode = (lastbit + 1) % (MAX_NODES + 1);
    /*
     * Ensure the policy won't be ignored in case memory is preallocated
     * before mbind(). Note: MPOL_MF_STRICT is ignored on hugepages so
     * this doesn't catch the hugepage case.
     */
    unsigned flags = MPOL_MF_STRICT | MPOL_MF_MOVE;
    int mode = backend->policy;

    /*
     * Check for invalid host-nodes and policies and give more verbose
     * error messages than mbind().
     */
    if (maxnode && backend->policy == MPOL_DEFAULT) {
        error_setg(errp, "host-nodes must be empty for policy default,"
                   " or you should explicitly specify a policy other"
                   " than default");
        return;
    } else if (maxnode == 0 && backend->policy != MPOL_DEFAULT) {
        error_setg(errp, "host-nodes must be set for policy %s",
                   HostMemPolicy_str(backend->policy));
        return;
    }

    /*
     * We can have up to MAX_NODES nodes, but we need to pass maxnode+1
     * as argument to mbind() due to an old Linux bug (feature?) which
     * cuts off the last specified node. This means backend->host_nodes
     * must have MAX_NODES+1 bits available.
     */
    assert(sizeof(backend->host_nodes) >=
           BITS_TO_LONGS(MAX_NODES + 1) * sizeof(unsigned long));
    assert(maxnode <= MAX_NODES);
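
    /*
     * Worked example (illustrative): with host-nodes=0-1, bits 0 and 1
     * are set, so lastbit == 1 and maxnode == 2; the mbind() call below
     * is then made with maxnode + 1 == 3, compensating for the kernel
     * cutting off the last specified node.
     */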

#ifdef HAVE_NUMA_HAS_PREFERRED_MANY
    if (mode == MPOL_PREFERRED && numa_has_preferred_many() > 0) {
        /*
         * Replace with MPOL_PREFERRED_MANY otherwise the mbind() below
         * silently picks the first node.
         */
        mode = MPOL_PREFERRED_MANY;
    }
#endif

    if (maxnode &&
        mbind(ptr, sz, mode, backend->host_nodes, maxnode + 1, flags)) {
        if (backend->policy != MPOL_DEFAULT || errno != ENOSYS) {
            error_setg_errno(errp, errno,
                             "cannot bind memory to host NUMA nodes");
            return;
        }
    }
#endif
    /*
     * Preallocate memory after the NUMA policy has been instantiated.
     * This is necessary to guarantee memory is allocated with the
     * specified NUMA policy in place.
     */
    if (backend->prealloc &&
        !qemu_prealloc_mem(memory_region_get_fd(&backend->mr), ptr, sz,
                           backend->prealloc_threads,
                           backend->prealloc_context, errp)) {
        return;
    }
}

static bool
host_memory_backend_can_be_deleted(UserCreatable *uc)
{
    return !host_memory_backend_is_mapped(MEMORY_BACKEND(uc));
}

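/*
 * Background (illustrative): share=on requests a shared mapping
 * (MAP_SHARED-style) from the concrete backend so that other processes,
 * e.g. vhost-user clients, can observe guest RAM updates; share=off
 * yields a private copy-on-write mapping. Like most properties here, it
 * is frozen once the memory region has been created.
 */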
static bool host_memory_backend_get_share(Object *o, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(o);

    return backend->share;
}

static void host_memory_backend_set_share(Object *o, bool value, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(o);

    if (host_memory_backend_mr_inited(backend)) {
        error_setg(errp, "cannot change property value");
        return;
    }
    backend->share = value;
}

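/*
 * Background (illustrative): on Linux, reserve=off asks the backend for a
 * MAP_NORESERVE-style mapping, i.e. no swap space (or hugetlb pool)
 * reservation. Accesses may then fail at runtime under overcommit, which
 * is presumably why prealloc=on, touching every page up front, is
 * rejected in combination with it.
 */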
#ifdef CONFIG_LINUX
static bool host_memory_backend_get_reserve(Object *o, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(o);

    return backend->reserve;
}

static void host_memory_backend_set_reserve(Object *o, bool value, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(o);

    if (host_memory_backend_mr_inited(backend)) {
        error_setg(errp, "cannot change property value");
        return;
    }
    if (backend->prealloc && !value) {
        error_setg(errp, "'prealloc=on' and 'reserve=off' are incompatible");
        return;
    }
    backend->reserve = value;
}
#endif /* CONFIG_LINUX */

static bool
host_memory_backend_get_use_canonical_path(Object *obj, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    return backend->use_canonical_path;
}

static void
host_memory_backend_set_use_canonical_path(Object *obj, bool value,
                                           Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);

    backend->use_canonical_path = value;
}

static void
host_memory_backend_class_init(ObjectClass *oc, void *data)
{
    UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);

    ucc->complete = host_memory_backend_memory_complete;
    ucc->can_be_deleted = host_memory_backend_can_be_deleted;

    object_class_property_add_bool(oc, "merge",
        host_memory_backend_get_merge,
        host_memory_backend_set_merge);
    object_class_property_set_description(oc, "merge",
        "Mark memory as mergeable");
    object_class_property_add_bool(oc, "dump",
        host_memory_backend_get_dump,
        host_memory_backend_set_dump);
    object_class_property_set_description(oc, "dump",
        "Set to 'off' to exclude from core dump");
    object_class_property_add_bool(oc, "prealloc",
        host_memory_backend_get_prealloc,
        host_memory_backend_set_prealloc);
    object_class_property_set_description(oc, "prealloc",
        "Preallocate memory");
    object_class_property_add(oc, "prealloc-threads", "int",
        host_memory_backend_get_prealloc_threads,
        host_memory_backend_set_prealloc_threads,
        NULL, NULL);
    object_class_property_set_description(oc, "prealloc-threads",
        "Number of CPU threads to use for prealloc");
    object_class_property_add_link(oc, "prealloc-context",
        TYPE_THREAD_CONTEXT, offsetof(HostMemoryBackend, prealloc_context),
        object_property_allow_set_link, OBJ_PROP_LINK_STRONG);
    object_class_property_set_description(oc, "prealloc-context",
        "Context to use for creating CPU threads for preallocation");
    object_class_property_add(oc, "size", "int",
        host_memory_backend_get_size,
        host_memory_backend_set_size,
        NULL, NULL);
    object_class_property_set_description(oc, "size",
        "Size of the memory region (e.g. 500M)");
    object_class_property_add(oc, "host-nodes", "int",
        host_memory_backend_get_host_nodes,
        host_memory_backend_set_host_nodes,
        NULL, NULL);
    object_class_property_set_description(oc, "host-nodes",
        "Binds memory to the list of NUMA host nodes");
    object_class_property_add_enum(oc, "policy", "HostMemPolicy",
        &HostMemPolicy_lookup,
        host_memory_backend_get_policy,
        host_memory_backend_set_policy);
    object_class_property_set_description(oc, "policy",
        "Set the NUMA policy");
    object_class_property_add_bool(oc, "share",
        host_memory_backend_get_share, host_memory_backend_set_share);
    object_class_property_set_description(oc, "share",
        "Mark the memory as private to QEMU or shared");
#ifdef CONFIG_LINUX
    object_class_property_add_bool(oc, "reserve",
        host_memory_backend_get_reserve, host_memory_backend_set_reserve);
    object_class_property_set_description(oc, "reserve",
        "Reserve swap space (or huge pages) if applicable");
#endif /* CONFIG_LINUX */
    /*
     * Do not delete/rename this option. It must be considered stable
     * (as if it didn't have the 'x-' prefix, including a deprecation
     * period) as long as 4.0 and older machine types exist.
     * The option is used by upper layers to override (disable) the
     * canonical path for the ramblock-id set by compat properties on old
     * machine types (<= 4.0), to keep migration working when the backend
     * is used for main RAM with the -machine memory-backend= option
     * (main RAM historically used a prefix-less ramblock-id).
     */
    object_class_property_add_bool(oc, "x-use-canonical-path-for-ramblock-id",
        host_memory_backend_get_use_canonical_path,
        host_memory_backend_set_use_canonical_path);
}

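/*
 * Putting it together (illustrative): TYPE_MEMORY_BACKEND itself is
 * abstract; concrete subclasses such as memory-backend-ram,
 * memory-backend-file and memory-backend-memfd supply bc->alloc. A
 * typical NUMA-bound, preallocated file backend combines several of the
 * properties registered above:
 *
 *   -object memory-backend-file,id=mem0,size=4G,mem-path=/dev/hugepages,\
 *           share=on,prealloc=on,host-nodes=0,policy=bind
 */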
static const TypeInfo host_memory_backend_info = {
    .name = TYPE_MEMORY_BACKEND,
    .parent = TYPE_OBJECT,
    .abstract = true,
    .class_size = sizeof(HostMemoryBackendClass),
    .class_init = host_memory_backend_class_init,
    .instance_size = sizeof(HostMemoryBackend),
    .instance_init = host_memory_backend_init,
    .instance_post_init = host_memory_backend_post_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_USER_CREATABLE },
        { }
    }
};

static void register_types(void)
{
    type_register_static(&host_memory_backend_info);
}

type_init(register_types);