
Searched full:pages (Results 1 – 25 of 311) sorted by relevance


/qemu/hw/i2c/
pmbus_device.c
232 pmdev->pages = g_new0(PMBusPage, pmdev->num_pages); in pmbus_pages_alloc()
238 if ((pmdev->pages[i].operation & PB_OP_ON) == 0) { in pmbus_check_limits()
242 if (pmdev->pages[i].read_vout > pmdev->pages[i].vout_ov_fault_limit) { in pmbus_check_limits()
243 pmdev->pages[i].status_word |= PB_STATUS_VOUT; in pmbus_check_limits()
244 pmdev->pages[i].status_vout |= PB_STATUS_VOUT_OV_FAULT; in pmbus_check_limits()
247 if (pmdev->pages[i].read_vout > pmdev->pages[i].vout_ov_warn_limit) { in pmbus_check_limits()
248 pmdev->pages[i].status_word |= PB_STATUS_VOUT; in pmbus_check_limits()
249 pmdev->pages[i].status_vout |= PB_STATUS_VOUT_OV_WARN; in pmbus_check_limits()
252 if (pmdev->pages[i].read_vout < pmdev->pages[i].vout_uv_warn_limit) { in pmbus_check_limits()
253 pmdev->pages[i].status_word |= PB_STATUS_VOUT; in pmbus_check_limits()
[all …]
/qemu/hw/sensor/
isl_pmbus_vr.c
73 pmdev->pages[i].operation = ISL_OPERATION_DEFAULT; in isl_pmbus_vr_exit_reset()
74 pmdev->pages[i].on_off_config = ISL_ON_OFF_CONFIG_DEFAULT; in isl_pmbus_vr_exit_reset()
75 pmdev->pages[i].vout_mode = ISL_VOUT_MODE_DEFAULT; in isl_pmbus_vr_exit_reset()
76 pmdev->pages[i].vout_command = ISL_VOUT_COMMAND_DEFAULT; in isl_pmbus_vr_exit_reset()
77 pmdev->pages[i].vout_max = ISL_VOUT_MAX_DEFAULT; in isl_pmbus_vr_exit_reset()
78 pmdev->pages[i].vout_margin_high = ISL_VOUT_MARGIN_HIGH_DEFAULT; in isl_pmbus_vr_exit_reset()
79 pmdev->pages[i].vout_margin_low = ISL_VOUT_MARGIN_LOW_DEFAULT; in isl_pmbus_vr_exit_reset()
80 pmdev->pages[i].vout_transition_rate = ISL_VOUT_TRANSITION_RATE_DEFAULT; in isl_pmbus_vr_exit_reset()
81 pmdev->pages[i].vout_ov_fault_limit = ISL_VOUT_OV_FAULT_LIMIT_DEFAULT; in isl_pmbus_vr_exit_reset()
82 pmdev->pages[i].ot_fault_limit = ISL_OT_FAULT_LIMIT_DEFAULT; in isl_pmbus_vr_exit_reset()
[all …]
adm1272.c
118 if (pmdev->pages[0].read_vout > s->peak_vout) { in adm1272_check_limits()
119 s->peak_vout = pmdev->pages[0].read_vout; in adm1272_check_limits()
122 if (pmdev->pages[0].read_vin > s->peak_vin) { in adm1272_check_limits()
123 s->peak_vin = pmdev->pages[0].read_vin; in adm1272_check_limits()
126 if (pmdev->pages[0].read_iout > s->peak_iout) { in adm1272_check_limits()
127 s->peak_iout = pmdev->pages[0].read_iout; in adm1272_check_limits()
130 if (pmdev->pages[0].read_temperature_1 > s->peak_temperature) { in adm1272_check_limits()
131 s->peak_temperature = pmdev->pages[0].read_temperature_1; in adm1272_check_limits()
134 if (pmdev->pages[0].read_pin > s->peak_pin) { in adm1272_check_limits()
135 s->peak_pin = pmdev->pages[0].read_pin; in adm1272_check_limits()
[all …]
max31785.c
61 /* MAX31785 pages */
142 * | 255 | Applies to all pages |
176 pmbus_send8(pmdev, pmdev->pages[pmdev->page].fan_config_1_2); in max31785_read_byte()
182 pmbus_send16(pmdev, pmdev->pages[pmdev->page].fan_command_1); in max31785_read_byte()
188 pmbus_send16(pmdev, pmdev->pages[pmdev->page].read_fan_speed_1); in max31785_read_byte()
194 pmbus_send16(pmdev, pmdev->pages[pmdev->page].status_fans_1_2); in max31785_read_byte()
337 pmdev->pages[pmdev->page].fan_config_1_2 = pmbus_receive8(pmdev); in max31785_write_data()
343 pmdev->pages[pmdev->page].fan_command_1 = pmbus_receive16(pmdev); in max31785_write_data()
344 pmdev->pages[pmdev->page].read_fan_speed_1 = in max31785_write_data()
346 pmdev->pages[pmdev->page].fan_command_1); in max31785_write_data()
[all …]
max34451.c
126 | 255 | Applies to all pages. |
181 if (pmdev->pages[i].read_vout == 0) { /* PSU disabled */ in max34451_check_limits()
185 if (pmdev->pages[i].read_vout > s->vout_peak[i]) { in max34451_check_limits()
186 s->vout_peak[i] = pmdev->pages[i].read_vout; in max34451_check_limits()
189 if (pmdev->pages[i].read_vout < s->vout_min[i]) { in max34451_check_limits()
190 s->vout_min[i] = pmdev->pages[i].read_vout; in max34451_check_limits()
193 if (pmdev->pages[i].read_iout > s->iout_peak[i]) { in max34451_check_limits()
194 s->iout_peak[i] = pmdev->pages[i].read_iout; in max34451_check_limits()
199 if (pmdev->pages[i + 16].read_temperature_1 > s->temperature_peak[i]) { in max34451_check_limits()
200 s->temperature_peak[i] = pmdev->pages[i + 16].read_temperature_1; in max34451_check_limits()
[all …]
adm1266.c
88 pmdev->pages[i].operation = ADM1266_OPERATION_DEFAULT; in adm1266_exit_reset()
89 pmdev->pages[i].revision = ADM1266_PMBUS_REVISION_DEFAULT; in adm1266_exit_reset()
90 pmdev->pages[i].vout_mode = 0; in adm1266_exit_reset()
91 pmdev->pages[i].read_vout = pmbus_data2linear_mode(12, 0); in adm1266_exit_reset()
92 pmdev->pages[i].vout_margin_high = pmbus_data2linear_mode(15, 0); in adm1266_exit_reset()
93 pmdev->pages[i].vout_margin_low = pmbus_data2linear_mode(3, 0); in adm1266_exit_reset()
94 pmdev->pages[i].vout_ov_fault_limit = pmbus_data2linear_mode(16, 0); in adm1266_exit_reset()
95 pmdev->pages[i].revision = ADM1266_PMBUS_REVISION_DEFAULT; in adm1266_exit_reset()
174 PMBusVoutMode *mode = (PMBusVoutMode *)&pmdev->pages[0].vout_mode; in adm1266_get()
191 PMBusVoutMode *mode = (PMBusVoutMode *)&pmdev->pages[0].vout_mode; in adm1266_set()
[all …]
/qemu/migration/
multifd-nocomp.c
29 void multifd_ram_payload_alloc(MultiFDPages_t *pages) in multifd_ram_payload_alloc() argument
31 pages->offset = g_new0(ram_addr_t, multifd_ram_page_count()); in multifd_ram_payload_alloc()
34 void multifd_ram_payload_free(MultiFDPages_t *pages) in multifd_ram_payload_free() argument
36 g_clear_pointer(&pages->offset, g_free); in multifd_ram_payload_free()
51 MultiFDPages_t *pages = &p->data->u.ram; in multifd_set_file_bitmap() local
53 assert(pages->block); in multifd_set_file_bitmap()
55 for (int i = 0; i < pages->normal_num; i++) { in multifd_set_file_bitmap()
56 ramblock_set_file_bmap_atomic(pages->block, pages->offset[i], true); in multifd_set_file_bitmap()
59 for (int i = pages->normal_num; i < pages->num; i++) { in multifd_set_file_bitmap()
60 ramblock_set_file_bmap_atomic(pages->block, pages->offset[i], false); in multifd_set_file_bitmap()
[all …]
multifd-zero-page.c
41 * multifd_send_zero_page_detect: Perform zero page detection on all pages.
43 * Sorts normal pages before zero pages in p->pages->offset and updates
44 * p->pages->normal_num.
50 MultiFDPages_t *pages = &p->data->u.ram; in multifd_send_zero_page_detect() local
51 RAMBlock *rb = pages->block; in multifd_send_zero_page_detect()
53 int j = pages->num - 1; in multifd_send_zero_page_detect()
56 pages->normal_num = pages->num; in multifd_send_zero_page_detect()
61 * Sort the page offset array by moving all normal pages to in multifd_send_zero_page_detect()
62 * the left and all zero pages to the right of the array. in multifd_send_zero_page_detect()
65 uint64_t offset = pages->offset[i]; in multifd_send_zero_page_detect()
[all …]
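
The multifd-zero-page.c hits above describe partitioning the offset array: normal pages are moved to the left, zero pages to the right, and normal_num is updated. A minimal standalone sketch of that partition step, assuming 4 KiB pages (buf_is_zero and partition_zero_pages are illustrative stand-ins, not QEMU's buffer_is_zero() or the real detector):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096

/* Illustrative stand-in for QEMU's buffer_is_zero(). */
static int buf_is_zero(const uint8_t *buf, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        if (buf[i]) {
            return 0;
        }
    }
    return 1;
}

/*
 * Partition offsets[] so that non-zero ("normal") pages come first and
 * zero pages last, returning the normal count -- the post-condition the
 * excerpt establishes for p->pages->offset and p->pages->normal_num.
 */
static uint32_t partition_zero_pages(const uint8_t *host,
                                     uint64_t *offsets, uint32_t num)
{
    uint32_t i = 0, j = num;

    while (i < j) {
        if (buf_is_zero(host + offsets[i], PAGE_SIZE)) {
            uint64_t tmp = offsets[i];     /* swap zero page to the tail */
            offsets[i] = offsets[j - 1];
            offsets[j - 1] = tmp;
            j--;
        } else {
            i++;                           /* normal page stays in place */
        }
    }
    return i;
}

int main(void)
{
    static uint8_t ram[4 * PAGE_SIZE];     /* zero-initialized */
    ram[2 * PAGE_SIZE] = 0xff;             /* make page 2 "normal" */

    uint64_t offsets[4] = { 0, PAGE_SIZE, 2 * PAGE_SIZE, 3 * PAGE_SIZE };
    uint32_t normal = partition_zero_pages(ram, offsets, 4);

    printf("normal pages: %u of 4\n", normal);   /* prints 1 */
    return 0;
}

Sorting in place keeps a single offset array usable for both runs: the sender transmits the first normal_num entries as data and the rest as zero-page markers.
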
dirtyrate.h
19 * Sample 512 pages per GB as default.
42 * Take 1/16 pages in 1G as the maximum sample page count
48 uint64_t sample_pages_per_gigabytes; /* sample pages per GB */
61 uint64_t sample_pages_count; /* count of sampled pages */
62 uint64_t sample_dirty_count; /* count of dirty pages we measure */
63 uint32_t *hash_result; /* array of hash result for sampled pages */
68 uint64_t total_sample_count; /* total sampled pages */
69 uint64_t total_block_mem_MB; /* size of total sampled pages in MB */
79 uint64_t sample_pages; /* sample pages per GB */
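
The dirtyrate.h comments above fix two constants: 512 sampled pages per GB by default, capped at 1/16 of the pages in 1G. A small worked check of that arithmetic with 4 KiB pages (constant names are illustrative, not QEMU's):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE            4096ULL
#define PAGES_PER_GIB        ((1ULL << 30) / PAGE_SIZE)   /* 262144 */
#define SAMPLES_PER_GIB      512ULL                       /* the default */
#define MAX_SAMPLES_PER_GIB  (PAGES_PER_GIB / 16)         /* the 1/16 cap */

int main(void)
{
    uint64_t block_bytes = 4ULL << 30;          /* a 4 GiB RAM block */
    uint64_t gib = block_bytes >> 30;

    uint64_t samples = gib * SAMPLES_PER_GIB;   /* 2048 pages */
    uint64_t cap = gib * MAX_SAMPLES_PER_GIB;   /* 65536 pages */

    if (samples > cap) {
        samples = cap;
    }
    printf("sample %llu of %llu pages\n",
           (unsigned long long)samples,
           (unsigned long long)(block_bytes / PAGE_SIZE));
    return 0;
}
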
ram.c
88 * the pages region in the migration file at a time.
112 * right after the requested urgent pages.
124 /* used by the search for pages to send */
368 * PageSearchStatus structures for the channels when sending pages.
376 /* Last block that we have visited searching for dirty pages */
382 /* How many times we have dirty too many pages */
389 /* number of dirty pages since start_time */
393 /* Amount of xbzrle pages since the beginning of the period */
402 /* total handled target pages at the beginning of period */
404 /* total handled target pages since start */
[all …]
multifd-qpl.c
36 /* array of hardware jobs, the number of jobs equals the number of pages */
40 /* the number of pages that the QPL needs to process at one time */
181 * @num: the number of pages
381 * multifd_qpl_compress_pages_slow_path: compress pages using slow path
383 * Compress the pages using software. If compression fails, the uncompressed
391 MultiFDPages_t *pages = &p->data->u.ram; in multifd_qpl_compress_pages_slow_path() local
397 for (int i = 0; i < pages->normal_num; i++) { in multifd_qpl_compress_pages_slow_path()
398 buf = pages->block->host + pages->offset[i]; in multifd_qpl_compress_pages_slow_path()
411 * multifd_qpl_compress_pages: compress pages
413 * Submit the pages to the IAA hardware for compression. If hardware
[all …]
multifd.h
88 /* maximum number of allocated pages */
90 /* non zero pages */
92 /* size of the next packet that contains pages */
95 /* zero pages */
102 * - normal pages (initial normal_pages entries)
103 * - zero pages (following zero_pages entries)
119 /* number of used pages */
121 /* number of normal pages */
227 /* size of the next packet that contains pages */
276 /* size of the next packet that contains pages */
[all …]
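
The multifd.h comments sketch a packet whose offset array carries the normal-page entries first and the zero-page entries after them. A hypothetical struct making that layout concrete (struct and field names are illustrative, not QEMU's actual MultiFDPacket_t definition):

#include <stdint.h>

/* Hypothetical on-wire sketch of a multifd RAM packet. */
struct multifd_ram_packet_sketch {
    uint32_t pages_alloc;       /* maximum number of allocated pages */
    uint32_t normal_pages;      /* non-zero pages in this packet */
    uint32_t zero_pages;        /* zero pages in this packet */
    uint32_t next_packet_size;  /* size of the next packet that contains pages */
    /*
     * offset[0 .. normal_pages-1]:                       normal pages
     * offset[normal_pages .. normal_pages+zero_pages-1]: zero pages
     */
    uint64_t offset[];          /* guest page offsets, normal first */
};
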
migration.h
51 * 1<<6=64 pages -> 256K chunk when page size is 4K. This gives us
52 * the benefit that all the chunks are 64 pages aligned then the
57 * 1<<18=256K pages -> 1G chunk when page size is 4K. This is the
62 * 1<<31=2G pages -> 8T chunk when page size is 4K. This should be
79 /* Number of small pages copied (in size of TARGET_PAGE_SIZE) */
148 /* Postcopy priority thread is used to receive postcopy requested pages */
169 * An array of temp host huge pages to be used, one for each postcopy
215 /* A tree of pages that we requested to the source VM */
223 * The mutex helps to maintain the requested pages that we sent to the
237 * finished loading the urgent pages. If that happens, the two threads
[all …]
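
The migration.h comments above each pair a page-count power of two with a chunk size. A quick check of that arithmetic for 4K pages:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint64_t page = 4096;    /* 4K page size, as in the comments */

    printf("%llu\n", (unsigned long long)((1ULL << 6) * page));
                                   /* 262144        = 256K */
    printf("%llu\n", (unsigned long long)((1ULL << 18) * page));
                                   /* 1073741824    = 1G   */
    printf("%llu\n", (unsigned long long)((1ULL << 31) * page));
                                   /* 8796093022208 = 8T   */
    return 0;
}
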
multifd-qatzip.c
31 * For compression: Buffer for pages to compress
155 * qatzip_send_prepare: Compress pages and update IO channel info.
164 MultiFDPages_t *pages = &p->data->u.ram; in qatzip_send_prepare() local
182 for (int i = 0; i < pages->normal_num; i++) { in qatzip_send_prepare()
184 pages->block->host + pages->offset[i], in qatzip_send_prepare()
188 in_len = pages->normal_num * page_size; in qatzip_send_prepare()
201 if (in_len != pages->normal_num * page_size) { in qatzip_send_prepare()
259 * doesn't send uncompressed pages in case the compression gets too big. in qatzip_recv_setup()
320 * qatzip_recv: Decompress pages and copy them to the appropriate
/qemu/docs/devel/migration/
mapped-ram.rst
9 The core of the feature is to ensure that RAM pages are mapped
12 guest is constantly dirtying pages (i.e. live migration). Another
14 pages which are dirtied multiple times will always go to a fixed
16 sequential stream. Having the pages at fixed offsets also allows the
18 pages are ensured to be written respecting O_DIRECT alignment
55 track dirty pages, the migration will write the RAM pages to the disk
81 Instead of having a sequential stream of pages that follow the
82 RAMBlock headers, the dirty pages for a RAMBlock follow its header
86 A bitmap is introduced to track which pages have been written in the
87 migration file. Pages are written at a fixed location for every
[all …]
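
The mapped-ram.rst excerpt's central point is that every RAM page has one fixed offset in the migration file, so a page dirtied multiple times overwrites its own slot instead of extending a sequential stream. A minimal sketch of that addressing, assuming the block's pages region starts at a known file base (page_file_offset and its arguments are illustrative names; QEMU derives the base from the RAMBlock header):

#include <stdint.h>
#include <stdio.h>

/* Each page's one "home" in the migration file. */
static uint64_t page_file_offset(uint64_t pages_region_base,
                                 uint64_t page_index, uint64_t page_size)
{
    return pages_region_base + page_index * page_size;
}

int main(void)
{
    /* Page 42 of a block whose pages region starts at 0x100000. */
    printf("0x%llx\n", (unsigned long long)
           page_file_offset(0x100000, 42, 4096));   /* 0x12a000 */
    return 0;
}

Fixed placement is also what makes the O_DIRECT alignment guarantee mentioned above possible, and the write bitmap records which of these slots actually hold data.
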
postcopy.rst
13 transferred, and accesses to pages that are yet to be transferred cause
52 ``migrate_set_parameter`` is ignored (to avoid delaying requested pages that
78 (using madvise) to ensure that no new huge pages are created
79 during the postcopy phase, and to cause any huge pages that
87 pages off the migration stream, while the main thread carries
90 any access to missing pages (on Linux using the 'userfault'
136 During postcopy the source scans the list of dirty pages and sends them
139 scanning restarts from the requested location. This causes requested pages
140 to be sent quickly, and also causes pages directly after the requested page
141 to be sent quickly in the hope that those pages are likely to be used
[all …]
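
The postcopy.rst excerpt names Linux 'userfault' as the mechanism that traps access to not-yet-transferred pages. A minimal, self-contained sketch of registering a region with userfaultfd for missing-page events (requires Linux and, on many systems, the vm.unprivileged_userfaultfd sysctl or suitable capabilities; error handling trimmed):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    void *area = mmap(NULL, 16 * page, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (area == MAP_FAILED) {
        return 1;
    }

    int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    if (ufd < 0) {
        perror("userfaultfd");
        return 1;
    }

    /* Handshake with the kernel, then register the range. */
    struct uffdio_api api = { .api = UFFD_API };
    if (ioctl(ufd, UFFDIO_API, &api) < 0) {
        return 1;
    }

    struct uffdio_register reg = {
        .range = { .start = (uint64_t)(uintptr_t)area,
                   .len = (uint64_t)(16 * page) },
        .mode = UFFDIO_REGISTER_MODE_MISSING,
    };
    if (ioctl(ufd, UFFDIO_REGISTER, &reg) < 0) {
        return 1;
    }
    printf("faults on the region are now readable from fd %d\n", ufd);
    return 0;
}

A fault-handling thread would then read uffd_msg events from the fd and resolve each one with UFFDIO_COPY once the page arrives off the migration stream.
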
vfio.rst
112 System memory dirty pages tracking
118 dirty tracking module and marks system memory pages which were DMA-ed by the
134 IOMMU support for dirty page tracking. For this reason, all pages are
135 perpetually marked dirty, unless the device driver pins pages through external
136 APIs in which case only those pinned pages are perpetually marked dirty.
138 If the above two methods are not supported, all pages are perpetually marked
141 By default, dirty pages are tracked during pre-copy as well as stop-and-copy
143 phases. Copying dirty pages in pre-copy phase helps QEMU to predict if it can
145 dirty pages continuously, then it understands that even in stop-and-copy phase,
146 it is likely to find dirty pages and can predict the downtime accordingly.
[all …]
/qemu/linux-headers/asm-mips/
mman.h
46 #define MAP_LOCKED 0x8000 /* pages are locked */
65 #define MCL_ONFAULT 4 /* lock all pages that are faulted in */
70 #define MLOCK_ONFAULT 0x01 /* Lock pages in range after they are faulted in, do not prefault */
75 #define MADV_WILLNEED 3 /* will need these pages */
76 #define MADV_DONTNEED 4 /* don't need these pages */
79 #define MADV_FREE 8 /* free pages only if memory pressure */
80 #define MADV_REMOVE 9 /* remove these pages & resources */
84 #define MADV_MERGEABLE 12 /* KSM may merge identical pages */
85 #define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
98 #define MADV_COLD 20 /* deactivate these pages */
[all …]
/qemu/linux-headers/asm-generic/
mman-common.h
39 #define MLOCK_ONFAULT 0x01 /* Lock pages in range after they are faulted in, do not prefault */
48 #define MADV_WILLNEED 3 /* will need these pages */
49 #define MADV_DONTNEED 4 /* don't need these pages */
52 #define MADV_FREE 8 /* free pages only if memory pressure */
53 #define MADV_REMOVE 9 /* remove these pages & resources */
59 #define MADV_MERGEABLE 12 /* KSM may merge identical pages */
60 #define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
72 #define MADV_COLD 20 /* deactivate these pages */
73 #define MADV_PAGEOUT 21 /* reclaim these pages */
78 #define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */
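
Both mman.h excerpts list MADV_* advice values. A short sketch of how such advice is applied to a mapping with madvise(2), here pairing MADV_WILLNEED with MADV_DONTNEED:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    void *buf = mmap(NULL, 64 * page, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED) {
        return 1;
    }

    /* Hint that the whole range will be needed soon... */
    madvise(buf, 64 * page, MADV_WILLNEED);

    /* ...then discard the second half once it is no longer useful. */
    if (madvise((char *)buf + 32 * page, 32 * page, MADV_DONTNEED) < 0) {
        perror("madvise");
        return 1;
    }
    puts("advice applied");
    return 0;
}
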
/qemu/docs/
xbzrle.txt
14 In order to be able to calculate the update, the previous memory pages need to
15 be stored on the source. Those pages are stored in a dedicated cache
76 Keeping the hot pages in the cache is effective for decreasing cache
79 detected, XBZRLE will only evict pages in the cache that are older than
104 duplicate: E pages
105 normal: F pages
109 xbzrle pages: J pages
110 xbzrle cache miss: K pages
118 could not be compressed. This can happen if the changes in the pages are too
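
xbzrle.txt describes encoding a page as the difference against a cached previous copy. A toy encoder in the same spirit: wherever old and new bytes match the XOR would be zero, so emit zero-run/literal-run pairs; runs longer than 255 simply split into multiple pairs. Illustrative only; QEMU's real xbzrle_encode_buffer() uses a different, optimized format:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int encode_delta(const uint8_t *old, const uint8_t *new,
                        size_t len, uint8_t *out, size_t out_max)
{
    size_t i = 0, o = 0;

    while (i < len) {
        size_t zrun = 0, nzrun = 0;

        while (zrun < 255 && i + zrun < len &&
               old[i + zrun] == new[i + zrun]) {
            zrun++;                 /* unchanged bytes: XOR is zero */
        }
        i += zrun;
        while (nzrun < 255 && i + nzrun < len &&
               old[i + nzrun] != new[i + nzrun]) {
            nzrun++;                /* changed bytes to send literally */
        }
        if (o + 2 + nzrun > out_max) {
            return -1;              /* overflow: caller sends the page raw */
        }
        out[o++] = (uint8_t)zrun;
        out[o++] = (uint8_t)nzrun;
        memcpy(out + o, new + i, nzrun);
        o += nzrun;
        i += nzrun;
    }
    return (int)o;
}

int main(void)
{
    uint8_t oldp[16] = { 0 }, newp[16] = { 0 }, out[64];

    newp[7] = 0xab;                 /* one changed byte in the "page" */
    printf("%d bytes\n", encode_delta(oldp, newp, 16, out, sizeof(out)));
    /* prints 5: (7,1,0xab) then (8,0) */
    return 0;
}

The overflow return models the case the excerpt's last lines mention: when the delta would not shrink the page, the page is sent uncompressed instead.
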
/qemu/hw/s390x/
s390-skeys.c
152 uint64_t pages, gfn; in s390_qmp_dump_skeys() local
193 pages = (block->target_end - block->target_start) / TARGET_PAGE_SIZE; in s390_qmp_dump_skeys()
195 while (pages) { in s390_qmp_dump_skeys()
196 const uint64_t cur_pages = MIN(pages, S390_SKEYS_BUFFER_SIZE); in s390_qmp_dump_skeys()
211 pages -= cur_pages; in s390_qmp_dump_skeys()
273 error_report("Error: Setting storage keys for pages with unallocated " in qemu_s390_skeys_set()
294 error_report("Error: Getting storage keys for pages with unallocated " in qemu_s390_skeys_get()
326 uint64_t pages, gfn; in s390_storage_keys_save() local
349 pages = (block->target_end - block->target_start) / TARGET_PAGE_SIZE; in s390_storage_keys_save()
351 qemu_put_be64(f, pages); in s390_storage_keys_save()
[all …]
/qemu/contrib/plugins/
hotpages.c
4 * Hot Pages - show which pages saw the most memory accesses.
49 static GHashTable *pages; variable
82 counts = g_hash_table_get_values(pages); in plugin_exit()
106 pages = g_hash_table_new(g_int64_hash, g_int64_equal); in plugin_init()
133 count = (PageCounters *) g_hash_table_lookup(pages, &page); in vcpu_haddr()
138 g_hash_table_insert(pages, &count->page_address, count); in vcpu_haddr()
/qemu/include/hw/hyperv/
dynmem-proto.h
124 * The number of pages in the range.
231 * num_avail: Available memory in pages.
232 * num_committed: Committed memory in pages.
234 * in the system in pages.
235 * zero_free: The number of zero and free pages.
236 * page_file_writes: The writes to the page file in pages.
239 * This value is in pages.
262 * num_pages: number of pages to allocate.
361 * page_count: number of pages that were successfully hot added.
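
The dynmem-proto.h excerpt documents several Hyper-V dynamic-memory message fields whose unit is pages. A hypothetical struct collecting the status-report fields named above (layout and names are illustrative, not the protocol's actual definition):

#include <stdint.h>

/* Illustrative grouping of the status fields quoted above. */
struct dm_status_sketch {
    uint64_t num_avail;        /* available memory, in pages */
    uint64_t num_committed;    /* committed memory, in pages */
    uint64_t zero_free;        /* the number of zero and free pages */
    uint64_t page_file_writes; /* writes to the page file, in pages */
};
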
/qemu/
page-target.c
12 /* Convert target pages to MiB (2**20). */
13 size_t qemu_target_pages_to_MiB(size_t pages) in qemu_target_pages_to_MiB() argument
20 return pages >> (20 - page_bits); in qemu_target_pages_to_MiB()
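
The excerpt converts target pages to MiB with a right shift: pages >> (20 - page_bits) equals pages * page_size / 2**20 when page_size = 2**page_bits. A worked check:

#include <stddef.h>
#include <stdio.h>

/* Same arithmetic as the excerpt; valid while page_bits <= 20. */
static size_t pages_to_mib(size_t pages, unsigned page_bits)
{
    return pages >> (20 - page_bits);
}

int main(void)
{
    /* With 4 KiB pages (page_bits = 12), 262144 pages make 1 GiB. */
    printf("%zu MiB\n", pages_to_mib(262144, 12));   /* prints 1024 */
    return 0;
}
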
/qemu/include/system/
ram_addr.h
39 * @pages: number of guest pages
44 static inline long clear_bmap_size(uint64_t pages, uint8_t shift) in clear_bmap_size() argument
46 return DIV_ROUND_UP(pages, 1UL << shift); in clear_bmap_size()
55 * @size: number of pages to set in the bitmap
349 * the number of dirty pages in @bitmap passed as argument. On the other hand,
350 * cpu_physical_memory_sync_dirty_bitmap() returns newly dirtied pages that
356 ram_addr_t pages) in cpu_physical_memory_set_dirty_lebitmap() argument
363 unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS; in cpu_physical_memory_set_dirty_lebitmap()
374 long nr = BITS_TO_LONGS(pages); in cpu_physical_memory_set_dirty_lebitmap()
419 xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS); in cpu_physical_memory_set_dirty_lebitmap()
[all …]
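
clear_bmap_size() in the excerpt rounds up: one clear-bitmap bit covers 2**shift guest pages. A worked example with 4K pages and shift = 18 (1G of guest RAM per bit):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    /* A 3G guest has 786432 4K pages; at shift 18 that is 3 bits. */
    unsigned long pages = 786432, shift = 18;

    printf("%lu bits\n", DIV_ROUND_UP(pages, 1UL << shift)); /* prints 3 */
    return 0;
}
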
