/qemu/tests/bench/ |
benchmark-crypto-cipher.c |
    73  g_test_message("enc(%s-%s) chunk %zu bytes %.2f MB/sec ",  in test_cipher_speed()
    90  g_test_message("dec(%s-%s) chunk %zu bytes %.2f MB/sec ",  in test_cipher_speed()
   175  #define ADD_TEST(mode, cipher, keysize, chunk) \  in main()  argument
   177      (!size || g_str_equal(size, #chunk))) \  in main()
   179      "/crypto/cipher/" #mode "-" #cipher "-" #keysize "/chunk-" #chunk, \  in main()
   180      (void *)chunk, \  in main()
   190  #define ADD_TESTS(chunk) \  in main()  argument
   192      ADD_TEST(ecb, aes, 128, chunk); \  in main()
   193      ADD_TEST(ecb, aes, 256, chunk); \  in main()
   194      ADD_TEST(cbc, aes, 128, chunk); \  in main()
  [all …]
|
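The ADD_TEST hits above rely on GLib's data-driven test registration: stringizing the macro arguments builds the test path, and the chunk size travels through the opaque test-data pointer. A minimal standalone sketch of that pattern (hypothetical test body, not the QEMU benchmark):

    /* Hypothetical example of the ADD_TEST pattern: #chunk stringizes the
     * size into the test path, and the cast smuggles the same value
     * through the opaque test-data pointer. */
    #include <glib.h>
    #include <stdint.h>

    static void test_chunk_speed(gconstpointer opaque)
    {
        size_t chunk_size = (size_t)(uintptr_t)opaque;
        g_test_message("pretending to benchmark %zu-byte chunks", chunk_size);
    }

    #define ADD_TEST(mode, cipher, keysize, chunk)                             \
        g_test_add_data_func(                                                  \
            "/crypto/cipher/" #mode "-" #cipher "-" #keysize "/chunk-" #chunk, \
            (void *)(uintptr_t)(chunk), test_chunk_speed)

    int main(int argc, char **argv)
    {
        g_test_init(&argc, &argv, NULL);
        ADD_TEST(ecb, aes, 128, 512);
        ADD_TEST(cbc, aes, 256, 4096);
        return g_test_run();
    }
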
/qemu/block/ |
dmg.c |
    38  /* Limit chunk sizes to prevent unreasonable amounts of memory being used
   113  /* Increase max chunk sizes, if necessary. This function is used to calculate
   114   * the buffer sizes needed for compressed/uncompressed chunk I/O.
   116  static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,  in update_max_chunk_size()  argument
   123      switch (s->types[chunk]) {  in update_max_chunk_size()
   127          compressed_size = s->lengths[chunk];  in update_max_chunk_size()
   128          uncompressed_sectors = s->sectorcounts[chunk];  in update_max_chunk_size()
   131          uncompressed_sectors = DIV_ROUND_UP(s->lengths[chunk], 512);  in update_max_chunk_size()
   236      /* chunk offsets are relative to this sector number */  in dmg_read_mish_block()
   243      /* move to beginning of chunk entries */  in dmg_read_mish_block()
  [all …]
|
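update_max_chunk_size() keeps running maxima of the compressed and uncompressed sizes so one pair of buffers can later serve any chunk's I/O. A hedged sketch of that idea with simplified, assumed names (BDRVDMGState and the real chunk-type table are not reproduced):

    #include <stdint.h>

    #define SECTOR_SIZE 512
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    enum chunk_type { CHUNK_COMPRESSED, CHUNK_RAW };

    struct chunk_info {
        enum chunk_type type;
        uint64_t length;        /* on-disk length of the chunk */
        uint64_t sectorcount;   /* decoded size, in 512-byte sectors */
    };

    /* Grow *max_compressed / *max_uncompressed so a single pair of
     * buffers can hold any chunk without per-chunk reallocation. */
    static void update_max_sizes(const struct chunk_info *c,
                                 uint64_t *max_compressed,
                                 uint64_t *max_uncompressed)
    {
        uint64_t compressed = 0, sectors;

        switch (c->type) {
        case CHUNK_COMPRESSED:
            compressed = c->length;
            sectors = c->sectorcount;
            break;
        default: /* raw data: on-disk bytes map 1:1 to sectors */
            sectors = DIV_ROUND_UP(c->length, SECTOR_SIZE);
            break;
        }
        if (compressed > *max_compressed) {
            *max_compressed = compressed;
        }
        if (sectors * SECTOR_SIZE > *max_uncompressed) {
            *max_uncompressed = sectors * SECTOR_SIZE;
        }
    }
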
nbd.c |
   579  NBDStructuredReplyChunk *chunk,  in nbd_parse_offset_hole_payload()  argument
   586      if (chunk->length != sizeof(offset) + sizeof(hole_size)) {  in nbd_parse_offset_hole_payload()
   597          error_setg(errp, "Protocol error: server sent chunk exceeding requested"  in nbd_parse_offset_hole_payload()
   617  NBDStructuredReplyChunk *chunk,  in nbd_parse_blockstatus_payload()  argument
   628      if (chunk->length < pay_len) {  in nbd_parse_blockstatus_payload()
   654          error_setg(errp, "Protocol error: server sent status chunk with "  in nbd_parse_blockstatus_payload()
   693      if (count != wide || chunk->length > pay_len) {  in nbd_parse_blockstatus_payload()
   718  static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk,  in nbd_parse_error_payload()  argument
   725      assert(chunk->type & (1 << 15));  in nbd_parse_error_payload()
   727      if (chunk->length < sizeof(error) + sizeof(message_size)) {  in nbd_parse_error_payload()
  [all …]
|
dmg.h |
    34  /* each chunk contains a certain number of sectors,
    36   * lengths[i] is the length of the compressed chunk,
    38   * sectorcounts[i] is the number of sectors in that chunk,
|
/qemu/migration/ |
trace-events |
   234  …eft, uint64_t block, uint64_t chunk, void *local, void *remote) "completions %" PRIu64 " left %d, …
   241  qemu_rdma_signal_unregister_append(uint64_t chunk, int pos) "Appending unregister chunk %" PRIu64 "…
   242  qemu_rdma_signal_unregister_already(uint64_t chunk) "Unregister chunk %" PRIu64 " already in queue"
   243  qemu_rdma_unregister_waiting_inflight(uint64_t chunk) "Cannot unregister inflight chunk: %" PRIu64
   244  qemu_rdma_unregister_waiting_proc(uint64_t chunk, int pos) "Processing unregister for chunk: %" PRI…
   245  qemu_rdma_unregister_waiting_send(uint64_t chunk) "Sending unregister for chunk: %" PRIu64
   246  qemu_rdma_unregister_waiting_complete(uint64_t chunk) "Unregister for chunk: %" PRIu64 " complete."
   248  …nt, int block, uint64_t chunk, uint64_t current, uint64_t len, int nb_sent, int nb_chunks) "(%d) N…
   249  qemu_rdma_write_one_post(uint64_t chunk, long addr, long remote, uint32_t len) "Posting chunk: %" P…
   251  …egres(int mykey, int theirkey, uint64_t chunk) "Received registration result: my key: 0x%x their k…
  [all …]
|
rdma.c |
    83   * bits 30-63: ram block chunk number, 2^34
   190      struct ibv_mr **pmr;    /* MRs for chunk-level registration */
   191      struct ibv_mr *mr;      /* MR for non-chunk-level registration */
   192      uint32_t *remote_keys;  /* rkeys for chunk-level registration */
   193      uint32_t remote_rkey;   /* rkeys for non-chunk-level registration */
   317      /* index of the chunk in the current ram block */
   419   * Register a single Chunk.
   421   * to register a single chunk of memory before we can perform
   427      uint64_t chunk;         /* chunk to lookup if unregistering */  member
   429      uint32_t current_index; /* which ramblock the chunk belongs to */
  [all …]
|
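The comment at line 83 says bits 30-63 of a work-request id carry the ram block chunk number, which allows 2^34 chunks. A sketch of that bit packing; what the low 30 bits hold is an assumption here, made only for illustration:

    #include <assert.h>
    #include <stdint.h>

    #define CHUNK_SHIFT 30
    #define CHUNK_BITS  34          /* 64 - 30: up to 2^34 chunk numbers */

    /* Pack assumed low-bit metadata and a chunk number into one id. */
    static uint64_t pack_wrid(uint64_t low_bits, uint64_t chunk)
    {
        assert(chunk < (1ULL << CHUNK_BITS));
        assert(low_bits < (1ULL << CHUNK_SHIFT));
        return (chunk << CHUNK_SHIFT) | low_bits;
    }

    static uint64_t wrid_chunk(uint64_t wrid)
    {
        return wrid >> CHUNK_SHIFT;
    }

    int main(void)
    {
        uint64_t wrid = pack_wrid(/*low_bits=*/7, /*chunk=*/123456);
        assert(wrid_chunk(wrid) == 123456);
        return 0;
    }
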
migration.h |
    51   * 1<<6=64 pages -> 256K chunk when page size is 4K. This gives us
    57   * 1<<18=256K pages -> 1G chunk when page size is 4K. This is the
    62   * 1<<31=2G pages -> 8T chunk when page size is 4K. This should be
   487   * This decides the size of guest memory chunk that will be used
   488   * to track dirty bitmap clearing. The size of memory chunk will
   492   * (which is in 4M chunk).
|
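The shift arithmetic in those comments is easy to sanity-check: chunk bytes = (1 << shift) pages times the 4K page size. A standalone check of the three quoted cases:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096ULL

    static uint64_t chunk_bytes(unsigned shift)
    {
        return (1ULL << shift) * PAGE_SIZE;
    }

    int main(void)
    {
        assert(chunk_bytes(6)  == 256ULL << 10);  /* 64 pages   -> 256K */
        assert(chunk_bytes(18) == 1ULL << 30);    /* 256K pages -> 1G   */
        assert(chunk_bytes(31) == 8ULL << 40);    /* 2G pages   -> 8T   */
        return 0;
    }
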
/qemu/hw/display/ |
qxl-render.c |
   220  QXLDataChunk *chunk, uint32_t group_id)  in qxl_unpack_chunks()  argument
   227          bytes = MIN(size - offset, chunk->data_size);  in qxl_unpack_chunks()
   228          memcpy(dest + offset, chunk->data, bytes);  in qxl_unpack_chunks()
   233          chunk = qxl_phys2virt(qxl, chunk->next_chunk, group_id,  in qxl_unpack_chunks()
   234                                sizeof(QXLDataChunk) + chunk->data_size);  in qxl_unpack_chunks()
   235          if (!chunk) {  in qxl_unpack_chunks()
   264      /* Assume that the full cursor is available in a single chunk. */  in qxl_cursor()
   271      and_mask = cursor->chunk.data;  in qxl_cursor()
   280      qxl_unpack_chunks(c->data, size, qxl, &cursor->chunk, group_id);  in qxl_cursor()
   325                        sizeof(QXLCursor) + cursor->chunk.data_size);  in qxl_render_cursor()
|
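qxl_unpack_chunks() walks a linked list of chunks, copying each payload until the destination is full or the chain ends. A hedged sketch of the same walk, with a plain in-memory next pointer standing in for the qxl_phys2virt() guest-address translation:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct data_chunk {
        uint32_t data_size;
        struct data_chunk *next;    /* qxl follows a guest-physical next_chunk */
        uint8_t data[];
    };

    /* Returns bytes actually copied; stops at `size` or end of chain. */
    static size_t unpack_chunks(uint8_t *dest, size_t size,
                                const struct data_chunk *chunk)
    {
        size_t offset = 0;

        while (chunk && offset < size) {
            size_t bytes = size - offset;
            if (bytes > chunk->data_size) {
                bytes = chunk->data_size;
            }
            memcpy(dest + offset, chunk->data, bytes);
            offset += bytes;
            chunk = chunk->next;
        }
        return offset;
    }
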
/qemu/ui/ |
vdagent.c |
    42      VDIChunkHeader chunk;  member
   161      VDIChunkHeader chunk;  in vdagent_send_msg()  local
   173      chunk.port = VDP_CLIENT_PORT;  in vdagent_send_msg()
   174      chunk.size = msgsize - msgoff;  in vdagent_send_msg()
   175      if (chunk.size > 1024) {  in vdagent_send_msg()
   176          chunk.size = 1024;  in vdagent_send_msg()
   178      g_byte_array_append(vd->outbuf, (void *)&chunk, sizeof(chunk));  in vdagent_send_msg()
   179      g_byte_array_append(vd->outbuf, msgbuf + msgoff, chunk.size);  in vdagent_send_msg()
   180      msgoff += chunk.size;  in vdagent_send_msg()
   803      memset(&vd->chunk, 0, sizeof(vd->chunk));  in vdagent_reset_bufs()
  [all …]
|
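vdagent_send_msg() frames a message as a sequence of chunks of at most 1024 bytes, each preceded by a VDIChunkHeader. A simplified sketch of that loop; the header layout and the append helper are stand-ins for the Spice definitions, not the real ones:

    #include <stddef.h>
    #include <stdint.h>

    struct chunk_header {    /* stand-in for VDIChunkHeader */
        uint32_t port;
        uint32_t size;
    };

    #define CLIENT_PORT 1    /* stand-in for VDP_CLIENT_PORT */
    #define MAX_CHUNK   1024

    /* Stand-in for g_byte_array_append(): here we only count framed bytes. */
    static size_t append(const void *buf, size_t len)
    {
        (void)buf;
        return len;
    }

    static size_t send_msg(const uint8_t *msgbuf, size_t msgsize)
    {
        size_t msgoff = 0, total = 0;

        while (msgoff < msgsize) {
            struct chunk_header hdr = {
                .port = CLIENT_PORT,
                .size = (uint32_t)(msgsize - msgoff),
            };
            if (hdr.size > MAX_CHUNK) {
                hdr.size = MAX_CHUNK;
            }
            total += append(&hdr, sizeof(hdr));         /* header first ... */
            total += append(msgbuf + msgoff, hdr.size); /* ... then payload */
            msgoff += hdr.size;
        }
        return total;
    }
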
/qemu/docs/ |
rdma.txt |
   213  8. Register request (dynamic chunk registration)
   272  2. During runtime, once a 'chunk' becomes full of pages ready to
   274     other side to register the memory for this chunk and respond
   280     using chunk registration (or not checked at all and unconditionally
   281     written if chunk registration is disabled). This is accomplished using
   283     then we check the entire chunk for zero. Only if the entire chunk is
   375  Chunk size is not dynamic, but it could be in a future implementation.
   378  When a chunk is full (or a flush() occurs), the memory backed by
   379  the chunk is registered with librdmacm and pinned in memory on
   382  for the entire chunk.
  [all …]
|
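The zero-chunk optimization the doc describes hinges on scanning a whole chunk before deciding to register or transmit it. QEMU has an optimized buffer_is_zero(); this naive loop just shows the test being made:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Skip registration/transmission when the whole chunk is zero. */
    static bool chunk_is_zero(const uint8_t *buf, size_t len)
    {
        for (size_t i = 0; i < len; i++) {
            if (buf[i]) {
                return false;
            }
        }
        return true;
    }
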
/qemu/tests/tcg/aarch64/system/ |
kernel.ld |
     6  /* Align text and rodata to the 1st 2 MiB chunk. */
     8  /* Align r/w data to the 2nd 2 MiB chunk. */
    10  /* Align the MTE-enabled page to the 3rd 2 MiB chunk. */
|
/qemu/tests/functional/qemu_test/ |
linuxkernel.py |
    45      chunk = response.read(1 << 20)
    46      if not chunk:
    48      hl.update(chunk)
|
/qemu/nbd/ |
client.c |
  1429   * Read structured reply chunk except magic field (which should be already
  1433  static int nbd_receive_reply_chunk_header(QIOChannel *ioc, NBDReply *chunk,  in nbd_receive_reply_chunk_header()  argument
  1440      if (chunk->magic == NBD_STRUCTURED_REPLY_MAGIC) {  in nbd_receive_reply_chunk_header()
  1441          len = sizeof(chunk->structured);  in nbd_receive_reply_chunk_header()
  1443          assert(chunk->magic == NBD_EXTENDED_REPLY_MAGIC);  in nbd_receive_reply_chunk_header()
  1444          len = sizeof(chunk->extended);  in nbd_receive_reply_chunk_header()
  1447      ret = nbd_read(ioc, (uint8_t *)chunk + sizeof(chunk->magic),  in nbd_receive_reply_chunk_header()
  1448                     len - sizeof(chunk->magic), "structured chunk",  in nbd_receive_reply_chunk_header()
  1455      chunk->structured.flags = be16_to_cpu(chunk->structured.flags);  in nbd_receive_reply_chunk_header()
  1456      chunk->structured.type = be16_to_cpu(chunk->structured.type);  in nbd_receive_reply_chunk_header()
  [all …]
|
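nbd_receive_reply_chunk_header() reads the rest of the header after the 32-bit magic has already been consumed, choosing the header length from the magic value. A hedged sketch of that dispatch; the magic constants, struct layouts, and read_exact() helper are illustrative stand-ins, not the NBD wire definitions:

    #include <stddef.h>
    #include <stdint.h>

    #define STRUCTURED_MAGIC 0x11111111u   /* placeholder magic values */
    #define EXTENDED_MAGIC   0x22222222u

    struct reply_a { uint32_t magic; uint16_t flags; uint16_t type; };
    struct reply_b { uint32_t magic; uint16_t flags; uint16_t type;
                     uint64_t off; };

    union reply {
        uint32_t magic;
        struct reply_a a;
        struct reply_b b;
    };

    /* Assumed helper that reads exactly `len` bytes or fails. */
    extern int read_exact(int fd, void *buf, size_t len);

    static int receive_header_rest(int fd, union reply *r)
    {
        /* The magic was read earlier to dispatch; fetch the remainder only. */
        size_t len = (r->magic == STRUCTURED_MAGIC) ? sizeof(r->a)
                                                    : sizeof(r->b);

        return read_exact(fd, (uint8_t *)r + sizeof(r->magic),
                          len - sizeof(r->magic));
    }
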
server.c |
   832   * Send one chunk of reply to NBD_OPT_{LIST,SET}_META_CONTEXT
  2087   * Prepare the header of a reply chunk for network transmission.
  2106      NBDExtendedReplyChunk *chunk = iov->iov_base;  in set_be_chunk()  local
  2108      iov[0].iov_len = sizeof(*chunk);  in set_be_chunk()
  2109      stl_be_p(&chunk->magic, NBD_EXTENDED_REPLY_MAGIC);  in set_be_chunk()
  2110      stw_be_p(&chunk->flags, flags);  in set_be_chunk()
  2111      stw_be_p(&chunk->type, type);  in set_be_chunk()
  2112      stq_be_p(&chunk->cookie, request->cookie);  in set_be_chunk()
  2113      stq_be_p(&chunk->offset, request->from);  in set_be_chunk()
  2114      stq_be_p(&chunk->length, length);  in set_be_chunk()
  [all …]
|
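set_be_chunk() stores every header field big-endian via QEMU's st*_be_p() helpers before the iovec goes out on the wire. A portable sketch of equivalent helpers applied to a simplified header (the field set here is an assumption for illustration, not the NBD layout):

    #include <stdint.h>
    #include <string.h>

    /* Store fixed-width values big-endian regardless of host byte order. */
    static void stw_be(void *p, uint16_t v)
    {
        uint8_t b[2] = { (uint8_t)(v >> 8), (uint8_t)v };
        memcpy(p, b, sizeof(b));
    }

    static void stl_be(void *p, uint32_t v)
    {
        uint8_t b[4] = { (uint8_t)(v >> 24), (uint8_t)(v >> 16),
                         (uint8_t)(v >> 8),  (uint8_t)v };
        memcpy(p, b, sizeof(b));
    }

    static void stq_be(void *p, uint64_t v)
    {
        stl_be(p, (uint32_t)(v >> 32));
        stl_be((uint8_t *)p + 4, (uint32_t)v);
    }

    struct reply_chunk {        /* simplified wire header */
        uint32_t magic;
        uint16_t flags;
        uint16_t type;
        uint64_t cookie;
    };

    static void set_be_chunk(struct reply_chunk *c, uint32_t magic,
                             uint16_t flags, uint16_t type, uint64_t cookie)
    {
        stl_be(&c->magic, magic);
        stw_be(&c->flags, flags);
        stw_be(&c->type, type);
        stq_be(&c->cookie, cookie);
    }
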
/qemu/hw/audio/ |
hda-codec.c |
   248          uint32_t chunk = MIN(B_SIZE - start, to_transfer);  in hda_audio_input_timer()  local
   250              &st->state->hda, st->stream, false, st->buf + start, chunk);  in hda_audio_input_timer()
   254          rpos += chunk;  in hda_audio_input_timer()
   255          to_transfer -= chunk;  in hda_audio_input_timer()
   256          st->rpos += chunk;  in hda_audio_input_timer()
   277          uint32_t chunk = (uint32_t) MIN(B_SIZE - start, to_transfer);  in hda_audio_input_cb()  local
   278          uint32_t read = AUD_read(st->voice.in, st->buf + start, chunk);  in hda_audio_input_cb()
   282          if (chunk != read) {  in hda_audio_input_cb()
   318          uint32_t chunk = MIN(B_SIZE - start, to_transfer);  in hda_audio_output_timer()  local
   320              &st->state->hda, st->stream, true, st->buf + start, chunk);  in hda_audio_output_timer()
  [all …]
|
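Each of these loops clamps a transfer to MIN(B_SIZE - start, to_transfer) so a single copy never runs past the physical end of the circular buffer; wrap-around happens on the next iteration. A hedged sketch of the same wrap-aware chunking with an assumed ring size:

    #include <stdint.h>
    #include <string.h>

    #define B_SIZE 8192u   /* assumed ring size for this sketch */
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Drain `to_transfer` bytes from ring `buf`, starting at the monotonic
     * read position *rpos, into `out`, splitting at the buffer's end. */
    static void ring_read(const uint8_t *buf, uint32_t *rpos,
                          uint8_t *out, uint32_t to_transfer)
    {
        while (to_transfer) {
            uint32_t start = *rpos % B_SIZE;
            uint32_t chunk = MIN(B_SIZE - start, to_transfer);

            memcpy(out, buf + start, chunk);
            out += chunk;
            *rpos += chunk;
            to_transfer -= chunk;
        }
    }
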
/qemu/tests/qemu-iotests/ |
303 |
    32  chunk = 1024 * 1024  variable
    55  write_to_disk((i) * chunk, chunk)
|
242 |
    33  chunk = 256 * 1024  variable
    85  write_to_disk((num-1) * chunk, chunk)
|
nbd-fault-injector.py |
    83      chunk = sock.recv(bufsize - received)
    84      if len(chunk) == 0:
    86      chunks.append(chunk)
    87      received += len(chunk)
|
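The Python snippet above is the classic read-until-full loop: stream sockets may return short reads, so the receiver keeps calling recv() until the requested size arrives or the peer closes. The same pattern in C over POSIX sockets, as a sketch:

    #include <stddef.h>
    #include <sys/types.h>
    #include <sys/socket.h>

    /* Returns bytes received; < bufsize means the peer closed early
     * (errors are folded into early return here for brevity). */
    static size_t recv_all(int sock, void *buf, size_t bufsize)
    {
        size_t received = 0;

        while (received < bufsize) {
            ssize_t ret = recv(sock, (char *)buf + received,
                               bufsize - received, 0);
            if (ret <= 0) {     /* 0: orderly shutdown, <0: error */
                break;
            }
            received += (size_t)ret;
        }
        return received;
    }
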
219 |
   207  # copy-chunk-size.
   209  # Choose 64k copy-chunk-size both for mirror (by buf_size) and backup (by
   210  # x-max-chunk). The slice time, i.e. the granularity of the rate limiting
   230  'x-perf': {'max-chunk': 65536},
|
/qemu/tests/tcg/multiarch/ |
sha512.c |
   270  /** Perform one SHA-512 transformation, processing a 128-byte chunk. */
   271  static void Transform(uint64_t *s, const uint64_t *chunk)  in Transform()  argument
   276      Round(a, b, c, &d, e, f, g, &h, 0x428a2f98d728ae22ull, w0 = be64_to_cpu(chunk[0]));  in Transform()
   277      Round(h, a, b, &c, d, e, f, &g, 0x7137449123ef65cdull, w1 = be64_to_cpu(chunk[1]));  in Transform()
   278      Round(g, h, a, &b, c, d, e, &f, 0xb5c0fbcfec4d3b2full, w2 = be64_to_cpu(chunk[2]));  in Transform()
   279      Round(f, g, h, &a, b, c, d, &e, 0xe9b5dba58189dbbcull, w3 = be64_to_cpu(chunk[3]));  in Transform()
   280      Round(e, f, g, &h, a, b, c, &d, 0x3956c25bf348b538ull, w4 = be64_to_cpu(chunk[4]));  in Transform()
   281      Round(d, e, f, &g, h, a, b, &c, 0x59f111f1b605d019ull, w5 = be64_to_cpu(chunk[5]));  in Transform()
   282      Round(c, d, e, &f, g, h, a, &b, 0x923f82a4af194f9bull, w6 = be64_to_cpu(chunk[6]));  in Transform()
   283      Round(b, c, d, &e, f, g, h, &a, 0xab1c5ed5da6d8118ull, w7 = be64_to_cpu(chunk[7]));  in Transform()
  [all …]
|
vma-pthread.c |
     6   * Map a contiguous chunk of RWX memory. Split it into 8 equally sized
   169      /* Initialize memory chunk. */  in main()
   200      /* Destroy memory chunk. */  in main()
|
/qemu/scripts/ |
qemu-stamp.py |
    16      for chunk in iter(lambda: f.read(65536), b''):
    17          sha.update(chunk)
|
/qemu/tests/tcg/aarch64/gdbstub/ |
test-mte.py |
     9  # chunk, and then using the GDB 'memory-tagging' subcommands to set/get tags in
    10  # different memory locations and ranges in the MTE-enabled memory chunk.
    41  # Tagged address: the start of the MTE-enabled memory chunk to be tested
|
/qemu/include/standard-headers/asm-m68k/ |
bootinfo.h |
    37      uint32_t addr;    /* physical address of memory chunk */
    38      uint32_t size;    /* length of memory chunk (in bytes) */
    54  #define BI_MEMCHUNK 0x0005    /* memory chunk address and size */
|
/qemu/include/block/ |
nbd.h |
   142  /* Header of chunk for NBD_REPLY_TYPE_OFFSET_DATA */
   149  /* Complete chunk for NBD_REPLY_TYPE_OFFSET_HOLE */
   282  /* only one extent in BLOCK_STATUS reply chunk */
   323  /* Chunk reply flags (for structured and extended replies) */
   324  #define NBD_REPLY_FLAG_DONE (1 << 0)    /* This reply-chunk is last */
   326  /* Chunk reply types */
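NBD_REPLY_FLAG_DONE marks the final chunk of a structured or extended reply, so a reassembly loop keeps reading chunks for one cookie until the flag appears. A trivial sketch of the test:

    #include <stdbool.h>
    #include <stdint.h>

    #define NBD_REPLY_FLAG_DONE (1 << 0)  /* this reply-chunk is last */

    /* Loop condition for a chunk-reassembly reader: stop after the chunk
     * whose flags carry NBD_REPLY_FLAG_DONE. */
    static bool chunk_is_last(uint16_t flags)
    {
        return flags & NBD_REPLY_FLAG_DONE;
    }
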
|