Lines Matching defs:memory

16 #include "memory-alloc.h"
28 * memory managed by the delta_zone structure. The delta_zone can move the data around within its
29 * memory, so the location of each delta list is recorded as a bit offset into the memory. Because
32 * volume index delta list memory can easily exceed 4 gigabits, so a 64 bit value is needed to
33 * address the memory. The volume index delta lists average around 6 kilobits, so 16 bits are
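The matched comment pins down the addressing scheme: each list location is a 64-bit bit offset, and 16 bits suffice for a list size averaging around 6 kilobits. A minimal sketch of a per-list descriptor consistent with that description (the start/size field names match the uses visible at source lines 769-770 below; the struct layout itself is an assumption):

struct delta_list_sketch {
	u64 start;	/* bit offset of the list within the zone's memory */
	u16 size;	/* length of the list in bits, ~6 kilobits on average */
};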
49 * The delta in each entry is encoded with a variable-length Huffman code to minimize the memory
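A hedged sketch of how such a code could be decoded, reconstructed from the encode fragments visible later in this listing (source lines 1669-1678): a delta below some threshold occupies exactly min_bits bits, while a larger delta stores a min_bits field followed by a run of zero bits and a terminating 1. min_bits appears in the matched lines; min_keys and incr_keys are assumed names for the threshold and the per-zero increment:

static u32 decode_delta_sketch(const u8 *memory, u64 offset,
			       u8 min_bits, u32 min_keys, u32 incr_keys)
{
	u32 field = get_field(memory, offset, min_bits);
	u32 zeros = 0;

	if (field < min_keys)
		return field;	/* short code: the field alone is the delta */

	/*
	 * Long code: count zero bits until the terminating 1. The all-ones
	 * guard list described below guarantees this scan terminates even
	 * on corrupt data.
	 */
	offset += min_bits;
	while (get_field(memory, offset + zeros, 1) == 0)
		zeros++;

	return field + zeros * incr_keys;
}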
55 * some bytes beyond the end of the bit field, so a delta_zone memory allocation is guarded by two
56 * invalid delta lists to prevent reading outside the delta_zone memory. The valid delta lists are
59 * delta list could cause this step to run off the end of the delta_zone memory, so as extra
66 * temporary offsets, and thus is somewhat more memory efficient.
82 * This is the number of guard bytes needed at the end of the memory byte array when using the bit
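Because get_field reads whole words, a field ending on the last valid bit still pulls in bytes past it; the guard bytes make that read safe. A sketch of how the tail of the allocation could be initialized, mirroring the memset at source line 774 (the constant's value here is an assumption; enough bytes that an unaligned 64-bit read at the last valid bit stays inside the allocation):

#define POST_FIELD_GUARD_BYTES_SKETCH sizeof(u64)	/* assumed value */

static void init_guard_bytes_sketch(u8 *memory, size_t memory_size)
{
	/*
	 * All-ones guard bytes also stop a runaway scan for a terminating
	 * 1 bit instead of letting it run off the end of the zone.
	 */
	memset(memory + memory_size - POST_FIELD_GUARD_BYTES_SKETCH, ~0,
	       POST_FIELD_GUARD_BYTES_SKETCH);
}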
171 memmove(delta_zone->memory + destination,
172 delta_zone->memory + source,
233 memset(zone->memory + (list_bits / BITS_PER_BYTE), ~0,
301 vdo_free(vdo_forget(delta_index->delta_zones[z].memory));
314 result = vdo_allocate(size, u8, "delta list", &delta_zone->memory);
404 static inline u32 get_field(const u8 *memory, u64 offset, u8 size)
406 const void *addr = memory + offset / BITS_PER_BYTE;
412 static inline void set_field(u32 value, u8 *memory, u64 offset, u8 size)
414 void *addr = memory + offset / BITS_PER_BYTE;
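The matched lines show only the signatures and address computation of the field accessors. A plausible completion using the kernel's unaligned little-endian helpers (the bodies are an assumption; get_unaligned_le32/put_unaligned_le32 live in <linux/unaligned.h> on recent kernels, <asm/unaligned.h> on older ones):

static inline u32 get_field_sketch(const u8 *memory, u64 offset, u8 size)
{
	const void *addr = memory + offset / BITS_PER_BYTE;

	/*
	 * Read 32 little-endian bits, shift off the bits below the field,
	 * and mask down to the field width.
	 */
	return (get_unaligned_le32(addr) >> (offset % BITS_PER_BYTE)) &
		((1 << size) - 1);
}

static inline void set_field_sketch(u32 value, u8 *memory, u64 offset, u8 size)
{
	void *addr = memory + offset / BITS_PER_BYTE;
	int shift = offset % BITS_PER_BYTE;
	u32 data = get_unaligned_le32(addr);

	/* Clear the target field, then merge in the new value. */
	data &= ~(((1 << size) - 1) << shift);
	data |= value << shift;
	put_unaligned_le32(data, addr);
}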
431 static inline u32 get_immutable_start(const u8 *memory, u32 list_number)
433 return get_field(memory, get_immutable_header_offset(list_number),
438 static inline void set_immutable_start(u8 *memory, u32 list_number, u32 start)
440 set_field(start, memory, get_immutable_header_offset(list_number),
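These accessors read and write one fixed-width start offset per list from a header array at the front of the immutable page. A sketch of the layout they imply; the header fields are inferred from the nonce, list-count, and chapter-number parameters visible around source lines 495-520 and 708 (first_list and the 32-bit field width are assumptions):

struct delta_page_header_sketch {
	u64 nonce;			/* identifies a valid page */
	u64 virtual_chapter_number;	/* chapter this page indexes */
	u16 first_list;			/* first delta list on the page */
	u16 list_count;			/* number of lists on the page */
} __packed;

#define IMMUTABLE_HEADER_SIZE_SKETCH 32	/* assumed bits per start offset */

static inline size_t get_immutable_header_offset_sketch(u32 list_number)
{
	/*
	 * The offset array begins right after the page header and is itself
	 * addressed in bits, like all other delta index data.
	 */
	return sizeof(struct delta_page_header_sketch) * BITS_PER_BYTE +
		list_number * IMMUTABLE_HEADER_SIZE_SKETCH;
}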
445 u8 *memory, size_t memory_size)
465 if (get_immutable_start(memory, 0) != get_immutable_header_offset(list_count + 1))
470 if (get_immutable_start(memory, i) > get_immutable_start(memory, i + 1))
478 if (get_immutable_start(memory, list_count) >
484 if (memory[memory_size - POST_FIELD_GUARD_BYTES + i] != (u8) ~0)
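The conditionals matched above are the invariants of a well-formed immutable page. Gathered into one routine (the bool return and the exact bound in the third check are simplifications/assumptions; the individual tests come straight from the matched lines):

static bool verify_immutable_page_sketch(const u8 *memory, size_t memory_size,
					 u32 list_count)
{
	unsigned int i;

	/* The first list must start immediately after the offset array. */
	if (get_immutable_start(memory, 0) !=
	    get_immutable_header_offset(list_count + 1))
		return false;

	/* List start offsets must be monotonically nondecreasing. */
	for (i = 0; i < list_count; i++) {
		if (get_immutable_start(memory, i) >
		    get_immutable_start(memory, i + 1))
			return false;
	}

	/* The last list must end before the trailing guard bytes. */
	if (get_immutable_start(memory, list_count) >
	    (memory_size - POST_FIELD_GUARD_BYTES) * BITS_PER_BYTE)
		return false;

	/* The guard bytes themselves must be intact (all ones). */
	for (i = 0; i < POST_FIELD_GUARD_BYTES; i++) {
		if (memory[memory_size - POST_FIELD_GUARD_BYTES + i] != (u8) ~0)
			return false;
	}

	return true;
}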
495 u8 *memory, size_t memory_size)
501 struct delta_page_header *header = (struct delta_page_header *) memory;
513 if (!verify_delta_index_page(nonce, list_count, expected_nonce, memory,
520 if (!verify_delta_index_page(nonce, list_count, expected_nonce, memory,
543 delta_zone->memory = memory;
562 static inline u64 get_big_field(const u8 *memory, u64 offset, u8 size)
564 const void *addr = memory + offset / BITS_PER_BYTE;
570 static inline void set_big_field(u64 value, u8 *memory, u64 offset, u8 size)
572 void *addr = memory + offset / BITS_PER_BYTE;
582 static inline void set_zero(u8 *memory, u64 offset, u32 size)
585 u8 *addr = memory + offset / BITS_PER_BYTE;
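get_big_field and set_big_field presumably mirror the 32-bit accessors with 64-bit unaligned loads for wider fields. set_zero clears an arbitrary run of bits; a simplified sketch consistent with the address computation shown at source line 585 (the loop body is an assumption):

static inline void set_zero_sketch(u8 *memory, u64 offset, u32 size)
{
	u8 *addr = memory + offset / BITS_PER_BYTE;
	u8 shift = offset % BITS_PER_BYTE;

	/*
	 * Clear the partial leading byte, then whole bytes, then the
	 * partial trailing byte.
	 */
	while (size > 0) {
		u32 count = min((u32)(BITS_PER_BYTE - shift), size);

		*addr &= ~(((1 << count) - 1) << shift);
		addr++;
		shift = 0;
		size -= count;
	}
}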
600 * size and memory offsets are measured in bits.
640 * size and memory offsets are measured in bits.
679 * to the destination. The size and memory offsets are measured in bits.
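All three helpers take offsets and sizes in bits rather than bytes. A hypothetical caller, modeled on the list moves visible elsewhere in this listing (source lines 171-172 and 1654); that move_bits copes with overlapping source and destination ranges is implied by those uses but assumed here:

/* Hypothetical: slide one delta list to a new bit position in the zone. */
static void slide_list_sketch(struct delta_zone *zone,
			      struct delta_list *list, u64 new_start)
{
	move_bits(zone->memory, list->start, zone->memory, new_start,
		  list->size);
	list->start = new_start;
}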
704 * memory page used in the immutable index. The number of lists copied onto the page is returned in
708 u8 *memory, size_t memory_size, u64 virtual_chapter_number,
752 header = (struct delta_page_header *) memory;
761 set_immutable_start(memory, 0, offset);
764 set_immutable_start(memory, i + 1, offset);
767 /* Copy the delta list data onto the memory page. */
769 move_bits(delta_zone->memory, delta_lists[i].start, memory,
770 get_immutable_start(memory, i), delta_lists[i].size);
774 memset(memory + memory_size - POST_FIELD_GUARD_BYTES, ~0,
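Source lines 704-774 assemble an immutable page in three steps: write the start-offset array, copy each list's bits into place, and seal the page with guard bytes. A condensed sketch of that sequence (list selection, header fill-in, and error handling omitted):

static void pack_page_sketch(struct delta_zone *zone, u8 *memory,
			     size_t memory_size,
			     const struct delta_list *delta_lists,
			     u32 list_count)
{
	u32 i;
	u64 offset = get_immutable_header_offset(list_count + 1);

	/* Record where each list will start, laid end to end after the
	 * offset array. */
	set_immutable_start(memory, 0, offset);
	for (i = 0; i < list_count; i++) {
		offset += delta_lists[i].size;
		set_immutable_start(memory, i + 1, offset);
	}

	/* Copy each list's bits from the zone onto the page. */
	for (i = 0; i < list_count; i++) {
		move_bits(zone->memory, delta_lists[i].start, memory,
			  get_immutable_start(memory, i), delta_lists[i].size);
	}

	/* Terminate the page with all-ones guard bytes. */
	memset(memory + memory_size - POST_FIELD_GUARD_BYTES, ~0,
	       POST_FIELD_GUARD_BYTES);
}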
812 /* Extend and balance memory to receive the delta lists */
984 move_bits(data, save_info->bit_offset, delta_zone->memory, delta_list->start,
1105 vdo_log_warning_strerror(result, "failed to write delta list memory");
1110 zone->memory + get_delta_list_byte_start(delta_list),
1113 vdo_log_warning_strerror(result, "failed to write delta list memory");
1207 /* A single zone uses at least as much memory as any one zone would under a larger zone count. */
1264 delta_list->start = get_immutable_start(delta_zone->memory, list_number);
1265 end_offset = get_immutable_start(delta_zone->memory, list_number + 1);
1282 uds_prefetch_range(&delta_zone->memory[delta_list->start / BITS_PER_BYTE],
1313 const u8 *memory = delta_zone->memory;
1315 const u8 *addr = memory + delta_offset / BITS_PER_BYTE;
1416 const u8 *addr = entry->delta_zone->memory + offset / BITS_PER_BYTE;
1427 u8 *addr = entry->delta_zone->memory + offset / BITS_PER_BYTE;
1503 return get_field(delta_entry->delta_zone->memory,
1536 set_field(value, delta_entry->delta_zone->memory,
1542 * Extend the memory used by the delta lists by adding growing_size bytes before the list indicated
1587 u8 *memory;
1626 * and/or rebalance the delta list memory choosing to move the least amount of
1653 memory = delta_zone->memory;
1654 move_bits(memory, source, memory, destination, count);
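The rebalance hinted at by source lines 1626-1654 must move lists without overwriting bits it has not yet moved. One plausible shape is a divide-and-conquer pass that relocates the middle list and recurses into the half lying in the direction of motion first; the new_starts parameter and the delta_lists zone field are assumptions:

/* Hypothetical recursive rebalance: handle the half that must get out
 * of the way first, so overlapping moves never collide. */
static void rebalance_sketch(struct delta_zone *zone, u32 first, u32 last,
			     const u64 *new_starts)
{
	if (first == last) {
		struct delta_list *list = &zone->delta_lists[first];

		if (list->start != new_starts[first]) {
			move_bits(zone->memory, list->start, zone->memory,
				  new_starts[first], list->size);
			list->start = new_starts[first];
		}
	} else {
		u32 middle = (first + last) / 2;
		const struct delta_list *list = &zone->delta_lists[middle];

		if (new_starts[middle] > list->start) {
			/* Moving right: clear the right half first. */
			rebalance_sketch(zone, middle + 1, last, new_starts);
			rebalance_sketch(zone, first, middle, new_starts);
		} else {
			/* Moving left: clear the left half first. */
			rebalance_sketch(zone, first, middle, new_starts);
			rebalance_sketch(zone, middle + 1, last, new_starts);
		}
	}
}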
1665 u8 *memory = delta_zone->memory;
1669 set_field(delta_entry->delta, memory, offset, delta_zone->min_bits);
1676 set_field(t1, memory, offset, delta_zone->min_bits);
1677 set_zero(memory, offset + delta_zone->min_bits, t2);
1678 set_field(1, memory, offset + delta_zone->min_bits + t2, 1);
1684 u8 *memory = delta_entry->delta_zone->memory;
1687 set_field(value, memory, offset, delta_entry->value_bits);
1807 u8 *memory = delta_entry->delta_zone->memory;
1843 move_bits(memory, source, memory, destination, count);