#include "kvm/qcow.h"

#include "kvm/disk-image.h"
#include "kvm/read-write.h"
#include "kvm/mutex.h"
#include "kvm/util.h"

#include <sys/types.h>
#include <sys/stat.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include <linux/byteorder.h>
#include <linux/kernel.h>
#include <linux/types.h>
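/*
 * Cached L2 tables are tracked by two structures that the helpers below
 * keep in sync: an rb-tree (q->root) keyed by the table's on-disk offset
 * for fast lookup, and an LRU list (q->lru_list) used to pick a victim
 * once MAX_CACHE_NODES tables are cached.
 */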
static int insert(struct rb_root *root, struct qcow_l2_table *new)
{
        struct rb_node **link = &(root->rb_node), *parent = NULL;
        u64 offset = new->offset;

        /* search the tree */
        while (*link) {
                struct qcow_l2_table *t;

                t = rb_entry(*link, struct qcow_l2_table, node);
                if (!t)
                        goto error;

                parent = *link;

                if (t->offset > offset)
                        link = &(*link)->rb_left;
                else if (t->offset < offset)
                        link = &(*link)->rb_right;
                else
                        goto out;
        }

        /* add new node */
        rb_link_node(&new->node, parent, link);
        rb_insert_color(&new->node, root);
out:
        return 0;
error:
        return -1;
}

static struct qcow_l2_table *search(struct rb_root *root, u64 offset)
{
        struct rb_node *link = root->rb_node;

        while (link) {
                struct qcow_l2_table *t;

                t = rb_entry(link, struct qcow_l2_table, node);
                if (!t)
                        goto out;

                if (t->offset > offset)
                        link = link->rb_left;
                else if (t->offset < offset)
                        link = link->rb_right;
                else
                        return t;
        }
out:
        return NULL;
}

static void free_cache(struct qcow *q)
{
        struct list_head *pos, *n;
        struct qcow_l2_table *t;
        struct rb_root *r = &q->root;

        list_for_each_safe(pos, n, &q->lru_list) {
                /* Remove cache table from the list and RB tree */
                list_del(pos);
                t = list_entry(pos, struct qcow_l2_table, list);
                rb_erase(&t->node, r);

                /* Free the cached node */
                free(t);
        }
}
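/*
 * A cached L2 table is written back to the image only when it is marked
 * dirty: cache_table() writes out the victim before evicting it and
 * qcow_disk_flush() writes back every cached table on a flush request.
 */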
static int qcow_l2_cache_write(struct qcow *q, struct qcow_l2_table *c)
{
        struct qcow_header *header = q->header;
        u64 size;

        if (!c->dirty)
                return 0;

        size = 1 << header->l2_bits;

        if (pwrite_in_full(q->fd, c->table, size * sizeof(u64), c->offset) < 0)
                return -1;

        c->dirty = 0;

        return 0;
}

static int cache_table(struct qcow *q, struct qcow_l2_table *c)
{
        struct rb_root *r = &q->root;
        struct qcow_l2_table *lru;

        if (q->nr_cached == MAX_CACHE_NODES) {
                /*
                 * The node at the head of the list is the least recently used
                 * node. Remove it from the list and replace it with the new node.
                 */
                lru = list_first_entry(&q->lru_list, struct qcow_l2_table, list);

                if (qcow_l2_cache_write(q, lru) < 0)
                        goto error;

                /* Remove the node from the cache */
                rb_erase(&lru->node, r);
                list_del_init(&lru->list);
                q->nr_cached--;

                /* Free the LRUed node */
                free(lru);
        }

        /* Add new node in RB Tree: Helps in searching faster */
        if (insert(r, c) < 0)
                goto error;

        /* Add in LRU replacement list */
        list_add_tail(&c->list, &q->lru_list);
        q->nr_cached++;

        return 0;
error:
        return -1;
}

static struct qcow_l2_table *search_table(struct qcow *q, u64 offset)
{
        struct qcow_l2_table *l2t;

        l2t = search(&q->root, offset);
        if (!l2t)
                return NULL;

        /* Update the LRU state by moving the searched node to the list tail */
        list_move_tail(&l2t->list, &q->lru_list);

        return l2t;
}

/* Allocates a new node for caching an L2 table */
static struct qcow_l2_table *new_cache_table(struct qcow *q, u64 offset)
{
        struct qcow_header *header = q->header;
        struct qcow_l2_table *c;
        u64 l2t_sz;
        u64 size;

        l2t_sz = 1 << header->l2_bits;
        size = sizeof(*c) + l2t_sz * sizeof(u64);
        c = calloc(1, size);
        if (!c)
                goto out;

        c->offset = offset;
        RB_CLEAR_NODE(&c->node);
        INIT_LIST_HEAD(&c->list);
out:
        return c;
}
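/*
 * A guest offset is split into three fields using the geometry from the
 * header: the top bits index the L1 table, the middle bits index the L2
 * table and the low cluster_bits are the byte offset inside the data
 * cluster. For example, with 64 KiB clusters (cluster_bits = 16, hence
 * l2_bits = 13 for QCOW2) an offset decomposes into bits [63:29] for the
 * L1 index, [28:16] for the L2 index and [15:0] for the in-cluster offset.
 */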
static inline u64 get_l1_index(struct qcow *q, u64 offset)
{
        struct qcow_header *header = q->header;

        return offset >> (header->l2_bits + header->cluster_bits);
}

static inline u64 get_l2_index(struct qcow *q, u64 offset)
{
        struct qcow_header *header = q->header;

        return (offset >> (header->cluster_bits)) & ((1 << header->l2_bits)-1);
}

static inline u64 get_cluster_offset(struct qcow *q, u64 offset)
{
        struct qcow_header *header = q->header;

        return offset & ((1 << header->cluster_bits)-1);
}

static struct qcow_l2_table *qcow_read_l2_table(struct qcow *q, u64 offset)
{
        struct qcow_header *header = q->header;
        struct qcow_l2_table *l2t;
        u64 size;

        size = 1 << header->l2_bits;

        /* search an entry for offset in cache */
        l2t = search_table(q, offset);
        if (l2t)
                return l2t;

        /* allocate new node for caching l2 table */
        l2t = new_cache_table(q, offset);
        if (!l2t)
                goto error;

        /* table not cached: read from the disk */
        if (pread_in_full(q->fd, l2t->table, size * sizeof(u64), offset) < 0)
                goto error;

        /* cache the table */
        if (cache_table(q, l2t) < 0)
                goto error;

        return l2t;
error:
        free(l2t);
        return NULL;
}
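/*
 * Read at most one cluster worth of data: the L1 entry gives the L2 table,
 * the L2 entry gives the data cluster. Unallocated clusters read back as
 * zeroes, compressed clusters are not supported. Returns the number of
 * bytes read from this cluster or -1 on error.
 */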
static ssize_t qcow_read_cluster(struct qcow *q, u64 offset, void *dst, u32 dst_len)
{
        struct qcow_header *header = q->header;
        struct qcow_l1_table *l1t = &q->table;
        struct qcow_l2_table *l2t;
        u64 cluster_size;
        u64 clust_offset;
        u64 clust_start;
        u64 l2t_offset;
        size_t length;
        u64 l2t_size;
        u64 l1_idx;
        u64 l2_idx;

        cluster_size = 1 << header->cluster_bits;

        l1_idx = get_l1_index(q, offset);
        if (l1_idx >= l1t->table_size)
                return -1;

        clust_offset = get_cluster_offset(q, offset);
        if (clust_offset >= cluster_size)
                return -1;

        length = cluster_size - clust_offset;
        if (length > dst_len)
                length = dst_len;

        mutex_lock(&q->mutex);

        l2t_offset = be64_to_cpu(l1t->l1_table[l1_idx]);
        if (l2t_offset & QCOW_OFLAG_COMPRESSED) {
                pr_warning("compressed sectors are not supported");
                goto out_error;
        }

        l2t_offset &= QCOW_OFFSET_MASK;
        if (!l2t_offset)
                goto zero_cluster;

        l2t_size = 1 << header->l2_bits;

        /* read and cache level 2 table */
        l2t = qcow_read_l2_table(q, l2t_offset);
        if (!l2t)
                goto out_error;

        l2_idx = get_l2_index(q, offset);
        if (l2_idx >= l2t_size)
                goto out_error;

        clust_start = be64_to_cpu(l2t->table[l2_idx]);
        if (clust_start & QCOW_OFLAG_COMPRESSED) {
                pr_warning("compressed sectors are not supported");
                goto out_error;
        }

        clust_start &= QCOW_OFFSET_MASK;
        if (!clust_start)
                goto zero_cluster;

        mutex_unlock(&q->mutex);

        if (pread_in_full(q->fd, dst, length, clust_start + clust_offset) < 0)
                return -1;

        return length;

zero_cluster:
        mutex_unlock(&q->mutex);
        memset(dst, 0, length);
        return length;

out_error:
        mutex_unlock(&q->mutex);
        return -1;
}

static ssize_t qcow_read_sector(struct disk_image *disk, u64 sector, void *dst, u32 dst_len)
{
        struct qcow *q = disk->priv;
        struct qcow_header *header = q->header;
        u32 nr_read;
        u64 offset;
        char *buf;
        ssize_t nr;

        buf = dst;
        nr_read = 0;

        while (nr_read < dst_len) {
                offset = sector << SECTOR_SHIFT;
                if (offset >= header->size)
                        return -1;

                nr = qcow_read_cluster(q, offset, buf, dst_len - nr_read);
                if (nr <= 0)
                        return -1;

                nr_read += nr;
                buf += nr;
                sector += (nr >> SECTOR_SHIFT);
        }

        return dst_len;
}
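/*
 * file_size() returns 0 on failure so that callers can treat a zero size
 * as an error. qcow_pwrite_sync() bundles pwrite_in_full() with
 * fdatasync() for writes that must reach stable storage immediately.
 */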
static inline u64 file_size(int fd)
{
        struct stat st;

        if (fstat(fd, &st) < 0)
                return 0;

        return st.st_size;
}

static inline int qcow_pwrite_sync(int fd, void *buf, size_t count, off_t offset)
{
        if (pwrite_in_full(fd, buf, count, offset) < 0)
                return -1;

        return fdatasync(fd);
}

/* Writes a level 2 table at the end of the file. */
static u64 qcow_write_l2_table(struct qcow *q, u64 *table)
{
        struct qcow_header *header = q->header;
        u64 clust_sz;
        u64 f_sz;
        u64 off;
        u64 sz;

        f_sz = file_size(q->fd);
        if (!f_sz)
                return 0;

        sz = 1 << header->l2_bits;
        clust_sz = 1 << header->cluster_bits;
        off = ALIGN(f_sz, clust_sz);

        if (pwrite_in_full(q->fd, table, sz * sizeof(u64), off) < 0)
                return 0;

        return off;
}

/*
 * A QCOW file might grow during a write operation: not only data but also
 * metadata is written at the end of the file. Therefore it is necessary to
 * ensure that every write is committed to disk; qcow_disk_flush()
 * synchronizes the in-core state of the QCOW image with the disk.
 *
 * We also try to restore the image to a consistent state if a metadata
 * operation fails. The two metadata operations are the level 1 and level 2
 * table updates; if either of them fails, the image is truncated back to a
 * consistent state.
 */
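/*
 * qcow_write_cluster() rejects compressed entries and entries without
 * QCOW_OFLAG_COPIED. For the remaining cases it allocates on demand: a
 * missing L2 table or data cluster is placed at the next cluster-aligned
 * offset past the current end of the file, the in-core L1/L2 entries are
 * updated and the cached L2 table is marked dirty before the data itself
 * is written at clust_start + clust_off.
 */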
static ssize_t qcow_write_cluster(struct qcow *q, u64 offset, void *buf, u32 src_len)
{
        struct qcow_header *header = q->header;
        struct qcow_l1_table *l1t = &q->table;
        struct qcow_l2_table *l2t;
        u64 clust_start;
        u64 l2t_offset;
        u64 clust_off;
        u64 l2t_size;
        u64 clust_sz;
        u64 l1t_idx;
        u64 l2t_idx;
        u64 f_sz;
        u64 len;

        l2t = NULL;
        l2t_size = 1 << header->l2_bits;
        clust_sz = 1 << header->cluster_bits;

        l1t_idx = get_l1_index(q, offset);
        if (l1t_idx >= l1t->table_size)
                return -1;

        l2t_idx = get_l2_index(q, offset);
        if (l2t_idx >= l2t_size)
                return -1;

        clust_off = get_cluster_offset(q, offset);
        if (clust_off >= clust_sz)
                return -1;

        len = clust_sz - clust_off;
        if (len > src_len)
                len = src_len;

        mutex_lock(&q->mutex);

        l2t_offset = be64_to_cpu(l1t->l1_table[l1t_idx]);
        if (l2t_offset & QCOW_OFLAG_COMPRESSED) {
                pr_warning("compressed clusters are not supported");
                goto error;
        }
        if (!(l2t_offset & QCOW_OFLAG_COPIED)) {
                pr_warning("copy-on-write clusters are not supported");
                goto error;
        }

        l2t_offset &= QCOW_OFFSET_MASK;
        if (l2t_offset) {
                /* read and cache l2 table */
                l2t = qcow_read_l2_table(q, l2t_offset);
                if (!l2t)
                        goto error;
        } else {
                l2t = new_cache_table(q, l2t_offset);
                if (!l2t)
                        goto error;

                /* Capture the state of the consistent QCOW image */
                f_sz = file_size(q->fd);
                if (!f_sz)
                        goto free_cache;

                /* Write the l2 table of 0's at the end of the file */
                l2t_offset = qcow_write_l2_table(q, l2t->table);
                if (!l2t_offset)
                        goto free_cache;

                if (cache_table(q, l2t) < 0) {
                        if (ftruncate(q->fd, f_sz) < 0)
                                goto free_cache;

                        goto free_cache;
                }

                /* Update the in-core entry */
                l1t->l1_table[l1t_idx] = cpu_to_be64(l2t_offset);
        }

        /* Capture the state of the consistent QCOW image */
        f_sz = file_size(q->fd);
        if (!f_sz)
                goto error;

        clust_start = be64_to_cpu(l2t->table[l2t_idx]);
        if (clust_start & QCOW_OFLAG_COMPRESSED) {
                pr_warning("compressed clusters are not supported");
                goto error;
        }
        if (!(clust_start & QCOW_OFLAG_COPIED)) {
                pr_warning("copy-on-write clusters are not supported");
                goto error;
        }

        clust_start &= QCOW_OFFSET_MASK;
        if (!clust_start) {
                clust_start = ALIGN(f_sz, clust_sz);
                l2t->table[l2t_idx] = cpu_to_be64(clust_start);
                l2t->dirty = 1;
        }

        mutex_unlock(&q->mutex);

        /* Write actual data */
        if (pwrite_in_full(q->fd, buf, len, clust_start + clust_off) < 0)
                return -1;

        return len;

free_cache:
        free(l2t);
error:
        mutex_unlock(&q->mutex);
        return -1;
}

static ssize_t qcow_write_sector(struct disk_image *disk, u64 sector, void *src, u32 src_len)
{
        struct qcow *q = disk->priv;
        struct qcow_header *header = q->header;
        u32 nr_written;
        char *buf;
        u64 offset;
        ssize_t nr;

        buf = src;
        nr_written = 0;
        offset = sector << SECTOR_SHIFT;

        while (nr_written < src_len) {
                if (offset >= header->size)
                        return -1;

                nr = qcow_write_cluster(q, offset, buf, src_len - nr_written);
                if (nr < 0)
                        return -1;

                nr_written += nr;
                buf += nr;
                offset += nr;
        }

        return nr_written;
}

static ssize_t qcow_nowrite_sector(struct disk_image *disk, u64 sector, void *src, u32 src_len)
{
        /* I/O error */
        pr_info("%s: no write support\n", __func__);
        return -1;
}
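/*
 * A flush writes back all cached L2 tables, calls fdatasync() so that the
 * tables and data clusters reach the disk, then writes out the L1 table
 * and finishes with fsync().
 */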
static int qcow_disk_flush(struct disk_image *disk)
{
        struct qcow *q = disk->priv;
        struct qcow_header *header;
        struct list_head *pos, *n;
        struct qcow_l1_table *table;

        header = q->header;
        table = &q->table;

        mutex_lock(&q->mutex);

        list_for_each_safe(pos, n, &q->lru_list) {
                struct qcow_l2_table *c = list_entry(pos, struct qcow_l2_table, list);

                if (qcow_l2_cache_write(q, c) < 0)
                        goto error_unlock;
        }

        if (fdatasync(disk->fd) < 0)
                goto error_unlock;

        if (pwrite_in_full(disk->fd, table->l1_table, table->table_size * sizeof(u64), header->l1_table_offset) < 0)
                goto error_unlock;

        mutex_unlock(&q->mutex);

        return fsync(disk->fd);

error_unlock:
        mutex_unlock(&q->mutex);
        return -1;
}

static int qcow_disk_close(struct disk_image *disk)
{
        struct qcow *q;

        if (!disk)
                return 0;

        q = disk->priv;

        free_cache(q);
        free(q->table.l1_table);
        free(q->header);
        free(q);

        return 0;
}

static struct disk_image_operations qcow_disk_readonly_ops = {
        .read_sector    = qcow_read_sector,
        .write_sector   = qcow_nowrite_sector,
        .close          = qcow_disk_close,
};

static struct disk_image_operations qcow_disk_ops = {
        .read_sector    = qcow_read_sector,
        .write_sector   = qcow_write_sector,
        .flush          = qcow_disk_flush,
        .close          = qcow_disk_close,
};

static int qcow_read_l1_table(struct qcow *q)
{
        struct qcow_header *header = q->header;
        struct qcow_l1_table *table = &q->table;

        table->table_size = header->l1_size;

        table->l1_table = calloc(table->table_size, sizeof(u64));
        if (!table->l1_table)
                return -1;

        return pread_in_full(q->fd, table->l1_table, sizeof(u64) * table->table_size, header->l1_table_offset);
}
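/*
 * The on-disk QCOW2 header is big-endian, so every field is byte-swapped
 * before use. Each L2 entry is an 8-byte offset, so a cluster holds
 * 2^(cluster_bits - 3) entries; that is where l2_bits = cluster_bits - 3
 * comes from.
 */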
static void *qcow2_read_header(int fd)
{
        struct qcow2_header_disk f_header;
        struct qcow_header *header;

        header = malloc(sizeof(struct qcow_header));
        if (!header)
                return NULL;

        if (pread_in_full(fd, &f_header, sizeof(struct qcow2_header_disk), 0) < 0) {
                free(header);
                return NULL;
        }

        be32_to_cpus(&f_header.magic);
        be32_to_cpus(&f_header.version);
        be64_to_cpus(&f_header.backing_file_offset);
        be32_to_cpus(&f_header.backing_file_size);
        be32_to_cpus(&f_header.cluster_bits);
        be64_to_cpus(&f_header.size);
        be32_to_cpus(&f_header.crypt_method);
        be32_to_cpus(&f_header.l1_size);
        be64_to_cpus(&f_header.l1_table_offset);
        be64_to_cpus(&f_header.refcount_table_offset);
        be32_to_cpus(&f_header.refcount_table_clusters);
        be32_to_cpus(&f_header.nb_snapshots);
        be64_to_cpus(&f_header.snapshots_offset);

        *header = (struct qcow_header) {
                .size            = f_header.size,
                .l1_table_offset = f_header.l1_table_offset,
                .l1_size         = f_header.l1_size,
                .cluster_bits    = f_header.cluster_bits,
                .l2_bits         = f_header.cluster_bits - 3,
        };

        return header;
}

static struct disk_image *qcow2_probe(int fd, bool readonly)
{
        struct qcow *q;
        struct qcow_header *h;
        struct disk_image *disk_image;

        q = calloc(1, sizeof(struct qcow));
        if (!q)
                goto error;

        mutex_init(&q->mutex);
        q->fd = fd;
        q->root = RB_ROOT;
        INIT_LIST_HEAD(&q->lru_list);

        h = q->header = qcow2_read_header(fd);
        if (!h)
                goto error;

        if (qcow_read_l1_table(q) < 0)
                goto error;

        /*
         * Do not use mmap; use read/write instead.
         */
        if (readonly)
                disk_image = disk_image__new(fd, h->size, &qcow_disk_readonly_ops, DISK_IMAGE_NOMMAP);
        else
                disk_image = disk_image__new(fd, h->size, &qcow_disk_ops, DISK_IMAGE_NOMMAP);

        if (!disk_image)
                goto error;
        disk_image->priv = q;

        return disk_image;
error:
        if (!q)
                return NULL;

        free(q->table.l1_table);
        free(q->header);
        free(q);

        return NULL;
}

static bool qcow2_check_image(int fd)
{
        struct qcow2_header_disk f_header;

        if (pread_in_full(fd, &f_header, sizeof(struct qcow2_header_disk), 0) < 0)
                return false;

        be32_to_cpus(&f_header.magic);
        be32_to_cpus(&f_header.version);

        if (f_header.magic != QCOW_MAGIC)
                return false;

        if (f_header.version != QCOW2_VERSION)
                return false;

        return true;
}
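/*
 * The QCOW1 header has no l1_size field, so the L1 table size is derived
 * from the image size and the cluster/L2 geometry when the header is read.
 */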
static void *qcow1_read_header(int fd)
{
        struct qcow1_header_disk f_header;
        struct qcow_header *header;

        header = malloc(sizeof(struct qcow_header));
        if (!header)
                return NULL;

        if (pread_in_full(fd, &f_header, sizeof(struct qcow1_header_disk), 0) < 0) {
                free(header);
                return NULL;
        }

        be32_to_cpus(&f_header.magic);
        be32_to_cpus(&f_header.version);
        be64_to_cpus(&f_header.backing_file_offset);
        be32_to_cpus(&f_header.backing_file_size);
        be32_to_cpus(&f_header.mtime);
        be64_to_cpus(&f_header.size);
        be32_to_cpus(&f_header.crypt_method);
        be64_to_cpus(&f_header.l1_table_offset);

        *header = (struct qcow_header) {
                .size            = f_header.size,
                .l1_table_offset = f_header.l1_table_offset,
                .l1_size         = f_header.size / ((1 << f_header.l2_bits) * (1 << f_header.cluster_bits)),
                .cluster_bits    = f_header.cluster_bits,
                .l2_bits         = f_header.l2_bits,
        };

        return header;
}

static struct disk_image *qcow1_probe(int fd, bool readonly)
{
        struct qcow *q;
        struct qcow_header *h;
        struct disk_image *disk_image;

        q = calloc(1, sizeof(struct qcow));
        if (!q)
                goto error;

        mutex_init(&q->mutex);
        q->fd = fd;
        q->root = RB_ROOT;
        INIT_LIST_HEAD(&q->lru_list);

        h = q->header = qcow1_read_header(fd);
        if (!h)
                goto error;

        if (qcow_read_l1_table(q) < 0)
                goto error;

        /*
         * Do not use mmap; use read/write instead.
         */
        if (readonly)
                disk_image = disk_image__new(fd, h->size, &qcow_disk_readonly_ops, DISK_IMAGE_NOMMAP);
        else
                disk_image = disk_image__new(fd, h->size, &qcow_disk_ops, DISK_IMAGE_NOMMAP);

        if (!disk_image)
                goto error;
        disk_image->priv = q;

        return disk_image;
error:
        if (!q)
                return NULL;

        free(q->table.l1_table);
        free(q->header);
        free(q);

        return NULL;
}

static bool qcow1_check_image(int fd)
{
        struct qcow1_header_disk f_header;

        if (pread_in_full(fd, &f_header, sizeof(struct qcow1_header_disk), 0) < 0)
                return false;

        be32_to_cpus(&f_header.magic);
        be32_to_cpus(&f_header.version);

        if (f_header.magic != QCOW_MAGIC)
                return false;

        if (f_header.version != QCOW1_VERSION)
                return false;

        return true;
}
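/*
 * Probe order: the QCOW1 magic/version is checked first, then QCOW2; NULL
 * is returned if the file is neither.
 */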
struct disk_image *qcow_probe(int fd, bool readonly)
{
        if (qcow1_check_image(fd))
                return qcow1_probe(fd, readonly);

        if (qcow2_check_image(fd))
                return qcow2_probe(fd, readonly);

        return NULL;
}