xref: /kvmtool/hw/cfi_flash.c (revision d632faceb69d7816b7b5ec645ece22c2507d00d3)
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/types.h>

#include "kvm/kvm.h"
#include "kvm/kvm-arch.h"
#include "kvm/kvm-cpu.h"
#include "kvm/devices.h"
#include "kvm/fdt.h"
#include "kvm/mutex.h"
#include "kvm/util.h"

/*
 * The EDK2 driver hardcodes two 16-bit chips on a 32-bit bus.
 * This code supports one or two chips (enforced below).
 */
#define CFI_NR_FLASH_CHIPS			2

/* We always emulate a 32 bit bus width. */
#define CFI_BUS_WIDTH				4

/* The *effective* size of an erase block (over all chips) */
#define FLASH_BLOCK_SIZE			SZ_64K
#define FLASH_BLOCK_SIZE_PER_CHIP					\
	(FLASH_BLOCK_SIZE / CFI_NR_FLASH_CHIPS)

#define PROGRAM_BUFF_SIZE_BITS			7
#define PROGRAM_BUFF_SIZE			(1U << PROGRAM_BUFF_SIZE_BITS)
#define PROGRAM_BUFF_SIZE_BITS_PER_CHIP					\
	(PROGRAM_BUFF_SIZE_BITS + 1 - CFI_NR_FLASH_CHIPS)
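
/*
 * A note on the per-chip values above (derived from the macros, not from
 * any datasheet): with the default CFI_NR_FLASH_CHIPS == 2 the 64 KiB
 * effective erase block is 32 KiB per chip, and the 128 byte program
 * buffer is reported as 2^(7 + 1 - 2) = 64 bytes per chip. With a single
 * chip both values cover the whole block and buffer.
 */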

/* CFI commands */
#define CFI_CMD_LOCK_BLOCK			0x01
#define CFI_CMD_ALTERNATE_WORD_PROGRAM		0x10
#define CFI_CMD_ERASE_BLOCK_SETUP		0x20
#define CFI_CMD_WORD_PROGRAM			0x40
#define CFI_CMD_CLEAR_STATUS_REG		0x50
#define CFI_CMD_LOCK_BLOCK_SETUP		0x60
#define CFI_CMD_READ_STATUS_REG			0x70
#define CFI_CMD_READ_JEDEC_DEVID		0x90
#define CFI_CMD_READ_CFI_QUERY			0x98
#define CFI_CMD_CONFIRM				0xd0
#define CFI_CMD_BUFFERED_PROGRAM_SETUP		0xe8
#define CFI_CMD_READ_ARRAY			0xff

#define CFI_STATUS_PROTECT_BIT		0x02
#define CFI_STATUS_PROGRAM_LOCK_BIT	0x10
#define CFI_STATUS_ERASE_CLEAR_LOCK_BIT	0x20
#define CFI_STATUS_LOCK_ERROR		(CFI_STATUS_PROGRAM_LOCK_BIT |	\
					 CFI_STATUS_PROTECT_BIT)
#define CFI_STATUS_ERASE_ERROR		(CFI_STATUS_ERASE_CLEAR_LOCK_BIT | \
					 CFI_STATUS_PROGRAM_LOCK_BIT)
#define CFI_STATUS_READY		0x80
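
/*
 * A sketch of what the guest observes in the status register, based on
 * the code below: it starts out as CFI_STATUS_READY (0x80), and a program
 * or erase attempt on a locked block ORs in the error bits, e.g. a word
 * program to a locked block yields 0x80 | 0x10 | 0x02 = 0x92. The value
 * sticks until the guest issues CFI_CMD_CLEAR_STATUS_REG.
 */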

/*
 * CFI query table contents, as far as they are constant.
 * The dynamic information (size, etc.) will be generated on the fly.
 */
#define CFI_GEOM_OFFSET				0x27
static const u8 cfi_query_table[] = {
		/* CFI query identification string */
	[0x10] = 'Q', 'R', 'Y',		/* ID string */
	0x01, 0x00,		/* primary command set: Intel/Sharp extended */
	0x31, 0x00,		/* address of primary extended query table */
	0x00, 0x00,		/* alternative command set: unused */
	0x00, 0x00,		/* address of alternative extended query table */
		/* system interface information */
	[0x1b] = 0x45,			/* minimum Vcc voltage: 4.5V */
	0x55,			/* maximum Vcc voltage: 5.5V */
	0x00,			/* minimum Vpp voltage: 0.0V (unused) */
	0x00,			/* maximum Vpp voltage: 0.0V (unused) */
	0x01,			/* timeout for single word program: 2 us */
	0x01,			/* timeout for multi-byte program: 2 us */
	0x01,			/* timeout for block erase: 2 ms */
	0x00,			/* timeout for full chip erase: not supported */
	0x00,			/* max timeout for single word program: 1x */
	0x00,			/* max timeout for multi-byte program: 1x */
	0x00,			/* max timeout for block erase: 1x */
	0x00,			/* max timeout for chip erase: not supported */
		/* flash geometry information */
	[0x27] = 0x00,		/* size in power-of-2 bytes, filled later */
	0x05, 0x00,		/* interface description: 32 and 16 bits */
	PROGRAM_BUFF_SIZE_BITS_PER_CHIP, 0x00,
				/* number of bytes in write buffer */
	0x01,			/* one erase block region */
	0x00, 0x00, 0x00, 0x00, /* number and size of erase blocks, generated */
		/* Intel primary algorithm extended query table */
	[0x31] = 'P', 'R', 'I',
	'1', '0',		/* version 1.0 */
	0xa0, 0x00, 0x00, 0x00, /* optional features: instant lock & pm-read */
	0x00,			/* no functions after suspend */
	0x01, 0x00,		/* only lock bit supported */
	0x50,			/* best Vcc value: 5.0V */
	0x00,			/* best Vpp value: 0.0V (unused) */
	0x01,			/* number of protection register fields */
	0x00, 0x00, 0x00, 0x00,	/* protection field 1 description */
};
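
/*
 * The geometry fields above are placeholders which read_cfi() patches on
 * the fly: offset 0x27 holds the per-chip device size as a power of two,
 * and offsets 0x2d-0x30 hold the single erase block region descriptor,
 * with "number of blocks - 1" in the low 16 bits and "block size / 256"
 * in the high 16 bits.
 */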

/*
 * Those states represent a subset of the CFI flash state machine.
 */
enum cfi_flash_state {
	READY,
	LOCK_BLOCK_SETUP,
	WORD_PROGRAM,
	BUFFERED_PROGRAM_SETUP,
	BUFFER_WRITE,
	ERASE_BLOCK_SETUP,
};
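
/*
 * A rough summary of the transitions implemented in cfi_flash_write() and
 * cfi_flash_write_ready() below:
 *
 *   READY --0x40/0x10--> WORD_PROGRAM --data--> READY
 *   READY --0x60--> LOCK_BLOCK_SETUP --0x01/0xd0--> READY
 *   READY --0x20--> ERASE_BLOCK_SETUP --0xd0--> READY
 *   READY --0xe8--> BUFFERED_PROGRAM_SETUP --word count--> BUFFER_WRITE
 *         --data writes, then 0xd0--> READY
 *
 * The remaining commands (0x50, 0x70, 0x90, 0x98, 0xff) are handled in
 * the READY state itself and only change the read mode or clear the
 * status register.
 */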

/*
 * The device can be in several **Read** modes.
 * We don't implement the asynchronous burst mode.
 */
enum cfi_read_mode {
	READ_ARRAY,
	READ_STATUS_REG,
	READ_JEDEC_DEVID,
	READ_CFI_QUERY,
};

struct cfi_flash_device {
	struct device_header	dev_hdr;
	/* Protects the CFI state machine variables in this data structure. */
	struct mutex		mutex;
	u64			base_addr;
	u32			size;

	void			*flash_memory;
	u8			program_buffer[PROGRAM_BUFF_SIZE];
	unsigned long		*lock_bm;
	u64			block_address;
	unsigned int		buff_written;
	unsigned int		buffer_length;

	enum cfi_flash_state	state;
	enum cfi_read_mode	read_mode;
	u8			sr;
	bool			is_mapped;
};

static int nr_erase_blocks(struct cfi_flash_device *sfdev)
{
	return sfdev->size / FLASH_BLOCK_SIZE;
}

/*
 * CFI queries always deal with one byte of information, possibly mirrored
 * to other bytes on the bus. This is dealt with in the callers.
 * The address provided is the one for 8-bit addressing, and would need to
 * be adjusted for wider accesses.
 */
static u8 read_cfi(struct cfi_flash_device *sfdev, u64 faddr)
{
	if (faddr >= sizeof(cfi_query_table)) {
		pr_debug("CFI query read access beyond the end of table");
		return 0;
	}

	/* Fixup dynamic information in the geometry part of the table. */
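	/*
	 * A worked example, assuming pow2_size() returns the log2 of its
	 * (power-of-two) argument: for a 2 MiB flash with two chips,
	 * offset 0x27 reads as pow2_size(1 MiB) = 20; the 32 blocks of
	 * 64 KiB make 0x2d/0x2e read as 31 and 0; the 32 KiB per-chip
	 * block size makes 0x2f/0x30 read as 128 and 0.
	 */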
	switch (faddr) {
	case 0x27:		/* device size in bytes, power of two */
		return pow2_size(sfdev->size / CFI_NR_FLASH_CHIPS);
	case 0x2d + 0:	/* number of erase blocks, minus one */
		return (nr_erase_blocks(sfdev) - 1) & 0xff;
	case 0x2d + 1:
		return ((nr_erase_blocks(sfdev) - 1) >> 8) & 0xff;
	case 0x2d + 2:	/* erase block size, in units of 256 */
		return (FLASH_BLOCK_SIZE_PER_CHIP / 256) & 0xff;
	case 0x2d + 3:
		return ((FLASH_BLOCK_SIZE_PER_CHIP / 256) >> 8) & 0xff;
	}

	return cfi_query_table[faddr];
}

static bool block_is_locked(struct cfi_flash_device *sfdev, u64 faddr)
{
	int block_nr = faddr / FLASH_BLOCK_SIZE;

	return test_bit(block_nr, sfdev->lock_bm);
}

#define DEV_ID_MASK 0x7ff
static u16 read_dev_id(struct cfi_flash_device *sfdev, u64 faddr)
{
	switch ((faddr & DEV_ID_MASK) / CFI_BUS_WIDTH) {
	case 0x0:				/* vendor ID */
		return 0x0000;
	case 0x1:				/* device ID */
		return 0xffff;
	case 0x2:
		return block_is_locked(sfdev, faddr & ~DEV_ID_MASK);
	default:			/* Ignore the other entries. */
		return 0;
	}
}

static void lock_block(struct cfi_flash_device *sfdev, u64 faddr, bool lock)
{
	int block_nr = faddr / FLASH_BLOCK_SIZE;

	if (lock)
		set_bit(block_nr, sfdev->lock_bm);
	else
		clear_bit(block_nr, sfdev->lock_bm);
}

static void word_program(struct cfi_flash_device *sfdev,
			 u64 faddr, void *data, int len)
{
	if (block_is_locked(sfdev, faddr)) {
		sfdev->sr |= CFI_STATUS_LOCK_ERROR;
		return;
	}

	memcpy(sfdev->flash_memory + faddr, data, len);
}

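/*
 * Buffered programming as a guest driver would use it (a sketch of the
 * state machine in cfi_flash_write() below): write 0xe8 to the block
 * address, write the word count minus one, write up to PROGRAM_BUFF_SIZE
 * bytes of data (the first data write latches the buffer's base address),
 * then write the 0xd0 confirm command, which commits the buffer to the
 * flash array. The status register can be polled in between.
 */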
/* Reset the program buffer state to prepare for follow-up writes. */
static void buffer_setup(struct cfi_flash_device *sfdev)
{
	memset(sfdev->program_buffer, 0, sizeof(sfdev->program_buffer));
	sfdev->block_address = ~0ULL;
	sfdev->buff_written = 0;
}

static bool buffer_write(struct cfi_flash_device *sfdev,
			 u64 faddr, void *buffer, int len)
{
	unsigned int buff_addr;

	if (sfdev->buff_written >= sfdev->buffer_length)
		return false;

	/*
	 * The first word written into the buffer after the setup command
	 * happens to be the base address for the buffer.
	 * All subsequent writes need to be within this address and this
	 * address plus the buffer size, so keep this value around.
	 */
	if (sfdev->block_address == ~0ULL)
		sfdev->block_address = faddr;

	if (faddr < sfdev->block_address)
		return false;
	buff_addr = faddr - sfdev->block_address;
	if (buff_addr >= PROGRAM_BUFF_SIZE)
		return false;

	memcpy(sfdev->program_buffer + buff_addr, buffer, len);
	sfdev->buff_written += len;

	return true;
}

static void buffer_confirm(struct cfi_flash_device *sfdev)
{
	if (block_is_locked(sfdev, sfdev->block_address)) {
		sfdev->sr |= CFI_STATUS_LOCK_ERROR;
		return;
	}
	memcpy(sfdev->flash_memory + sfdev->block_address,
	       sfdev->program_buffer, sfdev->buff_written);
}

static void block_erase_confirm(struct cfi_flash_device *sfdev, u64 faddr)
{
	if (block_is_locked(sfdev, faddr)) {
		sfdev->sr |= CFI_STATUS_LOCK_ERROR;
		return;
	}

	memset(sfdev->flash_memory + faddr, 0xff, FLASH_BLOCK_SIZE);
}

static void cfi_flash_read(struct cfi_flash_device *sfdev,
			   u64 faddr, u8 *data, u32 len)
{
	u16 cfi_value = 0;

	switch (sfdev->read_mode) {
	case READ_ARRAY:
		/* just copy the requested bytes from the array */
		memcpy(data, sfdev->flash_memory + faddr, len);
		return;
	case READ_STATUS_REG:
		cfi_value = sfdev->sr;
		break;
	case READ_JEDEC_DEVID:
		cfi_value = read_dev_id(sfdev, faddr);
		break;
	case READ_CFI_QUERY:
		cfi_value = read_cfi(sfdev, faddr / CFI_BUS_WIDTH);
		break;
	}
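	/*
	 * Replicate the 16-bit answer according to the access width: with
	 * two chips on a 32-bit bus both halves carry the same value, with
	 * a single chip the upper half reads as zero, and anything beyond
	 * the first 32 bits is always zero.
	 */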
	switch (len) {
	case 1:
		*data = cfi_value;
		break;
	case 8: memset(data + 4, 0, 4);
		/* fall-through */
	case 4:
		if (CFI_NR_FLASH_CHIPS == 2)
			memcpy(data + 2, &cfi_value, 2);
		else
			memset(data + 2, 0, 2);
		/* fall-through */
	case 2:
		memcpy(data, &cfi_value, 2);
		break;
	default:
		pr_debug("CFI flash: illegal access length %d for read mode %d",
			 len, sfdev->read_mode);
		break;
	}
}

/*
 * Any writes happening in "READY" state don't actually write to the memory,
 * but are really treated as commands to advance the state machine and select
 * the next action.
 * Change the state and modes according to the value written. The address
 * that value is written to does not matter and is ignored.
 */
static void cfi_flash_write_ready(struct cfi_flash_device *sfdev, u8 command)
{
	switch (command) {
	case CFI_CMD_READ_JEDEC_DEVID:
		sfdev->read_mode = READ_JEDEC_DEVID;
		break;
	case CFI_CMD_READ_STATUS_REG:
		sfdev->read_mode = READ_STATUS_REG;
		break;
	case CFI_CMD_READ_CFI_QUERY:
		sfdev->read_mode = READ_CFI_QUERY;
		break;
	case CFI_CMD_CLEAR_STATUS_REG:
		sfdev->sr = CFI_STATUS_READY;
		break;
	case CFI_CMD_WORD_PROGRAM:
	case CFI_CMD_ALTERNATE_WORD_PROGRAM:
		sfdev->state = WORD_PROGRAM;
		sfdev->read_mode = READ_STATUS_REG;
		break;
	case CFI_CMD_LOCK_BLOCK_SETUP:
		sfdev->state = LOCK_BLOCK_SETUP;
		break;
	case CFI_CMD_ERASE_BLOCK_SETUP:
		sfdev->state = ERASE_BLOCK_SETUP;
		sfdev->read_mode = READ_STATUS_REG;
		break;
	case CFI_CMD_BUFFERED_PROGRAM_SETUP:
		buffer_setup(sfdev);
		sfdev->state = BUFFERED_PROGRAM_SETUP;
		sfdev->read_mode = READ_STATUS_REG;
		break;
	case CFI_CMD_CONFIRM:
		pr_debug("CFI flash: unexpected confirm command 0xd0");
		break;
	default:
		pr_debug("CFI flash: unknown command 0x%x", command);
		/* fall-through */
	case CFI_CMD_READ_ARRAY:
		sfdev->read_mode = READ_ARRAY;
		break;
	}
}

static void cfi_flash_write(struct cfi_flash_device *sfdev, u16 command,
			    u64 faddr, u8 *data, u32 len)
{
	switch (sfdev->state) {
	case READY:
		cfi_flash_write_ready(sfdev, command & 0xff);
		return;
	case LOCK_BLOCK_SETUP:
		switch (command & 0xff) {
		case CFI_CMD_LOCK_BLOCK:
			lock_block(sfdev, faddr, true);
			sfdev->read_mode = READ_STATUS_REG;
			break;
		case CFI_CMD_CONFIRM:
			lock_block(sfdev, faddr, false);
			sfdev->read_mode = READ_STATUS_REG;
			break;
		default:
			sfdev->sr |= CFI_STATUS_ERASE_ERROR;
			break;
		}
		sfdev->state = READY;
		break;

	case WORD_PROGRAM:
		word_program(sfdev, faddr, data, len);
		sfdev->read_mode = READ_STATUS_REG;
		sfdev->state = READY;
		break;

	case BUFFER_WRITE:
		if (buffer_write(sfdev, faddr, data, len))
			break;

		if ((command & 0xff) == CFI_CMD_CONFIRM) {
			buffer_confirm(sfdev);
			sfdev->read_mode = READ_STATUS_REG;
		} else {
			pr_debug("CFI flash: BUFFER_WRITE: expected CONFIRM(0xd0), got 0x%x @ 0x%llx",
				 command, faddr);
			sfdev->sr |= CFI_STATUS_PROGRAM_LOCK_BIT;
		}
		sfdev->state = READY;
		break;

	case BUFFERED_PROGRAM_SETUP:
		sfdev->buffer_length = (command + 1) * CFI_BUS_WIDTH;
		if (sfdev->buffer_length > PROGRAM_BUFF_SIZE)
			sfdev->buffer_length = PROGRAM_BUFF_SIZE;
		sfdev->state = BUFFER_WRITE;
		sfdev->read_mode = READ_STATUS_REG;
		break;

	case ERASE_BLOCK_SETUP:
		if ((command & 0xff) == CFI_CMD_CONFIRM)
			block_erase_confirm(sfdev, faddr);
		else
			sfdev->sr |= CFI_STATUS_ERASE_ERROR;

		sfdev->state = READY;
		sfdev->read_mode = READ_STATUS_REG;
		break;
	default:
		pr_debug("CFI flash: unexpected/unknown command 0x%x", command);
		break;
	}
}

/*
 * If we are in READ_ARRAY mode, we can map the flash array directly
 * into the guest, just as read-only. This greatly improves read
 * performance, and avoids problems with exits due to accesses from
 * load instructions without syndrome information (on ARM).
 * Also, it could allow code to be executed in place (XIP) from there.
 */
static int map_flash_memory(struct kvm *kvm, struct cfi_flash_device *sfdev)
{
	int ret;

	ret = kvm__register_mem(kvm, sfdev->base_addr, sfdev->size,
				sfdev->flash_memory,
				KVM_MEM_TYPE_RAM | KVM_MEM_TYPE_READONLY);
	if (!ret)
		sfdev->is_mapped = true;

	return ret;
}

/*
 * Any write access changing the read mode would need to bring us back to
 * "trap everything", as the CFI query read needs proper handholding.
 */
static int unmap_flash_memory(struct kvm *kvm, struct cfi_flash_device *sfdev)
{
	int ret;

	ret = kvm__destroy_mem(kvm, sfdev->base_addr, sfdev->size,
			       sfdev->flash_memory);

	if (!ret)
		sfdev->is_mapped = false;

	return ret;
}

static void cfi_flash_mmio(struct kvm_cpu *vcpu,
			   u64 addr, u8 *data, u32 len, u8 is_write,
			   void *context)
{
	struct cfi_flash_device *sfdev = context;
	u64 faddr = addr - sfdev->base_addr;
	u32 value = 0;

	if (!is_write) {
		mutex_lock(&sfdev->mutex);

		cfi_flash_read(sfdev, faddr, data, len);

		mutex_unlock(&sfdev->mutex);

		return;
	}

	if (len > 4) {
		pr_info("CFI flash: MMIO %d-bit write access not supported",
			 len * 8);
		return;
	}

	memcpy(&value, data, len);

	mutex_lock(&sfdev->mutex);

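	/*
	 * Only the low 16 bits (one chip's worth) are passed on as the
	 * command; a driver for two chips on a 32-bit bus is expected to
	 * mirror the same command onto both halves anyway.
	 */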
	cfi_flash_write(sfdev, value & 0xffff, faddr, data, len);

	/* Adjust our mapping status accordingly. */
	if (!sfdev->is_mapped && sfdev->read_mode == READ_ARRAY)
		map_flash_memory(vcpu->kvm, sfdev);
	else if (sfdev->is_mapped && sfdev->read_mode != READ_ARRAY)
		unmap_flash_memory(vcpu->kvm, sfdev);

	mutex_unlock(&sfdev->mutex);
}

#ifdef CONFIG_HAS_LIBFDT
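/*
 * A sketch of the node this generates in the guest device tree (the reg
 * values come from the device's base address and size, in the parent's
 * address/size cell format):
 *
 *	flash {
 *		compatible = "cfi-flash";
 *		label = "System-firmware";
 *		bank-width = <4>;
 *		#address-cells = <0x1>;
 *		#size-cells = <0x1>;
 *		reg = <...>;
 *	};
 */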
static void generate_cfi_flash_fdt_node(void *fdt,
					struct device_header *dev_hdr,
					void (*generate_irq_prop)(void *fdt,
								  u8 irq,
								enum irq_type))
{
	struct cfi_flash_device *sfdev;
	u64 reg_prop[2];

	sfdev = container_of(dev_hdr, struct cfi_flash_device, dev_hdr);
	reg_prop[0] = cpu_to_fdt64(sfdev->base_addr);
	reg_prop[1] = cpu_to_fdt64(sfdev->size);

	_FDT(fdt_begin_node(fdt, "flash"));
	_FDT(fdt_property_cell(fdt, "bank-width", CFI_BUS_WIDTH));
	_FDT(fdt_property_cell(fdt, "#address-cells", 0x1));
	_FDT(fdt_property_cell(fdt, "#size-cells", 0x1));
	_FDT(fdt_property_string(fdt, "compatible", "cfi-flash"));
	_FDT(fdt_property_string(fdt, "label", "System-firmware"));
	_FDT(fdt_property(fdt, "reg", &reg_prop, sizeof(reg_prop)));
	_FDT(fdt_end_node(fdt));
}
#else
#define generate_cfi_flash_fdt_node NULL
#endif

static struct cfi_flash_device *create_flash_device_file(struct kvm *kvm,
							 const char *filename)
{
	struct cfi_flash_device *sfdev;
	struct stat statbuf;
	unsigned int value;
	int ret;
	int fd;

	fd = open(filename, O_RDWR);
	if (fd < 0)
		return ERR_PTR(-errno);

	if (fstat(fd, &statbuf) < 0) {
		ret = -errno;
		goto out_close;
	}

	sfdev = malloc(sizeof(struct cfi_flash_device));
	if (!sfdev) {
		ret = -ENOMEM;
		goto out_close;
	}

	sfdev->size = statbuf.st_size;
	/* Round down to nearest power-of-2 size value. */
	sfdev->size = 1U << (pow2_size(sfdev->size + 1) - 1);
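	/*
	 * Example of the rounding above, assuming pow2_size() rounds up to
	 * a power-of-two exponent: a 3 MiB file gives
	 * 1 << (pow2_size(3 MiB + 1) - 1) = 1 << 21 = 2 MiB, while an exact
	 * 2 MiB file stays at 2 MiB.
	 */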
	if (sfdev->size > KVM_FLASH_MAX_SIZE)
		sfdev->size = KVM_FLASH_MAX_SIZE;
	if (sfdev->size < statbuf.st_size) {
		pr_info("flash file size (%llu bytes) is not a power of two",
			(unsigned long long)statbuf.st_size);
		pr_info("only using first %u bytes", sfdev->size);
	}
	sfdev->flash_memory = mmap(NULL, sfdev->size,
				   PROT_READ | PROT_WRITE, MAP_SHARED,
				   fd, 0);
	if (sfdev->flash_memory == MAP_FAILED) {
		ret = -errno;
		goto out_free;
	}
	sfdev->base_addr = KVM_FLASH_MMIO_BASE;
	sfdev->state = READY;
	sfdev->read_mode = READ_ARRAY;
	sfdev->sr = CFI_STATUS_READY;

	map_flash_memory(kvm, sfdev);

	value = roundup(nr_erase_blocks(sfdev), BITS_PER_LONG) / 8;
	sfdev->lock_bm = malloc(value);
	memset(sfdev->lock_bm, 0, value);

	sfdev->dev_hdr.bus_type = DEVICE_BUS_MMIO;
	sfdev->dev_hdr.data = generate_cfi_flash_fdt_node;
	mutex_init(&sfdev->mutex);
	ret = device__register(&sfdev->dev_hdr);
	if (ret)
		goto out_unmap;

	ret = kvm__register_mmio(kvm,
				 sfdev->base_addr, sfdev->size,
				 false, cfi_flash_mmio, sfdev);
	if (ret) {
		device__unregister(&sfdev->dev_hdr);
		goto out_unmap;
	}

	return sfdev;

out_unmap:
	munmap(sfdev->flash_memory, sfdev->size);
out_free:
	free(sfdev);
out_close:
	close(fd);

	return ERR_PTR(ret);
}

static int cfi_flash__init(struct kvm *kvm)
{
	struct cfi_flash_device *sfdev;

	BUILD_BUG_ON(CFI_NR_FLASH_CHIPS != 1 && CFI_NR_FLASH_CHIPS != 2);

	if (!kvm->cfg.flash_filename)
		return 0;

	sfdev = create_flash_device_file(kvm, kvm->cfg.flash_filename);
	if (IS_ERR(sfdev))
		return PTR_ERR(sfdev);

	return 0;
}
dev_init(cfi_flash__init);