1 /*
2 * QEMU Firmware configuration device emulation
3 *
4 * Copyright (c) 2008 Gleb Natapov
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #include "qemu/osdep.h"
26 #include "qemu/datadir.h"
27 #include "system/system.h"
28 #include "system/dma.h"
29 #include "system/reset.h"
30 #include "system/address-spaces.h"
31 #include "hw/boards.h"
32 #include "hw/nvram/fw_cfg.h"
33 #include "hw/qdev-properties.h"
34 #include "hw/sysbus.h"
35 #include "migration/qemu-file-types.h"
36 #include "migration/vmstate.h"
37 #include "trace.h"
38 #include "qemu/error-report.h"
39 #include "qemu/option.h"
40 #include "qemu/config-file.h"
41 #include "qemu/cutils.h"
42 #include "qapi/error.h"
43 #include "hw/acpi/aml-build.h"
44 #include "hw/loader.h"
45
46 #define FW_CFG_FILE_SLOTS_DFLT 0x20
47
48 /* FW_CFG_VERSION bits */
49 #define FW_CFG_VERSION 0x01
50 #define FW_CFG_VERSION_DMA 0x02
51
52 /* FW_CFG_DMA_CONTROL bits */
53 #define FW_CFG_DMA_CTL_ERROR 0x01
54 #define FW_CFG_DMA_CTL_READ 0x02
55 #define FW_CFG_DMA_CTL_SKIP 0x04
56 #define FW_CFG_DMA_CTL_SELECT 0x08
57 #define FW_CFG_DMA_CTL_WRITE 0x10
58
59 #define FW_CFG_DMA_SIGNATURE 0x51454d5520434647ULL /* "QEMU CFG" */
60
61 struct FWCfgEntry {
62 uint32_t len;
63 bool allow_write;
64 uint8_t *data;
65 void *callback_opaque;
66 FWCfgCallback select_cb;
67 FWCfgWriteCallback write_cb;
68 };
69
70 /**
71 * key_name:
72 *
73 * @key: The uint16 selector key.
74 *
75 * Returns: The stringified name if the selector refers to a well-known
76 * numerically defined item, or NULL on key lookup failure.
77 */
78 static const char *key_name(uint16_t key)
79 {
80 static const char *fw_cfg_wellknown_keys[FW_CFG_FILE_FIRST] = {
81 [FW_CFG_SIGNATURE] = "signature",
82 [FW_CFG_ID] = "id",
83 [FW_CFG_UUID] = "uuid",
84 [FW_CFG_RAM_SIZE] = "ram_size",
85 [FW_CFG_NOGRAPHIC] = "nographic",
86 [FW_CFG_NB_CPUS] = "nb_cpus",
87 [FW_CFG_MACHINE_ID] = "machine_id",
88 [FW_CFG_KERNEL_ADDR] = "kernel_addr",
89 [FW_CFG_KERNEL_SIZE] = "kernel_size",
90 [FW_CFG_KERNEL_CMDLINE] = "kernel_cmdline",
91 [FW_CFG_INITRD_ADDR] = "initrd_addr",
92 [FW_CFG_INITRD_SIZE] = "initrd_size",
93 [FW_CFG_BOOT_DEVICE] = "boot_device",
94 [FW_CFG_NUMA] = "numa",
95 [FW_CFG_BOOT_MENU] = "boot_menu",
96 [FW_CFG_MAX_CPUS] = "max_cpus",
97 [FW_CFG_KERNEL_ENTRY] = "kernel_entry",
98 [FW_CFG_KERNEL_DATA] = "kernel_data",
99 [FW_CFG_INITRD_DATA] = "initrd_data",
100 [FW_CFG_CMDLINE_ADDR] = "cmdline_addr",
101 [FW_CFG_CMDLINE_SIZE] = "cmdline_size",
102 [FW_CFG_CMDLINE_DATA] = "cmdline_data",
103 [FW_CFG_SETUP_ADDR] = "setup_addr",
104 [FW_CFG_SETUP_SIZE] = "setup_size",
105 [FW_CFG_SETUP_DATA] = "setup_data",
106 [FW_CFG_FILE_DIR] = "file_dir",
107 };
108
109 if (key & FW_CFG_ARCH_LOCAL) {
110 return fw_cfg_arch_key_name(key);
111 }
112 if (key < FW_CFG_FILE_FIRST) {
113 return fw_cfg_wellknown_keys[key];
114 }
115
116 return NULL;
117 }
118
119 static inline const char *trace_key_name(uint16_t key)
120 {
121 const char *name = key_name(key);
122
123 return name ? name : "unknown";
124 }
125
126 #define JPG_FILE 0
127 #define BMP_FILE 1
128
129 static char *read_splashfile(char *filename, gsize *file_sizep,
130 int *file_typep)
131 {
132 GError *err = NULL;
133 gchar *content;
134 int file_type;
135 unsigned int filehead;
136 int bmp_bpp;
137
138 if (!g_file_get_contents(filename, &content, file_sizep, &err)) {
139 error_report("failed to read splash file '%s': %s",
140 filename, err->message);
141 g_error_free(err);
142 return NULL;
143 }
144
145 /* check file size */
146 if (*file_sizep < 30) {
147 goto error;
148 }
149
150 /* check magic ID */
151 filehead = lduw_le_p(content);
152 if (filehead == 0xd8ff) {
153 file_type = JPG_FILE;
154 } else if (filehead == 0x4d42) {
155 file_type = BMP_FILE;
156 } else {
157 goto error;
158 }
159
160 /* check BMP bpp */
161 if (file_type == BMP_FILE) {
162 bmp_bpp = lduw_le_p(&content[28]);
163 if (bmp_bpp != 24) {
164 goto error;
165 }
166 }
167
168 /* return values */
169 *file_typep = file_type;
170
171 return content;
172
173 error:
174 error_report("splash file '%s' format not recognized; must be JPEG "
175 "or 24 bit BMP", filename);
176 g_free(content);
177 return NULL;
178 }
179
180 static void fw_cfg_bootsplash(FWCfgState *s)
181 {
182 char *filename, *file_data;
183 gsize file_size;
184 int file_type;
185
186 /* insert splash time if configured by the user */
187 if (current_machine->boot_config.has_splash_time) {
188 int64_t bst_val = current_machine->boot_config.splash_time;
189 uint16_t bst_le16;
190
191 /* validate the input */
192 if (bst_val < 0 || bst_val > 0xffff) {
193 error_report("splash-time is invalid, "
194 "it should be a value between 0 and 65535");
195 exit(1);
196 }
197 /* use little endian format */
198 bst_le16 = cpu_to_le16(bst_val);
199 fw_cfg_add_file(s, "etc/boot-menu-wait",
200 g_memdup(&bst_le16, sizeof bst_le16), sizeof bst_le16);
201 }
202
203 /* insert splash file if configured by the user */
204 if (current_machine->boot_config.splash) {
205 const char *boot_splash_filename = current_machine->boot_config.splash;
206 filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, boot_splash_filename);
207 if (filename == NULL) {
208 error_report("failed to find file '%s'", boot_splash_filename);
209 return;
210 }
211
212 /* loading file data */
213 file_data = read_splashfile(filename, &file_size, &file_type);
214 if (file_data == NULL) {
215 g_free(filename);
216 return;
217 }
218 g_free(boot_splash_filedata);
219 boot_splash_filedata = (uint8_t *)file_data;
220
221 /* insert data */
222 if (file_type == JPG_FILE) {
223 fw_cfg_add_file(s, "bootsplash.jpg",
224 boot_splash_filedata, file_size);
225 } else {
226 fw_cfg_add_file(s, "bootsplash.bmp",
227 boot_splash_filedata, file_size);
228 }
229 g_free(filename);
230 }
231 }
232
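/*
 * Validate the reboot-timeout boot option (0..0xffff, or -1, which is
 * also the default when unset) and expose it to firmware as the
 * little-endian "etc/boot-fail-wait" file.
 */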
233 static void fw_cfg_reboot(FWCfgState *s)
234 {
235 uint64_t rt_val = -1;
236 uint32_t rt_le32;
237
238 if (current_machine->boot_config.has_reboot_timeout) {
239 rt_val = current_machine->boot_config.reboot_timeout;
240
241 /* validate the input */
242 if (rt_val > 0xffff && rt_val != (uint64_t)-1) {
243 error_report("reboot timeout is invalid, "
244 "it should be a value between -1 and 65535");
245 exit(1);
246 }
247 }
248
249 rt_le32 = cpu_to_le32(rt_val);
250 fw_cfg_add_file(s, "etc/boot-fail-wait", g_memdup(&rt_le32, 4), 4);
251 }
252
253 static void fw_cfg_write(FWCfgState *s, uint8_t value)
254 {
255 /* nothing, write support removed in QEMU v2.4+ */
256 }
257
258 static inline uint16_t fw_cfg_file_slots(const FWCfgState *s)
259 {
260 return s->file_slots;
261 }
262
263 /* Note: this function returns an exclusive limit. */
264 static inline uint32_t fw_cfg_max_entry(const FWCfgState *s)
265 {
266 return FW_CFG_FILE_FIRST + fw_cfg_file_slots(s);
267 }
268
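/*
 * Select the fw_cfg item identified by 'key': reset the read offset and,
 * if the key is in range, run the item's select callback. Returns 1 on
 * success, 0 if the key is invalid (subsequent data reads then return 0).
 */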
269 static int fw_cfg_select(FWCfgState *s, uint16_t key)
270 {
271 int arch, ret;
272 FWCfgEntry *e;
273
274 s->cur_offset = 0;
275 if ((key & FW_CFG_ENTRY_MASK) >= fw_cfg_max_entry(s)) {
276 s->cur_entry = FW_CFG_INVALID;
277 ret = 0;
278 } else {
279 s->cur_entry = key;
280 ret = 1;
281 /* entry successfully selected, now run callback if present */
282 arch = !!(key & FW_CFG_ARCH_LOCAL);
283 e = &s->entries[arch][key & FW_CFG_ENTRY_MASK];
284 if (e->select_cb) {
285 e->select_cb(e->callback_opaque);
286 }
287 }
288
289 trace_fw_cfg_select(s, key, trace_key_name(key), ret);
290 return ret;
291 }
292
293 static uint64_t fw_cfg_data_read(void *opaque, hwaddr addr, unsigned size)
294 {
295 FWCfgState *s = opaque;
296 int arch = !!(s->cur_entry & FW_CFG_ARCH_LOCAL);
297 FWCfgEntry *e = (s->cur_entry == FW_CFG_INVALID) ? NULL :
298 &s->entries[arch][s->cur_entry & FW_CFG_ENTRY_MASK];
299 uint64_t value = 0;
300
301 assert(size > 0 && size <= sizeof(value));
302 if (s->cur_entry != FW_CFG_INVALID && e->data && s->cur_offset < e->len) {
303 /* The least significant 'size' bytes of the return value are
304 * expected to contain a string-preserving portion of the item
305 * data, padded with zeros on the right in case we run out early.
306 * In technical terms, we're composing the host-endian representation
307 * of the big endian interpretation of the fw_cfg string.
308 */
309 do {
310 value = (value << 8) | e->data[s->cur_offset++];
311 } while (--size && s->cur_offset < e->len);
312 /* If size is still not zero, we *did* run out early, so continue
313 * left-shifting, to add the appropriate number of padding zeros
314 * on the right.
315 */
316 value <<= 8 * size;
317 }
318
319 trace_fw_cfg_read(s, value);
320 return value;
321 }
322
323 static void fw_cfg_data_mem_write(void *opaque, hwaddr addr,
324 uint64_t value, unsigned size)
325 {
326 FWCfgState *s = opaque;
327 unsigned i = size;
328
329 do {
330 fw_cfg_write(s, value >> (8 * --i));
331 } while (i);
332 }
333
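/*
 * Execute one DMA request: fetch the FWCfgDmaAccess control structure
 * from guest memory at the latched address, optionally select a new
 * item (control bits 16..31 hold the key), then read, write or skip
 * 'length' bytes of the current item. The final control word (0 on
 * success, FW_CFG_DMA_CTL_ERROR on failure) is stored back to the guest.
 */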
334 static void fw_cfg_dma_transfer(FWCfgState *s)
335 {
336 dma_addr_t len;
337 FWCfgDmaAccess dma;
338 int arch;
339 FWCfgEntry *e;
340 int read = 0, write = 0;
341 dma_addr_t dma_addr;
342
343 /* Reset the address before the next access */
344 dma_addr = s->dma_addr;
345 s->dma_addr = 0;
346
347 if (dma_memory_read(s->dma_as, dma_addr,
348 &dma, sizeof(dma), MEMTXATTRS_UNSPECIFIED)) {
349 stl_be_dma(s->dma_as, dma_addr + offsetof(FWCfgDmaAccess, control),
350 FW_CFG_DMA_CTL_ERROR, MEMTXATTRS_UNSPECIFIED);
351 return;
352 }
353
354 dma.address = be64_to_cpu(dma.address);
355 dma.length = be32_to_cpu(dma.length);
356 dma.control = be32_to_cpu(dma.control);
357
358 if (dma.control & FW_CFG_DMA_CTL_SELECT) {
359 fw_cfg_select(s, dma.control >> 16);
360 }
361
362 arch = !!(s->cur_entry & FW_CFG_ARCH_LOCAL);
363 e = (s->cur_entry == FW_CFG_INVALID) ? NULL :
364 &s->entries[arch][s->cur_entry & FW_CFG_ENTRY_MASK];
365
366 if (dma.control & FW_CFG_DMA_CTL_READ) {
367 read = 1;
368 write = 0;
369 } else if (dma.control & FW_CFG_DMA_CTL_WRITE) {
370 read = 0;
371 write = 1;
372 } else if (dma.control & FW_CFG_DMA_CTL_SKIP) {
373 read = 0;
374 write = 0;
375 } else {
376 dma.length = 0;
377 }
378
379 dma.control = 0;
380
381 while (dma.length > 0 && !(dma.control & FW_CFG_DMA_CTL_ERROR)) {
382 if (s->cur_entry == FW_CFG_INVALID || !e->data ||
383 s->cur_offset >= e->len) {
384 len = dma.length;
385
386 /* If the access is not a read access, it will be a skip access,
387 * tested before.
388 */
389 if (read) {
390 if (dma_memory_set(s->dma_as, dma.address, 0, len,
391 MEMTXATTRS_UNSPECIFIED)) {
392 dma.control |= FW_CFG_DMA_CTL_ERROR;
393 }
394 }
395 if (write) {
396 dma.control |= FW_CFG_DMA_CTL_ERROR;
397 }
398 } else {
399 if (dma.length <= (e->len - s->cur_offset)) {
400 len = dma.length;
401 } else {
402 len = (e->len - s->cur_offset);
403 }
404
405 /* If the access is not a read access, it will be a skip access,
406 * tested before.
407 */
408 if (read) {
409 if (dma_memory_write(s->dma_as, dma.address,
410 &e->data[s->cur_offset], len,
411 MEMTXATTRS_UNSPECIFIED)) {
412 dma.control |= FW_CFG_DMA_CTL_ERROR;
413 }
414 }
415 if (write) {
416 if (!e->allow_write ||
417 len != dma.length ||
418 dma_memory_read(s->dma_as, dma.address,
419 &e->data[s->cur_offset], len,
420 MEMTXATTRS_UNSPECIFIED)) {
421 dma.control |= FW_CFG_DMA_CTL_ERROR;
422 } else if (e->write_cb) {
423 e->write_cb(e->callback_opaque, s->cur_offset, len);
424 }
425 }
426
427 s->cur_offset += len;
428 }
429
430 dma.address += len;
431 dma.length -= len;
432
433 }
434
435 stl_be_dma(s->dma_as, dma_addr + offsetof(FWCfgDmaAccess, control),
436 dma.control, MEMTXATTRS_UNSPECIFIED);
437
438 trace_fw_cfg_read(s, 0);
439 }
440
441 static uint64_t fw_cfg_dma_mem_read(void *opaque, hwaddr addr,
442 unsigned size)
443 {
444 /* Return a signature value (and handle various read sizes) */
445 return extract64(FW_CFG_DMA_SIGNATURE, (8 - addr - size) * 8, size * 8);
446 }
447
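/*
 * Guest write to the DMA address register. The 64-bit FWCfgDmaAccess
 * address may be written either as a single 64-bit access or as two
 * 32-bit halves; the transfer is triggered by the write to the low half
 * (offset 4) or by the full 64-bit write.
 */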
448 static void fw_cfg_dma_mem_write(void *opaque, hwaddr addr,
449 uint64_t value, unsigned size)
450 {
451 FWCfgState *s = opaque;
452
453 if (size == 4) {
454 if (addr == 0) {
455 /* FWCfgDmaAccess high address */
456 s->dma_addr = value << 32;
457 } else if (addr == 4) {
458 /* FWCfgDmaAccess low address */
459 s->dma_addr |= value;
460 fw_cfg_dma_transfer(s);
461 }
462 } else if (size == 8 && addr == 0) {
463 s->dma_addr = value;
464 fw_cfg_dma_transfer(s);
465 }
466 }
467
468 static bool fw_cfg_dma_mem_valid(void *opaque, hwaddr addr,
469 unsigned size, bool is_write,
470 MemTxAttrs attrs)
471 {
472 return !is_write || ((size == 4 && (addr == 0 || addr == 4)) ||
473 (size == 8 && addr == 0));
474 }
475
476 static bool fw_cfg_data_mem_valid(void *opaque, hwaddr addr,
477 unsigned size, bool is_write,
478 MemTxAttrs attrs)
479 {
480 return addr == 0;
481 }
482
483 static uint64_t fw_cfg_ctl_mem_read(void *opaque, hwaddr addr, unsigned size)
484 {
485 return 0;
486 }
487
488 static void fw_cfg_ctl_mem_write(void *opaque, hwaddr addr,
489 uint64_t value, unsigned size)
490 {
491 fw_cfg_select(opaque, (uint16_t)value);
492 }
493
494 static bool fw_cfg_ctl_mem_valid(void *opaque, hwaddr addr,
495 unsigned size, bool is_write,
496 MemTxAttrs attrs)
497 {
498 return is_write && size == 2;
499 }
500
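/*
 * Combined port i/o region: 1-byte accesses hit the data register (reads
 * return item data, writes are ignored since fw_cfg write support was
 * removed) and 2-byte writes hit the selector register.
 */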
501 static void fw_cfg_comb_write(void *opaque, hwaddr addr,
502 uint64_t value, unsigned size)
503 {
504 switch (size) {
505 case 1:
506 fw_cfg_write(opaque, (uint8_t)value);
507 break;
508 case 2:
509 fw_cfg_select(opaque, (uint16_t)value);
510 break;
511 }
512 }
513
514 static bool fw_cfg_comb_valid(void *opaque, hwaddr addr,
515 unsigned size, bool is_write,
516 MemTxAttrs attrs)
517 {
518 return (size == 1) || (is_write && size == 2);
519 }
520
521 static const MemoryRegionOps fw_cfg_ctl_mem_ops = {
522 .read = fw_cfg_ctl_mem_read,
523 .write = fw_cfg_ctl_mem_write,
524 .endianness = DEVICE_BIG_ENDIAN,
525 .valid.accepts = fw_cfg_ctl_mem_valid,
526 };
527
528 static const MemoryRegionOps fw_cfg_data_mem_ops = {
529 .read = fw_cfg_data_read,
530 .write = fw_cfg_data_mem_write,
531 .endianness = DEVICE_BIG_ENDIAN,
532 .valid = {
533 .min_access_size = 1,
534 .max_access_size = 1,
535 .accepts = fw_cfg_data_mem_valid,
536 },
537 };
538
539 static const MemoryRegionOps fw_cfg_comb_mem_ops = {
540 .read = fw_cfg_data_read,
541 .write = fw_cfg_comb_write,
542 .endianness = DEVICE_LITTLE_ENDIAN,
543 .valid.accepts = fw_cfg_comb_valid,
544 };
545
546 static const MemoryRegionOps fw_cfg_dma_mem_ops = {
547 .read = fw_cfg_dma_mem_read,
548 .write = fw_cfg_dma_mem_write,
549 .endianness = DEVICE_BIG_ENDIAN,
550 .valid.accepts = fw_cfg_dma_mem_valid,
551 .valid.max_access_size = 8,
552 .impl.max_access_size = 8,
553 };
554
555 static void fw_cfg_reset(DeviceState *d)
556 {
557 FWCfgState *s = FW_CFG(d);
558
559 /* we never register a read callback for FW_CFG_SIGNATURE */
560 fw_cfg_select(s, FW_CFG_SIGNATURE);
561 }
562
563 /* Save/restore a 32 bit int as a uint16_t.
564 This is a big hack, but it is how the old state did it.
565 Either we break compatibility in the state, or we can't use struct tm.
566 */
567
568 static int get_uint32_as_uint16(QEMUFile *f, void *pv, size_t size,
569 const VMStateField *field)
570 {
571 uint32_t *v = pv;
572 *v = qemu_get_be16(f);
573 return 0;
574 }
575
576 static int put_unused(QEMUFile *f, void *pv, size_t size,
577 const VMStateField *field, JSONWriter *vmdesc)
578 {
579 fprintf(stderr, "uint32_as_uint16 is only used for backward compatibility.\n");
580 fprintf(stderr, "This functions shouldn't be called.\n");
581
582 return 0;
583 }
584
585 static const VMStateInfo vmstate_hack_uint32_as_uint16 = {
586 .name = "int32_as_uint16",
587 .get = get_uint32_as_uint16,
588 .put = put_unused,
589 };
590
591 #define VMSTATE_UINT16_HACK(_f, _s, _t) \
592 VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_hack_uint32_as_uint16, uint32_t)
593
594
595 static bool is_version_1(void *opaque, int version_id)
596 {
597 return version_id == 1;
598 }
599
600 bool fw_cfg_dma_enabled(void *opaque)
601 {
602 FWCfgState *s = opaque;
603
604 return s->dma_enabled;
605 }
606
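/*
 * The acpi_mr subsection only needs to be migrated when the
 * acpi-mr-restore property is enabled and at least one of the ACPI
 * memory regions has a sub-page size (otherwise the page-aligned size
 * restored by RAM migration is already exact).
 */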
607 static bool fw_cfg_acpi_mr_restore(void *opaque)
608 {
609 FWCfgState *s = opaque;
610 bool mr_aligned;
611
612 mr_aligned = QEMU_IS_ALIGNED(s->table_mr_size, qemu_real_host_page_size()) &&
613 QEMU_IS_ALIGNED(s->linker_mr_size, qemu_real_host_page_size()) &&
614 QEMU_IS_ALIGNED(s->rsdp_mr_size, qemu_real_host_page_size());
615 return s->acpi_mr_restore && !mr_aligned;
616 }
617
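/*
 * Resize the RAM memory region backing the given fw_cfg entry so that it
 * matches the exact (possibly sub-page) size recorded before migration.
 */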
618 static void fw_cfg_update_mr(FWCfgState *s, uint16_t key, size_t size)
619 {
620 MemoryRegion *mr;
621 ram_addr_t offset;
622 int arch = !!(key & FW_CFG_ARCH_LOCAL);
623 void *ptr;
624
625 key &= FW_CFG_ENTRY_MASK;
626 assert(key < fw_cfg_max_entry(s));
627
628 ptr = s->entries[arch][key].data;
629 mr = memory_region_from_host(ptr, &offset);
630
631 memory_region_ram_resize(mr, size, &error_abort);
632 }
633
634 static int fw_cfg_acpi_mr_restore_post_load(void *opaque, int version_id)
635 {
636 FWCfgState *s = opaque;
637 int i, index;
638
639 assert(s->files);
640
641 index = be32_to_cpu(s->files->count);
642
643 for (i = 0; i < index; i++) {
644 if (!strcmp(s->files->f[i].name, ACPI_BUILD_TABLE_FILE)) {
645 fw_cfg_update_mr(s, FW_CFG_FILE_FIRST + i, s->table_mr_size);
646 } else if (!strcmp(s->files->f[i].name, ACPI_BUILD_LOADER_FILE)) {
647 fw_cfg_update_mr(s, FW_CFG_FILE_FIRST + i, s->linker_mr_size);
648 } else if (!strcmp(s->files->f[i].name, ACPI_BUILD_RSDP_FILE)) {
649 fw_cfg_update_mr(s, FW_CFG_FILE_FIRST + i, s->rsdp_mr_size);
650 }
651 }
652
653 return 0;
654 }
655
656 static const VMStateDescription vmstate_fw_cfg_dma = {
657 .name = "fw_cfg/dma",
658 .needed = fw_cfg_dma_enabled,
659 .fields = (const VMStateField[]) {
660 VMSTATE_UINT64(dma_addr, FWCfgState),
661 VMSTATE_END_OF_LIST()
662 },
663 };
664
665 static const VMStateDescription vmstate_fw_cfg_acpi_mr = {
666 .name = "fw_cfg/acpi_mr",
667 .version_id = 1,
668 .minimum_version_id = 1,
669 .needed = fw_cfg_acpi_mr_restore,
670 .post_load = fw_cfg_acpi_mr_restore_post_load,
671 .fields = (const VMStateField[]) {
672 VMSTATE_UINT64(table_mr_size, FWCfgState),
673 VMSTATE_UINT64(linker_mr_size, FWCfgState),
674 VMSTATE_UINT64(rsdp_mr_size, FWCfgState),
675 VMSTATE_END_OF_LIST()
676 },
677 };
678
679 static const VMStateDescription vmstate_fw_cfg = {
680 .name = "fw_cfg",
681 .version_id = 2,
682 .minimum_version_id = 1,
683 .fields = (const VMStateField[]) {
684 VMSTATE_UINT16(cur_entry, FWCfgState),
685 VMSTATE_UINT16_HACK(cur_offset, FWCfgState, is_version_1),
686 VMSTATE_UINT32_V(cur_offset, FWCfgState, 2),
687 VMSTATE_END_OF_LIST()
688 },
689 .subsections = (const VMStateDescription * const []) {
690 &vmstate_fw_cfg_dma,
691 &vmstate_fw_cfg_acpi_mr,
692 NULL,
693 }
694 };
695
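/*
 * Install an entry under the given selector key. The slot must not be
 * populated yet; 'data' is not copied, so it must stay valid for the
 * lifetime of the device.
 */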
696 static void fw_cfg_add_bytes_callback(FWCfgState *s, uint16_t key,
697 FWCfgCallback select_cb,
698 FWCfgWriteCallback write_cb,
699 void *callback_opaque,
700 void *data, size_t len,
701 bool read_only)
702 {
703 int arch = !!(key & FW_CFG_ARCH_LOCAL);
704
705 key &= FW_CFG_ENTRY_MASK;
706
707 assert(key < fw_cfg_max_entry(s) && len < UINT32_MAX);
708 assert(s->entries[arch][key].data == NULL); /* avoid key conflict */
709
710 s->entries[arch][key].data = data;
711 s->entries[arch][key].len = (uint32_t)len;
712 s->entries[arch][key].select_cb = select_cb;
713 s->entries[arch][key].write_cb = write_cb;
714 s->entries[arch][key].callback_opaque = callback_opaque;
715 s->entries[arch][key].allow_write = !read_only;
716 }
717
718 static void *fw_cfg_modify_bytes_read(FWCfgState *s, uint16_t key,
719 void *data, size_t len)
720 {
721 void *ptr;
722 int arch = !!(key & FW_CFG_ARCH_LOCAL);
723
724 key &= FW_CFG_ENTRY_MASK;
725
726 assert(key < fw_cfg_max_entry(s) && len < UINT32_MAX);
727
728 /* return the old data to the caller so it can be freed, avoiding a leak */
729 ptr = s->entries[arch][key].data;
730 s->entries[arch][key].data = data;
731 s->entries[arch][key].len = len;
732 s->entries[arch][key].allow_write = false;
733
734 return ptr;
735 }
736
737 void fw_cfg_add_bytes(FWCfgState *s, uint16_t key, void *data, size_t len)
738 {
739 trace_fw_cfg_add_bytes(key, trace_key_name(key), len);
740 fw_cfg_add_bytes_callback(s, key, NULL, NULL, NULL, data, len, true);
741 }
742
743 void fw_cfg_add_string(FWCfgState *s, uint16_t key, const char *value)
744 {
745 size_t sz = strlen(value) + 1;
746
747 trace_fw_cfg_add_string(key, trace_key_name(key), value);
748 fw_cfg_add_bytes(s, key, g_memdup(value, sz), sz);
749 }
750
751 void fw_cfg_modify_string(FWCfgState *s, uint16_t key, const char *value)
752 {
753 size_t sz = strlen(value) + 1;
754 char *old;
755
756 old = fw_cfg_modify_bytes_read(s, key, g_memdup(value, sz), sz);
757 g_free(old);
758 }
759
760 void fw_cfg_add_i16(FWCfgState *s, uint16_t key, uint16_t value)
761 {
762 uint16_t *copy;
763
764 copy = g_malloc(sizeof(value));
765 *copy = cpu_to_le16(value);
766 trace_fw_cfg_add_i16(key, trace_key_name(key), value);
767 fw_cfg_add_bytes(s, key, copy, sizeof(value));
768 }
769
770 void fw_cfg_modify_i16(FWCfgState *s, uint16_t key, uint16_t value)
771 {
772 uint16_t *copy, *old;
773
774 copy = g_malloc(sizeof(value));
775 *copy = cpu_to_le16(value);
776 old = fw_cfg_modify_bytes_read(s, key, copy, sizeof(value));
777 g_free(old);
778 }
779
780 void fw_cfg_add_i32(FWCfgState *s, uint16_t key, uint32_t value)
781 {
782 uint32_t *copy;
783
784 copy = g_malloc(sizeof(value));
785 *copy = cpu_to_le32(value);
786 trace_fw_cfg_add_i32(key, trace_key_name(key), value);
787 fw_cfg_add_bytes(s, key, copy, sizeof(value));
788 }
789
790 void fw_cfg_modify_i32(FWCfgState *s, uint16_t key, uint32_t value)
791 {
792 uint32_t *copy, *old;
793
794 copy = g_malloc(sizeof(value));
795 *copy = cpu_to_le32(value);
796 old = fw_cfg_modify_bytes_read(s, key, copy, sizeof(value));
797 g_free(old);
798 }
799
800 void fw_cfg_add_i64(FWCfgState *s, uint16_t key, uint64_t value)
801 {
802 uint64_t *copy;
803
804 copy = g_malloc(sizeof(value));
805 *copy = cpu_to_le64(value);
806 trace_fw_cfg_add_i64(key, trace_key_name(key), value);
807 fw_cfg_add_bytes(s, key, copy, sizeof(value));
808 }
809
810 void fw_cfg_modify_i64(FWCfgState *s, uint16_t key, uint64_t value)
811 {
812 uint64_t *copy, *old;
813
814 copy = g_malloc(sizeof(value));
815 *copy = cpu_to_le64(value);
816 old = fw_cfg_modify_bytes_read(s, key, copy, sizeof(value));
817 g_free(old);
818 }
819
820 /*
821 * Any sub-page size update to these table MRs will be lost during migration,
822 * as we use an aligned size in the ram_load_precopy() -> qemu_ram_resize() path.
823 * To avoid this inconsistency in sizes, save them separately and
824 * migrate them over in vmstate post_load().
825 */
826 static void fw_cfg_acpi_mr_save(FWCfgState *s, const char *filename, size_t len)
827 {
828 if (!strcmp(filename, ACPI_BUILD_TABLE_FILE)) {
829 s->table_mr_size = len;
830 } else if (!strcmp(filename, ACPI_BUILD_LOADER_FILE)) {
831 s->linker_mr_size = len;
832 } else if (!strcmp(filename, ACPI_BUILD_RSDP_FILE)) {
833 s->rsdp_mr_size = len;
834 }
835 }
836
837 void fw_cfg_add_file_callback(FWCfgState *s, const char *filename,
838 FWCfgCallback select_cb,
839 FWCfgWriteCallback write_cb,
840 void *callback_opaque,
841 void *data, size_t len, bool read_only)
842 {
843 int i, index, count;
844 size_t dsize;
845 int order = 0;
846
847 if (!s->files) {
848 dsize = sizeof(uint32_t) + sizeof(FWCfgFile) * fw_cfg_file_slots(s);
849 s->files = g_malloc0(dsize);
850 fw_cfg_add_bytes(s, FW_CFG_FILE_DIR, s->files, dsize);
851 }
852
853 count = be32_to_cpu(s->files->count);
854 assert(count < fw_cfg_file_slots(s));
855
856 /* Find the insertion point, sorting by file name. */
857 for (index = count;
858 index > 0 && strcmp(filename, s->files->f[index - 1].name) < 0;
859 index--)
860 ;
861
862 /*
863 * Move all the entries from the index point and after down one
864 * to create a slot for the new entry. Because calculations are
865 * being done with the index, make it so that "i" is the current
866 * index and "i - 1" is the one being copied from, thus the
867 * unusual start and end in the for statement.
868 */
869 for (i = count; i > index; i--) {
870 s->files->f[i] = s->files->f[i - 1];
871 s->files->f[i].select = cpu_to_be16(FW_CFG_FILE_FIRST + i);
872 s->entries[0][FW_CFG_FILE_FIRST + i] =
873 s->entries[0][FW_CFG_FILE_FIRST + i - 1];
874 s->entry_order[i] = s->entry_order[i - 1];
875 }
876
877 memset(&s->files->f[index], 0, sizeof(FWCfgFile));
878 memset(&s->entries[0][FW_CFG_FILE_FIRST + index], 0, sizeof(FWCfgEntry));
879
880 pstrcpy(s->files->f[index].name, sizeof(s->files->f[index].name), filename);
881 for (i = 0; i <= count; i++) {
882 if (i != index &&
883 strcmp(s->files->f[index].name, s->files->f[i].name) == 0) {
884 error_report("duplicate fw_cfg file name: %s",
885 s->files->f[index].name);
886 exit(1);
887 }
888 }
889
890 fw_cfg_add_bytes_callback(s, FW_CFG_FILE_FIRST + index,
891 select_cb, write_cb,
892 callback_opaque, data, len,
893 read_only);
894
895 s->files->f[index].size = cpu_to_be32(len);
896 s->files->f[index].select = cpu_to_be16(FW_CFG_FILE_FIRST + index);
897 s->entry_order[index] = order;
898 trace_fw_cfg_add_file(s, index, s->files->f[index].name, len);
899
900 s->files->count = cpu_to_be32(count+1);
901 fw_cfg_acpi_mr_save(s, filename, len);
902 }
903
904 void fw_cfg_add_file(FWCfgState *s, const char *filename,
905 void *data, size_t len)
906 {
907 fw_cfg_add_file_callback(s, filename, NULL, NULL, NULL, data, len, true);
908 }
909
910 void *fw_cfg_modify_file(FWCfgState *s, const char *filename,
911 void *data, size_t len)
912 {
913 int i, index;
914 void *ptr = NULL;
915
916 assert(s->files);
917
918 index = be32_to_cpu(s->files->count);
919
920 for (i = 0; i < index; i++) {
921 if (strcmp(filename, s->files->f[i].name) == 0) {
922 ptr = fw_cfg_modify_bytes_read(s, FW_CFG_FILE_FIRST + i,
923 data, len);
924 s->files->f[i].size = cpu_to_be32(len);
925 fw_cfg_acpi_mr_save(s, filename, len);
926 return ptr;
927 }
928 }
929
930 assert(index < fw_cfg_file_slots(s));
931
932 /* add new one */
933 fw_cfg_add_file_callback(s, filename, NULL, NULL, NULL, data, len, true);
934 return NULL;
935 }
936
937 bool fw_cfg_add_file_from_generator(FWCfgState *s,
938 Object *parent, const char *part,
939 const char *filename, Error **errp)
940 {
941 ERRP_GUARD();
942 FWCfgDataGeneratorClass *klass;
943 GByteArray *array;
944 Object *obj;
945 gsize size;
946
947 obj = object_resolve_path_component(parent, part);
948 if (!obj) {
949 error_setg(errp, "Cannot find object ID '%s'", part);
950 return false;
951 }
952 if (!object_dynamic_cast(obj, TYPE_FW_CFG_DATA_GENERATOR_INTERFACE)) {
953 error_setg(errp, "Object ID '%s' is not a '%s' subclass",
954 part, TYPE_FW_CFG_DATA_GENERATOR_INTERFACE);
955 return false;
956 }
957 klass = FW_CFG_DATA_GENERATOR_GET_CLASS(obj);
958 array = klass->get_data(obj, errp);
959 if (*errp || !array) {
960 return false;
961 }
962 size = array->len;
963 fw_cfg_add_file(s, filename, g_byte_array_free(array, FALSE), size);
964
965 return true;
966 }
967
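/*
 * On machine reset, regenerate the "bootorder" and "bios-geometry" files
 * so that firmware sees the current boot device list and disk geometry.
 */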
968 static void fw_cfg_machine_reset(void *opaque)
969 {
970 FWCfgState *s = opaque;
971 void *ptr;
972 size_t len;
973 char *buf;
974
975 buf = get_boot_devices_list(&len);
976 ptr = fw_cfg_modify_file(s, "bootorder", (uint8_t *)buf, len);
977 g_free(ptr);
978
979 buf = get_boot_devices_lchs_list(&len);
980 ptr = fw_cfg_modify_file(s, "bios-geometry", (uint8_t *)buf, len);
981 g_free(ptr);
982 }
983
984 static void fw_cfg_machine_ready(struct Notifier *n, void *data)
985 {
986 FWCfgState *s = container_of(n, FWCfgState, machine_ready);
987 qemu_register_reset(fw_cfg_machine_reset, s);
988 }
989
990 static const Property fw_cfg_properties[] = {
991 DEFINE_PROP_BOOL("acpi-mr-restore", FWCfgState, acpi_mr_restore, true),
992 };
993
994 static void fw_cfg_common_realize(DeviceState *dev, Error **errp)
995 {
996 FWCfgState *s = FW_CFG(dev);
997 MachineState *machine = MACHINE(qdev_get_machine());
998 uint32_t version = FW_CFG_VERSION;
999
1000 if (!fw_cfg_find()) {
1001 error_setg(errp, "at most one %s device is permitted", TYPE_FW_CFG);
1002 return;
1003 }
1004
1005 fw_cfg_add_bytes(s, FW_CFG_SIGNATURE, (char *)"QEMU", 4);
1006 fw_cfg_add_bytes(s, FW_CFG_UUID, &qemu_uuid, 16);
1007 fw_cfg_add_i16(s, FW_CFG_NOGRAPHIC, (uint16_t)!machine->enable_graphics);
1008 fw_cfg_add_i16(s, FW_CFG_BOOT_MENU, (uint16_t)(machine->boot_config.has_menu && machine->boot_config.menu));
1009 fw_cfg_bootsplash(s);
1010 fw_cfg_reboot(s);
1011
1012 if (s->dma_enabled) {
1013 version |= FW_CFG_VERSION_DMA;
1014 }
1015
1016 fw_cfg_add_i32(s, FW_CFG_ID, version);
1017
1018 s->machine_ready.notify = fw_cfg_machine_ready;
1019 qemu_add_machine_init_done_notifier(&s->machine_ready);
1020 }
1021
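/*
 * Create and realize a port-mapped fw_cfg device: the combined
 * selector/data region is mapped at 'iobase', and the DMA address
 * register at 'dma_iobase' when both 'dma_iobase' and 'dma_as' are given.
 */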
1022 FWCfgState *fw_cfg_init_io_dma(uint32_t iobase, uint32_t dma_iobase,
1023 AddressSpace *dma_as)
1024 {
1025 DeviceState *dev;
1026 SysBusDevice *sbd;
1027 FWCfgIoState *ios;
1028 FWCfgState *s;
1029 MemoryRegion *iomem = get_system_io();
1030 bool dma_requested = dma_iobase && dma_as;
1031
1032 dev = qdev_new(TYPE_FW_CFG_IO);
1033 if (!dma_requested) {
1034 qdev_prop_set_bit(dev, "dma_enabled", false);
1035 }
1036
1037 object_property_add_child(OBJECT(qdev_get_machine()), TYPE_FW_CFG,
1038 OBJECT(dev));
1039
1040 sbd = SYS_BUS_DEVICE(dev);
1041 sysbus_realize_and_unref(sbd, &error_fatal);
1042 ios = FW_CFG_IO(dev);
1043 memory_region_add_subregion(iomem, iobase, &ios->comb_iomem);
1044
1045 s = FW_CFG(dev);
1046
1047 if (s->dma_enabled) {
1048 /* 64 bits for the address field */
1049 s->dma_as = dma_as;
1050 s->dma_addr = 0;
1051 memory_region_add_subregion(iomem, dma_iobase, &s->dma_iomem);
1052 }
1053
1054 return s;
1055 }
1056
1057 FWCfgState *fw_cfg_init_mem_wide(hwaddr ctl_addr,
1058 hwaddr data_addr, uint32_t data_width,
1059 hwaddr dma_addr, AddressSpace *dma_as)
1060 {
1061 DeviceState *dev;
1062 SysBusDevice *sbd;
1063 FWCfgState *s;
1064 bool dma_requested = dma_addr && dma_as;
1065
1066 dev = qdev_new(TYPE_FW_CFG_MEM);
1067 qdev_prop_set_uint32(dev, "data_width", data_width);
1068 if (!dma_requested) {
1069 qdev_prop_set_bit(dev, "dma_enabled", false);
1070 }
1071
1072 object_property_add_child(OBJECT(qdev_get_machine()), TYPE_FW_CFG,
1073 OBJECT(dev));
1074
1075 sbd = SYS_BUS_DEVICE(dev);
1076 sysbus_realize_and_unref(sbd, &error_fatal);
1077 sysbus_mmio_map(sbd, 0, ctl_addr);
1078 sysbus_mmio_map(sbd, 1, data_addr);
1079
1080 s = FW_CFG(dev);
1081
1082 if (s->dma_enabled) {
1083 s->dma_as = dma_as;
1084 s->dma_addr = 0;
1085 sysbus_mmio_map(sbd, 2, dma_addr);
1086 }
1087
1088 return s;
1089 }
1090
1091 FWCfgState *fw_cfg_init_mem(hwaddr ctl_addr, hwaddr data_addr)
1092 {
1093 return fw_cfg_init_mem_wide(ctl_addr, data_addr,
1094 fw_cfg_data_mem_ops.valid.max_access_size,
1095 0, NULL);
1096 }
1097
1098
1099 FWCfgState *fw_cfg_find(void)
1100 {
1101 /* Returns NULL unless there is exactly one fw_cfg device */
1102 return FW_CFG(object_resolve_path_type("", TYPE_FW_CFG, NULL));
1103 }
1104
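/*
 * Load an image into fw_cfg: when 'try_decompress' is set, attempt gzip
 * decompression first and fall back to the raw file contents otherwise.
 * The size is published under 'size_key' and the data under 'data_key'.
 * Does nothing if 'image_name' is NULL; a read failure is fatal.
 */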
1105 void load_image_to_fw_cfg(FWCfgState *fw_cfg, uint16_t size_key,
1106 uint16_t data_key, const char *image_name,
1107 bool try_decompress)
1108 {
1109 size_t size = -1;
1110 uint8_t *data;
1111
1112 if (image_name == NULL) {
1113 return;
1114 }
1115
1116 if (try_decompress) {
1117 size = load_image_gzipped_buffer(image_name,
1118 LOAD_IMAGE_MAX_GUNZIP_BYTES, &data);
1119 }
1120
1121 if (size == (size_t)-1) {
1122 gchar *contents;
1123 gsize length;
1124
1125 if (!g_file_get_contents(image_name, &contents, &length, NULL)) {
1126 error_report("failed to load \"%s\"", image_name);
1127 exit(1);
1128 }
1129 size = length;
1130 data = (uint8_t *)contents;
1131 }
1132
1133 fw_cfg_add_i32(fw_cfg, size_key, size);
1134 fw_cfg_add_bytes(fw_cfg, data_key, data, size);
1135 }
1136
1137 static void fw_cfg_class_init(ObjectClass *klass, const void *data)
1138 {
1139 DeviceClass *dc = DEVICE_CLASS(klass);
1140
1141 device_class_set_legacy_reset(dc, fw_cfg_reset);
1142 dc->vmsd = &vmstate_fw_cfg;
1143
1144 device_class_set_props(dc, fw_cfg_properties);
1145 }
1146
1147 static const TypeInfo fw_cfg_info = {
1148 .name = TYPE_FW_CFG,
1149 .parent = TYPE_SYS_BUS_DEVICE,
1150 .abstract = true,
1151 .instance_size = sizeof(FWCfgState),
1152 .class_init = fw_cfg_class_init,
1153 };
1154
1155 static void fw_cfg_file_slots_allocate(FWCfgState *s, Error **errp)
1156 {
1157 uint16_t file_slots_max;
1158
1159 if (fw_cfg_file_slots(s) < FW_CFG_FILE_SLOTS_MIN) {
1160 error_setg(errp, "\"file_slots\" must be at least 0x%x",
1161 FW_CFG_FILE_SLOTS_MIN);
1162 return;
1163 }
1164
1165 /* (UINT16_MAX & FW_CFG_ENTRY_MASK) is the highest inclusive selector value
1166 * that we permit. The actual (exclusive) value coming from the
1167 * configuration is (FW_CFG_FILE_FIRST + fw_cfg_file_slots(s)). */
1168 file_slots_max = (UINT16_MAX & FW_CFG_ENTRY_MASK) - FW_CFG_FILE_FIRST + 1;
1169 if (fw_cfg_file_slots(s) > file_slots_max) {
1170 error_setg(errp, "\"file_slots\" must not exceed 0x%" PRIx16,
1171 file_slots_max);
1172 return;
1173 }
1174
1175 s->entries[0] = g_new0(FWCfgEntry, fw_cfg_max_entry(s));
1176 s->entries[1] = g_new0(FWCfgEntry, fw_cfg_max_entry(s));
1177 s->entry_order = g_new0(int, fw_cfg_max_entry(s));
1178 }
1179
1180 static const Property fw_cfg_io_properties[] = {
1181 DEFINE_PROP_BOOL("dma_enabled", FWCfgIoState, parent_obj.dma_enabled,
1182 true),
1183 DEFINE_PROP_UINT16("x-file-slots", FWCfgIoState, parent_obj.file_slots,
1184 FW_CFG_FILE_SLOTS_DFLT),
1185 };
1186
1187 static void fw_cfg_io_realize(DeviceState *dev, Error **errp)
1188 {
1189 ERRP_GUARD();
1190 FWCfgIoState *s = FW_CFG_IO(dev);
1191
1192 fw_cfg_file_slots_allocate(FW_CFG(s), errp);
1193 if (*errp) {
1194 return;
1195 }
1196
1197 /* when using port i/o, the 8-bit data register ALWAYS overlaps
1198 * with half of the 16-bit control register. Hence, the total size
1199 * of the i/o region used is FW_CFG_CTL_SIZE */
1200 memory_region_init_io(&s->comb_iomem, OBJECT(s), &fw_cfg_comb_mem_ops,
1201 FW_CFG(s), "fwcfg", FW_CFG_CTL_SIZE);
1202
1203 if (FW_CFG(s)->dma_enabled) {
1204 memory_region_init_io(&FW_CFG(s)->dma_iomem, OBJECT(s),
1205 &fw_cfg_dma_mem_ops, FW_CFG(s), "fwcfg.dma",
1206 sizeof(dma_addr_t));
1207 }
1208
1209 fw_cfg_common_realize(dev, errp);
1210 }
1211
1212 static void fw_cfg_io_class_init(ObjectClass *klass, const void *data)
1213 {
1214 DeviceClass *dc = DEVICE_CLASS(klass);
1215
1216 dc->realize = fw_cfg_io_realize;
1217 device_class_set_props(dc, fw_cfg_io_properties);
1218 }
1219
1220 static const TypeInfo fw_cfg_io_info = {
1221 .name = TYPE_FW_CFG_IO,
1222 .parent = TYPE_FW_CFG,
1223 .instance_size = sizeof(FWCfgIoState),
1224 .class_init = fw_cfg_io_class_init,
1225 };
1226
1227
1228 static const Property fw_cfg_mem_properties[] = {
1229 DEFINE_PROP_UINT32("data_width", FWCfgMemState, data_width, -1),
1230 DEFINE_PROP_BOOL("dma_enabled", FWCfgMemState, parent_obj.dma_enabled,
1231 true),
1232 DEFINE_PROP_UINT16("x-file-slots", FWCfgMemState, parent_obj.file_slots,
1233 FW_CFG_FILE_SLOTS_DFLT),
1234 };
1235
1236 static void fw_cfg_mem_realize(DeviceState *dev, Error **errp)
1237 {
1238 ERRP_GUARD();
1239 FWCfgMemState *s = FW_CFG_MEM(dev);
1240 SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
1241 const MemoryRegionOps *data_ops = &fw_cfg_data_mem_ops;
1242
1243 fw_cfg_file_slots_allocate(FW_CFG(s), errp);
1244 if (*errp) {
1245 return;
1246 }
1247
1248 memory_region_init_io(&s->ctl_iomem, OBJECT(s), &fw_cfg_ctl_mem_ops,
1249 FW_CFG(s), "fwcfg.ctl", FW_CFG_CTL_SIZE);
1250 sysbus_init_mmio(sbd, &s->ctl_iomem);
1251
1252 if (s->data_width > data_ops->valid.max_access_size) {
1253 s->wide_data_ops = *data_ops;
1254
1255 s->wide_data_ops.valid.max_access_size = s->data_width;
1256 s->wide_data_ops.impl.max_access_size = s->data_width;
1257 data_ops = &s->wide_data_ops;
1258 }
1259 memory_region_init_io(&s->data_iomem, OBJECT(s), data_ops, FW_CFG(s),
1260 "fwcfg.data", data_ops->valid.max_access_size);
1261 sysbus_init_mmio(sbd, &s->data_iomem);
1262
1263 if (FW_CFG(s)->dma_enabled) {
1264 memory_region_init_io(&FW_CFG(s)->dma_iomem, OBJECT(s),
1265 &fw_cfg_dma_mem_ops, FW_CFG(s), "fwcfg.dma",
1266 sizeof(dma_addr_t));
1267 sysbus_init_mmio(sbd, &FW_CFG(s)->dma_iomem);
1268 }
1269
1270 fw_cfg_common_realize(dev, errp);
1271 }
1272
1273 static void fw_cfg_mem_class_init(ObjectClass *klass, const void *data)
1274 {
1275 DeviceClass *dc = DEVICE_CLASS(klass);
1276
1277 dc->realize = fw_cfg_mem_realize;
1278 device_class_set_props(dc, fw_cfg_mem_properties);
1279 }
1280
1281 static const TypeInfo fw_cfg_mem_info = {
1282 .name = TYPE_FW_CFG_MEM,
1283 .parent = TYPE_FW_CFG,
1284 .instance_size = sizeof(FWCfgMemState),
1285 .class_init = fw_cfg_mem_class_init,
1286 };
1287
1288 static void fw_cfg_register_types(void)
1289 {
1290 type_register_static(&fw_cfg_info);
1291 type_register_static(&fw_cfg_io_info);
1292 type_register_static(&fw_cfg_mem_info);
1293 }
1294
1295 type_init(fw_cfg_register_types)
1296