/*
 * QEMU model of the Ibex SPI Controller
 * SPEC Reference: https://docs.opentitan.org/hw/ip/spi_host/doc/
 *
 * Copyright (C) 2022 Western Digital
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "hw/registerfields.h"
#include "hw/ssi/ibex_spi_host.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "migration/vmstate.h"
#include "trace.h"

REG32(INTR_STATE, 0x00)
    FIELD(INTR_STATE, ERROR, 0, 1)
    FIELD(INTR_STATE, SPI_EVENT, 1, 1)
REG32(INTR_ENABLE, 0x04)
    FIELD(INTR_ENABLE, ERROR, 0, 1)
    FIELD(INTR_ENABLE, SPI_EVENT, 1, 1)
REG32(INTR_TEST, 0x08)
    FIELD(INTR_TEST, ERROR, 0, 1)
    FIELD(INTR_TEST, SPI_EVENT, 1, 1)
REG32(ALERT_TEST, 0x0c)
    FIELD(ALERT_TEST, FATAL_TEST, 0, 1)
REG32(CONTROL, 0x10)
    FIELD(CONTROL, RX_WATERMARK, 0, 8)
    FIELD(CONTROL, TX_WATERMARK, 8, 8)
    FIELD(CONTROL, OUTPUT_EN, 29, 1)
    FIELD(CONTROL, SW_RST, 30, 1)
    FIELD(CONTROL, SPIEN, 31, 1)
REG32(STATUS, 0x14)
    FIELD(STATUS, TXQD, 0, 8)
    FIELD(STATUS, RXQD, 8, 8)
    FIELD(STATUS, CMDQD, 16, 3)
    FIELD(STATUS, RXWM, 20, 1)
    FIELD(STATUS, BYTEORDER, 22, 1)
    FIELD(STATUS, RXSTALL, 23, 1)
    FIELD(STATUS, RXEMPTY, 24, 1)
    FIELD(STATUS, RXFULL, 25, 1)
    FIELD(STATUS, TXWM, 26, 1)
    FIELD(STATUS, TXSTALL, 27, 1)
    FIELD(STATUS, TXEMPTY, 28, 1)
    FIELD(STATUS, TXFULL, 29, 1)
    FIELD(STATUS, ACTIVE, 30, 1)
    FIELD(STATUS, READY, 31, 1)
REG32(CONFIGOPTS, 0x18)
    FIELD(CONFIGOPTS, CLKDIV_0, 0, 16)
    FIELD(CONFIGOPTS, CSNIDLE_0, 16, 4)
    FIELD(CONFIGOPTS, CSNTRAIL_0, 20, 4)
    FIELD(CONFIGOPTS, CSNLEAD_0, 24, 4)
    FIELD(CONFIGOPTS, FULLCYC_0, 29, 1)
    FIELD(CONFIGOPTS, CPHA_0, 30, 1)
    FIELD(CONFIGOPTS, CPOL_0, 31, 1)
REG32(CSID, 0x1c)
    FIELD(CSID, CSID, 0, 32)
REG32(COMMAND, 0x20)
    FIELD(COMMAND, LEN, 0, 8)
    FIELD(COMMAND, CSAAT, 9, 1)
    FIELD(COMMAND, SPEED, 10, 2)
    FIELD(COMMAND, DIRECTION, 12, 2)
REG32(ERROR_ENABLE, 0x2c)
    FIELD(ERROR_ENABLE, CMDBUSY, 0, 1)
    FIELD(ERROR_ENABLE, OVERFLOW, 1, 1)
    FIELD(ERROR_ENABLE, UNDERFLOW, 2, 1)
    FIELD(ERROR_ENABLE, CMDINVAL, 3, 1)
    FIELD(ERROR_ENABLE, CSIDINVAL, 4, 1)
REG32(ERROR_STATUS, 0x30)
    FIELD(ERROR_STATUS, CMDBUSY, 0, 1)
    FIELD(ERROR_STATUS, OVERFLOW, 1, 1)
    FIELD(ERROR_STATUS, UNDERFLOW, 2, 1)
    FIELD(ERROR_STATUS, CMDINVAL, 3, 1)
    FIELD(ERROR_STATUS, CSIDINVAL, 4, 1)
    FIELD(ERROR_STATUS, ACCESSINVAL, 5, 1)
REG32(EVENT_ENABLE, 0x34)
    FIELD(EVENT_ENABLE, RXFULL, 0, 1)
    FIELD(EVENT_ENABLE, TXEMPTY, 1, 1)
    FIELD(EVENT_ENABLE, RXWM, 2, 1)
    FIELD(EVENT_ENABLE, TXWM, 3, 1)
    FIELD(EVENT_ENABLE, READY, 4, 1)
    FIELD(EVENT_ENABLE, IDLE, 5, 1)

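/*
 * Round a byte count up to the number of 32-bit words it occupies.
 * Used when updating the STATUS queue-depth (RXQD) field.
 */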
static inline uint8_t div4_round_up(uint8_t dividend)
{
    return (dividend + 3) / 4;
}

static void ibex_spi_rxfifo_reset(IbexSPIHostState *s)
{
    uint32_t data = s->regs[IBEX_SPI_HOST_STATUS];
    /* Empty the RX FIFO and assert RXEMPTY */
    fifo8_reset(&s->rx_fifo);
    data = FIELD_DP32(data, STATUS, RXFULL, 0);
    data = FIELD_DP32(data, STATUS, RXEMPTY, 1);
    s->regs[IBEX_SPI_HOST_STATUS] = data;
}

static void ibex_spi_txfifo_reset(IbexSPIHostState *s)
{
    uint32_t data = s->regs[IBEX_SPI_HOST_STATUS];
    /* Empty the TX FIFO and assert TXEMPTY */
    fifo8_reset(&s->tx_fifo);
    data = FIELD_DP32(data, STATUS, TXFULL, 0);
    data = FIELD_DP32(data, STATUS, TXEMPTY, 1);
    s->regs[IBEX_SPI_HOST_STATUS] = data;
}

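/* Reset all registers and both FIFOs; also run on a CONTROL.SW_RST write. */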
static void ibex_spi_host_reset(DeviceState *dev)
{
    IbexSPIHostState *s = IBEX_SPI_HOST(dev);
    trace_ibex_spi_host_reset("Resetting Ibex SPI");

    /* SPI Host Register Reset */
    s->regs[IBEX_SPI_HOST_INTR_STATE]   = 0x00;
    s->regs[IBEX_SPI_HOST_INTR_ENABLE]  = 0x00;
    s->regs[IBEX_SPI_HOST_INTR_TEST]    = 0x00;
    s->regs[IBEX_SPI_HOST_ALERT_TEST]   = 0x00;
    s->regs[IBEX_SPI_HOST_CONTROL]      = 0x7f;
    s->regs[IBEX_SPI_HOST_STATUS]       = 0x00;
    s->regs[IBEX_SPI_HOST_CONFIGOPTS]   = 0x00;
    s->regs[IBEX_SPI_HOST_CSID]         = 0x00;
    s->regs[IBEX_SPI_HOST_COMMAND]      = 0x00;
    /* RX/TX Modelled by FIFO */
    s->regs[IBEX_SPI_HOST_RXDATA]       = 0x00;
    s->regs[IBEX_SPI_HOST_TXDATA]       = 0x00;

    s->regs[IBEX_SPI_HOST_ERROR_ENABLE] = 0x1F;
    s->regs[IBEX_SPI_HOST_ERROR_STATUS] = 0x00;
    s->regs[IBEX_SPI_HOST_EVENT_ENABLE] = 0x00;

    ibex_spi_rxfifo_reset(s);
    ibex_spi_txfifo_reset(s);

    s->init_status = true;
}

/*
 * Check if we need to trigger an interrupt.
 * The two interrupt lines (host_err and event) can
 * be enabled separately in 'IBEX_SPI_HOST_INTR_ENABLE'.
 *
 * Interrupts are triggered only for the error and event classes
 * enabled in 'IBEX_SPI_HOST_ERROR_ENABLE' and 'IBEX_SPI_HOST_EVENT_ENABLE'.
 */
static void ibex_spi_host_irq(IbexSPIHostState *s)
{
    uint32_t intr_test_reg = s->regs[IBEX_SPI_HOST_INTR_TEST];
    uint32_t intr_en_reg = s->regs[IBEX_SPI_HOST_INTR_ENABLE];
    uint32_t intr_state_reg = s->regs[IBEX_SPI_HOST_INTR_STATE];

    uint32_t err_en_reg = s->regs[IBEX_SPI_HOST_ERROR_ENABLE];
    uint32_t event_en_reg = s->regs[IBEX_SPI_HOST_EVENT_ENABLE];
    uint32_t err_status_reg = s->regs[IBEX_SPI_HOST_ERROR_STATUS];
    uint32_t status_reg = s->regs[IBEX_SPI_HOST_STATUS];


    bool error_en = FIELD_EX32(intr_en_reg, INTR_ENABLE, ERROR);
    bool event_en = FIELD_EX32(intr_en_reg, INTR_ENABLE, SPI_EVENT);
    bool err_pending = FIELD_EX32(intr_state_reg, INTR_STATE, ERROR);
    bool status_pending = FIELD_EX32(intr_state_reg, INTR_STATE, SPI_EVENT);

    int err_irq = 0, event_irq = 0;

    /* Error IRQ enabled and Error IRQ Cleared */
    if (error_en && !err_pending) {
        /* Error interrupt forced via INTR_TEST */
        if (FIELD_EX32(intr_test_reg, INTR_TEST,  ERROR)) {
            err_irq = 1;
        } else if (FIELD_EX32(err_en_reg, ERROR_ENABLE,  CMDBUSY) &&
                   FIELD_EX32(err_status_reg, ERROR_STATUS,  CMDBUSY)) {
            /* Wrote to COMMAND when not READY */
            err_irq = 1;
        } else if (FIELD_EX32(err_en_reg, ERROR_ENABLE,  CMDINVAL)  &&
                   FIELD_EX32(err_status_reg, ERROR_STATUS,  CMDINVAL)) {
            /* Invalid command segment */
            err_irq = 1;
        } else if (FIELD_EX32(err_en_reg, ERROR_ENABLE,  CSIDINVAL) &&
                   FIELD_EX32(err_status_reg, ERROR_STATUS,  CSIDINVAL)) {
            /* Invalid value for CSID */
            err_irq = 1;
        }
        if (err_irq) {
            s->regs[IBEX_SPI_HOST_INTR_STATE] |= R_INTR_STATE_ERROR_MASK;
        }
    }

    qemu_set_irq(s->host_err, err_irq);

    /* Event IRQ Enabled and Event IRQ Cleared */
    if (event_en && !status_pending) {
        if (FIELD_EX32(intr_test_reg, INTR_TEST,  SPI_EVENT)) {
            /* Event enabled, Interrupt Test Event */
            event_irq = 1;
        } else if (FIELD_EX32(event_en_reg, EVENT_ENABLE,  READY) &&
                   FIELD_EX32(status_reg, STATUS, READY)) {
            /* SPI Host ready for next command */
            event_irq = 1;
        } else if (FIELD_EX32(event_en_reg, EVENT_ENABLE,  TXEMPTY) &&
                   FIELD_EX32(status_reg, STATUS,  TXEMPTY)) {
            /* SPI TXEMPTY, TXFIFO drained */
            event_irq = 1;
        } else if (FIELD_EX32(event_en_reg, EVENT_ENABLE,  RXFULL) &&
                   FIELD_EX32(status_reg, STATUS,  RXFULL)) {
            /* SPI RXFULL, RXFIFO full */
            event_irq = 1;
        }
        if (event_irq) {
            s->regs[IBEX_SPI_HOST_INTR_STATE] |= R_INTR_STATE_SPI_EVENT_MASK;
        }
    }

    qemu_set_irq(s->event, event_irq);
}

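/*
 * Run the data phase of the current COMMAND segment: bytes popped from
 * the TX FIFO are shifted out on the SSI bus, and the bytes returned by
 * the device are pushed into the RX FIFO.
 */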
static void ibex_spi_host_transfer(IbexSPIHostState *s)
{
    uint32_t rx, tx, data;
    /* Get the number of one-byte transfers */
    uint8_t segment_len = FIELD_EX32(s->regs[IBEX_SPI_HOST_COMMAND],
                                     COMMAND,  LEN);

    while (segment_len > 0) {
        if (fifo8_is_empty(&s->tx_fifo)) {
            /* Assert Stall */
            s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_TXSTALL_MASK;
            break;
        } else if (fifo8_is_full(&s->rx_fifo)) {
            /* Assert Stall */
            s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXSTALL_MASK;
            break;
        } else {
            tx = fifo8_pop(&s->tx_fifo);
        }

        rx = ssi_transfer(s->ssi, tx);

        trace_ibex_spi_host_transfer(tx, rx);

        if (!fifo8_is_full(&s->rx_fifo)) {
            fifo8_push(&s->rx_fifo, rx);
        } else {
            /* Assert RXFULL */
            s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXFULL_MASK;
        }
        --segment_len;
    }

    data = s->regs[IBEX_SPI_HOST_STATUS];
    /* Assert Ready */
    data = FIELD_DP32(data, STATUS, READY, 1);
    /* Set RXQD */
    data = FIELD_DP32(data, STATUS, RXQD, div4_round_up(segment_len));
    /* Set TXQD */
    data = FIELD_DP32(data, STATUS, TXQD, fifo8_num_used(&s->tx_fifo) / 4);
    /* Clear TXFULL */
    data = FIELD_DP32(data, STATUS, TXFULL, 0);
    /* Reset RXEMPTY */
    data = FIELD_DP32(data, STATUS, RXEMPTY, 0);
    /* Update register status */
    s->regs[IBEX_SPI_HOST_STATUS] = data;
    /* Drop remaining bytes that exceed segment_len */
    ibex_spi_txfifo_reset(s);

    ibex_spi_host_irq(s);
}

static uint64_t ibex_spi_host_read(void *opaque, hwaddr addr,
                                   unsigned int size)
{
    IbexSPIHostState *s = opaque;
    uint32_t rc = 0;
    uint8_t rx_byte = 0;

    trace_ibex_spi_host_read(addr, size);

    /* Match reg index */
    addr = addr >> 2;
    switch (addr) {
    /* Skipping any W/O registers */
    case IBEX_SPI_HOST_INTR_STATE...IBEX_SPI_HOST_INTR_ENABLE:
    case IBEX_SPI_HOST_CONTROL...IBEX_SPI_HOST_STATUS:
        rc = s->regs[addr];
        break;
    case IBEX_SPI_HOST_CSID:
        rc = s->regs[addr];
        break;
    case IBEX_SPI_HOST_CONFIGOPTS:
        rc = s->config_opts[s->regs[IBEX_SPI_HOST_CSID]];
        break;
    case IBEX_SPI_HOST_TXDATA:
        rc = s->regs[addr];
        break;
    case IBEX_SPI_HOST_RXDATA:
        /* Clear RXFULL */
        s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_RXFULL_MASK;

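        /*
         * Assemble one 32-bit word, least significant byte first,
         * from up to four bytes popped off the RX FIFO.
         */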
        for (int i = 0; i < 4; ++i) {
            if (fifo8_is_empty(&s->rx_fifo)) {
                /* Assert RXEMPTY, no IRQ */
                s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXEMPTY_MASK;
                s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
                                                R_ERROR_STATUS_UNDERFLOW_MASK;
                return rc;
            }
            rx_byte = fifo8_pop(&s->rx_fifo);
            rc |= rx_byte << (i * 8);
        }
        break;
    case IBEX_SPI_HOST_ERROR_ENABLE...IBEX_SPI_HOST_EVENT_ENABLE:
        rc = s->regs[addr];
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Bad offset 0x%" HWADDR_PRIx "\n",
                      addr << 2);
    }
    return rc;
}


static void ibex_spi_host_write(void *opaque, hwaddr addr,
                                uint64_t val64, unsigned int size)
{
    IbexSPIHostState *s = opaque;
    uint32_t val32 = val64;
    uint32_t shift_mask = 0xff, status = 0, data = 0;
    uint8_t txqd_len;

    trace_ibex_spi_host_write(addr, size, val64);

    /* Match reg index */
    addr = addr >> 2;

    switch (addr) {
    /* Skipping any R/O registers */
    case IBEX_SPI_HOST_INTR_STATE:
        /* rw1c status register */
        if (FIELD_EX32(val32, INTR_STATE, ERROR)) {
            data = FIELD_DP32(data, INTR_STATE, ERROR, 0);
        }
        if (FIELD_EX32(val32, INTR_STATE, SPI_EVENT)) {
            data = FIELD_DP32(data, INTR_STATE, SPI_EVENT, 0);
        }
        s->regs[addr] = data;
        break;
    case IBEX_SPI_HOST_INTR_ENABLE:
        s->regs[addr] = val32;
        break;
    case IBEX_SPI_HOST_INTR_TEST:
        s->regs[addr] = val32;
        ibex_spi_host_irq(s);
        break;
    case IBEX_SPI_HOST_ALERT_TEST:
        s->regs[addr] = val32;
        qemu_log_mask(LOG_UNIMP,
                        "%s: SPI_ALERT_TEST is not supported\n", __func__);
        break;
    case IBEX_SPI_HOST_CONTROL:
        s->regs[addr] = val32;

        if (val32 & R_CONTROL_SW_RST_MASK)  {
            ibex_spi_host_reset((DeviceState *)s);
            /* Clear active if any */
            s->regs[IBEX_SPI_HOST_STATUS] &=  ~R_STATUS_ACTIVE_MASK;
        }

        if (val32 & R_CONTROL_OUTPUT_EN_MASK)  {
            qemu_log_mask(LOG_UNIMP,
                          "%s: CONTROL_OUTPUT_EN is not supported\n", __func__);
        }
        break;
    case IBEX_SPI_HOST_CONFIGOPTS:
        /* Update the respective CONFIGOPTS multi-register, indexed by CSID */
        s->config_opts[s->regs[IBEX_SPI_HOST_CSID]] = val32;
        qemu_log_mask(LOG_UNIMP,
                      "%s: CONFIGOPTS Hardware settings not supported\n",
                         __func__);
        break;
    case IBEX_SPI_HOST_CSID:
        if (val32 >= s->num_cs) {
            /* CSID exceeds max num_cs */
            s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
                                                R_ERROR_STATUS_CSIDINVAL_MASK;
            ibex_spi_host_irq(s);
            return;
        }
        s->regs[addr] = val32;
        break;
    case IBEX_SPI_HOST_COMMAND:
        s->regs[addr] = val32;

        /* STALL, IP not enabled */
        if (!(FIELD_EX32(s->regs[IBEX_SPI_HOST_CONTROL],
                         CONTROL, SPIEN))) {
            return;
        }

        /* SPI not ready, IRQ Error */
        if (!(FIELD_EX32(s->regs[IBEX_SPI_HOST_STATUS],
                         STATUS, READY))) {
            s->regs[IBEX_SPI_HOST_ERROR_STATUS] |= R_ERROR_STATUS_CMDBUSY_MASK;
            ibex_spi_host_irq(s);
            return;
        }

        /* Assert Not Ready */
        s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_READY_MASK;

        if (FIELD_EX32(val32, COMMAND, DIRECTION) != BIDIRECTIONAL_TRANSFER) {
            qemu_log_mask(LOG_UNIMP,
                          "%s: Rx Only/Tx Only are not supported\n", __func__);
        }

        if (val32 & R_COMMAND_CSAAT_MASK)  {
            qemu_log_mask(LOG_UNIMP,
                          "%s: CSAAT is not supported\n", __func__);
        }
        if (val32 & R_COMMAND_SPEED_MASK)  {
            qemu_log_mask(LOG_UNIMP,
                          "%s: SPEED is not supported\n", __func__);
        }

        /* Set Transfer Callback */
        timer_mod(s->fifo_trigger_handle,
                    qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                    (TX_INTERRUPT_TRIGGER_DELAY_NS));

        break;
    case IBEX_SPI_HOST_TXDATA:
        /*
         * This is a hardware `feature` where
         * the first word written to TXDATA after init is omitted entirely
         */
        if (s->init_status) {
            s->init_status = false;
            return;
        }

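        /*
         * Each 32-bit TXDATA write enqueues up to four bytes into the
         * TX FIFO, least significant byte first.
         */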
        for (int i = 0; i < 4; ++i) {
            /* Attempting to write when TXFULL */
            if (fifo8_is_full(&s->tx_fifo)) {
                /* Assert TXFULL and raise an OVERFLOW error */
                s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_TXFULL_MASK;
                s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
                                                 R_ERROR_STATUS_OVERFLOW_MASK;
                ibex_spi_host_irq(s);
                return;
            }
            /* Byte ordering is set by the IP */
            status = s->regs[IBEX_SPI_HOST_STATUS];
            if (FIELD_EX32(status, STATUS, BYTEORDER) == 0) {
                /* LE: LSB transmitted first (default for Ibex processor) */
                shift_mask = 0xff << (i * 8);
            } else {
                /* BE: MSB transmitted first */
                qemu_log_mask(LOG_UNIMP,
                             "%s: Big endian is not supported\n", __func__);
            }

            fifo8_push(&s->tx_fifo, (val32 & shift_mask) >> (i * 8));
        }
        status = s->regs[IBEX_SPI_HOST_STATUS];
        /* Reset TXEMPTY */
        status = FIELD_DP32(status, STATUS, TXEMPTY, 0);
        /* Update TXQD */
        txqd_len = FIELD_EX32(status, STATUS, TXQD);
        /* TXQD counts words; partial writes (size < 4) are padded to a word */
        txqd_len += 1;
        status = FIELD_DP32(status, STATUS, TXQD, txqd_len);
        /* Assert Ready */
        status = FIELD_DP32(status, STATUS, READY, 1);
        /* Update register status */
        s->regs[IBEX_SPI_HOST_STATUS] = status;
        break;
    case IBEX_SPI_HOST_ERROR_ENABLE:
        s->regs[addr] = val32;

        if (val32 & R_ERROR_ENABLE_CMDINVAL_MASK)  {
            qemu_log_mask(LOG_UNIMP,
                          "%s: Segment Length is not supported\n", __func__);
        }
        break;
    case IBEX_SPI_HOST_ERROR_STATUS:
    /*
     *  Indicates any errors that have occurred.
     *  When an error occurs, the corresponding bit must be cleared
     *  here before issuing any further commands
     */
        status = s->regs[addr];
        /* rw1c status register */
        if (FIELD_EX32(val32, ERROR_STATUS, CMDBUSY)) {
            status = FIELD_DP32(status, ERROR_STATUS, CMDBUSY, 0);
        }
        if (FIELD_EX32(val32, ERROR_STATUS, OVERFLOW)) {
            status = FIELD_DP32(status, ERROR_STATUS, OVERFLOW, 0);
        }
        if (FIELD_EX32(val32, ERROR_STATUS, UNDERFLOW)) {
            status = FIELD_DP32(status, ERROR_STATUS, UNDERFLOW, 0);
        }
        if (FIELD_EX32(val32, ERROR_STATUS, CMDINVAL)) {
            status = FIELD_DP32(status, ERROR_STATUS, CMDINVAL, 0);
        }
        if (FIELD_EX32(val32, ERROR_STATUS, CSIDINVAL)) {
            status = FIELD_DP32(status, ERROR_STATUS, CSIDINVAL, 0);
        }
        if (FIELD_EX32(val32, ERROR_STATUS, ACCESSINVAL)) {
            status = FIELD_DP32(status, ERROR_STATUS, ACCESSINVAL, 0);
        }
        s->regs[addr] = status;
        break;
    case IBEX_SPI_HOST_EVENT_ENABLE:
    /* Controls which classes of SPI events raise an interrupt. */
        s->regs[addr] = val32;

        if (val32 & R_EVENT_ENABLE_RXWM_MASK)  {
            qemu_log_mask(LOG_UNIMP,
                          "%s: RXWM is not supported\n", __func__);
        }
        if (val32 & R_EVENT_ENABLE_TXWM_MASK)  {
            qemu_log_mask(LOG_UNIMP,
                          "%s: TXWM is not supported\n", __func__);
        }

        if (val32 & R_EVENT_ENABLE_IDLE_MASK)  {
            qemu_log_mask(LOG_UNIMP,
                          "%s: IDLE is not supported\n", __func__);
        }
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "Bad offset 0x%" HWADDR_PRIx "\n",
                      addr << 2);
    }
}

static const MemoryRegionOps ibex_spi_ops = {
    .read = ibex_spi_host_read,
    .write = ibex_spi_host_write,
    /* Ibex default LE */
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static const Property ibex_spi_properties[] = {
    DEFINE_PROP_UINT32("num_cs", IbexSPIHostState, num_cs, 1),
};

static const VMStateDescription vmstate_ibex = {
    .name = TYPE_IBEX_SPI_HOST,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IbexSPIHostState, IBEX_SPI_HOST_MAX_REGS),
        VMSTATE_VARRAY_UINT32(config_opts, IbexSPIHostState,
                              num_cs, 0, vmstate_info_uint32, uint32_t),
        VMSTATE_FIFO8(rx_fifo, IbexSPIHostState),
        VMSTATE_FIFO8(tx_fifo, IbexSPIHostState),
        VMSTATE_TIMER_PTR(fifo_trigger_handle, IbexSPIHostState),
        VMSTATE_BOOL(init_status, IbexSPIHostState),
        VMSTATE_END_OF_LIST()
    }
};

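/*
 * Timer callback armed by a COMMAND write: the transfer is modelled as
 * completing TX_INTERRUPT_TRIGGER_DELAY_NS of virtual time later.
 */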
static void fifo_trigger_update(void *opaque)
{
    IbexSPIHostState *s = opaque;
    ibex_spi_host_transfer(s);
}

static void ibex_spi_host_realize(DeviceState *dev, Error **errp)
{
    IbexSPIHostState *s = IBEX_SPI_HOST(dev);
    int i;

    s->ssi = ssi_create_bus(dev, "ssi");
    s->cs_lines = g_new0(qemu_irq, s->num_cs);

    for (i = 0; i < s->num_cs; ++i) {
        sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->cs_lines[i]);
    }

    /* Setup CONFIGOPTS Multi-register */
    s->config_opts = g_new0(uint32_t, s->num_cs);

    /* Setup FIFO Interrupt Timer */
    s->fifo_trigger_handle = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          fifo_trigger_update, s);

    /* FIFO sizes as per OT Spec */
    fifo8_create(&s->tx_fifo, IBEX_SPI_HOST_TXFIFO_LEN);
    fifo8_create(&s->rx_fifo, IBEX_SPI_HOST_RXFIFO_LEN);
}

static void ibex_spi_host_init(Object *obj)
{
    IbexSPIHostState *s = IBEX_SPI_HOST(obj);

    sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->host_err);
    sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->event);

    memory_region_init_io(&s->mmio, obj, &ibex_spi_ops, s,
                          TYPE_IBEX_SPI_HOST, 0x1000);
    sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
}

static void ibex_spi_host_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    dc->realize = ibex_spi_host_realize;
    device_class_set_legacy_reset(dc, ibex_spi_host_reset);
    dc->vmsd = &vmstate_ibex;
    device_class_set_props(dc, ibex_spi_properties);
}

static const TypeInfo ibex_spi_host_info = {
    .name          = TYPE_IBEX_SPI_HOST,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IbexSPIHostState),
    .instance_init = ibex_spi_host_init,
    .class_init    = ibex_spi_host_class_init,
};

static void ibex_spi_host_register_types(void)
{
    type_register_static(&ibex_spi_host_info);
}

type_init(ibex_spi_host_register_types)