xref: /qemu/hw/ssi/pnv_spi.c (revision 7192d7b7fea15f2226a896f02b360bf7cfce1ab1)
1 /*
2  * QEMU PowerPC SPI model
3  *
4  * Copyright (c) 2024, IBM Corporation.
5  *
6  * SPDX-License-Identifier: GPL-2.0-or-later
7  */
8 
9 #include "qemu/osdep.h"
10 #include "qemu/log.h"
11 #include "hw/qdev-properties.h"
12 #include "hw/ppc/pnv_xscom.h"
13 #include "hw/ssi/pnv_spi.h"
14 #include "hw/ssi/pnv_spi_regs.h"
15 #include "hw/ssi/ssi.h"
16 #include <libfdt.h>
17 #include "hw/irq.h"
18 #include "trace.h"
19 
20 #define PNV_SPI_OPCODE_LO_NIBBLE(x) (x & 0x0F)
21 #define PNV_SPI_MASKED_OPCODE(x) (x & 0xF0)
22 #define PNV_SPI_FIFO_SIZE 16
23 
24 /*
25  * Macro from include/hw/ppc/fdt.h
26  * fdt.h cannot be included here as it contain ppc target specific dependency.
27  */
28 #define _FDT(exp)                                                  \
29     do {                                                           \
30         int _ret = (exp);                                          \
31         if (_ret < 0) {                                            \
32             qemu_log_mask(LOG_GUEST_ERROR,                         \
33                     "error creating device tree: %s: %s",          \
34                     #exp, fdt_strerror(_ret));                     \
35             exit(1);                                               \
36         }                                                          \
37     } while (0)
38 
39 static bool does_rdr_match(PnvSpi *s)
40 {
41     /*
42      * According to spec, the mask bits that are 0 are compared and the
43      * bits that are 1 are ignored.
44      */
45     uint16_t rdr_match_mask = GETFIELD(SPI_MM_RDR_MATCH_MASK, s->regs[SPI_MM_REG]);
46     uint16_t rdr_match_val = GETFIELD(SPI_MM_RDR_MATCH_VAL, s->regs[SPI_MM_REG]);
47 
48     if ((~rdr_match_mask & rdr_match_val) == ((~rdr_match_mask) &
49             GETFIELD(PPC_BITMASK(48, 63), s->regs[SPI_RCV_DATA_REG]))) {
50         return true;
51     }
52     return false;
53 }
54 
55 static uint8_t get_from_offset(PnvSpi *s, uint8_t offset)
56 {
57     uint8_t byte;
58 
59     /*
60      * Offset is an index between 0 and PNV_SPI_REG_SIZE - 1
61      * Check the offset before using it.
62      */
63     if (offset < PNV_SPI_REG_SIZE) {
64         byte = (s->regs[SPI_XMIT_DATA_REG] >> (56 - offset * 8)) & 0xFF;
65     } else {
66         /*
67          * Log an error and return a 0xFF since we have to assign something
68          * to byte before returning.
69          */
70         qemu_log_mask(LOG_GUEST_ERROR, "Invalid offset = %d used to get byte "
71                       "from TDR\n", offset);
72         byte = 0xff;
73     }
74     return byte;
75 }
76 
77 static uint8_t read_from_frame(PnvSpi *s, uint8_t nr_bytes, uint8_t ecc_count,
78                 uint8_t shift_in_count)
79 {
80     uint8_t byte;
81     int count = 0;
82 
83     while (count < nr_bytes) {
84         shift_in_count++;
85         if ((ecc_count != 0) &&
86             (shift_in_count == (PNV_SPI_REG_SIZE + ecc_count))) {
87             shift_in_count = 0;
88         } else if (!fifo8_is_empty(&s->rx_fifo)) {
89             byte = fifo8_pop(&s->rx_fifo);
90             trace_pnv_spi_shift_rx(byte, count);
91             s->regs[SPI_RCV_DATA_REG] = (s->regs[SPI_RCV_DATA_REG] << 8) | byte;
92         } else {
93             qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: Reading empty RX_FIFO\n");
94         }
95         count++;
96     } /* end of while */
97     return shift_in_count;
98 }
99 
/*
 * Process the response data collected in rx_fifo by transfer().
 *
 * Responsibilities:
 * - validate that the response length equals the request length
 *   (N1_bytes + N2_bytes),
 * - shift the rx-designated segments into the RDR, discarding every
 *   9th byte when ECC is enabled (ECC control field 0b00 or 0b10),
 * - update the RDR_full / RDR_overrun status bits when data was received.
 */
static void spi_response(PnvSpi *s)
{
    uint8_t ecc_count;
    uint8_t shift_in_count;
    uint32_t rx_len;
    int i;

    /*
     * Processing here must handle:
     * - Which bytes in the payload we should move to the RDR
     * - Explicit mode counter configuration settings
     * - RDR full and RDR overrun status
     */

    /*
     * First check that the response payload is the exact same
     * number of bytes as the request payload was
     */
    rx_len = fifo8_num_used(&s->rx_fifo);
    if (rx_len != (s->N1_bytes + s->N2_bytes)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid response payload size in "
                       "bytes, expected %d, got %d\n",
                       (s->N1_bytes + s->N2_bytes), rx_len);
    } else {
        uint8_t ecc_control;
        trace_pnv_spi_rx_received(rx_len);
        trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx,
                        s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx);
        /*
         * Adding an ECC count let's us know when we have found a payload byte
         * that was shifted in but cannot be loaded into RDR.  Bits 29-30 of
         * clock_config_reset_control register equal to either 0b00 or 0b10
         * indicate that we are taking in data with ECC and either applying
         * the ECC or discarding it.
         */
        ecc_count = 0;
        ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL, s->regs[SPI_CLK_CFG_REG]);
        if (ecc_control == 0 || ecc_control == 2) {
            ecc_count = 1;
        }
        /*
         * Use the N1_rx and N2_rx counts to control shifting data from the
         * payload into the RDR.  Keep an overall count of the number of bytes
         * shifted into RDR so we can discard every 9th byte when ECC is
         * enabled.
         */
        shift_in_count = 0;
        /* Handle the N1 portion of the frame first */
        if (s->N1_rx != 0) {
            trace_pnv_spi_rx_read_N1frame();
            shift_in_count = read_from_frame(s, s->N1_bytes, ecc_count, shift_in_count);
        }
        /* Handle the N2 portion of the frame */
        if (s->N2_rx != 0) {
            /* pop out N1_bytes from rx_fifo if not already */
            if (s->N1_rx == 0) {
                for (i = 0; i < s->N1_bytes; i++) {
                    if (!fifo8_is_empty(&s->rx_fifo)) {
                        fifo8_pop(&s->rx_fifo);
                    } else {
                        qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: Reading empty"
                                                       " RX_FIFO\n");
                    }
                }
            }
            trace_pnv_spi_rx_read_N2frame();
            shift_in_count = read_from_frame(s, s->N2_bytes, ecc_count, shift_in_count);
        }
        if ((s->N1_rx + s->N2_rx) > 0) {
            /*
             * Data was received so handle RDR status.
             * It is easier to handle RDR_full and RDR_overrun status here
             * since the RDR register's shift_byte_in method is called
             * multiple times in a row. Controlling RDR status is done here
             * instead of in the RDR scoped methods for that reason.
             */
            if (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1) {
                /*
                 * Data was shifted into the RDR before having been read
                 * causing previous data to have been overrun.
                 */
                s->status = SETFIELD(SPI_STS_RDR_OVERRUN, s->status, 1);
            } else {
                /*
                 * Set status to indicate that the received data register is
                 * full. This flag is only cleared once the RDR is unloaded.
                 */
                s->status = SETFIELD(SPI_STS_RDR_FULL, s->status, 1);
            }
        }
    } /* end of else */
} /* end of spi_response() */
192 
193 static void transfer(PnvSpi *s)
194 {
195     uint32_t tx, rx, payload_len;
196     uint8_t rx_byte;
197 
198     payload_len = fifo8_num_used(&s->tx_fifo);
199     for (int offset = 0; offset < payload_len; offset += s->transfer_len) {
200         tx = 0;
201         for (int i = 0; i < s->transfer_len; i++) {
202             if ((offset + i) >= payload_len) {
203                 tx <<= 8;
204             } else if (!fifo8_is_empty(&s->tx_fifo)) {
205                 tx = (tx << 8) | fifo8_pop(&s->tx_fifo);
206             } else {
207                 qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO underflow\n");
208             }
209         }
210         rx = ssi_transfer(s->ssi_bus, tx);
211         for (int i = 0; i < s->transfer_len; i++) {
212             if ((offset + i) >= payload_len) {
213                 break;
214             }
215             rx_byte = (rx >> (8 * (s->transfer_len - 1) - i * 8)) & 0xFF;
216             if (!fifo8_is_full(&s->rx_fifo)) {
217                 fifo8_push(&s->rx_fifo, rx_byte);
218             } else {
219                 qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: RX_FIFO is full\n");
220                 break;
221             }
222         }
223     }
224     spi_response(s);
225     /* Reset fifo for next frame */
226     fifo8_reset(&s->tx_fifo);
227     fifo8_reset(&s->rx_fifo);
228 }
229 
230 /*
231  * Calculate the N1 counters based on passed in opcode and
232  * internal register values.
233  * The method assumes that the opcode is a Shift_N1 opcode
234  * and doesn't test it.
235  * The counters returned are:
236  * N1 bits: Number of bits in the payload data that are significant
237  * to the responder.
238  * N1_bytes: Total count of payload bytes for the N1 (portion of the) frame.
239  * N1_tx: Total number of bytes taken from TDR for N1
240  * N1_rx: Total number of bytes taken from the payload for N1
241  */
242 static void calculate_N1(PnvSpi *s, uint8_t opcode)
243 {
244     /*
245      * Shift_N1 opcode form: 0x3M
246      * Implicit mode:
247      * If M != 0 the shift count is M bytes and M is the number of tx bytes.
248      * Forced Implicit mode:
249      * M is the shift count but tx and rx is determined by the count control
250      * register fields.  Note that we only check for forced Implicit mode when
251      * M != 0 since the mode doesn't make sense when M = 0.
252      * Explicit mode:
253      * If M == 0 then shift count is number of bits defined in the
254      * Counter Configuration Register's shift_count_N1 field.
255      */
256     if (PNV_SPI_OPCODE_LO_NIBBLE(opcode) == 0) {
257         /* Explicit mode */
258         s->N1_bits = GETFIELD(SPI_CTR_CFG_N1, s->regs[SPI_CTR_CFG_REG]);
259         s->N1_bytes = (s->N1_bits + 7) / 8;
260         s->N1_tx = 0;
261         s->N1_rx = 0;
262         /* If tx count control for N1 is set, load the tx value */
263         if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B2, s->regs[SPI_CTR_CFG_REG]) == 1) {
264             s->N1_tx = s->N1_bytes;
265         }
266         /* If rx count control for N1 is set, load the rx value */
267         if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B3, s->regs[SPI_CTR_CFG_REG]) == 1) {
268             s->N1_rx = s->N1_bytes;
269         }
270     } else {
271         /* Implicit mode/Forced Implicit mode, use M field from opcode */
272         s->N1_bytes = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
273         s->N1_bits = s->N1_bytes * 8;
274         /*
275          * Assume that we are going to transmit the count
276          * (pure Implicit only)
277          */
278         s->N1_tx = s->N1_bytes;
279         s->N1_rx = 0;
280         /* Let Forced Implicit mode have an effect on the counts */
281         if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B1, s->regs[SPI_CTR_CFG_REG]) == 1) {
282             /*
283              * If Forced Implicit mode and count control doesn't
284              * indicate transmit then reset the tx count to 0
285              */
286             if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B2, s->regs[SPI_CTR_CFG_REG]) == 0) {
287                 s->N1_tx = 0;
288             }
289             /* If rx count control for N1 is set, load the rx value */
290             if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B3, s->regs[SPI_CTR_CFG_REG]) == 1) {
291                 s->N1_rx = s->N1_bytes;
292             }
293         }
294     }
295     /*
296      * Enforce an upper limit on the size of N1 that is equal to the known size
297      * of the shift register, 64 bits or 72 bits if ECC is enabled.
298      * If the size exceeds 72 bits it is a user error so log an error,
299      * cap the size at a max of 64 bits or 72 bits and set the sequencer FSM
300      * error bit.
301      */
302     uint8_t ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL, s->regs[SPI_CLK_CFG_REG]);
303     if (ecc_control == 0 || ecc_control == 2) {
304         if (s->N1_bytes > (PNV_SPI_REG_SIZE + 1)) {
305             qemu_log_mask(LOG_GUEST_ERROR, "Unsupported N1 shift size when "
306                           "ECC enabled, bytes = 0x%x, bits = 0x%x\n",
307                           s->N1_bytes, s->N1_bits);
308             s->N1_bytes = PNV_SPI_REG_SIZE + 1;
309             s->N1_bits = s->N1_bytes * 8;
310         }
311     } else if (s->N1_bytes > PNV_SPI_REG_SIZE) {
312         qemu_log_mask(LOG_GUEST_ERROR, "Unsupported N1 shift size, "
313                       "bytes = 0x%x, bits = 0x%x\n", s->N1_bytes, s->N1_bits);
314         s->N1_bytes = PNV_SPI_REG_SIZE;
315         s->N1_bits = s->N1_bytes * 8;
316     }
317 } /* end of calculate_N1 */
318 
/*
 * Shift_N1 operation handler method.
 *
 * Builds the N1 portion of the transmit payload in tx_fifo from the
 * counters produced by calculate_N1() and, when send_n1_alone is true,
 * performs the bus transfer immediately.  Returns true when the calling
 * sequencer must stop and wait for software action (a TDR write, an RDR
 * read, or a pacing write for an N2 counter reload).
 */
static bool operation_shiftn1(PnvSpi *s, uint8_t opcode, bool send_n1_alone)
{
    uint8_t n1_count;
    bool stop = false;
    /*
     * Use a combination of N1 counters to build the N1 portion of the
     * transmit payload.
     * We only care about transmit at this time since the request payload
     * only represents data going out on the controller output line.
     * Leave mode specific considerations in the calculate function since
     * all we really care about are counters that tell use exactly how
     * many bytes are in the payload and how many of those bytes to
     * include from the TDR into the payload.
     */
    calculate_N1(s, opcode);
    trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx,
                    s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx);
    /*
     * Zero out the N2 counters here in case there is no N2 operation following
     * the N1 operation in the sequencer.  This keeps leftover N2 information
     * from interfering with spi_response logic.
     */
    s->N2_bits = 0;
    s->N2_bytes = 0;
    s->N2_tx = 0;
    s->N2_rx = 0;
    /*
     * N1_bytes is the overall size of the N1 portion of the frame regardless of
     * whether N1 is used for tx, rx or both.  Loop over the size to build a
     * payload that is N1_bytes long.
     * N1_tx is the count of bytes to take from the TDR and "shift" into the
     * frame which means append those bytes to the payload for the N1 portion
     * of the frame.
     * If N1_tx is 0 or if the count exceeds the size of the TDR append 0xFF to
     * the frame until the overall N1 count is reached.
     */
    n1_count = 0;
    while (n1_count < s->N1_bytes) {
        /*
         * Assuming that if N1_tx is not equal to 0 then it is the same as
         * N1_bytes.
         */
        if ((s->N1_tx != 0) && (n1_count < PNV_SPI_REG_SIZE)) {

            if (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1) {
                /*
                 * Note that we are only appending to the payload IF the TDR
                 * is full otherwise we don't touch the payload because we are
                 * going to NOT send the payload and instead tell the sequencer
                 * that called us to stop and wait for a TDR write so we have
                 * data to load into the payload.
                 */
                uint8_t n1_byte = 0x00;
                n1_byte = get_from_offset(s, n1_count);
                if (!fifo8_is_full(&s->tx_fifo)) {
                    trace_pnv_spi_tx_append("n1_byte", n1_byte, n1_count);
                    fifo8_push(&s->tx_fifo, n1_byte);
                } else {
                    qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO is full\n");
                    break;
                }
            } else {
                /*
                 * We hit a shift_n1 opcode TX but the TDR is empty, tell the
                 * sequencer to stop and break this loop.
                 */
                trace_pnv_spi_sequencer_stop_requested("Shift N1"
                                "set for transmit but TDR is empty");
                stop = true;
                break;
            }
        } else {
            /*
             * Cases here:
             * - we are receiving during the N1 frame segment and the RDR
             *   is full so we need to stop until the RDR is read
             * - we are transmitting and we don't care about RDR status
             *   since we won't be loading RDR during the frame segment.
             * - we are receiving and the RDR is empty so we allow the operation
             *   to proceed.
             */
            if ((s->N1_rx != 0) && (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1)) {
                trace_pnv_spi_sequencer_stop_requested("shift N1"
                                "set for receive but RDR is full");
                stop = true;
                break;
            } else if (!fifo8_is_full(&s->tx_fifo)) {
                trace_pnv_spi_tx_append_FF("n1_byte");
                fifo8_push(&s->tx_fifo, 0xff);
            } else {
                qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO is full\n");
                break;
            }
        }
        n1_count++;
    } /* end of while */
    /*
     * If we are not stopping due to an empty TDR and we are doing an N1 TX
     * and the TDR is full we need to clear the TDR_full status.
     * Do this here instead of up in the loop above so we don't log the message
     * in every loop iteration.
     * Ignore the send_n1_alone flag, all that does is defer the TX until the N2
     * operation, which was found immediately after the current opcode.  The TDR
     * was unloaded and will be shifted so we have to clear the TDR_full status.
     */
    if (!stop && (s->N1_tx != 0) &&
        (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1)) {
        s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 0);
    }
    /*
     * There are other reasons why the shifter would stop, such as a TDR empty
     * or RDR full condition with N1 set to receive.  If we haven't stopped due
     * to either one of those conditions then check if the send_n1_alone flag is
     * equal to False, indicating the next opcode is an N2 operation, AND if
     * the N2 counter reload switch (bit 0 of the N2 count control field) is
     * set.  This condition requires a pacing write to "kick" off the N2
     * shift which includes the N1 shift as well when send_n1_alone is False.
     */
    if (!stop && !send_n1_alone &&
       (GETFIELD(SPI_CTR_CFG_N2_CTRL_B0, s->regs[SPI_CTR_CFG_REG]) == 1)) {
        trace_pnv_spi_sequencer_stop_requested("N2 counter reload "
                        "active, stop N1 shift, TDR_underrun set to 1");
        stop = true;
        s->status = SETFIELD(SPI_STS_TDR_UNDERRUN, s->status, 1);
    }
    /*
     * If send_n1_alone is set AND we have a full TDR then this is the first and
     * last payload to send and we don't have an N2 frame segment to add to the
     * payload.
     */
    if (send_n1_alone && !stop) {
        /* We have a TX and a full TDR or an RX and an empty RDR */
        trace_pnv_spi_tx_request("Shifting N1 frame", fifo8_num_used(&s->tx_fifo));
        transfer(s);
        /*
         * The N1 frame shift is complete; clear the N2 counters again
         * (they were already zeroed above).
         * NOTE(review): the comment here previously said "reset the N1
         * counters" but the code clears the N2 counters — confirm whether
         * the N1 counters were intended, as operation_shiftn2() resets
         * both sets after its transfer.
         */
        s->N2_bits = 0;
        s->N2_bytes = 0;
        s->N2_tx = 0;
        s->N2_rx = 0;
    }
    return stop;
} /* end of operation_shiftn1() */
464 
465 /*
466  * Calculate the N2 counters based on passed in opcode and
467  * internal register values.
468  * The method assumes that the opcode is a Shift_N2 opcode
469  * and doesn't test it.
470  * The counters returned are:
471  * N2 bits: Number of bits in the payload data that are significant
472  * to the responder.
473  * N2_bytes: Total count of payload bytes for the N2 frame.
474  * N2_tx: Total number of bytes taken from TDR for N2
475  * N2_rx: Total number of bytes taken from the payload for N2
476  */
477 static void calculate_N2(PnvSpi *s, uint8_t opcode)
478 {
479     /*
480      * Shift_N2 opcode form: 0x4M
481      * Implicit mode:
482      * If M!=0 the shift count is M bytes and M is the number of rx bytes.
483      * Forced Implicit mode:
484      * M is the shift count but tx and rx is determined by the count control
485      * register fields.  Note that we only check for Forced Implicit mode when
486      * M != 0 since the mode doesn't make sense when M = 0.
487      * Explicit mode:
488      * If M==0 then shift count is number of bits defined in the
489      * Counter Configuration Register's shift_count_N1 field.
490      */
491     if (PNV_SPI_OPCODE_LO_NIBBLE(opcode) == 0) {
492         /* Explicit mode */
493         s->N2_bits = GETFIELD(SPI_CTR_CFG_N2, s->regs[SPI_CTR_CFG_REG]);
494         s->N2_bytes = (s->N2_bits + 7) / 8;
495         s->N2_tx = 0;
496         s->N2_rx = 0;
497         /* If tx count control for N2 is set, load the tx value */
498         if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B2, s->regs[SPI_CTR_CFG_REG]) == 1) {
499             s->N2_tx = s->N2_bytes;
500         }
501         /* If rx count control for N2 is set, load the rx value */
502         if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B3, s->regs[SPI_CTR_CFG_REG]) == 1) {
503             s->N2_rx = s->N2_bytes;
504         }
505     } else {
506         /* Implicit mode/Forced Implicit mode, use M field from opcode */
507         s->N2_bytes = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
508         s->N2_bits = s->N2_bytes * 8;
509         /* Assume that we are going to receive the count */
510         s->N2_rx = s->N2_bytes;
511         s->N2_tx = 0;
512         /* Let Forced Implicit mode have an effect on the counts */
513         if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B1, s->regs[SPI_CTR_CFG_REG]) == 1) {
514             /*
515              * If Forced Implicit mode and count control doesn't
516              * indicate a receive then reset the rx count to 0
517              */
518             if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B3, s->regs[SPI_CTR_CFG_REG]) == 0) {
519                 s->N2_rx = 0;
520             }
521             /* If tx count control for N2 is set, load the tx value */
522             if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B2, s->regs[SPI_CTR_CFG_REG]) == 1) {
523                 s->N2_tx = s->N2_bytes;
524             }
525         }
526     }
527     /*
528      * Enforce an upper limit on the size of N1 that is equal to the
529      * known size of the shift register, 64 bits or 72 bits if ECC
530      * is enabled.
531      * If the size exceeds 72 bits it is a user error so log an error,
532      * cap the size at a max of 64 bits or 72 bits and set the sequencer FSM
533      * error bit.
534      */
535     uint8_t ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL, s->regs[SPI_CLK_CFG_REG]);
536     if (ecc_control == 0 || ecc_control == 2) {
537         if (s->N2_bytes > (PNV_SPI_REG_SIZE + 1)) {
538             /* Unsupported N2 shift size when ECC enabled */
539             s->N2_bytes = PNV_SPI_REG_SIZE + 1;
540             s->N2_bits = s->N2_bytes * 8;
541         }
542     } else if (s->N2_bytes > PNV_SPI_REG_SIZE) {
543         /* Unsupported N2 shift size */
544         s->N2_bytes = PNV_SPI_REG_SIZE;
545         s->N2_bits = s->N2_bytes * 8;
546     }
547 } /* end of calculate_N2 */
548 
/*
 * Shift_N2 operation handler method.
 *
 * Builds the N2 portion of the transmit payload (appending to any N1
 * bytes already queued in tx_fifo by operation_shiftn1()), performs the
 * bus transfer unless a stop condition is hit, and resets both N1 and N2
 * counters after the transfer.  Returns true when the calling sequencer
 * must stop (RDR full while N2 is set for receive).
 */

static bool operation_shiftn2(PnvSpi *s, uint8_t opcode)
{
    uint8_t n2_count;
    bool stop = false;
    /*
     * Use a combination of N2 counters to build the N2 portion of the
     * transmit payload.
     */
    calculate_N2(s, opcode);
    trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx,
                    s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx);
    /*
     * The only difference between this code and the code for shift N1 is
     * that this code has to account for the possible presence of N1 transmit
     * bytes already taken from the TDR.
     * If there are bytes to be transmitted for the N2 portion of the frame
     * and there are still bytes in TDR that have not been copied into the
     * TX data of the payload, this code will handle transmitting those
     * remaining bytes.
     * If for some reason the transmit count(s) add up to more than the size
     * of the TDR we will just append 0xFF to the transmit payload data until
     * the payload is N1 + N2 bytes long.
     */
    n2_count = 0;
    while (n2_count < s->N2_bytes) {
        /*
         * If the RDR is full and we need to RX just bail out, letting the
         * code continue will end up building the payload twice in the same
         * buffer since RDR full causes a sequence stop and restart.
         */
        if ((s->N2_rx != 0) && (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1)) {
            trace_pnv_spi_sequencer_stop_requested("shift N2 set"
                            "for receive but RDR is full");
            stop = true;
            break;
        }
        if ((s->N2_tx != 0) && ((s->N1_tx + n2_count) < PNV_SPI_REG_SIZE)) {
            /* Always append data for the N2 segment if it is set for TX */
            uint8_t n2_byte = 0x00;
            n2_byte = get_from_offset(s, (s->N1_tx + n2_count));
            if (!fifo8_is_full(&s->tx_fifo)) {
                trace_pnv_spi_tx_append("n2_byte", n2_byte, (s->N1_tx + n2_count));
                fifo8_push(&s->tx_fifo, n2_byte);
            } else {
                qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO is full\n");
                break;
            }
        } else if (!fifo8_is_full(&s->tx_fifo)) {
            /*
             * Regardless of whether or not N2 is set for TX or RX, we need
             * the number of bytes in the payload to match the overall length
             * of the operation.
             */
            trace_pnv_spi_tx_append_FF("n2_byte");
            fifo8_push(&s->tx_fifo, 0xff);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO is full\n");
            break;
        }
        n2_count++;
    } /* end of while */
    if (!stop) {
        /* We have a TX and a full TDR or an RX and an empty RDR */
        trace_pnv_spi_tx_request("Shifting N2 frame", fifo8_num_used(&s->tx_fifo));
        transfer(s);
        /*
         * If we are doing an N2 TX and the TDR is full we need to clear the
         * TDR_full status. Do this here instead of up in the loop above so we
         * don't log the message in every loop iteration.
         */
        if ((s->N2_tx != 0) && (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1)) {
            s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 0);
        }
        /*
         * The N2 frame shift is complete so reset the N2 counters.
         * Reset the N1 counters also in case the frame was a combination of
         * N1 and N2 segments.
         */
        s->N2_bits = 0;
        s->N2_bytes = 0;
        s->N2_tx = 0;
        s->N2_rx = 0;
        s->N1_bits = 0;
        s->N1_bytes = 0;
        s->N1_tx = 0;
        s->N1_rx = 0;
    }
    return stop;
} /*  end of operation_shiftn2()*/
642 
643 static void operation_sequencer(PnvSpi *s)
644 {
645     /*
646      * Loop through each sequencer operation ID and perform the requested
647      *  operations.
648      * Flag for indicating if we should send the N1 frame or wait to combine
649      * it with a preceding N2 frame.
650      */
651     bool send_n1_alone = true;
652     bool stop = false; /* Flag to stop the sequencer */
653     uint8_t opcode = 0;
654     uint8_t masked_opcode = 0;
655     uint8_t seq_index;
656 
657     /*
658      * Clear the sequencer FSM error bit - general_SPI_status[3]
659      * before starting a sequence.
660      */
661     s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 0);
662     /*
663      * If the FSM is idle set the sequencer index to 0
664      * (new/restarted sequence)
665      */
666     if (GETFIELD(SPI_STS_SEQ_FSM, s->status) == SEQ_STATE_IDLE) {
667         s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, 0);
668     }
669     /*
670      * SPI_STS_SEQ_INDEX of status register is kept in seq_index variable and
671      * updated back to status register at the end of operation_sequencer().
672      */
673     seq_index = GETFIELD(SPI_STS_SEQ_INDEX, s->status);
674     /*
675      * There are only 8 possible operation IDs to iterate through though
676      * some operations may cause more than one frame to be sequenced.
677      */
678     while (seq_index < NUM_SEQ_OPS) {
679         opcode = s->seq_op[seq_index];
680         /* Set sequencer state to decode */
681         s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_DECODE);
682         /*
683          * Only the upper nibble of the operation ID is needed to know what
684          * kind of operation is requested.
685          */
686         masked_opcode = PNV_SPI_MASKED_OPCODE(opcode);
687         switch (masked_opcode) {
688         /*
689          * Increment the operation index in each case instead of just
690          * once at the end in case an operation like the branch
691          * operation needs to change the index.
692          */
693         case SEQ_OP_STOP:
694             s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
695             /* A stop operation in any position stops the sequencer */
696             trace_pnv_spi_sequencer_op("STOP", seq_index);
697 
698             stop = true;
699             s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
700             s->loop_counter_1 = 0;
701             s->loop_counter_2 = 0;
702             s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_IDLE);
703             break;
704 
705         case SEQ_OP_SELECT_SLAVE:
706             s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
707             trace_pnv_spi_sequencer_op("SELECT_SLAVE", seq_index);
708             /*
709              * This device currently only supports a single responder
710              * connection at position 0.  De-selecting a responder is fine
711              * and expected at the end of a sequence but selecting any
712              * responder other than 0 should cause an error.
713              */
714             s->responder_select = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
715             if (s->responder_select == 0) {
716                 trace_pnv_spi_shifter_done();
717                 qemu_set_irq(s->cs_line[0], 1);
718                 seq_index++;
719                 s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_DONE);
720             } else if (s->responder_select != 1) {
721                 qemu_log_mask(LOG_GUEST_ERROR, "Slave selection other than 1 "
722                               "not supported, select = 0x%x\n", s->responder_select);
723                 trace_pnv_spi_sequencer_stop_requested("invalid responder select");
724                 s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
725                 stop = true;
726             } else {
727                 /*
728                  * Only allow an FSM_START state when a responder is
729                  * selected
730                  */
731                 s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_START);
732                 trace_pnv_spi_shifter_stating();
733                 qemu_set_irq(s->cs_line[0], 0);
734                 /*
735                  * A Shift_N2 operation is only valid after a Shift_N1
736                  * according to the spec. The spec doesn't say if that means
737                  * immediately after or just after at any point. We will track
738                  * the occurrence of a Shift_N1 to enforce this requirement in
739                  * the most generic way possible by assuming that the rule
740                  * applies once a valid responder select has occurred.
741                  */
742                 s->shift_n1_done = false;
743                 seq_index++;
744                 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
745                                 SEQ_STATE_INDEX_INCREMENT);
746             }
747             break;
748 
749         case SEQ_OP_SHIFT_N1:
750             s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
751             trace_pnv_spi_sequencer_op("SHIFT_N1", seq_index);
752             /*
753              * Only allow a shift_n1 when the state is not IDLE or DONE.
754              * In either of those two cases the sequencer is not in a proper
755              * state to perform shift operations because the sequencer has:
756              * - processed a responder deselect (DONE)
757              * - processed a stop opcode (IDLE)
758              * - encountered an error (IDLE)
759              */
760             if ((GETFIELD(SPI_STS_SHIFTER_FSM, s->status) == FSM_IDLE) ||
761                 (GETFIELD(SPI_STS_SHIFTER_FSM, s->status) == FSM_DONE)) {
762                 qemu_log_mask(LOG_GUEST_ERROR, "Shift_N1 not allowed in "
763                               "shifter state = 0x%llx", GETFIELD(
764                         SPI_STS_SHIFTER_FSM, s->status));
765                 /*
766                  * Set sequencer FSM error bit 3 (general_SPI_status[3])
767                  * in status reg.
768                  */
769                 s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 1);
770                 trace_pnv_spi_sequencer_stop_requested("invalid shifter state");
771                 stop = true;
772             } else {
773                 /*
774                  * Look for the special case where there is a shift_n1 set for
775                  * transmit and it is followed by a shift_n2 set for transmit
776                  * AND the combined transmit length of the two operations is
777                  * less than or equal to the size of the TDR register. In this
778                  * case we want to use both this current shift_n1 opcode and the
779                  * following shift_n2 opcode to assemble the frame for
780                  * transmission to the responder without requiring a refill of
781                  * the TDR between the two operations.
782                  */
783                 if ((seq_index != 7) &&
784                     PNV_SPI_MASKED_OPCODE(s->seq_op[(seq_index + 1)]) ==
785                     SEQ_OP_SHIFT_N2) {
786                     send_n1_alone = false;
787                 }
788                 s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_SHIFT_N1);
789                 stop = operation_shiftn1(s, opcode, send_n1_alone);
790                 if (stop) {
791                     /*
792                      *  The operation code says to stop, this can occur if:
793                      * (1) RDR is full and the N1 shift is set for receive
794                      * (2) TDR was empty at the time of the N1 shift so we need
795                      * to wait for data.
796                      * (3) Neither 1 nor 2 are occurring and we aren't sending
797                      * N1 alone and N2 counter reload is set (bit 0 of the N2
798                      * counter reload field).  In this case TDR_underrun will
799                      * will be set and the Payload has been loaded so it is
800                      * ok to advance the sequencer.
801                      */
802                     if (GETFIELD(SPI_STS_TDR_UNDERRUN, s->status)) {
803                         s->shift_n1_done = true;
804                         s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
805                                                   FSM_SHIFT_N2);
806                         seq_index++;
807                     } else {
808                         /*
809                          * This is case (1) or (2) so the sequencer needs to
810                          * wait and NOT go to the next sequence yet.
811                          */
812                         s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_WAIT);
813                     }
814                 } else {
815                     /* Ok to move on to the next index */
816                     s->shift_n1_done = true;
817                     seq_index++;
818                     s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
819                                     SEQ_STATE_INDEX_INCREMENT);
820                 }
821             }
822             break;
823 
824         case SEQ_OP_SHIFT_N2:
825             s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
826             trace_pnv_spi_sequencer_op("SHIFT_N2", seq_index);
827             if (!s->shift_n1_done) {
828                 qemu_log_mask(LOG_GUEST_ERROR, "Shift_N2 is not allowed if a "
829                               "Shift_N1 is not done, shifter state = 0x%llx",
830                               GETFIELD(SPI_STS_SHIFTER_FSM, s->status));
831                 /*
832                  * In case the sequencer actually stops if an N2 shift is
833                  * requested before any N1 shift is done. Set sequencer FSM
834                  * error bit 3 (general_SPI_status[3]) in status reg.
835                  */
836                 s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 1);
837                 trace_pnv_spi_sequencer_stop_requested("shift_n2 w/no shift_n1 done");
838                 stop = true;
839             } else {
840                 /* Ok to do a Shift_N2 */
841                 s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_SHIFT_N2);
842                 stop = operation_shiftn2(s, opcode);
843                 /*
844                  * If the operation code says to stop set the shifter state to
845                  * wait and stop
846                  */
847                 if (stop) {
848                     s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_WAIT);
849                 } else {
850                     /* Ok to move on to the next index */
851                     seq_index++;
852                     s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
853                                     SEQ_STATE_INDEX_INCREMENT);
854                 }
855             }
856             break;
857 
858         case SEQ_OP_BRANCH_IFNEQ_RDR:
859             s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
860             trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_RDR", seq_index);
861             /*
862              * The memory mapping register RDR match value is compared against
863              * the 16 rightmost bytes of the RDR (potentially with masking).
864              * Since this comparison is performed against the contents of the
865              * RDR then a receive must have previously occurred otherwise
866              * there is no data to compare and the operation cannot be
867              * completed and will stop the sequencer until RDR full is set to
868              * 1.
869              */
870             if (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1) {
871                 bool rdr_matched = false;
872                 rdr_matched = does_rdr_match(s);
873                 if (rdr_matched) {
874                     trace_pnv_spi_RDR_match("success");
875                     /* A match occurred, increment the sequencer index. */
876                     seq_index++;
877                     s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
878                                     SEQ_STATE_INDEX_INCREMENT);
879                 } else {
880                     trace_pnv_spi_RDR_match("failed");
881                     /*
882                      * Branch the sequencer to the index coded into the op
883                      * code.
884                      */
885                     seq_index = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
886                 }
887                 /*
888                  * Regardless of where the branch ended up we want the
889                  * sequencer to continue shifting so we have to clear
890                  * RDR_full.
891                  */
892                 s->status = SETFIELD(SPI_STS_RDR_FULL, s->status, 0);
893             } else {
894                 trace_pnv_spi_sequencer_stop_requested("RDR not"
895                                 "full for 0x6x opcode");
896                 stop = true;
897                 s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_WAIT);
898             }
899             break;
900 
901         case SEQ_OP_TRANSFER_TDR:
902             s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
903             qemu_log_mask(LOG_GUEST_ERROR, "Transfer TDR is not supported\n");
904             seq_index++;
905             s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_INDEX_INCREMENT);
906             break;
907 
908         case SEQ_OP_BRANCH_IFNEQ_INC_1:
909             s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
910             trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_INC_1", seq_index);
911             /*
912              * The spec says the loop should execute count compare + 1 times.
913              * However we learned from engineering that we really only loop
914              * count_compare times, count compare = 0 makes this op code a
915              * no-op
916              */
917             if (s->loop_counter_1 !=
918                 GETFIELD(SPI_CTR_CFG_CMP1, s->regs[SPI_CTR_CFG_REG])) {
919                 /*
920                  * Next index is the lower nibble of the branch operation ID,
921                  * mask off all but the first three bits so we don't try to
922                  * access beyond the sequencer_operation_reg boundary.
923                  */
924                 seq_index = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
925                 s->loop_counter_1++;
926             } else {
927                 /* Continue to next index if loop counter is reached */
928                 seq_index++;
929                 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
930                                 SEQ_STATE_INDEX_INCREMENT);
931             }
932             break;
933 
934         case SEQ_OP_BRANCH_IFNEQ_INC_2:
935             s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
936             trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_INC_2", seq_index);
937             uint8_t condition2 = GETFIELD(SPI_CTR_CFG_CMP2,
938                               s->regs[SPI_CTR_CFG_REG]);
939             /*
940              * The spec says the loop should execute count compare + 1 times.
941              * However we learned from engineering that we really only loop
942              * count_compare times, count compare = 0 makes this op code a
943              * no-op
944              */
945             if (s->loop_counter_2 != condition2) {
946                 /*
947                  * Next index is the lower nibble of the branch operation ID,
948                  * mask off all but the first three bits so we don't try to
949                  * access beyond the sequencer_operation_reg boundary.
950                  */
951                 seq_index = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
952                 s->loop_counter_2++;
953             } else {
954                 /* Continue to next index if loop counter is reached */
955                 seq_index++;
956                 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
957                                 SEQ_STATE_INDEX_INCREMENT);
958             }
959             break;
960 
961         default:
962             s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
963             /* Ignore unsupported operations. */
964             seq_index++;
965             s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_INDEX_INCREMENT);
966             break;
967         } /* end of switch */
968         /*
969          * If we used all 8 opcodes without seeing a 00 - STOP in the sequence
970          * we need to go ahead and end things as if there was a STOP at the
971          * end.
972          */
973         if (seq_index == NUM_SEQ_OPS) {
974             /* All 8 opcodes completed, sequencer idling */
975             s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
976             seq_index = 0;
977             s->loop_counter_1 = 0;
978             s->loop_counter_2 = 0;
979             s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_IDLE);
980             break;
981         }
982         /* Break the loop if a stop was requested */
983         if (stop) {
984             break;
985         }
986     } /* end of while */
987     /* Update sequencer index field in status.*/
988     s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, seq_index);
989     return;
990 } /* end of operation_sequencer() */
991 
992 /*
993  * The SPIC engine and its internal sequencer can be interrupted and reset by
994  * a hardware signal, the sbe_spicst_hard_reset bits from Pervasive
995  * Miscellaneous Register of sbe_register_bo device.
996  * Reset immediately aborts any SPI transaction in progress and returns the
997  * sequencer and state machines to idle state.
998  * The configuration register values are not changed. The status register is
999  * not reset. The engine registers are not reset.
 * The SPIC engine reset does not have any effect on the attached devices.
1001  * Reset handling of any attached devices is beyond the scope of the engine.
1002  */
1003 static void do_reset(DeviceState *dev)
1004 {
1005     PnvSpi *s = PNV_SPI(dev);
1006     DeviceState *ssi_dev;
1007 
1008     trace_pnv_spi_reset();
1009 
1010     /* Connect cs irq */
1011     ssi_dev = ssi_get_cs(s->ssi_bus, 0);
1012     if (ssi_dev) {
1013         qemu_irq cs_line = qdev_get_gpio_in_named(ssi_dev, SSI_GPIO_CS, 0);
1014         qdev_connect_gpio_out_named(DEVICE(s), "cs", 0, cs_line);
1015     }
1016 
1017     /* Reset all N1 and N2 counters, and other constants */
1018     s->N2_bits = 0;
1019     s->N2_bytes = 0;
1020     s->N2_tx = 0;
1021     s->N2_rx = 0;
1022     s->N1_bits = 0;
1023     s->N1_bytes = 0;
1024     s->N1_tx = 0;
1025     s->N1_rx = 0;
1026     s->loop_counter_1 = 0;
1027     s->loop_counter_2 = 0;
1028     /* Disconnected from responder */
1029     qemu_set_irq(s->cs_line[0], 1);
1030 }
1031 
1032 static uint64_t pnv_spi_xscom_read(void *opaque, hwaddr addr, unsigned size)
1033 {
1034     PnvSpi *s = PNV_SPI(opaque);
1035     uint32_t reg = addr >> 3;
1036     uint64_t val = ~0ull;
1037 
1038     switch (reg) {
1039     case ERROR_REG:
1040     case SPI_CTR_CFG_REG:
1041     case CONFIG_REG1:
1042     case SPI_CLK_CFG_REG:
1043     case SPI_MM_REG:
1044     case SPI_XMIT_DATA_REG:
1045         val = s->regs[reg];
1046         break;
1047     case SPI_RCV_DATA_REG:
1048         val = s->regs[reg];
1049         trace_pnv_spi_read_RDR(val);
1050         s->status = SETFIELD(SPI_STS_RDR_FULL, s->status, 0);
1051         if (GETFIELD(SPI_STS_SHIFTER_FSM, s->status) == FSM_WAIT) {
1052             trace_pnv_spi_start_sequencer();
1053             operation_sequencer(s);
1054         }
1055         break;
1056     case SPI_SEQ_OP_REG:
1057         val = 0;
1058         for (int i = 0; i < PNV_SPI_REG_SIZE; i++) {
1059             val = (val << 8) | s->seq_op[i];
1060         }
1061         break;
1062     case SPI_STS_REG:
1063         val = s->status;
1064         break;
1065     default:
1066         qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi_regs: Invalid xscom "
1067                  "read at 0x%" PRIx32 "\n", reg);
1068     }
1069 
1070     trace_pnv_spi_read(addr, val);
1071     return val;
1072 }
1073 
1074 static void pnv_spi_xscom_write(void *opaque, hwaddr addr,
1075                                  uint64_t val, unsigned size)
1076 {
1077     PnvSpi *s = PNV_SPI(opaque);
1078     uint32_t reg = addr >> 3;
1079 
1080     trace_pnv_spi_write(addr, val);
1081 
1082     switch (reg) {
1083     case ERROR_REG:
1084     case SPI_CTR_CFG_REG:
1085     case CONFIG_REG1:
1086     case SPI_MM_REG:
1087     case SPI_RCV_DATA_REG:
1088         s->regs[reg] = val;
1089         break;
1090     case SPI_CLK_CFG_REG:
1091         /*
1092          * To reset the SPI controller write the sequence 0x5 0xA to
1093          * reset_control field
1094          */
1095         if ((GETFIELD(SPI_CLK_CFG_RST_CTRL, s->regs[SPI_CLK_CFG_REG]) == 0x5)
1096              && (GETFIELD(SPI_CLK_CFG_RST_CTRL, val) == 0xA)) {
1097                 /* SPI controller reset sequence completed, resetting */
1098             s->regs[reg] = SPI_CLK_CFG_HARD_RST;
1099         } else {
1100             s->regs[reg] = val;
1101         }
1102         break;
1103     case SPI_XMIT_DATA_REG:
1104         /*
1105          * Writing to the transmit data register causes the transmit data
1106          * register full status bit in the status register to be set.  Writing
1107          * when the transmit data register full status bit is already set
1108          * causes a "Resource Not Available" condition.  This is not possible
1109          * in the model since writes to this register are not asynchronous to
1110          * the operation sequence like it would be in hardware.
1111          */
1112         s->regs[reg] = val;
1113         trace_pnv_spi_write_TDR(val);
1114         s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 1);
1115         s->status = SETFIELD(SPI_STS_TDR_UNDERRUN, s->status, 0);
1116         trace_pnv_spi_start_sequencer();
1117         operation_sequencer(s);
1118         break;
1119     case SPI_SEQ_OP_REG:
1120         for (int i = 0; i < PNV_SPI_REG_SIZE; i++) {
1121             s->seq_op[i] = (val >> (56 - i * 8)) & 0xFF;
1122         }
1123         break;
1124     case SPI_STS_REG:
1125         /* other fields are ignore_write */
1126         s->status = SETFIELD(SPI_STS_RDR_OVERRUN, s->status,
1127                                   GETFIELD(SPI_STS_RDR, val));
1128         s->status = SETFIELD(SPI_STS_TDR_OVERRUN, s->status,
1129                                   GETFIELD(SPI_STS_TDR, val));
1130         break;
1131     default:
1132         qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi_regs: Invalid xscom "
1133                  "write at 0x%" PRIx32 "\n", reg);
1134     }
1135     return;
1136 }
1137 
/* XSCOM accesses to the SPI engine are always 8 bytes wide, big-endian */
static const MemoryRegionOps pnv_spi_xscom_ops = {
    .read = pnv_spi_xscom_read,
    .write = pnv_spi_xscom_write,
    .valid.min_access_size = 8,
    .valid.max_access_size = 8,
    .impl.min_access_size = 8,
    .impl.max_access_size = 8,
    .endianness = DEVICE_BIG_ENDIAN,
};
1147 
static const Property pnv_spi_properties[] = {
    /* SPI engine instance number; selects the XSCOM base in the DT node */
    DEFINE_PROP_UINT32("spic_num", PnvSpi, spic_num, 0),
    /* Owning chip id; used (with spic_num) to name the SSI bus */
    DEFINE_PROP_UINT32("chip-id", PnvSpi, chip_id, 0),
    /* presumably bytes shifted per transfer — not referenced in this chunk */
    DEFINE_PROP_UINT8("transfer_len", PnvSpi, transfer_len, 4),
};
1153 
1154 static void pnv_spi_realize(DeviceState *dev, Error **errp)
1155 {
1156     PnvSpi *s = PNV_SPI(dev);
1157     g_autofree char *name = g_strdup_printf("chip%d." TYPE_PNV_SPI_BUS ".%d",
1158                     s->chip_id, s->spic_num);
1159     s->ssi_bus = ssi_create_bus(dev, name);
1160     s->cs_line = g_new0(qemu_irq, 1);
1161     qdev_init_gpio_out_named(DEVICE(s), s->cs_line, "cs", 1);
1162 
1163     fifo8_create(&s->tx_fifo, PNV_SPI_FIFO_SIZE);
1164     fifo8_create(&s->rx_fifo, PNV_SPI_FIFO_SIZE);
1165 
1166     /* spi scoms */
1167     pnv_xscom_region_init(&s->xscom_spic_regs, OBJECT(s), &pnv_spi_xscom_ops,
1168                           s, "xscom-spi", PNV10_XSCOM_PIB_SPIC_SIZE);
1169 }
1170 
1171 static int pnv_spi_dt_xscom(PnvXScomInterface *dev, void *fdt,
1172                              int offset)
1173 {
1174     PnvSpi *s = PNV_SPI(dev);
1175     g_autofree char *name;
1176     int s_offset;
1177     const char compat[] = "ibm,power10-spi";
1178     uint32_t spic_pcba = PNV10_XSCOM_PIB_SPIC_BASE +
1179         s->spic_num * PNV10_XSCOM_PIB_SPIC_SIZE;
1180     uint32_t reg[] = {
1181         cpu_to_be32(spic_pcba),
1182         cpu_to_be32(PNV10_XSCOM_PIB_SPIC_SIZE)
1183     };
1184     name = g_strdup_printf("pnv_spi@%x", spic_pcba);
1185     s_offset = fdt_add_subnode(fdt, offset, name);
1186     _FDT(s_offset);
1187 
1188     _FDT(fdt_setprop(fdt, s_offset, "reg", reg, sizeof(reg)));
1189     _FDT(fdt_setprop(fdt, s_offset, "compatible", compat, sizeof(compat)));
1190     _FDT((fdt_setprop_cell(fdt, s_offset, "spic_num#", s->spic_num)));
1191     return 0;
1192 }
1193 
1194 static void pnv_spi_class_init(ObjectClass *klass, void *data)
1195 {
1196     DeviceClass *dc = DEVICE_CLASS(klass);
1197     PnvXScomInterfaceClass *xscomc = PNV_XSCOM_INTERFACE_CLASS(klass);
1198 
1199     xscomc->dt_xscom = pnv_spi_dt_xscom;
1200 
1201     dc->desc = "PowerNV SPI";
1202     dc->realize = pnv_spi_realize;
1203     device_class_set_legacy_reset(dc, do_reset);
1204     device_class_set_props(dc, pnv_spi_properties);
1205 }
1206 
/* QOM type registration data: sysbus device implementing XSCOM access */
static const TypeInfo pnv_spi_info = {
    .name          = TYPE_PNV_SPI,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PnvSpi),
    .class_init    = pnv_spi_class_init,
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};
1217 
/* Register the PnvSpi QOM type; invoked at startup via type_init() below */
static void pnv_spi_register_types(void)
{
    type_register_static(&pnv_spi_info);
}
1222 
1223 type_init(pnv_spi_register_types);
1224