1 /*
2 * QEMU PowerPC SPI model
3 *
4 * Copyright (c) 2024, IBM Corporation.
5 *
6 * SPDX-License-Identifier: GPL-2.0-or-later
7 */
8
9 #include "qemu/osdep.h"
10 #include "qemu/log.h"
11 #include "hw/qdev-properties.h"
12 #include "hw/ppc/pnv_xscom.h"
13 #include "hw/ssi/pnv_spi.h"
14 #include "hw/ssi/pnv_spi_regs.h"
15 #include "hw/ssi/ssi.h"
16 #include <libfdt.h>
17 #include "hw/irq.h"
18 #include "trace.h"
19
/* Low nibble of a sequencer opcode: implicit-mode count / branch target. */
#define PNV_SPI_OPCODE_LO_NIBBLE(x) (x & 0x0F)
/* High nibble of a sequencer opcode: selects the operation kind. */
#define PNV_SPI_MASKED_OPCODE(x) (x & 0xF0)
/* Depth (in bytes) of the internal TX/RX FIFOs. */
#define PNV_SPI_FIFO_SIZE 16
/* Consecutive RDR-match failures tolerated before stopping the sequencer. */
#define RDR_MATCH_FAILURE_LIMIT 16

/*
 * Macro from include/hw/ppc/fdt.h
 * fdt.h cannot be included here as it contain ppc target specific dependency.
 */
#define _FDT(exp)                                                  \
    do {                                                           \
        int _ret = (exp);                                          \
        if (_ret < 0) {                                            \
            qemu_log_mask(LOG_GUEST_ERROR,                         \
                          "error creating device tree: %s: %s",    \
                          #exp, fdt_strerror(_ret));               \
            exit(1);                                               \
        }                                                          \
    } while (0)
39
does_rdr_match(PnvSpi * s)40 static bool does_rdr_match(PnvSpi *s)
41 {
42 /*
43 * According to spec, the mask bits that are 0 are compared and the
44 * bits that are 1 are ignored.
45 */
46 uint16_t rdr_match_mask = GETFIELD(SPI_MM_RDR_MATCH_MASK, s->regs[SPI_MM_REG]);
47 uint16_t rdr_match_val = GETFIELD(SPI_MM_RDR_MATCH_VAL, s->regs[SPI_MM_REG]);
48
49 if ((~rdr_match_mask & rdr_match_val) == ((~rdr_match_mask) &
50 GETFIELD(PPC_BITMASK(48, 63), s->regs[SPI_RCV_DATA_REG]))) {
51 return true;
52 }
53 return false;
54 }
55
/*
 * Return one byte from the 64-bit Transmit Data Register.  Offset 0 is
 * the most significant byte; valid offsets are 0..PNV_SPI_REG_SIZE - 1.
 */
static uint8_t get_from_offset(PnvSpi *s, uint8_t offset)
{
    /* Reject out-of-range indexes up front and hand back a filler byte. */
    if (offset >= PNV_SPI_REG_SIZE) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid offset = %d used to get byte "
                      "from TDR\n", offset);
        return 0xff;
    }
    return (s->regs[SPI_XMIT_DATA_REG] >> (56 - offset * 8)) & 0xFF;
}
77
/*
 * Move nr_bytes of response data from rx_fifo into the RDR, shifting in
 * from the right.  When ECC is active (ecc_count != 0) every
 * (PNV_SPI_REG_SIZE + ecc_count)-th shifted byte position is treated as
 * ECC and is not loaded into the RDR.  Returns the running shift count
 * so a following segment can continue the ECC bookkeeping.
 */
static uint8_t read_from_frame(PnvSpi *s, uint8_t nr_bytes, uint8_t ecc_count,
                               uint8_t shift_in_count)
{
    for (int i = 0; i < nr_bytes; i++) {
        shift_in_count++;
        if ((ecc_count != 0) &&
            (shift_in_count == (PNV_SPI_REG_SIZE + ecc_count))) {
            /* ECC position: discard it and restart the cycle. */
            shift_in_count = 0;
        } else if (!fifo8_is_empty(&s->rx_fifo)) {
            uint8_t byte = fifo8_pop(&s->rx_fifo);
            trace_pnv_spi_shift_rx(byte, i);
            s->regs[SPI_RCV_DATA_REG] =
                (s->regs[SPI_RCV_DATA_REG] << 8) | byte;
        } else {
            qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: Reading empty RX_FIFO\n");
        }
    }
    return shift_in_count;
}
100
/*
 * Unload the response frame accumulated in rx_fifo into the Receive
 * Data Register according to the N1/N2 receive counts, discarding ECC
 * bytes when ECC is enabled, then update RDR full/overrun status.
 */
static void spi_response(PnvSpi *s)
{
    uint8_t ecc_count;
    uint8_t shift_in_count;
    uint32_t rx_len;
    int i;

    /*
     * Processing here must handle:
     * - Which bytes in the payload we should move to the RDR
     * - Explicit mode counter configuration settings
     * - RDR full and RDR overrun status
     */

    /*
     * First check that the response payload is the exact same
     * number of bytes as the request payload was
     */
    rx_len = fifo8_num_used(&s->rx_fifo);
    if (rx_len != (s->N1_bytes + s->N2_bytes)) {
        qemu_log_mask(LOG_GUEST_ERROR, "Invalid response payload size in "
                       "bytes, expected %d, got %d\n",
                       (s->N1_bytes + s->N2_bytes), rx_len);
    } else {
        uint8_t ecc_control;
        trace_pnv_spi_rx_received(rx_len);
        trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx,
                        s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx);
        /*
         * Adding an ECC count let's us know when we have found a payload byte
         * that was shifted in but cannot be loaded into RDR. Bits 29-30 of
         * clock_config_reset_control register equal to either 0b00 or 0b10
         * indicate that we are taking in data with ECC and either applying
         * the ECC or discarding it.
         */
        ecc_count = 0;
        ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL, s->regs[SPI_CLK_CFG_REG]);
        if (ecc_control == 0 || ecc_control == 2) {
            ecc_count = 1;
        }
        /*
         * Use the N1_rx and N2_rx counts to control shifting data from the
         * payload into the RDR.  Keep an overall count of the number of bytes
         * shifted into RDR so we can discard every 9th byte when ECC is
         * enabled.
         */
        shift_in_count = 0;
        /* Handle the N1 portion of the frame first */
        if (s->N1_rx != 0) {
            trace_pnv_spi_rx_read_N1frame();
            shift_in_count = read_from_frame(s, s->N1_bytes, ecc_count,
                                             shift_in_count);
        }
        /* Handle the N2 portion of the frame */
        if (s->N2_rx != 0) {
            /*
             * If N1 was TX-only its response bytes were never consumed
             * above; pop out N1_bytes from rx_fifo so the N2 data that
             * follows them lands in the RDR.
             */
            if (s->N1_rx == 0) {
                for (i = 0; i < s->N1_bytes; i++) {
                    if (!fifo8_is_empty(&s->rx_fifo)) {
                        fifo8_pop(&s->rx_fifo);
                    } else {
                        qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: Reading empty"
                                      " RX_FIFO\n");
                    }
                }
            }
            trace_pnv_spi_rx_read_N2frame();
            shift_in_count = read_from_frame(s, s->N2_bytes, ecc_count,
                                             shift_in_count);
        }
        if ((s->N1_rx + s->N2_rx) > 0) {
            /*
             * Data was received so handle RDR status.
             * It is easier to handle RDR_full and RDR_overrun status here
             * since the RDR register's shift_byte_in method is called
             * multiple times in a row. Controlling RDR status is done here
             * instead of in the RDR scoped methods for that reason.
             */
            if (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1) {
                /*
                 * Data was shifted into the RDR before having been read
                 * causing previous data to have been overrun.
                 */
                s->status = SETFIELD(SPI_STS_RDR_OVERRUN, s->status, 1);
            } else {
                /*
                 * Set status to indicate that the received data register is
                 * full. This flag is only cleared once the RDR is unloaded.
                 */
                s->status = SETFIELD(SPI_STS_RDR_FULL, s->status, 1);
            }
        }
    } /* end of else */
} /* end of spi_response() */
193
/*
 * Shift the queued TX payload out on the SSI bus in transfer_len-sized
 * words, collect the bytes received in return into rx_fifo, then pass
 * the response to spi_response() and reset both FIFOs for the next frame.
 */
static void transfer(PnvSpi *s)
{
    uint32_t tx, rx, payload_len;
    uint8_t rx_byte;

    payload_len = fifo8_num_used(&s->tx_fifo);
    for (int offset = 0; offset < payload_len; offset += s->transfer_len) {
        /* Pack up to transfer_len payload bytes, MSB first. */
        tx = 0;
        for (int i = 0; i < s->transfer_len; i++) {
            if ((offset + i) >= payload_len) {
                /* Short final word: pad the tail with zero bytes. */
                tx <<= 8;
            } else if (!fifo8_is_empty(&s->tx_fifo)) {
                tx = (tx << 8) | fifo8_pop(&s->tx_fifo);
            } else {
                qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO underflow\n");
            }
        }
        rx = ssi_transfer(s->ssi_bus, tx);
        /* Unpack the received word, MSB first, dropping any pad bytes. */
        for (int i = 0; i < s->transfer_len; i++) {
            if ((offset + i) >= payload_len) {
                break;
            }
            rx_byte = (rx >> (8 * (s->transfer_len - 1) - i * 8)) & 0xFF;
            if (!fifo8_is_full(&s->rx_fifo)) {
                fifo8_push(&s->rx_fifo, rx_byte);
            } else {
                qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: RX_FIFO is full\n");
                break;
            }
        }
    }
    spi_response(s);
    /* Reset fifo for next frame */
    fifo8_reset(&s->tx_fifo);
    fifo8_reset(&s->rx_fifo);
}
230
/*
 * Calculate the N1 counters based on passed in opcode and
 * internal register values.
 * The method assumes that the opcode is a Shift_N1 opcode
 * and doesn't test it.
 * The counters returned are:
 * N1 bits: Number of bits in the payload data that are significant
 * to the responder.
 * N1_bytes: Total count of payload bytes for the N1 (portion of the) frame.
 * N1_tx: Total number of bytes taken from TDR for N1
 * N1_rx: Total number of bytes taken from the payload for N1
 */
static void calculate_N1(PnvSpi *s, uint8_t opcode)
{
    /*
     * Shift_N1 opcode form: 0x3M
     * Implicit mode:
     * If M != 0 the shift count is M bytes and M is the number of tx bytes.
     * Forced Implicit mode:
     * M is the shift count but tx and rx is determined by the count control
     * register fields. Note that we only check for forced Implicit mode when
     * M != 0 since the mode doesn't make sense when M = 0.
     * Explicit mode:
     * If M == 0 then shift count is number of bits defined in the
     * Counter Configuration Register's shift_count_N1 field.
     */
    if (PNV_SPI_OPCODE_LO_NIBBLE(opcode) == 0) {
        /* Explicit mode */
        s->N1_bits = GETFIELD(SPI_CTR_CFG_N1, s->regs[SPI_CTR_CFG_REG]);
        /* Round a bit count up to whole payload bytes. */
        s->N1_bytes = (s->N1_bits + 7) / 8;
        s->N1_tx = 0;
        s->N1_rx = 0;
        /* If tx count control for N1 is set, load the tx value */
        if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B2, s->regs[SPI_CTR_CFG_REG]) == 1) {
            s->N1_tx = s->N1_bytes;
        }
        /* If rx count control for N1 is set, load the rx value */
        if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B3, s->regs[SPI_CTR_CFG_REG]) == 1) {
            s->N1_rx = s->N1_bytes;
        }
    } else {
        /* Implicit mode/Forced Implicit mode, use M field from opcode */
        s->N1_bytes = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
        s->N1_bits = s->N1_bytes * 8;
        /*
         * Assume that we are going to transmit the count
         * (pure Implicit only)
         */
        s->N1_tx = s->N1_bytes;
        s->N1_rx = 0;
        /* Let Forced Implicit mode have an effect on the counts */
        if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B1, s->regs[SPI_CTR_CFG_REG]) == 1) {
            /*
             * If Forced Implicit mode and count control doesn't
             * indicate transmit then reset the tx count to 0
             */
            if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B2,
                         s->regs[SPI_CTR_CFG_REG]) == 0) {
                s->N1_tx = 0;
            }
            /* If rx count control for N1 is set, load the rx value */
            if (GETFIELD(SPI_CTR_CFG_N1_CTRL_B3,
                         s->regs[SPI_CTR_CFG_REG]) == 1) {
                s->N1_rx = s->N1_bytes;
            }
        }
    }
    /*
     * Enforce an upper limit on the size of N1 that is equal to the known size
     * of the shift register, 64 bits or 72 bits if ECC is enabled.
     * If the size exceeds 72 bits it is a user error so log an error,
     * cap the size at a max of 64 bits or 72 bits and set the sequencer FSM
     * error bit.
     * NOTE(review): only the clamp and the log happen below; the FSM error
     * bit mentioned here is not set in this function — confirm against spec.
     */
    uint8_t ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL,
                                   s->regs[SPI_CLK_CFG_REG]);
    if (ecc_control == 0 || ecc_control == 2) {
        if (s->N1_bytes > (PNV_SPI_REG_SIZE + 1)) {
            qemu_log_mask(LOG_GUEST_ERROR, "Unsupported N1 shift size when "
                          "ECC enabled, bytes = 0x%x, bits = 0x%x\n",
                          s->N1_bytes, s->N1_bits);
            s->N1_bytes = PNV_SPI_REG_SIZE + 1;
            s->N1_bits = s->N1_bytes * 8;
        }
    } else if (s->N1_bytes > PNV_SPI_REG_SIZE) {
        qemu_log_mask(LOG_GUEST_ERROR, "Unsupported N1 shift size, "
                      "bytes = 0x%x, bits = 0x%x\n", s->N1_bytes, s->N1_bits);
        s->N1_bytes = PNV_SPI_REG_SIZE;
        s->N1_bits = s->N1_bytes * 8;
    }
} /* end of calculate_N1 */
319
/*
 * Shift_N1 operation handler method
 *
 * Builds the N1 segment of the TX payload from the TDR (or 0xFF filler),
 * optionally shifts the frame right away (send_n1_alone), and returns
 * true when the sequencer must stop and wait: TDR empty on a TX shift,
 * RDR full on an RX shift, or a pacing write needed for a chained N2.
 */
static bool operation_shiftn1(PnvSpi *s, uint8_t opcode, bool send_n1_alone)
{
    uint8_t n1_count;
    bool stop = false;
    /*
     * Use a combination of N1 counters to build the N1 portion of the
     * transmit payload.
     * We only care about transmit at this time since the request payload
     * only represents data going out on the controller output line.
     * Leave mode specific considerations in the calculate function since
     * all we really care about are counters that tell use exactly how
     * many bytes are in the payload and how many of those bytes to
     * include from the TDR into the payload.
     */
    calculate_N1(s, opcode);
    trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx,
                    s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx);
    /*
     * Zero out the N2 counters here in case there is no N2 operation following
     * the N1 operation in the sequencer. This keeps leftover N2 information
     * from interfering with spi_response logic.
     */
    s->N2_bits = 0;
    s->N2_bytes = 0;
    s->N2_tx = 0;
    s->N2_rx = 0;
    /*
     * N1_bytes is the overall size of the N1 portion of the frame regardless of
     * whether N1 is used for tx, rx or both. Loop over the size to build a
     * payload that is N1_bytes long.
     * N1_tx is the count of bytes to take from the TDR and "shift" into the
     * frame which means append those bytes to the payload for the N1 portion
     * of the frame.
     * If N1_tx is 0 or if the count exceeds the size of the TDR append 0xFF to
     * the frame until the overall N1 count is reached.
     */
    n1_count = 0;
    while (n1_count < s->N1_bytes) {
        /*
         * Assuming that if N1_tx is not equal to 0 then it is the same as
         * N1_bytes.
         */
        if ((s->N1_tx != 0) && (n1_count < PNV_SPI_REG_SIZE)) {

            if (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1) {
                /*
                 * Note that we are only appending to the payload IF the TDR
                 * is full otherwise we don't touch the payload because we are
                 * going to NOT send the payload and instead tell the sequencer
                 * that called us to stop and wait for a TDR write so we have
                 * data to load into the payload.
                 */
                uint8_t n1_byte = 0x00;
                n1_byte = get_from_offset(s, n1_count);
                if (!fifo8_is_full(&s->tx_fifo)) {
                    trace_pnv_spi_tx_append("n1_byte", n1_byte, n1_count);
                    fifo8_push(&s->tx_fifo, n1_byte);
                } else {
                    qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO is full\n");
                    break;
                }
            } else {
                /*
                 * We hit a shift_n1 opcode TX but the TDR is empty, tell the
                 * sequencer to stop and break this loop.
                 */
                trace_pnv_spi_sequencer_stop_requested("Shift N1"
                                "set for transmit but TDR is empty");
                stop = true;
                break;
            }
        } else {
            /*
             * Cases here:
             * - we are receiving during the N1 frame segment and the RDR
             *   is full so we need to stop until the RDR is read
             * - we are transmitting and we don't care about RDR status
             *   since we won't be loading RDR during the frame segment.
             * - we are receiving and the RDR is empty so we allow the operation
             *   to proceed.
             */
            if ((s->N1_rx != 0) &&
                (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1)) {
                trace_pnv_spi_sequencer_stop_requested("shift N1"
                                "set for receive but RDR is full");
                stop = true;
                break;
            } else if (!fifo8_is_full(&s->tx_fifo)) {
                /* RX-only (or beyond-TDR) position: pad payload with 0xFF. */
                trace_pnv_spi_tx_append_FF("n1_byte");
                fifo8_push(&s->tx_fifo, 0xff);
            } else {
                qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO is full\n");
                break;
            }
        }
        n1_count++;
    } /* end of while */
    /*
     * If we are not stopping due to an empty TDR and we are doing an N1 TX
     * and the TDR is full we need to clear the TDR_full status.
     * Do this here instead of up in the loop above so we don't log the message
     * in every loop iteration.
     * Ignore the send_n1_alone flag, all that does is defer the TX until the N2
     * operation, which was found immediately after the current opcode. The TDR
     * was unloaded and will be shifted so we have to clear the TDR_full status.
     */
    if (!stop && (s->N1_tx != 0) &&
        (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1)) {
        s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 0);
    }
    /*
     * There are other reasons why the shifter would stop, such as a TDR empty
     * or RDR full condition with N1 set to receive. If we haven't stopped due
     * to either one of those conditions then check if the send_n1_alone flag is
     * equal to False, indicating the next opcode is an N2 operation, AND if
     * the N2 counter reload switch (bit 0 of the N2 count control field) is
     * set. This condition requires a pacing write to "kick" off the N2
     * shift which includes the N1 shift as well when send_n1_alone is False.
     */
    if (!stop && !send_n1_alone &&
        (GETFIELD(SPI_CTR_CFG_N2_CTRL_B0, s->regs[SPI_CTR_CFG_REG]) == 1)) {
        trace_pnv_spi_sequencer_stop_requested("N2 counter reload "
                        "active, stop N1 shift, TDR_underrun set to 1");
        stop = true;
        s->status = SETFIELD(SPI_STS_TDR_UNDERRUN, s->status, 1);
    }
    /*
     * If send_n1_alone is set AND we have a full TDR then this is the first and
     * last payload to send and we don't have an N2 frame segment to add to the
     * payload.
     */
    if (send_n1_alone && !stop) {
        /* We have a TX and a full TDR or an RX and an empty RDR */
        trace_pnv_spi_tx_request("Shifting N1 frame",
                                 fifo8_num_used(&s->tx_fifo));
        transfer(s);
        /*
         * The N1 frame shift is complete; clear the residual N2 counters
         * (no N2 segment followed this shift).
         */
        s->N2_bits = 0;
        s->N2_bytes = 0;
        s->N2_tx = 0;
        s->N2_rx = 0;
    }
    return stop;
} /* end of operation_shiftn1() */
465
/*
 * Calculate the N2 counters based on passed in opcode and
 * internal register values.
 * The method assumes that the opcode is a Shift_N2 opcode
 * and doesn't test it.
 * The counters returned are:
 * N2 bits: Number of bits in the payload data that are significant
 * to the responder.
 * N2_bytes: Total count of payload bytes for the N2 frame.
 * N2_tx: Total number of bytes taken from TDR for N2
 * N2_rx: Total number of bytes taken from the payload for N2
 */
static void calculate_N2(PnvSpi *s, uint8_t opcode)
{
    /*
     * Shift_N2 opcode form: 0x4M
     * Implicit mode:
     * If M!=0 the shift count is M bytes and M is the number of rx bytes.
     * Forced Implicit mode:
     * M is the shift count but tx and rx is determined by the count control
     * register fields. Note that we only check for Forced Implicit mode when
     * M != 0 since the mode doesn't make sense when M = 0.
     * Explicit mode:
     * If M==0 then shift count is number of bits defined in the
     * Counter Configuration Register's shift_count_N2 field.
     */
    if (PNV_SPI_OPCODE_LO_NIBBLE(opcode) == 0) {
        /* Explicit mode */
        s->N2_bits = GETFIELD(SPI_CTR_CFG_N2, s->regs[SPI_CTR_CFG_REG]);
        /* Round a bit count up to whole payload bytes. */
        s->N2_bytes = (s->N2_bits + 7) / 8;
        s->N2_tx = 0;
        s->N2_rx = 0;
        /* If tx count control for N2 is set, load the tx value */
        if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B2, s->regs[SPI_CTR_CFG_REG]) == 1) {
            s->N2_tx = s->N2_bytes;
        }
        /* If rx count control for N2 is set, load the rx value */
        if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B3, s->regs[SPI_CTR_CFG_REG]) == 1) {
            s->N2_rx = s->N2_bytes;
        }
    } else {
        /* Implicit mode/Forced Implicit mode, use M field from opcode */
        s->N2_bytes = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
        s->N2_bits = s->N2_bytes * 8;
        /* Assume that we are going to receive the count */
        s->N2_rx = s->N2_bytes;
        s->N2_tx = 0;
        /* Let Forced Implicit mode have an effect on the counts */
        if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B1, s->regs[SPI_CTR_CFG_REG]) == 1) {
            /*
             * If Forced Implicit mode and count control doesn't
             * indicate a receive then reset the rx count to 0
             */
            if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B3,
                         s->regs[SPI_CTR_CFG_REG]) == 0) {
                s->N2_rx = 0;
            }
            /* If tx count control for N2 is set, load the tx value */
            if (GETFIELD(SPI_CTR_CFG_N2_CTRL_B2,
                         s->regs[SPI_CTR_CFG_REG]) == 1) {
                s->N2_tx = s->N2_bytes;
            }
        }
    }
    /*
     * Enforce an upper limit on the size of N2 that is equal to the
     * known size of the shift register, 64 bits or 72 bits if ECC
     * is enabled.
     * If the size exceeds 72 bits it is a user error so cap the size at
     * a max of 64 bits or 72 bits.
     * NOTE(review): unlike calculate_N1, no error is logged here and no
     * FSM error bit is set — confirm whether that asymmetry is intended.
     */
    uint8_t ecc_control = GETFIELD(SPI_CLK_CFG_ECC_CTRL,
                                   s->regs[SPI_CLK_CFG_REG]);
    if (ecc_control == 0 || ecc_control == 2) {
        if (s->N2_bytes > (PNV_SPI_REG_SIZE + 1)) {
            /* Unsupported N2 shift size when ECC enabled */
            s->N2_bytes = PNV_SPI_REG_SIZE + 1;
            s->N2_bits = s->N2_bytes * 8;
        }
    } else if (s->N2_bytes > PNV_SPI_REG_SIZE) {
        /* Unsupported N2 shift size */
        s->N2_bytes = PNV_SPI_REG_SIZE;
        s->N2_bits = s->N2_bytes * 8;
    }
} /* end of calculate_N2 */
549
/*
 * Shift_N2 operation handler method
 *
 * Builds the N2 segment of the TX payload (continuing from any TDR
 * bytes already consumed by a preceding N1 segment), shifts the frame,
 * and returns true when the sequencer must stop because the RDR is
 * still full on an N2 receive.
 */
static bool operation_shiftn2(PnvSpi *s, uint8_t opcode)
{
    uint8_t n2_count;
    bool stop = false;
    /*
     * Use a combination of N2 counters to build the N2 portion of the
     * transmit payload.
     */
    calculate_N2(s, opcode);
    trace_pnv_spi_log_Ncounts(s->N1_bits, s->N1_bytes, s->N1_tx,
                    s->N1_rx, s->N2_bits, s->N2_bytes, s->N2_tx, s->N2_rx);
    /*
     * The only difference between this code and the code for shift N1 is
     * that this code has to account for the possible presence of N1 transmit
     * bytes already taken from the TDR.
     * If there are bytes to be transmitted for the N2 portion of the frame
     * and there are still bytes in TDR that have not been copied into the
     * TX data of the payload, this code will handle transmitting those
     * remaining bytes.
     * If for some reason the transmit count(s) add up to more than the size
     * of the TDR we will just append 0xFF to the transmit payload data until
     * the payload is N1 + N2 bytes long.
     */
    n2_count = 0;
    while (n2_count < s->N2_bytes) {
        /*
         * If the RDR is full and we need to RX just bail out, letting the
         * code continue will end up building the payload twice in the same
         * buffer since RDR full causes a sequence stop and restart.
         */
        if ((s->N2_rx != 0) &&
            (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1)) {
            trace_pnv_spi_sequencer_stop_requested("shift N2 set"
                            "for receive but RDR is full");
            stop = true;
            break;
        }
        if ((s->N2_tx != 0) && ((s->N1_tx + n2_count) < PNV_SPI_REG_SIZE)) {
            /* Always append data for the N2 segment if it is set for TX */
            uint8_t n2_byte = 0x00;
            /* TDR offsets continue where the N1 segment left off. */
            n2_byte = get_from_offset(s, (s->N1_tx + n2_count));
            if (!fifo8_is_full(&s->tx_fifo)) {
                trace_pnv_spi_tx_append("n2_byte", n2_byte,
                                        (s->N1_tx + n2_count));
                fifo8_push(&s->tx_fifo, n2_byte);
            } else {
                qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO is full\n");
                break;
            }
        } else if (!fifo8_is_full(&s->tx_fifo)) {
            /*
             * Regardless of whether or not N2 is set for TX or RX, we need
             * the number of bytes in the payload to match the overall length
             * of the operation.
             */
            trace_pnv_spi_tx_append_FF("n2_byte");
            fifo8_push(&s->tx_fifo, 0xff);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: TX_FIFO is full\n");
            break;
        }
        n2_count++;
    } /* end of while */
    if (!stop) {
        /* We have a TX and a full TDR or an RX and an empty RDR */
        trace_pnv_spi_tx_request("Shifting N2 frame",
                                 fifo8_num_used(&s->tx_fifo));
        transfer(s);
        /*
         * If we are doing an N2 TX and the TDR is full we need to clear the
         * TDR_full status. Do this here instead of up in the loop above so we
         * don't log the message in every loop iteration.
         */
        if ((s->N2_tx != 0) &&
            (GETFIELD(SPI_STS_TDR_FULL, s->status) == 1)) {
            s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 0);
        }
        /*
         * The N2 frame shift is complete so reset the N2 counters.
         * Reset the N1 counters also in case the frame was a combination of
         * N1 and N2 segments.
         */
        s->N2_bits = 0;
        s->N2_bytes = 0;
        s->N2_tx = 0;
        s->N2_rx = 0;
        s->N1_bits = 0;
        s->N1_bytes = 0;
        s->N1_tx = 0;
        s->N1_rx = 0;
    }
    return stop;
} /* end of operation_shiftn2()*/
643
operation_sequencer(PnvSpi * s)644 static void operation_sequencer(PnvSpi *s)
645 {
646 /*
647 * Loop through each sequencer operation ID and perform the requested
648 * operations.
649 * Flag for indicating if we should send the N1 frame or wait to combine
650 * it with a preceding N2 frame.
651 */
652 bool send_n1_alone = true;
653 bool stop = false; /* Flag to stop the sequencer */
654 uint8_t opcode = 0;
655 uint8_t masked_opcode = 0;
656 uint8_t seq_index;
657
658 /*
659 * Clear the sequencer FSM error bit - general_SPI_status[3]
660 * before starting a sequence.
661 */
662 s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 0);
663 /*
664 * If the FSM is idle set the sequencer index to 0
665 * (new/restarted sequence)
666 */
667 if (GETFIELD(SPI_STS_SEQ_FSM, s->status) == SEQ_STATE_IDLE) {
668 s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, 0);
669 }
670 /*
671 * SPI_STS_SEQ_INDEX of status register is kept in seq_index variable and
672 * updated back to status register at the end of operation_sequencer().
673 */
674 seq_index = GETFIELD(SPI_STS_SEQ_INDEX, s->status);
675 /*
676 * There are only 8 possible operation IDs to iterate through though
677 * some operations may cause more than one frame to be sequenced.
678 */
679 while (seq_index < NUM_SEQ_OPS) {
680 opcode = s->seq_op[seq_index];
681 /* Set sequencer state to decode */
682 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_DECODE);
683 /*
684 * Only the upper nibble of the operation ID is needed to know what
685 * kind of operation is requested.
686 */
687 masked_opcode = PNV_SPI_MASKED_OPCODE(opcode);
688 switch (masked_opcode) {
689 /*
690 * Increment the operation index in each case instead of just
691 * once at the end in case an operation like the branch
692 * operation needs to change the index.
693 */
694 case SEQ_OP_STOP:
695 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
696 /* A stop operation in any position stops the sequencer */
697 trace_pnv_spi_sequencer_op("STOP", seq_index);
698
699 stop = true;
700 s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
701 s->loop_counter_1 = 0;
702 s->loop_counter_2 = 0;
703 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_IDLE);
704 break;
705
706 case SEQ_OP_SELECT_SLAVE:
707 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
708 trace_pnv_spi_sequencer_op("SELECT_SLAVE", seq_index);
709 /*
710 * This device currently only supports a single responder
711 * connection at position 0. De-selecting a responder is fine
712 * and expected at the end of a sequence but selecting any
713 * responder other than 0 should cause an error.
714 */
715 s->responder_select = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
716 if (s->responder_select == 0) {
717 trace_pnv_spi_shifter_done();
718 qemu_set_irq(s->cs_line[0], 1);
719 seq_index++;
720 s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_DONE);
721 } else if (s->responder_select != 1) {
722 qemu_log_mask(LOG_GUEST_ERROR, "Slave selection other than 1 "
723 "not supported, select = 0x%x\n", s->responder_select);
724 trace_pnv_spi_sequencer_stop_requested("invalid responder select");
725 s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
726 stop = true;
727 } else {
728 /*
729 * Only allow an FSM_START state when a responder is
730 * selected
731 */
732 s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_START);
733 trace_pnv_spi_shifter_stating();
734 qemu_set_irq(s->cs_line[0], 0);
735 /*
736 * A Shift_N2 operation is only valid after a Shift_N1
737 * according to the spec. The spec doesn't say if that means
738 * immediately after or just after at any point. We will track
739 * the occurrence of a Shift_N1 to enforce this requirement in
740 * the most generic way possible by assuming that the rule
741 * applies once a valid responder select has occurred.
742 */
743 s->shift_n1_done = false;
744 seq_index++;
745 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
746 SEQ_STATE_INDEX_INCREMENT);
747 }
748 break;
749
750 case SEQ_OP_SHIFT_N1:
751 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
752 trace_pnv_spi_sequencer_op("SHIFT_N1", seq_index);
753 /*
754 * Only allow a shift_n1 when the state is not IDLE or DONE.
755 * In either of those two cases the sequencer is not in a proper
756 * state to perform shift operations because the sequencer has:
757 * - processed a responder deselect (DONE)
758 * - processed a stop opcode (IDLE)
759 * - encountered an error (IDLE)
760 */
761 if ((GETFIELD(SPI_STS_SHIFTER_FSM, s->status) == FSM_IDLE) ||
762 (GETFIELD(SPI_STS_SHIFTER_FSM, s->status) == FSM_DONE)) {
763 qemu_log_mask(LOG_GUEST_ERROR, "Shift_N1 not allowed in "
764 "shifter state = 0x%llx", GETFIELD(
765 SPI_STS_SHIFTER_FSM, s->status));
766 /*
767 * Set sequencer FSM error bit 3 (general_SPI_status[3])
768 * in status reg.
769 */
770 s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 1);
771 trace_pnv_spi_sequencer_stop_requested("invalid shifter state");
772 stop = true;
773 } else {
774 /*
775 * Look for the special case where there is a shift_n1 set for
776 * transmit and it is followed by a shift_n2 set for transmit
777 * AND the combined transmit length of the two operations is
778 * less than or equal to the size of the TDR register. In this
779 * case we want to use both this current shift_n1 opcode and the
780 * following shift_n2 opcode to assemble the frame for
781 * transmission to the responder without requiring a refill of
782 * the TDR between the two operations.
783 */
784 if ((seq_index != 7) &&
785 PNV_SPI_MASKED_OPCODE(s->seq_op[(seq_index + 1)]) ==
786 SEQ_OP_SHIFT_N2) {
787 send_n1_alone = false;
788 }
789 s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_SHIFT_N1);
790 stop = operation_shiftn1(s, opcode, send_n1_alone);
791 if (stop) {
792 /*
793 * The operation code says to stop, this can occur if:
794 * (1) RDR is full and the N1 shift is set for receive
795 * (2) TDR was empty at the time of the N1 shift so we need
796 * to wait for data.
797 * (3) Neither 1 nor 2 are occurring and we aren't sending
798 * N1 alone and N2 counter reload is set (bit 0 of the N2
799 * counter reload field). In this case TDR_underrun will
800 * will be set and the Payload has been loaded so it is
801 * ok to advance the sequencer.
802 */
803 if (GETFIELD(SPI_STS_TDR_UNDERRUN, s->status)) {
804 s->shift_n1_done = true;
805 s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status,
806 FSM_SHIFT_N2);
807 seq_index++;
808 } else {
809 /*
810 * This is case (1) or (2) so the sequencer needs to
811 * wait and NOT go to the next sequence yet.
812 */
813 s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_WAIT);
814 }
815 } else {
816 /* Ok to move on to the next index */
817 s->shift_n1_done = true;
818 seq_index++;
819 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
820 SEQ_STATE_INDEX_INCREMENT);
821 }
822 }
823 break;
824
825 case SEQ_OP_SHIFT_N2:
826 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
827 trace_pnv_spi_sequencer_op("SHIFT_N2", seq_index);
828 if (!s->shift_n1_done) {
829 qemu_log_mask(LOG_GUEST_ERROR, "Shift_N2 is not allowed if a "
830 "Shift_N1 is not done, shifter state = 0x%llx",
831 GETFIELD(SPI_STS_SHIFTER_FSM, s->status));
832 /*
833 * In case the sequencer actually stops if an N2 shift is
834 * requested before any N1 shift is done. Set sequencer FSM
835 * error bit 3 (general_SPI_status[3]) in status reg.
836 */
837 s->status = SETFIELD(SPI_STS_GEN_STATUS_B3, s->status, 1);
838 trace_pnv_spi_sequencer_stop_requested("shift_n2 w/no shift_n1 done");
839 stop = true;
840 } else {
841 /* Ok to do a Shift_N2 */
842 s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_SHIFT_N2);
843 stop = operation_shiftn2(s, opcode);
844 /*
845 * If the operation code says to stop set the shifter state to
846 * wait and stop
847 */
848 if (stop) {
849 s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_WAIT);
850 } else {
851 /* Ok to move on to the next index */
852 seq_index++;
853 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
854 SEQ_STATE_INDEX_INCREMENT);
855 }
856 }
857 break;
858
859 case SEQ_OP_BRANCH_IFNEQ_RDR:
860 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
861 trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_RDR", seq_index);
862 /*
863 * The memory mapping register RDR match value is compared against
864 * the 16 rightmost bytes of the RDR (potentially with masking).
865 * Since this comparison is performed against the contents of the
866 * RDR then a receive must have previously occurred otherwise
867 * there is no data to compare and the operation cannot be
868 * completed and will stop the sequencer until RDR full is set to
869 * 1.
870 */
871 if (GETFIELD(SPI_STS_RDR_FULL, s->status) == 1) {
872 bool rdr_matched = false;
873 rdr_matched = does_rdr_match(s);
874 if (rdr_matched) {
875 trace_pnv_spi_RDR_match("success");
876 s->fail_count = 0;
877 /* A match occurred, increment the sequencer index. */
878 seq_index++;
879 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
880 SEQ_STATE_INDEX_INCREMENT);
881 } else {
882 trace_pnv_spi_RDR_match("failed");
883 s->fail_count++;
884 /*
885 * Branch the sequencer to the index coded into the op
886 * code.
887 */
888 seq_index = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
889 }
890 if (s->fail_count >= RDR_MATCH_FAILURE_LIMIT) {
891 qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi: RDR match failure"
892 " limit crossed %d times hence requesting "
893 "sequencer to stop.\n",
894 RDR_MATCH_FAILURE_LIMIT);
895 stop = true;
896 }
897 /*
898 * Regardless of where the branch ended up we want the
899 * sequencer to continue shifting so we have to clear
900 * RDR_full.
901 */
902 s->status = SETFIELD(SPI_STS_RDR_FULL, s->status, 0);
903 } else {
904 trace_pnv_spi_sequencer_stop_requested("RDR not"
905 "full for 0x6x opcode");
906 stop = true;
907 s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_WAIT);
908 }
909 break;
910
911 case SEQ_OP_TRANSFER_TDR:
912 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
913 qemu_log_mask(LOG_GUEST_ERROR, "Transfer TDR is not supported\n");
914 seq_index++;
915 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_INDEX_INCREMENT);
916 break;
917
918 case SEQ_OP_BRANCH_IFNEQ_INC_1:
919 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
920 trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_INC_1", seq_index);
921 /*
922 * The spec says the loop should execute count compare + 1 times.
923 * However we learned from engineering that we really only loop
924 * count_compare times, count compare = 0 makes this op code a
925 * no-op
926 */
927 if (s->loop_counter_1 !=
928 GETFIELD(SPI_CTR_CFG_CMP1, s->regs[SPI_CTR_CFG_REG])) {
929 /*
930 * Next index is the lower nibble of the branch operation ID,
931 * mask off all but the first three bits so we don't try to
932 * access beyond the sequencer_operation_reg boundary.
933 */
934 seq_index = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
935 s->loop_counter_1++;
936 } else {
937 /* Continue to next index if loop counter is reached */
938 seq_index++;
939 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
940 SEQ_STATE_INDEX_INCREMENT);
941 }
942 break;
943
944 case SEQ_OP_BRANCH_IFNEQ_INC_2:
945 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
946 trace_pnv_spi_sequencer_op("BRANCH_IFNEQ_INC_2", seq_index);
947 uint8_t condition2 = GETFIELD(SPI_CTR_CFG_CMP2,
948 s->regs[SPI_CTR_CFG_REG]);
949 /*
950 * The spec says the loop should execute count compare + 1 times.
951 * However we learned from engineering that we really only loop
952 * count_compare times, count compare = 0 makes this op code a
953 * no-op
954 */
955 if (s->loop_counter_2 != condition2) {
956 /*
957 * Next index is the lower nibble of the branch operation ID,
958 * mask off all but the first three bits so we don't try to
959 * access beyond the sequencer_operation_reg boundary.
960 */
961 seq_index = PNV_SPI_OPCODE_LO_NIBBLE(opcode);
962 s->loop_counter_2++;
963 } else {
964 /* Continue to next index if loop counter is reached */
965 seq_index++;
966 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status,
967 SEQ_STATE_INDEX_INCREMENT);
968 }
969 break;
970
971 default:
972 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_EXECUTE);
973 /* Ignore unsupported operations. */
974 seq_index++;
975 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_INDEX_INCREMENT);
976 break;
977 } /* end of switch */
978 /*
979 * If we used all 8 opcodes without seeing a 00 - STOP in the sequence
980 * we need to go ahead and end things as if there was a STOP at the
981 * end.
982 */
983 if (seq_index == NUM_SEQ_OPS) {
984 /* All 8 opcodes completed, sequencer idling */
985 s->status = SETFIELD(SPI_STS_SHIFTER_FSM, s->status, FSM_IDLE);
986 seq_index = 0;
987 s->loop_counter_1 = 0;
988 s->loop_counter_2 = 0;
989 s->status = SETFIELD(SPI_STS_SEQ_FSM, s->status, SEQ_STATE_IDLE);
990 break;
991 }
992 /* Break the loop if a stop was requested */
993 if (stop) {
994 break;
995 }
996 } /* end of while */
997 /* Update sequencer index field in status.*/
998 s->status = SETFIELD(SPI_STS_SEQ_INDEX, s->status, seq_index);
999 } /* end of operation_sequencer() */
1000
1001 /*
1002 * The SPIC engine and its internal sequencer can be interrupted and reset by
1003 * a hardware signal, the sbe_spicst_hard_reset bits from Pervasive
1004 * Miscellaneous Register of sbe_register_bo device.
1005 * Reset immediately aborts any SPI transaction in progress and returns the
1006 * sequencer and state machines to idle state.
1007 * The configuration register values are not changed. The status register is
1008 * not reset. The engine registers are not reset.
 * The SPIC engine reset does not have any effect on the attached devices.
1010 * Reset handling of any attached devices is beyond the scope of the engine.
1011 */
static void do_reset(DeviceState *dev)
{
    PnvSpi *s = PNV_SPI(dev);
    DeviceState *responder;

    trace_pnv_spi_reset();

    /* (Re)wire our chip-select GPIO to the attached responder, if any */
    responder = ssi_get_cs(s->ssi_bus, 0);
    if (responder != NULL) {
        qdev_connect_gpio_out_named(DEVICE(s), "cs", 0,
                                    qdev_get_gpio_in_named(responder,
                                                           SSI_GPIO_CS, 0));
    }

    /* Clear all N1/N2 shift counters and the sequencer loop counters */
    s->N1_bits = 0;
    s->N1_bytes = 0;
    s->N1_tx = 0;
    s->N1_rx = 0;
    s->N2_bits = 0;
    s->N2_bytes = 0;
    s->N2_tx = 0;
    s->N2_rx = 0;
    s->loop_counter_1 = 0;
    s->loop_counter_2 = 0;

    /* Deassert chip-select: disconnected from responder */
    qemu_set_irq(s->cs_line[0], 1);
}
1040
/*
 * XSCOM read handler for the SPI controller register space.
 * Reading the receive data register has the side effect of clearing
 * RDR_full and, if the shifter was waiting on it, kicking the sequencer.
 */
static uint64_t pnv_spi_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvSpi *s = PNV_SPI(opaque);
    uint32_t reg = addr >> 3;
    uint64_t val = ~0ull;

    switch (reg) {
    case SPI_RCV_DATA_REG:
        val = s->regs[reg];
        trace_pnv_spi_read_RDR(val);
        /* Consuming the RDR empties it and may resume a waiting shifter */
        s->status = SETFIELD(SPI_STS_RDR_FULL, s->status, 0);
        if (GETFIELD(SPI_STS_SHIFTER_FSM, s->status) == FSM_WAIT) {
            trace_pnv_spi_start_sequencer();
            operation_sequencer(s);
        }
        break;
    case SPI_SEQ_OP_REG:
        /* Re-pack the eight sequencer opcode bytes into one 64-bit value */
        val = 0;
        for (int i = 0; i < PNV_SPI_REG_SIZE; i++) {
            val = (val << 8) | s->seq_op[i];
        }
        break;
    case SPI_STS_REG:
        val = s->status;
        break;
    case ERROR_REG:
    case SPI_CTR_CFG_REG:
    case CONFIG_REG1:
    case SPI_CLK_CFG_REG:
    case SPI_MM_REG:
    case SPI_XMIT_DATA_REG:
        /* Plain backing-store reads, no side effects */
        val = s->regs[reg];
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi_regs: Invalid xscom "
                      "read at 0x%" PRIx32 "\n", reg);
        break;
    }

    trace_pnv_spi_read(addr, val);
    return val;
}
1082
/*
 * XSCOM write handler for the SPI controller register space.
 * Writing the transmit data register also starts the operation sequencer;
 * the clock config register implements the 0x5/0xA controller reset
 * handshake.
 */
static void pnv_spi_xscom_write(void *opaque, hwaddr addr,
                                uint64_t val, unsigned size)
{
    PnvSpi *s = PNV_SPI(opaque);
    uint32_t reg = addr >> 3;

    trace_pnv_spi_write(addr, val);

    switch (reg) {
    case SPI_CLK_CFG_REG:
        /*
         * To reset the SPI controller write the sequence 0x5 0xA to
         * reset_control field
         */
        if (GETFIELD(SPI_CLK_CFG_RST_CTRL, s->regs[SPI_CLK_CFG_REG]) == 0x5 &&
            GETFIELD(SPI_CLK_CFG_RST_CTRL, val) == 0xA) {
            /* SPI controller reset sequence completed, resetting */
            s->regs[reg] = SPI_CLK_CFG_HARD_RST;
        } else {
            s->regs[reg] = val;
        }
        break;
    case SPI_XMIT_DATA_REG:
        /*
         * Writing to the transmit data register causes the transmit data
         * register full status bit in the status register to be set. Writing
         * when the transmit data register full status bit is already set
         * causes a "Resource Not Available" condition. This is not possible
         * in the model since writes to this register are not asynchronous to
         * the operation sequence like it would be in hardware.
         */
        s->regs[reg] = val;
        trace_pnv_spi_write_TDR(val);
        s->status = SETFIELD(SPI_STS_TDR_FULL, s->status, 1);
        s->status = SETFIELD(SPI_STS_TDR_UNDERRUN, s->status, 0);
        trace_pnv_spi_start_sequencer();
        operation_sequencer(s);
        break;
    case SPI_SEQ_OP_REG:
        /* Unpack the 64-bit value into the eight sequencer opcode bytes */
        for (int i = 0; i < PNV_SPI_REG_SIZE; i++) {
            s->seq_op[i] = (val >> (56 - i * 8)) & 0xFF;
        }
        break;
    case SPI_STS_REG:
        /* Only the overrun bits are writable; other fields are ignore_write */
        s->status = SETFIELD(SPI_STS_RDR_OVERRUN, s->status,
                             GETFIELD(SPI_STS_RDR, val));
        s->status = SETFIELD(SPI_STS_TDR_OVERRUN, s->status,
                             GETFIELD(SPI_STS_TDR, val));
        break;
    case ERROR_REG:
    case SPI_CTR_CFG_REG:
    case CONFIG_REG1:
    case SPI_MM_REG:
    case SPI_RCV_DATA_REG:
        /* Plain backing-store writes, no side effects */
        s->regs[reg] = val;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "pnv_spi_regs: Invalid xscom "
                      "write at 0x%" PRIx32 "\n", reg);
        break;
    }
}
1145
/* XSCOM accesses are always full 64-bit big-endian reads/writes */
static const MemoryRegionOps pnv_spi_xscom_ops = {
    .read = pnv_spi_xscom_read,
    .write = pnv_spi_xscom_write,
    .valid.min_access_size = 8,
    .valid.max_access_size = 8,
    .impl.min_access_size = 8,
    .impl.max_access_size = 8,
    .endianness = DEVICE_BIG_ENDIAN,
};
1155
/*
 * User-settable properties: controller instance number, owning chip id,
 * and transfer_len (defaults to 4; presumably a transfer width in bytes —
 * confirm against PnvSpi users).
 */
static const Property pnv_spi_properties[] = {
    DEFINE_PROP_UINT32("spic_num", PnvSpi, spic_num, 0),
    DEFINE_PROP_UINT32("chip-id", PnvSpi, chip_id, 0),
    DEFINE_PROP_UINT8("transfer_len", PnvSpi, transfer_len, 4),
};
1161
pnv_spi_realize(DeviceState * dev,Error ** errp)1162 static void pnv_spi_realize(DeviceState *dev, Error **errp)
1163 {
1164 PnvSpi *s = PNV_SPI(dev);
1165 g_autofree char *name = g_strdup_printf("chip%d." TYPE_PNV_SPI_BUS ".%d",
1166 s->chip_id, s->spic_num);
1167 s->ssi_bus = ssi_create_bus(dev, name);
1168 s->cs_line = g_new0(qemu_irq, 1);
1169 qdev_init_gpio_out_named(DEVICE(s), s->cs_line, "cs", 1);
1170
1171 fifo8_create(&s->tx_fifo, PNV_SPI_FIFO_SIZE);
1172 fifo8_create(&s->rx_fifo, PNV_SPI_FIFO_SIZE);
1173
1174 /* spi scoms */
1175 pnv_xscom_region_init(&s->xscom_spic_regs, OBJECT(s), &pnv_spi_xscom_ops,
1176 s, "xscom-spi", PNV10_XSCOM_PIB_SPIC_SIZE);
1177 }
1178
/*
 * Populate the device tree with a node for this SPI controller under
 * the given XSCOM parent node.
 */
static int pnv_spi_dt_xscom(PnvXScomInterface *dev, void *fdt, int offset)
{
    PnvSpi *s = PNV_SPI(dev);
    const char compat[] = "ibm,power10-spi";
    uint32_t spic_pcba = PNV10_XSCOM_PIB_SPIC_BASE +
                         s->spic_num * PNV10_XSCOM_PIB_SPIC_SIZE;
    uint32_t reg[] = {
        cpu_to_be32(spic_pcba),
        cpu_to_be32(PNV10_XSCOM_PIB_SPIC_SIZE)
    };
    g_autofree char *name = g_strdup_printf("pnv_spi@%x", spic_pcba);
    int s_offset = fdt_add_subnode(fdt, offset, name);

    _FDT(s_offset);
    _FDT(fdt_setprop(fdt, s_offset, "reg", reg, sizeof(reg)));
    _FDT(fdt_setprop(fdt, s_offset, "compatible", compat, sizeof(compat)));
    _FDT(fdt_setprop_cell(fdt, s_offset, "spic_num#", s->spic_num));
    return 0;
}
1201
/* QOM class initializer: hook up reset, realize, properties and dt_xscom */
static void pnv_spi_class_init(ObjectClass *klass, const void *data)
{
    PnvXScomInterfaceClass *xscomc = PNV_XSCOM_INTERFACE_CLASS(klass);
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "PowerNV SPI";
    dc->realize = pnv_spi_realize;
    device_class_set_props(dc, pnv_spi_properties);
    device_class_set_legacy_reset(dc, do_reset);

    xscomc->dt_xscom = pnv_spi_dt_xscom;
}
1214
/* Type registration: sysbus device implementing the XSCOM interface */
static const TypeInfo pnv_spi_info = {
    .name = TYPE_PNV_SPI,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PnvSpi),
    .class_init = pnv_spi_class_init,
    .interfaces = (const InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};
1225
/* Register the PnvSpi QOM type with the type system at startup */
static void pnv_spi_register_types(void)
{
    type_register_static(&pnv_spi_info);
}

type_init(pnv_spi_register_types);
1232