1 /*
2 * SD Association Host Standard Specification v2.0 controller emulation
3 *
4 * Datasheet: PartA2_SD_Host_Controller_Simplified_Specification_Ver2.00.pdf
5 *
6 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
7 * Mitsyanko Igor <i.mitsyanko@samsung.com>
8 * Peter A.G. Crosthwaite <peter.crosthwaite@petalogix.com>
9 *
10 * Based on MMC controller for Samsung S5PC1xx-based board emulation
11 * by Alexey Merkulov and Vladimir Monakhov.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
21 * See the GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License along
24 * with this program; if not, see <http://www.gnu.org/licenses/>.
25 */
26
27 #include "qemu/osdep.h"
28 #include "qemu/units.h"
29 #include "qemu/error-report.h"
30 #include "qapi/error.h"
31 #include "hw/irq.h"
32 #include "hw/qdev-properties.h"
33 #include "system/dma.h"
34 #include "qemu/timer.h"
35 #include "qemu/bitops.h"
36 #include "hw/sd/sdhci.h"
37 #include "migration/vmstate.h"
38 #include "sdhci-internal.h"
39 #include "qemu/log.h"
40 #include "trace.h"
41 #include "qom/object.h"
42
43 #define TYPE_SDHCI_BUS "sdhci-bus"
44 /* This is reusing the SDBus typedef from SD_BUS */
45 DECLARE_INSTANCE_CHECKER(SDBus, SDHCI_BUS,
46 TYPE_SDHCI_BUS)
47
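/*
 * Update only part of a register: 'mask' selects the bits of 'reg' to keep
 * and 'val' supplies the new bits, already shifted into place. sdhci_write()
 * builds both from the access size and offset, so a sub-word MMIO store only
 * touches the byte lanes it actually covers.
 */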
48 #define MASKED_WRITE(reg, mask, val) (reg = (reg & (mask)) | (val))
49
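/*
 * MAXBLOCKLENGTH is a two-bit capability field: 0 -> 512, 1 -> 1024 and
 * 2 -> 2048 bytes (larger values are rejected in sdhci_check_capareg()),
 * hence the buffer size of 1 << (9 + field).
 */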
50 static inline unsigned int sdhci_get_fifolen(SDHCIState *s)
51 {
52 return 1 << (9 + FIELD_EX32(s->capareg, SDHC_CAPAB, MAXBLOCKLENGTH));
53 }
54
55 /* return true on error */
56 static bool sdhci_check_capab_freq_range(SDHCIState *s, const char *desc,
57 uint8_t freq, Error **errp)
58 {
59 if (s->sd_spec_version >= 3) {
60 return false;
61 }
62 switch (freq) {
63 case 0:
64 case 10 ... 63:
65 break;
66 default:
67 error_setg(errp, "SD %s clock frequency can have value"
68 "in range 0-63 only", desc);
69 return true;
70 }
71 return false;
72 }
73
74 static void sdhci_check_capareg(SDHCIState *s, Error **errp)
75 {
76 uint64_t msk = s->capareg;
77 uint32_t val;
78 bool y;
79
80 switch (s->sd_spec_version) {
81 case 4:
82 val = FIELD_EX64(s->capareg, SDHC_CAPAB, BUS64BIT_V4);
83 trace_sdhci_capareg("64-bit system bus (v4)", val);
84 msk = FIELD_DP64(msk, SDHC_CAPAB, BUS64BIT_V4, 0);
85
86 val = FIELD_EX64(s->capareg, SDHC_CAPAB, UHS_II);
87 trace_sdhci_capareg("UHS-II", val);
88 msk = FIELD_DP64(msk, SDHC_CAPAB, UHS_II, 0);
89
90 val = FIELD_EX64(s->capareg, SDHC_CAPAB, ADMA3);
91 trace_sdhci_capareg("ADMA3", val);
92 msk = FIELD_DP64(msk, SDHC_CAPAB, ADMA3, 0);
93
94 /* fallthrough */
95 case 3:
96 val = FIELD_EX64(s->capareg, SDHC_CAPAB, ASYNC_INT);
97 trace_sdhci_capareg("async interrupt", val);
98 msk = FIELD_DP64(msk, SDHC_CAPAB, ASYNC_INT, 0);
99
100 val = FIELD_EX64(s->capareg, SDHC_CAPAB, SLOT_TYPE);
101 if (val) {
102 error_setg(errp, "slot-type not supported");
103 return;
104 }
105 trace_sdhci_capareg("slot type", val);
106 msk = FIELD_DP64(msk, SDHC_CAPAB, SLOT_TYPE, 0);
107
108 if (val != 2) {
109 val = FIELD_EX64(s->capareg, SDHC_CAPAB, EMBEDDED_8BIT);
110 trace_sdhci_capareg("8-bit bus", val);
111 }
112 msk = FIELD_DP64(msk, SDHC_CAPAB, EMBEDDED_8BIT, 0);
113
114 val = FIELD_EX64(s->capareg, SDHC_CAPAB, BUS_SPEED);
115 trace_sdhci_capareg("bus speed mask", val);
116 msk = FIELD_DP64(msk, SDHC_CAPAB, BUS_SPEED, 0);
117
118 val = FIELD_EX64(s->capareg, SDHC_CAPAB, DRIVER_STRENGTH);
119 trace_sdhci_capareg("driver strength mask", val);
120 msk = FIELD_DP64(msk, SDHC_CAPAB, DRIVER_STRENGTH, 0);
121
122 val = FIELD_EX64(s->capareg, SDHC_CAPAB, TIMER_RETUNING);
123 trace_sdhci_capareg("timer re-tuning", val);
124 msk = FIELD_DP64(msk, SDHC_CAPAB, TIMER_RETUNING, 0);
125
126 val = FIELD_EX64(s->capareg, SDHC_CAPAB, SDR50_TUNING);
127 trace_sdhci_capareg("use SDR50 tuning", val);
128 msk = FIELD_DP64(msk, SDHC_CAPAB, SDR50_TUNING, 0);
129
130 val = FIELD_EX64(s->capareg, SDHC_CAPAB, RETUNING_MODE);
131 trace_sdhci_capareg("re-tuning mode", val);
132 msk = FIELD_DP64(msk, SDHC_CAPAB, RETUNING_MODE, 0);
133
134 val = FIELD_EX64(s->capareg, SDHC_CAPAB, CLOCK_MULT);
135 trace_sdhci_capareg("clock multiplier", val);
136 msk = FIELD_DP64(msk, SDHC_CAPAB, CLOCK_MULT, 0);
137
138 /* fallthrough */
139 case 2: /* default version */
140 val = FIELD_EX64(s->capareg, SDHC_CAPAB, ADMA2);
141 trace_sdhci_capareg("ADMA2", val);
142 msk = FIELD_DP64(msk, SDHC_CAPAB, ADMA2, 0);
143
144 val = FIELD_EX64(s->capareg, SDHC_CAPAB, ADMA1);
145 trace_sdhci_capareg("ADMA1", val);
146 msk = FIELD_DP64(msk, SDHC_CAPAB, ADMA1, 0);
147
148 val = FIELD_EX64(s->capareg, SDHC_CAPAB, BUS64BIT);
149 trace_sdhci_capareg("64-bit system bus (v3)", val);
150 msk = FIELD_DP64(msk, SDHC_CAPAB, BUS64BIT, 0);
151
152 /* fallthrough */
153 case 1:
154 y = FIELD_EX64(s->capareg, SDHC_CAPAB, TOUNIT);
155 msk = FIELD_DP64(msk, SDHC_CAPAB, TOUNIT, 0);
156
157 val = FIELD_EX64(s->capareg, SDHC_CAPAB, TOCLKFREQ);
158 trace_sdhci_capareg(y ? "timeout (MHz)" : "timeout (kHz)", val);
159 if (sdhci_check_capab_freq_range(s, "timeout", val, errp)) {
160 return;
161 }
162 msk = FIELD_DP64(msk, SDHC_CAPAB, TOCLKFREQ, 0);
163
164 val = FIELD_EX64(s->capareg, SDHC_CAPAB, BASECLKFREQ);
165 trace_sdhci_capareg(y ? "base (MHz)" : "base (kHz)", val);
166 if (sdhci_check_capab_freq_range(s, "base", val, errp)) {
167 return;
168 }
169 msk = FIELD_DP64(msk, SDHC_CAPAB, BASECLKFREQ, 0);
170
171 val = FIELD_EX64(s->capareg, SDHC_CAPAB, MAXBLOCKLENGTH);
172 if (val >= 3) {
173 error_setg(errp, "block size can be 512, 1024 or 2048 only");
174 return;
175 }
176 trace_sdhci_capareg("max block length", sdhci_get_fifolen(s));
177 msk = FIELD_DP64(msk, SDHC_CAPAB, MAXBLOCKLENGTH, 0);
178
179 val = FIELD_EX64(s->capareg, SDHC_CAPAB, HIGHSPEED);
180 trace_sdhci_capareg("high speed", val);
181 msk = FIELD_DP64(msk, SDHC_CAPAB, HIGHSPEED, 0);
182
183 val = FIELD_EX64(s->capareg, SDHC_CAPAB, SDMA);
184 trace_sdhci_capareg("SDMA", val);
185 msk = FIELD_DP64(msk, SDHC_CAPAB, SDMA, 0);
186
187 val = FIELD_EX64(s->capareg, SDHC_CAPAB, SUSPRESUME);
188 trace_sdhci_capareg("suspend/resume", val);
189 msk = FIELD_DP64(msk, SDHC_CAPAB, SUSPRESUME, 0);
190
191 val = FIELD_EX64(s->capareg, SDHC_CAPAB, V33);
192 trace_sdhci_capareg("3.3v", val);
193 msk = FIELD_DP64(msk, SDHC_CAPAB, V33, 0);
194
195 val = FIELD_EX64(s->capareg, SDHC_CAPAB, V30);
196 trace_sdhci_capareg("3.0v", val);
197 msk = FIELD_DP64(msk, SDHC_CAPAB, V30, 0);
198
199 val = FIELD_EX64(s->capareg, SDHC_CAPAB, V18);
200 trace_sdhci_capareg("1.8v", val);
201 msk = FIELD_DP64(msk, SDHC_CAPAB, V18, 0);
202 break;
203
204 default:
205 error_setg(errp, "Unsupported spec version: %u", s->sd_spec_version);
206 }
207 if (msk) {
208 qemu_log_mask(LOG_UNIMP,
209 "SDHCI: unknown CAPAB mask: 0x%016" PRIx64 "\n", msk);
210 }
211 }
212
213 static uint8_t sdhci_slotint(SDHCIState *s)
214 {
215 return (s->norintsts & s->norintsigen) || (s->errintsts & s->errintsigen) ||
216 ((s->norintsts & SDHC_NIS_INSERT) && (s->wakcon & SDHC_WKUP_ON_INS)) ||
217 ((s->norintsts & SDHC_NIS_REMOVE) && (s->wakcon & SDHC_WKUP_ON_RMV));
218 }
219
220 /* Return true if IRQ was pending and delivered */
221 static bool sdhci_update_irq(SDHCIState *s)
222 {
223 bool pending = sdhci_slotint(s);
224
225 qemu_set_irq(s->irq, pending);
226
227 return pending;
228 }
229
230 static void sdhci_raise_insertion_irq(void *opaque)
231 {
232 SDHCIState *s = (SDHCIState *)opaque;
233
234 if (s->norintsts & SDHC_NIS_REMOVE) {
235 timer_mod(s->insert_timer,
236 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY);
237 } else {
238 s->prnsts = 0x1ff0000;
239 if (s->norintstsen & SDHC_NISEN_INSERT) {
240 s->norintsts |= SDHC_NIS_INSERT;
241 }
242 sdhci_update_irq(s);
243 }
244 }
245
246 static void sdhci_set_inserted(DeviceState *dev, bool level)
247 {
248 SDHCIState *s = (SDHCIState *)dev;
249
250 trace_sdhci_set_inserted(level ? "insert" : "eject");
251 if ((s->norintsts & SDHC_NIS_REMOVE) && level) {
252 /* Give target some time to notice card ejection */
253 timer_mod(s->insert_timer,
254 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_INSERTION_DELAY);
255 } else {
256 if (level) {
257 s->prnsts = 0x1ff0000;
258 if (s->norintstsen & SDHC_NISEN_INSERT) {
259 s->norintsts |= SDHC_NIS_INSERT;
260 }
261 } else {
262 s->prnsts = 0x1fa0000;
263 s->pwrcon &= ~SDHC_POWER_ON;
264 s->clkcon &= ~SDHC_CLOCK_SDCLK_EN;
265 if (s->norintstsen & SDHC_NISEN_REMOVE) {
266 s->norintsts |= SDHC_NIS_REMOVE;
267 }
268 }
269 sdhci_update_irq(s);
270 }
271 }
272
273 static void sdhci_set_readonly(DeviceState *dev, bool level)
274 {
275 SDHCIState *s = (SDHCIState *)dev;
276
277 if (s->wp_inverted) {
278 level = !level;
279 }
280
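    /*
     * In the Present State register, the Write Protect Switch Pin Level bit
     * reads as 1 when writes are enabled, so an asserted read-only switch
     * clears it.
     */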
281 if (level) {
282 s->prnsts &= ~SDHC_WRITE_PROTECT;
283 } else {
284 /* Write enabled */
285 s->prnsts |= SDHC_WRITE_PROTECT;
286 }
287 }
288
289 static void sdhci_reset(SDHCIState *s)
290 {
291 DeviceState *dev = DEVICE(s);
292
293 timer_del(s->insert_timer);
294 timer_del(s->transfer_timer);
295
296 /*
297 * Set all registers to 0. Capabilities/Version registers are not cleared
298 * and are assumed to always preserve the value given to them during
299 * initialization.
300 */
301 memset(&s->sdmasysad, 0, (uintptr_t)&s->capareg - (uintptr_t)&s->sdmasysad);
302
303 /* Reset other state based on current card insertion/readonly status */
304 sdhci_set_inserted(dev, sdbus_get_inserted(&s->sdbus));
305 sdhci_set_readonly(dev, sdbus_get_readonly(&s->sdbus));
306
307 s->data_count = 0;
308 s->stopped_state = sdhc_not_stopped;
309 s->pending_insert_state = false;
310 if (s->vendor == SDHCI_VENDOR_FSL) {
311 s->norintstsen = 0x013f;
312 s->errintstsen = 0x117f;
313 }
314 }
315
316 static void sdhci_poweron_reset(DeviceState *dev)
317 {
318 /*
319 * QOM (ie power-on) reset. This is identical to reset
320 * commanded via device register apart from handling of the
321 * 'pending insert on powerup' quirk.
322 */
323 SDHCIState *s = (SDHCIState *)dev;
324
325 sdhci_reset(s);
326
327 if (s->pending_insert_quirk) {
328 s->pending_insert_state = true;
329 }
330 }
331
332 static void sdhci_data_transfer(void *opaque);
333
334 #define BLOCK_SIZE_MASK (4 * KiB - 1)
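/*
 * In the Block Size register, bits [11:0] hold the transfer block size
 * (extracted with BLOCK_SIZE_MASK) and bits [14:12] hold the SDMA buffer
 * boundary used by sdhci_sdma_transfer_multi_blocks().
 */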
335
336 static void sdhci_send_command(SDHCIState *s)
337 {
338 SDRequest request;
339 uint8_t response[16];
340 int rlen;
341 bool timeout = false;
342
343 s->errintsts = 0;
344 s->acmd12errsts = 0;
345 request.cmd = s->cmdreg >> 8;
346 request.arg = s->argument;
347
348 trace_sdhci_send_command(request.cmd, request.arg);
349 rlen = sdbus_do_command(&s->sdbus, &request, response);
350
351 if (s->cmdreg & SDHC_CMD_RESPONSE) {
352 if (rlen == 4) {
353 s->rspreg[0] = ldl_be_p(response);
354 s->rspreg[1] = s->rspreg[2] = s->rspreg[3] = 0;
355 trace_sdhci_response4(s->rspreg[0]);
356 } else if (rlen == 16) {
357 s->rspreg[0] = ldl_be_p(&response[11]);
358 s->rspreg[1] = ldl_be_p(&response[7]);
359 s->rspreg[2] = ldl_be_p(&response[3]);
360 s->rspreg[3] = (response[0] << 16) | (response[1] << 8) |
361 response[2];
362 trace_sdhci_response16(s->rspreg[3], s->rspreg[2],
363 s->rspreg[1], s->rspreg[0]);
364 } else {
365 timeout = true;
366 trace_sdhci_error("timeout waiting for command response");
367 if (s->errintstsen & SDHC_EISEN_CMDTIMEOUT) {
368 s->errintsts |= SDHC_EIS_CMDTIMEOUT;
369 s->norintsts |= SDHC_NIS_ERR;
370 }
371 }
372
373 if (!(s->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
374 (s->norintstsen & SDHC_NISEN_TRSCMP) &&
375 (s->cmdreg & SDHC_CMD_RESPONSE) == SDHC_CMD_RSP_WITH_BUSY) {
376 s->norintsts |= SDHC_NIS_TRSCMP;
377 }
378 }
379
380 if (s->norintstsen & SDHC_NISEN_CMDCMP) {
381 s->norintsts |= SDHC_NIS_CMDCMP;
382 }
383
384 sdhci_update_irq(s);
385
386 if (!timeout && (s->blksize & BLOCK_SIZE_MASK) &&
387 (s->cmdreg & SDHC_CMD_DATA_PRESENT)) {
388 s->data_count = 0;
389 sdhci_data_transfer(s);
390 }
391 }
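/*
 * For reference, a guest typically reaches the path above with two MMIO
 * writes (a sketch; writel() is a hypothetical guest helper, the offsets
 * are the SDHC_* values from "sdhci-internal.h"):
 *
 *     writel(base + SDHC_ARGUMENT, arg);
 *     writel(base + SDHC_TRNMOD, (cmd_index << 24) | (rsp_flags << 16) | xfer_mode);
 *
 * The second write lands on the upper byte of the Command register, which
 * makes sdhci_write() call sdhci_send_command().
 */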
392
393 static void sdhci_end_transfer(SDHCIState *s)
394 {
395 /* Automatically send CMD12 to stop transfer if AutoCMD12 enabled */
396 if ((s->trnmod & SDHC_TRNS_ACMD12) != 0) {
397 SDRequest request;
398 uint8_t response[16];
399
400 request.cmd = 0x0C;
401 request.arg = 0;
402 trace_sdhci_end_transfer(request.cmd, request.arg);
403 sdbus_do_command(&s->sdbus, &request, response);
404 /* Auto CMD12 response goes to the upper Response register */
405 s->rspreg[3] = ldl_be_p(response);
406 }
407
408 s->prnsts &= ~(SDHC_DOING_READ | SDHC_DOING_WRITE |
409 SDHC_DAT_LINE_ACTIVE | SDHC_DATA_INHIBIT |
410 SDHC_SPACE_AVAILABLE | SDHC_DATA_AVAILABLE);
411
412 if (s->norintstsen & SDHC_NISEN_TRSCMP) {
413 s->norintsts |= SDHC_NIS_TRSCMP;
414 }
415
416 sdhci_update_irq(s);
417 }
418
419 /*
420 * Programmed i/o data transfer
421 */
422
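/*
 * PIO read flow as seen by a guest (a sketch; readl() is a hypothetical
 * guest helper): wait for the Buffer Read Ready interrupt or poll the
 * Present State register for DATA_AVAILABLE, then drain one block through
 * the Buffer Data Port:
 *
 *     for (i = 0; i < blksize; i += 4) {
 *         buf[i / 4] = readl(base + SDHC_BDATA);
 *     }
 *
 * Each such read ends up in sdhci_read_dataport() below.
 */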
423 /* Fill host controller's read buffer with BLKSIZE bytes of data from card */
424 static void sdhci_read_block_from_card(SDHCIState *s)
425 {
426 const uint16_t blk_size = s->blksize & BLOCK_SIZE_MASK;
427
428 if ((s->trnmod & SDHC_TRNS_MULTI) &&
429 (s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0)) {
430 return;
431 }
432
433 if (!FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, EXECUTE_TUNING)) {
434 /* Device is not in tuning */
435 sdbus_read_data(&s->sdbus, s->fifo_buffer, blk_size);
436 }
437
438 if (FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, EXECUTE_TUNING)) {
439 /* Device is in tuning */
440 s->hostctl2 &= ~R_SDHC_HOSTCTL2_EXECUTE_TUNING_MASK;
441 s->hostctl2 |= R_SDHC_HOSTCTL2_SAMPLING_CLKSEL_MASK;
442 s->prnsts &= ~(SDHC_DAT_LINE_ACTIVE | SDHC_DOING_READ |
443 SDHC_DATA_INHIBIT);
444 goto read_done;
445 }
446
447 /* New data now available for READ through Buffer Port Register */
448 s->prnsts |= SDHC_DATA_AVAILABLE;
449 if (s->norintstsen & SDHC_NISEN_RBUFRDY) {
450 s->norintsts |= SDHC_NIS_RBUFRDY;
451 }
452
453 /* Clear DAT line active status if that was the last block */
454 if ((s->trnmod & SDHC_TRNS_MULTI) == 0 ||
455 ((s->trnmod & SDHC_TRNS_MULTI) && s->blkcnt == 1)) {
456 s->prnsts &= ~SDHC_DAT_LINE_ACTIVE;
457 }
458
459 /*
460 * If a stop-at-block-gap request was set and this is not the last block
461 * of data, generate a Block Gap Event interrupt
462 */
463 if (s->stopped_state == sdhc_gap_read && (s->trnmod & SDHC_TRNS_MULTI) &&
464 s->blkcnt != 1) {
465 s->prnsts &= ~SDHC_DAT_LINE_ACTIVE;
466 if (s->norintstsen & SDHC_EISEN_BLKGAP) {
467 s->norintsts |= SDHC_EIS_BLKGAP;
468 }
469 }
470
471 read_done:
472 sdhci_update_irq(s);
473 }
474
475 /* Read @size bytes of data from host controller @s BUFFER DATA PORT register */
476 static uint32_t sdhci_read_dataport(SDHCIState *s, unsigned size)
477 {
478 uint32_t value = 0;
479 int i;
480
481 /* first check that valid data exists in host controller input buffer */
482 if ((s->prnsts & SDHC_DATA_AVAILABLE) == 0) {
483 trace_sdhci_error("read from empty buffer");
484 return 0;
485 }
486
487 for (i = 0; i < size; i++) {
488 assert(s->data_count < s->buf_maxsz);
489 value |= s->fifo_buffer[s->data_count] << i * 8;
490 s->data_count++;
491 /* check if we've read all valid data (blksize bytes) from buffer */
492 if ((s->data_count) >= (s->blksize & BLOCK_SIZE_MASK)) {
493 trace_sdhci_read_dataport(s->data_count);
494 s->prnsts &= ~SDHC_DATA_AVAILABLE; /* no more data in a buffer */
495 s->data_count = 0; /* next buff read must start at position [0] */
496
497 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
498 s->blkcnt--;
499 }
500
501 /* if that was the last block of data */
502 if ((s->trnmod & SDHC_TRNS_MULTI) == 0 ||
503 ((s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0)) ||
504 /* stop at gap request */
505 (s->stopped_state == sdhc_gap_read &&
506 !(s->prnsts & SDHC_DAT_LINE_ACTIVE))) {
507 sdhci_end_transfer(s);
508 } else { /* if there are more data, read next block from card */
509 sdhci_read_block_from_card(s);
510 }
511 break;
512 }
513 }
514
515 return value;
516 }
517
518 /* Write data from host controller FIFO to card */
519 static void sdhci_write_block_to_card(SDHCIState *s)
520 {
521 if (s->prnsts & SDHC_SPACE_AVAILABLE) {
522 if (s->norintstsen & SDHC_NISEN_WBUFRDY) {
523 s->norintsts |= SDHC_NIS_WBUFRDY;
524 }
525 sdhci_update_irq(s);
526 return;
527 }
528
529 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
530 if (s->blkcnt == 0) {
531 return;
532 } else {
533 s->blkcnt--;
534 }
535 }
536
537 sdbus_write_data(&s->sdbus, s->fifo_buffer, s->blksize & BLOCK_SIZE_MASK);
538
539 /* Next data can be written through the Buffer Data Port register */
540 s->prnsts |= SDHC_SPACE_AVAILABLE;
541
542 /* Finish transfer if that was the last block of data */
543 if ((s->trnmod & SDHC_TRNS_MULTI) == 0 ||
544 ((s->trnmod & SDHC_TRNS_MULTI) &&
545 (s->trnmod & SDHC_TRNS_BLK_CNT_EN) && (s->blkcnt == 0))) {
546 sdhci_end_transfer(s);
547 } else if (s->norintstsen & SDHC_NISEN_WBUFRDY) {
548 s->norintsts |= SDHC_NIS_WBUFRDY;
549 }
550
551 /* Generate Block Gap Event if requested and if not the last block */
552 if (s->stopped_state == sdhc_gap_write && (s->trnmod & SDHC_TRNS_MULTI) &&
553 s->blkcnt > 0) {
554 s->prnsts &= ~SDHC_DOING_WRITE;
555 if (s->norintstsen & SDHC_EISEN_BLKGAP) {
556 s->norintsts |= SDHC_EIS_BLKGAP;
557 }
558 sdhci_end_transfer(s);
559 }
560
561 sdhci_update_irq(s);
562 }
563
564 /*
565 * Write @size bytes of @value data to host controller @s Buffer Data Port
566 * register
567 */
568 static void sdhci_write_dataport(SDHCIState *s, uint32_t value, unsigned size)
569 {
570 unsigned i;
571
572 /* Check that there is free space left in a buffer */
573 if (!(s->prnsts & SDHC_SPACE_AVAILABLE)) {
574 trace_sdhci_error("Can't write to data buffer: buffer full");
575 return;
576 }
577
578 for (i = 0; i < size; i++) {
579 assert(s->data_count < s->buf_maxsz);
580 s->fifo_buffer[s->data_count] = value & 0xFF;
581 s->data_count++;
582 value >>= 8;
583 if (s->data_count >= (s->blksize & BLOCK_SIZE_MASK)) {
584 trace_sdhci_write_dataport(s->data_count);
585 s->data_count = 0;
586 s->prnsts &= ~SDHC_SPACE_AVAILABLE;
587 if (s->prnsts & SDHC_DOING_WRITE) {
588 sdhci_write_block_to_card(s);
589 }
590 }
591 }
592 }
593
594 /*
595 * Single DMA data transfer
596 */
597
598 /* Multi block SDMA transfer */
599 static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s)
600 {
601 bool page_aligned = false;
602 unsigned int begin;
603 const uint16_t block_size = s->blksize & BLOCK_SIZE_MASK;
604 uint32_t boundary_chk = 1 << (((s->blksize & ~BLOCK_SIZE_MASK) >> 12) + 12);
605 uint32_t boundary_count = boundary_chk - (s->sdmasysad % boundary_chk);
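    /*
     * BLKSIZE bits [14:12] select the SDMA buffer boundary, i.e. how often
     * the controller stops and raises a DMA interrupt: 0 -> 4 KiB up to
     * 7 -> 512 KiB, hence 1 << (field + 12). boundary_count is the number
     * of bytes left until the first boundary crossing from the current
     * SDMASYSAD.
     */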
606
607 if (!(s->trnmod & SDHC_TRNS_BLK_CNT_EN) || !s->blkcnt) {
608 qemu_log_mask(LOG_UNIMP, "infinite transfer is not supported\n");
609 return;
610 }
611
612 /*
613 * XXX: Some sd/mmc drivers (for example, u-boot-slp) do not account for
614 * a possible stop at the page boundary if the initial address is not
615 * page aligned; allow them to work properly
616 */
617 if ((s->sdmasysad % boundary_chk) == 0) {
618 page_aligned = true;
619 }
620
621 s->prnsts |= SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE;
622 if (s->trnmod & SDHC_TRNS_READ) {
623 s->prnsts |= SDHC_DOING_READ;
624 while (s->blkcnt) {
625 if (s->data_count == 0) {
626 sdbus_read_data(&s->sdbus, s->fifo_buffer, block_size);
627 }
628 begin = s->data_count;
629 if (((boundary_count + begin) < block_size) && page_aligned) {
630 s->data_count = boundary_count + begin;
631 boundary_count = 0;
632 } else {
633 s->data_count = block_size;
634 boundary_count -= block_size - begin;
635 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
636 s->blkcnt--;
637 }
638 }
639 dma_memory_write(s->dma_as, s->sdmasysad, &s->fifo_buffer[begin],
640 s->data_count - begin, MEMTXATTRS_UNSPECIFIED);
641 s->sdmasysad += s->data_count - begin;
642 if (s->data_count == block_size) {
643 s->data_count = 0;
644 }
645 if (page_aligned && boundary_count == 0) {
646 break;
647 }
648 }
649 } else {
650 s->prnsts |= SDHC_DOING_WRITE;
651 while (s->blkcnt) {
652 begin = s->data_count;
653 if (((boundary_count + begin) < block_size) && page_aligned) {
654 s->data_count = boundary_count + begin;
655 boundary_count = 0;
656 } else {
657 s->data_count = block_size;
658 boundary_count -= block_size - begin;
659 }
660 dma_memory_read(s->dma_as, s->sdmasysad, &s->fifo_buffer[begin],
661 s->data_count - begin, MEMTXATTRS_UNSPECIFIED);
662 s->sdmasysad += s->data_count - begin;
663 if (s->data_count == block_size) {
664 sdbus_write_data(&s->sdbus, s->fifo_buffer, block_size);
665 s->data_count = 0;
666 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
667 s->blkcnt--;
668 }
669 }
670 if (page_aligned && boundary_count == 0) {
671 break;
672 }
673 }
674 }
675
676 if (s->norintstsen & SDHC_NISEN_DMA) {
677 s->norintsts |= SDHC_NIS_DMA;
678 }
679
680 if (s->blkcnt == 0) {
681 sdhci_end_transfer(s);
682 } else {
683 sdhci_update_irq(s);
684 }
685 }
686
687 /* single block SDMA transfer */
688 static void sdhci_sdma_transfer_single_block(SDHCIState *s)
689 {
690 uint32_t datacnt = s->blksize & BLOCK_SIZE_MASK;
691
692 if (s->trnmod & SDHC_TRNS_READ) {
693 sdbus_read_data(&s->sdbus, s->fifo_buffer, datacnt);
694 dma_memory_write(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt,
695 MEMTXATTRS_UNSPECIFIED);
696 } else {
697 dma_memory_read(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt,
698 MEMTXATTRS_UNSPECIFIED);
699 sdbus_write_data(&s->sdbus, s->fifo_buffer, datacnt);
700 }
701 s->blkcnt--;
702
703 if (s->norintstsen & SDHC_NISEN_DMA) {
704 s->norintsts |= SDHC_NIS_DMA;
705 }
706
707 sdhci_end_transfer(s);
708 }
709
710 static void sdhci_sdma_transfer(SDHCIState *s)
711 {
712 if ((s->blkcnt == 1) || !(s->trnmod & SDHC_TRNS_MULTI)) {
713 sdhci_sdma_transfer_single_block(s);
714 } else {
715 sdhci_sdma_transfer_multi_blocks(s);
716 }
717 }
718
719 typedef struct ADMADescr {
720 hwaddr addr;
721 uint16_t length;
722 uint8_t attr;
723 uint8_t incr;
724 } ADMADescr;
725
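/*
 * For reference, the ADMA2 32-bit descriptor decoded below is a single
 * 64-bit little-endian word (a sketch of the layout as extracted here):
 * bits [63:32] data address, [31:16] length, [6:0] the attribute byte
 * holding the Valid/End/Int flags and the Act field (nop/transfer/link)
 * handled in sdhci_do_adma(). ADMA1 instead packs a 4 KiB-aligned address
 * or a length into one 32-bit word, and the 64-bit ADMA2 variant carries
 * a 64-bit address in a 96-bit descriptor (hence incr = 4, 8 or 12).
 */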
726 static void get_adma_description(SDHCIState *s, ADMADescr *dscr)
727 {
728 uint32_t adma1 = 0;
729 uint64_t adma2 = 0;
730 hwaddr entry_addr = (hwaddr)s->admasysaddr;
731 switch (SDHC_DMA_TYPE(s->hostctl1)) {
732 case SDHC_CTRL_ADMA2_32:
733 dma_memory_read(s->dma_as, entry_addr, &adma2, sizeof(adma2),
734 MEMTXATTRS_UNSPECIFIED);
735 adma2 = le64_to_cpu(adma2);
736 /*
737 * The spec does not specify endianness of descriptor table.
738 * We currently assume that it is LE.
739 */
740 dscr->addr = (hwaddr)extract64(adma2, 32, 32) & ~0x3ull;
741 dscr->length = (uint16_t)extract64(adma2, 16, 16);
742 dscr->attr = (uint8_t)extract64(adma2, 0, 7);
743 dscr->incr = 8;
744 break;
745 case SDHC_CTRL_ADMA1_32:
746 dma_memory_read(s->dma_as, entry_addr, &adma1, sizeof(adma1),
747 MEMTXATTRS_UNSPECIFIED);
748 adma1 = le32_to_cpu(adma1);
749 dscr->addr = (hwaddr)(adma1 & 0xFFFFF000);
750 dscr->attr = (uint8_t)extract32(adma1, 0, 7);
751 dscr->incr = 4;
752 if ((dscr->attr & SDHC_ADMA_ATTR_ACT_MASK) == SDHC_ADMA_ATTR_SET_LEN) {
753 dscr->length = (uint16_t)extract32(adma1, 12, 16);
754 } else {
755 dscr->length = 4 * KiB;
756 }
757 break;
758 case SDHC_CTRL_ADMA2_64:
759 dma_memory_read(s->dma_as, entry_addr, &dscr->attr, 1,
760 MEMTXATTRS_UNSPECIFIED);
761 dma_memory_read(s->dma_as, entry_addr + 2, &dscr->length, 2,
762 MEMTXATTRS_UNSPECIFIED);
763 dscr->length = le16_to_cpu(dscr->length);
764 dma_memory_read(s->dma_as, entry_addr + 4, &dscr->addr, 8,
765 MEMTXATTRS_UNSPECIFIED);
766 dscr->addr = le64_to_cpu(dscr->addr);
767 dscr->attr &= (uint8_t) ~0xC0;
768 dscr->incr = 12;
769 break;
770 }
771 }
772
773 /* Advanced DMA data transfer */
774
775 static void sdhci_do_adma(SDHCIState *s)
776 {
777 unsigned int begin, length;
778 const uint16_t block_size = s->blksize & BLOCK_SIZE_MASK;
779 const MemTxAttrs attrs = { .memory = true };
780 ADMADescr dscr = {};
781 MemTxResult res = MEMTX_ERROR;
782 int i;
783
784 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN && !s->blkcnt) {
785 /* Stop Multiple Transfer */
786 sdhci_end_transfer(s);
787 return;
788 }
789
790 for (i = 0; i < SDHC_ADMA_DESCS_PER_DELAY; ++i) {
791 s->admaerr &= ~SDHC_ADMAERR_LENGTH_MISMATCH;
792
793 get_adma_description(s, &dscr);
794 trace_sdhci_adma_loop(dscr.addr, dscr.length, dscr.attr);
795
796 if ((dscr.attr & SDHC_ADMA_ATTR_VALID) == 0) {
797 /* Indicate that error occurred in ST_FDS state */
798 s->admaerr &= ~SDHC_ADMAERR_STATE_MASK;
799 s->admaerr |= SDHC_ADMAERR_STATE_ST_FDS;
800
801 /* Generate ADMA error interrupt */
802 if (s->errintstsen & SDHC_EISEN_ADMAERR) {
803 s->errintsts |= SDHC_EIS_ADMAERR;
804 s->norintsts |= SDHC_NIS_ERR;
805 }
806
807 sdhci_update_irq(s);
808 return;
809 }
810
811 length = dscr.length ? dscr.length : 64 * KiB;
812
813 switch (dscr.attr & SDHC_ADMA_ATTR_ACT_MASK) {
814 case SDHC_ADMA_ATTR_ACT_TRAN: /* data transfer */
815 s->prnsts |= SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE;
816 if (s->trnmod & SDHC_TRNS_READ) {
817 s->prnsts |= SDHC_DOING_READ;
818 while (length) {
819 if (s->data_count == 0) {
820 sdbus_read_data(&s->sdbus, s->fifo_buffer, block_size);
821 }
822 begin = s->data_count;
823 if ((length + begin) < block_size) {
824 s->data_count = length + begin;
825 length = 0;
826 } else {
827 s->data_count = block_size;
828 length -= block_size - begin;
829 }
830 res = dma_memory_write(s->dma_as, dscr.addr,
831 &s->fifo_buffer[begin],
832 s->data_count - begin,
833 attrs);
834 if (res != MEMTX_OK) {
835 break;
836 }
837 dscr.addr += s->data_count - begin;
838 if (s->data_count == block_size) {
839 s->data_count = 0;
840 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
841 s->blkcnt--;
842 if (s->blkcnt == 0) {
843 break;
844 }
845 }
846 }
847 }
848 } else {
849 s->prnsts |= SDHC_DOING_WRITE;
850 while (length) {
851 begin = s->data_count;
852 if ((length + begin) < block_size) {
853 s->data_count = length + begin;
854 length = 0;
855 } else {
856 s->data_count = block_size;
857 length -= block_size - begin;
858 }
859 res = dma_memory_read(s->dma_as, dscr.addr,
860 &s->fifo_buffer[begin],
861 s->data_count - begin,
862 attrs);
863 if (res != MEMTX_OK) {
864 break;
865 }
866 dscr.addr += s->data_count - begin;
867 if (s->data_count == block_size) {
868 sdbus_write_data(&s->sdbus, s->fifo_buffer, block_size);
869 s->data_count = 0;
870 if (s->trnmod & SDHC_TRNS_BLK_CNT_EN) {
871 s->blkcnt--;
872 if (s->blkcnt == 0) {
873 break;
874 }
875 }
876 }
877 }
878 }
879 if (res != MEMTX_OK) {
880 s->data_count = 0;
881 if (s->errintstsen & SDHC_EISEN_ADMAERR) {
882 trace_sdhci_error("Set ADMA error flag");
883 s->errintsts |= SDHC_EIS_ADMAERR;
884 s->norintsts |= SDHC_NIS_ERR;
885 }
886 sdhci_update_irq(s);
887 } else {
888 s->admasysaddr += dscr.incr;
889 }
890 break;
891 case SDHC_ADMA_ATTR_ACT_LINK: /* link to next descriptor table */
892 s->admasysaddr = dscr.addr;
893 trace_sdhci_adma("link", s->admasysaddr);
894 break;
895 default:
896 s->admasysaddr += dscr.incr;
897 break;
898 }
899
900 if (dscr.attr & SDHC_ADMA_ATTR_INT) {
901 trace_sdhci_adma("interrupt", s->admasysaddr);
902 if (s->norintstsen & SDHC_NISEN_DMA) {
903 s->norintsts |= SDHC_NIS_DMA;
904 }
905
906 if (sdhci_update_irq(s) && !(dscr.attr & SDHC_ADMA_ATTR_END)) {
907 /* IRQ delivered, reschedule current transfer */
908 break;
909 }
910 }
911
912 /* ADMA transfer terminates if blkcnt == 0 or by END attribute */
913 if (((s->trnmod & SDHC_TRNS_BLK_CNT_EN) &&
914 (s->blkcnt == 0)) || (dscr.attr & SDHC_ADMA_ATTR_END)) {
915 trace_sdhci_adma_transfer_completed();
916 if (length || ((dscr.attr & SDHC_ADMA_ATTR_END) &&
917 (s->trnmod & SDHC_TRNS_BLK_CNT_EN) &&
918 s->blkcnt != 0)) {
919 trace_sdhci_error("SD/MMC host ADMA length mismatch");
920 s->admaerr |= SDHC_ADMAERR_LENGTH_MISMATCH |
921 SDHC_ADMAERR_STATE_ST_TFR;
922 if (s->errintstsen & SDHC_EISEN_ADMAERR) {
923 trace_sdhci_error("Set ADMA error flag");
924 s->errintsts |= SDHC_EIS_ADMAERR;
925 s->norintsts |= SDHC_NIS_ERR;
926 }
927
928 sdhci_update_irq(s);
929 }
930 sdhci_end_transfer(s);
931 return;
932 }
933
934 }
935
936 /* we have unfinished business - reschedule to continue ADMA */
937 timer_mod(s->transfer_timer,
938 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + SDHC_TRANSFER_DELAY);
939 }
940
941 /* Perform data transfer according to controller configuration */
942
943 static void sdhci_data_transfer(void *opaque)
944 {
945 SDHCIState *s = (SDHCIState *)opaque;
946
947 if (s->trnmod & SDHC_TRNS_DMA) {
948 switch (SDHC_DMA_TYPE(s->hostctl1)) {
949 case SDHC_CTRL_SDMA:
950 sdhci_sdma_transfer(s);
951 break;
952 case SDHC_CTRL_ADMA1_32:
953 if (!(s->capareg & R_SDHC_CAPAB_ADMA1_MASK)) {
954 trace_sdhci_error("ADMA1 not supported");
955 break;
956 }
957
958 sdhci_do_adma(s);
959 break;
960 case SDHC_CTRL_ADMA2_32:
961 if (!(s->capareg & R_SDHC_CAPAB_ADMA2_MASK)) {
962 trace_sdhci_error("ADMA2 not supported");
963 break;
964 }
965
966 sdhci_do_adma(s);
967 break;
968 case SDHC_CTRL_ADMA2_64:
969 if (!(s->capareg & R_SDHC_CAPAB_ADMA2_MASK) ||
970 !(s->capareg & R_SDHC_CAPAB_BUS64BIT_MASK)) {
971 trace_sdhci_error("64 bit ADMA not supported");
972 break;
973 }
974
975 sdhci_do_adma(s);
976 break;
977 default:
978 trace_sdhci_error("Unsupported DMA type");
979 break;
980 }
981 } else {
982 if ((s->trnmod & SDHC_TRNS_READ) && sdbus_data_ready(&s->sdbus)) {
983 s->prnsts |= SDHC_DOING_READ | SDHC_DATA_INHIBIT |
984 SDHC_DAT_LINE_ACTIVE;
985 sdhci_read_block_from_card(s);
986 } else {
987 s->prnsts |= SDHC_DOING_WRITE | SDHC_DAT_LINE_ACTIVE |
988 SDHC_SPACE_AVAILABLE | SDHC_DATA_INHIBIT;
989 sdhci_write_block_to_card(s);
990 }
991 }
992 }
993
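/*
 * A new command is accepted only while the SD clock is on; in addition,
 * when a data transfer is pending (DAT inhibit set or the transfer stopped
 * at a block gap), the command must not use the data lines and must not
 * signal busy, except for a busy-signalling abort command.
 */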
994 static bool sdhci_can_issue_command(SDHCIState *s)
995 {
996 if (!SDHC_CLOCK_IS_ON(s->clkcon) ||
997 (((s->prnsts & SDHC_DATA_INHIBIT) || s->stopped_state) &&
998 ((s->cmdreg & SDHC_CMD_DATA_PRESENT) ||
999 ((s->cmdreg & SDHC_CMD_RESPONSE) == SDHC_CMD_RSP_WITH_BUSY &&
1000 !(SDHC_COMMAND_TYPE(s->cmdreg) == SDHC_CMD_ABORT))))) {
1001 return false;
1002 }
1003
1004 return true;
1005 }
1006
1007 /*
1008 * The Buffer Data Port register must be accessed in a sequential and
1009 * continuous manner
1010 */
1011 static inline bool
1012 sdhci_buff_access_is_sequential(SDHCIState *s, unsigned byte_num)
1013 {
1014 if ((s->data_count & 0x3) != byte_num) {
1015 qemu_log_mask(LOG_GUEST_ERROR,
1016 "SDHCI: Non-sequential access to Buffer Data Port"
1017 " register is prohibited\n");
1018 return false;
1019 }
1020 return true;
1021 }
1022
1023 static void sdhci_resume_pending_transfer(SDHCIState *s)
1024 {
1025 timer_del(s->transfer_timer);
1026 sdhci_data_transfer(s);
1027 }
1028
1029 static uint64_t sdhci_read(void *opaque, hwaddr offset, unsigned size)
1030 {
1031 SDHCIState *s = (SDHCIState *)opaque;
1032 uint32_t ret = 0;
1033
1034 if (timer_pending(s->transfer_timer)) {
1035 sdhci_resume_pending_transfer(s);
1036 }
1037
1038 switch (offset & ~0x3) {
1039 case SDHC_SYSAD:
1040 ret = s->sdmasysad;
1041 break;
1042 case SDHC_BLKSIZE:
1043 ret = s->blksize | (s->blkcnt << 16);
1044 break;
1045 case SDHC_ARGUMENT:
1046 ret = s->argument;
1047 break;
1048 case SDHC_TRNMOD:
1049 ret = s->trnmod | (s->cmdreg << 16);
1050 break;
1051 case SDHC_RSPREG0 ... SDHC_RSPREG3:
1052 ret = s->rspreg[((offset & ~0x3) - SDHC_RSPREG0) >> 2];
1053 break;
1054 case SDHC_BDATA:
1055 if (sdhci_buff_access_is_sequential(s, offset - SDHC_BDATA)) {
1056 ret = sdhci_read_dataport(s, size);
1057 trace_sdhci_access("rd", size << 3, offset, "->", ret, ret);
1058 return ret;
1059 }
1060 break;
1061 case SDHC_PRNSTS:
1062 ret = s->prnsts;
1063 ret = FIELD_DP32(ret, SDHC_PRNSTS, DAT_LVL,
1064 sdbus_get_dat_lines(&s->sdbus));
1065 ret = FIELD_DP32(ret, SDHC_PRNSTS, CMD_LVL,
1066 sdbus_get_cmd_line(&s->sdbus));
1067 break;
1068 case SDHC_HOSTCTL:
1069 ret = s->hostctl1 | (s->pwrcon << 8) | (s->blkgap << 16) |
1070 (s->wakcon << 24);
1071 break;
1072 case SDHC_CLKCON:
1073 ret = s->clkcon | (s->timeoutcon << 16);
1074 break;
1075 case SDHC_NORINTSTS:
1076 ret = s->norintsts | (s->errintsts << 16);
1077 break;
1078 case SDHC_NORINTSTSEN:
1079 ret = s->norintstsen | (s->errintstsen << 16);
1080 break;
1081 case SDHC_NORINTSIGEN:
1082 ret = s->norintsigen | (s->errintsigen << 16);
1083 break;
1084 case SDHC_ACMD12ERRSTS:
1085 ret = s->acmd12errsts | (s->hostctl2 << 16);
1086 break;
1087 case SDHC_CAPAB:
1088 ret = (uint32_t)s->capareg;
1089 break;
1090 case SDHC_CAPAB + 4:
1091 ret = (uint32_t)(s->capareg >> 32);
1092 break;
1093 case SDHC_MAXCURR:
1094 ret = (uint32_t)s->maxcurr;
1095 break;
1096 case SDHC_MAXCURR + 4:
1097 ret = (uint32_t)(s->maxcurr >> 32);
1098 break;
1099 case SDHC_ADMAERR:
1100 ret = s->admaerr;
1101 break;
1102 case SDHC_ADMASYSADDR:
1103 ret = (uint32_t)s->admasysaddr;
1104 break;
1105 case SDHC_ADMASYSADDR + 4:
1106 ret = (uint32_t)(s->admasysaddr >> 32);
1107 break;
1108 case SDHC_SLOT_INT_STATUS:
1109 ret = (s->version << 16) | sdhci_slotint(s);
1110 break;
1111 default:
1112 qemu_log_mask(LOG_UNIMP, "SDHC rd_%ub @0x%02" HWADDR_PRIx " "
1113 "not implemented\n", size, offset);
1114 break;
1115 }
1116
1117 ret >>= (offset & 0x3) * 8;
1118 ret &= (1ULL << (size * 8)) - 1;
1119 trace_sdhci_access("rd", size << 3, offset, "->", ret, ret);
1120 return ret;
1121 }
1122
1123 static inline void sdhci_blkgap_write(SDHCIState *s, uint8_t value)
1124 {
1125 if ((value & SDHC_STOP_AT_GAP_REQ) && (s->blkgap & SDHC_STOP_AT_GAP_REQ)) {
1126 return;
1127 }
1128 s->blkgap = value & SDHC_STOP_AT_GAP_REQ;
1129
1130 if ((value & SDHC_CONTINUE_REQ) && s->stopped_state &&
1131 (s->blkgap & SDHC_STOP_AT_GAP_REQ) == 0) {
1132 if (s->stopped_state == sdhc_gap_read) {
1133 s->prnsts |= SDHC_DAT_LINE_ACTIVE | SDHC_DOING_READ;
1134 sdhci_read_block_from_card(s);
1135 } else {
1136 s->prnsts |= SDHC_DAT_LINE_ACTIVE | SDHC_DOING_WRITE;
1137 sdhci_write_block_to_card(s);
1138 }
1139 s->stopped_state = sdhc_not_stopped;
1140 } else if (!s->stopped_state && (value & SDHC_STOP_AT_GAP_REQ)) {
1141 if (s->prnsts & SDHC_DOING_READ) {
1142 s->stopped_state = sdhc_gap_read;
1143 } else if (s->prnsts & SDHC_DOING_WRITE) {
1144 s->stopped_state = sdhc_gap_write;
1145 }
1146 }
1147 }
1148
1149 static inline void sdhci_reset_write(SDHCIState *s, uint8_t value)
1150 {
1151 switch (value) {
1152 case SDHC_RESET_ALL:
1153 sdhci_reset(s);
1154 break;
1155 case SDHC_RESET_CMD:
1156 s->prnsts &= ~SDHC_CMD_INHIBIT;
1157 s->norintsts &= ~SDHC_NIS_CMDCMP;
1158 break;
1159 case SDHC_RESET_DATA:
1160 s->data_count = 0;
1161 s->prnsts &= ~(SDHC_SPACE_AVAILABLE | SDHC_DATA_AVAILABLE |
1162 SDHC_DOING_READ | SDHC_DOING_WRITE |
1163 SDHC_DATA_INHIBIT | SDHC_DAT_LINE_ACTIVE);
1164 s->blkgap &= ~(SDHC_STOP_AT_GAP_REQ | SDHC_CONTINUE_REQ);
1165 s->stopped_state = sdhc_not_stopped;
1166 s->norintsts &= ~(SDHC_NIS_WBUFRDY | SDHC_NIS_RBUFRDY |
1167 SDHC_NIS_DMA | SDHC_NIS_TRSCMP | SDHC_NIS_BLKGAP);
1168 break;
1169 }
1170 }
1171
1172 static void
1173 sdhci_write(void *opaque, hwaddr offset, uint64_t val, unsigned size)
1174 {
1175 SDHCIState *s = (SDHCIState *)opaque;
1176 unsigned shift = 8 * (offset & 0x3);
1177 uint32_t mask = ~(((1ULL << (size * 8)) - 1) << shift);
1178 uint32_t value = val;
1179 value <<= shift;
1180
1181 if (timer_pending(s->transfer_timer)) {
1182 sdhci_resume_pending_transfer(s);
1183 }
1184
1185 switch (offset & ~0x3) {
1186 case SDHC_SYSAD:
1187 if (!TRANSFERRING_DATA(s->prnsts)) {
1189 MASKED_WRITE(s->sdmasysad, mask, value);
1190 /* Writing to last byte of sdmasysad might trigger transfer */
1191 if (!(mask & 0xFF000000) && s->blkcnt &&
1192 (s->blksize & BLOCK_SIZE_MASK) &&
1193 SDHC_DMA_TYPE(s->hostctl1) == SDHC_CTRL_SDMA) {
1194 sdhci_sdma_transfer(s);
1195 }
1196 }
1197 break;
1198 case SDHC_BLKSIZE:
1199 if (!TRANSFERRING_DATA(s->prnsts)) {
1200 uint16_t blksize = s->blksize;
1201
1202 /*
1203 * [14:12] SDMA Buffer Boundary
1204 * [11:00] Transfer Block Size
1205 */
1206 MASKED_WRITE(s->blksize, mask, extract32(value, 0, 15));
1207 MASKED_WRITE(s->blkcnt, mask >> 16, value >> 16);
1208
1209 /* Limit block size to the maximum buffer size */
1210 if (extract32(s->blksize, 0, 12) > s->buf_maxsz) {
1211 qemu_log_mask(LOG_GUEST_ERROR, "%s: Size 0x%x is larger than "
1212 "the maximum buffer 0x%x\n", __func__, s->blksize,
1213 s->buf_maxsz);
1214
1215 s->blksize = deposit32(s->blksize, 0, 12, s->buf_maxsz);
1216 }
1217
1218 /*
1219 * If the block size is programmed to a different value from
1220 * the previous one, reset the data pointer of s->fifo_buffer[]
1221 * so that s->fifo_buffer[] can be filled in using the new block
1222 * size in the next transfer.
1223 */
1224 if (blksize != s->blksize) {
1225 s->data_count = 0;
1226 }
1227 }
1228
1229 break;
1230 case SDHC_ARGUMENT:
1231 MASKED_WRITE(s->argument, mask, value);
1232 break;
1233 case SDHC_TRNMOD:
1234 /*
1235 * DMA can be enabled only if it is supported as indicated by
1236 * capabilities register
1237 */
1238 if (!(s->capareg & R_SDHC_CAPAB_SDMA_MASK)) {
1239 value &= ~SDHC_TRNS_DMA;
1240 }
1241
1242 /* TRNMOD writes are inhibited while Command Inhibit (DAT) is true */
1243 if (s->prnsts & SDHC_DATA_INHIBIT) {
1244 mask |= 0xffff;
1245 }
1246
1247 MASKED_WRITE(s->trnmod, mask, value & SDHC_TRNMOD_MASK);
1248 MASKED_WRITE(s->cmdreg, mask >> 16, value >> 16);
1249
1250 /* Writing to the upper byte of CMDREG triggers SD command generation */
1251 if ((mask & 0xFF000000) || !sdhci_can_issue_command(s)) {
1252 break;
1253 }
1254
1255 sdhci_send_command(s);
1256 break;
1257 case SDHC_BDATA:
1258 if (sdhci_buff_access_is_sequential(s, offset - SDHC_BDATA)) {
1259 sdhci_write_dataport(s, value >> shift, size);
1260 }
1261 break;
1262 case SDHC_HOSTCTL:
1263 if (!(mask & 0xFF0000)) {
1264 sdhci_blkgap_write(s, value >> 16);
1265 }
1266 MASKED_WRITE(s->hostctl1, mask, value);
1267 MASKED_WRITE(s->pwrcon, mask >> 8, value >> 8);
1268 MASKED_WRITE(s->wakcon, mask >> 24, value >> 24);
1269 if (!(s->prnsts & SDHC_CARD_PRESENT) || ((s->pwrcon >> 1) & 0x7) < 5 ||
1270 !(s->capareg & (1 << (31 - ((s->pwrcon >> 1) & 0x7))))) {
1271 s->pwrcon &= ~SDHC_POWER_ON;
1272 }
1273 break;
1274 case SDHC_CLKCON:
1275 if (!(mask & 0xFF000000)) {
1276 sdhci_reset_write(s, value >> 24);
1277 }
1278 MASKED_WRITE(s->clkcon, mask, value);
1279 MASKED_WRITE(s->timeoutcon, mask >> 16, value >> 16);
1280 if (s->clkcon & SDHC_CLOCK_INT_EN) {
1281 s->clkcon |= SDHC_CLOCK_INT_STABLE;
1282 } else {
1283 s->clkcon &= ~SDHC_CLOCK_INT_STABLE;
1284 }
1285 break;
1286 case SDHC_NORINTSTS:
1287 if (s->norintstsen & SDHC_NISEN_CARDINT) {
1288 value &= ~SDHC_NIS_CARDINT;
1289 }
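        /* Both interrupt status registers are write-1-to-clear */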
1290 s->norintsts &= mask | ~value;
1291 s->errintsts &= (mask >> 16) | ~(value >> 16);
1292 if (s->errintsts) {
1293 s->norintsts |= SDHC_NIS_ERR;
1294 } else {
1295 s->norintsts &= ~SDHC_NIS_ERR;
1296 }
1297 sdhci_update_irq(s);
1298 break;
1299 case SDHC_NORINTSTSEN:
1300 MASKED_WRITE(s->norintstsen, mask, value);
1301 MASKED_WRITE(s->errintstsen, mask >> 16, value >> 16);
1302 s->norintsts &= s->norintstsen;
1303 s->errintsts &= s->errintstsen;
1304 if (s->errintsts) {
1305 s->norintsts |= SDHC_NIS_ERR;
1306 } else {
1307 s->norintsts &= ~SDHC_NIS_ERR;
1308 }
1309 /*
1310 * Quirk for Raspberry Pi: pending card insert interrupt
1311 * appears when first enabled after power on
1312 */
1313 if ((s->norintstsen & SDHC_NISEN_INSERT) && s->pending_insert_state) {
1314 assert(s->pending_insert_quirk);
1315 s->norintsts |= SDHC_NIS_INSERT;
1316 s->pending_insert_state = false;
1317 }
1318 sdhci_update_irq(s);
1319 break;
1320 case SDHC_NORINTSIGEN:
1321 MASKED_WRITE(s->norintsigen, mask, value);
1322 MASKED_WRITE(s->errintsigen, mask >> 16, value >> 16);
1323 sdhci_update_irq(s);
1324 break;
1325 case SDHC_ADMAERR:
1326 MASKED_WRITE(s->admaerr, mask, value);
1327 break;
1328 case SDHC_ADMASYSADDR:
1329 s->admasysaddr = (s->admasysaddr & (0xFFFFFFFF00000000ULL |
1330 (uint64_t)mask)) | (uint64_t)value;
1331 break;
1332 case SDHC_ADMASYSADDR + 4:
1333 s->admasysaddr = (s->admasysaddr & (0x00000000FFFFFFFFULL |
1334 ((uint64_t)mask << 32))) | ((uint64_t)value << 32);
1335 break;
1336 case SDHC_FEAER:
1337 s->acmd12errsts |= value;
1338 s->errintsts |= (value >> 16) & s->errintstsen;
1339 if (s->acmd12errsts) {
1340 s->errintsts |= SDHC_EIS_CMD12ERR;
1341 }
1342 if (s->errintsts) {
1343 s->norintsts |= SDHC_NIS_ERR;
1344 }
1345 sdhci_update_irq(s);
1346 break;
1347 case SDHC_ACMD12ERRSTS:
1348 MASKED_WRITE(s->acmd12errsts, mask, value & UINT16_MAX);
1349 if (s->uhs_mode >= UHS_I) {
1350 MASKED_WRITE(s->hostctl2, mask >> 16, value >> 16);
1351
1352 if (FIELD_EX32(s->hostctl2, SDHC_HOSTCTL2, V18_ENA)) {
1353 sdbus_set_voltage(&s->sdbus, SD_VOLTAGE_1_8V);
1354 } else {
1355 sdbus_set_voltage(&s->sdbus, SD_VOLTAGE_3_3V);
1356 }
1357 }
1358 break;
1359
1360 case SDHC_CAPAB:
1361 case SDHC_CAPAB + 4:
1362 case SDHC_MAXCURR:
1363 case SDHC_MAXCURR + 4:
1364 qemu_log_mask(LOG_GUEST_ERROR, "SDHC wr_%ub @0x%02" HWADDR_PRIx
1365 " <- 0x%08x read-only\n", size, offset, value >> shift);
1366 break;
1367
1368 default:
1369 qemu_log_mask(LOG_UNIMP, "SDHC wr_%ub @0x%02" HWADDR_PRIx " <- 0x%08x "
1370 "not implemented\n", size, offset, value >> shift);
1371 break;
1372 }
1373 trace_sdhci_access("wr", size << 3, offset, "<-",
1374 value >> shift, value >> shift);
1375 }
1376
1377 static const MemoryRegionOps sdhci_mmio_le_ops = {
1378 .read = sdhci_read,
1379 .write = sdhci_write,
1380 .valid = {
1381 .min_access_size = 1,
1382 .max_access_size = 4,
1383 .unaligned = false
1384 },
1385 .endianness = DEVICE_LITTLE_ENDIAN,
1386 };
1387
1388 static const MemoryRegionOps sdhci_mmio_be_ops = {
1389 .read = sdhci_read,
1390 .write = sdhci_write,
1391 .impl = {
1392 .min_access_size = 4,
1393 .max_access_size = 4,
1394 },
1395 .valid = {
1396 .min_access_size = 1,
1397 .max_access_size = 4,
1398 .unaligned = false
1399 },
1400 .endianness = DEVICE_BIG_ENDIAN,
1401 };
1402
1403 static void sdhci_init_readonly_registers(SDHCIState *s, Error **errp)
1404 {
1405 ERRP_GUARD();
1406
1407 switch (s->sd_spec_version) {
1408 case 2 ... 3:
1409 break;
1410 default:
1411 error_setg(errp, "Only Spec v2/v3 are supported");
1412 return;
1413 }
1414 s->version = (SDHC_HCVER_VENDOR << 8) | (s->sd_spec_version - 1);
1415
1416 sdhci_check_capareg(s, errp);
1417 if (*errp) {
1418 return;
1419 }
1420 }
1421
1422 /* --- qdev common --- */
1423
1424 void sdhci_initfn(SDHCIState *s)
1425 {
1426 qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_SDHCI_BUS, DEVICE(s), "sd-bus");
1427
1428 s->insert_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1429 sdhci_raise_insertion_irq, s);
1430 s->transfer_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1431 sdhci_data_transfer, s);
1432
1433 s->io_ops = &sdhci_mmio_le_ops;
1434 }
1435
1436 void sdhci_uninitfn(SDHCIState *s)
1437 {
1438 timer_free(s->insert_timer);
1439 timer_free(s->transfer_timer);
1440
1441 g_free(s->fifo_buffer);
1442 s->fifo_buffer = NULL;
1443 }
1444
1445 void sdhci_common_realize(SDHCIState *s, Error **errp)
1446 {
1447 ERRP_GUARD();
1448
1449 switch (s->endianness) {
1450 case DEVICE_LITTLE_ENDIAN:
1451 /* s->io_ops is little endian by default */
1452 break;
1453 case DEVICE_BIG_ENDIAN:
1454 if (s->io_ops != &sdhci_mmio_le_ops) {
1455 error_setg(errp, "SD controller doesn't support big endianness");
1456 return;
1457 }
1458 s->io_ops = &sdhci_mmio_be_ops;
1459 break;
1460 default:
1461 error_setg(errp, "Incorrect endianness");
1462 return;
1463 }
1464
1465 sdhci_init_readonly_registers(s, errp);
1466 if (*errp) {
1467 return;
1468 }
1469
1470 s->buf_maxsz = sdhci_get_fifolen(s);
1471 s->fifo_buffer = g_malloc0(s->buf_maxsz);
1472
1473 memory_region_init_io(&s->iomem, OBJECT(s), s->io_ops, s, "sdhci",
1474 SDHC_REGISTERS_MAP_SIZE);
1475 }
1476
1477 void sdhci_common_unrealize(SDHCIState *s)
1478 {
1479 /*
1480 * This function is expected to be called only once for each class:
1481 * - SysBus: via DeviceClass->unrealize(),
1482 * - PCI: via PCIDeviceClass->exit().
1483 * However to avoid double-free and/or use-after-free we still nullify
1484 * this variable (better safe than sorry!).
1485 */
1486 g_free(s->fifo_buffer);
1487 s->fifo_buffer = NULL;
1488 }
1489
1490 static bool sdhci_pending_insert_vmstate_needed(void *opaque)
1491 {
1492 SDHCIState *s = opaque;
1493
1494 return s->pending_insert_state;
1495 }
1496
1497 static const VMStateDescription sdhci_pending_insert_vmstate = {
1498 .name = "sdhci/pending-insert",
1499 .version_id = 1,
1500 .minimum_version_id = 1,
1501 .needed = sdhci_pending_insert_vmstate_needed,
1502 .fields = (const VMStateField[]) {
1503 VMSTATE_BOOL(pending_insert_state, SDHCIState),
1504 VMSTATE_END_OF_LIST()
1505 },
1506 };
1507
1508 const VMStateDescription sdhci_vmstate = {
1509 .name = "sdhci",
1510 .version_id = 1,
1511 .minimum_version_id = 1,
1512 .fields = (const VMStateField[]) {
1513 VMSTATE_UINT32(sdmasysad, SDHCIState),
1514 VMSTATE_UINT16(blksize, SDHCIState),
1515 VMSTATE_UINT16(blkcnt, SDHCIState),
1516 VMSTATE_UINT32(argument, SDHCIState),
1517 VMSTATE_UINT16(trnmod, SDHCIState),
1518 VMSTATE_UINT16(cmdreg, SDHCIState),
1519 VMSTATE_UINT32_ARRAY(rspreg, SDHCIState, 4),
1520 VMSTATE_UINT32(prnsts, SDHCIState),
1521 VMSTATE_UINT8(hostctl1, SDHCIState),
1522 VMSTATE_UINT8(pwrcon, SDHCIState),
1523 VMSTATE_UINT8(blkgap, SDHCIState),
1524 VMSTATE_UINT8(wakcon, SDHCIState),
1525 VMSTATE_UINT16(clkcon, SDHCIState),
1526 VMSTATE_UINT8(timeoutcon, SDHCIState),
1527 VMSTATE_UINT8(admaerr, SDHCIState),
1528 VMSTATE_UINT16(norintsts, SDHCIState),
1529 VMSTATE_UINT16(errintsts, SDHCIState),
1530 VMSTATE_UINT16(norintstsen, SDHCIState),
1531 VMSTATE_UINT16(errintstsen, SDHCIState),
1532 VMSTATE_UINT16(norintsigen, SDHCIState),
1533 VMSTATE_UINT16(errintsigen, SDHCIState),
1534 VMSTATE_UINT16(acmd12errsts, SDHCIState),
1535 VMSTATE_UINT16(data_count, SDHCIState),
1536 VMSTATE_UINT64(admasysaddr, SDHCIState),
1537 VMSTATE_UINT8(stopped_state, SDHCIState),
1538 VMSTATE_VBUFFER_UINT32(fifo_buffer, SDHCIState, 1, NULL, buf_maxsz),
1539 VMSTATE_TIMER_PTR(insert_timer, SDHCIState),
1540 VMSTATE_TIMER_PTR(transfer_timer, SDHCIState),
1541 VMSTATE_END_OF_LIST()
1542 },
1543 .subsections = (const VMStateDescription * const []) {
1544 &sdhci_pending_insert_vmstate,
1545 NULL
1546 },
1547 };
1548
1549 void sdhci_common_class_init(ObjectClass *klass, const void *data)
1550 {
1551 DeviceClass *dc = DEVICE_CLASS(klass);
1552
1553 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
1554 dc->vmsd = &sdhci_vmstate;
1555 device_class_set_legacy_reset(dc, sdhci_poweron_reset);
1556 }
1557
1558 /* --- qdev SysBus --- */
1559
1560 static const Property sdhci_sysbus_properties[] = {
1561 DEFINE_SDHCI_COMMON_PROPERTIES(SDHCIState),
1562 DEFINE_PROP_BOOL("pending-insert-quirk", SDHCIState, pending_insert_quirk,
1563 false),
1564 DEFINE_PROP_LINK("dma", SDHCIState,
1565 dma_mr, TYPE_MEMORY_REGION, MemoryRegion *),
1566 DEFINE_PROP_BOOL("wp-inverted", SDHCIState,
1567 wp_inverted, false),
1568 };
1569
1570 static void sdhci_sysbus_init(Object *obj)
1571 {
1572 SDHCIState *s = SYSBUS_SDHCI(obj);
1573
1574 sdhci_initfn(s);
1575 }
1576
1577 static void sdhci_sysbus_finalize(Object *obj)
1578 {
1579 SDHCIState *s = SYSBUS_SDHCI(obj);
1580
1581 if (s->dma_mr) {
1582 object_unparent(OBJECT(s->dma_mr));
1583 }
1584
1585 sdhci_uninitfn(s);
1586 }
1587
1588 static void sdhci_sysbus_realize(DeviceState *dev, Error **errp)
1589 {
1590 ERRP_GUARD();
1591 SDHCIState *s = SYSBUS_SDHCI(dev);
1592 SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
1593
1594 sdhci_common_realize(s, errp);
1595 if (*errp) {
1596 return;
1597 }
1598
1599 if (s->dma_mr) {
1600 s->dma_as = &s->sysbus_dma_as;
1601 address_space_init(s->dma_as, s->dma_mr, "sdhci-dma");
1602 } else {
1603 /* use system_memory() if property "dma" not set */
1604 s->dma_as = &address_space_memory;
1605 }
1606
1607 sysbus_init_irq(sbd, &s->irq);
1608
1609 sysbus_init_mmio(sbd, &s->iomem);
1610 }
1611
1612 static void sdhci_sysbus_unrealize(DeviceState *dev)
1613 {
1614 SDHCIState *s = SYSBUS_SDHCI(dev);
1615
1616 sdhci_common_unrealize(s);
1617
1618 if (s->dma_mr) {
1619 address_space_destroy(s->dma_as);
1620 }
1621 }
1622
1623 static void sdhci_sysbus_class_init(ObjectClass *klass, const void *data)
1624 {
1625 DeviceClass *dc = DEVICE_CLASS(klass);
1626
1627 device_class_set_props(dc, sdhci_sysbus_properties);
1628 dc->realize = sdhci_sysbus_realize;
1629 dc->unrealize = sdhci_sysbus_unrealize;
1630
1631 sdhci_common_class_init(klass, data);
1632 }
1633
1634 /* --- qdev bus master --- */
1635
1636 static void sdhci_bus_class_init(ObjectClass *klass, const void *data)
1637 {
1638 SDBusClass *sbc = SD_BUS_CLASS(klass);
1639
1640 sbc->set_inserted = sdhci_set_inserted;
1641 sbc->set_readonly = sdhci_set_readonly;
1642 }
1643
1644 /* --- qdev i.MX eSDHC --- */
1645
1646 #define USDHC_MIX_CTRL 0x48
1647
1648 #define USDHC_VENDOR_SPEC 0xc0
1649 #define USDHC_IMX_FRC_SDCLK_ON (1 << 8)
1650
1651 #define USDHC_DLL_CTRL 0x60
1652
1653 #define USDHC_TUNING_CTRL 0xcc
1654 #define USDHC_TUNE_CTRL_STATUS 0x68
1655 #define USDHC_WTMK_LVL 0x44
1656
1657 /* Undocumented register used by guests working around erratum ERR004536 */
1658 #define USDHC_UNDOCUMENTED_REG27 0x6c
1659
1660 #define USDHC_CTRL_4BITBUS (0x1 << 1)
1661 #define USDHC_CTRL_8BITBUS (0x2 << 1)
1662
1663 #define USDHC_PRNSTS_SDSTB (1 << 3)
1664
1665 static uint64_t usdhc_read(void *opaque, hwaddr offset, unsigned size)
1666 {
1667 SDHCIState *s = SYSBUS_SDHCI(opaque);
1668 uint32_t ret;
1669 uint16_t hostctl1;
1670
1671 switch (offset) {
1672 default:
1673 return sdhci_read(opaque, offset, size);
1674
1675 case SDHC_HOSTCTL:
1676 /*
1677 * For a detailed explanation on the following bit
1678 * manipulation code see comments in a similar part of
1679 * usdhc_write()
1680 */
1681 hostctl1 = SDHC_DMA_TYPE(s->hostctl1) << (8 - 3);
1682
1683 if (s->hostctl1 & SDHC_CTRL_8BITBUS) {
1684 hostctl1 |= USDHC_CTRL_8BITBUS;
1685 }
1686
1687 if (s->hostctl1 & SDHC_CTRL_4BITBUS) {
1688 hostctl1 |= USDHC_CTRL_4BITBUS;
1689 }
1690
1691 ret = hostctl1;
1692 ret |= (uint32_t)s->blkgap << 16;
1693 ret |= (uint32_t)s->wakcon << 24;
1694
1695 break;
1696
1697 case SDHC_PRNSTS:
1698 /* Add SDSTB (SD Clock Stable) bit to PRNSTS */
1699 ret = sdhci_read(opaque, offset, size) & ~USDHC_PRNSTS_SDSTB;
1700 if (s->clkcon & SDHC_CLOCK_INT_STABLE) {
1701 ret |= USDHC_PRNSTS_SDSTB;
1702 }
1703 break;
1704
1705 case USDHC_VENDOR_SPEC:
1706 ret = s->vendor_spec;
1707 break;
1708 case USDHC_DLL_CTRL:
1709 case USDHC_TUNE_CTRL_STATUS:
1710 case USDHC_UNDOCUMENTED_REG27:
1711 case USDHC_TUNING_CTRL:
1712 case USDHC_MIX_CTRL:
1713 case USDHC_WTMK_LVL:
1714 ret = 0;
1715 break;
1716 }
1717
1718 return ret;
1719 }
1720
1721 static void
1722 usdhc_write(void *opaque, hwaddr offset, uint64_t val, unsigned size)
1723 {
1724 SDHCIState *s = SYSBUS_SDHCI(opaque);
1725 uint8_t hostctl1;
1726 uint32_t value = (uint32_t)val;
1727
1728 switch (offset) {
1729 case USDHC_DLL_CTRL:
1730 case USDHC_TUNE_CTRL_STATUS:
1731 case USDHC_UNDOCUMENTED_REG27:
1732 case USDHC_TUNING_CTRL:
1733 case USDHC_WTMK_LVL:
1734 break;
1735
1736 case USDHC_VENDOR_SPEC:
1737 s->vendor_spec = value;
1738 if (value & USDHC_IMX_FRC_SDCLK_ON) {
1739 s->prnsts &= ~SDHC_IMX_CLOCK_GATE_OFF;
1740 } else {
1741 s->prnsts |= SDHC_IMX_CLOCK_GATE_OFF;
1742 }
1743 break;
1744
1745 case SDHC_HOSTCTL:
1746 /*
1747 * Here's what ESDHCI has at offset 0x28 (SDHC_HOSTCTL)
1748 *
1749 * 7 6 5 4 3 2 1 0
1750 * |-----------+--------+--------+-----------+----------+---------|
1751 * | Card | Card | Endian | DATA3 | Data | Led |
1752 * | Detect | Detect | Mode | as Card | Transfer | Control |
1753 * | Signal | Test | | Detection | Width | |
1754 * | Selection | Level | | Pin | | |
1755 * |-----------+--------+--------+-----------+----------+---------|
1756 *
1757 * and 0x29
1758 *
1759 * 15 10 9 8
1760 * |----------+------|
1761 * | Reserved | DMA |
1762 * | | Sel. |
1763 * | | |
1764 * |----------+------|
1765 *
1766 * and here's what the SDHCI spec expects those offsets to be:
1767 *
1768 * 0x28 (Host Control Register)
1769 *
1770 * 7 6 5 4 3 2 1 0
1771 * |--------+--------+----------+------+--------+----------+---------|
1772 * | Card | Card | Extended | DMA | High | Data | LED |
1773 * | Detect | Detect | Data | Sel. | Speed | Transfer | Control |
1774 * | Signal | Test | Transfer | | Enable | Width | |
1775 * | Sel. | Level | Width | | | | |
1776 * |--------+--------+----------+------+--------+----------+---------|
1777 *
1778 * and 0x29 (Power Control Register)
1779 *
1780 * |----------------------------------|
1781 * | Power Control Register |
1782 * | |
1783 * | Description omitted, |
1784 * | since it has no analog in ESDHCI |
1785 * | |
1786 * |----------------------------------|
1787 *
1788 * Since offsets 0x2A and 0x2B should be compatible between
1789 * both IP specs, we only need to reconcile the least significant
1790 * 16 bits of the word we've been given.
1791 */
1792
1793 /*
1794 * First, save bits 7, 6 and 0 since they are identical
1795 */
1796 hostctl1 = value & (SDHC_CTRL_LED |
1797 SDHC_CTRL_CDTEST_INS |
1798 SDHC_CTRL_CDTEST_EN);
1799 /*
1800 * Second, split "Data Transfer Width" from bits 2 and 1 into
1801 * bits 5 and 1
1802 */
1803 if (value & USDHC_CTRL_8BITBUS) {
1804 hostctl1 |= SDHC_CTRL_8BITBUS;
1805 }
1806
1807 if (value & USDHC_CTRL_4BITBUS) {
1808 hostctl1 |= USDHC_CTRL_4BITBUS;
1809 }
1810
1811 /*
1812 * Third, move DMA select from bits 9 and 8 to bits 4 and 3
1813 */
1814 hostctl1 |= SDHC_DMA_TYPE(value >> (8 - 3));
1815
1816 /*
1817 * Now place the corrected value into the low 16 bits of the value
1818 * we are going to give standard SDHCI write function
1819 *
1820 * NOTE: This transformation should be the inverse of what can
1821 * be found in drivers/mmc/host/sdhci-esdhc-imx.c in Linux
1822 * kernel
1823 */
1824 value &= ~UINT16_MAX;
1825 value |= hostctl1;
1826 value |= (uint16_t)s->pwrcon << 8;
1827
1828 sdhci_write(opaque, offset, value, size);
1829 break;
1830
1831 case USDHC_MIX_CTRL:
1832 /*
1833 * So, when SD/MMC stack in Linux tries to write to "Transfer
1834 * Mode Register", ESDHC i.MX quirk code will translate it
1835 * into a write to ESDHC_MIX_CTRL, so we do the opposite in
1836 * order to get where we started
1837 *
1838 * Note that Auto CMD23 Enable bit is located in a wrong place
1839 * on i.MX, but since it is not used by QEMU we do not care.
1840 *
1841 * We don't want to call sdhci_write(.., SDHC_TRNMOD, ...)
1842 * here because it will result in a call to
1843 * sdhci_send_command(s) which we don't want.
1844 *
1845 */
1846 s->trnmod = value & UINT16_MAX;
1847 break;
1848 case SDHC_TRNMOD:
1849 /*
1850 * Similar to above, but this time a write to "Command
1851 * Register" will be translated into a 4-byte write to
1852 * "Transfer Mode register" where lower 16-bit of value would
1853 * be set to zero. So what we do is fill those bits with
1854 * cached value from s->trnmod and let the SDHCI
1855 * infrastructure handle the rest
1856 */
1857 sdhci_write(opaque, offset, val | s->trnmod, size);
1858 break;
1859 case SDHC_BLKSIZE:
1860 /*
1861 * ESDHCI does not implement "Host SDMA Buffer Boundary", and
1862 * Linux driver will try to zero this field out which will
1863 * break the rest of SDHCI emulation.
1864 *
1865 * Linux defaults to maximum possible setting (512K boundary)
1866 * and it seems to be the only option that i.MX IP implements,
1867 * so we artificially set it to that value.
1868 */
1869 val |= 0x7 << 12;
1870 /* FALLTHROUGH */
1871 default:
1872 sdhci_write(opaque, offset, val, size);
1873 break;
1874 }
1875 }
1876
1877 static const MemoryRegionOps usdhc_mmio_ops = {
1878 .read = usdhc_read,
1879 .write = usdhc_write,
1880 .valid = {
1881 .min_access_size = 1,
1882 .max_access_size = 4,
1883 .unaligned = false
1884 },
1885 .endianness = DEVICE_LITTLE_ENDIAN,
1886 };
1887
1888 static void imx_usdhc_init(Object *obj)
1889 {
1890 SDHCIState *s = SYSBUS_SDHCI(obj);
1891
1892 s->io_ops = &usdhc_mmio_ops;
1893 s->quirks = SDHCI_QUIRK_NO_BUSY_IRQ;
1894 }
1895
1896 /* --- qdev Samsung s3c --- */
1897
1898 #define S3C_SDHCI_CONTROL2 0x80
1899 #define S3C_SDHCI_CONTROL3 0x84
1900 #define S3C_SDHCI_CONTROL4 0x8c
1901
1902 static uint64_t sdhci_s3c_read(void *opaque, hwaddr offset, unsigned size)
1903 {
1904 uint64_t ret;
1905
1906 switch (offset) {
1907 case S3C_SDHCI_CONTROL2:
1908 case S3C_SDHCI_CONTROL3:
1909 case S3C_SDHCI_CONTROL4:
1910 /* ignore */
1911 ret = 0;
1912 break;
1913 default:
1914 ret = sdhci_read(opaque, offset, size);
1915 break;
1916 }
1917
1918 return ret;
1919 }
1920
1921 static void sdhci_s3c_write(void *opaque, hwaddr offset, uint64_t val,
1922 unsigned size)
1923 {
1924 switch (offset) {
1925 case S3C_SDHCI_CONTROL2:
1926 case S3C_SDHCI_CONTROL3:
1927 case S3C_SDHCI_CONTROL4:
1928 /* ignore */
1929 break;
1930 default:
1931 sdhci_write(opaque, offset, val, size);
1932 break;
1933 }
1934 }
1935
1936 static const MemoryRegionOps sdhci_s3c_mmio_ops = {
1937 .read = sdhci_s3c_read,
1938 .write = sdhci_s3c_write,
1939 .valid = {
1940 .min_access_size = 1,
1941 .max_access_size = 4,
1942 .unaligned = false
1943 },
1944 .endianness = DEVICE_LITTLE_ENDIAN,
1945 };
1946
1947 static void sdhci_s3c_init(Object *obj)
1948 {
1949 SDHCIState *s = SYSBUS_SDHCI(obj);
1950
1951 s->io_ops = &sdhci_s3c_mmio_ops;
1952 }
1953
1954 static const TypeInfo sdhci_types[] = {
1955 {
1956 .name = TYPE_SDHCI_BUS,
1957 .parent = TYPE_SD_BUS,
1958 .instance_size = sizeof(SDBus),
1959 .class_init = sdhci_bus_class_init,
1960 },
1961 {
1962 .name = TYPE_SYSBUS_SDHCI,
1963 .parent = TYPE_SYS_BUS_DEVICE,
1964 .instance_size = sizeof(SDHCIState),
1965 .instance_init = sdhci_sysbus_init,
1966 .instance_finalize = sdhci_sysbus_finalize,
1967 .class_init = sdhci_sysbus_class_init,
1968 },
1969 {
1970 .name = TYPE_IMX_USDHC,
1971 .parent = TYPE_SYSBUS_SDHCI,
1972 .instance_init = imx_usdhc_init,
1973 },
1974 {
1975 .name = TYPE_S3C_SDHCI,
1976 .parent = TYPE_SYSBUS_SDHCI,
1977 .instance_init = sdhci_s3c_init,
1978 },
1979 };
1980
1981 DEFINE_TYPES(sdhci_types)
1982