1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver 4 * 5 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved. 6 * 7 * Thanks to the following companies for their support: 8 * 9 * - JMicron (hardware and technical support) 10 */ 11 12 #include <linux/bitfield.h> 13 #include <linux/delay.h> 14 #include <linux/dmaengine.h> 15 #include <linux/ktime.h> 16 #include <linux/highmem.h> 17 #include <linux/io.h> 18 #include <linux/module.h> 19 #include <linux/dma-mapping.h> 20 #include <linux/slab.h> 21 #include <linux/scatterlist.h> 22 #include <linux/sizes.h> 23 #include <linux/regulator/consumer.h> 24 #include <linux/pm_runtime.h> 25 #include <linux/of.h> 26 #include <linux/bug.h> 27 #include <linux/leds.h> 28 29 #include <linux/mmc/mmc.h> 30 #include <linux/mmc/host.h> 31 #include <linux/mmc/card.h> 32 #include <linux/mmc/sdio.h> 33 #include <linux/mmc/slot-gpio.h> 34 35 #include "sdhci.h" 36 37 #define DRIVER_NAME "sdhci" 38 39 #define DBG(f, x...) \ 40 pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) 41 42 #define SDHCI_DUMP(f, x...) \ 43 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x) 44 45 #define MAX_TUNING_LOOP 40 46 47 static unsigned int debug_quirks = 0; 48 static unsigned int debug_quirks2; 49 50 static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd); 51 52 void sdhci_dumpregs(struct sdhci_host *host) 53 { 54 SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n"); 55 56 SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n", 57 sdhci_readl(host, SDHCI_DMA_ADDRESS), 58 sdhci_readw(host, SDHCI_HOST_VERSION)); 59 SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n", 60 sdhci_readw(host, SDHCI_BLOCK_SIZE), 61 sdhci_readw(host, SDHCI_BLOCK_COUNT)); 62 SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n", 63 sdhci_readl(host, SDHCI_ARGUMENT), 64 sdhci_readw(host, SDHCI_TRANSFER_MODE)); 65 SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n", 66 sdhci_readl(host, SDHCI_PRESENT_STATE), 67 sdhci_readb(host, SDHCI_HOST_CONTROL)); 68 SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n", 69 sdhci_readb(host, SDHCI_POWER_CONTROL), 70 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL)); 71 SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n", 72 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL), 73 sdhci_readw(host, SDHCI_CLOCK_CONTROL)); 74 SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n", 75 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL), 76 sdhci_readl(host, SDHCI_INT_STATUS)); 77 SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n", 78 sdhci_readl(host, SDHCI_INT_ENABLE), 79 sdhci_readl(host, SDHCI_SIGNAL_ENABLE)); 80 SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n", 81 sdhci_readw(host, SDHCI_AUTO_CMD_STATUS), 82 sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); 83 SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n", 84 sdhci_readl(host, SDHCI_CAPABILITIES), 85 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 86 SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n", 87 sdhci_readw(host, SDHCI_COMMAND), 88 sdhci_readl(host, SDHCI_MAX_CURRENT)); 89 SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n", 90 sdhci_readl(host, SDHCI_RESPONSE), 91 sdhci_readl(host, SDHCI_RESPONSE + 4)); 92 SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n", 93 sdhci_readl(host, SDHCI_RESPONSE + 8), 94 sdhci_readl(host, SDHCI_RESPONSE + 12)); 95 SDHCI_DUMP("Host ctl2: 0x%08x\n", 96 sdhci_readw(host, SDHCI_HOST_CONTROL2)); 97 98 if (host->flags & SDHCI_USE_ADMA) { 99 if (host->flags & SDHCI_USE_64_BIT_DMA) { 100 SDHCI_DUMP("ADMA Err: 0x%08x | 
ADMA Ptr: 0x%08x%08x\n", 101 sdhci_readl(host, SDHCI_ADMA_ERROR), 102 sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI), 103 sdhci_readl(host, SDHCI_ADMA_ADDRESS)); 104 } else { 105 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", 106 sdhci_readl(host, SDHCI_ADMA_ERROR), 107 sdhci_readl(host, SDHCI_ADMA_ADDRESS)); 108 } 109 } 110 111 if (host->ops->dump_uhs2_regs) 112 host->ops->dump_uhs2_regs(host); 113 114 if (host->ops->dump_vendor_regs) 115 host->ops->dump_vendor_regs(host); 116 117 SDHCI_DUMP("============================================\n"); 118 } 119 EXPORT_SYMBOL_GPL(sdhci_dumpregs); 120 121 /*****************************************************************************\ 122 * * 123 * Low level functions * 124 * * 125 \*****************************************************************************/ 126 127 static void sdhci_do_enable_v4_mode(struct sdhci_host *host) 128 { 129 u16 ctrl2; 130 131 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 132 if (ctrl2 & SDHCI_CTRL_V4_MODE) 133 return; 134 135 ctrl2 |= SDHCI_CTRL_V4_MODE; 136 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 137 } 138 139 /* 140 * This can be called before sdhci_add_host() by Vendor's host controller 141 * driver to enable v4 mode if supported. 142 */ 143 void sdhci_enable_v4_mode(struct sdhci_host *host) 144 { 145 host->v4_mode = true; 146 sdhci_do_enable_v4_mode(host); 147 } 148 EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode); 149 150 bool sdhci_data_line_cmd(struct mmc_command *cmd) 151 { 152 return cmd->data || cmd->flags & MMC_RSP_BUSY; 153 } 154 EXPORT_SYMBOL_GPL(sdhci_data_line_cmd); 155 156 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) 157 { 158 u32 present; 159 160 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || 161 !mmc_card_is_removable(host->mmc) || mmc_host_can_gpio_cd(host->mmc)) 162 return; 163 164 if (enable) { 165 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 166 SDHCI_CARD_PRESENT; 167 168 host->ier |= present ? 
SDHCI_INT_CARD_REMOVE : 169 SDHCI_INT_CARD_INSERT; 170 } else { 171 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 172 } 173 174 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 175 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 176 } 177 178 static void sdhci_enable_card_detection(struct sdhci_host *host) 179 { 180 sdhci_set_card_detection(host, true); 181 } 182 183 static void sdhci_disable_card_detection(struct sdhci_host *host) 184 { 185 sdhci_set_card_detection(host, false); 186 } 187 188 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) 189 { 190 if (host->bus_on) 191 return; 192 host->bus_on = true; 193 pm_runtime_get_noresume(mmc_dev(host->mmc)); 194 } 195 196 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) 197 { 198 if (!host->bus_on) 199 return; 200 host->bus_on = false; 201 pm_runtime_put_noidle(mmc_dev(host->mmc)); 202 } 203 204 void sdhci_reset(struct sdhci_host *host, u8 mask) 205 { 206 ktime_t timeout; 207 208 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); 209 210 if (mask & SDHCI_RESET_ALL) { 211 host->clock = 0; 212 /* Reset-all turns off SD Bus Power */ 213 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 214 sdhci_runtime_pm_bus_off(host); 215 } 216 217 /* Wait max 100 ms */ 218 timeout = ktime_add_ms(ktime_get(), 100); 219 220 /* hw clears the bit when it's done */ 221 while (1) { 222 bool timedout = ktime_after(ktime_get(), timeout); 223 224 if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) 225 break; 226 if (timedout) { 227 pr_err("%s: Reset 0x%x never completed.\n", 228 mmc_hostname(host->mmc), (int)mask); 229 sdhci_err_stats_inc(host, CTRL_TIMEOUT); 230 sdhci_dumpregs(host); 231 return; 232 } 233 udelay(10); 234 } 235 } 236 EXPORT_SYMBOL_GPL(sdhci_reset); 237 238 bool sdhci_do_reset(struct sdhci_host *host, u8 mask) 239 { 240 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 241 struct mmc_host *mmc = host->mmc; 242 243 if (!mmc->ops->get_cd(mmc)) 244 return false; 245 } 246 247 host->ops->reset(host, mask); 248 249 return true; 250 } 251 EXPORT_SYMBOL_GPL(sdhci_do_reset); 252 253 static void sdhci_reset_for_all(struct sdhci_host *host) 254 { 255 if (sdhci_do_reset(host, SDHCI_RESET_ALL)) { 256 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 257 if (host->ops->enable_dma) 258 host->ops->enable_dma(host); 259 } 260 /* Resetting the controller clears many */ 261 host->preset_enabled = false; 262 } 263 } 264 265 enum sdhci_reset_reason { 266 SDHCI_RESET_FOR_INIT, 267 SDHCI_RESET_FOR_REQUEST_ERROR, 268 SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY, 269 SDHCI_RESET_FOR_TUNING_ABORT, 270 SDHCI_RESET_FOR_CARD_REMOVED, 271 SDHCI_RESET_FOR_CQE_RECOVERY, 272 }; 273 274 static void sdhci_reset_for_reason(struct sdhci_host *host, enum sdhci_reset_reason reason) 275 { 276 if (host->quirks2 & SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER) { 277 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 278 return; 279 } 280 281 switch (reason) { 282 case SDHCI_RESET_FOR_INIT: 283 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 284 break; 285 case SDHCI_RESET_FOR_REQUEST_ERROR: 286 case SDHCI_RESET_FOR_TUNING_ABORT: 287 case SDHCI_RESET_FOR_CARD_REMOVED: 288 case SDHCI_RESET_FOR_CQE_RECOVERY: 289 sdhci_do_reset(host, SDHCI_RESET_CMD); 290 sdhci_do_reset(host, SDHCI_RESET_DATA); 291 break; 292 case SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY: 293 sdhci_do_reset(host, SDHCI_RESET_DATA); 294 break; 295 } 296 } 297 298 #define sdhci_reset_for(h, r) sdhci_reset_for_reason((h), SDHCI_RESET_FOR_##r) 299 300 static void 
sdhci_set_default_irqs(struct sdhci_host *host) 301 { 302 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | 303 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | 304 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC | 305 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END | 306 SDHCI_INT_RESPONSE; 307 308 if (host->tuning_mode == SDHCI_TUNING_MODE_2 || 309 host->tuning_mode == SDHCI_TUNING_MODE_3) 310 host->ier |= SDHCI_INT_RETUNE; 311 312 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 313 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 314 } 315 316 static void sdhci_config_dma(struct sdhci_host *host) 317 { 318 u8 ctrl; 319 u16 ctrl2; 320 321 if (host->version < SDHCI_SPEC_200) 322 return; 323 324 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 325 326 /* 327 * Always adjust the DMA selection as some controllers 328 * (e.g. JMicron) can't do PIO properly when the selection 329 * is ADMA. 330 */ 331 ctrl &= ~SDHCI_CTRL_DMA_MASK; 332 if (!(host->flags & SDHCI_REQ_USE_DMA)) 333 goto out; 334 335 /* Note if DMA Select is zero then SDMA is selected */ 336 if (host->flags & SDHCI_USE_ADMA) 337 ctrl |= SDHCI_CTRL_ADMA32; 338 339 if (host->flags & SDHCI_USE_64_BIT_DMA) { 340 /* 341 * If v4 mode, all supported DMA can be 64-bit addressing if 342 * controller supports 64-bit system address, otherwise only 343 * ADMA can support 64-bit addressing. 344 */ 345 if (host->v4_mode) { 346 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 347 ctrl2 |= SDHCI_CTRL_64BIT_ADDR; 348 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 349 } else if (host->flags & SDHCI_USE_ADMA) { 350 /* 351 * Don't need to undo SDHCI_CTRL_ADMA32 in order to 352 * set SDHCI_CTRL_ADMA64. 353 */ 354 ctrl |= SDHCI_CTRL_ADMA64; 355 } 356 } 357 358 out: 359 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 360 } 361 362 static void sdhci_init(struct sdhci_host *host, int soft) 363 { 364 struct mmc_host *mmc = host->mmc; 365 unsigned long flags; 366 367 if (soft) 368 sdhci_reset_for(host, INIT); 369 else 370 sdhci_reset_for_all(host); 371 372 if (host->v4_mode) 373 sdhci_do_enable_v4_mode(host); 374 375 spin_lock_irqsave(&host->lock, flags); 376 sdhci_set_default_irqs(host); 377 spin_unlock_irqrestore(&host->lock, flags); 378 379 host->cqe_on = false; 380 381 if (soft) { 382 /* force clock reconfiguration */ 383 host->clock = 0; 384 host->reinit_uhs = true; 385 mmc->ops->set_ios(mmc, &mmc->ios); 386 } 387 } 388 389 static void sdhci_reinit(struct sdhci_host *host) 390 { 391 u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); 392 393 sdhci_init(host, 0); 394 sdhci_enable_card_detection(host); 395 396 /* 397 * A change to the card detect bits indicates a change in present state, 398 * refer sdhci_set_card_detection(). A card detect interrupt might have 399 * been missed while the host controller was being reset, so trigger a 400 * rescan to check. 
401 */ 402 if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT))) 403 mmc_detect_change(host->mmc, msecs_to_jiffies(200)); 404 } 405 406 static void __sdhci_led_activate(struct sdhci_host *host) 407 { 408 u8 ctrl; 409 410 if (host->quirks & SDHCI_QUIRK_NO_LED) 411 return; 412 413 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 414 ctrl |= SDHCI_CTRL_LED; 415 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 416 } 417 418 static void __sdhci_led_deactivate(struct sdhci_host *host) 419 { 420 u8 ctrl; 421 422 if (host->quirks & SDHCI_QUIRK_NO_LED) 423 return; 424 425 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 426 ctrl &= ~SDHCI_CTRL_LED; 427 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 428 } 429 430 #if IS_REACHABLE(CONFIG_LEDS_CLASS) 431 static void sdhci_led_control(struct led_classdev *led, 432 enum led_brightness brightness) 433 { 434 struct sdhci_host *host = container_of(led, struct sdhci_host, led); 435 unsigned long flags; 436 437 spin_lock_irqsave(&host->lock, flags); 438 439 if (host->runtime_suspended) 440 goto out; 441 442 if (brightness == LED_OFF) 443 __sdhci_led_deactivate(host); 444 else 445 __sdhci_led_activate(host); 446 out: 447 spin_unlock_irqrestore(&host->lock, flags); 448 } 449 450 static int sdhci_led_register(struct sdhci_host *host) 451 { 452 struct mmc_host *mmc = host->mmc; 453 454 if (host->quirks & SDHCI_QUIRK_NO_LED) 455 return 0; 456 457 snprintf(host->led_name, sizeof(host->led_name), 458 "%s::", mmc_hostname(mmc)); 459 460 host->led.name = host->led_name; 461 host->led.brightness = LED_OFF; 462 host->led.default_trigger = mmc_hostname(mmc); 463 host->led.brightness_set = sdhci_led_control; 464 465 return led_classdev_register(mmc_dev(mmc), &host->led); 466 } 467 468 static void sdhci_led_unregister(struct sdhci_host *host) 469 { 470 if (host->quirks & SDHCI_QUIRK_NO_LED) 471 return; 472 473 led_classdev_unregister(&host->led); 474 } 475 476 static inline void sdhci_led_activate(struct sdhci_host *host) 477 { 478 } 479 480 static inline void sdhci_led_deactivate(struct sdhci_host *host) 481 { 482 } 483 484 #else 485 486 static inline int sdhci_led_register(struct sdhci_host *host) 487 { 488 return 0; 489 } 490 491 static inline void sdhci_led_unregister(struct sdhci_host *host) 492 { 493 } 494 495 static inline void sdhci_led_activate(struct sdhci_host *host) 496 { 497 __sdhci_led_activate(host); 498 } 499 500 static inline void sdhci_led_deactivate(struct sdhci_host *host) 501 { 502 __sdhci_led_deactivate(host); 503 } 504 505 #endif 506 507 void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq, 508 unsigned long timeout) 509 { 510 if (sdhci_data_line_cmd(mrq->cmd)) 511 mod_timer(&host->data_timer, timeout); 512 else 513 mod_timer(&host->timer, timeout); 514 } 515 EXPORT_SYMBOL_GPL(sdhci_mod_timer); 516 517 static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq) 518 { 519 if (sdhci_data_line_cmd(mrq->cmd)) 520 timer_delete(&host->data_timer); 521 else 522 timer_delete(&host->timer); 523 } 524 525 static inline bool sdhci_has_requests(struct sdhci_host *host) 526 { 527 return host->cmd || host->data_cmd; 528 } 529 530 /*****************************************************************************\ 531 * * 532 * Core functions * 533 * * 534 \*****************************************************************************/ 535 536 static void sdhci_read_block_pio(struct sdhci_host *host) 537 { 538 size_t blksize, len, chunk; 539 u32 scratch; 540 u8 *buf; 541 542 DBG("PIO reading\n"); 543 544 blksize = 
host->data->blksz; 545 chunk = 0; 546 547 while (blksize) { 548 BUG_ON(!sg_miter_next(&host->sg_miter)); 549 550 len = min(host->sg_miter.length, blksize); 551 552 blksize -= len; 553 host->sg_miter.consumed = len; 554 555 buf = host->sg_miter.addr; 556 557 while (len) { 558 if (chunk == 0) { 559 scratch = sdhci_readl(host, SDHCI_BUFFER); 560 chunk = 4; 561 } 562 563 *buf = scratch & 0xFF; 564 565 buf++; 566 scratch >>= 8; 567 chunk--; 568 len--; 569 } 570 } 571 572 sg_miter_stop(&host->sg_miter); 573 } 574 575 static void sdhci_write_block_pio(struct sdhci_host *host) 576 { 577 size_t blksize, len, chunk; 578 u32 scratch; 579 u8 *buf; 580 581 DBG("PIO writing\n"); 582 583 blksize = host->data->blksz; 584 chunk = 0; 585 scratch = 0; 586 587 while (blksize) { 588 BUG_ON(!sg_miter_next(&host->sg_miter)); 589 590 len = min(host->sg_miter.length, blksize); 591 592 blksize -= len; 593 host->sg_miter.consumed = len; 594 595 buf = host->sg_miter.addr; 596 597 while (len) { 598 scratch |= (u32)*buf << (chunk * 8); 599 600 buf++; 601 chunk++; 602 len--; 603 604 if ((chunk == 4) || ((len == 0) && (blksize == 0))) { 605 sdhci_writel(host, scratch, SDHCI_BUFFER); 606 chunk = 0; 607 scratch = 0; 608 } 609 } 610 } 611 612 sg_miter_stop(&host->sg_miter); 613 } 614 615 static void sdhci_transfer_pio(struct sdhci_host *host) 616 { 617 u32 mask; 618 619 if (host->blocks == 0) 620 return; 621 622 if (host->data->flags & MMC_DATA_READ) 623 mask = SDHCI_DATA_AVAILABLE; 624 else 625 mask = SDHCI_SPACE_AVAILABLE; 626 627 /* 628 * Some controllers (JMicron JMB38x) mess up the buffer bits 629 * for transfers < 4 bytes. As long as it is just one block, 630 * we can ignore the bits. 631 */ 632 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) && 633 (host->data->blocks == 1)) 634 mask = ~0; 635 636 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) { 637 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY) 638 udelay(100); 639 640 if (host->data->flags & MMC_DATA_READ) 641 sdhci_read_block_pio(host); 642 else 643 sdhci_write_block_pio(host); 644 645 host->blocks--; 646 if (host->blocks == 0) 647 break; 648 } 649 650 DBG("PIO transfer complete.\n"); 651 } 652 653 static int sdhci_pre_dma_transfer(struct sdhci_host *host, 654 struct mmc_data *data, int cookie) 655 { 656 int sg_count; 657 658 /* 659 * If the data buffers are already mapped, return the previous 660 * dma_map_sg() result. 
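 * (Cookie lifecycle, roughly: buffers start out COOKIE_UNMAPPED; the
 * ->pre_req() path may map them early and mark them COOKIE_PRE_MAPPED,
 * in which case ->post_req() unmaps them; buffers mapped below get
 * COOKIE_MAPPED and are unmapped when the request is completed.)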
661 */ 662 if (data->host_cookie == COOKIE_PRE_MAPPED) 663 return data->sg_count; 664 665 /* Bounce write requests to the bounce buffer */ 666 if (host->bounce_buffer) { 667 unsigned int length = data->blksz * data->blocks; 668 669 if (length > host->bounce_buffer_size) { 670 pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n", 671 mmc_hostname(host->mmc), length, 672 host->bounce_buffer_size); 673 return -EIO; 674 } 675 if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) { 676 /* Copy the data to the bounce buffer */ 677 if (host->ops->copy_to_bounce_buffer) { 678 host->ops->copy_to_bounce_buffer(host, 679 data, length); 680 } else { 681 sg_copy_to_buffer(data->sg, data->sg_len, 682 host->bounce_buffer, length); 683 } 684 } 685 /* Switch ownership to the DMA */ 686 dma_sync_single_for_device(mmc_dev(host->mmc), 687 host->bounce_addr, 688 host->bounce_buffer_size, 689 mmc_get_dma_dir(data)); 690 /* Just a dummy value */ 691 sg_count = 1; 692 } else { 693 /* Just access the data directly from memory */ 694 sg_count = dma_map_sg(mmc_dev(host->mmc), 695 data->sg, data->sg_len, 696 mmc_get_dma_dir(data)); 697 } 698 699 if (sg_count == 0) 700 return -ENOSPC; 701 702 data->sg_count = sg_count; 703 data->host_cookie = cookie; 704 705 return sg_count; 706 } 707 708 static char *sdhci_kmap_atomic(struct scatterlist *sg) 709 { 710 return kmap_local_page(sg_page(sg)) + sg->offset; 711 } 712 713 static void sdhci_kunmap_atomic(void *buffer) 714 { 715 kunmap_local(buffer); 716 } 717 718 void sdhci_adma_write_desc(struct sdhci_host *host, void **desc, 719 dma_addr_t addr, int len, unsigned int cmd) 720 { 721 struct sdhci_adma2_64_desc *dma_desc = *desc; 722 723 /* 32-bit and 64-bit descriptors have these members in same position */ 724 dma_desc->cmd = cpu_to_le16(cmd); 725 dma_desc->len = cpu_to_le16(len); 726 dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr)); 727 728 if (host->flags & SDHCI_USE_64_BIT_DMA) 729 dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr)); 730 731 *desc += host->desc_sz; 732 } 733 EXPORT_SYMBOL_GPL(sdhci_adma_write_desc); 734 735 static inline void __sdhci_adma_write_desc(struct sdhci_host *host, 736 void **desc, dma_addr_t addr, 737 int len, unsigned int cmd) 738 { 739 if (host->ops->adma_write_desc) 740 host->ops->adma_write_desc(host, desc, addr, len, cmd); 741 else 742 sdhci_adma_write_desc(host, desc, addr, len, cmd); 743 } 744 745 static void sdhci_adma_mark_end(void *desc) 746 { 747 struct sdhci_adma2_64_desc *dma_desc = desc; 748 749 /* 32-bit and 64-bit descriptors have 'cmd' in same position */ 750 dma_desc->cmd |= cpu_to_le16(ADMA2_END); 751 } 752 753 static void sdhci_adma_table_pre(struct sdhci_host *host, 754 struct mmc_data *data, int sg_count) 755 { 756 struct scatterlist *sg; 757 dma_addr_t addr, align_addr; 758 void *desc, *align; 759 char *buffer; 760 int len, offset, i; 761 762 /* 763 * The spec does not specify endianness of descriptor table. 764 * We currently guess that it is LE. 765 */ 766 767 host->sg_count = sg_count; 768 769 desc = host->adma_table; 770 align = host->align_buffer; 771 772 align_addr = host->align_addr; 773 774 for_each_sg(data->sg, sg, host->sg_count, i) { 775 addr = sg_dma_address(sg); 776 len = sg_dma_len(sg); 777 778 /* 779 * The SDHCI specification states that ADMA addresses must 780 * be 32-bit aligned. If they aren't, then we use a bounce 781 * buffer for the (up to three) bytes that screw up the 782 * alignment. 
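 * (Worked example, assuming the usual SDHCI_ADMA2_ALIGN = 4 and
 * SDHCI_ADMA2_MASK = 3: an sg entry mapped to DMA address ...0x1002
 * gives offset = (4 - (0x1002 & 3)) & 3 = 2, so the first two bytes
 * are staged through the align buffer and the main descriptor then
 * starts at the 4-byte-aligned address ...0x1004.)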
783 */ 784 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) & 785 SDHCI_ADMA2_MASK; 786 if (offset) { 787 if (data->flags & MMC_DATA_WRITE) { 788 buffer = sdhci_kmap_atomic(sg); 789 memcpy(align, buffer, offset); 790 sdhci_kunmap_atomic(buffer); 791 } 792 793 /* tran, valid */ 794 __sdhci_adma_write_desc(host, &desc, align_addr, 795 offset, ADMA2_TRAN_VALID); 796 797 BUG_ON(offset > 65536); 798 799 align += SDHCI_ADMA2_ALIGN; 800 align_addr += SDHCI_ADMA2_ALIGN; 801 802 addr += offset; 803 len -= offset; 804 } 805 806 /* 807 * The block layer forces a minimum segment size of PAGE_SIZE, 808 * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write 809 * multiple descriptors, noting that the ADMA table is sized 810 * for 4KiB chunks anyway, so it will be big enough. 811 */ 812 while (len > host->max_adma) { 813 int n = 32 * 1024; /* 32KiB*/ 814 815 __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID); 816 addr += n; 817 len -= n; 818 } 819 820 /* tran, valid */ 821 if (len) 822 __sdhci_adma_write_desc(host, &desc, addr, len, 823 ADMA2_TRAN_VALID); 824 825 /* 826 * If this triggers then we have a calculation bug 827 * somewhere. :/ 828 */ 829 WARN_ON((desc - host->adma_table) >= host->adma_table_sz); 830 } 831 832 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { 833 /* Mark the last descriptor as the terminating descriptor */ 834 if (desc != host->adma_table) { 835 desc -= host->desc_sz; 836 sdhci_adma_mark_end(desc); 837 } 838 } else { 839 /* Add a terminating entry - nop, end, valid */ 840 __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID); 841 } 842 } 843 844 static void sdhci_adma_table_post(struct sdhci_host *host, 845 struct mmc_data *data) 846 { 847 struct scatterlist *sg; 848 int i, size; 849 void *align; 850 char *buffer; 851 852 if (data->flags & MMC_DATA_READ) { 853 bool has_unaligned = false; 854 855 /* Do a quick scan of the SG list for any unaligned mappings */ 856 for_each_sg(data->sg, sg, host->sg_count, i) 857 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) { 858 has_unaligned = true; 859 break; 860 } 861 862 if (has_unaligned) { 863 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg, 864 data->sg_len, DMA_FROM_DEVICE); 865 866 align = host->align_buffer; 867 868 for_each_sg(data->sg, sg, host->sg_count, i) { 869 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) { 870 size = SDHCI_ADMA2_ALIGN - 871 (sg_dma_address(sg) & SDHCI_ADMA2_MASK); 872 873 buffer = sdhci_kmap_atomic(sg); 874 memcpy(buffer, align, size); 875 sdhci_kunmap_atomic(buffer); 876 877 align += SDHCI_ADMA2_ALIGN; 878 } 879 } 880 } 881 } 882 } 883 884 static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr) 885 { 886 sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS); 887 if (host->flags & SDHCI_USE_64_BIT_DMA) 888 sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI); 889 } 890 891 static dma_addr_t sdhci_sdma_address(struct sdhci_host *host) 892 { 893 if (host->bounce_buffer) 894 return host->bounce_addr; 895 else 896 return sg_dma_address(host->data->sg); 897 } 898 899 static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr) 900 { 901 if (host->v4_mode) 902 sdhci_set_adma_addr(host, addr); 903 else 904 sdhci_writel(host, addr, SDHCI_DMA_ADDRESS); 905 } 906 907 static unsigned int sdhci_target_timeout(struct sdhci_host *host, 908 struct mmc_command *cmd, 909 struct mmc_data *data) 910 { 911 unsigned int target_timeout; 912 913 /* timeout in us */ 914 if (!data) { 915 target_timeout = cmd->busy_timeout * 1000; 916 } else { 917 
target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000); 918 if (host->clock && data->timeout_clks) { 919 unsigned long long val; 920 921 /* 922 * data->timeout_clks is in units of clock cycles. 923 * host->clock is in Hz. target_timeout is in us. 924 * Hence, us = 1000000 * cycles / Hz. Round up. 925 */ 926 val = 1000000ULL * data->timeout_clks; 927 if (do_div(val, host->clock)) 928 target_timeout++; 929 target_timeout += val; 930 } 931 } 932 933 return target_timeout; 934 } 935 936 static void sdhci_calc_sw_timeout(struct sdhci_host *host, 937 struct mmc_command *cmd) 938 { 939 struct mmc_data *data = cmd->data; 940 struct mmc_host *mmc = host->mmc; 941 struct mmc_ios *ios = &mmc->ios; 942 unsigned char bus_width = 1 << ios->bus_width; 943 unsigned int blksz; 944 unsigned int freq; 945 u64 target_timeout; 946 u64 transfer_time; 947 948 target_timeout = sdhci_target_timeout(host, cmd, data); 949 target_timeout *= NSEC_PER_USEC; 950 951 if (data) { 952 blksz = data->blksz; 953 freq = mmc->actual_clock ? : host->clock; 954 transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width); 955 do_div(transfer_time, freq); 956 /* multiply by '2' to account for any unknowns */ 957 transfer_time = transfer_time * 2; 958 /* calculate timeout for the entire data */ 959 host->data_timeout = data->blocks * target_timeout + 960 transfer_time; 961 } else { 962 host->data_timeout = target_timeout; 963 } 964 965 if (host->data_timeout) 966 host->data_timeout += MMC_CMD_TRANSFER_TIME; 967 } 968 969 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd, 970 bool *too_big) 971 { 972 u8 count; 973 struct mmc_data *data; 974 unsigned target_timeout, current_timeout; 975 976 *too_big = false; 977 978 /* 979 * If the host controller provides us with an incorrect timeout 980 * value, just skip the check and use the maximum. The hardware may take 981 * longer to time out, but that's much better than having a too-short 982 * timeout value. 983 */ 984 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) 985 return host->max_timeout_count; 986 987 /* Unspecified command, assume max */ 988 if (cmd == NULL) 989 return host->max_timeout_count; 990 991 data = cmd->data; 992 /* Unspecified timeout, assume max */ 993 if (!data && !cmd->busy_timeout) 994 return host->max_timeout_count; 995 996 /* timeout in us */ 997 target_timeout = sdhci_target_timeout(host, cmd, data); 998 999 /* 1000 * Figure out needed cycles. 1001 * We do this in steps in order to fit inside a 32 bit int. 
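 * (A worked illustration of the steps below, assuming timeout_clk =
 * 50000 kHz and a 500 ms target: the minimum timeout works out to
 * (1 << 13) * 1000 / 50000 = 163 us, which has to be doubled 12 times
 * before it reaches 500000 us, so a count of 12 is what ends up in
 * SDHCI_TIMEOUT_CONTROL.)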
1002 * The first step is the minimum timeout, which will have a 1003 * minimum resolution of 6 bits: 1004 * (1) 2^13*1000 > 2^22, 1005 * (2) host->timeout_clk < 2^16 1006 * => 1007 * (1) / (2) > 2^6 1008 */ 1009 count = 0; 1010 current_timeout = (1 << 13) * 1000 / host->timeout_clk; 1011 while (current_timeout < target_timeout) { 1012 count++; 1013 current_timeout <<= 1; 1014 if (count > host->max_timeout_count) { 1015 if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT)) 1016 DBG("Too large timeout 0x%x requested for CMD%d!\n", 1017 count, cmd->opcode); 1018 count = host->max_timeout_count; 1019 *too_big = true; 1020 break; 1021 } 1022 } 1023 1024 return count; 1025 } 1026 1027 static void sdhci_set_transfer_irqs(struct sdhci_host *host) 1028 { 1029 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL; 1030 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR; 1031 1032 if (host->flags & SDHCI_REQ_USE_DMA) 1033 host->ier = (host->ier & ~pio_irqs) | dma_irqs; 1034 else 1035 host->ier = (host->ier & ~dma_irqs) | pio_irqs; 1036 1037 if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12)) 1038 host->ier |= SDHCI_INT_AUTO_CMD_ERR; 1039 else 1040 host->ier &= ~SDHCI_INT_AUTO_CMD_ERR; 1041 1042 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 1043 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 1044 } 1045 1046 void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) 1047 { 1048 if (enable) 1049 host->ier |= SDHCI_INT_DATA_TIMEOUT; 1050 else 1051 host->ier &= ~SDHCI_INT_DATA_TIMEOUT; 1052 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 1053 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 1054 } 1055 EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq); 1056 1057 void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) 1058 { 1059 bool too_big = false; 1060 u8 count = sdhci_calc_timeout(host, cmd, &too_big); 1061 1062 if (too_big && 1063 host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { 1064 sdhci_calc_sw_timeout(host, cmd); 1065 sdhci_set_data_timeout_irq(host, false); 1066 } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) { 1067 sdhci_set_data_timeout_irq(host, true); 1068 } 1069 1070 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); 1071 } 1072 EXPORT_SYMBOL_GPL(__sdhci_set_timeout); 1073 1074 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) 1075 { 1076 if (host->ops->set_timeout) 1077 host->ops->set_timeout(host, cmd); 1078 else 1079 __sdhci_set_timeout(host, cmd); 1080 } 1081 1082 void sdhci_initialize_data(struct sdhci_host *host, struct mmc_data *data) 1083 { 1084 WARN_ON(host->data); 1085 1086 /* Sanity checks */ 1087 BUG_ON(data->blksz * data->blocks > 524288); 1088 BUG_ON(data->blksz > host->mmc->max_blk_size); 1089 BUG_ON(data->blocks > 65535); 1090 1091 host->data = data; 1092 host->data_early = 0; 1093 host->data->bytes_xfered = 0; 1094 } 1095 EXPORT_SYMBOL_GPL(sdhci_initialize_data); 1096 1097 static inline void sdhci_set_block_info(struct sdhci_host *host, 1098 struct mmc_data *data) 1099 { 1100 /* Set the DMA boundary value and block size */ 1101 sdhci_writew(host, 1102 SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz), 1103 SDHCI_BLOCK_SIZE); 1104 /* 1105 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count 1106 * can be supported, in that case 16-bit block count register must be 0. 
1107 */ 1108 if (host->version >= SDHCI_SPEC_410 && host->v4_mode && 1109 (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) { 1110 if (sdhci_readw(host, SDHCI_BLOCK_COUNT)) 1111 sdhci_writew(host, 0, SDHCI_BLOCK_COUNT); 1112 sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT); 1113 } else { 1114 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); 1115 } 1116 } 1117 1118 void sdhci_prepare_dma(struct sdhci_host *host, struct mmc_data *data) 1119 { 1120 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 1121 struct scatterlist *sg; 1122 unsigned int length_mask, offset_mask; 1123 int i; 1124 1125 host->flags |= SDHCI_REQ_USE_DMA; 1126 1127 /* 1128 * FIXME: This doesn't account for merging when mapping the 1129 * scatterlist. 1130 * 1131 * The assumption here being that alignment and lengths are 1132 * the same after DMA mapping to device address space. 1133 */ 1134 length_mask = 0; 1135 offset_mask = 0; 1136 if (host->flags & SDHCI_USE_ADMA) { 1137 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) { 1138 length_mask = 3; 1139 /* 1140 * As we use up to 3 byte chunks to work 1141 * around alignment problems, we need to 1142 * check the offset as well. 1143 */ 1144 offset_mask = 3; 1145 } 1146 } else { 1147 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) 1148 length_mask = 3; 1149 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) 1150 offset_mask = 3; 1151 } 1152 1153 if (unlikely(length_mask | offset_mask)) { 1154 for_each_sg(data->sg, sg, data->sg_len, i) { 1155 if (sg->length & length_mask) { 1156 DBG("Reverting to PIO because of transfer size (%d)\n", 1157 sg->length); 1158 host->flags &= ~SDHCI_REQ_USE_DMA; 1159 break; 1160 } 1161 if (sg->offset & offset_mask) { 1162 DBG("Reverting to PIO because of bad alignment\n"); 1163 host->flags &= ~SDHCI_REQ_USE_DMA; 1164 break; 1165 } 1166 } 1167 } 1168 } 1169 1170 sdhci_config_dma(host); 1171 1172 if (host->flags & SDHCI_REQ_USE_DMA) { 1173 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1174 1175 if (sg_cnt <= 0) { 1176 /* 1177 * This only happens when someone fed 1178 * us an invalid request. 
1179 */ 1180 WARN_ON(1); 1181 host->flags &= ~SDHCI_REQ_USE_DMA; 1182 } else if (host->flags & SDHCI_USE_ADMA) { 1183 sdhci_adma_table_pre(host, data, sg_cnt); 1184 sdhci_set_adma_addr(host, host->adma_addr); 1185 } else { 1186 WARN_ON(sg_cnt != 1); 1187 sdhci_set_sdma_addr(host, sdhci_sdma_address(host)); 1188 } 1189 } 1190 1191 if (!(host->flags & SDHCI_REQ_USE_DMA)) { 1192 int flags; 1193 1194 flags = SG_MITER_ATOMIC; 1195 if (host->data->flags & MMC_DATA_READ) 1196 flags |= SG_MITER_TO_SG; 1197 else 1198 flags |= SG_MITER_FROM_SG; 1199 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); 1200 host->blocks = data->blocks; 1201 } 1202 1203 sdhci_set_transfer_irqs(host); 1204 } 1205 EXPORT_SYMBOL_GPL(sdhci_prepare_dma); 1206 1207 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) 1208 { 1209 struct mmc_data *data = cmd->data; 1210 1211 sdhci_initialize_data(host, data); 1212 1213 sdhci_prepare_dma(host, data); 1214 1215 sdhci_set_block_info(host, data); 1216 } 1217 1218 #if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA) 1219 1220 static int sdhci_external_dma_init(struct sdhci_host *host) 1221 { 1222 int ret = 0; 1223 struct mmc_host *mmc = host->mmc; 1224 1225 host->tx_chan = dma_request_chan(mmc_dev(mmc), "tx"); 1226 if (IS_ERR(host->tx_chan)) { 1227 ret = PTR_ERR(host->tx_chan); 1228 if (ret != -EPROBE_DEFER) 1229 pr_warn("Failed to request TX DMA channel.\n"); 1230 host->tx_chan = NULL; 1231 return ret; 1232 } 1233 1234 host->rx_chan = dma_request_chan(mmc_dev(mmc), "rx"); 1235 if (IS_ERR(host->rx_chan)) { 1236 if (host->tx_chan) { 1237 dma_release_channel(host->tx_chan); 1238 host->tx_chan = NULL; 1239 } 1240 1241 ret = PTR_ERR(host->rx_chan); 1242 if (ret != -EPROBE_DEFER) 1243 pr_warn("Failed to request RX DMA channel.\n"); 1244 host->rx_chan = NULL; 1245 } 1246 1247 return ret; 1248 } 1249 1250 static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, 1251 struct mmc_data *data) 1252 { 1253 return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan; 1254 } 1255 1256 static int sdhci_external_dma_setup(struct sdhci_host *host, 1257 struct mmc_command *cmd) 1258 { 1259 int ret, i; 1260 enum dma_transfer_direction dir; 1261 struct dma_async_tx_descriptor *desc; 1262 struct mmc_data *data = cmd->data; 1263 struct dma_chan *chan; 1264 struct dma_slave_config cfg; 1265 dma_cookie_t cookie; 1266 int sg_cnt; 1267 1268 if (!host->mapbase) 1269 return -EINVAL; 1270 1271 memset(&cfg, 0, sizeof(cfg)); 1272 cfg.src_addr = host->mapbase + SDHCI_BUFFER; 1273 cfg.dst_addr = host->mapbase + SDHCI_BUFFER; 1274 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 1275 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 1276 cfg.src_maxburst = data->blksz / 4; 1277 cfg.dst_maxburst = data->blksz / 4; 1278 1279 /* Sanity check: all the SG entries must be aligned by block size. */ 1280 for (i = 0; i < data->sg_len; i++) { 1281 if ((data->sg + i)->length % data->blksz) 1282 return -EINVAL; 1283 } 1284 1285 chan = sdhci_external_dma_channel(host, data); 1286 1287 ret = dmaengine_slave_config(chan, &cfg); 1288 if (ret) 1289 return ret; 1290 1291 sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); 1292 if (sg_cnt <= 0) 1293 return -EINVAL; 1294 1295 dir = data->flags & MMC_DATA_WRITE ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; 1296 desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir, 1297 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1298 if (!desc) 1299 return -EINVAL; 1300 1301 desc->callback = NULL; 1302 desc->callback_param = NULL; 1303 1304 cookie = dmaengine_submit(desc); 1305 if (dma_submit_error(cookie)) 1306 ret = cookie; 1307 1308 return ret; 1309 } 1310 1311 static void sdhci_external_dma_release(struct sdhci_host *host) 1312 { 1313 if (host->tx_chan) { 1314 dma_release_channel(host->tx_chan); 1315 host->tx_chan = NULL; 1316 } 1317 1318 if (host->rx_chan) { 1319 dma_release_channel(host->rx_chan); 1320 host->rx_chan = NULL; 1321 } 1322 1323 sdhci_switch_external_dma(host, false); 1324 } 1325 1326 static void __sdhci_external_dma_prepare_data(struct sdhci_host *host, 1327 struct mmc_command *cmd) 1328 { 1329 struct mmc_data *data = cmd->data; 1330 1331 sdhci_initialize_data(host, data); 1332 1333 host->flags |= SDHCI_REQ_USE_DMA; 1334 sdhci_set_transfer_irqs(host); 1335 1336 sdhci_set_block_info(host, data); 1337 } 1338 1339 static void sdhci_external_dma_prepare_data(struct sdhci_host *host, 1340 struct mmc_command *cmd) 1341 { 1342 if (!sdhci_external_dma_setup(host, cmd)) { 1343 __sdhci_external_dma_prepare_data(host, cmd); 1344 } else { 1345 sdhci_external_dma_release(host); 1346 pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n", 1347 mmc_hostname(host->mmc)); 1348 sdhci_prepare_data(host, cmd); 1349 } 1350 } 1351 1352 static void sdhci_external_dma_pre_transfer(struct sdhci_host *host, 1353 struct mmc_command *cmd) 1354 { 1355 struct dma_chan *chan; 1356 1357 if (!cmd->data) 1358 return; 1359 1360 chan = sdhci_external_dma_channel(host, cmd->data); 1361 if (chan) 1362 dma_async_issue_pending(chan); 1363 } 1364 1365 #else 1366 1367 static inline int sdhci_external_dma_init(struct sdhci_host *host) 1368 { 1369 return -EOPNOTSUPP; 1370 } 1371 1372 static inline void sdhci_external_dma_release(struct sdhci_host *host) 1373 { 1374 } 1375 1376 static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host, 1377 struct mmc_command *cmd) 1378 { 1379 /* This should never happen */ 1380 WARN_ON_ONCE(1); 1381 } 1382 1383 static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host, 1384 struct mmc_command *cmd) 1385 { 1386 } 1387 1388 static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, 1389 struct mmc_data *data) 1390 { 1391 return NULL; 1392 } 1393 1394 #endif 1395 1396 void sdhci_switch_external_dma(struct sdhci_host *host, bool en) 1397 { 1398 host->use_external_dma = en; 1399 } 1400 EXPORT_SYMBOL_GPL(sdhci_switch_external_dma); 1401 1402 static inline bool sdhci_auto_cmd12(struct sdhci_host *host, 1403 struct mmc_request *mrq) 1404 { 1405 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) && 1406 !mrq->cap_cmd_during_tfr; 1407 } 1408 1409 static inline bool sdhci_auto_cmd23(struct sdhci_host *host, 1410 struct mmc_request *mrq) 1411 { 1412 return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23); 1413 } 1414 1415 static inline bool sdhci_manual_cmd23(struct sdhci_host *host, 1416 struct mmc_request *mrq) 1417 { 1418 return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23); 1419 } 1420 1421 static inline void sdhci_auto_cmd_select(struct sdhci_host *host, 1422 struct mmc_command *cmd, 1423 u16 *mode) 1424 { 1425 bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) && 1426 (cmd->opcode != SD_IO_RW_EXTENDED); 1427 bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq); 1428 u16 ctrl2; 
1429 1430 /* 1431 * In case of Version 4.10 or later, use of 'Auto CMD Auto 1432 * Select' is recommended rather than use of 'Auto CMD12 1433 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode 1434 * here because some controllers (e.g sdhci-of-dwmshc) expect it. 1435 */ 1436 if (host->version >= SDHCI_SPEC_410 && host->v4_mode && 1437 (use_cmd12 || use_cmd23)) { 1438 *mode |= SDHCI_TRNS_AUTO_SEL; 1439 1440 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 1441 if (use_cmd23) 1442 ctrl2 |= SDHCI_CMD23_ENABLE; 1443 else 1444 ctrl2 &= ~SDHCI_CMD23_ENABLE; 1445 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 1446 1447 return; 1448 } 1449 1450 /* 1451 * If we are sending CMD23, CMD12 never gets sent 1452 * on successful completion (so no Auto-CMD12). 1453 */ 1454 if (use_cmd12) 1455 *mode |= SDHCI_TRNS_AUTO_CMD12; 1456 else if (use_cmd23) 1457 *mode |= SDHCI_TRNS_AUTO_CMD23; 1458 } 1459 1460 static void sdhci_set_transfer_mode(struct sdhci_host *host, 1461 struct mmc_command *cmd) 1462 { 1463 u16 mode = 0; 1464 struct mmc_data *data = cmd->data; 1465 1466 if (data == NULL) { 1467 if (host->quirks2 & 1468 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) { 1469 /* must not clear SDHCI_TRANSFER_MODE when tuning */ 1470 if (!mmc_op_tuning(cmd->opcode)) 1471 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE); 1472 } else { 1473 /* clear Auto CMD settings for no data CMDs */ 1474 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE); 1475 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 | 1476 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE); 1477 } 1478 return; 1479 } 1480 1481 WARN_ON(!host->data); 1482 1483 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE)) 1484 mode = SDHCI_TRNS_BLK_CNT_EN; 1485 1486 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) { 1487 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI; 1488 sdhci_auto_cmd_select(host, cmd, &mode); 1489 if (sdhci_auto_cmd23(host, cmd->mrq)) 1490 sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2); 1491 } 1492 1493 if (data->flags & MMC_DATA_READ) 1494 mode |= SDHCI_TRNS_READ; 1495 if (host->flags & SDHCI_REQ_USE_DMA) 1496 mode |= SDHCI_TRNS_DMA; 1497 1498 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE); 1499 } 1500 1501 bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq) 1502 { 1503 return (!(host->flags & SDHCI_DEVICE_DEAD) && 1504 ((mrq->cmd && mrq->cmd->error) || 1505 (mrq->sbc && mrq->sbc->error) || 1506 (mrq->data && mrq->data->stop && mrq->data->stop->error) || 1507 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))); 1508 } 1509 EXPORT_SYMBOL_GPL(sdhci_needs_reset); 1510 1511 static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq) 1512 { 1513 int i; 1514 1515 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 1516 if (host->mrqs_done[i] == mrq) { 1517 WARN_ON(1); 1518 return; 1519 } 1520 } 1521 1522 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 1523 if (!host->mrqs_done[i]) { 1524 host->mrqs_done[i] = mrq; 1525 break; 1526 } 1527 } 1528 1529 WARN_ON(i >= SDHCI_MAX_MRQS); 1530 } 1531 1532 void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) 1533 { 1534 if (host->cmd && host->cmd->mrq == mrq) 1535 host->cmd = NULL; 1536 1537 if (host->data_cmd && host->data_cmd->mrq == mrq) 1538 host->data_cmd = NULL; 1539 1540 if (host->deferred_cmd && host->deferred_cmd->mrq == mrq) 1541 host->deferred_cmd = NULL; 1542 1543 if (host->data && host->data->mrq == mrq) 1544 host->data = NULL; 1545 1546 if (sdhci_needs_reset(host, mrq)) 1547 host->pending_reset = true; 1548 1549 sdhci_set_mrq_done(host, mrq); 1550 1551 
sdhci_del_timer(host, mrq); 1552 1553 if (!sdhci_has_requests(host)) 1554 sdhci_led_deactivate(host); 1555 } 1556 EXPORT_SYMBOL_GPL(__sdhci_finish_mrq); 1557 1558 void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) 1559 { 1560 __sdhci_finish_mrq(host, mrq); 1561 1562 queue_work(host->complete_wq, &host->complete_work); 1563 } 1564 EXPORT_SYMBOL_GPL(sdhci_finish_mrq); 1565 1566 void __sdhci_finish_data_common(struct sdhci_host *host, bool defer_reset) 1567 { 1568 struct mmc_command *data_cmd = host->data_cmd; 1569 struct mmc_data *data = host->data; 1570 1571 host->data = NULL; 1572 host->data_cmd = NULL; 1573 1574 /* 1575 * The controller needs a reset of internal state machines upon error 1576 * conditions. 1577 */ 1578 if (data->error) { 1579 if (defer_reset) 1580 host->pending_reset = true; 1581 else if (!host->cmd || host->cmd == data_cmd) 1582 sdhci_reset_for(host, REQUEST_ERROR); 1583 else 1584 sdhci_reset_for(host, REQUEST_ERROR_DATA_ONLY); 1585 } 1586 1587 if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) == 1588 (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) 1589 sdhci_adma_table_post(host, data); 1590 1591 /* 1592 * The specification states that the block count register must 1593 * be updated, but it does not specify at what point in the 1594 * data flow. That makes the register entirely useless to read 1595 * back so we have to assume that nothing made it to the card 1596 * in the event of an error. 1597 */ 1598 if (data->error) 1599 data->bytes_xfered = 0; 1600 else 1601 data->bytes_xfered = data->blksz * data->blocks; 1602 } 1603 EXPORT_SYMBOL_GPL(__sdhci_finish_data_common); 1604 1605 static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout) 1606 { 1607 struct mmc_data *data = host->data; 1608 1609 __sdhci_finish_data_common(host, false); 1610 1611 /* 1612 * Need to send CMD12 if - 1613 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23) 1614 * b) error in multiblock transfer 1615 */ 1616 if (data->stop && 1617 ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) || 1618 data->error)) { 1619 /* 1620 * 'cap_cmd_during_tfr' request must not use the command line 1621 * after mmc_command_done() has been called. It is upper layer's 1622 * responsibility to send the stop command if required. 1623 */ 1624 if (data->mrq->cap_cmd_during_tfr) { 1625 __sdhci_finish_mrq(host, data->mrq); 1626 } else { 1627 /* Avoid triggering warning in sdhci_send_command() */ 1628 host->cmd = NULL; 1629 if (!sdhci_send_command(host, data->stop)) { 1630 if (sw_data_timeout) { 1631 /* 1632 * This is anyway a sw data timeout, so 1633 * give up now. 
1634 */ 1635 data->stop->error = -EIO; 1636 __sdhci_finish_mrq(host, data->mrq); 1637 } else { 1638 WARN_ON(host->deferred_cmd); 1639 host->deferred_cmd = data->stop; 1640 } 1641 } 1642 } 1643 } else { 1644 __sdhci_finish_mrq(host, data->mrq); 1645 } 1646 } 1647 1648 static void sdhci_finish_data(struct sdhci_host *host) 1649 { 1650 __sdhci_finish_data(host, false); 1651 } 1652 1653 static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) 1654 { 1655 int flags; 1656 u32 mask; 1657 unsigned long timeout; 1658 1659 WARN_ON(host->cmd); 1660 1661 /* Initially, a command has no error */ 1662 cmd->error = 0; 1663 1664 if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) && 1665 cmd->opcode == MMC_STOP_TRANSMISSION) 1666 cmd->flags |= MMC_RSP_BUSY; 1667 1668 mask = SDHCI_CMD_INHIBIT; 1669 if (sdhci_data_line_cmd(cmd)) 1670 mask |= SDHCI_DATA_INHIBIT; 1671 1672 /* We shouldn't wait for data inhibit for stop commands, even 1673 though they might use busy signaling */ 1674 if (cmd->mrq->data && (cmd == cmd->mrq->data->stop)) 1675 mask &= ~SDHCI_DATA_INHIBIT; 1676 1677 if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) 1678 return false; 1679 1680 host->cmd = cmd; 1681 host->data_timeout = 0; 1682 if (sdhci_data_line_cmd(cmd)) { 1683 WARN_ON(host->data_cmd); 1684 host->data_cmd = cmd; 1685 sdhci_set_timeout(host, cmd); 1686 } 1687 1688 if (cmd->data) { 1689 if (host->use_external_dma) 1690 sdhci_external_dma_prepare_data(host, cmd); 1691 else 1692 sdhci_prepare_data(host, cmd); 1693 } 1694 1695 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT); 1696 1697 sdhci_set_transfer_mode(host, cmd); 1698 1699 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { 1700 WARN_ONCE(1, "Unsupported response type!\n"); 1701 /* 1702 * This does not happen in practice because 136-bit response 1703 * commands never have busy waiting, so rather than complicate 1704 * the error path, just remove busy waiting and continue. 
1705 */ 1706 cmd->flags &= ~MMC_RSP_BUSY; 1707 } 1708 1709 if (!(cmd->flags & MMC_RSP_PRESENT)) 1710 flags = SDHCI_CMD_RESP_NONE; 1711 else if (cmd->flags & MMC_RSP_136) 1712 flags = SDHCI_CMD_RESP_LONG; 1713 else if (cmd->flags & MMC_RSP_BUSY) 1714 flags = SDHCI_CMD_RESP_SHORT_BUSY; 1715 else 1716 flags = SDHCI_CMD_RESP_SHORT; 1717 1718 if (cmd->flags & MMC_RSP_CRC) 1719 flags |= SDHCI_CMD_CRC; 1720 if (cmd->flags & MMC_RSP_OPCODE) 1721 flags |= SDHCI_CMD_INDEX; 1722 1723 /* CMD19 is special in that the Data Present Select should be set */ 1724 if (cmd->data || mmc_op_tuning(cmd->opcode)) 1725 flags |= SDHCI_CMD_DATA; 1726 1727 timeout = jiffies; 1728 if (host->data_timeout) 1729 timeout += nsecs_to_jiffies(host->data_timeout); 1730 else if (!cmd->data && cmd->busy_timeout > 9000) 1731 timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ; 1732 else 1733 timeout += 10 * HZ; 1734 sdhci_mod_timer(host, cmd->mrq, timeout); 1735 1736 if (host->use_external_dma) 1737 sdhci_external_dma_pre_transfer(host, cmd); 1738 1739 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); 1740 1741 return true; 1742 } 1743 1744 bool sdhci_present_error(struct sdhci_host *host, 1745 struct mmc_command *cmd, bool present) 1746 { 1747 if (!present || host->flags & SDHCI_DEVICE_DEAD) { 1748 cmd->error = -ENOMEDIUM; 1749 return true; 1750 } 1751 1752 return false; 1753 } 1754 EXPORT_SYMBOL_GPL(sdhci_present_error); 1755 1756 static bool sdhci_send_command_retry(struct sdhci_host *host, 1757 struct mmc_command *cmd, 1758 unsigned long flags) 1759 __releases(host->lock) 1760 __acquires(host->lock) 1761 { 1762 struct mmc_command *deferred_cmd = host->deferred_cmd; 1763 int timeout = 10; /* Approx. 10 ms */ 1764 bool present; 1765 1766 while (!sdhci_send_command(host, cmd)) { 1767 if (!timeout--) { 1768 pr_err("%s: Controller never released inhibit bit(s).\n", 1769 mmc_hostname(host->mmc)); 1770 sdhci_err_stats_inc(host, CTRL_TIMEOUT); 1771 sdhci_dumpregs(host); 1772 cmd->error = -EIO; 1773 return false; 1774 } 1775 1776 spin_unlock_irqrestore(&host->lock, flags); 1777 1778 usleep_range(1000, 1250); 1779 1780 present = host->mmc->ops->get_cd(host->mmc); 1781 1782 spin_lock_irqsave(&host->lock, flags); 1783 1784 /* A deferred command might disappear, handle that */ 1785 if (cmd == deferred_cmd && cmd != host->deferred_cmd) 1786 return true; 1787 1788 if (sdhci_present_error(host, cmd, present)) 1789 return false; 1790 } 1791 1792 if (cmd == host->deferred_cmd) 1793 host->deferred_cmd = NULL; 1794 1795 return true; 1796 } 1797 1798 static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd) 1799 { 1800 int i, reg; 1801 1802 for (i = 0; i < 4; i++) { 1803 reg = SDHCI_RESPONSE + (3 - i) * 4; 1804 cmd->resp[i] = sdhci_readl(host, reg); 1805 } 1806 1807 if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC) 1808 return; 1809 1810 /* CRC is stripped so we need to do some shifting */ 1811 for (i = 0; i < 4; i++) { 1812 cmd->resp[i] <<= 8; 1813 if (i != 3) 1814 cmd->resp[i] |= cmd->resp[i + 1] >> 24; 1815 } 1816 } 1817 1818 static void sdhci_finish_command(struct sdhci_host *host) 1819 { 1820 struct mmc_command *cmd = host->cmd; 1821 1822 host->cmd = NULL; 1823 1824 if (cmd->flags & MMC_RSP_PRESENT) { 1825 if (cmd->flags & MMC_RSP_136) { 1826 sdhci_read_rsp_136(host, cmd); 1827 } else { 1828 cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE); 1829 } 1830 } 1831 1832 if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd) 1833 mmc_command_done(host->mmc, cmd->mrq); 1834 1835 /* 1836 * The 
host can send an interrupt when the busy state has 1837 * ended, allowing us to wait without wasting CPU cycles. 1838 * The busy signal uses DAT0 so this is similar to waiting 1839 * for data to complete. 1840 * 1841 * Note: The 1.0 specification is a bit ambiguous about this 1842 * feature so there might be some problems with older 1843 * controllers. 1844 */ 1845 if (cmd->flags & MMC_RSP_BUSY) { 1846 if (cmd->data) { 1847 DBG("Cannot wait for busy signal when also doing a data transfer"); 1848 } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) && 1849 cmd == host->data_cmd) { 1850 /* Command complete before busy is ended */ 1851 return; 1852 } 1853 } 1854 1855 /* Finished CMD23, now send actual command. */ 1856 if (cmd == cmd->mrq->sbc) { 1857 if (!sdhci_send_command(host, cmd->mrq->cmd)) { 1858 WARN_ON(host->deferred_cmd); 1859 host->deferred_cmd = cmd->mrq->cmd; 1860 } 1861 } else { 1862 1863 /* Processed actual command. */ 1864 if (host->data && host->data_early) 1865 sdhci_finish_data(host); 1866 1867 if (!cmd->data) 1868 __sdhci_finish_mrq(host, cmd->mrq); 1869 } 1870 } 1871 1872 static u16 sdhci_get_preset_value(struct sdhci_host *host) 1873 { 1874 u16 preset = 0; 1875 1876 switch (host->timing) { 1877 case MMC_TIMING_MMC_HS: 1878 case MMC_TIMING_SD_HS: 1879 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED); 1880 break; 1881 case MMC_TIMING_UHS_SDR12: 1882 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); 1883 break; 1884 case MMC_TIMING_UHS_SDR25: 1885 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25); 1886 break; 1887 case MMC_TIMING_UHS_SDR50: 1888 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50); 1889 break; 1890 case MMC_TIMING_UHS_SDR104: 1891 case MMC_TIMING_MMC_HS200: 1892 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104); 1893 break; 1894 case MMC_TIMING_UHS_DDR50: 1895 case MMC_TIMING_MMC_DDR52: 1896 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50); 1897 break; 1898 case MMC_TIMING_MMC_HS400: 1899 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400); 1900 break; 1901 case MMC_TIMING_UHS2_SPEED_A: 1902 case MMC_TIMING_UHS2_SPEED_A_HD: 1903 case MMC_TIMING_UHS2_SPEED_B: 1904 case MMC_TIMING_UHS2_SPEED_B_HD: 1905 preset = sdhci_readw(host, SDHCI_PRESET_FOR_UHS2); 1906 break; 1907 default: 1908 pr_warn("%s: Invalid UHS-I mode selected\n", 1909 mmc_hostname(host->mmc)); 1910 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); 1911 break; 1912 } 1913 return preset; 1914 } 1915 1916 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock, 1917 unsigned int *actual_clock) 1918 { 1919 int div = 0; /* Initialized for compiler warning */ 1920 int real_div = div, clk_mul = 1; 1921 u16 clk = 0; 1922 bool switch_base_clk = false; 1923 1924 if (host->version >= SDHCI_SPEC_300) { 1925 if (host->preset_enabled) { 1926 u16 pre_val; 1927 1928 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 1929 pre_val = sdhci_get_preset_value(host); 1930 div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val); 1931 if (host->clk_mul && 1932 (pre_val & SDHCI_PRESET_CLKGEN_SEL)) { 1933 clk = SDHCI_PROG_CLOCK_MODE; 1934 real_div = div + 1; 1935 clk_mul = host->clk_mul; 1936 } else { 1937 real_div = max_t(int, 1, div << 1); 1938 } 1939 goto clock_set; 1940 } 1941 1942 /* 1943 * Check if the Host Controller supports Programmable Clock 1944 * Mode. 
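 * (Illustration, not from the spec text: with host->max_clk = 200 MHz,
 * no clock multiplier and a 50 MHz request, the Version 3.00 base-clock
 * loop below stops at div = 4, so real_div = 4, the divider field that
 * gets programmed is div >> 1 = 2 and actual_clock = 200 MHz / 4 =
 * 50 MHz.)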
1945 */ 1946 if (host->clk_mul) { 1947 for (div = 1; div <= 1024; div++) { 1948 if ((host->max_clk * host->clk_mul / div) 1949 <= clock) 1950 break; 1951 } 1952 if ((host->max_clk * host->clk_mul / div) <= clock) { 1953 /* 1954 * Set Programmable Clock Mode in the Clock 1955 * Control register. 1956 */ 1957 clk = SDHCI_PROG_CLOCK_MODE; 1958 real_div = div; 1959 clk_mul = host->clk_mul; 1960 div--; 1961 } else { 1962 /* 1963 * Divisor can be too small to reach clock 1964 * speed requirement. Then use the base clock. 1965 */ 1966 switch_base_clk = true; 1967 } 1968 } 1969 1970 if (!host->clk_mul || switch_base_clk) { 1971 /* Version 3.00 divisors must be a multiple of 2. */ 1972 if (host->max_clk <= clock) 1973 div = 1; 1974 else { 1975 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; 1976 div += 2) { 1977 if ((host->max_clk / div) <= clock) 1978 break; 1979 } 1980 } 1981 real_div = div; 1982 div >>= 1; 1983 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN) 1984 && !div && host->max_clk <= 25000000) 1985 div = 1; 1986 } 1987 } else { 1988 /* Version 2.00 divisors must be a power of 2. */ 1989 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) { 1990 if ((host->max_clk / div) <= clock) 1991 break; 1992 } 1993 real_div = div; 1994 div >>= 1; 1995 } 1996 1997 clock_set: 1998 if (real_div) 1999 *actual_clock = (host->max_clk * clk_mul) / real_div; 2000 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; 2001 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN) 2002 << SDHCI_DIVIDER_HI_SHIFT; 2003 2004 return clk; 2005 } 2006 EXPORT_SYMBOL_GPL(sdhci_calc_clk); 2007 2008 void sdhci_enable_clk(struct sdhci_host *host, u16 clk) 2009 { 2010 ktime_t timeout; 2011 2012 clk |= SDHCI_CLOCK_INT_EN; 2013 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2014 2015 /* Wait max 150 ms */ 2016 timeout = ktime_add_ms(ktime_get(), 150); 2017 while (1) { 2018 bool timedout = ktime_after(ktime_get(), timeout); 2019 2020 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2021 if (clk & SDHCI_CLOCK_INT_STABLE) 2022 break; 2023 if (timedout) { 2024 pr_err("%s: Internal clock never stabilised.\n", 2025 mmc_hostname(host->mmc)); 2026 sdhci_err_stats_inc(host, CTRL_TIMEOUT); 2027 sdhci_dumpregs(host); 2028 return; 2029 } 2030 udelay(10); 2031 } 2032 2033 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) { 2034 clk |= SDHCI_CLOCK_PLL_EN; 2035 clk &= ~SDHCI_CLOCK_INT_STABLE; 2036 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2037 2038 /* Wait max 150 ms */ 2039 timeout = ktime_add_ms(ktime_get(), 150); 2040 while (1) { 2041 bool timedout = ktime_after(ktime_get(), timeout); 2042 2043 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2044 if (clk & SDHCI_CLOCK_INT_STABLE) 2045 break; 2046 if (timedout) { 2047 pr_err("%s: PLL clock never stabilised.\n", 2048 mmc_hostname(host->mmc)); 2049 sdhci_err_stats_inc(host, CTRL_TIMEOUT); 2050 sdhci_dumpregs(host); 2051 return; 2052 } 2053 udelay(10); 2054 } 2055 } 2056 2057 clk |= SDHCI_CLOCK_CARD_EN; 2058 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2059 } 2060 EXPORT_SYMBOL_GPL(sdhci_enable_clk); 2061 2062 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) 2063 { 2064 u16 clk; 2065 2066 host->mmc->actual_clock = 0; 2067 2068 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2069 if (clk & SDHCI_CLOCK_CARD_EN) 2070 sdhci_writew(host, clk & ~SDHCI_CLOCK_CARD_EN, 2071 SDHCI_CLOCK_CONTROL); 2072 2073 if (clock == 0) { 2074 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); 2075 return; 2076 } 2077 2078 clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); 2079 
sdhci_enable_clk(host, clk); 2080 } 2081 EXPORT_SYMBOL_GPL(sdhci_set_clock); 2082 2083 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode, 2084 unsigned short vdd) 2085 { 2086 struct mmc_host *mmc = host->mmc; 2087 2088 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 2089 2090 if (mode != MMC_POWER_OFF) 2091 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL); 2092 else 2093 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2094 } 2095 2096 unsigned short sdhci_get_vdd_value(unsigned short vdd) 2097 { 2098 switch (1 << vdd) { 2099 case MMC_VDD_165_195: 2100 /* 2101 * Without a regulator, SDHCI does not support 2.0v 2102 * so we only get here if the driver deliberately 2103 * added the 2.0v range to ocr_avail. Map it to 1.8v 2104 * for the purpose of turning on the power. 2105 */ 2106 case MMC_VDD_20_21: 2107 return SDHCI_POWER_180; 2108 case MMC_VDD_29_30: 2109 case MMC_VDD_30_31: 2110 return SDHCI_POWER_300; 2111 case MMC_VDD_32_33: 2112 case MMC_VDD_33_34: 2113 /* 2114 * 3.4V ~ 3.6V are valid only for those platforms where it's 2115 * known that the voltage range is supported by hardware. 2116 */ 2117 case MMC_VDD_34_35: 2118 case MMC_VDD_35_36: 2119 return SDHCI_POWER_330; 2120 default: 2121 return 0; 2122 } 2123 } 2124 EXPORT_SYMBOL_GPL(sdhci_get_vdd_value); 2125 2126 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, 2127 unsigned short vdd) 2128 { 2129 u8 pwr = 0; 2130 2131 if (mode != MMC_POWER_OFF) { 2132 pwr = sdhci_get_vdd_value(vdd); 2133 if (!pwr) { 2134 WARN(1, "%s: Invalid vdd %#x\n", 2135 mmc_hostname(host->mmc), vdd); 2136 } 2137 } 2138 2139 if (host->pwr == pwr) 2140 return; 2141 2142 host->pwr = pwr; 2143 2144 if (pwr == 0) { 2145 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2146 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 2147 sdhci_runtime_pm_bus_off(host); 2148 } else { 2149 /* 2150 * Spec says that we should clear the power reg before setting 2151 * a new value. Some controllers don't seem to like this though. 2152 */ 2153 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) 2154 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); 2155 2156 /* 2157 * At least the Marvell CaFe chip gets confused if we set the 2158 * voltage and turn on the power at the same time, so set the 2159 * voltage first. 2160 */ 2161 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) 2162 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 2163 2164 pwr |= SDHCI_POWER_ON; 2165 2166 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); 2167 2168 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) 2169 sdhci_runtime_pm_bus_on(host); 2170 2171 /* 2172 * Some controllers need an extra 10ms delay before 2173 * they can apply clock after applying power 2174 */ 2175 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) 2176 mdelay(10); 2177 } 2178 } 2179 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg); 2180 2181 void sdhci_set_power(struct sdhci_host *host, unsigned char mode, 2182 unsigned short vdd) 2183 { 2184 if (IS_ERR(host->mmc->supply.vmmc)) 2185 sdhci_set_power_noreg(host, mode, vdd); 2186 else 2187 sdhci_set_power_reg(host, mode, vdd); 2188 } 2189 EXPORT_SYMBOL_GPL(sdhci_set_power); 2190 2191 /* 2192 * Some controllers need to configure a valid bus voltage on their power 2193 * register regardless of whether an external regulator is taking care of power 2194 * supply. This helper function takes care of it if set as the controller's 2195 * sdhci_ops.set_power callback.
2196 */ 2197 void sdhci_set_power_and_bus_voltage(struct sdhci_host *host, 2198 unsigned char mode, 2199 unsigned short vdd) 2200 { 2201 if (!IS_ERR(host->mmc->supply.vmmc)) { 2202 struct mmc_host *mmc = host->mmc; 2203 2204 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); 2205 } 2206 sdhci_set_power_noreg(host, mode, vdd); 2207 } 2208 EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage); 2209 2210 /*****************************************************************************\ 2211 * * 2212 * MMC callbacks * 2213 * * 2214 \*****************************************************************************/ 2215 2216 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) 2217 { 2218 struct sdhci_host *host = mmc_priv(mmc); 2219 struct mmc_command *cmd; 2220 unsigned long flags; 2221 bool present; 2222 2223 /* Firstly check card presence */ 2224 present = mmc->ops->get_cd(mmc); 2225 2226 spin_lock_irqsave(&host->lock, flags); 2227 2228 sdhci_led_activate(host); 2229 2230 if (sdhci_present_error(host, mrq->cmd, present)) 2231 goto out_finish; 2232 2233 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd; 2234 2235 if (!sdhci_send_command_retry(host, cmd, flags)) 2236 goto out_finish; 2237 2238 spin_unlock_irqrestore(&host->lock, flags); 2239 2240 return; 2241 2242 out_finish: 2243 sdhci_finish_mrq(host, mrq); 2244 spin_unlock_irqrestore(&host->lock, flags); 2245 } 2246 EXPORT_SYMBOL_GPL(sdhci_request); 2247 2248 int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq) 2249 { 2250 struct sdhci_host *host = mmc_priv(mmc); 2251 struct mmc_command *cmd; 2252 unsigned long flags; 2253 int ret = 0; 2254 2255 spin_lock_irqsave(&host->lock, flags); 2256 2257 if (sdhci_present_error(host, mrq->cmd, true)) { 2258 sdhci_finish_mrq(host, mrq); 2259 goto out_finish; 2260 } 2261 2262 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd; 2263 2264 /* 2265 * The HSQ may send a command in interrupt context without polling 2266 * the busy signaling, which means we should return BUSY if controller 2267 * has not released inhibit bits to allow HSQ trying to send request 2268 * again in non-atomic context. So we should not finish this request 2269 * here. 
2270 */ 2271 if (!sdhci_send_command(host, cmd)) 2272 ret = -EBUSY; 2273 else 2274 sdhci_led_activate(host); 2275 2276 out_finish: 2277 spin_unlock_irqrestore(&host->lock, flags); 2278 return ret; 2279 } 2280 EXPORT_SYMBOL_GPL(sdhci_request_atomic); 2281 2282 void sdhci_set_bus_width(struct sdhci_host *host, int width) 2283 { 2284 u8 ctrl; 2285 2286 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 2287 if (width == MMC_BUS_WIDTH_8) { 2288 ctrl &= ~SDHCI_CTRL_4BITBUS; 2289 ctrl |= SDHCI_CTRL_8BITBUS; 2290 } else { 2291 if (host->mmc->caps & MMC_CAP_8_BIT_DATA) 2292 ctrl &= ~SDHCI_CTRL_8BITBUS; 2293 if (width == MMC_BUS_WIDTH_4) 2294 ctrl |= SDHCI_CTRL_4BITBUS; 2295 else 2296 ctrl &= ~SDHCI_CTRL_4BITBUS; 2297 } 2298 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2299 } 2300 EXPORT_SYMBOL_GPL(sdhci_set_bus_width); 2301 2302 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) 2303 { 2304 u16 ctrl_2; 2305 2306 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2307 /* Select Bus Speed Mode for host */ 2308 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; 2309 if ((timing == MMC_TIMING_MMC_HS200) || 2310 (timing == MMC_TIMING_UHS_SDR104)) 2311 ctrl_2 |= SDHCI_CTRL_UHS_SDR104; 2312 else if (timing == MMC_TIMING_UHS_SDR12) 2313 ctrl_2 |= SDHCI_CTRL_UHS_SDR12; 2314 else if (timing == MMC_TIMING_UHS_SDR25) 2315 ctrl_2 |= SDHCI_CTRL_UHS_SDR25; 2316 else if (timing == MMC_TIMING_UHS_SDR50) 2317 ctrl_2 |= SDHCI_CTRL_UHS_SDR50; 2318 else if ((timing == MMC_TIMING_UHS_DDR50) || 2319 (timing == MMC_TIMING_MMC_DDR52)) 2320 ctrl_2 |= SDHCI_CTRL_UHS_DDR50; 2321 else if (timing == MMC_TIMING_MMC_HS400) 2322 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */ 2323 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 2324 } 2325 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling); 2326 2327 static bool sdhci_timing_has_preset(unsigned char timing) 2328 { 2329 switch (timing) { 2330 case MMC_TIMING_UHS_SDR12: 2331 case MMC_TIMING_UHS_SDR25: 2332 case MMC_TIMING_UHS_SDR50: 2333 case MMC_TIMING_UHS_SDR104: 2334 case MMC_TIMING_UHS_DDR50: 2335 case MMC_TIMING_MMC_DDR52: 2336 return true; 2337 } 2338 return false; 2339 } 2340 2341 static bool sdhci_preset_needed(struct sdhci_host *host, unsigned char timing) 2342 { 2343 return !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) && 2344 sdhci_timing_has_preset(timing); 2345 } 2346 2347 static bool sdhci_presetable_values_change(struct sdhci_host *host, struct mmc_ios *ios) 2348 { 2349 /* 2350 * Preset Values are: Driver Strength, Clock Generator and SDCLK/RCLK 2351 * Frequency. Check if preset values need to be enabled, or the Driver 2352 * Strength needs updating. Note, clock changes are handled separately. 2353 */ 2354 return !host->preset_enabled && 2355 (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type); 2356 } 2357 2358 void sdhci_set_ios_common(struct mmc_host *mmc, struct mmc_ios *ios) 2359 { 2360 struct sdhci_host *host = mmc_priv(mmc); 2361 2362 /* 2363 * Reset the chip on each power off. 2364 * Should clear out any weird states. 
2365 */ 2366 if (ios->power_mode == MMC_POWER_OFF) { 2367 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 2368 sdhci_reinit(host); 2369 } 2370 2371 if (host->version >= SDHCI_SPEC_300 && 2372 (ios->power_mode == MMC_POWER_UP) && 2373 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) 2374 sdhci_enable_preset_value(host, false); 2375 2376 if (!ios->clock || ios->clock != host->clock) { 2377 host->ops->set_clock(host, ios->clock); 2378 host->clock = ios->clock; 2379 2380 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK && 2381 host->clock) { 2382 host->timeout_clk = mmc->actual_clock ? 2383 mmc->actual_clock / 1000 : 2384 host->clock / 1000; 2385 mmc->max_busy_timeout = 2386 host->ops->get_max_timeout_count ? 2387 host->ops->get_max_timeout_count(host) : 2388 1 << 27; 2389 mmc->max_busy_timeout /= host->timeout_clk; 2390 } 2391 } 2392 } 2393 EXPORT_SYMBOL_GPL(sdhci_set_ios_common); 2394 2395 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 2396 { 2397 struct sdhci_host *host = mmc_priv(mmc); 2398 bool reinit_uhs = host->reinit_uhs; 2399 bool turning_on_clk; 2400 u8 ctrl; 2401 2402 host->reinit_uhs = false; 2403 2404 if (ios->power_mode == MMC_POWER_UNDEFINED) 2405 return; 2406 2407 if (host->flags & SDHCI_DEVICE_DEAD) { 2408 if (!IS_ERR(mmc->supply.vmmc) && 2409 ios->power_mode == MMC_POWER_OFF) 2410 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 2411 return; 2412 } 2413 2414 turning_on_clk = ios->clock != host->clock && ios->clock && !host->clock; 2415 2416 sdhci_set_ios_common(mmc, ios); 2417 2418 if (host->ops->set_power) 2419 host->ops->set_power(host, ios->power_mode, ios->vdd); 2420 else 2421 sdhci_set_power(host, ios->power_mode, ios->vdd); 2422 2423 if (host->ops->platform_send_init_74_clocks) 2424 host->ops->platform_send_init_74_clocks(host, ios->power_mode); 2425 2426 host->ops->set_bus_width(host, ios->bus_width); 2427 2428 /* 2429 * Special case to avoid multiple clock changes during voltage 2430 * switching. 2431 */ 2432 if (!reinit_uhs && 2433 turning_on_clk && 2434 host->timing == ios->timing && 2435 host->version >= SDHCI_SPEC_300 && 2436 !sdhci_presetable_values_change(host, ios)) 2437 return; 2438 2439 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 2440 2441 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) { 2442 if (ios->timing == MMC_TIMING_SD_HS || 2443 ios->timing == MMC_TIMING_MMC_HS || 2444 ios->timing == MMC_TIMING_MMC_HS400 || 2445 ios->timing == MMC_TIMING_MMC_HS200 || 2446 ios->timing == MMC_TIMING_MMC_DDR52 || 2447 ios->timing == MMC_TIMING_UHS_SDR50 || 2448 ios->timing == MMC_TIMING_UHS_SDR104 || 2449 ios->timing == MMC_TIMING_UHS_DDR50 || 2450 ios->timing == MMC_TIMING_UHS_SDR25) 2451 ctrl |= SDHCI_CTRL_HISPD; 2452 else 2453 ctrl &= ~SDHCI_CTRL_HISPD; 2454 } 2455 2456 if (host->version >= SDHCI_SPEC_300) { 2457 u16 clk, ctrl_2; 2458 2459 /* 2460 * According to SDHCI Spec v3.00, if the Preset Value 2461 * Enable in the Host Control 2 register is set, we 2462 * need to reset SD Clock Enable before changing High 2463 * Speed Enable to avoid generating clock glitches. 2464 */ 2465 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); 2466 if (clk & SDHCI_CLOCK_CARD_EN) { 2467 clk &= ~SDHCI_CLOCK_CARD_EN; 2468 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); 2469 } 2470 2471 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2472 2473 if (!host->preset_enabled) { 2474 /* 2475 * We only need to set Driver Strength if the 2476 * preset value enable is not set. 
2477 */ 2478 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2479 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK; 2480 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A) 2481 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A; 2482 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B) 2483 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B; 2484 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C) 2485 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C; 2486 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D) 2487 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D; 2488 else { 2489 pr_warn("%s: invalid driver type, default to driver type B\n", 2490 mmc_hostname(mmc)); 2491 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B; 2492 } 2493 2494 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); 2495 host->drv_type = ios->drv_type; 2496 } 2497 2498 host->ops->set_uhs_signaling(host, ios->timing); 2499 host->timing = ios->timing; 2500 2501 if (sdhci_preset_needed(host, ios->timing)) { 2502 u16 preset; 2503 2504 sdhci_enable_preset_value(host, true); 2505 preset = sdhci_get_preset_value(host); 2506 ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK, 2507 preset); 2508 host->drv_type = ios->drv_type; 2509 } 2510 2511 /* Re-enable SD Clock */ 2512 host->ops->set_clock(host, host->clock); 2513 } else 2514 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 2515 } 2516 EXPORT_SYMBOL_GPL(sdhci_set_ios); 2517 2518 static int sdhci_get_cd(struct mmc_host *mmc) 2519 { 2520 struct sdhci_host *host = mmc_priv(mmc); 2521 int gpio_cd = mmc_gpio_get_cd(mmc); 2522 2523 if (host->flags & SDHCI_DEVICE_DEAD) 2524 return 0; 2525 2526 /* If nonremovable, assume that the card is always present. */ 2527 if (!mmc_card_is_removable(mmc)) 2528 return 1; 2529 2530 /* 2531 * Try slot gpio detect; if defined, it takes precedence 2532 * over built-in controller functionality 2533 */ 2534 if (gpio_cd >= 0) 2535 return !!gpio_cd; 2536 2537 /* If polling, assume that the card is always present.
*/ 2538 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) 2539 return 1; 2540 2541 /* Host native card detect */ 2542 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); 2543 } 2544 2545 int sdhci_get_cd_nogpio(struct mmc_host *mmc) 2546 { 2547 struct sdhci_host *host = mmc_priv(mmc); 2548 unsigned long flags; 2549 int ret = 0; 2550 2551 spin_lock_irqsave(&host->lock, flags); 2552 2553 if (host->flags & SDHCI_DEVICE_DEAD) 2554 goto out; 2555 2556 ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); 2557 out: 2558 spin_unlock_irqrestore(&host->lock, flags); 2559 2560 return ret; 2561 } 2562 EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio); 2563 2564 int sdhci_get_ro(struct mmc_host *mmc) 2565 { 2566 struct sdhci_host *host = mmc_priv(mmc); 2567 bool allow_invert = false; 2568 int is_readonly; 2569 2570 if (host->flags & SDHCI_DEVICE_DEAD) { 2571 is_readonly = 0; 2572 } else if (host->ops->get_ro) { 2573 is_readonly = host->ops->get_ro(host); 2574 } else if (mmc_host_can_gpio_ro(mmc)) { 2575 is_readonly = mmc_gpio_get_ro(mmc); 2576 /* Do not invert twice */ 2577 allow_invert = !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH); 2578 } else { 2579 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE) 2580 & SDHCI_WRITE_PROTECT); 2581 allow_invert = true; 2582 } 2583 2584 if (is_readonly >= 0 && 2585 allow_invert && 2586 (host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT)) 2587 is_readonly = !is_readonly; 2588 2589 return is_readonly; 2590 } 2591 EXPORT_SYMBOL_GPL(sdhci_get_ro); 2592 2593 static void sdhci_hw_reset(struct mmc_host *mmc) 2594 { 2595 struct sdhci_host *host = mmc_priv(mmc); 2596 2597 if (host->ops && host->ops->hw_reset) 2598 host->ops->hw_reset(host); 2599 } 2600 2601 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) 2602 { 2603 if (!(host->flags & SDHCI_DEVICE_DEAD)) { 2604 if (enable) 2605 host->ier |= SDHCI_INT_CARD_INT; 2606 else 2607 host->ier &= ~SDHCI_INT_CARD_INT; 2608 2609 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2610 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2611 } 2612 } 2613 2614 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) 2615 { 2616 struct sdhci_host *host = mmc_priv(mmc); 2617 unsigned long flags; 2618 2619 if (enable) 2620 pm_runtime_get_noresume(mmc_dev(mmc)); 2621 2622 spin_lock_irqsave(&host->lock, flags); 2623 sdhci_enable_sdio_irq_nolock(host, enable); 2624 spin_unlock_irqrestore(&host->lock, flags); 2625 2626 if (!enable) 2627 pm_runtime_put_noidle(mmc_dev(mmc)); 2628 } 2629 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq); 2630 2631 static void sdhci_ack_sdio_irq(struct mmc_host *mmc) 2632 { 2633 struct sdhci_host *host = mmc_priv(mmc); 2634 unsigned long flags; 2635 2636 spin_lock_irqsave(&host->lock, flags); 2637 sdhci_enable_sdio_irq_nolock(host, true); 2638 spin_unlock_irqrestore(&host->lock, flags); 2639 } 2640 2641 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, 2642 struct mmc_ios *ios) 2643 { 2644 struct sdhci_host *host = mmc_priv(mmc); 2645 u16 ctrl; 2646 int ret; 2647 2648 /* 2649 * Signal Voltage Switching is only applicable for Host Controllers 2650 * v3.00 and above. 
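 * On older controllers there is nothing to switch, so report success.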
2651 */ 2652 if (host->version < SDHCI_SPEC_300) 2653 return 0; 2654 2655 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2656 2657 switch (ios->signal_voltage) { 2658 case MMC_SIGNAL_VOLTAGE_330: 2659 if (!(host->flags & SDHCI_SIGNALING_330)) 2660 return -EINVAL; 2661 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ 2662 ctrl &= ~SDHCI_CTRL_VDD_180; 2663 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2664 2665 if (!IS_ERR(mmc->supply.vqmmc)) { 2666 ret = mmc_regulator_set_vqmmc(mmc, ios); 2667 if (ret < 0) { 2668 pr_warn("%s: Switching to 3.3V signalling voltage failed\n", 2669 mmc_hostname(mmc)); 2670 return -EIO; 2671 } 2672 } 2673 /* Wait for 5ms */ 2674 usleep_range(5000, 5500); 2675 2676 /* 3.3V regulator output should be stable within 5 ms */ 2677 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2678 if (!(ctrl & SDHCI_CTRL_VDD_180)) 2679 return 0; 2680 2681 pr_warn("%s: 3.3V regulator output did not become stable\n", 2682 mmc_hostname(mmc)); 2683 2684 return -EAGAIN; 2685 case MMC_SIGNAL_VOLTAGE_180: 2686 if (!(host->flags & SDHCI_SIGNALING_180)) 2687 return -EINVAL; 2688 if (!IS_ERR(mmc->supply.vqmmc)) { 2689 ret = mmc_regulator_set_vqmmc(mmc, ios); 2690 if (ret < 0) { 2691 pr_warn("%s: Switching to 1.8V signalling voltage failed\n", 2692 mmc_hostname(mmc)); 2693 return -EIO; 2694 } 2695 } 2696 2697 /* 2698 * Enable 1.8V Signal Enable in the Host Control2 2699 * register 2700 */ 2701 ctrl |= SDHCI_CTRL_VDD_180; 2702 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2703 2704 /* Some controller need to do more when switching */ 2705 if (host->ops->voltage_switch) 2706 host->ops->voltage_switch(host); 2707 2708 /* 1.8V regulator output should be stable within 5 ms */ 2709 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2710 if (ctrl & SDHCI_CTRL_VDD_180) 2711 return 0; 2712 2713 pr_warn("%s: 1.8V regulator output did not become stable\n", 2714 mmc_hostname(mmc)); 2715 2716 return -EAGAIN; 2717 case MMC_SIGNAL_VOLTAGE_120: 2718 if (!(host->flags & SDHCI_SIGNALING_120)) 2719 return -EINVAL; 2720 if (!IS_ERR(mmc->supply.vqmmc)) { 2721 ret = mmc_regulator_set_vqmmc(mmc, ios); 2722 if (ret < 0) { 2723 pr_warn("%s: Switching to 1.2V signalling voltage failed\n", 2724 mmc_hostname(mmc)); 2725 return -EIO; 2726 } 2727 } 2728 return 0; 2729 default: 2730 /* No signal voltage switch required */ 2731 return 0; 2732 } 2733 } 2734 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch); 2735 2736 static int sdhci_card_busy(struct mmc_host *mmc) 2737 { 2738 struct sdhci_host *host = mmc_priv(mmc); 2739 u32 present_state; 2740 2741 /* Check whether DAT[0] is 0 */ 2742 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); 2743 2744 return !(present_state & SDHCI_DATA_0_LVL_MASK); 2745 } 2746 2747 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) 2748 { 2749 struct sdhci_host *host = mmc_priv(mmc); 2750 unsigned long flags; 2751 2752 spin_lock_irqsave(&host->lock, flags); 2753 host->flags |= SDHCI_HS400_TUNING; 2754 spin_unlock_irqrestore(&host->lock, flags); 2755 2756 return 0; 2757 } 2758 2759 void sdhci_start_tuning(struct sdhci_host *host) 2760 { 2761 u16 ctrl; 2762 2763 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2764 ctrl |= SDHCI_CTRL_EXEC_TUNING; 2765 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND) 2766 ctrl |= SDHCI_CTRL_TUNED_CLK; 2767 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2768 2769 /* 2770 * As per the Host Controller spec v3.00, tuning command 2771 * generates Buffer Read Ready interrupt, so enable that. 
2772 * 2773 * Note: The spec clearly says that when tuning sequence 2774 * is being performed, the controller does not generate 2775 * interrupts other than Buffer Read Ready interrupt. But 2776 * to make sure we don't hit a controller bug, we _only_ 2777 * enable Buffer Read Ready interrupt here. 2778 */ 2779 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE); 2780 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE); 2781 } 2782 EXPORT_SYMBOL_GPL(sdhci_start_tuning); 2783 2784 void sdhci_end_tuning(struct sdhci_host *host) 2785 { 2786 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 2787 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 2788 } 2789 EXPORT_SYMBOL_GPL(sdhci_end_tuning); 2790 2791 void sdhci_reset_tuning(struct sdhci_host *host) 2792 { 2793 u16 ctrl; 2794 2795 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2796 ctrl &= ~SDHCI_CTRL_TUNED_CLK; 2797 ctrl &= ~SDHCI_CTRL_EXEC_TUNING; 2798 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 2799 } 2800 EXPORT_SYMBOL_GPL(sdhci_reset_tuning); 2801 2802 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode) 2803 { 2804 sdhci_reset_tuning(host); 2805 2806 sdhci_reset_for(host, TUNING_ABORT); 2807 2808 sdhci_end_tuning(host); 2809 2810 mmc_send_abort_tuning(host->mmc, opcode); 2811 } 2812 EXPORT_SYMBOL_GPL(sdhci_abort_tuning); 2813 2814 /* 2815 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI 2816 * tuning command does not have a data payload (or rather the hardware does it 2817 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command 2818 * interrupt setup is different to other commands and there is no timeout 2819 * interrupt so special handling is needed. 2820 */ 2821 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode) 2822 { 2823 struct mmc_host *mmc = host->mmc; 2824 struct mmc_command cmd = {}; 2825 struct mmc_request mrq = {}; 2826 unsigned long flags; 2827 u32 b = host->sdma_boundary; 2828 2829 spin_lock_irqsave(&host->lock, flags); 2830 2831 cmd.opcode = opcode; 2832 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; 2833 cmd.mrq = &mrq; 2834 2835 mrq.cmd = &cmd; 2836 /* 2837 * In response to CMD19, the card sends 64 bytes of tuning 2838 * block to the Host Controller. So we set the block size 2839 * to 64 here. 2840 */ 2841 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 && 2842 mmc->ios.bus_width == MMC_BUS_WIDTH_8) 2843 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE); 2844 else 2845 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE); 2846 2847 /* 2848 * The tuning block is sent by the card to the host controller. 2849 * So we set the TRNS_READ bit in the Transfer Mode register. 2850 * This also takes care of setting DMA Enable and Multi Block 2851 * Select in the same register to 0. 
2852 */ 2853 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); 2854 2855 if (!sdhci_send_command_retry(host, &cmd, flags)) { 2856 spin_unlock_irqrestore(&host->lock, flags); 2857 host->tuning_done = 0; 2858 return; 2859 } 2860 2861 host->cmd = NULL; 2862 2863 sdhci_del_timer(host, &mrq); 2864 2865 host->tuning_done = 0; 2866 2867 spin_unlock_irqrestore(&host->lock, flags); 2868 2869 /* Wait for Buffer Read Ready interrupt */ 2870 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1), 2871 msecs_to_jiffies(50)); 2872 2873 } 2874 EXPORT_SYMBOL_GPL(sdhci_send_tuning); 2875 2876 int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) 2877 { 2878 int i; 2879 2880 /* 2881 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number 2882 * of loops reaches tuning loop count. 2883 */ 2884 for (i = 0; i < host->tuning_loop_count; i++) { 2885 u16 ctrl; 2886 2887 sdhci_send_tuning(host, opcode); 2888 2889 if (!host->tuning_done) { 2890 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n", 2891 mmc_hostname(host->mmc)); 2892 sdhci_abort_tuning(host, opcode); 2893 return -ETIMEDOUT; 2894 } 2895 2896 /* Spec does not require a delay between tuning cycles */ 2897 if (host->tuning_delay > 0) 2898 mdelay(host->tuning_delay); 2899 2900 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2901 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) { 2902 if (ctrl & SDHCI_CTRL_TUNED_CLK) 2903 return 0; /* Success! */ 2904 break; 2905 } 2906 2907 } 2908 2909 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n", 2910 mmc_hostname(host->mmc)); 2911 sdhci_reset_tuning(host); 2912 return -EAGAIN; 2913 } 2914 EXPORT_SYMBOL_GPL(__sdhci_execute_tuning); 2915 2916 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) 2917 { 2918 struct sdhci_host *host = mmc_priv(mmc); 2919 int err = 0; 2920 unsigned int tuning_count = 0; 2921 bool hs400_tuning; 2922 2923 hs400_tuning = host->flags & SDHCI_HS400_TUNING; 2924 2925 if (host->tuning_mode == SDHCI_TUNING_MODE_1) 2926 tuning_count = host->tuning_count; 2927 2928 /* 2929 * The Host Controller needs tuning in case of SDR104 and DDR50 2930 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in 2931 * the Capabilities register. 2932 * If the Host Controller supports the HS200 mode then the 2933 * tuning function has to be executed. 2934 */ 2935 switch (host->timing) { 2936 /* HS400 tuning is done in HS200 mode */ 2937 case MMC_TIMING_MMC_HS400: 2938 err = -EINVAL; 2939 goto out; 2940 2941 case MMC_TIMING_MMC_HS200: 2942 /* 2943 * Periodic re-tuning for HS400 is not expected to be needed, so 2944 * disable it here. 
2945 */ 2946 if (hs400_tuning) 2947 tuning_count = 0; 2948 break; 2949 2950 case MMC_TIMING_UHS_SDR104: 2951 case MMC_TIMING_UHS_DDR50: 2952 break; 2953 2954 case MMC_TIMING_UHS_SDR50: 2955 if (host->flags & SDHCI_SDR50_NEEDS_TUNING) 2956 break; 2957 fallthrough; 2958 2959 default: 2960 goto out; 2961 } 2962 2963 if (host->ops->platform_execute_tuning) { 2964 err = host->ops->platform_execute_tuning(host, opcode); 2965 goto out; 2966 } 2967 2968 mmc->retune_period = tuning_count; 2969 2970 if (host->tuning_delay < 0) 2971 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK; 2972 2973 sdhci_start_tuning(host); 2974 2975 host->tuning_err = __sdhci_execute_tuning(host, opcode); 2976 2977 sdhci_end_tuning(host); 2978 out: 2979 host->flags &= ~SDHCI_HS400_TUNING; 2980 2981 return err; 2982 } 2983 EXPORT_SYMBOL_GPL(sdhci_execute_tuning); 2984 2985 void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) 2986 { 2987 /* Host Controller v3.00 defines preset value registers */ 2988 if (host->version < SDHCI_SPEC_300) 2989 return; 2990 2991 /* 2992 * We only enable or disable Preset Value if they are not already 2993 * enabled or disabled respectively. Otherwise, we bail out. 2994 */ 2995 if (host->preset_enabled != enable) { 2996 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); 2997 2998 if (enable) 2999 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; 3000 else 3001 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; 3002 3003 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); 3004 3005 if (enable) 3006 host->flags |= SDHCI_PV_ENABLED; 3007 else 3008 host->flags &= ~SDHCI_PV_ENABLED; 3009 3010 host->preset_enabled = enable; 3011 } 3012 } 3013 EXPORT_SYMBOL_GPL(sdhci_enable_preset_value); 3014 3015 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 3016 int err) 3017 { 3018 struct mmc_data *data = mrq->data; 3019 3020 if (data->host_cookie != COOKIE_UNMAPPED) 3021 dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, 3022 mmc_get_dma_dir(data)); 3023 3024 data->host_cookie = COOKIE_UNMAPPED; 3025 } 3026 3027 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) 3028 { 3029 struct sdhci_host *host = mmc_priv(mmc); 3030 3031 mrq->data->host_cookie = COOKIE_UNMAPPED; 3032 3033 /* 3034 * No pre-mapping in the pre hook if we're using the bounce buffer, 3035 * for that we would need two bounce buffers since one buffer is 3036 * in flight when this is getting called. 
3037 */ 3038 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer) 3039 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED); 3040 } 3041 3042 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err) 3043 { 3044 if (host->data_cmd) { 3045 host->data_cmd->error = err; 3046 sdhci_finish_mrq(host, host->data_cmd->mrq); 3047 } 3048 3049 if (host->cmd) { 3050 host->cmd->error = err; 3051 sdhci_finish_mrq(host, host->cmd->mrq); 3052 } 3053 } 3054 3055 static void sdhci_card_event(struct mmc_host *mmc) 3056 { 3057 struct sdhci_host *host = mmc_priv(mmc); 3058 unsigned long flags; 3059 int present; 3060 3061 /* First check if client has provided their own card event */ 3062 if (host->ops->card_event) 3063 host->ops->card_event(host); 3064 3065 present = mmc->ops->get_cd(mmc); 3066 3067 spin_lock_irqsave(&host->lock, flags); 3068 3069 /* Check sdhci_has_requests() first in case we are runtime suspended */ 3070 if (sdhci_has_requests(host) && !present) { 3071 pr_err("%s: Card removed during transfer!\n", 3072 mmc_hostname(mmc)); 3073 pr_err("%s: Resetting controller.\n", 3074 mmc_hostname(mmc)); 3075 3076 sdhci_reset_for(host, CARD_REMOVED); 3077 3078 sdhci_error_out_mrqs(host, -ENOMEDIUM); 3079 } 3080 3081 spin_unlock_irqrestore(&host->lock, flags); 3082 } 3083 3084 static const struct mmc_host_ops sdhci_ops = { 3085 .request = sdhci_request, 3086 .post_req = sdhci_post_req, 3087 .pre_req = sdhci_pre_req, 3088 .set_ios = sdhci_set_ios, 3089 .get_cd = sdhci_get_cd, 3090 .get_ro = sdhci_get_ro, 3091 .card_hw_reset = sdhci_hw_reset, 3092 .enable_sdio_irq = sdhci_enable_sdio_irq, 3093 .ack_sdio_irq = sdhci_ack_sdio_irq, 3094 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, 3095 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, 3096 .execute_tuning = sdhci_execute_tuning, 3097 .card_event = sdhci_card_event, 3098 .card_busy = sdhci_card_busy, 3099 }; 3100 3101 /*****************************************************************************\ 3102 * * 3103 * Request done * 3104 * * 3105 \*****************************************************************************/ 3106 3107 void sdhci_request_done_dma(struct sdhci_host *host, struct mmc_request *mrq) 3108 { 3109 struct mmc_data *data = mrq->data; 3110 3111 if (data && data->host_cookie == COOKIE_MAPPED) { 3112 if (host->bounce_buffer) { 3113 /* 3114 * On reads, copy the bounced data into the 3115 * sglist 3116 */ 3117 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) { 3118 unsigned int length = data->bytes_xfered; 3119 3120 if (length > host->bounce_buffer_size) { 3121 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n", 3122 mmc_hostname(host->mmc), 3123 host->bounce_buffer_size, 3124 data->bytes_xfered); 3125 /* Cap it down and continue */ 3126 length = host->bounce_buffer_size; 3127 } 3128 dma_sync_single_for_cpu(mmc_dev(host->mmc), 3129 host->bounce_addr, 3130 host->bounce_buffer_size, 3131 DMA_FROM_DEVICE); 3132 sg_copy_from_buffer(data->sg, 3133 data->sg_len, 3134 host->bounce_buffer, 3135 length); 3136 } else { 3137 /* No copying, just switch ownership */ 3138 dma_sync_single_for_cpu(mmc_dev(host->mmc), 3139 host->bounce_addr, 3140 host->bounce_buffer_size, 3141 mmc_get_dma_dir(data)); 3142 } 3143 } else { 3144 /* Unmap the raw data */ 3145 dma_unmap_sg(mmc_dev(host->mmc), data->sg, 3146 data->sg_len, 3147 mmc_get_dma_dir(data)); 3148 } 3149 data->host_cookie = COOKIE_UNMAPPED; 3150 } 3151 } 3152 EXPORT_SYMBOL_GPL(sdhci_request_done_dma); 3153 3154 static bool 
sdhci_request_done(struct sdhci_host *host) 3155 { 3156 unsigned long flags; 3157 struct mmc_request *mrq; 3158 int i; 3159 3160 spin_lock_irqsave(&host->lock, flags); 3161 3162 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3163 mrq = host->mrqs_done[i]; 3164 if (mrq) 3165 break; 3166 } 3167 3168 if (!mrq) { 3169 spin_unlock_irqrestore(&host->lock, flags); 3170 return true; 3171 } 3172 3173 /* 3174 * The controller needs a reset of internal state machines 3175 * upon error conditions. 3176 */ 3177 if (sdhci_needs_reset(host, mrq)) { 3178 /* 3179 * Do not finish until command and data lines are available for 3180 * reset. Note there can only be one other mrq, so it cannot 3181 * also be in mrqs_done, otherwise host->cmd and host->data_cmd 3182 * would both be null. 3183 */ 3184 if (host->cmd || host->data_cmd) { 3185 spin_unlock_irqrestore(&host->lock, flags); 3186 return true; 3187 } 3188 3189 /* Some controllers need this kick or reset won't work here */ 3190 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) 3191 /* This is to force an update */ 3192 host->ops->set_clock(host, host->clock); 3193 3194 sdhci_reset_for(host, REQUEST_ERROR); 3195 3196 host->pending_reset = false; 3197 } 3198 3199 /* 3200 * Always unmap the data buffers if they were mapped by 3201 * sdhci_prepare_data() whenever we finish with a request. 3202 * This avoids leaking DMA mappings on error. 3203 */ 3204 if (host->flags & SDHCI_REQ_USE_DMA) { 3205 struct mmc_data *data = mrq->data; 3206 3207 if (host->use_external_dma && data && 3208 (mrq->cmd->error || data->error)) { 3209 struct dma_chan *chan = sdhci_external_dma_channel(host, data); 3210 3211 host->mrqs_done[i] = NULL; 3212 spin_unlock_irqrestore(&host->lock, flags); 3213 dmaengine_terminate_sync(chan); 3214 spin_lock_irqsave(&host->lock, flags); 3215 sdhci_set_mrq_done(host, mrq); 3216 } 3217 3218 sdhci_request_done_dma(host, mrq); 3219 } 3220 3221 host->mrqs_done[i] = NULL; 3222 3223 spin_unlock_irqrestore(&host->lock, flags); 3224 3225 if (host->ops->request_done) 3226 host->ops->request_done(host, mrq); 3227 else 3228 mmc_request_done(host->mmc, mrq); 3229 3230 return false; 3231 } 3232 3233 void sdhci_complete_work(struct work_struct *work) 3234 { 3235 struct sdhci_host *host = container_of(work, struct sdhci_host, 3236 complete_work); 3237 3238 while (!sdhci_request_done(host)) 3239 ; 3240 } 3241 EXPORT_SYMBOL_GPL(sdhci_complete_work); 3242 3243 static void sdhci_timeout_timer(struct timer_list *t) 3244 { 3245 struct sdhci_host *host; 3246 unsigned long flags; 3247 3248 host = timer_container_of(host, t, timer); 3249 3250 spin_lock_irqsave(&host->lock, flags); 3251 3252 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) { 3253 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n", 3254 mmc_hostname(host->mmc)); 3255 sdhci_err_stats_inc(host, REQ_TIMEOUT); 3256 sdhci_dumpregs(host); 3257 3258 host->cmd->error = -ETIMEDOUT; 3259 sdhci_finish_mrq(host, host->cmd->mrq); 3260 } 3261 3262 spin_unlock_irqrestore(&host->lock, flags); 3263 } 3264 3265 static void sdhci_timeout_data_timer(struct timer_list *t) 3266 { 3267 struct sdhci_host *host; 3268 unsigned long flags; 3269 3270 host = timer_container_of(host, t, data_timer); 3271 3272 spin_lock_irqsave(&host->lock, flags); 3273 3274 if (host->data || host->data_cmd || 3275 (host->cmd && sdhci_data_line_cmd(host->cmd))) { 3276 pr_err("%s: Timeout waiting for hardware interrupt.\n", 3277 mmc_hostname(host->mmc)); 3278 sdhci_err_stats_inc(host, REQ_TIMEOUT); 3279 sdhci_dumpregs(host); 3280 3281 if (host->data) { 
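				/* A data transfer timed out: flag the error and let the completion work finish the request */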
3282 host->data->error = -ETIMEDOUT; 3283 __sdhci_finish_data(host, true); 3284 queue_work(host->complete_wq, &host->complete_work); 3285 } else if (host->data_cmd) { 3286 host->data_cmd->error = -ETIMEDOUT; 3287 sdhci_finish_mrq(host, host->data_cmd->mrq); 3288 } else { 3289 host->cmd->error = -ETIMEDOUT; 3290 sdhci_finish_mrq(host, host->cmd->mrq); 3291 } 3292 } 3293 3294 spin_unlock_irqrestore(&host->lock, flags); 3295 } 3296 3297 /*****************************************************************************\ 3298 * * 3299 * Interrupt handling * 3300 * * 3301 \*****************************************************************************/ 3302 3303 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p) 3304 { 3305 /* Handle auto-CMD12 error */ 3306 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) { 3307 struct mmc_request *mrq = host->data_cmd->mrq; 3308 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 3309 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 3310 SDHCI_INT_DATA_TIMEOUT : 3311 SDHCI_INT_DATA_CRC; 3312 3313 /* Treat auto-CMD12 error the same as data error */ 3314 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) { 3315 *intmask_p |= data_err_bit; 3316 return; 3317 } 3318 } 3319 3320 if (!host->cmd) { 3321 /* 3322 * SDHCI recovers from errors by resetting the cmd and data 3323 * circuits. Until that is done, there very well might be more 3324 * interrupts, so ignore them in that case. 3325 */ 3326 if (host->pending_reset) 3327 return; 3328 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n", 3329 mmc_hostname(host->mmc), (unsigned)intmask); 3330 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3331 sdhci_dumpregs(host); 3332 return; 3333 } 3334 3335 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC | 3336 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) { 3337 if (intmask & SDHCI_INT_TIMEOUT) { 3338 host->cmd->error = -ETIMEDOUT; 3339 sdhci_err_stats_inc(host, CMD_TIMEOUT); 3340 } else { 3341 host->cmd->error = -EILSEQ; 3342 if (!mmc_op_tuning(host->cmd->opcode)) 3343 sdhci_err_stats_inc(host, CMD_CRC); 3344 } 3345 /* Treat data command CRC error the same as data CRC error */ 3346 if (host->cmd->data && 3347 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) == 3348 SDHCI_INT_CRC) { 3349 host->cmd = NULL; 3350 *intmask_p |= SDHCI_INT_DATA_CRC; 3351 return; 3352 } 3353 3354 __sdhci_finish_mrq(host, host->cmd->mrq); 3355 return; 3356 } 3357 3358 /* Handle auto-CMD23 error */ 3359 if (intmask & SDHCI_INT_AUTO_CMD_ERR) { 3360 struct mmc_request *mrq = host->cmd->mrq; 3361 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS); 3362 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ? 
-ETIMEDOUT : 3364 -EILSEQ; 3365 3366 sdhci_err_stats_inc(host, AUTO_CMD); 3367 3368 if (sdhci_auto_cmd23(host, mrq)) { 3369 mrq->sbc->error = err; 3370 __sdhci_finish_mrq(host, mrq); 3371 return; 3372 } 3373 } 3374 3375 if (intmask & SDHCI_INT_RESPONSE) 3376 sdhci_finish_command(host); 3377 } 3378 3379 static void sdhci_adma_show_error(struct sdhci_host *host) 3380 { 3381 void *desc = host->adma_table; 3382 dma_addr_t dma = host->adma_addr; 3383 3384 sdhci_dumpregs(host); 3385 3386 while (true) { 3387 struct sdhci_adma2_64_desc *dma_desc = desc; 3388 3389 if (host->flags & SDHCI_USE_64_BIT_DMA) 3390 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", 3391 (unsigned long long)dma, 3392 le32_to_cpu(dma_desc->addr_hi), 3393 le32_to_cpu(dma_desc->addr_lo), 3394 le16_to_cpu(dma_desc->len), 3395 le16_to_cpu(dma_desc->cmd)); 3396 else 3397 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", 3398 (unsigned long long)dma, 3399 le32_to_cpu(dma_desc->addr_lo), 3400 le16_to_cpu(dma_desc->len), 3401 le16_to_cpu(dma_desc->cmd)); 3402 3403 desc += host->desc_sz; 3404 dma += host->desc_sz; 3405 3406 if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) 3407 break; 3408 } 3409 } 3410 3411 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) 3412 { 3413 /* 3414 * CMD19 generates _only_ a Buffer Read Ready interrupt when it is 3415 * issued via sdhci_send_tuning(). 3416 * The case of PIO-mode tuning via mmc_send_tuning() must be excluded 3417 * here: otherwise sdhci_transfer_pio() would never be called, SDHCI_INT_DATA_AVAIL 3418 * would stay asserted and we would be stuck in an interrupt storm. 3419 */ 3420 if (intmask & SDHCI_INT_DATA_AVAIL && !host->data) { 3421 if (mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) { 3422 host->tuning_done = 1; 3423 wake_up(&host->buf_ready_int); 3424 return; 3425 } 3426 } 3427 3428 if (!host->data) { 3429 struct mmc_command *data_cmd = host->data_cmd; 3430 3431 /* 3432 * The "data complete" interrupt is also used to 3433 * indicate that a busy state has ended. See comment 3434 * above in sdhci_cmd_irq(). 3435 */ 3436 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) { 3437 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 3438 host->data_cmd = NULL; 3439 data_cmd->error = -ETIMEDOUT; 3440 sdhci_err_stats_inc(host, CMD_TIMEOUT); 3441 __sdhci_finish_mrq(host, data_cmd->mrq); 3442 return; 3443 } 3444 if (intmask & SDHCI_INT_DATA_END) { 3445 host->data_cmd = NULL; 3446 /* 3447 * Some cards handle busy-end interrupt 3448 * before the command completed, so make 3449 * sure we do things in the proper order. 3450 */ 3451 if (host->cmd == data_cmd) 3452 return; 3453 3454 __sdhci_finish_mrq(host, data_cmd->mrq); 3455 return; 3456 } 3457 } 3458 3459 /* 3460 * SDHCI recovers from errors by resetting the cmd and data 3461 * circuits. Until that is done, there very well might be more 3462 * interrupts, so ignore them in that case.
3463 */ 3464 if (host->pending_reset) 3465 return; 3466 3467 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n", 3468 mmc_hostname(host->mmc), (unsigned)intmask); 3469 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3470 sdhci_dumpregs(host); 3471 3472 return; 3473 } 3474 3475 if (intmask & SDHCI_INT_DATA_TIMEOUT) { 3476 host->data->error = -ETIMEDOUT; 3477 sdhci_err_stats_inc(host, DAT_TIMEOUT); 3478 } else if (intmask & SDHCI_INT_DATA_END_BIT) { 3479 host->data->error = -EILSEQ; 3480 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3481 sdhci_err_stats_inc(host, DAT_CRC); 3482 } else if ((intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_TUNING_ERROR)) && 3483 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) 3484 != MMC_BUS_TEST_R) { 3485 host->data->error = -EILSEQ; 3486 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 3487 sdhci_err_stats_inc(host, DAT_CRC); 3488 if (intmask & SDHCI_INT_TUNING_ERROR) { 3489 u16 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); 3490 3491 ctrl2 &= ~SDHCI_CTRL_TUNED_CLK; 3492 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2); 3493 } 3494 } else if (intmask & SDHCI_INT_ADMA_ERROR) { 3495 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc), 3496 intmask); 3497 sdhci_adma_show_error(host); 3498 sdhci_err_stats_inc(host, ADMA); 3499 host->data->error = -EIO; 3500 if (host->ops->adma_workaround) 3501 host->ops->adma_workaround(host, intmask); 3502 } 3503 3504 if (host->data->error) 3505 sdhci_finish_data(host); 3506 else { 3507 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) 3508 sdhci_transfer_pio(host); 3509 3510 /* 3511 * We currently don't do anything fancy with DMA 3512 * boundaries, but as we can't disable the feature 3513 * we need to at least restart the transfer. 3514 * 3515 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS) 3516 * should return a valid address to continue from, but as 3517 * some controllers are faulty, don't trust them. 3518 */ 3519 if (intmask & SDHCI_INT_DMA_END) { 3520 dma_addr_t dmastart, dmanow; 3521 3522 dmastart = sdhci_sdma_address(host); 3523 dmanow = dmastart + host->data->bytes_xfered; 3524 /* 3525 * Force update to the next DMA block boundary. 3526 */ 3527 dmanow = (dmanow & 3528 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + 3529 SDHCI_DEFAULT_BOUNDARY_SIZE; 3530 host->data->bytes_xfered = dmanow - dmastart; 3531 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n", 3532 &dmastart, host->data->bytes_xfered, &dmanow); 3533 sdhci_set_sdma_addr(host, dmanow); 3534 } 3535 3536 if (intmask & SDHCI_INT_DATA_END) { 3537 if (host->cmd == host->data_cmd) { 3538 /* 3539 * Data managed to finish before the 3540 * command completed. Make sure we do 3541 * things in the proper order. 
3542 */ 3543 host->data_early = 1; 3544 } else { 3545 sdhci_finish_data(host); 3546 } 3547 } 3548 } 3549 } 3550 3551 static inline bool sdhci_defer_done(struct sdhci_host *host, 3552 struct mmc_request *mrq) 3553 { 3554 struct mmc_data *data = mrq->data; 3555 3556 return host->pending_reset || host->always_defer_done || 3557 ((host->flags & SDHCI_REQ_USE_DMA) && data && 3558 data->host_cookie == COOKIE_MAPPED); 3559 } 3560 3561 static irqreturn_t sdhci_irq(int irq, void *dev_id) 3562 { 3563 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0}; 3564 irqreturn_t result = IRQ_NONE; 3565 struct sdhci_host *host = dev_id; 3566 u32 intmask, mask, unexpected = 0; 3567 int max_loops = 16; 3568 int i; 3569 3570 spin_lock(&host->lock); 3571 3572 if (host->runtime_suspended) { 3573 spin_unlock(&host->lock); 3574 return IRQ_NONE; 3575 } 3576 3577 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 3578 if (!intmask || intmask == 0xffffffff) { 3579 result = IRQ_NONE; 3580 goto out; 3581 } 3582 3583 do { 3584 DBG("IRQ status 0x%08x\n", intmask); 3585 3586 if (host->ops->irq) { 3587 intmask = host->ops->irq(host, intmask); 3588 if (!intmask) 3589 goto cont; 3590 } 3591 3592 /* Clear selected interrupts. */ 3593 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 3594 SDHCI_INT_BUS_POWER); 3595 sdhci_writel(host, mask, SDHCI_INT_STATUS); 3596 3597 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 3598 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 3599 SDHCI_CARD_PRESENT; 3600 3601 /* 3602 * There is a observation on i.mx esdhc. INSERT 3603 * bit will be immediately set again when it gets 3604 * cleared, if a card is inserted. We have to mask 3605 * the irq to prevent interrupt storm which will 3606 * freeze the system. And the REMOVE gets the 3607 * same situation. 3608 * 3609 * More testing are needed here to ensure it works 3610 * for other platforms though. 3611 */ 3612 host->ier &= ~(SDHCI_INT_CARD_INSERT | 3613 SDHCI_INT_CARD_REMOVE); 3614 host->ier |= present ? 
SDHCI_INT_CARD_REMOVE : 3615 SDHCI_INT_CARD_INSERT; 3616 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3617 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3618 3619 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | 3620 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); 3621 3622 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT | 3623 SDHCI_INT_CARD_REMOVE); 3624 result = IRQ_WAKE_THREAD; 3625 } 3626 3627 if (intmask & SDHCI_INT_CMD_MASK) 3628 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask); 3629 3630 if (intmask & SDHCI_INT_DATA_MASK) 3631 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); 3632 3633 if (intmask & SDHCI_INT_BUS_POWER) 3634 pr_err("%s: Card is consuming too much power!\n", 3635 mmc_hostname(host->mmc)); 3636 3637 if (intmask & SDHCI_INT_RETUNE) 3638 mmc_retune_needed(host->mmc); 3639 3640 if ((intmask & SDHCI_INT_CARD_INT) && 3641 (host->ier & SDHCI_INT_CARD_INT)) { 3642 sdhci_enable_sdio_irq_nolock(host, false); 3643 sdio_signal_irq(host->mmc); 3644 } 3645 3646 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | 3647 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | 3648 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER | 3649 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT); 3650 3651 if (intmask) { 3652 unexpected |= intmask; 3653 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 3654 } 3655 cont: 3656 if (result == IRQ_NONE) 3657 result = IRQ_HANDLED; 3658 3659 intmask = sdhci_readl(host, SDHCI_INT_STATUS); 3660 } while (intmask && --max_loops); 3661 3662 /* Determine if mrqs can be completed immediately */ 3663 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3664 struct mmc_request *mrq = host->mrqs_done[i]; 3665 3666 if (!mrq) 3667 continue; 3668 3669 if (sdhci_defer_done(host, mrq)) { 3670 result = IRQ_WAKE_THREAD; 3671 } else { 3672 mrqs_done[i] = mrq; 3673 host->mrqs_done[i] = NULL; 3674 } 3675 } 3676 out: 3677 if (host->deferred_cmd) 3678 result = IRQ_WAKE_THREAD; 3679 3680 spin_unlock(&host->lock); 3681 3682 /* Process mrqs ready for immediate completion */ 3683 for (i = 0; i < SDHCI_MAX_MRQS; i++) { 3684 if (!mrqs_done[i]) 3685 continue; 3686 3687 if (host->ops->request_done) 3688 host->ops->request_done(host, mrqs_done[i]); 3689 else 3690 mmc_request_done(host->mmc, mrqs_done[i]); 3691 } 3692 3693 if (unexpected) { 3694 pr_err("%s: Unexpected interrupt 0x%08x.\n", 3695 mmc_hostname(host->mmc), unexpected); 3696 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 3697 sdhci_dumpregs(host); 3698 } 3699 3700 return result; 3701 } 3702 3703 irqreturn_t sdhci_thread_irq(int irq, void *dev_id) 3704 { 3705 struct sdhci_host *host = dev_id; 3706 struct mmc_command *cmd; 3707 unsigned long flags; 3708 u32 isr; 3709 3710 while (!sdhci_request_done(host)) 3711 ; 3712 3713 spin_lock_irqsave(&host->lock, flags); 3714 3715 isr = host->thread_isr; 3716 host->thread_isr = 0; 3717 3718 cmd = host->deferred_cmd; 3719 if (cmd && !sdhci_send_command_retry(host, cmd, flags)) 3720 sdhci_finish_mrq(host, cmd->mrq); 3721 3722 spin_unlock_irqrestore(&host->lock, flags); 3723 3724 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { 3725 struct mmc_host *mmc = host->mmc; 3726 3727 mmc->ops->card_event(mmc); 3728 mmc_detect_change(mmc, msecs_to_jiffies(200)); 3729 } 3730 3731 return IRQ_HANDLED; 3732 } 3733 EXPORT_SYMBOL_GPL(sdhci_thread_irq); 3734 3735 /*****************************************************************************\ 3736 * * 3737 * Suspend/resume * 3738 * * 3739 \*****************************************************************************/ 3740 3741 #ifdef CONFIG_PM 3742 3743 
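/*
 * The card-detect interrupt can act as a wakeup source only when the slot is
 * removable, native card detection is not marked broken by a quirk, and no
 * card-detect GPIO is in use.
 */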
static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host) 3744 { 3745 return mmc_card_is_removable(host->mmc) && 3746 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 3747 !mmc_host_can_gpio_cd(host->mmc); 3748 } 3749 3750 /* 3751 * To enable wakeup events, the corresponding events have to be enabled in 3752 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal 3753 * Table' in the SD Host Controller Standard Specification. 3754 * It is useless to restore SDHCI_INT_ENABLE state in 3755 * sdhci_disable_irq_wakeups() since it will be set by 3756 * sdhci_enable_card_detection() or sdhci_init(). 3757 */ 3758 bool sdhci_enable_irq_wakeups(struct sdhci_host *host) 3759 { 3760 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE | 3761 SDHCI_WAKE_ON_INT; 3762 u32 irq_val = 0; 3763 u8 wake_val = 0; 3764 u8 val; 3765 3766 if (sdhci_cd_irq_can_wakeup(host)) { 3767 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE; 3768 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE; 3769 } 3770 3771 if (mmc_card_wake_sdio_irq(host->mmc)) { 3772 wake_val |= SDHCI_WAKE_ON_INT; 3773 irq_val |= SDHCI_INT_CARD_INT; 3774 } 3775 3776 if (!irq_val) 3777 return false; 3778 3779 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3780 val &= ~mask; 3781 val |= wake_val; 3782 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3783 3784 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE); 3785 3786 host->irq_wake_enabled = !enable_irq_wake(host->irq); 3787 3788 return host->irq_wake_enabled; 3789 } 3790 EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups); 3791 3792 void sdhci_disable_irq_wakeups(struct sdhci_host *host) 3793 { 3794 u8 val; 3795 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE 3796 | SDHCI_WAKE_ON_INT; 3797 3798 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); 3799 val &= ~mask; 3800 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); 3801 3802 disable_irq_wake(host->irq); 3803 3804 host->irq_wake_enabled = false; 3805 } 3806 EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups); 3807 3808 int sdhci_suspend_host(struct sdhci_host *host) 3809 { 3810 sdhci_disable_card_detection(host); 3811 3812 mmc_retune_timer_stop(host->mmc); 3813 3814 if (!device_may_wakeup(mmc_dev(host->mmc)) || 3815 !sdhci_enable_irq_wakeups(host)) { 3816 host->ier = 0; 3817 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 3818 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 3819 free_irq(host->irq, host); 3820 } 3821 3822 return 0; 3823 } 3824 3825 EXPORT_SYMBOL_GPL(sdhci_suspend_host); 3826 3827 int sdhci_resume_host(struct sdhci_host *host) 3828 { 3829 struct mmc_host *mmc = host->mmc; 3830 int ret = 0; 3831 3832 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3833 if (host->ops->enable_dma) 3834 host->ops->enable_dma(host); 3835 } 3836 3837 if ((mmc->pm_flags & MMC_PM_KEEP_POWER) && 3838 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { 3839 /* Card keeps power but host controller does not */ 3840 sdhci_init(host, 0); 3841 host->pwr = 0; 3842 host->clock = 0; 3843 host->reinit_uhs = true; 3844 mmc->ops->set_ios(mmc, &mmc->ios); 3845 } else { 3846 sdhci_init(host, (mmc->pm_flags & MMC_PM_KEEP_POWER)); 3847 } 3848 3849 if (host->irq_wake_enabled) { 3850 sdhci_disable_irq_wakeups(host); 3851 } else { 3852 ret = request_threaded_irq(host->irq, sdhci_irq, 3853 sdhci_thread_irq, IRQF_SHARED, 3854 mmc_hostname(mmc), host); 3855 if (ret) 3856 return ret; 3857 } 3858 3859 sdhci_enable_card_detection(host); 3860 3861 return ret; 3862 } 3863 3864 EXPORT_SYMBOL_GPL(sdhci_resume_host); 3865 3866 int sdhci_runtime_suspend_host(struct 
sdhci_host *host) 3867 { 3868 unsigned long flags; 3869 3870 mmc_retune_timer_stop(host->mmc); 3871 3872 spin_lock_irqsave(&host->lock, flags); 3873 host->ier &= SDHCI_INT_CARD_INT; 3874 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3875 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3876 spin_unlock_irqrestore(&host->lock, flags); 3877 3878 synchronize_hardirq(host->irq); 3879 3880 spin_lock_irqsave(&host->lock, flags); 3881 host->runtime_suspended = true; 3882 spin_unlock_irqrestore(&host->lock, flags); 3883 3884 return 0; 3885 } 3886 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); 3887 3888 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset) 3889 { 3890 struct mmc_host *mmc = host->mmc; 3891 unsigned long flags; 3892 int host_flags = host->flags; 3893 3894 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 3895 if (host->ops->enable_dma) 3896 host->ops->enable_dma(host); 3897 } 3898 3899 sdhci_init(host, soft_reset); 3900 3901 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED && 3902 mmc->ios.power_mode != MMC_POWER_OFF) { 3903 /* Force clock and power re-program */ 3904 host->pwr = 0; 3905 host->clock = 0; 3906 host->reinit_uhs = true; 3907 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios); 3908 mmc->ops->set_ios(mmc, &mmc->ios); 3909 3910 if ((host_flags & SDHCI_PV_ENABLED) && 3911 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) { 3912 spin_lock_irqsave(&host->lock, flags); 3913 sdhci_enable_preset_value(host, true); 3914 spin_unlock_irqrestore(&host->lock, flags); 3915 } 3916 3917 if ((mmc->caps2 & MMC_CAP2_HS400_ES) && 3918 mmc->ops->hs400_enhanced_strobe) 3919 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios); 3920 } 3921 3922 spin_lock_irqsave(&host->lock, flags); 3923 3924 host->runtime_suspended = false; 3925 3926 /* Enable SDIO IRQ */ 3927 if (sdio_irq_claimed(mmc)) 3928 sdhci_enable_sdio_irq_nolock(host, true); 3929 3930 /* Enable Card Detection */ 3931 sdhci_enable_card_detection(host); 3932 3933 spin_unlock_irqrestore(&host->lock, flags); 3934 3935 return 0; 3936 } 3937 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host); 3938 3939 #endif /* CONFIG_PM */ 3940 3941 /*****************************************************************************\ 3942 * * 3943 * Command Queue Engine (CQE) helpers * 3944 * * 3945 \*****************************************************************************/ 3946 3947 void sdhci_cqe_enable(struct mmc_host *mmc) 3948 { 3949 struct sdhci_host *host = mmc_priv(mmc); 3950 unsigned long flags; 3951 u8 ctrl; 3952 3953 spin_lock_irqsave(&host->lock, flags); 3954 3955 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 3956 ctrl &= ~SDHCI_CTRL_DMA_MASK; 3957 /* 3958 * Host from V4.10 supports ADMA3 DMA type. 3959 * ADMA3 performs integrated descriptor which is more suitable 3960 * for cmd queuing to fetch both command and transfer descriptors. 
3961 */ 3962 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3)) 3963 ctrl |= SDHCI_CTRL_ADMA3; 3964 else if (host->flags & SDHCI_USE_64_BIT_DMA) 3965 ctrl |= SDHCI_CTRL_ADMA64; 3966 else 3967 ctrl |= SDHCI_CTRL_ADMA32; 3968 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); 3969 3970 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512), 3971 SDHCI_BLOCK_SIZE); 3972 3973 /* Set maximum timeout */ 3974 sdhci_set_timeout(host, NULL); 3975 3976 host->ier = host->cqe_ier; 3977 3978 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); 3979 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); 3980 3981 host->cqe_on = true; 3982 3983 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n", 3984 mmc_hostname(mmc), host->ier, 3985 sdhci_readl(host, SDHCI_INT_STATUS)); 3986 3987 spin_unlock_irqrestore(&host->lock, flags); 3988 } 3989 EXPORT_SYMBOL_GPL(sdhci_cqe_enable); 3990 3991 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery) 3992 { 3993 struct sdhci_host *host = mmc_priv(mmc); 3994 unsigned long flags; 3995 3996 spin_lock_irqsave(&host->lock, flags); 3997 3998 sdhci_set_default_irqs(host); 3999 4000 host->cqe_on = false; 4001 4002 if (recovery) 4003 sdhci_reset_for(host, CQE_RECOVERY); 4004 4005 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n", 4006 mmc_hostname(mmc), host->ier, 4007 sdhci_readl(host, SDHCI_INT_STATUS)); 4008 4009 spin_unlock_irqrestore(&host->lock, flags); 4010 } 4011 EXPORT_SYMBOL_GPL(sdhci_cqe_disable); 4012 4013 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error, 4014 int *data_error) 4015 { 4016 u32 mask; 4017 4018 if (!host->cqe_on) 4019 return false; 4020 4021 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) { 4022 *cmd_error = -EILSEQ; 4023 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 4024 sdhci_err_stats_inc(host, CMD_CRC); 4025 } else if (intmask & SDHCI_INT_TIMEOUT) { 4026 *cmd_error = -ETIMEDOUT; 4027 sdhci_err_stats_inc(host, CMD_TIMEOUT); 4028 } else 4029 *cmd_error = 0; 4030 4031 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC | SDHCI_INT_TUNING_ERROR)) { 4032 *data_error = -EILSEQ; 4033 if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) 4034 sdhci_err_stats_inc(host, DAT_CRC); 4035 } else if (intmask & SDHCI_INT_DATA_TIMEOUT) { 4036 *data_error = -ETIMEDOUT; 4037 sdhci_err_stats_inc(host, DAT_TIMEOUT); 4038 } else if (intmask & SDHCI_INT_ADMA_ERROR) { 4039 *data_error = -EIO; 4040 sdhci_err_stats_inc(host, ADMA); 4041 } else 4042 *data_error = 0; 4043 4044 /* Clear selected interrupts. 
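 * Only the interrupts that CQE is interested in are acknowledged here;
 * anything left over is reported as unexpected further below.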
*/ 4045 mask = intmask & host->cqe_ier; 4046 sdhci_writel(host, mask, SDHCI_INT_STATUS); 4047 4048 if (intmask & SDHCI_INT_BUS_POWER) 4049 pr_err("%s: Card is consuming too much power!\n", 4050 mmc_hostname(host->mmc)); 4051 4052 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR); 4053 if (intmask) { 4054 sdhci_writel(host, intmask, SDHCI_INT_STATUS); 4055 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n", 4056 mmc_hostname(host->mmc), intmask); 4057 sdhci_err_stats_inc(host, UNEXPECTED_IRQ); 4058 sdhci_dumpregs(host); 4059 } 4060 4061 return true; 4062 } 4063 EXPORT_SYMBOL_GPL(sdhci_cqe_irq); 4064 4065 /*****************************************************************************\ 4066 * * 4067 * Device allocation/registration * 4068 * * 4069 \*****************************************************************************/ 4070 4071 struct sdhci_host *sdhci_alloc_host(struct device *dev, 4072 size_t priv_size) 4073 { 4074 struct mmc_host *mmc; 4075 struct sdhci_host *host; 4076 4077 WARN_ON(dev == NULL); 4078 4079 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev); 4080 if (!mmc) 4081 return ERR_PTR(-ENOMEM); 4082 4083 host = mmc_priv(mmc); 4084 host->mmc = mmc; 4085 host->mmc_host_ops = sdhci_ops; 4086 mmc->ops = &host->mmc_host_ops; 4087 4088 host->flags = SDHCI_SIGNALING_330; 4089 4090 host->cqe_ier = SDHCI_CQE_INT_MASK; 4091 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK; 4092 4093 host->tuning_delay = -1; 4094 host->tuning_loop_count = MAX_TUNING_LOOP; 4095 4096 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG; 4097 4098 /* 4099 * The DMA table descriptor count is calculated as the maximum 4100 * number of segments times 2, to allow for an alignment 4101 * descriptor for each segment, plus 1 for a nop end descriptor. 4102 */ 4103 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1; 4104 host->max_adma = 65536; 4105 4106 host->max_timeout_count = 0xE; 4107 4108 host->complete_work_fn = sdhci_complete_work; 4109 host->thread_irq_fn = sdhci_thread_irq; 4110 4111 return host; 4112 } 4113 4114 EXPORT_SYMBOL_GPL(sdhci_alloc_host); 4115 4116 static int sdhci_set_dma_mask(struct sdhci_host *host) 4117 { 4118 struct mmc_host *mmc = host->mmc; 4119 struct device *dev = mmc_dev(mmc); 4120 int ret = -EINVAL; 4121 4122 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA) 4123 host->flags &= ~SDHCI_USE_64_BIT_DMA; 4124 4125 /* Try 64-bit mask if hardware is capable of it */ 4126 if (host->flags & SDHCI_USE_64_BIT_DMA) { 4127 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 4128 if (ret) { 4129 pr_warn("%s: Failed to set 64-bit DMA mask.\n", 4130 mmc_hostname(mmc)); 4131 host->flags &= ~SDHCI_USE_64_BIT_DMA; 4132 } 4133 } 4134 4135 /* 32-bit mask as default & fallback */ 4136 if (ret) { 4137 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 4138 if (ret) 4139 pr_warn("%s: Failed to set 32-bit DMA mask.\n", 4140 mmc_hostname(mmc)); 4141 } 4142 4143 return ret; 4144 } 4145 4146 void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver, 4147 const u32 *caps, const u32 *caps1) 4148 { 4149 u16 v; 4150 u64 dt_caps_mask = 0; 4151 u64 dt_caps = 0; 4152 4153 if (host->read_caps) 4154 return; 4155 4156 host->read_caps = true; 4157 4158 if (debug_quirks) 4159 host->quirks = debug_quirks; 4160 4161 if (debug_quirks2) 4162 host->quirks2 = debug_quirks2; 4163 4164 sdhci_reset_for_all(host); 4165 4166 if (host->v4_mode) 4167 sdhci_do_enable_v4_mode(host); 4168 4169 device_property_read_u64(mmc_dev(host->mmc), 4170 "sdhci-caps-mask", &dt_caps_mask); 4171 device_property_read_u64(mmc_dev(host->mmc), 
4172 "sdhci-caps", &dt_caps); 4173 4174 v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION); 4175 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT; 4176 4177 if (caps) { 4178 host->caps = *caps; 4179 } else { 4180 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); 4181 host->caps &= ~lower_32_bits(dt_caps_mask); 4182 host->caps |= lower_32_bits(dt_caps); 4183 } 4184 4185 if (host->version < SDHCI_SPEC_300) 4186 return; 4187 4188 if (caps1) { 4189 host->caps1 = *caps1; 4190 } else { 4191 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); 4192 host->caps1 &= ~upper_32_bits(dt_caps_mask); 4193 host->caps1 |= upper_32_bits(dt_caps); 4194 } 4195 } 4196 EXPORT_SYMBOL_GPL(__sdhci_read_caps); 4197 4198 static void sdhci_allocate_bounce_buffer(struct sdhci_host *host) 4199 { 4200 struct mmc_host *mmc = host->mmc; 4201 unsigned int max_blocks; 4202 unsigned int bounce_size; 4203 int ret; 4204 4205 /* 4206 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer 4207 * has diminishing returns, this is probably because SD/MMC 4208 * cards are usually optimized to handle this size of requests. 4209 */ 4210 bounce_size = SZ_64K; 4211 /* 4212 * Adjust downwards to maximum request size if this is less 4213 * than our segment size, else hammer down the maximum 4214 * request size to the maximum buffer size. 4215 */ 4216 if (mmc->max_req_size < bounce_size) 4217 bounce_size = mmc->max_req_size; 4218 max_blocks = bounce_size / 512; 4219 4220 /* 4221 * When we just support one segment, we can get significant 4222 * speedups by the help of a bounce buffer to group scattered 4223 * reads/writes together. 4224 */ 4225 host->bounce_buffer = devm_kmalloc(mmc_dev(mmc), 4226 bounce_size, 4227 GFP_KERNEL); 4228 if (!host->bounce_buffer) { 4229 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n", 4230 mmc_hostname(mmc), 4231 bounce_size); 4232 /* 4233 * Exiting with zero here makes sure we proceed with 4234 * mmc->max_segs == 1. 4235 */ 4236 return; 4237 } 4238 4239 host->bounce_addr = dma_map_single(mmc_dev(mmc), 4240 host->bounce_buffer, 4241 bounce_size, 4242 DMA_BIDIRECTIONAL); 4243 ret = dma_mapping_error(mmc_dev(mmc), host->bounce_addr); 4244 if (ret) { 4245 devm_kfree(mmc_dev(mmc), host->bounce_buffer); 4246 host->bounce_buffer = NULL; 4247 /* Again fall back to max_segs == 1 */ 4248 return; 4249 } 4250 4251 host->bounce_buffer_size = bounce_size; 4252 4253 /* Lie about this since we're bouncing */ 4254 mmc->max_segs = max_blocks; 4255 mmc->max_seg_size = bounce_size; 4256 mmc->max_req_size = bounce_size; 4257 4258 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n", 4259 mmc_hostname(mmc), max_blocks, bounce_size); 4260 } 4261 4262 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host) 4263 { 4264 /* 4265 * According to SD Host Controller spec v4.10, bit[27] added from 4266 * version 4.10 in Capabilities Register is used as 64-bit System 4267 * Address support for V4 mode. 
4268 */ 4269 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) 4270 return host->caps & SDHCI_CAN_64BIT_V4; 4271 4272 return host->caps & SDHCI_CAN_64BIT; 4273 } 4274 4275 int sdhci_setup_host(struct sdhci_host *host) 4276 { 4277 struct mmc_host *mmc; 4278 u32 max_current_caps; 4279 unsigned int ocr_avail; 4280 unsigned int override_timeout_clk; 4281 u32 max_clk; 4282 int ret = 0; 4283 bool enable_vqmmc = false; 4284 4285 WARN_ON(host == NULL); 4286 if (host == NULL) 4287 return -EINVAL; 4288 4289 mmc = host->mmc; 4290 4291 /* 4292 * If there are external regulators, get them. Note this must be done 4293 * early before resetting the host and reading the capabilities so that 4294 * the host can take the appropriate action if regulators are not 4295 * available. 4296 */ 4297 if (!mmc->supply.vqmmc) { 4298 ret = mmc_regulator_get_supply(mmc); 4299 if (ret) 4300 return ret; 4301 enable_vqmmc = true; 4302 } 4303 4304 DBG("Version: 0x%08x | Present: 0x%08x\n", 4305 sdhci_readw(host, SDHCI_HOST_VERSION), 4306 sdhci_readl(host, SDHCI_PRESENT_STATE)); 4307 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n", 4308 sdhci_readl(host, SDHCI_CAPABILITIES), 4309 sdhci_readl(host, SDHCI_CAPABILITIES_1)); 4310 4311 sdhci_read_caps(host); 4312 4313 override_timeout_clk = host->timeout_clk; 4314 4315 if (host->version > SDHCI_SPEC_420) { 4316 pr_err("%s: Unknown controller version (%d). You may experience problems.\n", 4317 mmc_hostname(mmc), host->version); 4318 } 4319 4320 if (host->quirks & SDHCI_QUIRK_FORCE_DMA) 4321 host->flags |= SDHCI_USE_SDMA; 4322 else if (!(host->caps & SDHCI_CAN_DO_SDMA)) 4323 DBG("Controller doesn't have SDMA capability\n"); 4324 else 4325 host->flags |= SDHCI_USE_SDMA; 4326 4327 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) && 4328 (host->flags & SDHCI_USE_SDMA)) { 4329 DBG("Disabling DMA as it is marked broken\n"); 4330 host->flags &= ~SDHCI_USE_SDMA; 4331 } 4332 4333 if ((host->version >= SDHCI_SPEC_200) && 4334 (host->caps & SDHCI_CAN_DO_ADMA2)) 4335 host->flags |= SDHCI_USE_ADMA; 4336 4337 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && 4338 (host->flags & SDHCI_USE_ADMA)) { 4339 DBG("Disabling ADMA as it is marked broken\n"); 4340 host->flags &= ~SDHCI_USE_ADMA; 4341 } 4342 4343 if (sdhci_can_64bit_dma(host)) 4344 host->flags |= SDHCI_USE_64_BIT_DMA; 4345 4346 if (host->use_external_dma) { 4347 ret = sdhci_external_dma_init(host); 4348 if (ret == -EPROBE_DEFER) 4349 goto unreg; 4350 /* 4351 * Fall back to use the DMA/PIO integrated in standard SDHCI 4352 * instead of external DMA devices. 
4353 */ 4354 else if (ret) 4355 sdhci_switch_external_dma(host, false); 4356 /* Disable internal DMA sources */ 4357 else 4358 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); 4359 } 4360 4361 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { 4362 if (host->ops->set_dma_mask) 4363 ret = host->ops->set_dma_mask(host); 4364 else 4365 ret = sdhci_set_dma_mask(host); 4366 4367 if (!ret && host->ops->enable_dma) 4368 ret = host->ops->enable_dma(host); 4369 4370 if (ret) { 4371 pr_warn("%s: No suitable DMA available - falling back to PIO\n", 4372 mmc_hostname(mmc)); 4373 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); 4374 4375 ret = 0; 4376 } 4377 } 4378 4379 /* SDMA does not support 64-bit DMA if v4 mode not set */ 4380 if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode) 4381 host->flags &= ~SDHCI_USE_SDMA; 4382 4383 if (host->flags & SDHCI_USE_ADMA) { 4384 dma_addr_t dma; 4385 void *buf; 4386 4387 if (!(host->flags & SDHCI_USE_64_BIT_DMA)) 4388 host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ; 4389 else if (!host->alloc_desc_sz) 4390 host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host); 4391 4392 host->desc_sz = host->alloc_desc_sz; 4393 host->adma_table_sz = host->adma_table_cnt * host->desc_sz; 4394 4395 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN; 4396 /* 4397 * Use zalloc to zero the reserved high 32-bits of 128-bit 4398 * descriptors so that they never need to be written. 4399 */ 4400 buf = dma_alloc_coherent(mmc_dev(mmc), 4401 host->align_buffer_sz + host->adma_table_sz, 4402 &dma, GFP_KERNEL); 4403 if (!buf) { 4404 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 4405 mmc_hostname(mmc)); 4406 host->flags &= ~SDHCI_USE_ADMA; 4407 } else if ((dma + host->align_buffer_sz) & 4408 (SDHCI_ADMA2_DESC_ALIGN - 1)) { 4409 pr_warn("%s: unable to allocate aligned ADMA descriptor\n", 4410 mmc_hostname(mmc)); 4411 host->flags &= ~SDHCI_USE_ADMA; 4412 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4413 host->adma_table_sz, buf, dma); 4414 } else { 4415 host->align_buffer = buf; 4416 host->align_addr = dma; 4417 4418 host->adma_table = buf + host->align_buffer_sz; 4419 host->adma_addr = dma + host->align_buffer_sz; 4420 } 4421 } 4422 4423 /* 4424 * If we use DMA, then it's up to the caller to set the DMA 4425 * mask, but PIO does not need the hw shim so we set a new 4426 * mask here in that case. 4427 */ 4428 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) { 4429 host->dma_mask = DMA_BIT_MASK(64); 4430 mmc_dev(mmc)->dma_mask = &host->dma_mask; 4431 } 4432 4433 if (host->version >= SDHCI_SPEC_300) 4434 host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps); 4435 else 4436 host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps); 4437 4438 host->max_clk *= 1000000; 4439 if (host->max_clk == 0 || host->quirks & 4440 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) { 4441 if (!host->ops->get_max_clock) { 4442 pr_err("%s: Hardware doesn't specify base clock frequency.\n", 4443 mmc_hostname(mmc)); 4444 ret = -ENODEV; 4445 goto undma; 4446 } 4447 host->max_clk = host->ops->get_max_clock(host); 4448 } 4449 4450 /* 4451 * In case of Host Controller v3.00, find out whether clock 4452 * multiplier is supported. 4453 */ 4454 host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1); 4455 4456 /* 4457 * In case the value in Clock Multiplier is 0, then programmable 4458 * clock mode is not supported, otherwise the actual clock 4459 * multiplier is one more than the value of Clock Multiplier 4460 * in the Capabilities Register. 
4461 */ 4462 if (host->clk_mul) 4463 host->clk_mul += 1; 4464 4465 /* 4466 * Set host parameters. 4467 */ 4468 max_clk = host->max_clk; 4469 4470 if (host->ops->get_min_clock) 4471 mmc->f_min = host->ops->get_min_clock(host); 4472 else if (host->version >= SDHCI_SPEC_300) { 4473 if (host->clk_mul) 4474 max_clk = host->max_clk * host->clk_mul; 4475 /* 4476 * Divided Clock Mode minimum clock rate is always less than 4477 * Programmable Clock Mode minimum clock rate. 4478 */ 4479 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 4480 } else 4481 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 4482 4483 if (!mmc->f_max || mmc->f_max > max_clk) 4484 mmc->f_max = max_clk; 4485 4486 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { 4487 host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps); 4488 4489 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT) 4490 host->timeout_clk *= 1000; 4491 4492 if (host->timeout_clk == 0) { 4493 if (!host->ops->get_timeout_clock) { 4494 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n", 4495 mmc_hostname(mmc)); 4496 ret = -ENODEV; 4497 goto undma; 4498 } 4499 4500 host->timeout_clk = 4501 DIV_ROUND_UP(host->ops->get_timeout_clock(host), 4502 1000); 4503 } 4504 4505 if (override_timeout_clk) 4506 host->timeout_clk = override_timeout_clk; 4507 4508 mmc->max_busy_timeout = host->ops->get_max_timeout_count ? 4509 host->ops->get_max_timeout_count(host) : 1 << 27; 4510 mmc->max_busy_timeout /= host->timeout_clk; 4511 } 4512 4513 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT && 4514 !host->ops->get_max_timeout_count) 4515 mmc->max_busy_timeout = 0; 4516 4517 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23; 4518 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; 4519 4520 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) 4521 host->flags |= SDHCI_AUTO_CMD12; 4522 4523 /* 4524 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO. 4525 * For v4 mode, SDMA may use Auto-CMD23 as well. 4526 */ 4527 if ((host->version >= SDHCI_SPEC_300) && 4528 ((host->flags & SDHCI_USE_ADMA) || 4529 !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) && 4530 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) { 4531 host->flags |= SDHCI_AUTO_CMD23; 4532 DBG("Auto-CMD23 available\n"); 4533 } else { 4534 DBG("Auto-CMD23 unavailable\n"); 4535 } 4536 4537 /* 4538 * A controller may support 8-bit width, but the board itself 4539 * might not have the pins brought out. Boards that support 4540 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in 4541 * their platform code before calling sdhci_add_host(), and we 4542 * won't assume 8-bit width for hosts without that CAP. 
4543 */ 4544 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) 4545 mmc->caps |= MMC_CAP_4_BIT_DATA; 4546 4547 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23) 4548 mmc->caps &= ~MMC_CAP_CMD23; 4549 4550 if (host->caps & SDHCI_CAN_DO_HISPD) 4551 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 4552 4553 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && 4554 mmc_card_is_removable(mmc) && 4555 mmc_gpio_get_cd(mmc) < 0) 4556 mmc->caps |= MMC_CAP_NEEDS_POLL; 4557 4558 if (!IS_ERR(mmc->supply.vqmmc)) { 4559 if (enable_vqmmc) { 4560 ret = regulator_enable(mmc->supply.vqmmc); 4561 host->sdhci_core_to_disable_vqmmc = !ret; 4562 } 4563 4564 /* If vqmmc provides no 1.8V signalling, then there's no UHS */ 4565 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000, 4566 1950000)) 4567 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | 4568 SDHCI_SUPPORT_SDR50 | 4569 SDHCI_SUPPORT_DDR50); 4570 4571 /* In eMMC case vqmmc might be a fixed 1.8V regulator */ 4572 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000, 4573 3600000)) 4574 host->flags &= ~SDHCI_SIGNALING_330; 4575 4576 if (ret) { 4577 pr_warn("%s: Failed to enable vqmmc regulator: %d\n", 4578 mmc_hostname(mmc), ret); 4579 mmc->supply.vqmmc = ERR_PTR(-EINVAL); 4580 } 4581 4582 } 4583 4584 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) { 4585 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 4586 SDHCI_SUPPORT_DDR50); 4587 /* 4588 * The SDHCI controller in a SoC might support HS200/HS400 4589 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property), 4590 * but if the board is modeled such that the IO lines are not 4591 * connected to 1.8v then HS200/HS400 cannot be supported. 4592 * Disable HS200/HS400 if the board does not have 1.8v connected 4593 * to the IO lines. (Applicable for other modes in 1.8v) 4594 */ 4595 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES); 4596 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS); 4597 } 4598 4599 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */ 4600 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | 4601 SDHCI_SUPPORT_DDR50)) 4602 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; 4603 4604 /* SDR104 supports also implies SDR50 support */ 4605 if (host->caps1 & SDHCI_SUPPORT_SDR104) { 4606 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50; 4607 /* SD3.0: SDR104 is supported so (for eMMC) the caps2 4608 * field can be promoted to support HS200. 4609 */ 4610 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200)) 4611 mmc->caps2 |= MMC_CAP2_HS200; 4612 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) { 4613 mmc->caps |= MMC_CAP_UHS_SDR50; 4614 } 4615 4616 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 && 4617 (host->caps1 & SDHCI_SUPPORT_HS400)) 4618 mmc->caps2 |= MMC_CAP2_HS400; 4619 4620 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) && 4621 (IS_ERR(mmc->supply.vqmmc) || 4622 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000, 4623 1300000))) 4624 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V; 4625 4626 if ((host->caps1 & SDHCI_SUPPORT_DDR50) && 4627 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50)) 4628 mmc->caps |= MMC_CAP_UHS_DDR50; 4629 4630 /* Does the host need tuning for SDR50? 
*/ 4631 if (host->caps1 & SDHCI_USE_SDR50_TUNING) 4632 host->flags |= SDHCI_SDR50_NEEDS_TUNING; 4633 4634 /* Driver Type(s) (A, C, D) supported by the host */ 4635 if (host->caps1 & SDHCI_DRIVER_TYPE_A) 4636 mmc->caps |= MMC_CAP_DRIVER_TYPE_A; 4637 if (host->caps1 & SDHCI_DRIVER_TYPE_C) 4638 mmc->caps |= MMC_CAP_DRIVER_TYPE_C; 4639 if (host->caps1 & SDHCI_DRIVER_TYPE_D) 4640 mmc->caps |= MMC_CAP_DRIVER_TYPE_D; 4641 4642 /* Initial value for re-tuning timer count */ 4643 host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK, 4644 host->caps1); 4645 4646 /* 4647 * In case Re-tuning Timer is not disabled, the actual value of 4648 * re-tuning timer will be 2 ^ (n - 1). 4649 */ 4650 if (host->tuning_count) 4651 host->tuning_count = 1 << (host->tuning_count - 1); 4652 4653 /* Re-tuning mode supported by the Host Controller */ 4654 host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1); 4655 4656 ocr_avail = 0; 4657 4658 /* 4659 * According to SD Host Controller spec v3.00, if the Host System 4660 * can afford more than 150mA, Host Driver should set XPC to 1. Also 4661 * the value is meaningful only if Voltage Support in the Capabilities 4662 * register is set. The actual current value is 4 times the register 4663 * value. 4664 */ 4665 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT); 4666 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) { 4667 int curr = regulator_get_current_limit(mmc->supply.vmmc); 4668 if (curr > 0) { 4669 4670 /* convert to SDHCI_MAX_CURRENT format */ 4671 curr = curr/1000; /* convert to mA */ 4672 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER; 4673 4674 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT); 4675 max_current_caps = 4676 FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) | 4677 FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) | 4678 FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr); 4679 } 4680 } 4681 4682 if (host->caps & SDHCI_CAN_VDD_330) { 4683 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; 4684 4685 mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK, 4686 max_current_caps) * 4687 SDHCI_MAX_CURRENT_MULTIPLIER; 4688 } 4689 if (host->caps & SDHCI_CAN_VDD_300) { 4690 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; 4691 4692 mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK, 4693 max_current_caps) * 4694 SDHCI_MAX_CURRENT_MULTIPLIER; 4695 } 4696 if (host->caps & SDHCI_CAN_VDD_180) { 4697 ocr_avail |= MMC_VDD_165_195; 4698 4699 mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK, 4700 max_current_caps) * 4701 SDHCI_MAX_CURRENT_MULTIPLIER; 4702 } 4703 4704 /* If OCR set by host, use it instead. */ 4705 if (host->ocr_mask) 4706 ocr_avail = host->ocr_mask; 4707 4708 /* If OCR set by external regulators, give it highest prio. 
 */
4709	if (mmc->ocr_avail)
4710		ocr_avail = mmc->ocr_avail;
4711
4712	mmc->ocr_avail = ocr_avail;
4713	mmc->ocr_avail_sdio = ocr_avail;
4714	if (host->ocr_avail_sdio)
4715		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
4716	mmc->ocr_avail_sd = ocr_avail;
4717	if (host->ocr_avail_sd)
4718		mmc->ocr_avail_sd &= host->ocr_avail_sd;
4719	else /* normal SD controllers don't support 1.8V */
4720		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
4721	mmc->ocr_avail_mmc = ocr_avail;
4722	if (host->ocr_avail_mmc)
4723		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
4724
4725	if (mmc->ocr_avail == 0) {
4726		pr_err("%s: Hardware doesn't report any supported voltages.\n",
4727		       mmc_hostname(mmc));
4728		ret = -ENODEV;
4729		goto unreg;
4730	}
4731
4732	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
4733			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
4734			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
4735	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
4736		host->flags |= SDHCI_SIGNALING_180;
4737
4738	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
4739		host->flags |= SDHCI_SIGNALING_120;
4740
4741	spin_lock_init(&host->lock);
4742
4743	/*
4744	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
4745	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
4746	 * is less anyway.
4747	 */
4748	mmc->max_req_size = 524288;
4749
4750	/*
4751	 * Maximum number of segments. Depends on whether the hardware
4752	 * can do scatter/gather.
4753	 */
4754	if (host->flags & SDHCI_USE_ADMA) {
4755		mmc->max_segs = SDHCI_MAX_SEGS;
4756	} else if (host->flags & SDHCI_USE_SDMA) {
4757		mmc->max_segs = 1;
4758		mmc->max_req_size = min_t(size_t, mmc->max_req_size,
4759					  dma_max_mapping_size(mmc_dev(mmc)));
4760	} else { /* PIO */
4761		mmc->max_segs = SDHCI_MAX_SEGS;
4762	}
4763
4764	/*
4765	 * Maximum segment size. Could be one segment with the maximum number
4766	 * of bytes. When doing hardware scatter/gather, each entry cannot
4767	 * be larger than 64 KiB though.
4768	 */
4769	if (host->flags & SDHCI_USE_ADMA) {
4770		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
4771			host->max_adma = 65532; /* 32-bit alignment */
4772			mmc->max_seg_size = 65535;
4773			/*
4774			 * sdhci_adma_table_pre() expects to define 1 DMA
4775			 * descriptor per segment, so the maximum segment size
4776			 * is set accordingly. SDHCI allows up to 64KiB per DMA
4777			 * descriptor (16-bit field), but some controllers do
4778			 * not support "zero means 65536", which reduces the
4779			 * maximum for them to 65535. That is a problem if
4780			 * PAGE_SIZE is 64KiB because the block layer does not
4781			 * support max_seg_size < PAGE_SIZE; however,
4782			 * sdhci_adma_table_pre() has a workaround that splits
4783			 * the descriptor in that case. Refer also to the
4784			 * comment in sdhci_adma_table_pre().
4785			 */
4786			if (mmc->max_seg_size < PAGE_SIZE)
4787				mmc->max_seg_size = PAGE_SIZE;
4788		} else {
4789			mmc->max_seg_size = 65536;
4790		}
4791	} else {
4792		mmc->max_seg_size = mmc->max_req_size;
4793	}
4794
4795	/*
4796	 * Maximum block size. This varies from controller to controller and
4797	 * is specified in the capabilities register.
4798 */ 4799 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) { 4800 mmc->max_blk_size = 2; 4801 } else { 4802 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >> 4803 SDHCI_MAX_BLOCK_SHIFT; 4804 if (mmc->max_blk_size >= 3) { 4805 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n", 4806 mmc_hostname(mmc)); 4807 mmc->max_blk_size = 0; 4808 } 4809 } 4810 4811 mmc->max_blk_size = 512 << mmc->max_blk_size; 4812 4813 /* 4814 * Maximum block count. 4815 */ 4816 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; 4817 4818 if (mmc->max_segs == 1) 4819 /* This may alter mmc->*_blk_* parameters */ 4820 sdhci_allocate_bounce_buffer(host); 4821 4822 return 0; 4823 4824 unreg: 4825 if (host->sdhci_core_to_disable_vqmmc) 4826 regulator_disable(mmc->supply.vqmmc); 4827 undma: 4828 if (host->align_buffer) 4829 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4830 host->adma_table_sz, host->align_buffer, 4831 host->align_addr); 4832 host->adma_table = NULL; 4833 host->align_buffer = NULL; 4834 4835 return ret; 4836 } 4837 EXPORT_SYMBOL_GPL(sdhci_setup_host); 4838 4839 void sdhci_cleanup_host(struct sdhci_host *host) 4840 { 4841 struct mmc_host *mmc = host->mmc; 4842 4843 if (host->sdhci_core_to_disable_vqmmc) 4844 regulator_disable(mmc->supply.vqmmc); 4845 4846 if (host->align_buffer) 4847 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4848 host->adma_table_sz, host->align_buffer, 4849 host->align_addr); 4850 4851 if (host->use_external_dma) 4852 sdhci_external_dma_release(host); 4853 4854 host->adma_table = NULL; 4855 host->align_buffer = NULL; 4856 } 4857 EXPORT_SYMBOL_GPL(sdhci_cleanup_host); 4858 4859 int __sdhci_add_host(struct sdhci_host *host) 4860 { 4861 unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI; 4862 struct mmc_host *mmc = host->mmc; 4863 int ret; 4864 4865 if ((mmc->caps2 & MMC_CAP2_CQE) && 4866 (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) { 4867 mmc->caps2 &= ~MMC_CAP2_CQE; 4868 mmc->cqe_ops = NULL; 4869 } 4870 4871 host->complete_wq = alloc_workqueue("sdhci", flags, 0); 4872 if (!host->complete_wq) 4873 return -ENOMEM; 4874 4875 INIT_WORK(&host->complete_work, host->complete_work_fn); 4876 4877 timer_setup(&host->timer, sdhci_timeout_timer, 0); 4878 timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0); 4879 4880 init_waitqueue_head(&host->buf_ready_int); 4881 4882 sdhci_init(host, 0); 4883 4884 ret = request_threaded_irq(host->irq, sdhci_irq, host->thread_irq_fn, 4885 IRQF_SHARED, mmc_hostname(mmc), host); 4886 if (ret) { 4887 pr_err("%s: Failed to request IRQ %d: %d\n", 4888 mmc_hostname(mmc), host->irq, ret); 4889 goto unwq; 4890 } 4891 4892 ret = sdhci_led_register(host); 4893 if (ret) { 4894 pr_err("%s: Failed to register LED device: %d\n", 4895 mmc_hostname(mmc), ret); 4896 goto unirq; 4897 } 4898 4899 ret = mmc_add_host(mmc); 4900 if (ret) 4901 goto unled; 4902 4903 pr_info("%s: SDHCI controller on %s [%s] using %s\n", 4904 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), 4905 host->use_external_dma ? "External DMA" : 4906 (host->flags & SDHCI_USE_ADMA) ? 4907 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" : 4908 (host->flags & SDHCI_USE_SDMA) ? 
"DMA" : "PIO"); 4909 4910 sdhci_enable_card_detection(host); 4911 4912 return 0; 4913 4914 unled: 4915 sdhci_led_unregister(host); 4916 unirq: 4917 sdhci_reset_for_all(host); 4918 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4919 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4920 free_irq(host->irq, host); 4921 unwq: 4922 destroy_workqueue(host->complete_wq); 4923 4924 return ret; 4925 } 4926 EXPORT_SYMBOL_GPL(__sdhci_add_host); 4927 4928 int sdhci_add_host(struct sdhci_host *host) 4929 { 4930 int ret; 4931 4932 ret = sdhci_setup_host(host); 4933 if (ret) 4934 return ret; 4935 4936 ret = __sdhci_add_host(host); 4937 if (ret) 4938 goto cleanup; 4939 4940 return 0; 4941 4942 cleanup: 4943 sdhci_cleanup_host(host); 4944 4945 return ret; 4946 } 4947 EXPORT_SYMBOL_GPL(sdhci_add_host); 4948 4949 void sdhci_remove_host(struct sdhci_host *host, int dead) 4950 { 4951 struct mmc_host *mmc = host->mmc; 4952 unsigned long flags; 4953 4954 if (dead) { 4955 spin_lock_irqsave(&host->lock, flags); 4956 4957 host->flags |= SDHCI_DEVICE_DEAD; 4958 4959 if (sdhci_has_requests(host)) { 4960 pr_err("%s: Controller removed during " 4961 " transfer!\n", mmc_hostname(mmc)); 4962 sdhci_error_out_mrqs(host, -ENOMEDIUM); 4963 } 4964 4965 spin_unlock_irqrestore(&host->lock, flags); 4966 } 4967 4968 sdhci_disable_card_detection(host); 4969 4970 mmc_remove_host(mmc); 4971 4972 sdhci_led_unregister(host); 4973 4974 if (!dead) 4975 sdhci_reset_for_all(host); 4976 4977 sdhci_writel(host, 0, SDHCI_INT_ENABLE); 4978 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); 4979 free_irq(host->irq, host); 4980 4981 timer_delete_sync(&host->timer); 4982 timer_delete_sync(&host->data_timer); 4983 4984 destroy_workqueue(host->complete_wq); 4985 4986 if (host->sdhci_core_to_disable_vqmmc) 4987 regulator_disable(mmc->supply.vqmmc); 4988 4989 if (host->align_buffer) 4990 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + 4991 host->adma_table_sz, host->align_buffer, 4992 host->align_addr); 4993 4994 if (host->use_external_dma) 4995 sdhci_external_dma_release(host); 4996 4997 host->adma_table = NULL; 4998 host->align_buffer = NULL; 4999 } 5000 5001 EXPORT_SYMBOL_GPL(sdhci_remove_host); 5002 5003 void sdhci_free_host(struct sdhci_host *host) 5004 { 5005 mmc_free_host(host->mmc); 5006 } 5007 5008 EXPORT_SYMBOL_GPL(sdhci_free_host); 5009 5010 /*****************************************************************************\ 5011 * * 5012 * Driver init/exit * 5013 * * 5014 \*****************************************************************************/ 5015 5016 static int __init sdhci_drv_init(void) 5017 { 5018 pr_info(DRIVER_NAME 5019 ": Secure Digital Host Controller Interface driver\n"); 5020 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); 5021 5022 return 0; 5023 } 5024 5025 static void __exit sdhci_drv_exit(void) 5026 { 5027 } 5028 5029 module_init(sdhci_drv_init); 5030 module_exit(sdhci_drv_exit); 5031 5032 module_param(debug_quirks, uint, 0444); 5033 module_param(debug_quirks2, uint, 0444); 5034 5035 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); 5036 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); 5037 MODULE_LICENSE("GPL"); 5038 5039 MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); 5040 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks."); 5041