1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> 4 * Copyright (C) 2013, Imagination Technologies 5 * 6 * JZ4740 SD/MMC controller driver 7 */ 8 9 #include <linux/bitops.h> 10 #include <linux/clk.h> 11 #include <linux/delay.h> 12 #include <linux/dmaengine.h> 13 #include <linux/dma-mapping.h> 14 #include <linux/err.h> 15 #include <linux/interrupt.h> 16 #include <linux/io.h> 17 #include <linux/irq.h> 18 #include <linux/mmc/host.h> 19 #include <linux/mmc/slot-gpio.h> 20 #include <linux/module.h> 21 #include <linux/of.h> 22 #include <linux/pinctrl/consumer.h> 23 #include <linux/platform_device.h> 24 #include <linux/property.h> 25 #include <linux/regulator/consumer.h> 26 #include <linux/scatterlist.h> 27 28 #include <asm/cacheflush.h> 29 30 #define JZ_REG_MMC_STRPCL 0x00 31 #define JZ_REG_MMC_STATUS 0x04 32 #define JZ_REG_MMC_CLKRT 0x08 33 #define JZ_REG_MMC_CMDAT 0x0C 34 #define JZ_REG_MMC_RESTO 0x10 35 #define JZ_REG_MMC_RDTO 0x14 36 #define JZ_REG_MMC_BLKLEN 0x18 37 #define JZ_REG_MMC_NOB 0x1C 38 #define JZ_REG_MMC_SNOB 0x20 39 #define JZ_REG_MMC_IMASK 0x24 40 #define JZ_REG_MMC_IREG 0x28 41 #define JZ_REG_MMC_CMD 0x2C 42 #define JZ_REG_MMC_ARG 0x30 43 #define JZ_REG_MMC_RESP_FIFO 0x34 44 #define JZ_REG_MMC_RXFIFO 0x38 45 #define JZ_REG_MMC_TXFIFO 0x3C 46 #define JZ_REG_MMC_LPM 0x40 47 #define JZ_REG_MMC_DMAC 0x44 48 49 #define JZ_MMC_STRPCL_EXIT_MULTIPLE BIT(7) 50 #define JZ_MMC_STRPCL_EXIT_TRANSFER BIT(6) 51 #define JZ_MMC_STRPCL_START_READWAIT BIT(5) 52 #define JZ_MMC_STRPCL_STOP_READWAIT BIT(4) 53 #define JZ_MMC_STRPCL_RESET BIT(3) 54 #define JZ_MMC_STRPCL_START_OP BIT(2) 55 #define JZ_MMC_STRPCL_CLOCK_CONTROL (BIT(1) | BIT(0)) 56 #define JZ_MMC_STRPCL_CLOCK_STOP BIT(0) 57 #define JZ_MMC_STRPCL_CLOCK_START BIT(1) 58 59 60 #define JZ_MMC_STATUS_IS_RESETTING BIT(15) 61 #define JZ_MMC_STATUS_SDIO_INT_ACTIVE BIT(14) 62 #define JZ_MMC_STATUS_PRG_DONE BIT(13) 63 #define JZ_MMC_STATUS_DATA_TRAN_DONE 
BIT(12) 64 #define JZ_MMC_STATUS_END_CMD_RES BIT(11) 65 #define JZ_MMC_STATUS_DATA_FIFO_AFULL BIT(10) 66 #define JZ_MMC_STATUS_IS_READWAIT BIT(9) 67 #define JZ_MMC_STATUS_CLK_EN BIT(8) 68 #define JZ_MMC_STATUS_DATA_FIFO_FULL BIT(7) 69 #define JZ_MMC_STATUS_DATA_FIFO_EMPTY BIT(6) 70 #define JZ_MMC_STATUS_CRC_RES_ERR BIT(5) 71 #define JZ_MMC_STATUS_CRC_READ_ERROR BIT(4) 72 #define JZ_MMC_STATUS_TIMEOUT_WRITE BIT(3) 73 #define JZ_MMC_STATUS_CRC_WRITE_ERROR BIT(2) 74 #define JZ_MMC_STATUS_TIMEOUT_RES BIT(1) 75 #define JZ_MMC_STATUS_TIMEOUT_READ BIT(0) 76 77 #define JZ_MMC_STATUS_READ_ERROR_MASK (BIT(4) | BIT(0)) 78 #define JZ_MMC_STATUS_WRITE_ERROR_MASK (BIT(3) | BIT(2)) 79 80 81 #define JZ_MMC_CMDAT_IO_ABORT BIT(11) 82 #define JZ_MMC_CMDAT_BUS_WIDTH_4BIT BIT(10) 83 #define JZ_MMC_CMDAT_BUS_WIDTH_8BIT (BIT(10) | BIT(9)) 84 #define JZ_MMC_CMDAT_BUS_WIDTH_MASK (BIT(10) | BIT(9)) 85 #define JZ_MMC_CMDAT_DMA_EN BIT(8) 86 #define JZ_MMC_CMDAT_INIT BIT(7) 87 #define JZ_MMC_CMDAT_BUSY BIT(6) 88 #define JZ_MMC_CMDAT_STREAM BIT(5) 89 #define JZ_MMC_CMDAT_WRITE BIT(4) 90 #define JZ_MMC_CMDAT_DATA_EN BIT(3) 91 #define JZ_MMC_CMDAT_RESPONSE_FORMAT (BIT(2) | BIT(1) | BIT(0)) 92 #define JZ_MMC_CMDAT_RSP_R1 1 93 #define JZ_MMC_CMDAT_RSP_R2 2 94 #define JZ_MMC_CMDAT_RSP_R3 3 95 96 #define JZ_MMC_IRQ_SDIO BIT(7) 97 #define JZ_MMC_IRQ_TXFIFO_WR_REQ BIT(6) 98 #define JZ_MMC_IRQ_RXFIFO_RD_REQ BIT(5) 99 #define JZ_MMC_IRQ_END_CMD_RES BIT(2) 100 #define JZ_MMC_IRQ_PRG_DONE BIT(1) 101 #define JZ_MMC_IRQ_DATA_TRAN_DONE BIT(0) 102 103 #define JZ_MMC_DMAC_DMA_SEL BIT(1) 104 #define JZ_MMC_DMAC_DMA_EN BIT(0) 105 106 #define JZ_MMC_LPM_DRV_RISING BIT(31) 107 #define JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY BIT(31) 108 #define JZ_MMC_LPM_DRV_RISING_1NS_DLY BIT(30) 109 #define JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY BIT(29) 110 #define JZ_MMC_LPM_LOW_POWER_MODE_EN BIT(0) 111 112 #define JZ_MMC_CLK_RATE 24000000 113 #define JZ_MMC_REQ_TIMEOUT_MS 5000 114 115 enum jz4740_mmc_version { 116 
JZ_MMC_JZ4740, 117 JZ_MMC_JZ4725B, 118 JZ_MMC_JZ4760, 119 JZ_MMC_JZ4780, 120 JZ_MMC_X1000, 121 }; 122 123 enum jz4740_mmc_state { 124 JZ4740_MMC_STATE_READ_RESPONSE, 125 JZ4740_MMC_STATE_TRANSFER_DATA, 126 JZ4740_MMC_STATE_SEND_STOP, 127 JZ4740_MMC_STATE_DONE, 128 }; 129 130 /* 131 * The MMC core allows to prepare a mmc_request while another mmc_request 132 * is in-flight. This is used via the pre_req/post_req hooks. 133 * This driver uses the pre_req/post_req hooks to map/unmap the mmc_request. 134 * Following what other drivers do (sdhci, dw_mmc) we use the following cookie 135 * flags to keep track of the mmc_request mapping state. 136 * 137 * COOKIE_UNMAPPED: the request is not mapped. 138 * COOKIE_PREMAPPED: the request was mapped in pre_req, 139 * and should be unmapped in post_req. 140 * COOKIE_MAPPED: the request was mapped in the irq handler, 141 * and should be unmapped before mmc_request_done is called.. 142 */ 143 enum jz4780_cookie { 144 COOKIE_UNMAPPED = 0, 145 COOKIE_PREMAPPED, 146 COOKIE_MAPPED, 147 }; 148 149 struct jz4740_mmc_host { 150 struct mmc_host *mmc; 151 struct platform_device *pdev; 152 struct clk *clk; 153 154 enum jz4740_mmc_version version; 155 156 int irq; 157 158 void __iomem *base; 159 struct resource *mem_res; 160 struct mmc_request *req; 161 struct mmc_command *cmd; 162 163 bool vqmmc_enabled; 164 165 unsigned long waiting; 166 167 uint32_t cmdat; 168 169 uint32_t irq_mask; 170 171 spinlock_t lock; 172 173 struct timer_list timeout_timer; 174 struct sg_mapping_iter miter; 175 enum jz4740_mmc_state state; 176 177 /* DMA support */ 178 struct dma_chan *dma_rx; 179 struct dma_chan *dma_tx; 180 bool use_dma; 181 182 /* The DMA trigger level is 8 words, that is to say, the DMA read 183 * trigger is when data words in MSC_RXFIFO is >= 8 and the DMA write 184 * trigger is when data words in MSC_TXFIFO is < 8. 
 */
#define JZ4740_MMC_FIFO_HALF_SIZE 8
};

/* Write the interrupt mask register; it widened from 16 to 32 bits on JZ4725B. */
static void jz4740_mmc_write_irq_mask(struct jz4740_mmc_host *host,
				      uint32_t val)
{
	if (host->version >= JZ_MMC_JZ4725B)
		return writel(val, host->base + JZ_REG_MMC_IMASK);
	else
		return writew(val, host->base + JZ_REG_MMC_IMASK);
}

/* Write the interrupt flag register; 32-bit wide from JZ4780 onwards. */
static void jz4740_mmc_write_irq_reg(struct jz4740_mmc_host *host,
				     uint32_t val)
{
	if (host->version >= JZ_MMC_JZ4780)
		writel(val, host->base + JZ_REG_MMC_IREG);
	else
		writew(val, host->base + JZ_REG_MMC_IREG);
}

/* Read the interrupt flag register; 32-bit wide from JZ4780 onwards. */
static uint32_t jz4740_mmc_read_irq_reg(struct jz4740_mmc_host *host)
{
	if (host->version >= JZ_MMC_JZ4780)
		return readl(host->base + JZ_REG_MMC_IREG);
	else
		return readw(host->base + JZ_REG_MMC_IREG);
}

/*----------------------------------------------------------------------------*/
/* DMA infrastructure */

/*
 * Release the channels acquired in jz4740_mmc_acquire_dma_channels().
 * dma_rx is only set when separate "tx"/"rx" channels are in use, so it
 * may be NULL.
 */
static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host)
{
	if (!host->use_dma)
		return;

	dma_release_channel(host->dma_tx);
	if (host->dma_rx)
		dma_release_channel(host->dma_rx);
}

/*
 * Acquire DMA channels: prefer a single bidirectional "tx-rx" channel and
 * fall back to separate "tx"/"rx" channels. Returns 0 on success or a
 * negative errno (-ENODEV lets the caller fall back to PIO).
 */
static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
{
	struct device *dev = mmc_dev(host->mmc);

	host->dma_tx = dma_request_chan(dev, "tx-rx");
	if (!IS_ERR(host->dma_tx))
		return 0;

	if (PTR_ERR(host->dma_tx) != -ENODEV) {
		dev_err(dev, "Failed to get dma tx-rx channel\n");
		return PTR_ERR(host->dma_tx);
	}

	host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
	if (IS_ERR(host->dma_tx)) {
		dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
		return PTR_ERR(host->dma_tx);
	}

	host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
	if (IS_ERR(host->dma_rx)) {
		dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n");
		dma_release_channel(host->dma_tx);
		return PTR_ERR(host->dma_rx);
	}

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx) {
		struct device *dev = host->dma_tx->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}

	if (host->dma_rx) {
		struct device *dev = host->dma_rx->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}

	return 0;
}

/* Select the channel matching the transfer direction (dma_rx may be NULL). */
static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
						       struct mmc_data *data)
{
	if ((data->flags & MMC_DATA_READ) && host->dma_rx)
		return host->dma_rx;
	else
		return host->dma_tx;
}

/* Unmap a previously mapped request and mark it unmapped again. */
static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
				 struct mmc_data *data)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	enum dma_data_direction dir = mmc_get_dma_dir(data);

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
	data->host_cookie = COOKIE_UNMAPPED;
}

/* Prepares DMA data for current or next transfer.
 * A request can be in-flight when this is called.
 */
static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
				       struct mmc_data *data,
				       int cookie)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	enum dma_data_direction dir = mmc_get_dma_dir(data);
	unsigned int sg_count;

	/* Already mapped by pre_req? Reuse that mapping. */
	if (data->host_cookie == COOKIE_PREMAPPED)
		return data->sg_count;

	sg_count = dma_map_sg(chan->device->dev,
			      data->sg,
			      data->sg_len,
			      dir);

	if (!sg_count) {
		dev_err(mmc_dev(host->mmc),
			"Failed to map scatterlist for DMA operation\n");
		return -EINVAL;
	}

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return data->sg_count;
}

/*
 * Configure the DMA channel for the transfer direction, map the
 * scatterlist if needed and submit/issue the descriptor.
 * Returns 0 on success or a negative errno.
 */
static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
					 struct mmc_data *data)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config conf = {
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
		.dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
	};
	int sg_count;

	if (data->flags & MMC_DATA_WRITE) {
		conf.direction = DMA_MEM_TO_DEV;
		conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
	} else {
		conf.direction = DMA_DEV_TO_MEM;
		conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
	}

	sg_count = jz4740_mmc_prepare_dma_data(host, data, COOKIE_MAPPED);
	if (sg_count < 0)
		return sg_count;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, sg_count,
				       conf.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(mmc_dev(host->mmc),
			"Failed to allocate DMA %s descriptor",
			conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
		goto dma_unmap;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;

dma_unmap:
	/* Only unmap mappings we created here; pre_req mappings are kept. */
	if (data->host_cookie == COOKIE_MAPPED)
		jz4740_mmc_dma_unmap(host, data);
	return -ENOMEM;
}

/* .pre_req hook: map the request's scatterlist ahead of time. */
static void jz4740_mmc_pre_request(struct mmc_host *mmc,
				   struct mmc_request *mrq)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!host->use_dma)
		return;

	data->host_cookie = COOKIE_UNMAPPED;
	if (jz4740_mmc_prepare_dma_data(host, data, COOKIE_PREMAPPED) < 0)
		data->host_cookie = COOKIE_UNMAPPED;
}

/* .post_req hook: unmap the request and terminate DMA on error. */
static void jz4740_mmc_post_request(struct mmc_host *mmc,
				    struct mmc_request *mrq,
				    int err)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (data && data->host_cookie != COOKIE_UNMAPPED)
		jz4740_mmc_dma_unmap(host, data);

	if (err) {
		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);

		dmaengine_terminate_all(chan);
	}
}

/*----------------------------------------------------------------------------*/

/* Enable (unmask) or disable (mask) the given controller interrupts. */
static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
				       unsigned int irq, bool enabled)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (enabled)
		host->irq_mask &= ~irq;
	else
		host->irq_mask |= irq;

	jz4740_mmc_write_irq_mask(host, host->irq_mask);
	spin_unlock_irqrestore(&host->lock, flags);
}

/* Start the MMC clock, optionally also starting the programmed operation. */
static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
				    bool start_transfer)
{
	uint16_t val = JZ_MMC_STRPCL_CLOCK_START;

	if (start_transfer)
		val |= JZ_MMC_STRPCL_START_OP;

	writew(val, host->base + JZ_REG_MMC_STRPCL);
}

/* Stop the MMC clock and busy-wait (bounded) until it reports disabled. */
static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host)
{
	uint32_t status;
	unsigned int timeout = 1000;

	writew(JZ_MMC_STRPCL_CLOCK_STOP,
	       host->base + JZ_REG_MMC_STRPCL);
	do {
		status = readl(host->base + JZ_REG_MMC_STATUS);
	} while (status & JZ_MMC_STATUS_CLK_EN && --timeout);
}

/* Soft-reset the controller and busy-wait (bounded) for it to finish. */
static void jz4740_mmc_reset(struct jz4740_mmc_host *host)
{
	uint32_t status;
	unsigned int timeout = 1000;

	writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL);
	udelay(10);
	do {
		status = readl(host->base + JZ_REG_MMC_STATUS);
	} while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout);
}

/*
 * Finish the current request: clear host->req, unmap any mapping created
 * in the irq path and hand the request back to the MMC core.
 */
static void jz4740_mmc_request_done(struct jz4740_mmc_host *host)
{
	struct mmc_request *req;
	struct mmc_data *data;

	req = host->req;
	data = req->data;
	host->req = NULL;

	if (data && data->host_cookie == COOKIE_MAPPED)
		jz4740_mmc_dma_unmap(host, data);
	mmc_request_done(host->mmc, req);
}

/*
 * Busy-poll for an interrupt flag. If it does not appear within the
 * polling budget, arm the timeout timer and unmask the interrupt so the
 * irq handler can resume the transfer; returns true in that case.
 */
static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host,
					unsigned int irq)
{
	unsigned int timeout = 0x800;
	uint32_t status;

	do {
		status = jz4740_mmc_read_irq_reg(host);
	} while (!(status & irq) && --timeout);

	if (timeout == 0) {
		set_bit(0, &host->waiting);
		mod_timer(&host->timeout_timer,
			  jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
		jz4740_mmc_set_irq_enabled(host, irq, true);
		return true;
	}

	return false;
}

/* Translate read/write error bits in the status register into cmd/data errors. */
static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host,
					    struct mmc_data *data)
{
	int status;

	status = readl(host->base + JZ_REG_MMC_STATUS);
	if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) {
		if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) {
			host->req->cmd->error = -ETIMEDOUT;
			data->error = -ETIMEDOUT;
		} else {
			host->req->cmd->error = -EIO;
			data->error = -EIO;
		}
	} else if (status & JZ_MMC_STATUS_READ_ERROR_MASK) {
		if (status & (JZ_MMC_STATUS_TIMEOUT_READ)) {
			host->req->cmd->error = -ETIMEDOUT;
			data->error = -ETIMEDOUT;
		} else {
			host->req->cmd->error = -EIO;
			data->error = -EIO;
		}
	}
}

/*
 * PIO write path: feed the TX FIFO from the sg_mapping_iter, eight words
 * (half the FIFO) per burst plus a word-wise tail. Returns true if polling
 * for FIFO space timed out; the transfer is then resumed from the irq path.
 */
static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host,
				  struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO;
	uint32_t *buf;
	bool timeout;
	size_t i, j;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		/* j = number of full 8-word bursts, i = leftover words */
		i = miter->length / 4;
		j = i / 8;
		i = i & 0x7;
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			writel(buf[0], fifo_addr);
			writel(buf[1], fifo_addr);
			writel(buf[2], fifo_addr);
			writel(buf[3], fifo_addr);
			writel(buf[4], fifo_addr);
			writel(buf[5], fifo_addr);
			writel(buf[6], fifo_addr);
			writel(buf[7], fifo_addr);
			buf += 8;
			--j;
		}
		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i) {
				writel(*buf, fifo_addr);
				++buf;
				--i;
			}
		}
		data->bytes_xfered += miter->length;
	}
	sg_miter_stop(miter);

	return false;

poll_timeout:
	/* Record how much of the current segment was consumed before stalling. */
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}

/*
 * PIO read path: drain the RX FIFO into the sg_mapping_iter, eight words
 * per burst plus a byte-wise tail. Returns true if polling for data timed
 * out; the transfer is then resumed from the irq path.
 */
static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
				 struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO;
	uint32_t *buf;
	uint32_t d;
	uint32_t status;
	size_t i, j;
	unsigned int timeout;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		/* j = number of full 32-byte bursts, i = leftover bytes */
		i = miter->length;
		j = i / 32;
		i = i & 0x1f;
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			buf[0] = readl(fifo_addr);
			buf[1] = readl(fifo_addr);
			buf[2] = readl(fifo_addr);
			buf[3] = readl(fifo_addr);
			buf[4] = readl(fifo_addr);
			buf[5] = readl(fifo_addr);
			buf[6] = readl(fifo_addr);
			buf[7] = readl(fifo_addr);

			buf += 8;
			--j;
		}

		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i >= 4) {
				*buf++ = readl(fifo_addr);
				i -= 4;
			}
			if (unlikely(i > 0)) {
				/* Partial word at the end: copy only i bytes. */
				d = readl(fifo_addr);
				memcpy(buf, &d, i);
			}
		}
		data->bytes_xfered += miter->length;
	}
	sg_miter_stop(miter);

	/* For whatever reason there is sometime one word more in the fifo then
	 * requested */
	timeout = 1000;
	status = readl(host->base + JZ_REG_MMC_STATUS);
	while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) {
		d = readl(fifo_addr);
		status = readl(host->base + JZ_REG_MMC_STATUS);
	}

	return false;

poll_timeout:
	/* Record how much of the current segment was consumed before stalling. */
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}

/*
 * Request timeout timer: if the irq handler has not claimed the "waiting"
 * bit in time, fail the request with -ETIMEDOUT.
 */
static void jz4740_mmc_timeout(struct timer_list *t)
{
	struct jz4740_mmc_host *host = timer_container_of(host, t,
							  timeout_timer);

	if (!test_and_clear_bit(0, &host->waiting))
		return;

	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false);

	host->req->cmd->error = -ETIMEDOUT;
	jz4740_mmc_request_done(host);
}

/*
 * Read the command response from the 16-bit response FIFO and reassemble
 * it into the 32-bit resp[] words the MMC core expects.
 */
static void jz4740_mmc_read_response(struct jz4740_mmc_host *host,
				     struct mmc_command *cmd)
{
	int i;
	uint16_t tmp;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO;

	if (cmd->flags & MMC_RSP_136) {
		/* 136-bit response: each resp[] word straddles FIFO halfwords. */
		tmp = readw(fifo_addr);
		for (i = 0; i < 4; ++i) {
			cmd->resp[i] = tmp << 24;
			tmp = readw(fifo_addr);
			cmd->resp[i] |= tmp << 8;
			tmp = readw(fifo_addr);
			cmd->resp[i] |= tmp >> 8;
		}
	} else {
		cmd->resp[0] = readw(fifo_addr) << 24;
		cmd->resp[0] |= readw(fifo_addr) << 8;
		cmd->resp[0] |= readw(fifo_addr) & 0xff;
	}
}

/*
 * Program a command: stop the clock, set response format, data/DMA
 * control and block geometry, then restart the clock with START_OP.
 */
static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
				    struct mmc_command *cmd)
{
	uint32_t cmdat = host->cmdat;

	/* The INIT (initialisation clock) sequence is only sent once. */
	host->cmdat &= ~JZ_MMC_CMDAT_INIT;
	jz4740_mmc_clock_disable(host);

	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= JZ_MMC_CMDAT_BUSY;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1B:
	case MMC_RSP_R1:
		cmdat |= JZ_MMC_CMDAT_RSP_R1;
		break;
	case MMC_RSP_R2:
		cmdat |= JZ_MMC_CMDAT_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmdat |= JZ_MMC_CMDAT_RSP_R3;
		break;
	default:
		break;
	}

	if (cmd->data) {
		cmdat |= JZ_MMC_CMDAT_DATA_EN;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdat |= JZ_MMC_CMDAT_WRITE;
		if (host->use_dma) {
			/*
			 * The JZ4780's MMC controller has integrated DMA ability
			 * in addition to being able to use the external DMA
			 * controller. It moves DMA control bits to a separate
			 * register. The DMA_SEL bit chooses the external
			 * controller over the integrated one. Earlier SoCs
			 * can only use the external controller, and have a
			 * single DMA enable bit in CMDAT.
			 */
			if (host->version >= JZ_MMC_JZ4780) {
				writel(JZ_MMC_DMAC_DMA_EN | JZ_MMC_DMAC_DMA_SEL,
				       host->base + JZ_REG_MMC_DMAC);
			} else {
				cmdat |= JZ_MMC_CMDAT_DMA_EN;
			}
		} else if (host->version >= JZ_MMC_JZ4780) {
			writel(0, host->base + JZ_REG_MMC_DMAC);
		}

		writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN);
		writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB);
	}

	writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD);
	writel(cmd->arg, host->base + JZ_REG_MMC_ARG);
	writel(cmdat, host->base + JZ_REG_MMC_CMDAT);

	jz4740_mmc_clock_enable(host, 1);
}

/* Start the sg_mapping_iter for a PIO transfer in the right direction. */
static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host)
{
	struct mmc_command *cmd = host->req->cmd;
	struct mmc_data *data = cmd->data;
	int direction;

	if (data->flags & MMC_DATA_READ)
		direction = SG_MITER_TO_SG;
	else
		direction = SG_MITER_FROM_SG;

	sg_miter_start(&host->miter, data->sg, data->sg_len, direction);
}


/*
 * Threaded irq handler: a state machine advancing the current request
 * through response read, data transfer and stop command. When polling
 * times out, the current state is saved and the handler returns; the
 * hard irq re-wakes the thread to resume from the saved state.
 */
static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
{
	struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid;
	struct mmc_command *cmd = host->req->cmd;
	struct mmc_request *req = host->req;
	struct mmc_data *data = cmd->data;
	bool timeout = false;

	if (cmd->error)
		host->state = JZ4740_MMC_STATE_DONE;

	switch (host->state) {
	case JZ4740_MMC_STATE_READ_RESPONSE:
		if (cmd->flags & MMC_RSP_PRESENT)
			jz4740_mmc_read_response(host, cmd);

		if (!data)
			break;

		jz_mmc_prepare_data_transfer(host);
		fallthrough;

	case JZ4740_MMC_STATE_TRANSFER_DATA:
		if (host->use_dma) {
			/* Use DMA if enabled.
			 * Data transfer direction is defined later by
			 * relying on data flags in
			 * jz4740_mmc_prepare_dma_data() and
			 * jz4740_mmc_start_dma_transfer().
			 */
			timeout = jz4740_mmc_start_dma_transfer(host, data);
			data->bytes_xfered = data->blocks * data->blksz;
		} else if (data->flags & MMC_DATA_READ)
			/* Use PIO if DMA is not enabled.
			 * Data transfer direction was defined before
			 * by relying on data flags in
			 * jz_mmc_prepare_data_transfer().
			 */
			timeout = jz4740_mmc_read_data(host, data);
		else
			timeout = jz4740_mmc_write_data(host, data);

		if (unlikely(timeout)) {
			host->state = JZ4740_MMC_STATE_TRANSFER_DATA;
			break;
		}

		jz4740_mmc_transfer_check_state(host, data);

		timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
		if (unlikely(timeout)) {
			host->state = JZ4740_MMC_STATE_SEND_STOP;
			break;
		}
		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
		fallthrough;

	case JZ4740_MMC_STATE_SEND_STOP:
		if (!req->stop)
			break;

		jz4740_mmc_send_command(host, req->stop);

		if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) {
			timeout = jz4740_mmc_poll_irq(host,
						      JZ_MMC_IRQ_PRG_DONE);
			if (timeout) {
				host->state = JZ4740_MMC_STATE_DONE;
				break;
			}
		}
		fallthrough;

	case JZ4740_MMC_STATE_DONE:
		break;
	}

	if (!timeout)
		jz4740_mmc_request_done(host);

	return IRQ_HANDLED;
}

/*
 * Hard irq handler: acknowledge transient flags, forward SDIO card
 * interrupts to the core, record command errors from the status register
 * and wake the threaded handler to advance the request state machine.
 */
static irqreturn_t jz_mmc_irq(int irq, void *devid)
{
	struct jz4740_mmc_host *host = devid;
	struct mmc_command *cmd = host->cmd;
	uint32_t irq_reg, status, tmp;

	status = readl(host->base + JZ_REG_MMC_STATUS);
	irq_reg = jz4740_mmc_read_irq_reg(host);

	tmp = irq_reg;
	/* Only consider interrupts that are currently unmasked. */
	irq_reg &= ~host->irq_mask;

	tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ |
		JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE);

	if (tmp != irq_reg)
		jz4740_mmc_write_irq_reg(host, tmp & ~irq_reg);

	if (irq_reg & JZ_MMC_IRQ_SDIO) {
		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_SDIO);
		mmc_signal_sdio_irq(host->mmc);
		irq_reg &= ~JZ_MMC_IRQ_SDIO;
	}

	if (host->req && cmd && irq_reg) {
		if (test_and_clear_bit(0, &host->waiting)) {
			timer_delete(&host->timeout_timer);

			if (status & JZ_MMC_STATUS_TIMEOUT_RES) {
				cmd->error = -ETIMEDOUT;
			} else if (status & JZ_MMC_STATUS_CRC_RES_ERR) {
				cmd->error = -EIO;
			} else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR |
				    JZ_MMC_STATUS_CRC_WRITE_ERROR)) {
				if (cmd->data)
					cmd->data->error = -EIO;
				cmd->error = -EIO;
			}

			jz4740_mmc_set_irq_enabled(host, irq_reg, false);
			jz4740_mmc_write_irq_reg(host, irq_reg);

			return IRQ_WAKE_THREAD;
		}
	}

	return IRQ_HANDLED;
}

/*
 * Program the clock divider for the requested rate and enable the
 * low-power / delay tuning bits above 25 MHz where the SoC supports them.
 * Returns the rate actually achieved.
 */
static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
{
	int div = 0;
	int real_rate;

	jz4740_mmc_clock_disable(host);
	clk_set_rate(host->clk, host->mmc->f_max);

	real_rate = clk_get_rate(host->clk);

	/* Divider is a power of two: rate = clk >> div, div in [0, 7]. */
	while (real_rate > rate && div < 7) {
		++div;
		real_rate >>= 1;
	}

	writew(div, host->base + JZ_REG_MMC_CLKRT);

	if (real_rate > 25000000) {
		if (host->version >= JZ_MMC_JZ4780) {
			writel(JZ_MMC_LPM_DRV_RISING_QTR_PHASE_DLY |
			       JZ_MMC_LPM_SMP_RISING_QTR_OR_HALF_PHASE_DLY |
			       JZ_MMC_LPM_LOW_POWER_MODE_EN,
			       host->base + JZ_REG_MMC_LPM);
		} else if (host->version >= JZ_MMC_JZ4760) {
			writel(JZ_MMC_LPM_DRV_RISING |
			       JZ_MMC_LPM_LOW_POWER_MODE_EN,
			       host->base + JZ_REG_MMC_LPM);
		} else if (host->version >= JZ_MMC_JZ4725B)
			writel(JZ_MMC_LPM_LOW_POWER_MODE_EN,
			       host->base + JZ_REG_MMC_LPM);
	}

	return real_rate;
}

/* .request hook: ack stale irqs, arm the timeout timer, send the command. */
static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);

	host->req = req;

	jz4740_mmc_write_irq_reg(host, ~0);
	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true);

	host->state = JZ4740_MMC_STATE_READ_RESPONSE;
	set_bit(0, &host->waiting);
	mod_timer(&host->timeout_timer,
		  jiffies + msecs_to_jiffies(JZ_MMC_REQ_TIMEOUT_MS));
	jz4740_mmc_send_command(host, req->cmd);
}

/* .set_ios hook: apply clock rate, power state and bus width. */
static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	int ret;

	if (ios->clock)
		jz4740_mmc_set_clock_rate(host, ios->clock);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		jz4740_mmc_reset(host);
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		host->cmdat |= JZ_MMC_CMDAT_INIT;
		clk_prepare_enable(host->clk);
		break;
	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			ret = regulator_enable(mmc->supply.vqmmc);
			if (ret)
				dev_err(&host->pdev->dev, "Failed to set vqmmc power!\n");
			else
				host->vqmmc_enabled = true;
		}
		break;
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}
		clk_disable_unprepare(host->clk);
		break;
	default:
		break;
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
		break;
	case MMC_BUS_WIDTH_4:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_8BIT;
		break;
	default:
		break;
	}
}

/* .enable_sdio_irq hook: unmask/mask the SDIO card interrupt. */
static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable);
}

/* .start_signal_voltage_switch hook. */
static int jz4740_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int ret;

	/* vqmmc regulator is
available */ 1006 if (!IS_ERR(mmc->supply.vqmmc)) { 1007 ret = mmc_regulator_set_vqmmc(mmc, ios); 1008 return ret < 0 ? ret : 0; 1009 } 1010 1011 /* no vqmmc regulator, assume fixed regulator at 3/3.3V */ 1012 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) 1013 return 0; 1014 1015 return -EINVAL; 1016 } 1017 1018 static const struct mmc_host_ops jz4740_mmc_ops = { 1019 .request = jz4740_mmc_request, 1020 .pre_req = jz4740_mmc_pre_request, 1021 .post_req = jz4740_mmc_post_request, 1022 .set_ios = jz4740_mmc_set_ios, 1023 .get_ro = mmc_gpio_get_ro, 1024 .get_cd = mmc_gpio_get_cd, 1025 .enable_sdio_irq = jz4740_mmc_enable_sdio_irq, 1026 .start_signal_voltage_switch = jz4740_voltage_switch, 1027 }; 1028 1029 static const struct of_device_id jz4740_mmc_of_match[] = { 1030 { .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 }, 1031 { .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B }, 1032 { .compatible = "ingenic,jz4760-mmc", .data = (void *) JZ_MMC_JZ4760 }, 1033 { .compatible = "ingenic,jz4775-mmc", .data = (void *) JZ_MMC_JZ4780 }, 1034 { .compatible = "ingenic,jz4780-mmc", .data = (void *) JZ_MMC_JZ4780 }, 1035 { .compatible = "ingenic,x1000-mmc", .data = (void *) JZ_MMC_X1000 }, 1036 {}, 1037 }; 1038 MODULE_DEVICE_TABLE(of, jz4740_mmc_of_match); 1039 1040 static int jz4740_mmc_probe(struct platform_device* pdev) 1041 { 1042 int ret; 1043 struct mmc_host *mmc; 1044 struct jz4740_mmc_host *host; 1045 1046 mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev); 1047 if (!mmc) { 1048 dev_err(&pdev->dev, "Failed to alloc mmc host structure\n"); 1049 return -ENOMEM; 1050 } 1051 1052 host = mmc_priv(mmc); 1053 1054 /* Default if no match is JZ4740 */ 1055 host->version = (enum jz4740_mmc_version)device_get_match_data(&pdev->dev); 1056 1057 ret = mmc_of_parse(mmc); 1058 if (ret) { 1059 dev_err_probe(&pdev->dev, ret, "could not parse device properties\n"); 1060 goto err_free_host; 1061 } 1062 1063 
mmc_regulator_get_supply(mmc); 1064 1065 host->irq = platform_get_irq(pdev, 0); 1066 if (host->irq < 0) { 1067 ret = host->irq; 1068 goto err_free_host; 1069 } 1070 1071 host->clk = devm_clk_get(&pdev->dev, "mmc"); 1072 if (IS_ERR(host->clk)) { 1073 ret = PTR_ERR(host->clk); 1074 dev_err(&pdev->dev, "Failed to get mmc clock\n"); 1075 goto err_free_host; 1076 } 1077 1078 host->base = devm_platform_get_and_ioremap_resource(pdev, 0, &host->mem_res); 1079 if (IS_ERR(host->base)) { 1080 ret = PTR_ERR(host->base); 1081 goto err_free_host; 1082 } 1083 1084 mmc->ops = &jz4740_mmc_ops; 1085 if (!mmc->f_max) 1086 mmc->f_max = JZ_MMC_CLK_RATE; 1087 1088 /* 1089 * There seems to be a problem with this driver on the JZ4760 and 1090 * JZ4760B SoCs. There, when using the maximum rate supported (50 MHz), 1091 * the communication fails with many SD cards. 1092 * Until this bug is sorted out, limit the maximum rate to 24 MHz. 1093 */ 1094 if (host->version == JZ_MMC_JZ4760 && mmc->f_max > JZ_MMC_CLK_RATE) 1095 mmc->f_max = JZ_MMC_CLK_RATE; 1096 1097 mmc->f_min = mmc->f_max / 128; 1098 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1099 1100 /* 1101 * We use a fixed timeout of 5s, hence inform the core about it. A 1102 * future improvement should instead respect the cmd->busy_timeout. 
1103 */ 1104 mmc->max_busy_timeout = JZ_MMC_REQ_TIMEOUT_MS; 1105 1106 mmc->max_blk_size = (1 << 10) - 1; 1107 mmc->max_blk_count = (1 << 15) - 1; 1108 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1109 1110 mmc->max_segs = 128; 1111 mmc->max_seg_size = mmc->max_req_size; 1112 1113 host->mmc = mmc; 1114 host->pdev = pdev; 1115 spin_lock_init(&host->lock); 1116 host->irq_mask = ~0; 1117 1118 jz4740_mmc_reset(host); 1119 1120 ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0, 1121 dev_name(&pdev->dev), host); 1122 if (ret) { 1123 dev_err(&pdev->dev, "Failed to request irq: %d\n", ret); 1124 goto err_free_host; 1125 } 1126 1127 jz4740_mmc_clock_disable(host); 1128 timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0); 1129 1130 ret = jz4740_mmc_acquire_dma_channels(host); 1131 if (ret == -EPROBE_DEFER) 1132 goto err_free_irq; 1133 host->use_dma = !ret; 1134 1135 platform_set_drvdata(pdev, host); 1136 ret = mmc_add_host(mmc); 1137 1138 if (ret) { 1139 dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret); 1140 goto err_release_dma; 1141 } 1142 dev_info(&pdev->dev, "Ingenic SD/MMC card driver registered\n"); 1143 1144 dev_info(&pdev->dev, "Using %s, %d-bit mode\n", 1145 host->use_dma ? "DMA" : "PIO", 1146 (mmc->caps & MMC_CAP_8_BIT_DATA) ? 8 : 1147 ((mmc->caps & MMC_CAP_4_BIT_DATA) ? 
4 : 1)); 1148 1149 return 0; 1150 1151 err_release_dma: 1152 if (host->use_dma) 1153 jz4740_mmc_release_dma_channels(host); 1154 err_free_irq: 1155 free_irq(host->irq, host); 1156 err_free_host: 1157 mmc_free_host(mmc); 1158 1159 return ret; 1160 } 1161 1162 static void jz4740_mmc_remove(struct platform_device *pdev) 1163 { 1164 struct jz4740_mmc_host *host = platform_get_drvdata(pdev); 1165 1166 timer_delete_sync(&host->timeout_timer); 1167 jz4740_mmc_set_irq_enabled(host, 0xff, false); 1168 jz4740_mmc_reset(host); 1169 1170 mmc_remove_host(host->mmc); 1171 1172 free_irq(host->irq, host); 1173 1174 if (host->use_dma) 1175 jz4740_mmc_release_dma_channels(host); 1176 1177 mmc_free_host(host->mmc); 1178 } 1179 1180 static int jz4740_mmc_suspend(struct device *dev) 1181 { 1182 return pinctrl_pm_select_sleep_state(dev); 1183 } 1184 1185 static int jz4740_mmc_resume(struct device *dev) 1186 { 1187 return pinctrl_select_default_state(dev); 1188 } 1189 1190 static DEFINE_SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend, 1191 jz4740_mmc_resume); 1192 1193 static struct platform_driver jz4740_mmc_driver = { 1194 .probe = jz4740_mmc_probe, 1195 .remove = jz4740_mmc_remove, 1196 .driver = { 1197 .name = "jz4740-mmc", 1198 .probe_type = PROBE_PREFER_ASYNCHRONOUS, 1199 .of_match_table = jz4740_mmc_of_match, 1200 .pm = pm_sleep_ptr(&jz4740_mmc_pm_ops), 1201 }, 1202 }; 1203 1204 module_platform_driver(jz4740_mmc_driver); 1205 1206 MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver"); 1207 MODULE_LICENSE("GPL"); 1208 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); 1209