Lines Matching "spi-qup-v2" (drivers/spi/spi-qup.c)
Each entry below shows the kernel source line number, the matching line, and the enclosing function; identifier hits are additionally tagged "argument" or "local".

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2008-2014, The Linux foundation. All rights reserved.
18 #include <linux/spi/spi.h>
20 #include <linux/dma-mapping.h>
117 #define SPI_MAX_XFER (SZ_64K - 64)
145 int w_size; /* bytes per SPI word */
160 static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer);
164 u32 opflag = readl_relaxed(controller->base + QUP_OPERATIONAL); in spi_qup_is_flag_set()
180 return controller->n_words * controller->w_size; in spi_qup_len()
185 u32 opstate = readl_relaxed(controller->base + QUP_STATE); in spi_qup_is_valid_state()
195 if (controller->bw_speed_hz == speed_hz) in spi_qup_vote_bw()
199 ret = icc_set_bw(controller->icc_path, 0, needed_peak_bw); in spi_qup_vote_bw()
203 controller->bw_speed_hz = speed_hz; in spi_qup_vote_bw()
218 return -EIO; in spi_qup_set_state()
222 dev_dbg(controller->dev, "invalid state for %ld,us %d\n", in spi_qup_set_state()
225 cur_state = readl_relaxed(controller->base + QUP_STATE); in spi_qup_set_state()
232 writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE); in spi_qup_set_state()
233 writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE); in spi_qup_set_state()
237 writel_relaxed(cur_state, controller->base + QUP_STATE); in spi_qup_set_state()
246 return -EIO; in spi_qup_set_state()
254 u8 *rx_buf = controller->rx_buf; in spi_qup_read_from_fifo()
258 for (; num_words; num_words--) { in spi_qup_read_from_fifo()
260 word = readl_relaxed(controller->base + QUP_INPUT_FIFO); in spi_qup_read_from_fifo()
262 num_bytes = min_t(int, spi_qup_len(controller) - in spi_qup_read_from_fifo()
263 controller->rx_bytes, in spi_qup_read_from_fifo()
264 controller->w_size); in spi_qup_read_from_fifo()
267 controller->rx_bytes += num_bytes; in spi_qup_read_from_fifo()
271 for (i = 0; i < num_bytes; i++, controller->rx_bytes++) { in spi_qup_read_from_fifo()
273 * The data format depends on bytes per SPI word: in spi_qup_read_from_fifo()
279 shift *= (controller->w_size - i - 1); in spi_qup_read_from_fifo()
280 rx_buf[controller->rx_bytes] = word >> shift; in spi_qup_read_from_fifo()
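A standalone plain-C sketch of the unpacking rule the comment at line 273 describes (unpack_fifo_word() is a hypothetical helper; the driver does this inline on controller->rx_buf):

#include <stdint.h>
#include <stddef.h>

/* Hypothetical helper: byte i of a w_size-byte SPI word sits at bits
 * 8 * (w_size - i - 1) of the 32-bit FIFO word, so a 2-byte word read
 * as 0x00001234 unpacks to { 0x12, 0x34 }. */
static void unpack_fifo_word(uint32_t word, uint8_t *dst, size_t w_size)
{
	for (size_t i = 0; i < w_size; i++)
		dst[i] = word >> (8 * (w_size - i - 1));
}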
288 bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK; in spi_qup_read()
290 remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->rx_bytes, in spi_qup_read()
291 controller->w_size); in spi_qup_read()
292 words_per_block = controller->in_blk_sz >> 2; in spi_qup_read()
297 controller->base + QUP_OPERATIONAL); in spi_qup_read()
316 remainder -= num_words; in spi_qup_read()
333 *opflags = readl_relaxed(controller->base + QUP_OPERATIONAL); in spi_qup_read()
336 controller->base + QUP_OPERATIONAL); in spi_qup_read()
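The remainder/block accounting at lines 290-292, restated as standalone arithmetic (words_this_pass() is a hypothetical helper and the numbers are illustrative; in BLOCK mode the loop drains at most one hardware block of the remainder per pass):

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Per-pass word budget: the words still outstanding for the transfer,
 * capped at one hardware block (in_blk_sz / 4 words). */
static unsigned int words_this_pass(unsigned int total_bytes,
				    unsigned int rx_bytes,
				    unsigned int w_size,
				    unsigned int in_blk_sz)
{
	unsigned int remainder = DIV_ROUND_UP(total_bytes - rx_bytes, w_size);
	unsigned int words_per_block = in_blk_sz >> 2;

	return remainder < words_per_block ? remainder : words_per_block;
}

/* e.g. 100 bytes left, 1-byte words, 16-byte blocks -> 4 words per pass */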
342 const u8 *tx_buf = controller->tx_buf; in spi_qup_write_to_fifo()
346 for (; num_words; num_words--) { in spi_qup_write_to_fifo()
349 num_bytes = min_t(int, spi_qup_len(controller) - in spi_qup_write_to_fifo()
350 controller->tx_bytes, in spi_qup_write_to_fifo()
351 controller->w_size); in spi_qup_write_to_fifo()
354 data = tx_buf[controller->tx_bytes + i]; in spi_qup_write_to_fifo()
355 word |= data << (BITS_PER_BYTE * (3 - i)); in spi_qup_write_to_fifo()
358 controller->tx_bytes += num_bytes; in spi_qup_write_to_fifo()
360 writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO); in spi_qup_write_to_fifo()
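The matching output-FIFO packing, as a sketch (pack_fifo_word() is a hypothetical helper): bytes are always left-justified into the 32-bit FIFO word via 8 * (3 - i), independent of w_size.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical helper mirroring the write path: num_bytes is at most
 * w_size (and at most 4), so 0x12 written as a 1-byte word becomes the
 * FIFO word 0x12000000. */
static uint32_t pack_fifo_word(const uint8_t *src, size_t num_bytes)
{
	uint32_t word = 0;

	for (size_t i = 0; i < num_bytes; i++)
		word |= (uint32_t)src[i] << (8 * (3 - i));
	return word;
}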
366 struct spi_qup *qup = data; in spi_qup_dma_done() local
368 complete(&qup->done); in spi_qup_dma_done()
373 bool is_block_mode = controller->mode == QUP_IO_M_MODE_BLOCK; in spi_qup_write()
376 remainder = DIV_ROUND_UP(spi_qup_len(controller) - controller->tx_bytes, in spi_qup_write()
377 controller->w_size); in spi_qup_write()
378 words_per_block = controller->out_blk_sz >> 2; in spi_qup_write()
383 controller->base + QUP_OPERATIONAL); in spi_qup_write()
402 remainder -= num_words; in spi_qup_write()
416 struct spi_qup *qup = spi_controller_get_devdata(host); in spi_qup_prep_sg() local
423 chan = host->dma_tx; in spi_qup_prep_sg()
425 chan = host->dma_rx; in spi_qup_prep_sg()
429 return desc ? PTR_ERR(desc) : -EINVAL; in spi_qup_prep_sg()
431 desc->callback = callback; in spi_qup_prep_sg()
432 desc->callback_param = qup; in spi_qup_prep_sg()
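A condensed sketch of the generic dmaengine pattern the prep_sg fragment follows (qup_issue_sg() is a hypothetical name and DMA_PREP_INTERRUPT is an illustrative flag choice, not necessarily what the driver passes): build a slave descriptor over the scatterlist, hook the completion callback, and submit it to the channel's queue.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int qup_issue_sg(struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int nents, enum dma_transfer_direction dir,
			dma_async_tx_callback done, void *param)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir,
				       DMA_PREP_INTERRUPT);
	if (IS_ERR_OR_NULL(desc))
		return desc ? PTR_ERR(desc) : -EINVAL;

	desc->callback = done;		/* e.g. spi_qup_dma_done() */
	desc->callback_param = param;	/* handed back to the callback */

	cookie = dmaengine_submit(desc);
	return dma_submit_error(cookie);
}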
442 if (xfer->tx_buf) in spi_qup_dma_terminate()
443 dmaengine_terminate_all(host->dma_tx); in spi_qup_dma_terminate()
444 if (xfer->rx_buf) in spi_qup_dma_terminate()
445 dmaengine_terminate_all(host->dma_rx); in spi_qup_dma_terminate()
468 static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer, in spi_qup_do_dma() argument
472 struct spi_controller *host = spi->controller; in spi_qup_do_dma()
473 struct spi_qup *qup = spi_controller_get_devdata(host); in spi_qup_do_dma() local
477 ret = spi_qup_vote_bw(qup, xfer->speed_hz); in spi_qup_do_dma()
479 dev_err(qup->dev, "fail to vote for ICC bandwidth: %d\n", ret); in spi_qup_do_dma()
480 return -EIO; in spi_qup_do_dma()
483 if (xfer->rx_buf) in spi_qup_do_dma()
485 else if (xfer->tx_buf) in spi_qup_do_dma()
488 rx_sgl = xfer->rx_sg.sgl; in spi_qup_do_dma()
489 tx_sgl = xfer->tx_sg.sgl; in spi_qup_do_dma()
495 qup->n_words = spi_qup_sgl_get_nents_len(rx_sgl, in spi_qup_do_dma()
496 SPI_MAX_XFER, &rx_nents) / qup->w_size; in spi_qup_do_dma()
498 qup->n_words = spi_qup_sgl_get_nents_len(tx_sgl, in spi_qup_do_dma()
499 SPI_MAX_XFER, &tx_nents) / qup->w_size; in spi_qup_do_dma()
500 if (!qup->n_words) in spi_qup_do_dma()
501 return -EIO; in spi_qup_do_dma()
503 ret = spi_qup_io_config(spi, xfer); in spi_qup_do_dma()
507 /* before issuing the descriptors, set the QUP to run */ in spi_qup_do_dma()
508 ret = spi_qup_set_state(qup, QUP_STATE_RUN); in spi_qup_do_dma()
510 dev_warn(qup->dev, "cannot set RUN state\n"); in spi_qup_do_dma()
518 dma_async_issue_pending(host->dma_rx); in spi_qup_do_dma()
527 dma_async_issue_pending(host->dma_tx); in spi_qup_do_dma()
530 if (!wait_for_completion_timeout(&qup->done, timeout)) in spi_qup_do_dma()
531 return -ETIMEDOUT; in spi_qup_do_dma()
533 for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl)) in spi_qup_do_dma()
535 for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl)) in spi_qup_do_dma()
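A sketch of how a scatterlist longer than SPI_MAX_XFER is measured per pass, in the spirit of spi_qup_sgl_get_nents_len() (whose body is not part of this listing; sgl_chunk_len() is only a hypothetical approximation), before rx_sgl/tx_sgl are advanced past the consumed entries as at lines 533-535:

#include <linux/scatterlist.h>
#include <linux/types.h>

/* Walk the DMA-mapped scatterlist, counting bytes and entries, and stop
 * once at least `max` bytes (one SPI_MAX_XFER chunk) are covered or a
 * zero-length entry is hit. */
static u32 sgl_chunk_len(struct scatterlist *sgl, u32 max, u32 *nents)
{
	u32 total = 0;

	*nents = 0;
	for (; sgl; sgl = sg_next(sgl)) {
		u32 len = sg_dma_len(sgl);

		if (!len)
			break;
		total += len;
		(*nents)++;
		if (total >= max)
			break;
	}
	return total;
}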
543 static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer, in spi_qup_do_pio() argument
546 struct spi_controller *host = spi->controller; in spi_qup_do_pio()
547 struct spi_qup *qup = spi_controller_get_devdata(host); in spi_qup_do_pio() local
550 n_words = qup->n_words; in spi_qup_do_pio()
552 qup->rx_buf = xfer->rx_buf; in spi_qup_do_pio()
553 qup->tx_buf = xfer->tx_buf; in spi_qup_do_pio()
557 qup->n_words = SPI_MAX_XFER; in spi_qup_do_pio()
559 qup->n_words = n_words % SPI_MAX_XFER; in spi_qup_do_pio()
561 if (qup->tx_buf && offset) in spi_qup_do_pio()
562 qup->tx_buf = xfer->tx_buf + offset * SPI_MAX_XFER; in spi_qup_do_pio()
564 if (qup->rx_buf && offset) in spi_qup_do_pio()
565 qup->rx_buf = xfer->rx_buf + offset * SPI_MAX_XFER; in spi_qup_do_pio()
571 if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32))) in spi_qup_do_pio()
572 qup->mode = QUP_IO_M_MODE_FIFO; in spi_qup_do_pio()
574 ret = spi_qup_io_config(spi, xfer); in spi_qup_do_pio()
578 ret = spi_qup_set_state(qup, QUP_STATE_RUN); in spi_qup_do_pio()
580 dev_warn(qup->dev, "cannot set RUN state\n"); in spi_qup_do_pio()
584 ret = spi_qup_set_state(qup, QUP_STATE_PAUSE); in spi_qup_do_pio()
586 dev_warn(qup->dev, "cannot set PAUSE state\n"); in spi_qup_do_pio()
590 if (qup->mode == QUP_IO_M_MODE_FIFO) in spi_qup_do_pio()
591 spi_qup_write(qup); in spi_qup_do_pio()
593 ret = spi_qup_set_state(qup, QUP_STATE_RUN); in spi_qup_do_pio()
595 dev_warn(qup->dev, "cannot set RUN state\n"); in spi_qup_do_pio()
599 if (!wait_for_completion_timeout(&qup->done, timeout)) in spi_qup_do_pio()
600 return -ETIMEDOUT; in spi_qup_do_pio()
603 } while (iterations--); in spi_qup_do_pio()
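The chunking arithmetic driving the loop that ends at line 603, as a standalone sketch (show_pio_chunks() is hypothetical): full SPI_MAX_XFER-sized passes first, then one remainder pass.

#include <stdio.h>

#define SPI_MAX_XFER (64 * 1024 - 64)	/* SZ_64K - 64 */

/* A transfer of n_words is split into n_words / SPI_MAX_XFER full
 * chunks, followed by one chunk of n_words % SPI_MAX_XFER. */
static void show_pio_chunks(unsigned int n_words)
{
	unsigned int iterations = n_words / SPI_MAX_XFER;

	for (unsigned int pass = 0; pass <= iterations; pass++) {
		unsigned int chunk = (pass < iterations) ?
				     SPI_MAX_XFER : n_words % SPI_MAX_XFER;
		printf("pass %u: %u words\n", pass, chunk);
	}
}

/* show_pio_chunks(150000) -> pass 0: 65472, pass 1: 65472, pass 2: 19056 */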
612 remainder_tx = DIV_ROUND_UP(spi_qup_len(controller) - in spi_qup_data_pending()
613 controller->tx_bytes, controller->w_size); in spi_qup_data_pending()
615 remainder_rx = DIV_ROUND_UP(spi_qup_len(controller) - in spi_qup_data_pending()
616 controller->rx_bytes, controller->w_size); in spi_qup_data_pending()
627 qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS); in spi_qup_qup_irq()
628 spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS); in spi_qup_qup_irq()
629 opflags = readl_relaxed(controller->base + QUP_OPERATIONAL); in spi_qup_qup_irq()
631 writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS); in spi_qup_qup_irq()
632 writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS); in spi_qup_qup_irq()
636 dev_warn(controller->dev, "OUTPUT_OVER_RUN\n"); in spi_qup_qup_irq()
638 dev_warn(controller->dev, "INPUT_UNDER_RUN\n"); in spi_qup_qup_irq()
640 dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n"); in spi_qup_qup_irq()
642 dev_warn(controller->dev, "INPUT_OVER_RUN\n"); in spi_qup_qup_irq()
644 error = -EIO; in spi_qup_qup_irq()
649 dev_warn(controller->dev, "CLK_OVER_RUN\n"); in spi_qup_qup_irq()
651 dev_warn(controller->dev, "CLK_UNDER_RUN\n"); in spi_qup_qup_irq()
653 error = -EIO; in spi_qup_qup_irq()
656 spin_lock(&controller->lock); in spi_qup_qup_irq()
657 if (!controller->error) in spi_qup_qup_irq()
658 controller->error = error; in spi_qup_qup_irq()
659 spin_unlock(&controller->lock); in spi_qup_qup_irq()
661 if (spi_qup_is_dma_xfer(controller->mode)) { in spi_qup_qup_irq()
662 writel_relaxed(opflags, controller->base + QUP_OPERATIONAL); in spi_qup_qup_irq()
671 complete(&controller->done); in spi_qup_qup_irq()
675 complete(&controller->done); in spi_qup_qup_irq()
678 if (!spi_qup_is_dma_xfer(controller->mode)) { in spi_qup_qup_irq()
682 complete(&controller->done); in spi_qup_qup_irq()
689 static int spi_qup_io_prep(struct spi_device *spi, struct spi_transfer *xfer) in spi_qup_io_prep() argument
691 struct spi_qup *controller = spi_controller_get_devdata(spi->controller); in spi_qup_io_prep()
694 if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) { in spi_qup_io_prep()
695 dev_err(controller->dev, "too big size for loopback %d > %d\n", in spi_qup_io_prep()
696 xfer->len, controller->in_fifo_sz); in spi_qup_io_prep()
697 return -EIO; in spi_qup_io_prep()
700 ret = dev_pm_opp_set_rate(controller->dev, xfer->speed_hz); in spi_qup_io_prep()
702 dev_err(controller->dev, "fail to set frequency %d", in spi_qup_io_prep()
703 xfer->speed_hz); in spi_qup_io_prep()
704 return -EIO; in spi_qup_io_prep()
707 controller->w_size = DIV_ROUND_UP(xfer->bits_per_word, 8); in spi_qup_io_prep()
708 controller->n_words = xfer->len / controller->w_size; in spi_qup_io_prep()
710 if (controller->n_words <= (controller->in_fifo_sz / sizeof(u32))) in spi_qup_io_prep()
711 controller->mode = QUP_IO_M_MODE_FIFO; in spi_qup_io_prep()
712 else if (spi->controller->can_dma && in spi_qup_io_prep()
713 spi->controller->can_dma(spi->controller, spi, xfer) && in spi_qup_io_prep()
714 spi->controller->cur_msg_mapped) in spi_qup_io_prep()
715 controller->mode = QUP_IO_M_MODE_BAM; in spi_qup_io_prep()
717 controller->mode = QUP_IO_M_MODE_BLOCK; in spi_qup_io_prep()
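The three-way mode choice in spi_qup_io_prep() (lines 710-717), restated as a standalone sketch; the enum values stand in for the driver's QUP_IO_M_MODE_* constants and dma_ok abbreviates the can_dma()/cur_msg_mapped test:

#include <stdint.h>
#include <stddef.h>

/* Placeholder stand-ins for QUP_IO_M_MODE_FIFO/BAM/BLOCK. */
enum qup_io_mode { IO_MODE_FIFO, IO_MODE_BLOCK, IO_MODE_BAM };

/* Transfers that fit entirely in the input FIFO run in FIFO mode,
 * DMA-able buffers go through the BAM, everything else falls back to
 * interrupt-driven BLOCK mode. */
static enum qup_io_mode pick_io_mode(size_t len, unsigned int bits_per_word,
				     size_t in_fifo_sz, int dma_ok)
{
	size_t w_size = (bits_per_word + 7) / 8;	/* bytes per SPI word */
	size_t n_words = len / w_size;

	if (n_words <= in_fifo_sz / sizeof(uint32_t))
		return IO_MODE_FIFO;
	if (dma_ok)
		return IO_MODE_BAM;
	return IO_MODE_BLOCK;
}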
722 /* prep qup for another spi transaction of specific type */
723 static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer) in spi_qup_io_config() argument
725 struct spi_qup *controller = spi_controller_get_devdata(spi->controller); in spi_qup_io_config()
729 spin_lock_irqsave(&controller->lock, flags); in spi_qup_io_config()
730 controller->xfer = xfer; in spi_qup_io_config()
731 controller->error = 0; in spi_qup_io_config()
732 controller->rx_bytes = 0; in spi_qup_io_config()
733 controller->tx_bytes = 0; in spi_qup_io_config()
734 spin_unlock_irqrestore(&controller->lock, flags); in spi_qup_io_config()
738 dev_err(controller->dev, "cannot set RESET state\n"); in spi_qup_io_config()
739 return -EIO; in spi_qup_io_config()
742 switch (controller->mode) { in spi_qup_io_config()
744 writel_relaxed(controller->n_words, in spi_qup_io_config()
745 controller->base + QUP_MX_READ_CNT); in spi_qup_io_config()
746 writel_relaxed(controller->n_words, in spi_qup_io_config()
747 controller->base + QUP_MX_WRITE_CNT); in spi_qup_io_config()
749 writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT); in spi_qup_io_config()
750 writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT); in spi_qup_io_config()
753 writel_relaxed(controller->n_words, in spi_qup_io_config()
754 controller->base + QUP_MX_INPUT_CNT); in spi_qup_io_config()
755 writel_relaxed(controller->n_words, in spi_qup_io_config()
756 controller->base + QUP_MX_OUTPUT_CNT); in spi_qup_io_config()
758 writel_relaxed(0, controller->base + QUP_MX_READ_CNT); in spi_qup_io_config()
759 writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT); in spi_qup_io_config()
761 if (!controller->qup_v1) { in spi_qup_io_config()
764 input_cnt = controller->base + QUP_MX_INPUT_CNT; in spi_qup_io_config()
768 * That case is a non-balanced transfer when there is in spi_qup_io_config()
771 if (xfer->tx_buf) in spi_qup_io_config()
774 writel_relaxed(controller->n_words, input_cnt); in spi_qup_io_config()
776 writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT); in spi_qup_io_config()
780 reinit_completion(&controller->done); in spi_qup_io_config()
781 writel_relaxed(controller->n_words, in spi_qup_io_config()
782 controller->base + QUP_MX_INPUT_CNT); in spi_qup_io_config()
783 writel_relaxed(controller->n_words, in spi_qup_io_config()
784 controller->base + QUP_MX_OUTPUT_CNT); in spi_qup_io_config()
786 writel_relaxed(0, controller->base + QUP_MX_READ_CNT); in spi_qup_io_config()
787 writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT); in spi_qup_io_config()
790 dev_err(controller->dev, "unknown mode = %d\n", in spi_qup_io_config()
791 controller->mode); in spi_qup_io_config()
792 return -EIO; in spi_qup_io_config()
795 iomode = readl_relaxed(controller->base + QUP_IO_M_MODES); in spi_qup_io_config()
799 if (!spi_qup_is_dma_xfer(controller->mode)) in spi_qup_io_config()
804 iomode |= (controller->mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT); in spi_qup_io_config()
805 iomode |= (controller->mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT); in spi_qup_io_config()
807 writel_relaxed(iomode, controller->base + QUP_IO_M_MODES); in spi_qup_io_config()
809 control = readl_relaxed(controller->base + SPI_IO_CONTROL); in spi_qup_io_config()
811 if (spi->mode & SPI_CPOL) in spi_qup_io_config()
816 writel_relaxed(control, controller->base + SPI_IO_CONTROL); in spi_qup_io_config()
818 config = readl_relaxed(controller->base + SPI_CONFIG); in spi_qup_io_config()
820 if (spi->mode & SPI_LOOP) in spi_qup_io_config()
825 if (spi->mode & SPI_CPHA) in spi_qup_io_config()
831 * HS_MODE improves signal stability for spi-clk high rates, in spi_qup_io_config()
834 if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP)) in spi_qup_io_config()
839 writel_relaxed(config, controller->base + SPI_CONFIG); in spi_qup_io_config()
841 config = readl_relaxed(controller->base + QUP_CONFIG); in spi_qup_io_config()
843 config |= xfer->bits_per_word - 1; in spi_qup_io_config()
846 if (spi_qup_is_dma_xfer(controller->mode)) { in spi_qup_io_config()
847 if (!xfer->tx_buf) in spi_qup_io_config()
849 if (!xfer->rx_buf) in spi_qup_io_config()
853 writel_relaxed(config, controller->base + QUP_CONFIG); in spi_qup_io_config()
856 if (!controller->qup_v1) { in spi_qup_io_config()
864 if (spi_qup_is_dma_xfer(controller->mode)) in spi_qup_io_config()
867 writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK); in spi_qup_io_config()
874 struct spi_device *spi, in spi_qup_transfer_one() argument
881 ret = spi_qup_io_prep(spi, xfer); in spi_qup_transfer_one()
885 timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC); in spi_qup_transfer_one()
887 xfer->len) * 8, timeout); in spi_qup_transfer_one()
890 reinit_completion(&controller->done); in spi_qup_transfer_one()
892 spin_lock_irqsave(&controller->lock, flags); in spi_qup_transfer_one()
893 controller->xfer = xfer; in spi_qup_transfer_one()
894 controller->error = 0; in spi_qup_transfer_one()
895 controller->rx_bytes = 0; in spi_qup_transfer_one()
896 controller->tx_bytes = 0; in spi_qup_transfer_one()
897 spin_unlock_irqrestore(&controller->lock, flags); in spi_qup_transfer_one()
899 if (spi_qup_is_dma_xfer(controller->mode)) in spi_qup_transfer_one()
900 ret = spi_qup_do_dma(spi, xfer, timeout); in spi_qup_transfer_one()
902 ret = spi_qup_do_pio(spi, xfer, timeout); in spi_qup_transfer_one()
905 spin_lock_irqsave(&controller->lock, flags); in spi_qup_transfer_one()
907 ret = controller->error; in spi_qup_transfer_one()
908 spin_unlock_irqrestore(&controller->lock, flags); in spi_qup_transfer_one()
910 if (ret && spi_qup_is_dma_xfer(controller->mode)) in spi_qup_transfer_one()
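A worked instance of the timeout arithmetic at lines 885-887 (xfer_ms() is a hypothetical standalone helper; the driver's later padding and jiffies conversion are outside this listing):

#include <stdio.h>

#define MSEC_PER_SEC	1000UL
#define SPI_MAX_XFER	(64 * 1024 - 64)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MIN(a, b)	((a) < (b) ? (a) : (b))

/* Millisecond budget for one transfer: bits-per-ms at the bus rate,
 * then how many ms the (length-capped) transfer needs at that rate. */
static unsigned long xfer_ms(unsigned long speed_hz, unsigned long len)
{
	unsigned long bits_per_ms = DIV_ROUND_UP(speed_hz, MSEC_PER_SEC);

	return DIV_ROUND_UP(MIN((unsigned long)SPI_MAX_XFER, len) * 8,
			    bits_per_ms);
}

/* e.g. xfer_ms(50000000, 4096) == 1: 50000 bits/ms, 32768 bits -> 1 ms */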
916 static bool spi_qup_can_dma(struct spi_controller *host, struct spi_device *spi, in spi_qup_can_dma() argument
919 struct spi_qup *qup = spi_controller_get_devdata(host); in spi_qup_can_dma() local
923 if (xfer->rx_buf) { in spi_qup_can_dma()
924 if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) || in spi_qup_can_dma()
925 IS_ERR_OR_NULL(host->dma_rx)) in spi_qup_can_dma()
927 if (qup->qup_v1 && (xfer->len % qup->in_blk_sz)) in spi_qup_can_dma()
931 if (xfer->tx_buf) { in spi_qup_can_dma()
932 if (!IS_ALIGNED((size_t)xfer->tx_buf, dma_align) || in spi_qup_can_dma()
933 IS_ERR_OR_NULL(host->dma_tx)) in spi_qup_can_dma()
935 if (qup->qup_v1 && (xfer->len % qup->out_blk_sz)) in spi_qup_can_dma()
939 n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8); in spi_qup_can_dma()
940 if (n_words <= (qup->in_fifo_sz / sizeof(u32))) in spi_qup_can_dma()
948 if (!IS_ERR_OR_NULL(host->dma_rx)) in spi_qup_release_dma()
949 dma_release_channel(host->dma_rx); in spi_qup_release_dma()
950 if (!IS_ERR_OR_NULL(host->dma_tx)) in spi_qup_release_dma()
951 dma_release_channel(host->dma_tx); in spi_qup_release_dma()
956 struct spi_qup *spi = spi_controller_get_devdata(host); in spi_qup_init_dma() local
957 struct dma_slave_config *rx_conf = &spi->rx_conf, in spi_qup_init_dma()
958 *tx_conf = &spi->tx_conf; in spi_qup_init_dma()
959 struct device *dev = spi->dev; in spi_qup_init_dma()
963 host->dma_rx = dma_request_chan(dev, "rx"); in spi_qup_init_dma()
964 if (IS_ERR(host->dma_rx)) in spi_qup_init_dma()
965 return PTR_ERR(host->dma_rx); in spi_qup_init_dma()
967 host->dma_tx = dma_request_chan(dev, "tx"); in spi_qup_init_dma()
968 if (IS_ERR(host->dma_tx)) { in spi_qup_init_dma()
969 ret = PTR_ERR(host->dma_tx); in spi_qup_init_dma()
974 rx_conf->direction = DMA_DEV_TO_MEM; in spi_qup_init_dma()
975 rx_conf->device_fc = 1; in spi_qup_init_dma()
976 rx_conf->src_addr = base + QUP_INPUT_FIFO; in spi_qup_init_dma()
977 rx_conf->src_maxburst = spi->in_blk_sz; in spi_qup_init_dma()
979 tx_conf->direction = DMA_MEM_TO_DEV; in spi_qup_init_dma()
980 tx_conf->device_fc = 1; in spi_qup_init_dma()
981 tx_conf->dst_addr = base + QUP_OUTPUT_FIFO; in spi_qup_init_dma()
982 tx_conf->dst_maxburst = spi->out_blk_sz; in spi_qup_init_dma()
984 ret = dmaengine_slave_config(host->dma_rx, rx_conf); in spi_qup_init_dma()
990 ret = dmaengine_slave_config(host->dma_tx, tx_conf); in spi_qup_init_dma()
999 dma_release_channel(host->dma_tx); in spi_qup_init_dma()
1001 dma_release_channel(host->dma_rx); in spi_qup_init_dma()
1005 static void spi_qup_set_cs(struct spi_device *spi, bool val) in spi_qup_set_cs() argument
1011 controller = spi_controller_get_devdata(spi->controller); in spi_qup_set_cs()
1012 spi_ioc = readl_relaxed(controller->base + SPI_IO_CONTROL); in spi_qup_set_cs()
1020 writel_relaxed(spi_ioc, controller->base + SPI_IO_CONTROL); in spi_qup_set_cs()
1035 dev = &pdev->dev; in spi_qup_probe()
1058 if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq)) in spi_qup_probe()
1063 return -ENXIO; in spi_qup_probe()
1072 if (ret && ret != -ENODEV) in spi_qup_probe()
1078 return -ENOMEM; in spi_qup_probe()
1081 /* use num-cs unless not present or out of range */ in spi_qup_probe()
1082 if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) || in spi_qup_probe()
1084 host->num_chipselect = SPI_NUM_CHIPSELECTS; in spi_qup_probe()
1086 host->num_chipselect = num_cs; in spi_qup_probe()
1088 host->use_gpio_descriptors = true; in spi_qup_probe()
1089 host->max_native_cs = SPI_NUM_CHIPSELECTS; in spi_qup_probe()
1090 host->bus_num = pdev->id; in spi_qup_probe()
1091 host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; in spi_qup_probe()
1092 host->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); in spi_qup_probe()
1093 host->max_speed_hz = max_freq; in spi_qup_probe()
1094 host->transfer_one = spi_qup_transfer_one; in spi_qup_probe()
1095 host->dev.of_node = pdev->dev.of_node; in spi_qup_probe()
1096 host->auto_runtime_pm = true; in spi_qup_probe()
1097 host->dma_alignment = dma_get_cache_alignment(); in spi_qup_probe()
1098 host->max_dma_len = SPI_MAX_XFER; in spi_qup_probe()
1104 controller->dev = dev; in spi_qup_probe()
1105 controller->base = base; in spi_qup_probe()
1106 controller->iclk = iclk; in spi_qup_probe()
1107 controller->cclk = cclk; in spi_qup_probe()
1108 controller->icc_path = icc_path; in spi_qup_probe()
1109 controller->irq = irq; in spi_qup_probe()
1111 ret = spi_qup_init_dma(host, res->start); in spi_qup_probe()
1112 if (ret == -EPROBE_DEFER) in spi_qup_probe()
1115 host->can_dma = spi_qup_can_dma; in spi_qup_probe()
1117 controller->qup_v1 = (uintptr_t)of_device_get_match_data(dev); in spi_qup_probe()
1119 if (!controller->qup_v1) in spi_qup_probe()
1120 host->set_cs = spi_qup_set_cs; in spi_qup_probe()
1122 spin_lock_init(&controller->lock); in spi_qup_probe()
1123 init_completion(&controller->done); in spi_qup_probe()
1142 controller->out_blk_sz = size * 16; in spi_qup_probe()
1144 controller->out_blk_sz = 4; in spi_qup_probe()
1148 controller->in_blk_sz = size * 16; in spi_qup_probe()
1150 controller->in_blk_sz = 4; in spi_qup_probe()
1153 controller->out_fifo_sz = controller->out_blk_sz * (2 << size); in spi_qup_probe()
1156 controller->in_fifo_sz = controller->in_blk_sz * (2 << size); in spi_qup_probe()
1159 controller->in_blk_sz, controller->in_fifo_sz, in spi_qup_probe()
1160 controller->out_blk_sz, controller->out_fifo_sz); in spi_qup_probe()
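The block/FIFO size decode around lines 1142-1156, as worked arithmetic (show_sizes() is hypothetical and the field values below are illustrative, not read from any particular QUP revision):

#include <stdio.h>

/* A block-size field of 0 means 4 bytes, otherwise field * 16 bytes;
 * the FIFO then holds blk_sz * (2 << fifo_field) bytes. */
static void show_sizes(unsigned int blk_field, unsigned int fifo_field)
{
	unsigned int blk_sz = blk_field ? blk_field * 16 : 4;
	unsigned int fifo_sz = blk_sz * (2 << fifo_field);

	printf("block %u bytes, FIFO %u bytes\n", blk_sz, fifo_sz);
}

/* show_sizes(1, 2) -> block 16 bytes, FIFO 128 bytes */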
1173 if (!controller->qup_v1) in spi_qup_probe()
1179 /* if earlier version of the QUP, disable INPUT_OVERRUN */ in spi_qup_probe()
1180 if (controller->qup_v1) in spi_qup_probe()
1189 IRQF_TRIGGER_HIGH, pdev->name, controller); in spi_qup_probe()
1205 pm_runtime_disable(&pdev->dev); in spi_qup_probe()
1224 config = readl(controller->base + QUP_CONFIG); in spi_qup_pm_suspend_runtime()
1226 writel_relaxed(config, controller->base + QUP_CONFIG); in spi_qup_pm_suspend_runtime()
1228 clk_disable_unprepare(controller->cclk); in spi_qup_pm_suspend_runtime()
1230 clk_disable_unprepare(controller->iclk); in spi_qup_pm_suspend_runtime()
1242 ret = clk_prepare_enable(controller->iclk); in spi_qup_pm_resume_runtime()
1246 ret = clk_prepare_enable(controller->cclk); in spi_qup_pm_resume_runtime()
1248 clk_disable_unprepare(controller->iclk); in spi_qup_pm_resume_runtime()
1253 config = readl_relaxed(controller->base + QUP_CONFIG); in spi_qup_pm_resume_runtime()
1255 writel_relaxed(config, controller->base + QUP_CONFIG); in spi_qup_pm_resume_runtime()
1280 clk_disable_unprepare(controller->cclk); in spi_qup_suspend()
1282 clk_disable_unprepare(controller->iclk); in spi_qup_suspend()
1292 ret = clk_prepare_enable(controller->iclk); in spi_qup_resume()
1296 ret = clk_prepare_enable(controller->cclk); in spi_qup_resume()
1298 clk_disable_unprepare(controller->iclk); in spi_qup_resume()
1313 clk_disable_unprepare(controller->cclk); in spi_qup_resume()
1314 clk_disable_unprepare(controller->iclk); in spi_qup_resume()
1321 struct spi_controller *host = dev_get_drvdata(&pdev->dev); in spi_qup_remove()
1325 ret = pm_runtime_get_sync(&pdev->dev); in spi_qup_remove()
1330 dev_warn(&pdev->dev, "failed to reset controller (%pe)\n", in spi_qup_remove()
1333 clk_disable_unprepare(controller->cclk); in spi_qup_remove()
1334 clk_disable_unprepare(controller->iclk); in spi_qup_remove()
1336 dev_warn(&pdev->dev, "failed to resume, skip hw disable (%pe)\n", in spi_qup_remove()
1342 pm_runtime_put_noidle(&pdev->dev); in spi_qup_remove()
1343 pm_runtime_disable(&pdev->dev); in spi_qup_remove()
1347 { .compatible = "qcom,spi-qup-v1.1.1", .data = (void *)1, },
1348 { .compatible = "qcom,spi-qup-v2.1.1", },
1349 { .compatible = "qcom,spi-qup-v2.2.1", },
1372 MODULE_LICENSE("GPL v2");