Lines Matching +full:wp +full:- +full:controller
(grep-style matches; each line shows its source line number, the matched text, and the enclosing function)

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
10 #include <linux/dma-direction.h>
11 #include <linux/dma-mapping.h>
59 [MHI_PM_STATE_M3_ENTER] = "M?->M3",
61 [MHI_PM_STATE_M3_EXIT] = "M3->M0",
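
The two entries above come from the mhi_pm_state_str[] table. MHI PM states are single-bit flags, so the table is indexed by the highest set bit. The lookup helper in the same file is along these lines (a reconstruction, so treat the exact body as approximate):

const char *to_mhi_pm_state_str(u32 state)
{
	int index;

	if (state)
		index = __fls(state);	/* PM states are bit flags */

	if (!state || index >= ARRAY_SIZE(mhi_pm_state_str))
		return "Invalid State";

	return mhi_pm_state_str[index];
}
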
87 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in serial_number_show()
90 mhi_cntrl->serial_number); in serial_number_show()
99 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in oem_pk_hash_show()
102 for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) in oem_pk_hash_show()
104 i, mhi_cntrl->oem_pk_hash[i]); in oem_pk_hash_show()
116 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in soc_reset_store()
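
The serial_number and oem_pk_hash matches are fragments of ordinary sysfs show callbacks on the controller's MHI device. A sketch of the idiom, filled out from the matched lines plus the standard device-attribute boilerplate (sysfs_emit() is assumed here; older kernels used snprintf()):

static ssize_t serial_number_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	return sysfs_emit(buf, "Serial Number: %u\n",
			  mhi_cntrl->serial_number);
}
static DEVICE_ATTR_RO(serial_number);
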
136 ring->alloc_size = len + (len - 1); in mhi_alloc_aligned_ring()
137 ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, in mhi_alloc_aligned_ring()
138 &ring->dma_handle, GFP_KERNEL); in mhi_alloc_aligned_ring()
139 if (!ring->pre_aligned) in mhi_alloc_aligned_ring()
140 return -ENOMEM; in mhi_alloc_aligned_ring()
142 ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1); in mhi_alloc_aligned_ring()
143 ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle); in mhi_alloc_aligned_ring()
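
mhi_alloc_aligned_ring() over-allocates by (len - 1) bytes so that a len-aligned address is guaranteed to land inside the coherent buffer; the mask arithmetic requires len to be a power of two. Worked example: len = 4096 and dma_handle = 0x10000f00 give iommu_base = 0x10001000, and the CPU-side base is pre_aligned + 0x100. A standalone sketch with hypothetical names:

/* Over-allocate-and-align idiom; len must be a power of two. */
static int example_alloc_aligned(struct device *dev, size_t len,
				 void **base, dma_addr_t *iommu_base,
				 void **pre_aligned, dma_addr_t *dma_handle)
{
	size_t alloc_size = len + (len - 1);	/* worst-case alignment padding */

	*pre_aligned = dma_alloc_coherent(dev, alloc_size, dma_handle, GFP_KERNEL);
	if (!*pre_aligned)
		return -ENOMEM;

	/* Round the device-visible address up to the next len boundary... */
	*iommu_base = (*dma_handle + (len - 1)) & ~(dma_addr_t)(len - 1);
	/* ...and shift the CPU pointer by the same offset. */
	*base = *pre_aligned + (*iommu_base - *dma_handle);
	return 0;
}
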
151 struct mhi_event *mhi_event = mhi_cntrl->mhi_event; in mhi_deinit_free_irq()
153 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { in mhi_deinit_free_irq()
154 if (mhi_event->offload_ev) in mhi_deinit_free_irq()
157 free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); in mhi_deinit_free_irq()
160 free_irq(mhi_cntrl->irq[0], mhi_cntrl); in mhi_deinit_free_irq()
165 struct mhi_event *mhi_event = mhi_cntrl->mhi_event; in mhi_init_irq_setup()
166 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_init_irq_setup()
170 /* if controller driver has set irq_flags, use it */ in mhi_init_irq_setup()
171 if (mhi_cntrl->irq_flags) in mhi_init_irq_setup()
172 irq_flags = mhi_cntrl->irq_flags; in mhi_init_irq_setup()
175 ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler, in mhi_init_irq_setup()
186 disable_irq(mhi_cntrl->irq[0]); in mhi_init_irq_setup()
188 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { in mhi_init_irq_setup()
189 if (mhi_event->offload_ev) in mhi_init_irq_setup()
192 if (mhi_event->irq >= mhi_cntrl->nr_irqs) { in mhi_init_irq_setup()
194 mhi_event->irq); in mhi_init_irq_setup()
195 ret = -EINVAL; in mhi_init_irq_setup()
199 ret = request_irq(mhi_cntrl->irq[mhi_event->irq], in mhi_init_irq_setup()
205 mhi_cntrl->irq[mhi_event->irq], i); in mhi_init_irq_setup()
209 disable_irq(mhi_cntrl->irq[mhi_event->irq]); in mhi_init_irq_setup()
215 for (--i, --mhi_event; i >= 0; i--, mhi_event--) { in mhi_init_irq_setup()
216 if (mhi_event->offload_ev) in mhi_init_irq_setup()
219 free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event); in mhi_init_irq_setup()
221 free_irq(mhi_cntrl->irq[0], mhi_cntrl); in mhi_init_irq_setup()
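
The "for (--i, --mhi_event; i >= 0; i--, mhi_event--)" loop is the usual backward unwind: if request_irq() fails at index i, only the entries below i were acquired, so the error path walks back down, skips offload (client-managed) events just as the forward pass did, then releases the shared interrupt at irq[0] that was requested first. A generic sketch of the pattern, with hypothetical names:

static int example_setup_irqs(unsigned int *irqs, void **cookies, int n,
			      irq_handler_t handler)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = request_irq(irqs[i], handler, IRQF_SHARED,
				  "example", cookies[i]);
		if (ret)
			goto err_unwind;
	}
	return 0;

err_unwind:
	while (--i >= 0)	/* free only what was actually acquired */
		free_irq(irqs[i], cookies[i]);
	return ret;
}
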
229 struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt; in mhi_deinit_dev_ctxt()
234 mhi_cmd = mhi_cntrl->mhi_cmd; in mhi_deinit_dev_ctxt()
236 ring = &mhi_cmd->ring; in mhi_deinit_dev_ctxt()
237 dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, in mhi_deinit_dev_ctxt()
238 ring->pre_aligned, ring->dma_handle); in mhi_deinit_dev_ctxt()
239 ring->base = NULL; in mhi_deinit_dev_ctxt()
240 ring->iommu_base = 0; in mhi_deinit_dev_ctxt()
243 dma_free_coherent(mhi_cntrl->cntrl_dev, in mhi_deinit_dev_ctxt()
244 sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, in mhi_deinit_dev_ctxt()
245 mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); in mhi_deinit_dev_ctxt()
247 mhi_event = mhi_cntrl->mhi_event; in mhi_deinit_dev_ctxt()
248 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { in mhi_deinit_dev_ctxt()
249 if (mhi_event->offload_ev) in mhi_deinit_dev_ctxt()
252 ring = &mhi_event->ring; in mhi_deinit_dev_ctxt()
253 dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, in mhi_deinit_dev_ctxt()
254 ring->pre_aligned, ring->dma_handle); in mhi_deinit_dev_ctxt()
255 ring->base = NULL; in mhi_deinit_dev_ctxt()
256 ring->iommu_base = 0; in mhi_deinit_dev_ctxt()
259 dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) * in mhi_deinit_dev_ctxt()
260 mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, in mhi_deinit_dev_ctxt()
261 mhi_ctxt->er_ctxt_addr); in mhi_deinit_dev_ctxt()
263 dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) * in mhi_deinit_dev_ctxt()
264 mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, in mhi_deinit_dev_ctxt()
265 mhi_ctxt->chan_ctxt_addr); in mhi_deinit_dev_ctxt()
268 mhi_cntrl->mhi_ctxt = NULL; in mhi_deinit_dev_ctxt()
281 int ret = -ENOMEM, i; in mhi_init_dev_ctxt()
283 atomic_set(&mhi_cntrl->dev_wake, 0); in mhi_init_dev_ctxt()
284 atomic_set(&mhi_cntrl->pending_pkts, 0); in mhi_init_dev_ctxt()
288 return -ENOMEM; in mhi_init_dev_ctxt()
291 mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, in mhi_init_dev_ctxt()
292 sizeof(*mhi_ctxt->chan_ctxt) * in mhi_init_dev_ctxt()
293 mhi_cntrl->max_chan, in mhi_init_dev_ctxt()
294 &mhi_ctxt->chan_ctxt_addr, in mhi_init_dev_ctxt()
296 if (!mhi_ctxt->chan_ctxt) in mhi_init_dev_ctxt()
299 mhi_chan = mhi_cntrl->mhi_chan; in mhi_init_dev_ctxt()
300 chan_ctxt = mhi_ctxt->chan_ctxt; in mhi_init_dev_ctxt()
301 for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) { in mhi_init_dev_ctxt()
303 if (mhi_chan->offload_ch) in mhi_init_dev_ctxt()
306 tmp = le32_to_cpu(chan_ctxt->chcfg); in mhi_init_dev_ctxt()
310 tmp |= FIELD_PREP(CHAN_CTX_BRSTMODE_MASK, mhi_chan->db_cfg.brstmode); in mhi_init_dev_ctxt()
312 tmp |= FIELD_PREP(CHAN_CTX_POLLCFG_MASK, mhi_chan->db_cfg.pollcfg); in mhi_init_dev_ctxt()
313 chan_ctxt->chcfg = cpu_to_le32(tmp); in mhi_init_dev_ctxt()
315 chan_ctxt->chtype = cpu_to_le32(mhi_chan->type); in mhi_init_dev_ctxt()
316 chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index); in mhi_init_dev_ctxt()
318 mhi_chan->ch_state = MHI_CH_STATE_DISABLED; in mhi_init_dev_ctxt()
319 mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp; in mhi_init_dev_ctxt()
323 mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, in mhi_init_dev_ctxt()
324 sizeof(*mhi_ctxt->er_ctxt) * in mhi_init_dev_ctxt()
325 mhi_cntrl->total_ev_rings, in mhi_init_dev_ctxt()
326 &mhi_ctxt->er_ctxt_addr, in mhi_init_dev_ctxt()
328 if (!mhi_ctxt->er_ctxt) in mhi_init_dev_ctxt()
331 er_ctxt = mhi_ctxt->er_ctxt; in mhi_init_dev_ctxt()
332 mhi_event = mhi_cntrl->mhi_event; in mhi_init_dev_ctxt()
333 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++, in mhi_init_dev_ctxt()
335 struct mhi_ring *ring = &mhi_event->ring; in mhi_init_dev_ctxt()
338 if (mhi_event->offload_ev) in mhi_init_dev_ctxt()
341 tmp = le32_to_cpu(er_ctxt->intmod); in mhi_init_dev_ctxt()
344 tmp |= FIELD_PREP(EV_CTX_INTMODT_MASK, mhi_event->intmod); in mhi_init_dev_ctxt()
345 er_ctxt->intmod = cpu_to_le32(tmp); in mhi_init_dev_ctxt()
347 er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID); in mhi_init_dev_ctxt()
348 er_ctxt->msivec = cpu_to_le32(mhi_event->irq); in mhi_init_dev_ctxt()
349 mhi_event->db_cfg.db_mode = true; in mhi_init_dev_ctxt()
351 ring->el_size = sizeof(struct mhi_ring_element); in mhi_init_dev_ctxt()
352 ring->len = ring->el_size * ring->elements; in mhi_init_dev_ctxt()
353 ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); in mhi_init_dev_ctxt()
361 ring->rp = ring->wp = ring->base; in mhi_init_dev_ctxt()
362 er_ctxt->rbase = cpu_to_le64(ring->iommu_base); in mhi_init_dev_ctxt()
363 er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase; in mhi_init_dev_ctxt()
364 er_ctxt->rlen = cpu_to_le64(ring->len); in mhi_init_dev_ctxt()
365 ring->ctxt_wp = &er_ctxt->wp; in mhi_init_dev_ctxt()
369 ret = -ENOMEM; in mhi_init_dev_ctxt()
370 mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev, in mhi_init_dev_ctxt()
371 sizeof(*mhi_ctxt->cmd_ctxt) * in mhi_init_dev_ctxt()
373 &mhi_ctxt->cmd_ctxt_addr, in mhi_init_dev_ctxt()
375 if (!mhi_ctxt->cmd_ctxt) in mhi_init_dev_ctxt()
378 mhi_cmd = mhi_cntrl->mhi_cmd; in mhi_init_dev_ctxt()
379 cmd_ctxt = mhi_ctxt->cmd_ctxt; in mhi_init_dev_ctxt()
381 struct mhi_ring *ring = &mhi_cmd->ring; in mhi_init_dev_ctxt()
383 ring->el_size = sizeof(struct mhi_ring_element); in mhi_init_dev_ctxt()
384 ring->elements = CMD_EL_PER_RING; in mhi_init_dev_ctxt()
385 ring->len = ring->el_size * ring->elements; in mhi_init_dev_ctxt()
386 ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len); in mhi_init_dev_ctxt()
390 ring->rp = ring->wp = ring->base; in mhi_init_dev_ctxt()
391 cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base); in mhi_init_dev_ctxt()
392 cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase; in mhi_init_dev_ctxt()
393 cmd_ctxt->rlen = cpu_to_le64(ring->len); in mhi_init_dev_ctxt()
394 ring->ctxt_wp = &cmd_ctxt->wp; in mhi_init_dev_ctxt()
397 mhi_cntrl->mhi_ctxt = mhi_ctxt; in mhi_init_dev_ctxt()
402 for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) { in mhi_init_dev_ctxt()
403 struct mhi_ring *ring = &mhi_cmd->ring; in mhi_init_dev_ctxt()
405 dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, in mhi_init_dev_ctxt()
406 ring->pre_aligned, ring->dma_handle); in mhi_init_dev_ctxt()
408 dma_free_coherent(mhi_cntrl->cntrl_dev, in mhi_init_dev_ctxt()
409 sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS, in mhi_init_dev_ctxt()
410 mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr); in mhi_init_dev_ctxt()
411 i = mhi_cntrl->total_ev_rings; in mhi_init_dev_ctxt()
412 mhi_event = mhi_cntrl->mhi_event + i; in mhi_init_dev_ctxt()
415 for (--i, --mhi_event; i >= 0; i--, mhi_event--) { in mhi_init_dev_ctxt()
416 struct mhi_ring *ring = &mhi_event->ring; in mhi_init_dev_ctxt()
418 if (mhi_event->offload_ev) in mhi_init_dev_ctxt()
421 dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size, in mhi_init_dev_ctxt()
422 ring->pre_aligned, ring->dma_handle); in mhi_init_dev_ctxt()
424 dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) * in mhi_init_dev_ctxt()
425 mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt, in mhi_init_dev_ctxt()
426 mhi_ctxt->er_ctxt_addr); in mhi_init_dev_ctxt()
429 dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) * in mhi_init_dev_ctxt()
430 mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt, in mhi_init_dev_ctxt()
431 mhi_ctxt->chan_ctxt_addr); in mhi_init_dev_ctxt()
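
Two idioms recur throughout mhi_init_dev_ctxt(). First, the context structures live in little-endian DMA-coherent memory shared with the device, so multi-bit fields are updated with an endian-safe read-modify-write: le32_to_cpu(), clear the mask, FIELD_PREP(), cpu_to_le32(). Second, every ring starts out empty, which MHI encodes as read pointer == write pointer == ring base, in both the device-visible context and the CPU-side bookkeeping. A condensed fragment (mask names from the driver's private header):

u32 tmp = le32_to_cpu(chan_ctxt->chcfg);	/* field lives in LE shared memory */

tmp &= ~CHAN_CTX_BRSTMODE_MASK;			/* clear, then re-encode */
tmp |= FIELD_PREP(CHAN_CTX_BRSTMODE_MASK, mhi_chan->db_cfg.brstmode);
chan_ctxt->chcfg = cpu_to_le32(tmp);

/* An empty ring: device view (context) and CPU view agree. */
er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
ring->rp = ring->wp = ring->base;
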
445 void __iomem *base = mhi_cntrl->regs; in mhi_init_mmio()
446 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_init_mmio()
453 upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), in mhi_init_mmio()
457 lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr), in mhi_init_mmio()
461 upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), in mhi_init_mmio()
465 lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr), in mhi_init_mmio()
469 upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), in mhi_init_mmio()
473 lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr), in mhi_init_mmio()
477 upper_32_bits(mhi_cntrl->iova_start), in mhi_init_mmio()
481 lower_32_bits(mhi_cntrl->iova_start), in mhi_init_mmio()
485 upper_32_bits(mhi_cntrl->iova_start), in mhi_init_mmio()
489 lower_32_bits(mhi_cntrl->iova_start), in mhi_init_mmio()
493 upper_32_bits(mhi_cntrl->iova_stop), in mhi_init_mmio()
497 lower_32_bits(mhi_cntrl->iova_stop), in mhi_init_mmio()
501 upper_32_bits(mhi_cntrl->iova_stop), in mhi_init_mmio()
505 lower_32_bits(mhi_cntrl->iova_stop), in mhi_init_mmio()
516 return -EIO; in mhi_init_mmio()
519 if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) { in mhi_init_mmio()
521 val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)); in mhi_init_mmio()
522 return -ERANGE; in mhi_init_mmio()
526 mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB); in mhi_init_mmio()
527 mhi_cntrl->wake_set = false; in mhi_init_mmio()
530 mhi_chan = mhi_cntrl->mhi_chan; in mhi_init_mmio()
531 for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++) in mhi_init_mmio()
532 mhi_chan->tre_ring.db_addr = base + val; in mhi_init_mmio()
538 return -EIO; in mhi_init_mmio()
541 if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) { in mhi_init_mmio()
543 val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)); in mhi_init_mmio()
544 return -ERANGE; in mhi_init_mmio()
548 mhi_event = mhi_cntrl->mhi_event; in mhi_init_mmio()
549 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) { in mhi_init_mmio()
550 if (mhi_event->offload_ev) in mhi_init_mmio()
553 mhi_event->ring.db_addr = base + val; in mhi_init_mmio()
557 mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER; in mhi_init_mmio()
565 mhi_cntrl->total_ev_rings); in mhi_init_mmio()
572 mhi_cntrl->hw_ev_rings); in mhi_init_mmio()
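
The two -ERANGE checks in mhi_init_mmio() guard against a buggy or malicious device: CHDBOFF/ERDBOFF are doorbell-array offsets read back from device MMIO, and each doorbell register is 8 bytes wide, so an offset is rejected unless the whole array (one slot per channel, or per event ring) still fits inside the mapped reg_len. The invariant being enforced, sketched with generic names:

/* Device-supplied doorbell base 'val' with 'count' 8-byte slots:
 * reject it unless val + 8 * count stays within the mapped region. */
if (val >= reg_len - (8 * count))
	return -ERANGE;	/* doorbell array would overrun the mapping */
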
589 buf_ring = &mhi_chan->buf_ring; in mhi_deinit_chan_ctxt()
590 tre_ring = &mhi_chan->tre_ring; in mhi_deinit_chan_ctxt()
591 chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; in mhi_deinit_chan_ctxt()
593 if (!chan_ctxt->rbase) /* Already uninitialized */ in mhi_deinit_chan_ctxt()
596 dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size, in mhi_deinit_chan_ctxt()
597 tre_ring->pre_aligned, tre_ring->dma_handle); in mhi_deinit_chan_ctxt()
598 vfree(buf_ring->base); in mhi_deinit_chan_ctxt()
600 buf_ring->base = tre_ring->base = NULL; in mhi_deinit_chan_ctxt()
601 tre_ring->ctxt_wp = NULL; in mhi_deinit_chan_ctxt()
602 chan_ctxt->rbase = 0; in mhi_deinit_chan_ctxt()
603 chan_ctxt->rlen = 0; in mhi_deinit_chan_ctxt()
604 chan_ctxt->rp = 0; in mhi_deinit_chan_ctxt()
605 chan_ctxt->wp = 0; in mhi_deinit_chan_ctxt()
607 tmp = le32_to_cpu(chan_ctxt->chcfg); in mhi_deinit_chan_ctxt()
610 chan_ctxt->chcfg = cpu_to_le32(tmp); in mhi_deinit_chan_ctxt()
625 buf_ring = &mhi_chan->buf_ring; in mhi_init_chan_ctxt()
626 tre_ring = &mhi_chan->tre_ring; in mhi_init_chan_ctxt()
627 tre_ring->el_size = sizeof(struct mhi_ring_element); in mhi_init_chan_ctxt()
628 tre_ring->len = tre_ring->el_size * tre_ring->elements; in mhi_init_chan_ctxt()
629 chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan]; in mhi_init_chan_ctxt()
630 ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len); in mhi_init_chan_ctxt()
632 return -ENOMEM; in mhi_init_chan_ctxt()
634 buf_ring->el_size = sizeof(struct mhi_buf_info); in mhi_init_chan_ctxt()
635 buf_ring->len = buf_ring->el_size * buf_ring->elements; in mhi_init_chan_ctxt()
636 buf_ring->base = vzalloc(buf_ring->len); in mhi_init_chan_ctxt()
638 if (!buf_ring->base) { in mhi_init_chan_ctxt()
639 dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size, in mhi_init_chan_ctxt()
640 tre_ring->pre_aligned, tre_ring->dma_handle); in mhi_init_chan_ctxt()
641 return -ENOMEM; in mhi_init_chan_ctxt()
644 tmp = le32_to_cpu(chan_ctxt->chcfg); in mhi_init_chan_ctxt()
647 chan_ctxt->chcfg = cpu_to_le32(tmp); in mhi_init_chan_ctxt()
649 chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base); in mhi_init_chan_ctxt()
650 chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase; in mhi_init_chan_ctxt()
651 chan_ctxt->rlen = cpu_to_le64(tre_ring->len); in mhi_init_chan_ctxt()
652 tre_ring->ctxt_wp = &chan_ctxt->wp; in mhi_init_chan_ctxt()
654 tre_ring->rp = tre_ring->wp = tre_ring->base; in mhi_init_chan_ctxt()
655 buf_ring->rp = buf_ring->wp = buf_ring->base; in mhi_init_chan_ctxt()
656 mhi_chan->db_cfg.db_mode = 1; in mhi_init_chan_ctxt()
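
Each channel carries two parallel rings: tre_ring holds the transfer ring elements in device-visible DMA-coherent memory, while buf_ring is a host-only shadow (plain vzalloc()) with one struct mhi_buf_info per TRE for the driver's bookkeeping. Note the error path frees the coherent ring when the shadow allocation fails, mirroring the teardown order in mhi_deinit_chan_ctxt() above. Condensed into one hypothetical helper:

static int example_alloc_chan_rings(struct mhi_controller *mhi_cntrl,
				    struct mhi_chan *mhi_chan)
{
	struct mhi_ring *tre = &mhi_chan->tre_ring;
	struct mhi_ring *buf = &mhi_chan->buf_ring;

	tre->el_size = sizeof(struct mhi_ring_element);
	tre->len = tre->el_size * tre->elements;
	if (mhi_alloc_aligned_ring(mhi_cntrl, tre, tre->len))
		return -ENOMEM;			/* device-visible ring */

	buf->el_size = sizeof(struct mhi_buf_info);
	buf->len = buf->el_size * buf->elements;
	buf->base = vzalloc(buf->len);		/* host-only shadow ring */
	if (!buf->base) {
		dma_free_coherent(mhi_cntrl->cntrl_dev, tre->alloc_size,
				  tre->pre_aligned, tre->dma_handle);
		return -ENOMEM;
	}
	return 0;
}
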
669 struct device *dev = mhi_cntrl->cntrl_dev; in parse_ev_cfg()
672 num = config->num_events; in parse_ev_cfg()
673 mhi_cntrl->total_ev_rings = num; in parse_ev_cfg()
674 mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event), in parse_ev_cfg()
676 if (!mhi_cntrl->mhi_event) in parse_ev_cfg()
677 return -ENOMEM; in parse_ev_cfg()
680 mhi_event = mhi_cntrl->mhi_event; in parse_ev_cfg()
682 event_cfg = &config->event_cfg[i]; in parse_ev_cfg()
684 mhi_event->er_index = i; in parse_ev_cfg()
685 mhi_event->ring.elements = event_cfg->num_elements; in parse_ev_cfg()
686 mhi_event->intmod = event_cfg->irq_moderation_ms; in parse_ev_cfg()
687 mhi_event->irq = event_cfg->irq; in parse_ev_cfg()
689 if (event_cfg->channel != U32_MAX) { in parse_ev_cfg()
691 mhi_event->chan = event_cfg->channel; in parse_ev_cfg()
692 if (mhi_event->chan >= mhi_cntrl->max_chan) { in parse_ev_cfg()
698 mhi_event->mhi_chan = in parse_ev_cfg()
699 &mhi_cntrl->mhi_chan[mhi_event->chan]; in parse_ev_cfg()
703 mhi_event->priority = 1; in parse_ev_cfg()
705 mhi_event->db_cfg.brstmode = event_cfg->mode; in parse_ev_cfg()
706 if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode)) in parse_ev_cfg()
709 if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE) in parse_ev_cfg()
710 mhi_event->db_cfg.process_db = mhi_db_brstmode; in parse_ev_cfg()
712 mhi_event->db_cfg.process_db = mhi_db_brstmode_disable; in parse_ev_cfg()
714 mhi_event->data_type = event_cfg->data_type; in parse_ev_cfg()
716 switch (mhi_event->data_type) { in parse_ev_cfg()
718 mhi_event->process_event = mhi_process_data_event_ring; in parse_ev_cfg()
721 mhi_event->process_event = mhi_process_ctrl_ev_ring; in parse_ev_cfg()
728 mhi_event->hw_ring = event_cfg->hardware_event; in parse_ev_cfg()
729 if (mhi_event->hw_ring) in parse_ev_cfg()
730 mhi_cntrl->hw_ev_rings++; in parse_ev_cfg()
732 mhi_cntrl->sw_ev_rings++; in parse_ev_cfg()
734 mhi_event->cl_manage = event_cfg->client_managed; in parse_ev_cfg()
735 mhi_event->offload_ev = event_cfg->offload_channel; in parse_ev_cfg()
743 kfree(mhi_cntrl->mhi_event); in parse_ev_cfg()
744 return -EINVAL; in parse_ev_cfg()
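
parse_ev_cfg() walks an array of struct mhi_event_config supplied by the controller driver. An illustrative table (values invented; field names from struct mhi_event_config in <linux/mhi.h>) that would exercise the branches above: a control ring with channel = U32_MAX (not bound to any channel) and a hardware data ring dedicated to one channel:

static const struct mhi_event_config example_events[] = {
	{
		/* Control event ring shared by all channels */
		.num_elements = 32,
		.irq_moderation_ms = 0,
		.irq = 1,
		.channel = U32_MAX,	/* no dedicated channel */
		.mode = MHI_DB_BRST_DISABLE,
		.data_type = MHI_ER_CTRL,
	},
	{
		/* Hardware data event ring tied to channel 100 */
		.num_elements = 1024,
		.irq_moderation_ms = 5,
		.irq = 2,
		.channel = 100,
		.mode = MHI_DB_BRST_DISABLE,
		.data_type = MHI_ER_DATA,
		.hardware_event = true,
	},
};
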
751 struct device *dev = mhi_cntrl->cntrl_dev; in parse_ch_cfg()
755 mhi_cntrl->max_chan = config->max_channels; in parse_ch_cfg()
762 mhi_cntrl->mhi_chan = vcalloc(mhi_cntrl->max_chan, in parse_ch_cfg()
763 sizeof(*mhi_cntrl->mhi_chan)); in parse_ch_cfg()
764 if (!mhi_cntrl->mhi_chan) in parse_ch_cfg()
765 return -ENOMEM; in parse_ch_cfg()
767 INIT_LIST_HEAD(&mhi_cntrl->lpm_chans); in parse_ch_cfg()
770 for (i = 0; i < config->num_channels; i++) { in parse_ch_cfg()
773 ch_cfg = &config->ch_cfg[i]; in parse_ch_cfg()
775 chan = ch_cfg->num; in parse_ch_cfg()
776 if (chan >= mhi_cntrl->max_chan) { in parse_ch_cfg()
781 mhi_chan = &mhi_cntrl->mhi_chan[chan]; in parse_ch_cfg()
782 mhi_chan->name = ch_cfg->name; in parse_ch_cfg()
783 mhi_chan->chan = chan; in parse_ch_cfg()
785 mhi_chan->tre_ring.elements = ch_cfg->num_elements; in parse_ch_cfg()
786 if (!mhi_chan->tre_ring.elements) in parse_ch_cfg()
796 mhi_chan->buf_ring.elements = ch_cfg->local_elements; in parse_ch_cfg()
797 if (!mhi_chan->buf_ring.elements) in parse_ch_cfg()
798 mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements; in parse_ch_cfg()
799 mhi_chan->er_index = ch_cfg->event_ring; in parse_ch_cfg()
800 mhi_chan->dir = ch_cfg->dir; in parse_ch_cfg()
807 mhi_chan->type = ch_cfg->type; in parse_ch_cfg()
808 if (!mhi_chan->type) in parse_ch_cfg()
809 mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir; in parse_ch_cfg()
811 mhi_chan->ee_mask = ch_cfg->ee_mask; in parse_ch_cfg()
812 mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg; in parse_ch_cfg()
813 mhi_chan->lpm_notify = ch_cfg->lpm_notify; in parse_ch_cfg()
814 mhi_chan->offload_ch = ch_cfg->offload_channel; in parse_ch_cfg()
815 mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch; in parse_ch_cfg()
816 mhi_chan->pre_alloc = ch_cfg->auto_queue; in parse_ch_cfg()
817 mhi_chan->wake_capable = ch_cfg->wake_capable; in parse_ch_cfg()
823 if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) { in parse_ch_cfg()
829 * Bi-directional and direction less channel must be an in parse_ch_cfg()
832 if ((mhi_chan->dir == DMA_BIDIRECTIONAL || in parse_ch_cfg()
833 mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) { in parse_ch_cfg()
838 if (!mhi_chan->offload_ch) { in parse_ch_cfg()
839 mhi_chan->db_cfg.brstmode = ch_cfg->doorbell; in parse_ch_cfg()
840 if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) { in parse_ch_cfg()
846 if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE) in parse_ch_cfg()
847 mhi_chan->db_cfg.process_db = mhi_db_brstmode; in parse_ch_cfg()
849 mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable; in parse_ch_cfg()
851 mhi_chan->configured = true; in parse_ch_cfg()
853 if (mhi_chan->lpm_notify) in parse_ch_cfg()
854 list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans); in parse_ch_cfg()
860 vfree(mhi_cntrl->mhi_chan); in parse_ch_cfg()
862 return -EINVAL; in parse_ch_cfg()
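
Channels are described the same way with struct mhi_channel_config, and the checks above spell out the contract: num must stay below max_channels, auto_queue (pre_alloc) only makes sense for DMA_FROM_DEVICE channels, and DMA_BIDIRECTIONAL or DMA_NONE channels must be offloaded. An illustrative uplink/downlink pair (values invented):

static const struct mhi_channel_config example_channels[] = {
	{
		.name = "LOOPBACK",
		.num = 0,
		.num_elements = 64,
		.event_ring = 0,
		.dir = DMA_TO_DEVICE,		/* uplink: host to device */
		.ee_mask = BIT(MHI_EE_AMSS),
		.doorbell = MHI_DB_BRST_DISABLE,
	},
	{
		.name = "LOOPBACK",
		.num = 1,
		.num_elements = 64,
		.event_ring = 0,
		.dir = DMA_FROM_DEVICE,		/* downlink: device to host */
		.ee_mask = BIT(MHI_EE_AMSS),
		.doorbell = MHI_DB_BRST_DISABLE,
	},
};
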
880 mhi_cntrl->timeout_ms = config->timeout_ms; in parse_config()
881 if (!mhi_cntrl->timeout_ms) in parse_config()
882 mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS; in parse_config()
884 mhi_cntrl->ready_timeout_ms = config->ready_timeout_ms; in parse_config()
885 mhi_cntrl->bounce_buf = config->use_bounce_buf; in parse_config()
886 mhi_cntrl->buffer_len = config->buf_len; in parse_config()
887 if (!mhi_cntrl->buffer_len) in parse_config()
888 mhi_cntrl->buffer_len = MHI_MAX_MTU; in parse_config()
891 mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2; in parse_config()
892 if (config->m2_no_db) in parse_config()
893 mhi_cntrl->db_access &= ~MHI_PM_M2; in parse_config()
898 vfree(mhi_cntrl->mhi_chan); in parse_config()
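
parse_config() tops this off with struct mhi_controller_config, substituting MHI_TIMEOUT_MS and MHI_MAX_MTU when timeout_ms or buf_len are left at zero. Tying the two illustrative tables above together:

static const struct mhi_controller_config example_config = {
	.max_channels = 128,
	.timeout_ms = 8000,	/* 0 would fall back to MHI_TIMEOUT_MS */
	.num_channels = ARRAY_SIZE(example_channels),
	.ch_cfg = example_channels,
	.num_events = ARRAY_SIZE(example_events),
	.event_cfg = example_events,
};
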
913 if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs || in mhi_register_controller()
914 !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put || in mhi_register_controller()
915 !mhi_cntrl->status_cb || !mhi_cntrl->read_reg || in mhi_register_controller()
916 !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs || in mhi_register_controller()
917 !mhi_cntrl->irq || !mhi_cntrl->reg_len) in mhi_register_controller()
918 return -EINVAL; in mhi_register_controller()
922 return -EINVAL; in mhi_register_controller()
924 mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, in mhi_register_controller()
925 sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); in mhi_register_controller()
926 if (!mhi_cntrl->mhi_cmd) { in mhi_register_controller()
927 ret = -ENOMEM; in mhi_register_controller()
931 INIT_LIST_HEAD(&mhi_cntrl->transition_list); in mhi_register_controller()
932 mutex_init(&mhi_cntrl->pm_mutex); in mhi_register_controller()
933 rwlock_init(&mhi_cntrl->pm_lock); in mhi_register_controller()
934 spin_lock_init(&mhi_cntrl->transition_lock); in mhi_register_controller()
935 spin_lock_init(&mhi_cntrl->wlock); in mhi_register_controller()
936 INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker); in mhi_register_controller()
937 init_waitqueue_head(&mhi_cntrl->state_event); in mhi_register_controller()
939 mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI); in mhi_register_controller()
940 if (!mhi_cntrl->hiprio_wq) { in mhi_register_controller()
941 dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n"); in mhi_register_controller()
942 ret = -ENOMEM; in mhi_register_controller()
946 mhi_cmd = mhi_cntrl->mhi_cmd; in mhi_register_controller()
948 spin_lock_init(&mhi_cmd->lock); in mhi_register_controller()
950 mhi_event = mhi_cntrl->mhi_event; in mhi_register_controller()
951 for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) { in mhi_register_controller()
953 if (mhi_event->offload_ev) in mhi_register_controller()
956 mhi_event->mhi_cntrl = mhi_cntrl; in mhi_register_controller()
957 spin_lock_init(&mhi_event->lock); in mhi_register_controller()
958 if (mhi_event->data_type == MHI_ER_CTRL) in mhi_register_controller()
959 tasklet_init(&mhi_event->task, mhi_ctrl_ev_task, in mhi_register_controller()
962 tasklet_init(&mhi_event->task, mhi_ev_task, in mhi_register_controller()
966 mhi_chan = mhi_cntrl->mhi_chan; in mhi_register_controller()
967 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { in mhi_register_controller()
968 mutex_init(&mhi_chan->mutex); in mhi_register_controller()
969 init_completion(&mhi_chan->completion); in mhi_register_controller()
970 rwlock_init(&mhi_chan->lock); in mhi_register_controller()
973 mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; in mhi_register_controller()
974 mhi_chan->intmod = mhi_event->intmod; in mhi_register_controller()
977 if (mhi_cntrl->bounce_buf) { in mhi_register_controller()
978 mhi_cntrl->map_single = mhi_map_single_use_bb; in mhi_register_controller()
979 mhi_cntrl->unmap_single = mhi_unmap_single_use_bb; in mhi_register_controller()
981 mhi_cntrl->map_single = mhi_map_single_no_bb; in mhi_register_controller()
982 mhi_cntrl->unmap_single = mhi_unmap_single_no_bb; in mhi_register_controller()
986 ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, in mhi_register_controller()
991 mhi_cntrl->family_number = FIELD_GET(SOC_HW_VERSION_FAM_NUM_BMSK, soc_info); in mhi_register_controller()
992 mhi_cntrl->device_number = FIELD_GET(SOC_HW_VERSION_DEV_NUM_BMSK, soc_info); in mhi_register_controller()
993 mhi_cntrl->major_version = FIELD_GET(SOC_HW_VERSION_MAJOR_VER_BMSK, soc_info); in mhi_register_controller()
994 mhi_cntrl->minor_version = FIELD_GET(SOC_HW_VERSION_MINOR_VER_BMSK, soc_info); in mhi_register_controller()
996 mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL); in mhi_register_controller()
997 if (mhi_cntrl->index < 0) { in mhi_register_controller()
998 ret = mhi_cntrl->index; in mhi_register_controller()
1006 /* Register controller with MHI bus */ in mhi_register_controller()
1009 dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n"); in mhi_register_controller()
1014 mhi_dev->dev_type = MHI_DEVICE_CONTROLLER; in mhi_register_controller()
1015 mhi_dev->mhi_cntrl = mhi_cntrl; in mhi_register_controller()
1016 dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index); in mhi_register_controller()
1017 mhi_dev->name = dev_name(&mhi_dev->dev); in mhi_register_controller()
1020 device_init_wakeup(&mhi_dev->dev, true); in mhi_register_controller()
1022 ret = device_add(&mhi_dev->dev); in mhi_register_controller()
1026 mhi_cntrl->mhi_dev = mhi_dev; in mhi_register_controller()
1033 put_device(&mhi_dev->dev); in mhi_register_controller()
1037 ida_free(&mhi_controller_ida, mhi_cntrl->index); in mhi_register_controller()
1039 destroy_workqueue(mhi_cntrl->hiprio_wq); in mhi_register_controller()
1041 kfree(mhi_cntrl->mhi_cmd); in mhi_register_controller()
1043 kfree(mhi_cntrl->mhi_event); in mhi_register_controller()
1044 vfree(mhi_cntrl->mhi_chan); in mhi_register_controller()
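
The guard clause at the top (lines 913-917) doubles as documentation of the controller-driver contract: the register mapping, IRQ table, and the runtime_get/runtime_put, status_cb, and read_reg/write_reg callbacks must all be populated before calling mhi_register_controller(). A hedged sketch of that call sequence (variable and callback names hypothetical):

mhi_cntrl->cntrl_dev = &pdev->dev;	/* e.g. the underlying PCI device */
mhi_cntrl->regs = bar0;			/* mapped MMIO region */
mhi_cntrl->reg_len = bar0_len;
mhi_cntrl->nr_irqs = nr_vectors;
mhi_cntrl->irq = irq_table;		/* one Linux IRQ per MHI vector */
mhi_cntrl->runtime_get = my_runtime_get;
mhi_cntrl->runtime_put = my_runtime_put;
mhi_cntrl->status_cb = my_status_cb;
mhi_cntrl->read_reg = my_read_reg;
mhi_cntrl->write_reg = my_write_reg;

ret = mhi_register_controller(mhi_cntrl, &example_config);
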
1052 struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev; in mhi_unregister_controller()
1053 struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan; in mhi_unregister_controller()
1059 destroy_workqueue(mhi_cntrl->hiprio_wq); in mhi_unregister_controller()
1060 kfree(mhi_cntrl->mhi_cmd); in mhi_unregister_controller()
1061 kfree(mhi_cntrl->mhi_event); in mhi_unregister_controller()
1064 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { in mhi_unregister_controller()
1065 if (!mhi_chan->mhi_dev) in mhi_unregister_controller()
1068 put_device(&mhi_chan->mhi_dev->dev); in mhi_unregister_controller()
1070 vfree(mhi_cntrl->mhi_chan); in mhi_unregister_controller()
1072 device_del(&mhi_dev->dev); in mhi_unregister_controller()
1073 put_device(&mhi_dev->dev); in mhi_unregister_controller()
1075 ida_free(&mhi_controller_ida, mhi_cntrl->index); in mhi_unregister_controller()
1097 struct device *dev = &mhi_cntrl->mhi_dev->dev; in mhi_prepare_for_power_up()
1101 mutex_lock(&mhi_cntrl->pm_mutex); in mhi_prepare_for_power_up()
1107 ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off); in mhi_prepare_for_power_up()
1113 if (bhi_off >= mhi_cntrl->reg_len) { in mhi_prepare_for_power_up()
1115 bhi_off, mhi_cntrl->reg_len); in mhi_prepare_for_power_up()
1116 ret = -ERANGE; in mhi_prepare_for_power_up()
1119 mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off; in mhi_prepare_for_power_up()
1121 if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) { in mhi_prepare_for_power_up()
1122 ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, in mhi_prepare_for_power_up()
1129 if (bhie_off >= mhi_cntrl->reg_len) { in mhi_prepare_for_power_up()
1132 bhie_off, mhi_cntrl->reg_len); in mhi_prepare_for_power_up()
1133 ret = -ERANGE; in mhi_prepare_for_power_up()
1136 mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off; in mhi_prepare_for_power_up()
1139 if (mhi_cntrl->rddm_size) { in mhi_prepare_for_power_up()
1141 * This controller supports RDDM, so we need to manually clear in mhi_prepare_for_power_up()
1144 memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS, in mhi_prepare_for_power_up()
1145 0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS + in mhi_prepare_for_power_up()
1150 mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image, in mhi_prepare_for_power_up()
1151 mhi_cntrl->rddm_size); in mhi_prepare_for_power_up()
1152 if (mhi_cntrl->rddm_image) { in mhi_prepare_for_power_up()
1154 mhi_cntrl->rddm_image); in mhi_prepare_for_power_up()
1157 mhi_cntrl->rddm_image); in mhi_prepare_for_power_up()
1163 mutex_unlock(&mhi_cntrl->pm_mutex); in mhi_prepare_for_power_up()
1171 mutex_unlock(&mhi_cntrl->pm_mutex); in mhi_prepare_for_power_up()
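
mhi_prepare_for_power_up() only maps the BHI/BHIe regions and, when rddm_size is set, pre-allocates the RDDM (RAM dump) table so a crashed device can be collected later; booting the device is a separate call. The typical controller-driver order, sketched:

ret = mhi_prepare_for_power_up(mhi_cntrl);	/* map BHI/BHIe, alloc RDDM */
if (ret)
	return ret;

ret = mhi_sync_power_up(mhi_cntrl);		/* run the boot state machine */
if (ret) {
	mhi_unprepare_after_power_down(mhi_cntrl);
	return ret;
}
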
1179 if (mhi_cntrl->fbc_image) { in mhi_unprepare_after_power_down()
1180 mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image); in mhi_unprepare_after_power_down()
1181 mhi_cntrl->fbc_image = NULL; in mhi_unprepare_after_power_down()
1184 if (mhi_cntrl->rddm_image) { in mhi_unprepare_after_power_down()
1185 mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image); in mhi_unprepare_after_power_down()
1186 mhi_cntrl->rddm_image = NULL; in mhi_unprepare_after_power_down()
1189 mhi_cntrl->bhi = NULL; in mhi_unprepare_after_power_down()
1190 mhi_cntrl->bhie = NULL; in mhi_unprepare_after_power_down()
1201 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI in mhi_release_device()
1204 * controller suspend and resume. in mhi_release_device()
1206 if (mhi_dev->ul_chan) in mhi_release_device()
1207 mhi_dev->ul_chan->mhi_dev = NULL; in mhi_release_device()
1209 if (mhi_dev->dl_chan) in mhi_release_device()
1210 mhi_dev->dl_chan->mhi_dev = NULL; in mhi_release_device()
1222 return ERR_PTR(-ENOMEM); in mhi_alloc_device()
1224 dev = &mhi_dev->dev; in mhi_alloc_device()
1226 dev->bus = &mhi_bus_type; in mhi_alloc_device()
1227 dev->release = mhi_release_device; in mhi_alloc_device()
1229 if (mhi_cntrl->mhi_dev) { in mhi_alloc_device()
1230 /* for MHI client devices, parent is the MHI controller device */ in mhi_alloc_device()
1231 dev->parent = &mhi_cntrl->mhi_dev->dev; in mhi_alloc_device()
1233 /* for MHI controller device, parent is the bus device (e.g. pci device) */ in mhi_alloc_device()
1234 dev->parent = mhi_cntrl->cntrl_dev; in mhi_alloc_device()
1237 mhi_dev->mhi_cntrl = mhi_cntrl; in mhi_alloc_device()
1238 mhi_dev->dev_wake = 0; in mhi_alloc_device()
1246 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_driver_probe()
1247 struct device_driver *drv = dev->driver; in mhi_driver_probe()
1250 struct mhi_chan *ul_chan = mhi_dev->ul_chan; in mhi_driver_probe()
1251 struct mhi_chan *dl_chan = mhi_dev->dl_chan; in mhi_driver_probe()
1259 ret = -EINVAL; in mhi_driver_probe()
1266 if (ul_chan->lpm_notify && !mhi_drv->status_cb) in mhi_driver_probe()
1269 /* For non-offload channels then xfer_cb should be provided */ in mhi_driver_probe()
1270 if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb) in mhi_driver_probe()
1273 ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; in mhi_driver_probe()
1276 ret = -EINVAL; in mhi_driver_probe()
1282 if (dl_chan->lpm_notify && !mhi_drv->status_cb) in mhi_driver_probe()
1285 /* For non-offload channels then xfer_cb should be provided */ in mhi_driver_probe()
1286 if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb) in mhi_driver_probe()
1289 mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index]; in mhi_driver_probe()
1296 if (mhi_event->cl_manage && !mhi_drv->status_cb) in mhi_driver_probe()
1299 dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; in mhi_driver_probe()
1303 ret = mhi_drv->probe(mhi_dev, mhi_dev->id); in mhi_driver_probe()
1322 struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver); in mhi_driver_remove()
1323 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; in mhi_driver_remove()
1331 /* Skip if it is a controller device */ in mhi_driver_remove()
1332 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) in mhi_driver_remove()
1337 mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; in mhi_driver_remove()
1343 write_lock_irq(&mhi_chan->lock); in mhi_driver_remove()
1344 mhi_chan->ccs = MHI_EV_CC_INVALID; in mhi_driver_remove()
1345 complete_all(&mhi_chan->completion); in mhi_driver_remove()
1346 write_unlock_irq(&mhi_chan->lock); in mhi_driver_remove()
1349 mutex_lock(&mhi_chan->mutex); in mhi_driver_remove()
1350 write_lock_irq(&mhi_chan->lock); in mhi_driver_remove()
1351 ch_state[dir] = mhi_chan->ch_state; in mhi_driver_remove()
1352 mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED; in mhi_driver_remove()
1353 write_unlock_irq(&mhi_chan->lock); in mhi_driver_remove()
1355 /* Reset the non-offload channel */ in mhi_driver_remove()
1356 if (!mhi_chan->offload_ch) in mhi_driver_remove()
1359 mutex_unlock(&mhi_chan->mutex); in mhi_driver_remove()
1362 mhi_drv->remove(mhi_dev); in mhi_driver_remove()
1364 /* De-init channel if it was enabled */ in mhi_driver_remove()
1366 mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; in mhi_driver_remove()
1371 mutex_lock(&mhi_chan->mutex); in mhi_driver_remove()
1375 !mhi_chan->offload_ch) in mhi_driver_remove()
1378 mhi_chan->ch_state = MHI_CH_STATE_DISABLED; in mhi_driver_remove()
1380 mutex_unlock(&mhi_chan->mutex); in mhi_driver_remove()
1383 while (mhi_dev->dev_wake) in mhi_driver_remove()
1391 struct device_driver *driver = &mhi_drv->driver; in __mhi_driver_register()
1393 if (!mhi_drv->probe || !mhi_drv->remove) in __mhi_driver_register()
1394 return -EINVAL; in __mhi_driver_register()
1396 driver->bus = &mhi_bus_type; in __mhi_driver_register()
1397 driver->owner = owner; in __mhi_driver_register()
1398 driver->probe = mhi_driver_probe; in __mhi_driver_register()
1399 driver->remove = mhi_driver_remove; in __mhi_driver_register()
1407 driver_unregister(&mhi_drv->driver); in mhi_driver_unregister()
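
On the client side, all of the probe/remove plumbing above is reached through a plain struct mhi_driver. A minimal skeleton (hypothetical client matching the LOOPBACK channel pair; callbacks left empty), noting that mhi_driver_probe() requires ul_xfer_cb/dl_xfer_cb for non-offload channels:

static int my_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
	return 0;	/* claim the channel pair */
}

static void my_remove(struct mhi_device *mhi_dev)
{
}

static void my_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *result)
{
	/* completion of a queued transfer */
}

static const struct mhi_device_id my_id_table[] = {
	{ .chan = "LOOPBACK" },
	{}
};
MODULE_DEVICE_TABLE(mhi, my_id_table);

static struct mhi_driver my_driver = {
	.id_table = my_id_table,
	.probe = my_probe,
	.remove = my_remove,
	.ul_xfer_cb = my_xfer_cb,	/* required for non-offload UL channels */
	.dl_xfer_cb = my_xfer_cb,	/* required for non-offload DL channels */
	.driver = {
		.name = "my_mhi_client",
	},
};
module_mhi_driver(my_driver);
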
1416 mhi_dev->name); in mhi_uevent()
1426 * If the device is a controller type then there is no client driver in mhi_match()
1429 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) in mhi_match()
1432 for (id = mhi_drv->id_table; id->chan[0]; id++) in mhi_match()
1433 if (!strcmp(mhi_dev->name, id->chan)) { in mhi_match()
1434 mhi_dev->id = id; in mhi_match()