Lines Matching defs:gpii (Qualcomm GPI DMA engine driver, drivers/dma/qcom/gpi.c)
475 u32 max_gpii; /* maximum # of gpii instances available per gpi block */
476 u32 gpii_mask; /* gpii instances available for apps */
478 struct gpii *gpiis;
486 struct gpii *gpii;
497 struct gpii {
539 static void gpi_process_events(struct gpii *gpii);
562 static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
567 static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
573 gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val)
575 void __iomem *addr = gpii->regs + offset;
576 u32 tmp = gpi_read_reg(gpii, addr);
581 gpi_write_reg(gpii, addr, tmp);
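For context, a minimal reconstruction of the three register helpers whose matching lines appear above. The relaxed MMIO accessors and the two mask/merge lines are assumptions inferred from the surrounding fragments, not verbatim source:

static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
{
	return readl_relaxed(addr);	/* assumed: relaxed MMIO read */
}

static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
{
	writel_relaxed(val, addr);	/* assumed: relaxed MMIO write */
}

static inline void
gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val)
{
	void __iomem *addr = gpii->regs + offset;
	u32 tmp = gpi_read_reg(gpii, addr);

	tmp &= ~mask;			/* clear the target field */
	tmp |= (val & mask);		/* merge in the new bits */
	gpi_write_reg(gpii, addr, tmp);	/* classic read-modify-write */
}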
584 static void gpi_disable_interrupts(struct gpii *gpii)
586 gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
588 gpi_update_reg(gpii, GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gpii->gpii_id),
590 gpi_update_reg(gpii, GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(gpii->gpii_id),
592 gpi_update_reg(gpii, GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(gpii->gpii_id),
594 gpi_update_reg(gpii, GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(gpii->gpii_id),
596 gpi_update_reg(gpii, GPII_n_CNTXT_GPII_IRQ_EN_OFFS(gpii->gpii_id),
598 gpi_update_reg(gpii, GPII_n_CNTXT_INTSET_OFFS(gpii->gpii_id),
601 gpii->cntxt_type_irq_msk = 0;
602 devm_free_irq(gpii->gpi_dev->dev, gpii->irq, gpii);
603 gpii->configured_irq = false;
607 static int gpi_config_interrupts(struct gpii *gpii, enum gpii_irq_settings settings, bool mask)
616 if (!gpii->configured_irq) {
617 ret = devm_request_irq(gpii->gpi_dev->dev, gpii->irq,
619 "gpi-dma", gpii);
621 dev_err(gpii->gpi_dev->dev, "error request irq:%d ret:%d\n",
622 gpii->irq, ret);
629 * GPII only uses one EV ring per gpii so we can globally
630 * enable/disable the IEOB interrupt
633 gpii->cntxt_type_irq_msk |= GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
635 gpii->cntxt_type_irq_msk &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB);
636 gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
637 GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK, gpii->cntxt_type_irq_msk);
639 gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
641 gpi_update_reg(gpii, GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gpii->gpii_id),
644 gpi_update_reg(gpii, GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(gpii->gpii_id),
647 gpi_update_reg(gpii, GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(gpii->gpii_id),
650 gpi_update_reg(gpii, GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(gpii->gpii_id),
653 gpi_update_reg(gpii, GPII_n_CNTXT_GPII_IRQ_EN_OFFS(gpii->gpii_id),
655 gpi_update_reg(gpii, GPII_n_CNTXT_MSI_BASE_LSB_OFFS(gpii->gpii_id), U32_MAX, 0);
656 gpi_update_reg(gpii, GPII_n_CNTXT_MSI_BASE_MSB_OFFS(gpii->gpii_id), U32_MAX, 0);
657 gpi_update_reg(gpii, GPII_n_CNTXT_SCRATCH_0_OFFS(gpii->gpii_id), U32_MAX, 0);
658 gpi_update_reg(gpii, GPII_n_CNTXT_SCRATCH_1_OFFS(gpii->gpii_id), U32_MAX, 0);
659 gpi_update_reg(gpii, GPII_n_CNTXT_INTSET_OFFS(gpii->gpii_id),
661 gpi_update_reg(gpii, GPII_n_ERROR_LOG_OFFS(gpii->gpii_id), U32_MAX, 0);
663 gpii->cntxt_type_irq_msk = enable;
666 gpii->configured_irq = true;
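Because each gpii drives a single event ring (per the comment at line 629), the driver can toggle IEOB through one cached copy of the TYPE_IRQ mask. A hedged sketch of that branch inside gpi_config_interrupts(), built only from names visible in the listing; the if/else structure is an assumption:

if (settings == MASK_IEOB_SETTINGS) {
	/* in GPI the *_IRQ_MSK registers act as enables: setting the
	 * IEOB bit unmasks the interrupt, clearing it masks it
	 */
	if (mask)
		gpii->cntxt_type_irq_msk |= GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
	else
		gpii->cntxt_type_irq_msk &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB);

	gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
		       GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK, gpii->cntxt_type_irq_msk);
}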
670 /* Sends gpii event or channel command */
671 static int gpi_send_cmd(struct gpii *gpii, struct gchan *gchan,
684 dev_dbg(gpii->gpi_dev->dev,
688 reinit_completion(&gpii->cmd_completion);
689 gpii->gpi_cmd = gpi_cmd;
691 cmd_reg = IS_CHAN_CMD(gpi_cmd) ? gchan->ch_cmd_reg : gpii->ev_cmd_reg;
694 gpi_write_reg(gpii, cmd_reg, cmd);
695 timeout = wait_for_completion_timeout(&gpii->cmd_completion,
698 dev_err(gpii->gpi_dev->dev, "cmd: %s completion timeout:%u\n",
710 if (!IS_CHAN_CMD(gpi_cmd) && gpii->ev_state == gpi_cmd_info[gpi_cmd].state)
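Reading lines 688-710 together, gpi_send_cmd() is a write-and-wait handshake: program the opcode, sleep on cmd_completion (signalled from the IRQ path), then verify the resulting state. A reconstruction of the elided flow; GPII_n_CH_CMD(), GPII_n_EV_CMD() and CMD_TIMEOUT_MS are assumed names, and error logging is omitted:

static int gpi_send_cmd(struct gpii *gpii, struct gchan *gchan,
			enum gpi_cmd gpi_cmd)
{
	u32 chid = IS_CHAN_CMD(gpi_cmd) ? gchan->chid : 0;
	void __iomem *cmd_reg;
	unsigned long timeout;
	u32 cmd;

	reinit_completion(&gpii->cmd_completion);
	gpii->gpi_cmd = gpi_cmd;

	/* channel commands target the channel's cmd register,
	 * event-ring commands the gpii-wide one
	 */
	cmd_reg = IS_CHAN_CMD(gpi_cmd) ? gchan->ch_cmd_reg : gpii->ev_cmd_reg;
	cmd = IS_CHAN_CMD(gpi_cmd) ?
		GPII_n_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, chid) :	/* assumed macro */
		GPII_n_EV_CMD(gpi_cmd_info[gpi_cmd].opcode, 0);		/* assumed macro */
	gpi_write_reg(gpii, cmd_reg, cmd);

	/* the IRQ handler fires cmd_completion on the state-change interrupt */
	timeout = wait_for_completion_timeout(&gpii->cmd_completion,
					      msecs_to_jiffies(CMD_TIMEOUT_MS));
	if (!timeout)
		return -EIO;

	/* success only if hardware reached the state the command table predicts */
	if (IS_CHAN_CMD(gpi_cmd) && gchan->ch_state == gpi_cmd_info[gpi_cmd].state)
		return 0;
	if (!IS_CHAN_CMD(gpi_cmd) && gpii->ev_state == gpi_cmd_info[gpi_cmd].state)
		return 0;

	return -EIO;
}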
720 struct gpii *gpii = gchan->gpii;
724 gpi_write_reg(gpii, gchan->ch_cntxt_db_reg, p_wp);
728 static inline void gpi_write_ev_db(struct gpii *gpii,
734 gpi_write_reg(gpii, gpii->ev_cntxt_db_reg, p_wp);
738 static void gpi_process_ieob(struct gpii *gpii)
740 gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));
742 gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 0);
743 tasklet_hi_schedule(&gpii->ev_task);
747 static void gpi_process_ch_ctrl_irq(struct gpii *gpii)
749 u32 gpii_id = gpii->gpii_id;
751 u32 ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
757 gpi_write_reg(gpii, gpii->regs + offset, (u32)ch_irq);
763 gchan = &gpii->gchan[chid];
764 state = gpi_read_reg(gpii, gchan->ch_cntxt_base_reg +
773 if (gpii->gpi_cmd == GPI_CH_CMD_DE_ALLOC)
783 complete_all(&gpii->cmd_completion);
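A reconstruction of the loop these channel-control lines belong to, assuming the driver walks the set bits of the latched CH IRQ status; the offset macro, the FIELD_GET() mask, and the state constants are assumptions:

static void gpi_process_ch_ctrl_irq(struct gpii *gpii)	/* reconstructed */
{
	u32 gpii_id = gpii->gpii_id;
	u32 offset = GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(gpii_id);	/* assumed macro */
	unsigned long ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
	struct gchan *gchan;
	u32 chid, state;

	/* clear the latched status before handling it */
	gpi_write_reg(gpii, gpii->regs + offset, (u32)ch_irq);

	for_each_set_bit(chid, &ch_irq, MAX_CHANNELS_PER_GPII) {
		gchan = &gpii->gchan[chid];
		state = gpi_read_reg(gpii, gchan->ch_cntxt_base_reg +
				     CNTXT_0_CONFIG);
		state = FIELD_GET(GPII_n_CH_k_CNTXT_0_CHSTATE, state);	/* assumed mask */

		/* a completed DE_ALLOC leaves no valid hardware state */
		if (gpii->gpi_cmd == GPI_CH_CMD_DE_ALLOC)
			state = CH_STATE_NOT_ALLOCATED;	/* assumed constant */

		gchan->ch_state = state;

		/* STOP can complete asynchronously; don't signal mid-stop */
		if (gchan->ch_state != CH_STATE_STOP_IN_PROC)
			complete_all(&gpii->cmd_completion);
	}
}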
788 static void gpi_process_gen_err_irq(struct gpii *gpii)
790 u32 gpii_id = gpii->gpii_id;
792 u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
795 dev_dbg(gpii->gpi_dev->dev, "irq_stts:0x%x\n", irq_stts);
799 gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
803 static void gpi_process_glob_err_irq(struct gpii *gpii)
805 u32 gpii_id = gpii->gpii_id;
807 u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
810 gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
814 dev_err(gpii->gpi_dev->dev, "invalid error status:0x%x\n", irq_stts);
819 gpi_write_reg(gpii, gpii->regs + offset, 0);
822 /* gpii interrupt handler */
825 struct gpii *gpii = data;
826 u32 gpii_id = gpii->gpii_id;
830 read_lock_irqsave(&gpii->pm_lock, flags);
836 if (!REG_ACCESS_VALID(gpii->pm_state)) {
837 dev_err(gpii->gpi_dev->dev, "receive interrupt while in %s state\n",
838 TO_GPI_PM_STR(gpii->pm_state));
842 offset = GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
843 type = gpi_read_reg(gpii, gpii->regs + offset);
846 /* global gpii error */
848 gpi_process_glob_err_irq(gpii);
854 gpi_process_ieob(gpii);
863 dev_dbg(gpii->gpi_dev->dev,
866 ev_ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
870 gpi_write_reg(gpii, gpii->regs + offset, ev_ch_irq);
871 ev_state = gpi_read_reg(gpii, gpii->ev_cntxt_base_reg +
880 if (gpii->gpi_cmd == GPI_EV_CMD_DEALLOC)
883 gpii->ev_state = ev_state;
884 dev_dbg(gpii->gpi_dev->dev, "setting EV state to %s\n",
885 TO_GPI_EV_STATE_STR(gpii->ev_state));
886 complete_all(&gpii->cmd_completion);
892 dev_dbg(gpii->gpi_dev->dev, "process CH CTRL interrupts\n");
893 gpi_process_ch_ctrl_irq(gpii);
898 dev_err(gpii->gpi_dev->dev, "Unhandled interrupt status:0x%x\n", type);
899 gpi_process_gen_err_irq(gpii);
903 offset = GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
904 type = gpi_read_reg(gpii, gpii->regs + offset);
908 read_unlock_irqrestore(&gpii->pm_lock, flags);
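Assembling the handler fragments above: the TYPE_IRQ register is read and re-read until no source remains latched. A reconstructed skeleton; the handler name and the individual *_MSK_* bit names are assumptions where not shown in the listing, and the inline EV_CTRL handling (lines 863-886) is reduced to a comment:

static irqreturn_t gpi_handle_irq(int irq, void *data)	/* assumed name */
{
	struct gpii *gpii = data;
	unsigned long flags;
	u32 offset, type;

	read_lock_irqsave(&gpii->pm_lock, flags);
	if (!REG_ACCESS_VALID(gpii->pm_state))
		goto exit_irq;	/* registers unsafe in this pm state */

	offset = GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
	type = gpi_read_reg(gpii, gpii->regs + offset);

	do {
		if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB)	/* global gpii error */
			gpi_process_glob_err_irq(gpii);
		if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB)	/* end of block */
			gpi_process_ieob(gpii);
		/* EV_CTRL (event ring state change, lines 863-886) handled inline */
		if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL)	/* ch state change */
			gpi_process_ch_ctrl_irq(gpii);
		if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL)	/* anything else */
			gpi_process_gen_err_irq(gpii);

		/* new sources may have latched while we were busy */
		type = gpi_read_reg(gpii, gpii->regs + offset);
	} while (type);

exit_irq:
	read_unlock_irqrestore(&gpii->pm_lock, flags);
	return IRQ_HANDLED;
}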
917 struct gpii *gpii = gchan->gpii;
930 dev_err(gpii->gpi_dev->dev, "skipping processing event because ch @ %s state\n",
942 dev_dbg(gpii->gpi_dev->dev, "event without a pending descriptor!\n");
944 dev_dbg(gpii->gpi_dev->dev,
949 dev_dbg(gpii->gpi_dev->dev,
971 if (imed_event->code == MSM_GPI_TCE_EOT && gpii->ieob_set) {
999 struct gpii *gpii = gchan->gpii;
1010 dev_err(gpii->gpi_dev->dev, "skipping processing event because ch @ %s state\n",
1021 dev_err(gpii->gpi_dev->dev, "Event without a pending descriptor!\n");
1023 dev_err(gpii->gpi_dev->dev,
1046 if (compl_event->code == MSM_GPI_TCE_EOT && gpii->ieob_set) {
1054 dev_err(gpii->gpi_dev->dev, "Error in Transaction\n");
1057 dev_dbg(gpii->gpi_dev->dev, "Transaction Success\n");
1061 dev_dbg(gpii->gpi_dev->dev, "Residue %d\n", result.residue);
1075 static void gpi_process_events(struct gpii *gpii)
1077 struct gpi_ring *ev_ring = &gpii->ev_ring;
1084 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1093 dev_dbg(gpii->gpi_dev->dev,
1101 gchan = &gpii->gchan[chid];
1106 dev_dbg(gpii->gpi_dev->dev, "stale event, not processing\n");
1109 gchan = &gpii->gchan[chid];
1114 dev_dbg(gpii->gpi_dev->dev, "QUP_NOTIF_EV_TYPE\n");
1117 dev_dbg(gpii->gpi_dev->dev,
1122 gpi_write_ev_db(gpii, ev_ring, ev_ring->wp);
1125 gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));
1127 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
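The event loop in gpi_process_events() chases the hardware read pointer until it stops moving. A sketch of that shape; to_virtual() and gpi_recycle_ev_element() are hypothetical helpers standing in for code not present in the listing:

static void gpi_process_events(struct gpii *gpii)	/* reconstructed shape */
{
	struct gpi_ring *ev_ring = &gpii->ev_ring;
	phys_addr_t cntxt_rp;
	void *rp;

	cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
	rp = to_virtual(ev_ring, cntxt_rp);	/* hypothetical phys->virt helper */

	do {
		/* consume every event the hardware has published */
		while (rp != ev_ring->rp) {
			/* dispatch the event at ev_ring->rp by type:
			 * XFER_COMPLETE_EV_TYPE goes to the owning gchan,
			 * QUP_NOTIF_EV_TYPE is logged, stale events are
			 * skipped (see lines 1101-1117 above)
			 */
			gpi_recycle_ev_element(ev_ring); /* hypothetical: advance rp/wp */
		}
		gpi_write_ev_db(gpii, ev_ring, ev_ring->wp);

		/* clear IEOB, then look again for late arrivals */
		gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));
		cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
		rp = to_virtual(ev_ring, cntxt_rp);
	} while (rp != ev_ring->rp);
}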
1136 struct gpii *gpii = (struct gpii *)data;
1138 read_lock(&gpii->pm_lock);
1139 if (!REG_ACCESS_VALID(gpii->pm_state)) {
1140 read_unlock(&gpii->pm_lock);
1141 dev_err(gpii->gpi_dev->dev, "not processing any events, pm_state:%s\n",
1142 TO_GPI_PM_STR(gpii->pm_state));
1147 gpi_process_events(gpii);
1150 gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 1);
1151 read_unlock(&gpii->pm_lock);
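The IEOB top half (lines 740-743) clears the status, masks IEOB and defers to this tasklet; the tasklet drains the ring and hands IEOB back to the hard IRQ path. A near-complete reconstruction from the lines above (only the error log is elided):

static void gpi_ev_tasklet(unsigned long data)
{
	struct gpii *gpii = (struct gpii *)data;

	read_lock(&gpii->pm_lock);
	if (!REG_ACCESS_VALID(gpii->pm_state)) {
		read_unlock(&gpii->pm_lock);
		return;	/* registers are off limits in this pm state */
	}

	gpi_process_events(gpii);	/* drain the event ring */

	/* re-enable IEOB, switching back to interrupt-driven operation */
	gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 1);
	read_unlock(&gpii->pm_lock);
}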
1157 struct gpii *gpii = gchan->gpii;
1158 struct gpi_ring *ev_ring = &gpii->ev_ring;
1162 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1175 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1183 struct gpii *gpii = gchan->gpii;
1188 ret = gpi_send_cmd(gpii, gchan, gpi_cmd);
1190 dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
1203 write_lock_irq(&gpii->pm_lock);
1210 write_unlock_irq(&gpii->pm_lock);
1218 struct gpii *gpii = gchan->gpii;
1221 ret = gpi_send_cmd(gpii, gchan, GPI_CH_CMD_START);
1223 dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
1228 /* gpii CH is active now */
1229 write_lock_irq(&gpii->pm_lock);
1231 write_unlock_irq(&gpii->pm_lock);
1238 struct gpii *gpii = gchan->gpii;
1241 ret = gpi_send_cmd(gpii, gchan, GPI_CH_CMD_STOP);
1243 dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
1254 struct gpii *gpii = chan->gpii;
1257 u32 id = gpii->gpii_id;
1262 ret = gpi_send_cmd(gpii, chan, GPI_CH_CMD_ALLOCATE);
1264 dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
1270 gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_0_CONFIG,
1272 gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_1_R_LENGTH, ring->len);
1273 gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_2_RING_BASE_LSB, ring->phys_addr);
1274 gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_3_RING_BASE_MSB,
1276 gpi_write_reg(gpii, chan->ch_cntxt_db_reg + CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
1278 gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_0_OFFS(id, chid),
1280 gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_1_OFFS(id, chid), 0);
1281 gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_2_OFFS(id, chid), 0);
1282 gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_3_OFFS(id, chid), 0);
1283 gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_QOS_OFFS(id, chid), 1);
1291 static int gpi_alloc_ev_chan(struct gpii *gpii)
1293 struct gpi_ring *ring = &gpii->ev_ring;
1294 void __iomem *base = gpii->ev_cntxt_base_reg;
1297 ret = gpi_send_cmd(gpii, NULL, GPI_EV_CMD_ALLOCATE);
1299 dev_err(gpii->gpi_dev->dev, "error with cmd:%s ret:%d\n",
1305 gpi_write_reg(gpii, base + CNTXT_0_CONFIG,
1307 gpi_write_reg(gpii, base + CNTXT_1_R_LENGTH, ring->len);
1308 gpi_write_reg(gpii, base + CNTXT_2_RING_BASE_LSB, lower_32_bits(ring->phys_addr));
1309 gpi_write_reg(gpii, base + CNTXT_3_RING_BASE_MSB, upper_32_bits(ring->phys_addr));
1310 gpi_write_reg(gpii, gpii->ev_cntxt_db_reg + CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
1312 gpi_write_reg(gpii, base + CNTXT_8_RING_INT_MOD, 0);
1313 gpi_write_reg(gpii, base + CNTXT_10_RING_MSI_LSB, 0);
1314 gpi_write_reg(gpii, base + CNTXT_11_RING_MSI_MSB, 0);
1315 gpi_write_reg(gpii, base + CNTXT_8_RING_INT_MOD, 0);
1316 gpi_write_reg(gpii, base + CNTXT_12_RING_RP_UPDATE_LSB, 0);
1317 gpi_write_reg(gpii, base + CNTXT_13_RING_RP_UPDATE_MSB, 0);
1325 /* gpii is active now */
1326 write_lock_irq(&gpii->pm_lock);
1327 gpii->pm_state = ACTIVE_STATE;
1328 write_unlock_irq(&gpii->pm_lock);
1329 gpi_write_ev_db(gpii, ring, ring->wp);
1382 struct gpii *gpii)
1384 dma_free_coherent(gpii->gpi_dev->dev, ring->alloc_size,
1391 u32 el_size, struct gpii *gpii)
1402 dev_dbg(gpii->gpi_dev->dev,
1407 ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
1411 dev_err(gpii->gpi_dev->dev, "could not alloc size:%zu mem for ring\n",
1430 dev_dbg(gpii->gpi_dev->dev,
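gpi_alloc_ring() over-allocates so the ring can sit aligned to its own power-of-two length. A hedged reconstruction; ring fields not visible above (dma_handle, base, rp, wp, elements, configured) are assumptions:

static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
			  u32 el_size, struct gpii *gpii)
{
	u64 len = (u64)elements * el_size;

	len = roundup_pow_of_two(len);	/* ring length must be a power of 2 */

	/* allocate len + (len - 1) bytes so an aligned window always fits */
	ring->alloc_size = len + (len - 1);
	ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
					       ring->alloc_size,
					       &ring->dma_handle, GFP_KERNEL);
	if (!ring->pre_aligned)
		return -ENOMEM;

	/* round the DMA address up to the ring length */
	ring->phys_addr = (ring->dma_handle + (len - 1)) & ~(len - 1);
	ring->base = ring->pre_aligned + (ring->phys_addr - ring->dma_handle);
	ring->rp = ring->base;
	ring->wp = ring->base;
	ring->len = len;
	ring->el_size = el_size;
	ring->elements = ring->len / ring->el_size;
	memset(ring->base, 0, ring->len);
	ring->configured = true;

	return 0;
}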
1439 static void gpi_queue_xfer(struct gpii *gpii, struct gchan *gchan,
1448 dev_err(gpii->gpi_dev->dev, "Error adding ring element to xfer ring\n");
1461 struct gpii *gpii = gchan->gpii;
1465 mutex_lock(&gpii->ctrl_lock);
1476 gchan = &gpii->gchan[i];
1479 write_lock_irq(&gpii->pm_lock);
1481 write_unlock_irq(&gpii->pm_lock);
1489 gchan = &gpii->gchan[i];
1493 dev_err(gpii->gpi_dev->dev, "Error resetting channel ret:%d\n", ret);
1500 dev_err(gpii->gpi_dev->dev, "Error alloc_channel ret:%d\n", ret);
1507 gchan = &gpii->gchan[i];
1511 dev_err(gpii->gpi_dev->dev, "Error Starting Channel ret:%d\n", ret);
1517 mutex_unlock(&gpii->ctrl_lock);
1525 struct gpii *gpii = gchan->gpii;
1528 mutex_lock(&gpii->ctrl_lock);
1531 * pause/resume are per gpii not per channel, so
1532 * the client needs to call pause only once
1534 if (gpii->pm_state == PAUSE_STATE) {
1535 dev_dbg(gpii->gpi_dev->dev, "channel is already paused\n");
1536 mutex_unlock(&gpii->ctrl_lock);
1542 ret = gpi_stop_chan(&gpii->gchan[i]);
1544 mutex_unlock(&gpii->ctrl_lock);
1549 disable_irq(gpii->irq);
1552 tasklet_kill(&gpii->ev_task);
1554 write_lock_irq(&gpii->pm_lock);
1555 gpii->pm_state = PAUSE_STATE;
1556 write_unlock_irq(&gpii->pm_lock);
1557 mutex_unlock(&gpii->ctrl_lock);
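The pause path above is ordered so no event work can race the state change: stop the channels, block new interrupts, wait out the tasklet, and only then publish PAUSE_STATE. A reconstructed skeleton (to_gchan() is an assumed container helper):

static int gpi_pause(struct dma_chan *chan)	/* reconstructed skeleton */
{
	struct gchan *gchan = to_gchan(chan);	/* assumed container helper */
	struct gpii *gpii = gchan->gpii;
	int i, ret;

	mutex_lock(&gpii->ctrl_lock);

	/* pause/resume act on the whole gpii; a second call is a no-op */
	if (gpii->pm_state == PAUSE_STATE) {
		mutex_unlock(&gpii->ctrl_lock);
		return 0;
	}

	/* 1. stop both channels */
	for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
		ret = gpi_stop_chan(&gpii->gchan[i]);
		if (ret) {
			mutex_unlock(&gpii->ctrl_lock);
			return ret;
		}
	}

	/* 2. block new interrupts, 3. wait out the event tasklet */
	disable_irq(gpii->irq);
	tasklet_kill(&gpii->ev_task);

	/* 4. only now publish the state change under pm_lock */
	write_lock_irq(&gpii->pm_lock);
	gpii->pm_state = PAUSE_STATE;
	write_unlock_irq(&gpii->pm_lock);
	mutex_unlock(&gpii->ctrl_lock);

	return 0;
}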
1566 struct gpii *gpii = gchan->gpii;
1569 mutex_lock(&gpii->ctrl_lock);
1570 if (gpii->pm_state == ACTIVE_STATE) {
1571 dev_dbg(gpii->gpi_dev->dev, "channel is already active\n");
1572 mutex_unlock(&gpii->ctrl_lock);
1576 enable_irq(gpii->irq);
1580 ret = gpi_send_cmd(gpii, &gpii->gchan[i], GPI_CH_CMD_START);
1582 dev_err(gpii->gpi_dev->dev, "Error starting chan, ret:%d\n", ret);
1583 mutex_unlock(&gpii->ctrl_lock);
1588 write_lock_irq(&gpii->pm_lock);
1589 gpii->pm_state = ACTIVE_STATE;
1590 write_unlock_irq(&gpii->pm_lock);
1591 mutex_unlock(&gpii->ctrl_lock);
1625 struct device *dev = chan->gpii->gpi_dev->dev;
1700 struct device *dev = chan->gpii->gpi_dev->dev;
1797 struct gpii *gpii = gchan->gpii;
1798 struct device *dev = gpii->gpi_dev->dev;
1805 gpii->ieob_set = false;
1807 dev_err(gpii->gpi_dev->dev, "invalid dma direction: %d\n", direction);
1857 struct gpii *gpii = gchan->gpii;
1865 read_lock_irqsave(&gpii->pm_lock, pm_lock_flags);
1876 read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
1883 gpi_queue_xfer(gpii, gchan, tre, &wp);
1888 read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
1893 struct gpii *gpii = gchan->gpii;
1894 const int ev_factor = gpii->gpi_dev->ev_factor;
1902 if (gpii->gchan[i].pm_state != CONFIG_STATE)
1906 if (gpii->gchan[0].protocol != gpii->gchan[1].protocol) {
1907 dev_err(gpii->gpi_dev->dev, "protocol did not match protocol %u != %u\n",
1908 gpii->gchan[0].protocol, gpii->gchan[1].protocol);
1915 ret = gpi_alloc_ring(&gpii->ev_ring, elements,
1916 sizeof(union gpi_event), gpii);
1921 write_lock_irq(&gpii->pm_lock);
1922 gpii->pm_state = PREPARE_HARDWARE;
1923 write_unlock_irq(&gpii->pm_lock);
1924 ret = gpi_config_interrupts(gpii, DEFAULT_IRQ_SETTINGS, 0);
1926 dev_err(gpii->gpi_dev->dev, "error config. interrupts, ret:%d\n", ret);
1931 ret = gpi_alloc_ev_chan(gpii);
1933 dev_err(gpii->gpi_dev->dev, "error alloc_ev_chan:%d\n", ret);
1939 ret = gpi_alloc_chan(&gpii->gchan[i], true);
1941 dev_err(gpii->gpi_dev->dev, "Error allocating chan:%d\n", ret);
1948 ret = gpi_start_chan(&gpii->gchan[i]);
1950 dev_err(gpii->gpi_dev->dev, "Error start chan:%d\n", ret);
1958 gpi_stop_chan(&gpii->gchan[i]);
1959 gpi_send_cmd(gpii, gchan, GPI_CH_CMD_RESET);
1966 gpi_disable_interrupts(gpii);
1968 gpi_free_ring(&gpii->ev_ring, gpii);
1977 struct gpii *gpii = gchan->gpii;
1981 mutex_lock(&gpii->ctrl_lock);
1986 write_lock_irq(&gpii->pm_lock);
1988 write_unlock_irq(&gpii->pm_lock);
1994 ret = gpi_send_cmd(gpii, gchan, GPI_CH_CMD_RESET);
1996 dev_err(gpii->gpi_dev->dev, "error resetting channel:%d\n", ret);
2002 gpi_free_ring(&gchan->ch_ring, gpii);
2006 write_lock_irq(&gpii->pm_lock);
2008 write_unlock_irq(&gpii->pm_lock);
2012 if (gpii->gchan[i].ch_ring.configured)
2016 cur_state = gpii->pm_state;
2017 write_lock_irq(&gpii->pm_lock);
2018 gpii->pm_state = PREPARE_TERMINATE;
2019 write_unlock_irq(&gpii->pm_lock);
2022 tasklet_kill(&gpii->ev_task);
2026 gpi_send_cmd(gpii, NULL, GPI_EV_CMD_DEALLOC);
2028 gpi_free_ring(&gpii->ev_ring, gpii);
2032 gpi_disable_interrupts(gpii);
2035 write_lock_irq(&gpii->pm_lock);
2036 gpii->pm_state = DISABLE_STATE;
2037 write_unlock_irq(&gpii->pm_lock);
2040 mutex_unlock(&gpii->ctrl_lock);
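The release path above is ordered so gpii-wide resources go away only after the last channel ring is freed. A reconstructed skeleton; to_gchan() is an assumed container helper and the per-channel pm_state bookkeeping is omitted:

static void gpi_free_chan_resources(struct dma_chan *chan)	/* reconstructed */
{
	struct gchan *gchan = to_gchan(chan);	/* assumed container helper */
	struct gpii *gpii = gchan->gpii;
	int i;

	mutex_lock(&gpii->ctrl_lock);

	/* reset this channel and release its transfer ring */
	gpi_send_cmd(gpii, gchan, GPI_CH_CMD_RESET);
	gpi_free_ring(&gchan->ch_ring, gpii);

	/* if the sibling channel still has a configured ring, stop here */
	for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
		if (gpii->gchan[i].ch_ring.configured)
			goto exit_free;

	/* last channel out: quiesce, then drop gpii-wide resources */
	write_lock_irq(&gpii->pm_lock);
	gpii->pm_state = PREPARE_TERMINATE;
	write_unlock_irq(&gpii->pm_lock);

	tasklet_kill(&gpii->ev_task);
	gpi_send_cmd(gpii, NULL, GPI_EV_CMD_DEALLOC);
	gpi_free_ring(&gpii->ev_ring, gpii);
	gpi_disable_interrupts(gpii);

	write_lock_irq(&gpii->pm_lock);
	gpii->pm_state = DISABLE_STATE;
	write_unlock_irq(&gpii->pm_lock);

exit_free:
	mutex_unlock(&gpii->ctrl_lock);
}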
2047 struct gpii *gpii = gchan->gpii;
2050 mutex_lock(&gpii->ctrl_lock);
2054 sizeof(struct gpi_tre), gpii);
2060 mutex_unlock(&gpii->ctrl_lock);
2064 mutex_unlock(&gpii->ctrl_lock);
2072 unsigned int gpii;
2075 for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
2076 if (!((1 << gpii) & gpi_dev->gpii_mask))
2079 tx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_TX_CHAN];
2080 rx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_RX_CHAN];
2083 return gpii;
2085 return gpii;
2088 /* no channels configured with same seid, return next avail gpii */
2089 for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
2090 if (!((1 << gpii) & gpi_dev->gpii_mask))
2093 tx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_TX_CHAN];
2094 rx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_RX_CHAN];
2096 /* check if gpii is configured */
2101 /* found a free gpii */
2102 return gpii;
2105 /* no gpii instance available to use */
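The two loops above implement a two-pass pick: first prefer the gpii whose sibling channel is already bound to the same serial-engine id (seid), then fall back to any unconfigured gpii. A reconstruction; the vc.chan.client_count occupancy test is an assumption:

static int gpi_find_avail_gpii(struct gpi_dev *gpi_dev, u32 seid)
{
	struct gchan *tx_chan, *rx_chan;
	unsigned int gpii;

	/* pass 1: pair with a gpii already bound to this seid */
	for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
		if (!((1 << gpii) & gpi_dev->gpii_mask))
			continue;	/* instance not assigned to apps */
		tx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_TX_CHAN];
		rx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_RX_CHAN];
		if (rx_chan->vc.chan.client_count && rx_chan->seid == seid)
			return gpii;
		if (tx_chan->vc.chan.client_count && tx_chan->seid == seid)
			return gpii;
	}

	/* pass 2: any gpii with both channels still unclaimed */
	for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
		if (!((1 << gpii) & gpi_dev->gpii_mask))
			continue;
		tx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_TX_CHAN];
		rx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_RX_CHAN];
		if (tx_chan->vc.chan.client_count ||
		    rx_chan->vc.chan.client_count)
			continue;
		return gpii;	/* found a free gpii */
	}

	return -EIO;	/* nothing available */
}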
2115 int gpii;
2119 dev_err(gpi_dev->dev, "gpii require minimum 2 args, client passed:%d args\n",
2126 dev_err(gpi_dev->dev, "gpii channel:%d not valid\n", chid);
2132 /* find next available gpii to use */
2133 gpii = gpi_find_avail_gpii(gpi_dev, seid);
2134 if (gpii < 0) {
2135 dev_err(gpi_dev->dev, "no available gpii instances\n");
2139 gchan = &gpi_dev->gpiis[gpii].gchan[chid];
2141 dev_err(gpi_dev->dev, "gpii:%d chid:%d seid:%d already configured\n",
2142 gpii, chid, gchan->seid);
2172 dev_err(gpi_dev->dev, "missing 'max-no-gpii' DT node\n");
2179 dev_err(gpi_dev->dev, "missing 'gpii-mask' DT node\n");
2199 /* setup all the supported gpii */
2202 struct gpii *gpii = &gpi_dev->gpiis[i];
2209 gpii->ev_cntxt_base_reg = gpi_dev->ee_base + GPII_n_EV_CH_k_CNTXT_0_OFFS(i, 0);
2210 gpii->ev_cntxt_db_reg = gpi_dev->ee_base + GPII_n_EV_CH_k_DOORBELL_0_OFFS(i, 0);
2211 gpii->ev_ring_rp_lsb_reg = gpii->ev_cntxt_base_reg + CNTXT_4_RING_RP_LSB;
2212 gpii->ev_cmd_reg = gpi_dev->ee_base + GPII_n_EV_CH_CMD_OFFS(i);
2213 gpii->ieob_clr_reg = gpi_dev->ee_base + GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(i);
2219 gpii->irq = ret;
2223 struct gchan *gchan = &gpii->gchan[chan];
2236 gchan->gpii = gpii;
2239 mutex_init(&gpii->ctrl_lock);
2240 rwlock_init(&gpii->pm_lock);
2241 tasklet_init(&gpii->ev_task, gpi_ev_tasklet,
2242 (unsigned long)gpii);
2243 init_completion(&gpii->cmd_completion);
2244 gpii->gpii_id = i;
2245 gpii->regs = gpi_dev->ee_base;
2246 gpii->gpi_dev = gpi_dev;
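For completeness, a sketch of the per-channel half of this probe loop; the GPII_n_CH_k_* offset macros mirror the event-ring ones above, and the vchan_init() registration follows the dmaengine virt-channel convention (both are assumptions here):

/* inside the probe loop over gpii instances, i = gpii index */
for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
	struct gchan *gchan = &gpii->gchan[chan];

	/* per-channel context and doorbell register map */
	gchan->ch_cntxt_base_reg = gpi_dev->ee_base +
		GPII_n_CH_k_CNTXT_0_OFFS(i, chan);	/* assumed macro */
	gchan->ch_cntxt_db_reg = gpi_dev->ee_base +
		GPII_n_CH_k_DOORBELL_0_OFFS(i, chan);	/* assumed macro */
	gchan->ch_cmd_reg = gpi_dev->ee_base + GPII_n_CH_CMD_OFFS(i);

	/* register with the dmaengine virt-channel layer */
	vchan_init(&gchan->vc, &gpi_dev->dma_device);	/* assumed */
	gchan->chid = chan;
	gchan->gpii = gpii;
}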