Lines Matching +full:no +full:- +full:tick +full:- +full:in +full:- +full:suspend

1 // SPDX-License-Identifier: GPL-2.0
3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
4 * Copyright (C) 2019-2020 Linaro Ltd.
12 #include <linux/dma-direction.h>
26 #define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
30 /* RX buffer is 1 page (or a power-of-2 contiguous pages) */
34 #define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
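The overhead definition above can be read by expanding the skbuff helpers; a sketch of that expansion, assuming the usual definitions of SKB_MAX_ORDER() and SKB_WITH_OVERHEAD() (this is not text from the file):

    /* Sketch: expanding the helpers, assuming their usual definitions:
     *
     *   SKB_MAX_ORDER(NET_SKB_PAD, 0)
     *       == SKB_WITH_OVERHEAD(PAGE_SIZE - NET_SKB_PAD)
     *       == PAGE_SIZE - NET_SKB_PAD
     *                    - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
     *
     * so the per-buffer overhead is the headroom plus the shared-info
     * area, neither of which can hold received payload:
     *
     *   IPA_RX_BUFFER_OVERHEAD
     *       == NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
     */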
36 /* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
42 /** enum ipa_status_opcode - status element opcode hardware values */
50 /** enum ipa_status_exception - status element exception type */
52 /* 0 means no exception */
83 * enough space in a receive buffer to hold a complete MTU in ipa_endpoint_validate_build()
88 * size that would exceed what we can represent in the field in ipa_endpoint_validate_build()
107 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_data_valid_one()
113 if (!data->toward_ipa) { in ipa_endpoint_data_valid_one()
114 if (data->endpoint.filter_support) { in ipa_endpoint_data_valid_one()
117 data->endpoint_id); in ipa_endpoint_data_valid_one()
124 if (data->endpoint.config.status_enable) { in ipa_endpoint_data_valid_one()
125 other_name = data->endpoint.config.tx.status_endpoint; in ipa_endpoint_data_valid_one()
129 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
138 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
143 if (other_data->toward_ipa) { in ipa_endpoint_data_valid_one()
146 data->endpoint_id); in ipa_endpoint_data_valid_one()
151 if (other_data->ee_id == GSI_EE_AP) { in ipa_endpoint_data_valid_one()
153 if (!other_data->endpoint.config.status_enable) { in ipa_endpoint_data_valid_one()
156 other_data->endpoint_id); in ipa_endpoint_data_valid_one()
162 if (data->endpoint.config.dma_mode) { in ipa_endpoint_data_valid_one()
163 other_name = data->endpoint.config.dma_endpoint; in ipa_endpoint_data_valid_one()
167 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
175 other_name, data->endpoint_id); in ipa_endpoint_data_valid_one()
187 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_data_valid()
208 dev_err(dev, "AP->modem TX endpoint not defined\n"); in ipa_endpoint_data_valid()
212 dev_err(dev, "AP<-modem RX endpoint not defined\n"); in ipa_endpoint_data_valid()
233 /* Allocate a transaction to use on a non-command endpoint */
237 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_trans_alloc()
238 u32 channel_id = endpoint->channel_id; in ipa_endpoint_trans_alloc()
241 direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE; in ipa_endpoint_trans_alloc()
246 /* suspend_delay represents suspend for RX, delay for TX endpoints.
247 * Note that suspend is not supported starting with IPA v4.0.
252 u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_ctrl()
253 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_ctrl()
258 /* Suspend is not supported for IPA v4.0+. Delay doesn't work in ipa_endpoint_init_ctrl()
261 * if (endpoint->toward_ipa) in ipa_endpoint_init_ctrl()
262  	 * 	assert(ipa->version != IPA_VERSION_4_2); in ipa_endpoint_init_ctrl()
264 * assert(ipa->version == IPA_VERSION_3_5_1); in ipa_endpoint_init_ctrl()
266 mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK; in ipa_endpoint_init_ctrl()
268 val = ioread32(ipa->reg_virt + offset); in ipa_endpoint_init_ctrl()
269 /* Don't bother if it's already in the requested state */ in ipa_endpoint_init_ctrl()
273 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_ctrl()
283 /* assert(endpoint->toward_ipa); */ in ipa_endpoint_program_delay()
286 if (endpoint->ipa->version != IPA_VERSION_4_2) in ipa_endpoint_program_delay()
292 u32 mask = BIT(endpoint->endpoint_id); in ipa_endpoint_aggr_active()
293 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_aggr_active()
297 /* assert(mask & ipa->available); */ in ipa_endpoint_aggr_active()
298 offset = ipa_reg_state_aggr_active_offset(ipa->version); in ipa_endpoint_aggr_active()
299 val = ioread32(ipa->reg_virt + offset); in ipa_endpoint_aggr_active()
306 u32 mask = BIT(endpoint->endpoint_id); in ipa_endpoint_force_close()
307 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_force_close()
309 /* assert(mask & ipa->available); */ in ipa_endpoint_force_close()
310 iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET); in ipa_endpoint_force_close()
314 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
315 * @endpoint: Endpoint on which to emulate a suspend
317  * Emulate a suspend IPA interrupt to unsuspend an endpoint suspended in ipa_endpoint_suspend_aggr()
319 * issue in IPA version 3.5.1 where the suspend interrupt will not be
324 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_suspend_aggr()
326 if (!endpoint->data->aggregation) in ipa_endpoint_suspend_aggr()
336 ipa_interrupt_simulate_suspend(ipa->interrupt); in ipa_endpoint_suspend_aggr()
339 /* Returns previous suspend state (true means suspend was enabled) */
345 if (endpoint->ipa->version != IPA_VERSION_3_5_1) in ipa_endpoint_program_suspend()
346 return enable; /* For IPA v4.0+, no change made */ in ipa_endpoint_program_suspend()
348 /* assert(!endpoint->toward_ipa); */ in ipa_endpoint_program_suspend()
353 * generate a SUSPEND IPA interrupt. If enabling suspend, have in ipa_endpoint_program_suspend()
362 /* Enable or disable delay or suspend mode on all modem endpoints */
368 if (ipa->version == IPA_VERSION_4_2) in ipa_endpoint_modem_pause_all()
372 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_pause_all()
374 if (endpoint->ee_id != GSI_EE_MODEM) in ipa_endpoint_modem_pause_all()
377 /* Set TX delay mode or RX suspend mode */ in ipa_endpoint_modem_pause_all()
378 if (endpoint->toward_ipa) in ipa_endpoint_modem_pause_all()
388 u32 initialized = ipa->initialized; in ipa_endpoint_modem_exception_reset_all()
393 * bound on that by assuming all initialized endpoints are modem->IPA. in ipa_endpoint_modem_exception_reset_all()
400 dev_err(&ipa->pdev->dev, in ipa_endpoint_modem_exception_reset_all()
401 "no transaction to reset modem exception endpoints\n"); in ipa_endpoint_modem_exception_reset_all()
402 return -EBUSY; in ipa_endpoint_modem_exception_reset_all()
413 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_exception_reset_all()
414 if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa)) in ipa_endpoint_modem_exception_reset_all()
421 * result all other fields in the register are ignored. in ipa_endpoint_modem_exception_reset_all()
436 u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_cfg()
440 if (endpoint->data->checksum) { in ipa_endpoint_init_cfg()
441 if (endpoint->toward_ipa) { in ipa_endpoint_init_cfg()
446 /* Checksum header offset is in 4-byte units */ in ipa_endpoint_init_cfg()
461 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_cfg()
465 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
469 * header structure. The QMAP header contains a 1-byte mux_id and 2-byte
471 * received packet. The header is configured (in the HDR_EXT register)
477 * The mux_id comes from a 4-byte metadata value supplied with each packet
479 * value that we want, in its low-order byte. A bitmask defined in the
487 u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_hdr()
490 if (endpoint->data->qmap) { in ipa_endpoint_init_hdr()
494 if (endpoint->toward_ipa && endpoint->data->checksum) in ipa_endpoint_init_hdr()
498 /* Define how to fill fields in a received QMAP header */ in ipa_endpoint_init_hdr()
499 if (!endpoint->toward_ipa) { in ipa_endpoint_init_hdr()
520 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hdr()
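Per the comment above, the HDR settings describe a small QMAP header holding a 1-byte mux_id and a 2-byte packet length. A minimal sketch of that layout; the struct and field names here are hypothetical, shown only to illustrate the 4-byte header the comment refers to:

    /* Hypothetical sketch of the 4-byte QMAP header described above */
    struct qmap_header {
            u8     pad_cd;     /* pad length (low bits) plus command/data flag */
            u8     mux_id;     /* identifies the logical channel (netdev) */
            __be16 pkt_len;    /* packet length, network byte order */
    };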
525 u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_hdr_ext()
526 u32 pad_align = endpoint->data->rx.pad_align; in ipa_endpoint_init_hdr_ext()
532 * driver assumes this field is meaningful in packets it receives, in ipa_endpoint_init_hdr_ext()
537 if (endpoint->data->qmap && !endpoint->toward_ipa) { in ipa_endpoint_init_hdr_ext()
545 if (!endpoint->toward_ipa) in ipa_endpoint_init_hdr_ext()
548 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hdr_ext()
554 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hdr_metadata_mask()
558 if (endpoint->toward_ipa) in ipa_endpoint_init_hdr_metadata_mask()
564 if (endpoint->data->qmap) in ipa_endpoint_init_hdr_metadata_mask()
567 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hdr_metadata_mask()
572 u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_mode()
575 if (!endpoint->toward_ipa) in ipa_endpoint_init_mode()
578 if (endpoint->data->dma_mode) { in ipa_endpoint_init_mode()
579 enum ipa_endpoint_name name = endpoint->data->dma_endpoint; in ipa_endpoint_init_mode()
582 dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id; in ipa_endpoint_init_mode()
591 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_mode()
601 rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD; in ipa_aggr_size_kb()
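The subtraction above leaves room for one more full-MTU packet (plus buffer overhead) after the aggregation byte limit is crossed. A minimal sketch of the surrounding helper, assuming SZ_1K from <linux/sizes.h> since the register takes its limit in kilobytes:

    /* Sketch: size the aggregation byte limit so the packet that crosses
     * the limit still fits in the receive buffer.
     */
    static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
    {
            rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

            return rx_buffer_size / SZ_1K;  /* register unit is KB */
    }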
608 u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_aggr()
611 if (endpoint->data->aggregation) { in ipa_endpoint_init_aggr()
612 if (!endpoint->toward_ipa) { in ipa_endpoint_init_aggr()
627 if (endpoint->data->rx.aggr_close_eof) in ipa_endpoint_init_aggr()
642 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_aggr()
645 /* The head-of-line blocking timer is defined as a tick count, where each
646 * tick represents 128 cycles of the IPA core clock. Return the value
665 /* ...but we still need to fit into a 32-bit register */ in ipa_reg_init_hol_block_timer_val()
668 /* IPA v3.5.1 just records the tick count */ in ipa_reg_init_hol_block_timer_val()
669 if (ipa->version == IPA_VERSION_3_5_1) in ipa_reg_init_hol_block_timer_val()
672 /* For IPA v4.2, the tick count is represented by base and in ipa_reg_init_hol_block_timer_val()
673 * scale fields within the 32-bit timer register, where: in ipa_reg_init_hol_block_timer_val()
676 * large as possible. Find the highest set bit in the tick in ipa_reg_init_hol_block_timer_val()
677 * count, and extract the number of bits in the base field in ipa_reg_init_hol_block_timer_val()
682 scale = high > width ? high - width : 0; in ipa_reg_init_hol_block_timer_val()
685 ticks += 1 << (scale - 1); in ipa_reg_init_hol_block_timer_val()
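A worked example of the base/scale encoding described above; the 5-bit base-field width is an assumption made only for the arithmetic:

    /* Worked example, assuming a 5-bit base field:
     *
     *   ticks = 99 = 0b1100011, so fls(ticks) = 7
     *   scale = 7 - 5 = 2
     *   rounding: ticks += 1 << (2 - 1) = 2   ->   101 = 0b1100101
     *   fls(101) is still 7, so scale stays 2
     *   base  = 101 >> 2 = 25
     *
     * The hardware then counts base << scale = 25 << 2 = 100 ticks,
     * versus 96 if the low-order bits had simply been truncated.
     */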
701 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hol_block_timer()
702 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hol_block_timer()
708 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_timer()
714 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hol_block_enable()
720 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_enable()
728 struct ipa_endpoint *endpoint = &ipa->endpoint[i]; in ipa_endpoint_modem_hol_block_clear_all()
730 if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM) in ipa_endpoint_modem_hol_block_clear_all()
740 u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_deaggr()
743 if (!endpoint->toward_ipa) in ipa_endpoint_init_deaggr()
751 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_deaggr()
756 u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_seq()
757 u32 seq_type = endpoint->seq_type; in ipa_endpoint_init_seq()
760 if (!endpoint->toward_ipa) in ipa_endpoint_init_seq()
770 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_seq()
774 * ipa_endpoint_skb_tx() - Transmit a socket buffer
790 nr_frags = skb_shinfo(skb)->nr_frags; in ipa_endpoint_skb_tx()
791 if (1 + nr_frags > endpoint->trans_tre_max) { in ipa_endpoint_skb_tx()
793 return -E2BIG; in ipa_endpoint_skb_tx()
799 return -EBUSY; in ipa_endpoint_skb_tx()
804 trans->data = skb; /* transaction owns skb now */ in ipa_endpoint_skb_tx()
813 return -ENOMEM; in ipa_endpoint_skb_tx()
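The fragment check near the top of this function reflects the transaction budget: one TRE for the skb's linear data plus one per page fragment. A hedged sketch of that check; the skb_linearize() fallback is shown here only as an illustration of how an oversized frag list can be collapsed into a single TRE:

    /* Sketch: fit the skb into one transaction, or fall back to a
     * linearized copy that needs only a single TRE.
     */
    nr_frags = skb_shinfo(skb)->nr_frags;
    if (1 + nr_frags > endpoint->trans_tre_max) {
            if (skb_linearize(skb))
                    return -E2BIG;
            nr_frags = 0;
    }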
818 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_status()
819 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status()
825 if (endpoint->data->status_enable) { in ipa_endpoint_status()
827 if (endpoint->toward_ipa) { in ipa_endpoint_status()
831 name = endpoint->data->tx.status_endpoint; in ipa_endpoint_status()
832 status_endpoint_id = ipa->name_map[name]->endpoint_id; in ipa_endpoint_status()
842 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_status()
856 return -ENOMEM; in ipa_endpoint_replenish_one()
864 len = IPA_RX_BUFFER_SIZE - offset; in ipa_endpoint_replenish_one()
869 trans->data = page; /* transaction owns page now */ in ipa_endpoint_replenish_one()
871 if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) { in ipa_endpoint_replenish_one()
873 endpoint->replenish_ready = 0; in ipa_endpoint_replenish_one()
885 return -ENOMEM; in ipa_endpoint_replenish_one()
889 * ipa_endpoint_replenish() - Replenish the Rx packets cache.
902 if (!endpoint->replenish_enabled) { in ipa_endpoint_replenish()
904 atomic_add(count, &endpoint->replenish_saved); in ipa_endpoint_replenish()
909 while (atomic_dec_not_zero(&endpoint->replenish_backlog)) in ipa_endpoint_replenish()
913 atomic_add(count, &endpoint->replenish_backlog); in ipa_endpoint_replenish()
919 backlog = atomic_inc_return(&endpoint->replenish_backlog); in ipa_endpoint_replenish()
922 atomic_add(count, &endpoint->replenish_backlog); in ipa_endpoint_replenish()
930 gsi = &endpoint->ipa->gsi; in ipa_endpoint_replenish()
931 if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id)) in ipa_endpoint_replenish()
932 schedule_delayed_work(&endpoint->replenish_work, in ipa_endpoint_replenish()
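The replenish loop above pairs with the atomic_dec_not_zero() helper defined near the top of the file: each successful replenish consumes one backlog slot, and a failed attempt puts its slot back before retrying from delayed work. A simplified sketch of that pattern, for illustration:

    /* Sketch: drain the backlog one buffer at a time; on failure, return
     * the slot and let delayed work or the next completion retry.
     */
    while (atomic_dec_not_zero(&endpoint->replenish_backlog))
            if (ipa_endpoint_replenish_one(endpoint))
                    goto try_again_later;
    return;

try_again_later:
    /* The last attempt failed, so put its slot back in the backlog */
    backlog = atomic_inc_return(&endpoint->replenish_backlog);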
938 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_replenish_enable()
942 endpoint->replenish_enabled = true; in ipa_endpoint_replenish_enable()
943 while ((saved = atomic_xchg(&endpoint->replenish_saved, 0))) in ipa_endpoint_replenish_enable()
944 atomic_add(saved, &endpoint->replenish_backlog); in ipa_endpoint_replenish_enable()
946 /* Start replenishing if hardware currently has no buffers */ in ipa_endpoint_replenish_enable()
947 max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id); in ipa_endpoint_replenish_enable()
948 if (atomic_read(&endpoint->replenish_backlog) == max_backlog) in ipa_endpoint_replenish_enable()
956 endpoint->replenish_enabled = false; in ipa_endpoint_replenish_disable()
957 while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0))) in ipa_endpoint_replenish_disable()
958 atomic_add(backlog, &endpoint->replenish_saved); in ipa_endpoint_replenish_disable()
979 memcpy(skb->data, data, len); in ipa_endpoint_skb_copy()
980 skb->truesize += extra; in ipa_endpoint_skb_copy()
983 /* Now receive it, or drop it if there's no netdev */ in ipa_endpoint_skb_copy()
984 if (endpoint->netdev) in ipa_endpoint_skb_copy()
985 ipa_modem_skb_rx(endpoint->netdev, skb); in ipa_endpoint_skb_copy()
995 /* Nothing to do if there's no netdev */ in ipa_endpoint_skb_build()
996 if (!endpoint->netdev) in ipa_endpoint_skb_build()
999 /* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */ in ipa_endpoint_skb_build()
1008 ipa_modem_skb_rx(endpoint->netdev, skb); in ipa_endpoint_skb_build()
1034 if (!ipa_status_format_packet(status->opcode)) in ipa_endpoint_status_skip()
1036 if (!status->pkt_len) in ipa_endpoint_status_skip()
1038 endpoint_id = u32_get_bits(status->endp_dst_idx, in ipa_endpoint_status_skip()
1040 if (endpoint_id != endpoint->endpoint_id) in ipa_endpoint_status_skip()
1052 if (status->exception) in ipa_status_drop_packet()
1053 return status->exception == IPA_STATUS_EXCEPTION_DEAGGR; in ipa_status_drop_packet()
1055 /* Drop the packet if it fails to match a routing rule; otherwise no */ in ipa_status_drop_packet()
1056 val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK); in ipa_status_drop_packet()
1065 u32 unused = IPA_RX_BUFFER_SIZE - total_len; in ipa_endpoint_status_parse()
1074 dev_err(&endpoint->ipa->pdev->dev, in ipa_endpoint_status_parse()
1083 resid -= sizeof(*status); in ipa_endpoint_status_parse()
1094 align = endpoint->data->rx.pad_align ? : 1; in ipa_endpoint_status_parse()
1095 len = le16_to_cpu(status->pkt_len); in ipa_endpoint_status_parse()
1097 if (endpoint->data->checksum) in ipa_endpoint_status_parse()
1101 * the unused space in the original receive buffer. in ipa_endpoint_status_parse()
1107 u32 len2 = le16_to_cpu(status->pkt_len); in ipa_endpoint_status_parse()
1109 /* Client receives only packet data (no status) */ in ipa_endpoint_status_parse()
1115 resid -= len; in ipa_endpoint_status_parse()
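Each status element in the buffer is followed by the packet it describes, padded up to the endpoint's rx.pad_align, and that consumed length is what gets subtracted from resid before the next element is parsed. A worked example; the numbers are invented for illustration:

    /* Worked example: pkt_len = 1000, rx.pad_align = 4, checksum enabled:
     *
     *   len = sizeof(*status)          the status element itself
     *       + ALIGN(1000, 4)           = 1000, already 4-byte aligned
     *       + <checksum trailer size>  downlink checksum trailer, if any
     *
     *   resid -= len;                  then parse the next status element
     */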
1125 /* Complete transaction initiated in ipa_endpoint_replenish_one() */
1133 if (trans->cancelled) in ipa_endpoint_rx_complete()
1137 page = trans->data; in ipa_endpoint_rx_complete()
1138 if (endpoint->data->status_enable) in ipa_endpoint_rx_complete()
1139 ipa_endpoint_status_parse(endpoint, page, trans->len); in ipa_endpoint_rx_complete()
1140 else if (ipa_endpoint_skb_build(endpoint, page, trans->len)) in ipa_endpoint_rx_complete()
1141 trans->data = NULL; /* Pages have been consumed */ in ipa_endpoint_rx_complete()
1147 if (endpoint->toward_ipa) in ipa_endpoint_trans_complete()
1156 if (endpoint->toward_ipa) { in ipa_endpoint_trans_release()
1157 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_trans_release()
1160 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) { in ipa_endpoint_trans_release()
1161 struct sk_buff *skb = trans->data; in ipa_endpoint_trans_release()
1167 struct page *page = trans->data; in ipa_endpoint_trans_release()
1185 iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET); in ipa_endpoint_default_route_set()
1194 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1205 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_reset_rx_aggr()
1206 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset_rx_aggr()
1207 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_reset_rx_aggr()
1218 return -ENOMEM; in ipa_endpoint_reset_rx_aggr()
1222 ret = -ENOMEM; in ipa_endpoint_reset_rx_aggr()
1230 * disabled. Then poll until we know aggregation is no longer in ipa_endpoint_reset_rx_aggr()
1231 * active. We'll re-enable the doorbell (if appropriate) when in ipa_endpoint_reset_rx_aggr()
1234 gsi_channel_reset(gsi, endpoint->channel_id, false); in ipa_endpoint_reset_rx_aggr()
1240 ret = gsi_channel_start(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1244 ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr); in ipa_endpoint_reset_rx_aggr()
1254 } while (retries--); in ipa_endpoint_reset_rx_aggr()
1259 endpoint->endpoint_id); in ipa_endpoint_reset_rx_aggr()
1261 gsi_trans_read_byte_done(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1263 ret = gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1267 /* Finally, reset and reconfigure the channel again (re-enabling the in ipa_endpoint_reset_rx_aggr()
1272 legacy = ipa->version == IPA_VERSION_3_5_1; in ipa_endpoint_reset_rx_aggr()
1273 gsi_channel_reset(gsi, endpoint->channel_id, legacy); in ipa_endpoint_reset_rx_aggr()
1280 (void)gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1293 u32 channel_id = endpoint->channel_id; in ipa_endpoint_reset()
1294 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset()
1305 legacy = ipa->version == IPA_VERSION_3_5_1; in ipa_endpoint_reset()
1306 special = !endpoint->toward_ipa && endpoint->data->aggregation; in ipa_endpoint_reset()
1310 gsi_channel_reset(&ipa->gsi, channel_id, legacy); in ipa_endpoint_reset()
1313 dev_err(&ipa->pdev->dev, in ipa_endpoint_reset()
1315 ret, endpoint->channel_id, endpoint->endpoint_id); in ipa_endpoint_reset()
1320 if (endpoint->toward_ipa) in ipa_endpoint_program()
1337 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_enable_one()
1338 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_enable_one()
1341 ret = gsi_channel_start(gsi, endpoint->channel_id); in ipa_endpoint_enable_one()
1343 dev_err(&ipa->pdev->dev, in ipa_endpoint_enable_one()
1345 ret, endpoint->toward_ipa ? 'T' : 'R', in ipa_endpoint_enable_one()
1346 endpoint->channel_id, endpoint->endpoint_id); in ipa_endpoint_enable_one()
1350 if (!endpoint->toward_ipa) { in ipa_endpoint_enable_one()
1351 ipa_interrupt_suspend_enable(ipa->interrupt, in ipa_endpoint_enable_one()
1352 endpoint->endpoint_id); in ipa_endpoint_enable_one()
1356 ipa->enabled |= BIT(endpoint->endpoint_id); in ipa_endpoint_enable_one()
1363 u32 mask = BIT(endpoint->endpoint_id); in ipa_endpoint_disable_one()
1364 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_disable_one()
1365 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_disable_one()
1368 if (!(ipa->enabled & mask)) in ipa_endpoint_disable_one()
1371 ipa->enabled ^= mask; in ipa_endpoint_disable_one()
1373 if (!endpoint->toward_ipa) { in ipa_endpoint_disable_one()
1375 ipa_interrupt_suspend_disable(ipa->interrupt, in ipa_endpoint_disable_one()
1376 endpoint->endpoint_id); in ipa_endpoint_disable_one()
1379 /* Note that if stop fails, the channel's state is not well-defined */ in ipa_endpoint_disable_one()
1380 ret = gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_disable_one()
1382 dev_err(&ipa->pdev->dev, in ipa_endpoint_disable_one()
1384 endpoint->endpoint_id); in ipa_endpoint_disable_one()
1389 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_suspend_one()
1390 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_suspend_one()
1394 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) in ipa_endpoint_suspend_one()
1397 if (!endpoint->toward_ipa) { in ipa_endpoint_suspend_one()
1402 /* IPA v3.5.1 doesn't use channel stop for suspend */ in ipa_endpoint_suspend_one()
1403 stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; in ipa_endpoint_suspend_one()
1404 ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel); in ipa_endpoint_suspend_one()
1407 endpoint->channel_id); in ipa_endpoint_suspend_one()
1412 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_resume_one()
1413 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_resume_one()
1417 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) in ipa_endpoint_resume_one()
1420 if (!endpoint->toward_ipa) in ipa_endpoint_resume_one()
1424 start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; in ipa_endpoint_resume_one()
1425 ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel); in ipa_endpoint_resume_one()
1428 endpoint->channel_id); in ipa_endpoint_resume_one()
1429 else if (!endpoint->toward_ipa) in ipa_endpoint_resume_one()
1435 if (!ipa->setup_complete) in ipa_endpoint_suspend()
1438 if (ipa->modem_netdev) in ipa_endpoint_suspend()
1439 ipa_modem_suspend(ipa->modem_netdev); in ipa_endpoint_suspend()
1443 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); in ipa_endpoint_suspend()
1444 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); in ipa_endpoint_suspend()
1449 if (!ipa->setup_complete) in ipa_endpoint_resume()
1452 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); in ipa_endpoint_resume()
1453 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); in ipa_endpoint_resume()
1455 if (ipa->modem_netdev) in ipa_endpoint_resume()
1456 ipa_modem_resume(ipa->modem_netdev); in ipa_endpoint_resume()
1461 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_setup_one()
1462 u32 channel_id = endpoint->channel_id; in ipa_endpoint_setup_one()
1465 if (endpoint->ee_id != GSI_EE_AP) in ipa_endpoint_setup_one()
1468 endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id); in ipa_endpoint_setup_one()
1469 if (!endpoint->toward_ipa) { in ipa_endpoint_setup_one()
1473 endpoint->replenish_enabled = false; in ipa_endpoint_setup_one()
1474 atomic_set(&endpoint->replenish_saved, in ipa_endpoint_setup_one()
1475 gsi_channel_tre_max(gsi, endpoint->channel_id)); in ipa_endpoint_setup_one()
1476 atomic_set(&endpoint->replenish_backlog, 0); in ipa_endpoint_setup_one()
1477 INIT_DELAYED_WORK(&endpoint->replenish_work, in ipa_endpoint_setup_one()
1483 endpoint->ipa->set_up |= BIT(endpoint->endpoint_id); in ipa_endpoint_setup_one()
1488 endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id); in ipa_endpoint_teardown_one()
1490 if (!endpoint->toward_ipa) in ipa_endpoint_teardown_one()
1491 cancel_delayed_work_sync(&endpoint->replenish_work); in ipa_endpoint_teardown_one()
1498 u32 initialized = ipa->initialized; in ipa_endpoint_setup()
1500 ipa->set_up = 0; in ipa_endpoint_setup()
1506 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_setup()
1512 u32 set_up = ipa->set_up; in ipa_endpoint_teardown()
1519 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_teardown()
1521 ipa->set_up = 0; in ipa_endpoint_teardown()
1526 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_config()
1538 val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET); in ipa_endpoint_config()
1546 return -EINVAL; in ipa_endpoint_config()
1548 rx_mask = GENMASK(max - 1, rx_base); in ipa_endpoint_config()
1552 tx_mask = GENMASK(max - 1, 0); in ipa_endpoint_config()
1554 ipa->available = rx_mask | tx_mask; in ipa_endpoint_config()
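A worked example of the two availability masks built above; the endpoint counts are invented for illustration:

    /* Worked example (numbers invented for illustration):
     *
     *   RX (IPA producer) endpoints: rx_base = 10, max = 23
     *       rx_mask = GENMASK(22, 10) = 0x007ffc00
     *   TX (IPA consumer) endpoints: max = 10
     *       tx_mask = GENMASK(9, 0)   = 0x000003ff
     *
     *   ipa->available = rx_mask | tx_mask = 0x007fffff
     */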
1557 if (ipa->initialized & ~ipa->available) { in ipa_endpoint_config()
1559 ipa->initialized & ~ipa->available); in ipa_endpoint_config()
1560 ret = -EINVAL; /* Report other errors too */ in ipa_endpoint_config()
1563 initialized = ipa->initialized; in ipa_endpoint_config()
1570 /* Make sure it's pointing in the right direction */ in ipa_endpoint_config()
1571 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_config()
1572 if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) { in ipa_endpoint_config()
1575 ret = -EINVAL; in ipa_endpoint_config()
1584 ipa->available = 0; /* Nothing more to do */ in ipa_endpoint_deconfig()
1592 endpoint = &ipa->endpoint[data->endpoint_id]; in ipa_endpoint_init_one()
1594 if (data->ee_id == GSI_EE_AP) in ipa_endpoint_init_one()
1595 ipa->channel_map[data->channel_id] = endpoint; in ipa_endpoint_init_one()
1596 ipa->name_map[name] = endpoint; in ipa_endpoint_init_one()
1598 endpoint->ipa = ipa; in ipa_endpoint_init_one()
1599 endpoint->ee_id = data->ee_id; in ipa_endpoint_init_one()
1600 endpoint->seq_type = data->endpoint.seq_type; in ipa_endpoint_init_one()
1601 endpoint->channel_id = data->channel_id; in ipa_endpoint_init_one()
1602 endpoint->endpoint_id = data->endpoint_id; in ipa_endpoint_init_one()
1603 endpoint->toward_ipa = data->toward_ipa; in ipa_endpoint_init_one()
1604 endpoint->data = &data->endpoint.config; in ipa_endpoint_init_one()
1606 ipa->initialized |= BIT(endpoint->endpoint_id); in ipa_endpoint_init_one()
1611 endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id); in ipa_endpoint_exit_one()
1618 u32 initialized = ipa->initialized; in ipa_endpoint_exit()
1625 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_exit()
1627 memset(ipa->name_map, 0, sizeof(ipa->name_map)); in ipa_endpoint_exit()
1628 memset(ipa->channel_map, 0, sizeof(ipa->channel_map)); in ipa_endpoint_exit()
1641 ipa->initialized = 0; in ipa_endpoint_init()
1650 if (data->endpoint.filter_support) in ipa_endpoint_init()
1651 filter_map |= BIT(data->endpoint_id); in ipa_endpoint_init()
1657 return filter_map; /* Non-zero bitmask */ in ipa_endpoint_init()