Lines matching "ipa" in the Qualcomm IPA endpoint driver (drivers/net/ipa/ipa_endpoint.c)
16 #include "ipa.h"
37 /** enum ipa_status_opcode - IPA status opcode field hardware values */
48 /** enum ipa_status_exception - IPA status exception field hardware values */
64 /** enum ipa_status_mask - IPA status mask field bitmask hardware values */
84 /* Special IPA filter/router rule field value indicating "rule miss" */
87 /** The IPA status nat_type field uses enum ipa_nat_type hardware values */
89 /* enum ipa_status_field_id - IPA packet status structure field identifiers */
121 /* Size in bytes of an IPA packet status structure */
124 /* IPA status structure decoder; looks up field values for a structure */
125 static u32 ipa_status_extract(struct ipa *ipa, const void *data, in ipa_status_extract() argument
128 enum ipa_version version = ipa->version; in ipa_status_extract()
144 /* Status word 1, bits 21-23 are reserved (not IPA v5.0+) */ in ipa_status_extract()
145 /* Status word 1, bits 24-26 are reserved (IPA v5.0+) */ in ipa_status_extract()
163 /* ROUTER_TABLE is in word 3, bits 14-21 (IPA v5.0+) */ in ipa_status_extract()
206 /* Status word 7, bit 31 is reserved (not IPA v5.0+) */ in ipa_status_extract()
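
The decoder selects each field's word, shift, and width by hardware version; the reserved-bit and ROUTER_TABLE comments above show layouts shifting at IPA v5.0. A minimal userspace sketch of that idea, with a made-up pre-v5.0 position (only the v5.0+ word 3, bits 14-21 placement is taken from the listing):

#include <stdint.h>
#include <stdio.h>

enum version { V4_7, V5_0 };

struct field_pos { uint8_t word; uint8_t shift; uint8_t width; };

/* Hypothetical lookup: only the v5.0+ placement matches the listing */
static struct field_pos router_table_pos(enum version v)
{
        if (v >= V5_0)
                return (struct field_pos){ .word = 3, .shift = 14, .width = 8 };

        return (struct field_pos){ .word = 3, .shift = 8, .width = 5 };
}

static uint32_t extract(const uint32_t *words, struct field_pos p)
{
        return (words[p.word] >> p.shift) & ((1u << p.width) - 1);
}

int main(void)
{
        uint32_t status[8] = { [3] = 0x5au << 14 };

        printf("ROUTER_TABLE = %u\n", extract(status, router_table_pos(V5_0)));
        return 0;
}
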
231 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, in ipa_endpoint_data_valid_one() argument
236 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_data_valid_one()
305 reg = ipa_reg(ipa, ENDP_INIT_AGGR); in ipa_endpoint_data_valid_one()
318 /* Starting with IPA v4.5 sequencer replication is obsolete */ in ipa_endpoint_data_valid_one()
319 if (ipa->version >= IPA_VERSION_4_5) { in ipa_endpoint_data_valid_one()
387 static u32 ipa_endpoint_max(struct ipa *ipa, u32 count, in ipa_endpoint_max() argument
391 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_max()
421 if (!ipa_endpoint_data_valid_one(ipa, count, data, dp)) in ipa_endpoint_max()
433 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_trans_alloc()
443 * Note that suspend is not supported starting with IPA v4.0, and
444 * delay mode should not be used starting with IPA v4.2.
449 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_ctrl() local
458 WARN_ON(ipa->version >= IPA_VERSION_4_2); in ipa_endpoint_init_ctrl()
460 WARN_ON(ipa->version >= IPA_VERSION_4_0); in ipa_endpoint_init_ctrl()
462 reg = ipa_reg(ipa, ENDP_INIT_CTRL); in ipa_endpoint_init_ctrl()
464 val = ioread32(ipa->reg_virt + offset); in ipa_endpoint_init_ctrl()
474 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_ctrl()
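
ipa_endpoint_init_ctrl() applies those rules as a guarded read-modify-write: WARN_ON() rejects delay mode on v4.2+ and suspend on v4.0+, then one bit of ENDP_INIT_CTRL is flipped. A standalone sketch of the read-modify-write itself; the bit position is an assumption, not the real register layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ENDP_DELAY_BIT  (1u << 0)       /* hypothetical bit position */

static void set_ctrl_bit(uint32_t *reg, uint32_t bit, bool enable)
{
        uint32_t val = *reg;            /* ioread32() in the driver */

        val = enable ? (val | bit) : (val & ~bit);
        *reg = val;                     /* iowrite32() in the driver */
}

int main(void)
{
        uint32_t ctrl = 0x10;           /* other bits must be preserved */

        set_ctrl_bit(&ctrl, ENDP_DELAY_BIT, true);
        printf("ctrl=0x%x\n", ctrl);    /* ctrl=0x11 */
        set_ctrl_bit(&ctrl, ENDP_DELAY_BIT, false);
        printf("ctrl=0x%x\n", ctrl);    /* ctrl=0x10 */
        return 0;
}
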
484 /* Delay mode should not be used for IPA v4.2+ */ in ipa_endpoint_program_delay()
485 WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2); in ipa_endpoint_program_delay()
494 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_aggr_active() local
499 WARN_ON(!test_bit(endpoint_id, ipa->available)); in ipa_endpoint_aggr_active()
501 reg = ipa_reg(ipa, STATE_AGGR_ACTIVE); in ipa_endpoint_aggr_active()
502 val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit)); in ipa_endpoint_aggr_active()
511 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_force_close() local
515 WARN_ON(!test_bit(endpoint_id, ipa->available)); in ipa_endpoint_force_close()
517 reg = ipa_reg(ipa, AGGR_FORCE_CLOSE); in ipa_endpoint_force_close()
518 iowrite32(mask, ipa->reg_virt + reg_n_offset(reg, unit)); in ipa_endpoint_force_close()
525 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
527 * issue in IPA version 3.5.1 where the suspend interrupt will not be
532 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_suspend_aggr() local
544 ipa_interrupt_simulate_suspend(ipa->interrupt); in ipa_endpoint_suspend_aggr()
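
The workaround only acts when an aggregation frame is actually open: it force-closes the frame, then simulates the SUSPEND interrupt that v3.5.1 hardware fails to raise. A condensed sketch of that flow, with stand-in helpers:

#include <stdbool.h>
#include <stdio.h>

static bool aggr_active(void)   { return true; }        /* pretend frame open */
static void force_close(void)   { puts("force close aggregation"); }
static void simulate_suspend_irq(void) { puts("simulate SUSPEND interrupt"); }

static void suspend_aggr_workaround(bool has_aggregation)
{
        if (!has_aggregation || !aggr_active())
                return;                 /* nothing to unblock */

        force_close();                  /* close the open aggregation frame */
        simulate_suspend_irq();         /* interrupt v3.5.1 won't raise */
}

int main(void)
{
        suspend_aggr_workaround(true);
        return 0;
}
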
553 if (endpoint->ipa->version >= IPA_VERSION_4_0) in ipa_endpoint_program_suspend()
554 return enable; /* For IPA v4.0+, no change made */ in ipa_endpoint_program_suspend()
561 * generate a SUSPEND IPA interrupt. If enabling suspend, have in ipa_endpoint_program_suspend()
571 * on all modem TX endpoints. Prior to IPA v4.2, endpoint DELAY mode is
572 * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
575 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable) in ipa_endpoint_modem_pause_all() argument
579 while (endpoint_id < ipa->endpoint_count) { in ipa_endpoint_modem_pause_all()
580 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++]; in ipa_endpoint_modem_pause_all()
587 else if (ipa->version < IPA_VERSION_4_2) in ipa_endpoint_modem_pause_all()
590 gsi_modem_channel_flow_control(&ipa->gsi, in ipa_endpoint_modem_pause_all()
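
Per the comment above, modem TX pausing is version-split: endpoint DELAY mode before IPA v4.2, GSI channel flow control from v4.2 on. A sketch of that dispatch, with stand-in enum values and actions:

#include <stdbool.h>
#include <stdio.h>

enum ipa_version { IPA_VERSION_3_5_1, IPA_VERSION_4_0, IPA_VERSION_4_2 };

/* Stand-ins for ipa_endpoint_program_delay() and
 * gsi_modem_channel_flow_control() */
static void program_delay(bool enable)
{
        printf("DELAY mode %s\n", enable ? "set" : "cleared");
}

static void channel_flow_control(bool enable)
{
        printf("GSI flow control %s\n", enable ? "on" : "off");
}

static void pause_modem_tx(enum ipa_version version, bool enable)
{
        if (version < IPA_VERSION_4_2)
                program_delay(enable);          /* pre-v4.2 */
        else
                channel_flow_control(enable);   /* v4.2+ */
}

int main(void)
{
        pause_modem_tx(IPA_VERSION_3_5_1, true);
        pause_modem_tx(IPA_VERSION_4_2, true);
        return 0;
}
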
597 int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) in ipa_endpoint_modem_exception_reset_all() argument
606 count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count(); in ipa_endpoint_modem_exception_reset_all()
607 trans = ipa_cmd_trans_alloc(ipa, count); in ipa_endpoint_modem_exception_reset_all()
609 dev_err(&ipa->pdev->dev, in ipa_endpoint_modem_exception_reset_all()
614 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) { in ipa_endpoint_modem_exception_reset_all()
620 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_exception_reset_all()
624 reg = ipa_reg(ipa, ENDP_STATUS); in ipa_endpoint_modem_exception_reset_all()
638 ipa_cmd_pipeline_clear_wait(ipa); in ipa_endpoint_modem_exception_reset_all()
646 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_cfg() local
651 reg = ipa_reg(ipa, ENDP_INIT_CFG); in ipa_endpoint_init_cfg()
654 enum ipa_version version = ipa->version; in ipa_endpoint_init_cfg()
677 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_cfg()
683 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_nat() local
690 reg = ipa_reg(ipa, ENDP_INIT_NAT); in ipa_endpoint_init_nat()
693 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_nat()
731 /* IPA v4.5 adds a few more most-significant bits */ in ipa_header_size_encode()
753 /* IPA v4.5 adds a few more most-significant bits */ in ipa_metadata_offset_encode()
767 * packet size field, and we have the IPA hardware populate both for each
785 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hdr() local
789 reg = ipa_reg(ipa, ENDP_INIT_HDR); in ipa_endpoint_init_hdr()
791 enum ipa_version version = ipa->version; in ipa_endpoint_init_hdr()
801 /* Where IPA will write the metadata value */ in ipa_endpoint_init_hdr()
805 /* Where IPA will write the length */ in ipa_endpoint_init_hdr()
807 /* Upper bits are stored in HDR_EXT with IPA v4.5 */ in ipa_endpoint_init_hdr()
823 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_hdr()
830 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hdr_ext() local
834 reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT); in ipa_endpoint_init_hdr_ext()
858 /* IPA v4.5 adds some most-significant bits to a few fields, in ipa_endpoint_init_hdr_ext()
861 if (ipa->version >= IPA_VERSION_4_5) { in ipa_endpoint_init_hdr_ext()
875 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_hdr_ext()
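
For IPA v4.5 a header-size or offset value no longer fits its original slot, so the low bits stay in ENDP_INIT_HDR while the extra most-significant bits land in ENDP_INIT_HDR_EXT. A sketch of the split with an illustrative (not real) field width:

#include <stdint.h>
#include <stdio.h>

#define HDR_LOW_WIDTH   6       /* hypothetical width kept in ENDP_INIT_HDR */

int main(void)
{
        uint32_t header_size = 0x85;    /* needs more than 6 bits */
        uint32_t low = header_size & ((1u << HDR_LOW_WIDTH) - 1);
        uint32_t msb = header_size >> HDR_LOW_WIDTH;

        /* low goes in ENDP_INIT_HDR, msb in ENDP_INIT_HDR_EXT (v4.5+);
         * hardware reassembles (msb << HDR_LOW_WIDTH) | low */
        printf("HDR field 0x%x, HDR_EXT field 0x%x\n", low, msb);
        return 0;
}
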
881 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hdr_metadata_mask() local
889 reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK); in ipa_endpoint_init_hdr_metadata_mask()
896 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hdr_metadata_mask()
901 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_mode() local
909 reg = ipa_reg(ipa, ENDP_INIT_MODE); in ipa_endpoint_init_mode()
912 u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id; in ipa_endpoint_init_mode()
922 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_mode()
925 /* For IPA v4.5+, times are expressed using Qtime. A time is represented
927 * ipa_qtime_config(). Three (or, starting with IPA v5.0, four) pulse
931 * available to the AP; a third is available starting with IPA v5.0.
937 ipa_qtime_val(struct ipa *ipa, u32 microseconds, u32 max, u32 *select) in ipa_qtime_val() argument
953 if (ipa->version >= IPA_VERSION_5_0) { in ipa_qtime_val()
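
ipa_qtime_val() picks the finest pulse generator whose tick count still fits the register field, falling back to coarser periods otherwise. A userspace sketch, assuming 100 microsecond / 1 millisecond / 10 millisecond tick periods (the third generator being the one the comment says arrives with IPA v5.0):

#include <stdint.h>
#include <stdio.h>

static uint32_t qtime_val(uint32_t microseconds, uint32_t max,
                          uint32_t *select)
{
        /* assumed tick periods; the third generator is IPA v5.0+ only */
        static const uint32_t period_us[] = { 100, 1000, 10000 };
        uint32_t which;
        uint32_t ticks;

        for (which = 0; which < 2; which++) {
                /* round to the nearest tick, like DIV_ROUND_CLOSEST() */
                ticks = (microseconds + period_us[which] / 2) / period_us[which];
                if (ticks <= max)
                        goto out;
        }
        /* coarsest period; the driver warns if this still overflows */
        ticks = (microseconds + period_us[which] / 2) / period_us[which];
out:
        *select = which;

        return ticks;
}

int main(void)
{
        uint32_t select;
        uint32_t ticks = qtime_val(250000, 0x1f, &select);      /* 250 ms */

        printf("ticks=%u select=%u\n", ticks, select);  /* ticks=25 select=2 */
        return 0;
}
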
965 /* Encode the aggregation timer limit (microseconds) based on IPA version */
966 static u32 aggr_time_limit_encode(struct ipa *ipa, const struct reg *reg, in aggr_time_limit_encode() argument
976 if (ipa->version >= IPA_VERSION_4_5) { in aggr_time_limit_encode()
979 ticks = ipa_qtime_val(ipa, microseconds, max, &select); in aggr_time_limit_encode()
996 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_aggr() local
1000 reg = ipa_reg(ipa, ENDP_INIT_AGGR); in ipa_endpoint_init_aggr()
1017 val |= aggr_time_limit_encode(ipa, reg, limit); in ipa_endpoint_init_aggr()
1029 /* AGGR_GRAN_SEL is 0 for IPA v4.5 */ in ipa_endpoint_init_aggr()
1035 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_aggr()
1039 * IPA version 4.5 the tick count is based on the Qtimer, which is
1040 * derived from the 19.2 MHz SoC XO clock. For older IPA versions
1041 * each tick represents 128 cycles of the IPA core clock.
1046 static u32 hol_block_timer_encode(struct ipa *ipa, const struct reg *reg, in hol_block_timer_encode() argument
1059 if (ipa->version >= IPA_VERSION_4_5) { in hol_block_timer_encode()
1064 ticks = ipa_qtime_val(ipa, microseconds, max, &select); in hol_block_timer_encode()
1071 rate = ipa_core_clock_rate(ipa); in hol_block_timer_encode()
1077 /* IPA v3.5.1 through v4.1 just record the tick count */ in hol_block_timer_encode()
1078 if (ipa->version < IPA_VERSION_4_2) in hol_block_timer_encode()
1081 /* For IPA v4.2, the tick count is represented by base and in hol_block_timer_encode()
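
Before v4.5 the conversion is plain arithmetic: one tick is 128 core-clock cycles, so ticks = microseconds * rate / (128 * 10^6), done in 64-bit math to avoid overflow. A worked example assuming a 192 MHz core clock:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t rate = 192000000;      /* example core clock: 192 MHz */
        uint64_t microseconds = 1000;   /* 1 ms HOL-block timeout */
        uint64_t ticks = microseconds * rate / (128 * 1000000ull);

        /* 1000 * 192e6 / 128e6 = 1500 ticks */
        printf("%" PRIu64 " ticks\n", ticks);
        return 0;
}
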
1111 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hol_block_timer() local
1116 reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER); in ipa_endpoint_init_hol_block_timer()
1117 val = hol_block_timer_encode(ipa, reg, microseconds); in ipa_endpoint_init_hol_block_timer()
1119 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_hol_block_timer()
1126 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hol_block_en() local
1131 reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN); in ipa_endpoint_init_hol_block_en()
1135 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_en()
1137 /* When enabling, the register must be written twice for IPA v4.5+ */ in ipa_endpoint_init_hol_block_en()
1138 if (enable && ipa->version >= IPA_VERSION_4_5) in ipa_endpoint_init_hol_block_en()
1139 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_en()
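
The enable path's second write is purely a hardware workaround for v4.5+. A sketch of the pattern:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void write_reg(volatile uint32_t *reg, uint32_t val)
{
        *reg = val;                     /* stands in for iowrite32() */
}

static void hol_block_enable(volatile uint32_t *reg, bool enable,
                             bool v4_5_plus)
{
        uint32_t val = enable ? 1u : 0u;

        write_reg(reg, val);
        if (enable && v4_5_plus)
                write_reg(reg, val);    /* second write, v4.5+ workaround */
}

int main(void)
{
        uint32_t hol_block_en = 0;

        hol_block_enable(&hol_block_en, true, true);
        printf("HOL_BLOCK_EN=%u\n", hol_block_en);
        return 0;
}
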
1155 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa) in ipa_endpoint_modem_hol_block_clear_all() argument
1159 while (endpoint_id < ipa->endpoint_count) { in ipa_endpoint_modem_hol_block_clear_all()
1160 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++]; in ipa_endpoint_modem_hol_block_clear_all()
1173 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_deaggr() local
1180 reg = ipa_reg(ipa, ENDP_INIT_DEAGGR); in ipa_endpoint_init_deaggr()
1186 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_deaggr()
1193 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_rsrc_grp() local
1197 reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP); in ipa_endpoint_init_rsrc_grp()
1200 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_rsrc_grp()
1206 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_seq() local
1213 reg = ipa_reg(ipa, ENDP_INIT_SEQ); in ipa_endpoint_init_seq()
1219 if (ipa->version < IPA_VERSION_4_5) in ipa_endpoint_init_seq()
1223 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_init_seq()
1272 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status() local
1276 reg = ipa_reg(ipa, ENDP_STATUS); in ipa_endpoint_status()
1284 status_endpoint_id = ipa->name_map[name]->endpoint_id; in ipa_endpoint_status()
1288 /* STATUS_LOCATION is 0, meaning IPA packet status in ipa_endpoint_status()
1289 * precedes the packet (not present for IPA v4.5+) in ipa_endpoint_status()
1294 iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); in ipa_endpoint_status()
1328 * The IPA hardware can hold a fixed number of receive buffers for an RX
1372 if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id)) in ipa_endpoint_replenish()
1382 if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id)) in ipa_endpoint_replenish_enable()
1445 /* The format of an IPA packet status structure is the same for several
1464 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status_skip() local
1468 opcode = ipa_status_extract(ipa, data, STATUS_OPCODE); in ipa_endpoint_status_skip()
1472 endpoint_id = ipa_status_extract(ipa, data, STATUS_DST_ENDPOINT); in ipa_endpoint_status_skip()
1484 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status_tag_valid() local
1487 status_mask = ipa_status_extract(ipa, data, STATUS_MASK); in ipa_endpoint_status_tag_valid()
1496 endpoint_id = ipa_status_extract(ipa, data, STATUS_SRC_ENDPOINT); in ipa_endpoint_status_tag_valid()
1497 command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]; in ipa_endpoint_status_tag_valid()
1499 complete(&ipa->completion); in ipa_endpoint_status_tag_valid()
1501 dev_err(&ipa->pdev->dev, in ipa_endpoint_status_tag_valid()
1514 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status_drop() local
1522 exception = ipa_status_extract(ipa, data, STATUS_EXCEPTION); in ipa_endpoint_status_drop()
1527 rule = ipa_status_extract(ipa, data, STATUS_ROUTER_RULE_INDEX); in ipa_endpoint_status_drop()
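
Combined with the "rule miss" sentinel noted near the top of the file, the drop policy reads: deaggregation exceptions drop, any other exception delivers, and an exception-free packet drops only when no router rule matched. A sketch with illustrative field values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RULE_MISS               0x3ff   /* illustrative "rule miss" value */
#define EXCEPTION_DEAGGR        1       /* illustrative exception code */

static bool should_drop(uint32_t exception, uint32_t router_rule)
{
        /* Deaggregation exceptions drop; other exceptions deliver */
        if (exception)
                return exception == EXCEPTION_DEAGGR;

        /* No exception: drop only if no routing rule matched */
        return router_rule == RULE_MISS;
}

int main(void)
{
        printf("%d\n", should_drop(0, RULE_MISS));      /* 1: drop */
        printf("%d\n", should_drop(0, 7));              /* 0: deliver */
        return 0;
}
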
1538 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status_parse() local
1547 dev_err(&endpoint->ipa->pdev->dev, in ipa_endpoint_status_parse()
1554 length = ipa_status_extract(ipa, data, STATUS_LENGTH); in ipa_endpoint_status_parse()
1619 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_trans_release() local
1622 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) { in ipa_endpoint_trans_release()
1636 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id) in ipa_endpoint_default_route_set() argument
1641 reg = ipa_reg(ipa, ROUTE); in ipa_endpoint_default_route_set()
1649 iowrite32(val, ipa->reg_virt + reg_offset(reg)); in ipa_endpoint_default_route_set()
1652 void ipa_endpoint_default_route_clear(struct ipa *ipa) in ipa_endpoint_default_route_clear() argument
1654 ipa_endpoint_default_route_set(ipa, 0); in ipa_endpoint_default_route_clear()
1663 * taken to ensure the IPA pipeline is properly cleared.
1669 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_reset_rx_aggr()
1670 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset_rx_aggr() local
1671 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_reset_rx_aggr()
1756 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset() local
1760 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation in ipa_endpoint_reset()
1764 special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa && in ipa_endpoint_reset()
1769 gsi_channel_reset(&ipa->gsi, channel_id, true); in ipa_endpoint_reset()
1772 dev_err(&ipa->pdev->dev, in ipa_endpoint_reset()
1780 /* Newer versions of IPA use GSI channel flow control in ipa_endpoint_program()
1786 if (endpoint->ipa->version < IPA_VERSION_4_2) in ipa_endpoint_program()
1814 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_enable_one() local
1815 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_enable_one()
1820 dev_err(&ipa->pdev->dev, in ipa_endpoint_enable_one()
1828 ipa_interrupt_suspend_enable(ipa->interrupt, endpoint_id); in ipa_endpoint_enable_one()
1832 __set_bit(endpoint_id, ipa->enabled); in ipa_endpoint_enable_one()
1840 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_disable_one() local
1841 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_disable_one()
1844 if (!test_bit(endpoint_id, ipa->enabled)) in ipa_endpoint_disable_one()
1847 __clear_bit(endpoint_id, endpoint->ipa->enabled); in ipa_endpoint_disable_one()
1851 ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id); in ipa_endpoint_disable_one()
1857 dev_err(&ipa->pdev->dev, in ipa_endpoint_disable_one()
1864 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_suspend_one()
1865 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_suspend_one()
1868 if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled)) in ipa_endpoint_suspend_one()
1884 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_resume_one()
1885 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_resume_one()
1888 if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled)) in ipa_endpoint_resume_one()
1902 void ipa_endpoint_suspend(struct ipa *ipa) in ipa_endpoint_suspend() argument
1904 if (!ipa->setup_complete) in ipa_endpoint_suspend()
1907 if (ipa->modem_netdev) in ipa_endpoint_suspend()
1908 ipa_modem_suspend(ipa->modem_netdev); in ipa_endpoint_suspend()
1910 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); in ipa_endpoint_suspend()
1911 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); in ipa_endpoint_suspend()
1914 void ipa_endpoint_resume(struct ipa *ipa) in ipa_endpoint_resume() argument
1916 if (!ipa->setup_complete) in ipa_endpoint_resume()
1919 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); in ipa_endpoint_resume()
1920 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); in ipa_endpoint_resume()
1922 if (ipa->modem_netdev) in ipa_endpoint_resume()
1923 ipa_modem_resume(ipa->modem_netdev); in ipa_endpoint_resume()
1928 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_setup_one()
1948 __set_bit(endpoint->endpoint_id, endpoint->ipa->set_up); in ipa_endpoint_setup_one()
1953 __clear_bit(endpoint->endpoint_id, endpoint->ipa->set_up); in ipa_endpoint_teardown_one()
1961 void ipa_endpoint_setup(struct ipa *ipa) in ipa_endpoint_setup() argument
1965 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) in ipa_endpoint_setup()
1966 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_setup()
1969 void ipa_endpoint_teardown(struct ipa *ipa) in ipa_endpoint_teardown() argument
1973 for_each_set_bit(endpoint_id, ipa->set_up, ipa->endpoint_count) in ipa_endpoint_teardown()
1974 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_teardown()
1977 void ipa_endpoint_deconfig(struct ipa *ipa) in ipa_endpoint_deconfig() argument
1979 ipa->available_count = 0; in ipa_endpoint_deconfig()
1980 bitmap_free(ipa->available); in ipa_endpoint_deconfig()
1981 ipa->available = NULL; in ipa_endpoint_deconfig()
1984 int ipa_endpoint_config(struct ipa *ipa) in ipa_endpoint_config() argument
1986 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_config()
1996 /* Prior to IPA v3.5, the FLAVOR_0 register was not supported. in ipa_endpoint_config()
2006 if (ipa->version < IPA_VERSION_3_5) { in ipa_endpoint_config()
2007 ipa->available = bitmap_zalloc(IPA_ENDPOINT_MAX, GFP_KERNEL); in ipa_endpoint_config()
2008 if (!ipa->available) in ipa_endpoint_config()
2010 ipa->available_count = IPA_ENDPOINT_MAX; in ipa_endpoint_config()
2012 bitmap_set(ipa->available, 0, IPA_ENDPOINT_MAX); in ipa_endpoint_config()
2020 reg = ipa_reg(ipa, FLAVOR_0); in ipa_endpoint_config()
2021 val = ioread32(ipa->reg_virt + reg_offset(reg)); in ipa_endpoint_config()
2023 /* Our RX is an IPA producer; our TX is an IPA consumer. */ in ipa_endpoint_config()
2035 /* Until IPA v5.0, the endpoint ID limit was 32 */ in ipa_endpoint_config()
2036 hw_limit = ipa->version < IPA_VERSION_5_0 ? 32 : U8_MAX + 1; in ipa_endpoint_config()
2044 ipa->available = bitmap_zalloc(limit, GFP_KERNEL); in ipa_endpoint_config()
2045 if (!ipa->available) in ipa_endpoint_config()
2047 ipa->available_count = limit; in ipa_endpoint_config()
2050 bitmap_set(ipa->available, 0, tx_count); in ipa_endpoint_config()
2051 bitmap_set(ipa->available, rx_base, rx_count); in ipa_endpoint_config()
2053 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) { in ipa_endpoint_config()
2062 if (!test_bit(endpoint_id, ipa->available)) { in ipa_endpoint_config()
2069 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_config()
2084 ipa_endpoint_deconfig(ipa); in ipa_endpoint_config()
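
FLAVOR_0 yields a TX endpoint count plus an RX count and base, and the available bitmap is simply those two ID ranges set. A standalone sketch; the register packing below is made up for the demo:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* made-up packing: rx_base | rx_count | tx_count, 8 bits each */
        uint32_t flavor = (16u << 16) | (12u << 8) | 10u;
        uint32_t tx_count = flavor & 0xff;
        uint32_t rx_count = (flavor >> 8) & 0xff;
        uint32_t rx_base = (flavor >> 16) & 0xff;
        uint32_t limit = rx_base + rx_count;    /* highest ID + 1 */
        uint64_t available = 0;
        uint32_t id;

        for (id = 0; id < tx_count; id++)       /* bitmap_set(avail, 0, tx) */
                available |= 1ull << id;
        for (id = rx_base; id < limit; id++)    /* bitmap_set(avail, base, rx) */
                available |= 1ull << id;

        printf("available = 0x%llx\n", (unsigned long long)available);
        return 0;
}
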
2089 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, in ipa_endpoint_init_one() argument
2094 endpoint = &ipa->endpoint[data->endpoint_id]; in ipa_endpoint_init_one()
2097 ipa->channel_map[data->channel_id] = endpoint; in ipa_endpoint_init_one()
2098 ipa->name_map[name] = endpoint; in ipa_endpoint_init_one()
2100 endpoint->ipa = ipa; in ipa_endpoint_init_one()
2107 __set_bit(endpoint->endpoint_id, ipa->defined); in ipa_endpoint_init_one()
2112 __clear_bit(endpoint->endpoint_id, endpoint->ipa->defined); in ipa_endpoint_exit_one()
2117 void ipa_endpoint_exit(struct ipa *ipa) in ipa_endpoint_exit() argument
2121 ipa->filtered = 0; in ipa_endpoint_exit()
2123 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) in ipa_endpoint_exit()
2124 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_exit()
2126 bitmap_free(ipa->enabled); in ipa_endpoint_exit()
2127 ipa->enabled = NULL; in ipa_endpoint_exit()
2128 bitmap_free(ipa->set_up); in ipa_endpoint_exit()
2129 ipa->set_up = NULL; in ipa_endpoint_exit()
2130 bitmap_free(ipa->defined); in ipa_endpoint_exit()
2131 ipa->defined = NULL; in ipa_endpoint_exit()
2133 memset(ipa->name_map, 0, sizeof(ipa->name_map)); in ipa_endpoint_exit()
2134 memset(ipa->channel_map, 0, sizeof(ipa->channel_map)); in ipa_endpoint_exit()
2138 int ipa_endpoint_init(struct ipa *ipa, u32 count, in ipa_endpoint_init() argument
2147 ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1; in ipa_endpoint_init()
2148 if (!ipa->endpoint_count) in ipa_endpoint_init()
2152 ipa->defined = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL); in ipa_endpoint_init()
2153 if (!ipa->defined) in ipa_endpoint_init()
2156 ipa->set_up = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL); in ipa_endpoint_init()
2157 if (!ipa->set_up) in ipa_endpoint_init()
2160 ipa->enabled = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL); in ipa_endpoint_init()
2161 if (!ipa->enabled) in ipa_endpoint_init()
2169 ipa_endpoint_init_one(ipa, name, data); in ipa_endpoint_init()
2174 ipa->modem_tx_count++; in ipa_endpoint_init()
2178 if (!ipa_filtered_valid(ipa, filtered)) { in ipa_endpoint_init()
2179 ipa_endpoint_exit(ipa); in ipa_endpoint_init()
2184 ipa->filtered = filtered; in ipa_endpoint_init()
2189 bitmap_free(ipa->set_up); in ipa_endpoint_init()
2190 ipa->set_up = NULL; in ipa_endpoint_init()
2192 bitmap_free(ipa->defined); in ipa_endpoint_init()
2193 ipa->defined = NULL; in ipa_endpoint_init()
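
The init path allocates the defined, set_up, and enabled bitmaps in order, and the error labels unwind them in reverse, as the tail of the listing shows. A sketch of that shape with calloc() standing in for bitmap_zalloc():

#include <stdint.h>
#include <stdlib.h>

struct state {
        uint8_t *defined;
        uint8_t *set_up;
        uint8_t *enabled;
};

static int state_init(struct state *s, size_t count)
{
        s->defined = calloc(count, 1);
        if (!s->defined)
                return -1;
        s->set_up = calloc(count, 1);
        if (!s->set_up)
                goto err_defined_free;
        s->enabled = calloc(count, 1);
        if (!s->enabled)
                goto err_set_up_free;

        return 0;

err_set_up_free:
        free(s->set_up);
        s->set_up = NULL;
err_defined_free:
        free(s->defined);
        s->defined = NULL;

        return -1;
}

int main(void)
{
        struct state s;

        return state_init(&s, 32) ? 1 : 0;
}
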