/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#ifndef TB_TUNNEL_H_
#define TB_TUNNEL_H_

#include "tb.h"

/**
 * enum tb_tunnel_type - Type of a tunnel
 * @TB_TUNNEL_PCI: PCIe tunnel
 * @TB_TUNNEL_DP: DisplayPort tunnel
 * @TB_TUNNEL_DMA: DMA tunnel (used by the networking driver)
 * @TB_TUNNEL_USB3: USB3 tunnel
 */
enum tb_tunnel_type {
	TB_TUNNEL_PCI,
	TB_TUNNEL_DP,
	TB_TUNNEL_DMA,
	TB_TUNNEL_USB3,
};

/**
 * enum tb_tunnel_state - State of a tunnel
 * @TB_TUNNEL_INACTIVE: tb_tunnel_activate() is not called for the tunnel
 * @TB_TUNNEL_ACTIVATING: tb_tunnel_activate() returned successfully for the tunnel
 * @TB_TUNNEL_ACTIVE: The tunnel is fully active
 */
enum tb_tunnel_state {
	TB_TUNNEL_INACTIVE,
	TB_TUNNEL_ACTIVATING,
	TB_TUNNEL_ACTIVE,
};

/**
 * struct tb_tunnel - Tunnel between two ports
 * @kref: Reference count
 * @tb: Pointer to the domain
 * @src_port: Source port of the tunnel
 * @dst_port: Destination port of the tunnel. For discovered incomplete
 *	      tunnels may be %NULL or null adapter port instead.
 * @paths: All paths required by the tunnel
 * @npaths: Number of paths in @paths
 * @pre_activate: Optional tunnel specific initialization called before
 *		  activation. Can touch hardware.
 * @activate: Optional tunnel specific activation/deactivation
 * @post_deactivate: Optional tunnel specific de-initialization called
 *		     after deactivation. Can touch hardware.
 * @destroy: Optional tunnel specific callback called when the tunnel
 *	     memory is being released. Should not touch hardware.
 * @maximum_bandwidth: Returns maximum possible bandwidth for this tunnel
 * @allocated_bandwidth: Return how much bandwidth is allocated for the tunnel
 * @alloc_bandwidth: Change tunnel bandwidth allocation
 * @consumed_bandwidth: Return how much bandwidth the tunnel consumes
 * @release_unused_bandwidth: Release all unused bandwidth
 * @reclaim_available_bandwidth: Reclaim back available bandwidth
 * @list: Tunnels are linked using this field
 * @type: Type of the tunnel
 * @state: Current state of the tunnel
 * @max_up: Maximum upstream bandwidth (Mb/s) available for the tunnel.
 *	    Only set if the bandwidth needs to be limited.
 * @max_down: Maximum downstream bandwidth (Mb/s) available for the tunnel.
 *	      Only set if the bandwidth needs to be limited.
 * @allocated_up: Allocated upstream bandwidth (only for USB3)
 * @allocated_down: Allocated downstream bandwidth (only for USB3)
 * @bw_mode: DP bandwidth allocation mode registers can be used to
 *	     determine consumed and allocated bandwidth
 * @dprx_started: DPRX negotiation was started (tb_dp_dprx_start() was called for it)
 * @dprx_canceled: Was DPRX capabilities read poll canceled
 * @dprx_timeout: If set DPRX capabilities read poll work will timeout after this passes
 * @dprx_work: Worker that is scheduled to poll completion of DPRX capabilities read
 * @callback: Optional callback called when DP tunnel is fully activated
 * @callback_data: Optional data for @callback
 */
struct tb_tunnel {
	struct kref kref;
	struct tb *tb;
	struct tb_port *src_port;
	struct tb_port *dst_port;
	struct tb_path **paths;
	size_t npaths;
	int (*pre_activate)(struct tb_tunnel *tunnel);
	int (*activate)(struct tb_tunnel *tunnel, bool activate);
	void (*post_deactivate)(struct tb_tunnel *tunnel);
	void (*destroy)(struct tb_tunnel *tunnel);
	int (*maximum_bandwidth)(struct tb_tunnel *tunnel, int *max_up,
				 int *max_down);
	int (*allocated_bandwidth)(struct tb_tunnel *tunnel, int *allocated_up,
				   int *allocated_down);
	int (*alloc_bandwidth)(struct tb_tunnel *tunnel, int *alloc_up,
			       int *alloc_down);
	int (*consumed_bandwidth)(struct tb_tunnel *tunnel, int *consumed_up,
				  int *consumed_down);
	int (*release_unused_bandwidth)(struct tb_tunnel *tunnel);
	void (*reclaim_available_bandwidth)(struct tb_tunnel *tunnel,
					    int *available_up,
					    int *available_down);
	struct list_head list;
	enum tb_tunnel_type type;
	enum tb_tunnel_state state;
	int max_up;
	int max_down;
	int allocated_up;
	int allocated_down;
	bool bw_mode;
	bool dprx_started;
	bool dprx_canceled;
	ktime_t dprx_timeout;
	struct delayed_work dprx_work;
	void (*callback)(struct tb_tunnel *tunnel, void *data);
	void *callback_data;
};

struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
					 bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down);
bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
			    int *reserved_down);
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
					bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int link_nr,
				     int max_up, int max_down,
				     void (*callback)(struct tb_tunnel *, void *),
				     void *callback_data);
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_path,
				      int transmit_ring, int receive_path,
				      int receive_ring);
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
			 int transmit_ring, int receive_path, int receive_ring);
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
					  bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
				       struct tb_port *down, int max_up,
				       int max_down);

void tb_tunnel_put(struct tb_tunnel *tunnel);
int tb_tunnel_activate(struct tb_tunnel *tunnel);
void tb_tunnel_deactivate(struct tb_tunnel *tunnel);

/**
 * tb_tunnel_is_active() - Is tunnel fully activated
 * @tunnel: Tunnel to check
 *
 * Returns %true if @tunnel is fully activated. For other than DP
 * tunnels this is pretty much once tb_tunnel_activate() returns
 * successfully. However, for DP tunnels this returns %true only once the
 * DPRX capabilities read has been issued successfully.
 */
static inline bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
{
	return tunnel->state == TB_TUNNEL_ACTIVE;
}

bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
			    const struct tb_port *port);
int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
				int *max_down);
int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
				  int *allocated_down);
int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
			      int *alloc_down);
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				 int *consumed_down);
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel);
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
					   int *available_up,
					   int *available_down);

static inline bool tb_tunnel_is_pci(const struct tb_tunnel *tunnel)
{
	return tunnel->type == TB_TUNNEL_PCI;
}

static inline bool tb_tunnel_is_dp(const struct tb_tunnel *tunnel)
{
	return tunnel->type == TB_TUNNEL_DP;
}

static inline bool tb_tunnel_is_dma(const struct tb_tunnel *tunnel)
{
	return tunnel->type == TB_TUNNEL_DMA;
}

static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel)
{
	return tunnel->type == TB_TUNNEL_USB3;
}

static inline bool tb_tunnel_direction_downstream(const struct tb_tunnel *tunnel)
{
	return tb_port_path_direction_downstream(tunnel->src_port,
						 tunnel->dst_port);
}

/**
 * enum tb_tunnel_event - Tunnel related events
 * @TB_TUNNEL_ACTIVATED: A tunnel was activated
 * @TB_TUNNEL_CHANGED: There is a tunneling change in the domain. Includes
 *		       full %TUNNEL_DETAILS if the tunnel in question is known
 *		       (ICM does not provide that information).
 * @TB_TUNNEL_DEACTIVATED: A tunnel was torn down
 * @TB_TUNNEL_LOW_BANDWIDTH: Tunnel bandwidth is not optimal
 * @TB_TUNNEL_NO_BANDWIDTH: There is not enough bandwidth for a tunnel
 */
enum tb_tunnel_event {
	TB_TUNNEL_ACTIVATED,
	TB_TUNNEL_CHANGED,
	TB_TUNNEL_DEACTIVATED,
	TB_TUNNEL_LOW_BANDWIDTH,
	TB_TUNNEL_NO_BANDWIDTH,
};

void tb_tunnel_event(struct tb *tb, enum tb_tunnel_event event,
		     enum tb_tunnel_type type,
		     const struct tb_port *src_port,
		     const struct tb_port *dst_port);

const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel);

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)			\
	do {								\
		struct tb_tunnel *__tunnel = (tunnel);			\
		level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt,	\
		      tb_route(__tunnel->src_port->sw),			\
		      __tunnel->src_port->port,				\
		      tb_route(__tunnel->dst_port->sw),			\
		      __tunnel->dst_port->port,				\
		      tb_tunnel_type_name(__tunnel),			\
		      ## arg);						\
	} while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

#endif /* TB_TUNNEL_H_ */