/* SPDX-License-Identifier: GPL-2.0 */
/* IP Virtual Server
 * data structure and functionality definitions
 */

#ifndef _NET_IP_VS_H
#define _NET_IP_VS_H

#include <linux/ip_vs.h>		/* definitions shared with userland */

#include <asm/types.h>			/* for __uXX types */

#include <linux/list.h>			/* for struct list_head */
#include <linux/rculist_bl.h>		/* for struct hlist_bl_head */
#include <linux/spinlock.h>		/* for struct rwlock_t */
#include <linux/atomic.h>		/* for struct atomic_t */
#include <linux/refcount.h>		/* for struct refcount_t */
#include <linux/workqueue.h>

#include <linux/compiler.h>
#include <linux/timer.h>
#include <linux/bug.h>

#include <net/checksum.h>
#include <linux/netfilter.h>		/* for union nf_inet_addr */
#include <linux/ip.h>
#include <linux/ipv6.h>			/* for struct ipv6hdr */
#include <net/ipv6.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
#include <net/net_namespace.h>		/* Net namespace */
#include <linux/sched/isolation.h>
#include <linux/siphash.h>

/* hdr_flags bits for struct ip_vs_iphdr, see ip_vs_fill_iph_skb*() */
#define IP_VS_HDR_INVERSE	1
#define IP_VS_HDR_ICMP		2

/* conn_tab limits (as per Kconfig) */
#define IP_VS_CONN_TAB_MIN_BITS	8
#if BITS_PER_LONG > 32
#define IP_VS_CONN_TAB_MAX_BITS	27
#else
#define IP_VS_CONN_TAB_MAX_BITS	20
#endif

/* svc_table limits */
#define IP_VS_SVC_TAB_MIN_BITS	4
#define IP_VS_SVC_TAB_MAX_BITS	20

/* Generic access of ipvs struct stored in the net namespace */
static inline struct netns_ipvs *net_ipvs(struct net* net)
{
	return net->ipvs;
}

/* Connections' size value needed by ip_vs_ctl.c */
extern int ip_vs_conn_tab_size;

/* Parsed IP header info, filled by ip_vs_fill_iph_skb*() */
struct ip_vs_iphdr {
	int hdr_flags;	/* ipvs flags: IP_VS_HDR_* */
	__u32 off;	/* Where IPv6 or IPv4 header starts */
	__u32 len;	/* IPv4 simply where L4 starts
			 * IPv6 where L4 Transport Header starts */
	__u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag)*/
	__s16 protocol;	/* L4 protocol */
	__s32 flags;	/* IPv6: flags returned by ipv6_find_hdr() */
	union nf_inet_addr saddr;	/* source address */
	union nf_inet_addr daddr;	/* destination address */
};

/* Fragment-safe wrapper to read a header from skb into buffer */
static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
				      int len, void *buffer)
{
	return skb_header_pointer(skb, offset, len, buffer);
}

/* This function handles filling *ip_vs_iphdr, both for IPv4 and IPv6.
 * IPv6 requires some extra work, as finding proper header position,
 * depend on the IPv6 extension headers.
 * Returns 1 on success, 0 if the header can not be parsed.
 */
static inline int
ip_vs_fill_iph_skb_off(int af, const struct sk_buff *skb, int offset,
		       int hdr_flags, struct ip_vs_iphdr *iphdr)
{
	iphdr->hdr_flags = hdr_flags;
	iphdr->off = offset;

#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6) {
		struct ipv6hdr _iph;
		const struct ipv6hdr *iph = skb_header_pointer(
			skb, offset, sizeof(_iph), &_iph);
		if (!iph)
			return 0;

		iphdr->saddr.in6 = iph->saddr;
		iphdr->daddr.in6 = iph->daddr;
		/* ipv6_find_hdr() updates len, flags */
		iphdr->len = offset;
		iphdr->flags = 0;
		iphdr->protocol = ipv6_find_hdr(skb, &iphdr->len, -1,
						&iphdr->fragoffs,
						&iphdr->flags);
		/* Negative protocol means parse failure */
		if (iphdr->protocol < 0)
			return 0;
	} else
#endif
	{
		struct iphdr _iph;
		const struct iphdr *iph = skb_header_pointer(
			skb, offset, sizeof(_iph), &_iph);
		if (!iph)
			return 0;

		iphdr->len = offset + iph->ihl * 4;
		iphdr->fragoffs = 0;
		iphdr->protocol = iph->protocol;
		iphdr->saddr.ip = iph->saddr;
		iphdr->daddr.ip = iph->daddr;
	}

	return 1;
}

/* As ip_vs_fill_iph_skb_off() but for headers embedded in ICMP errors */
static inline int
ip_vs_fill_iph_skb_icmp(int af, const struct sk_buff *skb, int offset,
			bool inverse, struct ip_vs_iphdr *iphdr)
{
	int hdr_flags = IP_VS_HDR_ICMP;

	if (inverse)
		hdr_flags |= IP_VS_HDR_INVERSE;

	return ip_vs_fill_iph_skb_off(af, skb, offset, hdr_flags, iphdr);
}

/* Fill *iphdr starting at the network header of skb */
static inline int
ip_vs_fill_iph_skb(int af, const struct sk_buff *skb, bool inverse,
		   struct ip_vs_iphdr *iphdr)
{
	int hdr_flags = 0;

	if (inverse)
		hdr_flags |= IP_VS_HDR_INVERSE;

	return ip_vs_fill_iph_skb_off(af, skb, skb_network_offset(skb),
				      hdr_flags, iphdr);
}

/* True if the header was parsed for the reply (inverse) direction */
static inline bool
ip_vs_iph_inverse(const struct ip_vs_iphdr *iph)
{
	return !!(iph->hdr_flags & IP_VS_HDR_INVERSE);
}

/* True if the header comes from an ICMP-embedded packet */
static inline bool
ip_vs_iph_icmp(const struct ip_vs_iphdr *iph)
{
	return !!(iph->hdr_flags & IP_VS_HDR_ICMP);
}

/* Copy an address for the given family: full 128 bits for IPv6,
 * only the first 32-bit word for IPv4 (other words left untouched).
 */
static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst,
				   const union nf_inet_addr *src)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		dst->in6 = src->in6;
	else
#endif
	dst->ip = src->ip;
}

/* As ip_vs_addr_copy() but for IPv4 also zero the three unused words,
 * so the whole union is in a well-defined state (e.g. for hashing).
 */
static inline void ip_vs_addr_set(int af, union nf_inet_addr *dst,
				  const union nf_inet_addr *src)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6) {
		dst->in6 = src->in6;
		return;
	}
#endif
	dst->ip = src->ip;
	dst->all[1] = 0;
	dst->all[2] = 0;
	dst->all[3] = 0;
}

/* Compare two addresses for the given family, non-zero if equal */
static inline int ip_vs_addr_equal(int af, const union nf_inet_addr *a,
				   const union nf_inet_addr *b)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		return ipv6_addr_equal(&a->in6, &b->in6);
#endif
	return a->ip == b->ip;
}

#ifdef CONFIG_IP_VS_DEBUG
#include <linux/net.h>

int ip_vs_get_debug_level(void);

/* Format an address into buf at *idx, advance *idx past the NUL and
 * return a pointer to the formatted string.  Only intended to be used
 * via IP_VS_DBG_ADDR() inside IP_VS_DBG_BUF()/IP_VS_ERR_BUF().
 */
static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
					 const union nf_inet_addr *addr,
					 int *idx)
{
	int len;
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		len = snprintf(&buf[*idx], buf_len - *idx, "[%pI6c]",
			       &addr->in6) + 1;
	else
#endif
		len = snprintf(&buf[*idx], buf_len - *idx, "%pI4",
			       &addr->ip) + 1;

	*idx += len;
	BUG_ON(*idx > buf_len + 1);
	return &buf[*idx - len];
}

/* Debug printing that provides the local buffer and index consumed by
 * IP_VS_DBG_ADDR() when it appears among the msg arguments.
 */
#define IP_VS_DBG_BUF(level, msg, ...)					\
	do {								\
		char ip_vs_dbg_buf[160];				\
		int ip_vs_dbg_idx = 0;					\
		if (level <= ip_vs_get_debug_level())			\
			printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__);	\
	} while (0)
/* Error printing variant of IP_VS_DBG_BUF() */
#define IP_VS_ERR_BUF(msg...)						\
	do {								\
		char ip_vs_dbg_buf[160];				\
		int ip_vs_dbg_idx = 0;					\
		pr_err(msg);						\
	} while (0)

/* Only use from within IP_VS_DBG_BUF() or IP_VS_ERR_BUF macros */
#define IP_VS_DBG_ADDR(af, addr)					\
	ip_vs_dbg_addr(af, ip_vs_dbg_buf,				\
		       sizeof(ip_vs_dbg_buf), addr,			\
		       &ip_vs_dbg_idx)

#define IP_VS_DBG(level, msg, ...)					\
	do {								\
		if (level <= ip_vs_get_debug_level())			\
			printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__);	\
	} while (0)
#define IP_VS_DBG_RL(msg, ...)						\
	do {								\
		if (net_ratelimit())					\
			printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__);	\
	} while (0)
#define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg)			\
	do {								\
		if (level <= ip_vs_get_debug_level())			\
			pp->debug_packet(af, pp, skb, ofs, msg);	\
	} while (0)
#define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg)			\
	do {								\
		if (level <= ip_vs_get_debug_level() &&			\
		    net_ratelimit())					\
			pp->debug_packet(af, pp, skb, ofs, msg);	\
	} while (0)
#else	/* NO DEBUGGING at ALL */
#define IP_VS_DBG_BUF(level, msg...)  do {} while (0)
#define IP_VS_ERR_BUF(msg...)  do {} while (0)
#define IP_VS_DBG(level, msg...)  do {} while (0)
#define IP_VS_DBG_RL(msg...)  do {} while (0)
#define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg)	do {} while (0)
#define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg)	do {} while (0)
#endif

#define IP_VS_BUG() BUG()
/* Rate-limited error printing */
#define IP_VS_ERR_RL(msg, ...)						\
	do {								\
		if (net_ratelimit())					\
			pr_err(msg, ##__VA_ARGS__);			\
	} while (0)

/* Per-bucket lock, padded to a cacheline to avoid false sharing */
struct ip_vs_aligned_lock {
	spinlock_t	l;	/* Protect buckets */
} ____cacheline_aligned_in_smp;

/* For arrays per family */
enum {
	IP_VS_AF_INET,
	IP_VS_AF_INET6,
	IP_VS_AF_MAX
};

/* Map AF_INET/AF_INET6 to the IP_VS_AF_* array index */
static inline int ip_vs_af_index(int af)
{
	return af == AF_INET6 ? IP_VS_AF_INET6 : IP_VS_AF_INET;
}

/* work_flags */
enum {
	IP_VS_WORK_SVC_RESIZE,		/* Schedule svc_resize_work */
	IP_VS_WORK_SVC_NORESIZE,	/* Stopping svc_resize_work */
	IP_VS_WORK_CONN_RESIZE,		/* Schedule conn_resize_work */
};

/* The port number of FTP service (in network order). */
#define FTPPORT		cpu_to_be16(21)
#define FTPDATA		cpu_to_be16(20)

/* TCP State Values */
enum {
	IP_VS_TCP_S_NONE = 0,
	IP_VS_TCP_S_ESTABLISHED,
	IP_VS_TCP_S_SYN_SENT,
	IP_VS_TCP_S_SYN_RECV,
	IP_VS_TCP_S_FIN_WAIT,
	IP_VS_TCP_S_TIME_WAIT,
	IP_VS_TCP_S_CLOSE,
	IP_VS_TCP_S_CLOSE_WAIT,
	IP_VS_TCP_S_LAST_ACK,
	IP_VS_TCP_S_LISTEN,
	IP_VS_TCP_S_SYNACK,
	IP_VS_TCP_S_LAST
};

/* UDP State Values */
enum {
	IP_VS_UDP_S_NORMAL,
	IP_VS_UDP_S_LAST,
};

/* ICMP State Values */
enum {
	IP_VS_ICMP_S_NORMAL,
	IP_VS_ICMP_S_LAST,
};

/* SCTP State Values */
enum ip_vs_sctp_states {
	IP_VS_SCTP_S_NONE,
	IP_VS_SCTP_S_INIT1,
	IP_VS_SCTP_S_INIT,
	IP_VS_SCTP_S_COOKIE_SENT,
	IP_VS_SCTP_S_COOKIE_REPLIED,
	IP_VS_SCTP_S_COOKIE_WAIT,
	IP_VS_SCTP_S_COOKIE,
	IP_VS_SCTP_S_COOKIE_ECHOED,
	IP_VS_SCTP_S_ESTABLISHED,
	IP_VS_SCTP_S_SHUTDOWN_SENT,
	IP_VS_SCTP_S_SHUTDOWN_RECEIVED,
	IP_VS_SCTP_S_SHUTDOWN_ACK_SENT,
	IP_VS_SCTP_S_REJECTED,
	IP_VS_SCTP_S_CLOSED,
	IP_VS_SCTP_S_LAST
};

/* Connection templates use bits from state */
#define IP_VS_CTPL_S_NONE	0x0000
#define IP_VS_CTPL_S_ASSURED	0x0001
#define IP_VS_CTPL_S_LAST	0x0002

/* Delta sequence info structure
 * Each ip_vs_conn has 2 (output AND input seq. changes).
 * Only used in the VS/NAT.
 */
struct ip_vs_seq {
	__u32	init_seq;	/* Add delta from this seq */
	__u32	delta;		/* Delta in sequence numbers */
	__u32	previous_delta;	/* Delta in sequence numbers
				 * before last resized pkt */
};

/* counters per cpu */
struct ip_vs_counters {
	u64_stats_t	conns;		/* connections scheduled */
	u64_stats_t	inpkts;		/* incoming packets */
	u64_stats_t	outpkts;	/* outgoing packets */
	u64_stats_t	inbytes;	/* incoming bytes */
	u64_stats_t	outbytes;	/* outgoing bytes */
};
/* Stats per cpu */
struct ip_vs_cpu_stats {
	struct ip_vs_counters	cnt;
	struct u64_stats_sync	syncp;
};

/* Default nice for estimator kthreads */
#define IPVS_EST_NICE		0

/* IPVS statistics objects */
struct ip_vs_estimator {
	struct hlist_node	list;

	u64			last_inbytes;
	u64			last_outbytes;
	u64			last_conns;
	u64			last_inpkts;
	u64			last_outpkts;

	u64			cps;
	u64			inpps;
	u64			outpps;
	u64			inbps;
	u64			outbps;

	s32			ktid:16,	/* kthread ID, -1=temp list */
				ktrow:8,	/* row/tick ID for kthread */
				ktcid:8;	/* chain ID for kthread tick */
};

/*
 * IPVS statistics object, 64-bit kernel version of struct ip_vs_stats_user
 */
struct ip_vs_kstats {
	u64			conns;		/* connections scheduled */
	u64			inpkts;		/* incoming packets */
	u64			outpkts;	/* outgoing packets */
	u64			inbytes;	/* incoming bytes */
	u64			outbytes;	/* outgoing bytes */

	u64			cps;		/* current connection rate */
	u64			inpps;		/* current in packet rate */
	u64			outpps;		/* current out packet rate */
	u64			inbps;		/* current in byte rate */
	u64			outbps;		/* current out byte rate */
};

struct ip_vs_stats {
	struct ip_vs_kstats	kstats;		/* kernel statistics */
	struct ip_vs_estimator	est;		/* estimator */
	struct
ip_vs_cpu_stats __percpu *cpustats;	/* per cpu counters */
	spinlock_t		lock;		/* spin lock */
	struct ip_vs_kstats	kstats0;	/* reset values */
};

/* ip_vs_stats with an RCU head, for deferred freeing */
struct ip_vs_stats_rcu {
	struct ip_vs_stats	s;
	struct rcu_head		rcu_head;
};

int ip_vs_stats_init_alloc(struct ip_vs_stats *s);
struct ip_vs_stats *ip_vs_stats_alloc(void);
void ip_vs_stats_release(struct ip_vs_stats *stats);
void ip_vs_stats_free(struct ip_vs_stats *stats);

/* Process estimators in multiple timer ticks (20/50/100, see ktrow) */
#define IPVS_EST_NTICKS		50
/* Estimation uses a 2-second period containing ticks (in jiffies) */
#define IPVS_EST_TICK		((2 * HZ) / IPVS_EST_NTICKS)

/* Limit of CPU load per kthread (8 for 12.5%), ratio of CPU capacity (1/C).
 * Value of 4 and above ensures kthreads will take work without exceeding
 * the CPU capacity under different circumstances.
 */
#define IPVS_EST_LOAD_DIVISOR	8

/* Kthreads should not have work that exceeds the CPU load above 50% */
#define IPVS_EST_CPU_KTHREADS	(IPVS_EST_LOAD_DIVISOR / 2)

/* Desired number of chains per timer tick (chain load factor in 100us units),
 * 48=4.8ms of 40ms tick (12% CPU usage):
 * 2 sec * 1000 ms in sec * 10 (100us in ms) / 8 (12.5%) / 50
 */
#define IPVS_EST_CHAIN_FACTOR	\
	ALIGN_DOWN(2 * 1000 * 10 / IPVS_EST_LOAD_DIVISOR / IPVS_EST_NTICKS, 8)

/* Compiled number of chains per tick
 * The defines should match cond_resched_rcu
 */
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
#define IPVS_EST_TICK_CHAINS	IPVS_EST_CHAIN_FACTOR
#else
#define IPVS_EST_TICK_CHAINS	1
#endif

/* ktrow is a signed 8-bit bitfield, so the tick count must fit in it */
#if IPVS_EST_NTICKS > 127
#error Too many timer ticks for ktrow
#endif

/* Multiple chains processed in same tick */
struct ip_vs_est_tick_data {
	struct rcu_head		rcu_head;
	struct hlist_head	chains[IPVS_EST_TICK_CHAINS];
	DECLARE_BITMAP(present, IPVS_EST_TICK_CHAINS);
	DECLARE_BITMAP(full, IPVS_EST_TICK_CHAINS);
	int			chain_len[IPVS_EST_TICK_CHAINS];
};

/* Context for estimation kthread */
struct ip_vs_est_kt_data {
	struct netns_ipvs	*ipvs;
	struct task_struct	*task;		/* task if running */
	struct ip_vs_est_tick_data __rcu *ticks[IPVS_EST_NTICKS];
	DECLARE_BITMAP(avail, IPVS_EST_NTICKS);	/* tick has space for ests */
	unsigned long		est_timer;	/* estimation timer (jiffies) */
	struct ip_vs_stats	*calc_stats;	/* Used for calculation */
	int			tick_len[IPVS_EST_NTICKS];	/* est count */
	int			id;		/* ktid per netns */
	int			chain_max;	/* max ests per tick chain */
	int			tick_max;	/* max ests per tick */
	int			est_count;	/* attached ests to kthread */
	int			est_max_count;	/* max ests per kthread */
	int			add_row;	/* row for new ests */
	int			est_row;	/* estimated row */
};

/* IPVS resizable hash tables */
struct ip_vs_rht {
	struct hlist_bl_head	*buckets;
	struct ip_vs_rht __rcu	*new_tbl;	/* New/Same table */
	seqcount_t		*seqc;		/* Protects moves */
	struct ip_vs_aligned_lock *lock;	/* Protect seqc */
	int			mask;		/* Buckets mask */
	int			size;		/* Buckets */
	int			seqc_mask;	/* seqc mask */
	int			lock_mask;	/* lock mask */
	u32			table_id;	/* see IP_VS_RHT_TABLE_ID_MASK */
	int			u_thresh;	/* upper threshold */
	int			l_thresh;	/* lower threshold */
	int			lfactor;	/* Load Factor (shift)*/
	int			bits;		/* size = 1 << bits */
	siphash_key_t		hash_key;	/* key for hashing */
	struct rcu_head		rcu_head;
};

/**
 * ip_vs_rht_for_each_table() - Walk the hash tables
 * @table: struct ip_vs_rht __rcu *table
 * @t: current table, used as cursor, struct ip_vs_rht *var
 * @p: previous table, temp struct ip_vs_rht *var
 *
 * Walk tables assuming others can not change the installed tables
 */
#define ip_vs_rht_for_each_table(table, t, p)				\
	for (p = NULL, t = rcu_dereference_protected(table, 1);		\
	     t != p;							\
	     p = t, t = rcu_dereference_protected(t->new_tbl, 1))

/**
 * ip_vs_rht_for_each_table_rcu() - Walk the hash tables under RCU reader lock
 * @table: struct ip_vs_rht __rcu *table
 * @t: current table, used as cursor, struct ip_vs_rht *var
 * @p: previous table, temp struct ip_vs_rht *var
 *
 * We usually search in one table and also in second table on resizing
 */
#define ip_vs_rht_for_each_table_rcu(table, t, p)			\
	for (p = NULL, t = rcu_dereference(table);			\
	     t != p;							\
	     p = t, t = rcu_dereference(t->new_tbl))

/**
 * ip_vs_rht_for_each_bucket() - Walk all table buckets
 * @t: current table, used as cursor, struct ip_vs_rht *var
 * @bucket: bucket index, used as cursor, u32 var
 * @head: bucket address, used as cursor, struct hlist_bl_head *var
 */
#define ip_vs_rht_for_each_bucket(t, bucket, head)			\
	for (bucket = 0, head = (t)->buckets;				\
	     bucket < t->size; bucket++, head++)

/**
 * ip_vs_rht_for_bucket_retry() - Retry bucket if entries are moved
 * @t: current table, used as cursor, struct ip_vs_rht *var
 * @bucket: index of current bucket or hash key
 * @sc: temp seqcount_t *var
 * @seq: temp unsigned int var for sequence count
 * @retry: temp int var
 */
#define ip_vs_rht_for_bucket_retry(t, bucket, sc, seq, retry)		\
	for (retry = 1, sc = &(t)->seqc[(bucket) & (t)->seqc_mask];	\
	     retry && ({ seq = read_seqcount_begin(sc); 1; });		\
	     retry = read_seqcount_retry(sc, seq))

/**
 * DECLARE_IP_VS_RHT_WALK_BUCKETS_RCU() - Declare variables
 *
 * Variables for ip_vs_rht_walk_buckets_rcu
 */
#define DECLARE_IP_VS_RHT_WALK_BUCKETS_RCU()				\
	struct ip_vs_rht *_t, *_p;					\
	unsigned int _seq;						\
	seqcount_t *_sc;						\
	u32 _bucket;							\
	int _retry
/**
 * ip_vs_rht_walk_buckets_rcu() - Walk all buckets under RCU read lock
 * @table: struct ip_vs_rht __rcu *table
 * @head: bucket address, used as cursor, struct hlist_bl_head *var
 *
 * Can be used while others add/delete/move entries
 * Not suitable if duplicates are not desired
 * Possible cases for reader that uses cond_resched_rcu() in the loop:
 * - new table can not be installed, no need to repeat
 * - new table can be installed => check and repeat if new table is
 *   installed, needed for !PREEMPT_RCU
 */
#define ip_vs_rht_walk_buckets_rcu(table, head)				\
	ip_vs_rht_for_each_table_rcu(table, _t, _p)			\
		ip_vs_rht_for_each_bucket(_t, _bucket, head)		\
			ip_vs_rht_for_bucket_retry(_t, _bucket, _sc,	\
						   _seq, _retry)

/**
 * DECLARE_IP_VS_RHT_WALK_BUCKET_RCU() - Declare variables
 *
 * Variables for ip_vs_rht_walk_bucket_rcu
 */
#define DECLARE_IP_VS_RHT_WALK_BUCKET_RCU()				\
	unsigned int _seq;						\
	seqcount_t *_sc;						\
	int _retry
/**
 * ip_vs_rht_walk_bucket_rcu() - Walk bucket under RCU read lock
 * @t: current table, struct ip_vs_rht *var
 * @bucket: index of current bucket or hash key
 * @head: bucket address, used as cursor, struct hlist_bl_head *var
 *
 * Can be used while others add/delete/move entries
 * Not suitable if duplicates are not desired
 * Possible cases for reader that uses cond_resched_rcu() in the loop:
 * - new table can not be installed, no need to repeat
 * - new table can be installed => check and repeat if new table is
 *   installed, needed for !PREEMPT_RCU
 */
#define ip_vs_rht_walk_bucket_rcu(t, bucket, head)			\
	if (({ head = (t)->buckets + ((bucket) & (t)->mask); 0; }))	\
		{}							\
	else								\
		ip_vs_rht_for_bucket_retry(t, (bucket), _sc, _seq, _retry)

/**
 * DECLARE_IP_VS_RHT_WALK_BUCKETS_SAFE_RCU() - Declare variables
 *
 * Variables for ip_vs_rht_walk_buckets_safe_rcu
 */
#define DECLARE_IP_VS_RHT_WALK_BUCKETS_SAFE_RCU()			\
	struct ip_vs_rht *_t, *_p;					\
	u32 _bucket
/**
 * ip_vs_rht_walk_buckets_safe_rcu() - Walk all buckets under RCU read lock
 * @table: struct ip_vs_rht __rcu *table
 * @head: bucket address, used as cursor, struct hlist_bl_head *var
 *
 * Can be used while others add/delete entries but moving is disabled
 * Using cond_resched_rcu() should be safe if tables do not change
 */
#define ip_vs_rht_walk_buckets_safe_rcu(table, head)			\
	ip_vs_rht_for_each_table_rcu(table, _t, _p)			\
		ip_vs_rht_for_each_bucket(_t, _bucket, head)

/**
 * DECLARE_IP_VS_RHT_WALK_BUCKETS() - Declare variables
 *
 * Variables for ip_vs_rht_walk_buckets
 */
#define DECLARE_IP_VS_RHT_WALK_BUCKETS()				\
	struct ip_vs_rht *_t, *_p;					\
	u32 _bucket

/**
 * ip_vs_rht_walk_buckets() - Walk all buckets
 * @table: struct ip_vs_rht __rcu *table
 * @head: bucket address, used as cursor, struct hlist_bl_head *var
 *
 * Use if others can not add/delete/move entries
 */
#define ip_vs_rht_walk_buckets(table, head)				\
	ip_vs_rht_for_each_table(table, _t, _p)				\
		ip_vs_rht_for_each_bucket(_t, _bucket, head)

/* Entries can be in one of two tables, so we flip bit when new table is
 * created and store it as highest bit in hash keys
 */
#define IP_VS_RHT_TABLE_ID_MASK		BIT(31)

/* Check if hash key is from this table */
static inline bool ip_vs_rht_same_table(struct ip_vs_rht *t, u32 hash_key)
{
	return !((t->table_id ^ hash_key) & IP_VS_RHT_TABLE_ID_MASK);
}

/* Build per-table hash key from hash value */
static inline u32 ip_vs_rht_build_hash_key(struct ip_vs_rht *t, u32 hash)
{
	return t->table_id | (hash & ~IP_VS_RHT_TABLE_ID_MASK);
}

void ip_vs_rht_free(struct ip_vs_rht *t);
void ip_vs_rht_rcu_free(struct rcu_head *head);
struct ip_vs_rht *ip_vs_rht_alloc(int buckets, int scounts, int locks);
int ip_vs_rht_desired_size(struct netns_ipvs *ipvs, struct ip_vs_rht *t, int n,
			   int lfactor, int min_bits, int max_bits);
void ip_vs_rht_set_thresholds(struct ip_vs_rht *t, int size, int lfactor,
			      int min_bits, int max_bits);
u32 ip_vs_rht_hash_linfo(struct ip_vs_rht *t, int af,
			 const union nf_inet_addr *addr, u32 v1, u32 v2);

struct dst_entry;
struct iphdr;
struct ip_vs_conn;
struct ip_vs_app;
struct sk_buff;
struct ip_vs_proto_data;

/* Transport protocol handler (registered per protocol, e.g. TCP/UDP) */
struct ip_vs_protocol {
	struct ip_vs_protocol	*next;
	char			*name;
	u16			protocol;
	u16			num_states;
	int			dont_defrag;

	void (*init)(struct ip_vs_protocol *pp);

	void (*exit)(struct ip_vs_protocol *pp);

	int (*init_netns)(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd);

	void (*exit_netns)(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd);

	int (*conn_schedule)(struct netns_ipvs *ipvs,
			     int af, struct sk_buff *skb,
			     struct ip_vs_proto_data *pd,
			     int *verdict, struct ip_vs_conn **cpp,
			     struct ip_vs_iphdr *iph);

	struct ip_vs_conn *
	(*conn_in_get)(struct netns_ipvs *ipvs,
		       int af,
		       const struct sk_buff *skb,
		       const struct ip_vs_iphdr *iph);

	struct ip_vs_conn *
	(*conn_out_get)(struct netns_ipvs *ipvs,
			int af,
			const struct sk_buff *skb,
			const struct ip_vs_iphdr *iph);

	int (*snat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
			    struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);

	int (*dnat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp,
			    struct ip_vs_conn *cp, struct ip_vs_iphdr *iph);

	const char *(*state_name)(int state);

	void (*state_transition)(struct ip_vs_conn *cp, int direction,
				 const struct sk_buff *skb,
				 struct ip_vs_proto_data *pd);

	int (*register_app)(struct netns_ipvs *ipvs, struct ip_vs_app *inc);

	void (*unregister_app)(struct netns_ipvs *ipvs, struct ip_vs_app *inc);

	int (*app_conn_bind)(struct ip_vs_conn *cp);

	void (*debug_packet)(int af, struct ip_vs_protocol *pp,
			     const struct sk_buff *skb,
			     int offset,
			     const char *msg);

	void (*timeout_change)(struct ip_vs_proto_data *pd, int flags);
};

/* protocol data per netns */
struct ip_vs_proto_data {
	struct ip_vs_proto_data	*next;
	struct ip_vs_protocol	*pp;
	int			*timeout_table;	/* protocol timeout table */
	atomic_t		appcnt;		/* counter of proto app incs. */
	struct tcp_states_t	*tcp_state_table;
};

struct ip_vs_protocol *ip_vs_proto_get(unsigned short proto);
struct ip_vs_proto_data *ip_vs_proto_data_get(struct netns_ipvs *ipvs,
					      unsigned short proto);

/* Tuple describing a connection (client/virtual address and port) */
struct ip_vs_conn_param {
	struct netns_ipvs		*ipvs;
	const union nf_inet_addr	*caddr;		/* client address */
	const union nf_inet_addr	*vaddr;		/* virtual address */
	__be16				cport;		/* client port */
	__be16				vport;		/* virtual port */
	__u16				protocol;
	u16				af;

	const struct ip_vs_pe		*pe;		/* persistence engine */
	char				*pe_data;
	__u8				pe_data_len;
};

/* Hash node in conn_tab */
struct ip_vs_conn_hnode {
	struct hlist_bl_node	node;		/* node in conn_tab */
	u32			hash_key;	/* Key for the hash table */
	u8			dir;		/* 0=out->in, 1=in->out */
} __packed;

/* IP_VS structure allocated for each dynamically scheduled connection */
struct ip_vs_conn {
	/* Cacheline for hash table nodes - rarely modified */

	struct ip_vs_conn_hnode	hn0;	/* Original direction */
	u8			af;	/* address family */
	__be16			cport;
	struct ip_vs_conn_hnode	hn1;	/* Reply direction */
	u8			daf;	/* Address family of the dest */
	__be16			dport;
	struct ip_vs_dest	*dest;	/* real server */
	atomic_t		n_control; /* Number of controlled ones */
	volatile __u32		flags;	/* status flags */
	/* 44/64 */

	struct ip_vs_conn	*control; /* Master control connection */
	const struct ip_vs_pe	*pe;
	char			*pe_data;
	__u8			pe_data_len;
	volatile __u16		state;	/* state info */
	volatile __u16		old_state; /* old state, to be used for
					    * state transition triggered
					    * synchronization
					    */
	/* 2-byte hole */
	/* 64/96 */

	union nf_inet_addr	caddr;	/* client address */
	union nf_inet_addr	vaddr;	/* virtual address */
	/* 96/128 */

	union nf_inet_addr	daddr;	/* destination address */
	__u32			fwmark;	/* Fire wall mark from skb */
	__be16			vport;
	__u16			protocol; /* Which protocol (TCP/UDP) */

	/* Note: we can group the following members into a structure,
	 * in order to save more space, and the following members are
	 * only used in VS/NAT anyway
	 */
	struct ip_vs_app	*app;	/* bound ip_vs_app object */
	void			*app_data; /* Application private data */
	/* 128/168 */
	struct_group(sync_conn_opt,
		struct ip_vs_seq in_seq;	/* incoming seq. struct */
		struct ip_vs_seq out_seq;	/* outgoing seq. struct */
	);
	/* 152/192 */

	struct timer_list	timer;	/* Expiration timer */
	volatile unsigned long	timeout; /* timeout */
	spinlock_t		lock;	/* lock for state transition */
	refcount_t		refcnt;	/* reference count */
	atomic_t		in_pkts; /* incoming packet counter */
	/* 64-bit: 4-byte gap */

	/* 188/256 */
	unsigned long		sync_endtime; /* jiffies + sent_retries */
	struct netns_ipvs	*ipvs;

	/* Packet transmitter for different forwarding methods. If it
	 * mangles the packet, it must return NF_DROP or better NF_STOLEN,
	 * otherwise this must be changed to a sk_buff **.
	 * NF_ACCEPT can be returned when destination is local.
	 */
	int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp,
			   struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);

	struct rcu_head		rcu_head;
};

/* Extended internal versions of struct ip_vs_service_user and ip_vs_dest_user
 * for IPv6 support.
 *
 * We need these to conveniently pass around service and destination
 * options, but unfortunately, we also need to keep the old definitions to
 * maintain userspace backwards compatibility for the setsockopt interface.
 */
struct ip_vs_service_user_kern {
	/* virtual service addresses */
	u16			af;
	u16			protocol;
	union nf_inet_addr	addr;		/* virtual ip address */
	__be16			port;
	u32			fwmark;		/* firewall mark of service */

	/* virtual service options */
	char			*sched_name;
	char			*pe_name;
	unsigned int		flags;		/* virtual service flags */
	unsigned int		timeout;	/* persistent timeout in sec */
	__be32			netmask;	/* persistent netmask or plen */
};


struct ip_vs_dest_user_kern {
	/* destination server address */
	union nf_inet_addr	addr;
	__be16			port;

	/* real server options */
	unsigned int		conn_flags;	/* connection flags */
	int			weight;		/* destination weight */

	/* thresholds for active connections */
	u32			u_threshold;	/* upper threshold */
	u32			l_threshold;	/* lower threshold */

	/* Address family of addr */
	u16			af;

	u16			tun_type;	/* tunnel type */
	__be16			tun_port;	/* tunnel port */
	u16			tun_flags;	/* tunnel flags */
};


/*
 * The information about the virtual service offered to the net and the
 * forwarding entries.
 */
struct ip_vs_service {
	struct hlist_bl_node	s_list;	  /* node in service table */
	u32			hash_key; /* Key for the hash table */
	u16			af;	  /* address family */
	__u16			protocol; /* which protocol (TCP/UDP) */

	union nf_inet_addr	addr;	  /* IP address for virtual service */
	__u32			fwmark;	  /* firewall mark of the service */
	atomic_t		refcnt;	  /* reference counter */
	__be16			port;	  /* port number for the service */
	unsigned int		flags;	  /* service status flags */
	unsigned int		timeout;  /* persistent timeout in ticks */
	__be32			netmask;  /* grouping granularity, mask/plen */
	struct netns_ipvs	*ipvs;

	struct list_head	destinations; /* real server d-linked list */
	__u32			num_dests;    /* number of servers */
	struct ip_vs_stats	stats;	  /* statistics for the service */

	/* for scheduling */
	struct ip_vs_scheduler __rcu *scheduler; /* bound scheduler object */
	spinlock_t		sched_lock;	 /* lock sched_data */
	void			*sched_data;	 /* scheduler application data */

	/* alternate persistence engine */
	struct ip_vs_pe __rcu	*pe;
	int			conntrack_afmask;

	struct rcu_head		rcu_head;
};

/* Information for cached dst */
struct ip_vs_dest_dst {
	struct dst_entry	*dst_cache;	/* destination cache entry */
	u32			dst_cookie;
	union nf_inet_addr	dst_saddr;
	struct rcu_head		rcu_head;
};

/* The real server destination forwarding entry with ip address, port number,
 * and so on.
 */
struct ip_vs_dest {
	struct list_head	n_list;	  /* for the dests in the service */
	struct hlist_node	d_list;	  /* for table with all the dests */

	u16			af;	  /* address family */
	__be16			port;	  /* port number of the server */
	union nf_inet_addr	addr;	  /* IP address of the server */
	volatile unsigned int	flags;	  /* dest status flags */
	atomic_t		conn_flags;	/* flags to copy to conn */
	atomic_t		weight;		/* server weight */
	atomic_t		last_weight;	/* server latest weight */
	__u16			tun_type;	/* tunnel type */
	__be16			tun_port;	/* tunnel port */
	__u16			tun_flags;	/* tunnel flags */

	refcount_t		refcnt;	  /* reference counter */
	struct ip_vs_stats	stats;	  /* statistics */
	unsigned long		idle_start; /* start time, jiffies */

	/* connection counters and thresholds */
	atomic_t		activeconns;	/* active connections */
	atomic_t		inactconns;	/* inactive connections */
	atomic_t		persistconns;	/* persistent connections */
	__u32			u_threshold;	/* upper threshold */
	__u32			l_threshold;	/* lower threshold */

	/* for destination cache */
	spinlock_t		dst_lock;	/* lock of dst_cache */
	struct ip_vs_dest_dst __rcu *dest_dst;	/* cached dst info */

	/* for virtual service */
	struct ip_vs_service __rcu *svc;	/* service it belongs to */
	__u16			protocol;	/* which protocol (TCP/UDP) */
	__be16			vport;		/* virtual port number */
	union nf_inet_addr	vaddr;		/* virtual IP address */
	__u32			vfwmark;	/* firewall mark of service */

	struct rcu_head		rcu_head;
	struct list_head	t_list;		/* in dest_trash */
	unsigned int		in_rs_table:1;	/* we are in rs_table */
};

/* The scheduler object */
struct ip_vs_scheduler {
	struct list_head	n_list;		/* d-linked list head */
	char			*name;		/* scheduler name */
	atomic_t		refcnt;		/* reference counter */
	struct module		*module;	/* THIS_MODULE/NULL */

	/* scheduler initializing service */
	int (*init_service)(struct ip_vs_service *svc);
	/* scheduling service finish */
	void (*done_service)(struct ip_vs_service *svc);
	/* dest is linked */
	int (*add_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);
	/* dest is unlinked */
	int (*del_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);
	/* dest is updated */
	int (*upd_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);

	/* selecting a server from the given service */
	struct ip_vs_dest* (*schedule)(struct ip_vs_service *svc,
				       const struct sk_buff *skb,
				       struct ip_vs_iphdr *iph);
};

/* The persistence engine object */
struct ip_vs_pe {
	struct list_head	n_list;		/* d-linked list head */
	char			*name;		/* scheduler name */
	atomic_t		refcnt;		/* reference counter */
	struct module		*module;	/* THIS_MODULE/NULL */

	/* get the connection template, if any */
	int (*fill_param)(struct ip_vs_conn_param *p, struct sk_buff *skb);
	/* compare connection template against param */
	bool (*ct_match)(const struct ip_vs_conn_param *p,
			 struct ip_vs_conn *ct);
	/* raw hash value for the param, inverse selects direction */
	u32 (*hashkey_raw)(const struct ip_vs_conn_param *p,
			   struct ip_vs_rht *t, bool inverse);
	int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf);
	/* create connections for real-server outgoing packets */
	struct ip_vs_conn* (*conn_out)(struct ip_vs_service *svc,
				       struct ip_vs_dest *dest,
				       struct sk_buff *skb,
				       const struct ip_vs_iphdr *iph,
				       __be16 dport, __be16 cport);
};

/* The application module object (a.k.a.
app incarnation) */
struct ip_vs_app {
	struct list_head	a_list;	   /* member in app list */
	int			type;	   /* IP_VS_APP_TYPE_xxx */
	char			*name;	   /* application module name */
	__u16			protocol;  /* L4 protocol - presumably
					    * IPPROTO_xxx, confirm vs users */
	struct module		*module;   /* THIS_MODULE/NULL */
	struct list_head	incs_list; /* list of incarnations */

	/* members for application incarnations */
	struct list_head	p_list;	   /* member in proto app list */
	struct ip_vs_app	*app;	   /* its real application */
	__be16			port;	   /* port number in net order */
	atomic_t		usecnt;	   /* usage counter */
	struct rcu_head		rcu_head;

	/* output hook: Process packet in inout direction, diff set for TCP.
	 * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
	 *	   2=Mangled but checksum was not updated
	 */
	int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *,
		       struct sk_buff *, int *diff, struct ip_vs_iphdr *ipvsh);

	/* input hook: Process packet in outin direction, diff set for TCP.
	 * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok,
	 *	   2=Mangled but checksum was not updated
	 */
	int (*pkt_in)(struct ip_vs_app *, struct ip_vs_conn *,
		      struct sk_buff *, int *diff, struct ip_vs_iphdr *ipvsh);

	/* ip_vs_app initializer */
	int (*init_conn)(struct ip_vs_app *, struct ip_vs_conn *);

	/* ip_vs_app finish */
	int (*done_conn)(struct ip_vs_app *, struct ip_vs_conn *);

	/* not used now */
	int (*bind_conn)(struct ip_vs_app *, struct ip_vs_conn *,
			 struct ip_vs_protocol *);

	void (*unbind_conn)(struct ip_vs_app *, struct ip_vs_conn *);

	int			*timeout_table;
	int			*timeouts;
	int			timeouts_size;

	int (*conn_schedule)(struct sk_buff *skb, struct ip_vs_app *app,
			     int *verdict, struct ip_vs_conn **cpp);

	struct ip_vs_conn *
	(*conn_in_get)(const struct sk_buff *skb, struct ip_vs_app *app,
		       const struct iphdr *iph, int inverse);

	struct ip_vs_conn *
	(*conn_out_get)(const struct sk_buff *skb, struct ip_vs_app *app,
			const struct iphdr *iph, int inverse);

	int (*state_transition)(struct ip_vs_conn *cp, int direction,
				const struct sk_buff *skb,
				struct ip_vs_app *app);

	void (*timeout_change)(struct ip_vs_app *app, int flags);
};

/* Per-instance state of one master sync thread */
struct ipvs_master_sync_state {
	struct list_head	sync_queue;	/* queued sync buffers */
	struct ip_vs_sync_buff	*sync_buff;	/* buffer being filled */
	unsigned long		sync_queue_len;
	unsigned int		sync_queue_delay;
	struct delayed_work	master_wakeup_work;
	struct netns_ipvs	*ipvs;		/* back-pointer to our netns */
};

struct ip_vs_sync_thread_data;

/* How much time to keep dests in trash */
#define IP_VS_DEST_TRASH_PERIOD		(120 * HZ)

/* Configuration of one sync daemon (master or backup side) */
struct ipvs_sync_daemon_cfg {
	union nf_inet_addr	mcast_group;
	int			syncid;
	u16			sync_maxlen;
	u16			mcast_port;
	u8			mcast_af;
	u8			mcast_ttl;
	/* multicast interface name */
	char
mcast_ifn[IP_VS_IFNAME_MAXLEN];
};

/* IPVS in network namespace */
struct netns_ipvs {
	int			gen;		/* Generation */
	int			enable;		/* enable like nf_hooks do */
	/* Hash table: for real service lookups */
#define IP_VS_RTAB_BITS 4
#define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
#define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)

	struct hlist_head	rs_table[IP_VS_RTAB_SIZE];
	/* ip_vs_app */
	struct list_head	app_list;
	/* ip_vs_proto */
#define IP_VS_PROTO_TAB_SIZE	32	/* must be power of 2 */
	struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
	/* ip_vs_proto_tcp */
#ifdef CONFIG_IP_VS_PROTO_TCP
#define TCP_APP_TAB_BITS	4
#define TCP_APP_TAB_SIZE	(1 << TCP_APP_TAB_BITS)
#define TCP_APP_TAB_MASK	(TCP_APP_TAB_SIZE - 1)
	struct list_head	tcp_apps[TCP_APP_TAB_SIZE];
#endif
	/* ip_vs_proto_udp */
#ifdef CONFIG_IP_VS_PROTO_UDP
#define UDP_APP_TAB_BITS	4
#define UDP_APP_TAB_SIZE	(1 << UDP_APP_TAB_BITS)
#define UDP_APP_TAB_MASK	(UDP_APP_TAB_SIZE - 1)
	struct list_head	udp_apps[UDP_APP_TAB_SIZE];
#endif
	/* ip_vs_proto_sctp */
#ifdef CONFIG_IP_VS_PROTO_SCTP
#define SCTP_APP_TAB_BITS	4
#define SCTP_APP_TAB_SIZE	(1 << SCTP_APP_TAB_BITS)
#define SCTP_APP_TAB_MASK	(SCTP_APP_TAB_SIZE - 1)
	/* Hash table for SCTP application incarnations */
	struct list_head	sctp_apps[SCTP_APP_TAB_SIZE];
#endif
	/* ip_vs_conn */
	atomic_t		conn_count;	/* connection counter */
	atomic_t		no_cport_conns[IP_VS_AF_MAX];
	struct delayed_work	conn_resize_work;	/* resize conn_tab */

	/* ip_vs_ctl */
	struct ip_vs_stats_rcu	*tot_stats;	/* Statistics & est. */

	/* Trash for destinations */
	struct list_head	dest_trash;
	spinlock_t		dest_trash_lock;
	struct timer_list	dest_trash_timer;	/* expiration timer */
	struct mutex		service_mutex;	/* service reconfig */
	struct rw_semaphore	svc_resize_sem;	/* svc_table resizing */
	struct delayed_work	svc_resize_work;	/* resize svc_table */
	atomic_t		svc_table_changes;	/* ++ on new table */
	/* Service counters */
	atomic_t		num_services[IP_VS_AF_MAX];	/* Services */
	atomic_t		fwm_services[IP_VS_AF_MAX];	/* Services */
	atomic_t		nonfwm_services[IP_VS_AF_MAX];	/* Services */
	atomic_t		ftpsvc_counter[IP_VS_AF_MAX];	/* FTPPORT */
	atomic_t		nullsvc_counter[IP_VS_AF_MAX];	/* Zero port */
	atomic_t		conn_out_counter[IP_VS_AF_MAX];	/* out conn */

#ifdef CONFIG_SYSCTL
	/* delayed work for expiring no dest connections */
	struct delayed_work	expire_nodest_conn_work;
	/* 1/rate drop and drop-entry variables */
	struct delayed_work	defense_work;	/* Work handler */
	int			drop_rate;
	int			drop_counter;
	int			old_secure_tcp;
	atomic_t		dropentry;
	s8			dropentry_counters[8];
	/* locks in ctl.c */
	spinlock_t		dropentry_lock;	 /* drop entry handling */
	spinlock_t		droppacket_lock; /* drop packet handling */
	spinlock_t		securetcp_lock;	 /* state and timeout tables */

	/* sys-ctl struct */
	struct ctl_table_header	*sysctl_hdr;
	struct ctl_table	*sysctl_tbl;
#endif

	/* sysctl variables */
	int			sysctl_amemthresh;
	int			sysctl_am_droprate;
	int			sysctl_drop_entry;
	int			sysctl_drop_packet;
	int			sysctl_secure_tcp;
#ifdef CONFIG_IP_VS_NFCT
	int			sysctl_conntrack;
#endif
	int			sysctl_snat_reroute;
	int			sysctl_sync_ver;
	int			sysctl_sync_ports;
	int			sysctl_sync_persist_mode;
	unsigned long		sysctl_sync_qlen_max;
	int			sysctl_sync_sock_size;
	int			sysctl_cache_bypass;
	int			sysctl_expire_nodest_conn;
	int			sysctl_sloppy_tcp;
	int			sysctl_sloppy_sctp;
	int			sysctl_expire_quiescent_template;
	int			sysctl_sync_threshold[2];
	unsigned int		sysctl_sync_refresh_period;
	int			sysctl_sync_retries;
	int			sysctl_nat_icmp_send;
	int			sysctl_pmtu_disc;
	int			sysctl_backup_only;
	int			sysctl_conn_reuse_mode;
	int			sysctl_schedule_icmp;
	int			sysctl_ignore_tunneled;
	int			sysctl_run_estimation;
#ifdef CONFIG_SYSCTL
	cpumask_var_t		sysctl_est_cpulist;	/* kthread cpumask */
	int			est_cpulist_valid;	/* cpulist set */
	int			sysctl_est_nice;	/* kthread nice */
	int			est_stopped;		/* stop tasks */
#endif
	int			sysctl_conn_lfactor;
	int			sysctl_svc_lfactor;

	/* ip_vs_lblc */
	int			sysctl_lblc_expiration;
	struct ctl_table_header	*lblc_ctl_header;
	struct ctl_table	*lblc_ctl_table;
	/* ip_vs_lblcr */
	int			sysctl_lblcr_expiration;
	struct ctl_table_header	*lblcr_ctl_header;
	struct ctl_table	*lblcr_ctl_table;
	unsigned long		work_flags;	/* IP_VS_WORK_* flags */
	/* ip_vs_est */
	struct delayed_work	est_reload_work;/* Reload kthread tasks */
	struct mutex		est_mutex;	/* protect kthread tasks */
	struct hlist_head	est_temp_list;	/* Ests during calc phase */
	struct ip_vs_est_kt_data **est_kt_arr;	/* Array of kthread data ptrs */
	unsigned long		est_max_threads;/* Hard limit of kthreads */
	int			est_calc_phase;	/* Calculation phase */
	int			est_chain_max;	/* Calculated chain_max */
	int			est_kt_count;	/* Allocated ptrs */
	int			est_add_ktid;	/* ktid where to add ests */
	atomic_t		est_genid;	/* kthreads reload genid */
	atomic_t		est_genid_done;	/* applied genid */
	/* ip_vs_sync */
	spinlock_t		sync_lock;
	struct ipvs_master_sync_state *ms;
	spinlock_t		sync_buff_lock;
	struct ip_vs_sync_thread_data *master_tinfo;
	struct ip_vs_sync_thread_data *backup_tinfo;
	int			threads_mask;
	volatile int		sync_state;
	struct mutex		sync_mutex;
	struct ipvs_sync_daemon_cfg mcfg;	/* Master Configuration */
	struct ipvs_sync_daemon_cfg bcfg;	/* Backup Configuration */
	/* net name space ptr */
	struct net		*net;		/* Needed by timer routines */
	/* Number of heterogeneous destinations, needed because heterogeneous
	 * are not supported when synchronization is enabled.
	 */
	unsigned int		mixed_address_family_dests;
	unsigned int		hooks_afmask;	/* &1=AF_INET, &2=AF_INET6 */

	struct ip_vs_rht __rcu	*svc_table;	/* Services */
	struct ip_vs_rht __rcu	*conn_tab;	/* Connections */
	atomic_t		conn_tab_changes;	/* ++ on new table */
};

/* Defaults returned by the !CONFIG_SYSCTL accessor stubs below */
#define DEFAULT_SYNC_THRESHOLD	3
#define DEFAULT_SYNC_PERIOD	50
#define DEFAULT_SYNC_VER	1
#define DEFAULT_SLOPPY_TCP	0
#define DEFAULT_SLOPPY_SCTP	0
#define DEFAULT_SYNC_REFRESH_PERIOD	(0U * HZ)
#define DEFAULT_SYNC_RETRIES		0
#define IPVS_SYNC_WAKEUP_RATE	8
#define IPVS_SYNC_QLEN_MAX	(IPVS_SYNC_WAKEUP_RATE * 4)
#define IPVS_SYNC_SEND_DELAY	(HZ / 50)
#define IPVS_SYNC_CHECK_PERIOD	HZ
#define IPVS_SYNC_FLUSH_TIME	(HZ * 2)
#define IPVS_SYNC_PORTS_MAX	(1 << 6)

#ifdef CONFIG_SYSCTL

static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_threshold[0];
}

/* READ_ONCE: value may be changed concurrently via sysctl */
static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
{
	return READ_ONCE(ipvs->sysctl_sync_threshold[1]);
}

static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
{
	return READ_ONCE(ipvs->sysctl_sync_refresh_period);
}

static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_retries;
}

static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_ver;
}

static inline int sysctl_sloppy_tcp(struct netns_ipvs *ipvs)
{
	return
	       ipvs->sysctl_sloppy_tcp;
}

static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sloppy_sctp;
}

/* READ_ONCE: value may be changed concurrently via sysctl */
static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
{
	return READ_ONCE(ipvs->sysctl_sync_ports);
}

static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_persist_mode;
}

static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_qlen_max;
}

static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_sync_sock_size;
}

static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_pmtu_disc;
}

/* Effective only while this netns runs in BACKUP sync state */
static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
{
	return ipvs->sync_state & IP_VS_STATE_BACKUP &&
	       ipvs->sysctl_backup_only;
}

static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_conn_reuse_mode;
}

static inline int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_expire_nodest_conn;
}

static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_schedule_icmp;
}

static inline int sysctl_ignore_tunneled(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_ignore_tunneled;
}

static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_cache_bypass;
}

static inline int sysctl_run_estimation(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_run_estimation;
}

/* Estimator cpumask: configured list if valid, else housekeeping CPUs */
static inline const struct cpumask *sysctl_est_cpulist(struct netns_ipvs *ipvs)
{
	if (ipvs->est_cpulist_valid)
		return ipvs->sysctl_est_cpulist;
	else
		return housekeeping_cpumask(HK_TYPE_KTHREAD);
}

/* Like sysctl_est_cpulist() but NULL when no explicit list is configured */
static inline const struct cpumask *sysctl_est_preferred_cpulist(struct netns_ipvs *ipvs)
{
	if (ipvs->est_cpulist_valid)
		return ipvs->sysctl_est_cpulist;
	else
		return NULL;
}

static inline int sysctl_est_nice(struct netns_ipvs *ipvs)
{
	return ipvs->sysctl_est_nice;
}

#else

/* !CONFIG_SYSCTL: fixed defaults, no per-netns tuning */

static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
{
	return DEFAULT_SYNC_THRESHOLD;
}

static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
{
	return DEFAULT_SYNC_PERIOD;
}

static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
{
	return DEFAULT_SYNC_REFRESH_PERIOD;
}

static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
{
	return DEFAULT_SYNC_RETRIES & 3;
}

static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
{
	return DEFAULT_SYNC_VER;
}

static inline int sysctl_sloppy_tcp(struct netns_ipvs *ipvs)
{
	return DEFAULT_SLOPPY_TCP;
}

static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs)
{
	return DEFAULT_SLOPPY_SCTP;
}

static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
{
	return 1;
}

static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs)
{
	return 0;
}

static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs)
{
	return IPVS_SYNC_QLEN_MAX;
}

static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs)
{
	return 0;
}

static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
{
	return 1;
}

static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
{
	return 0;
}

static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs)
{
	return 1;
}

static inline int
sysctl_expire_nodest_conn(struct netns_ipvs *ipvs)
{
	return 0;
}

static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs)
{
	return 0;
}

static inline int sysctl_ignore_tunneled(struct netns_ipvs *ipvs)
{
	return 0;
}

static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs)
{
	return 0;
}

static inline int sysctl_run_estimation(struct netns_ipvs *ipvs)
{
	return 1;
}

static inline const struct cpumask *sysctl_est_cpulist(struct netns_ipvs *ipvs)
{
	return housekeeping_cpumask(HK_TYPE_KTHREAD);
}

static inline const struct cpumask *sysctl_est_preferred_cpulist(struct netns_ipvs *ipvs)
{
	return NULL;
}

static inline int sysctl_est_nice(struct netns_ipvs *ipvs)
{
	return IPVS_EST_NICE;
}

#endif

/* Get load factor to map conn_count/u_thresh to t->size */
static inline int sysctl_conn_lfactor(struct netns_ipvs *ipvs)
{
	return READ_ONCE(ipvs->sysctl_conn_lfactor);
}

/* Get load factor to map num_services/u_thresh to t->size
 * Smaller value decreases u_thresh to reduce collisions but increases
 * the table size
 * Returns factor where:
 * - <0: u_thresh = size >> -factor, eg. lfactor -2 = 25% load
 * - >=0: u_thresh = size << factor, eg. lfactor 1 = 200% load
 */
static inline int sysctl_svc_lfactor(struct netns_ipvs *ipvs)
{
	return READ_ONCE(ipvs->sysctl_svc_lfactor);
}

/* IPVS core functions
 * (from ip_vs_core.c)
 */
const char *ip_vs_proto_name(unsigned int proto);
void ip_vs_init_hash_table(struct list_head *table, int rows);
struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
				      struct ip_vs_dest *dest,
				      struct sk_buff *skb,
				      const struct ip_vs_iphdr *iph,
				      __be16 dport,
				      __be16 cport);
#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))

#define IP_VS_APP_TYPE_FTP	1

/* ip_vs_conn handling functions
 * (from ip_vs_conn.c)
 */
enum {
	IP_VS_DIR_INPUT = 0,
	IP_VS_DIR_OUTPUT,
	IP_VS_DIR_INPUT_ONLY,
	IP_VS_DIR_LAST,
};

/* Fill a connection lookup parameter block from its components.
 * pe/pe_data are reset; set them afterwards if a persistence engine
 * should participate in the lookup.
 */
static inline void ip_vs_conn_fill_param(struct netns_ipvs *ipvs, int af, int protocol,
					 const union nf_inet_addr *caddr,
					 __be16 cport,
					 const union nf_inet_addr *vaddr,
					 __be16 vport,
					 struct ip_vs_conn_param *p)
{
	p->ipvs = ipvs;
	p->af = af;
	p->protocol = protocol;
	p->caddr = caddr;
	p->cport = cport;
	p->vaddr = vaddr;
	p->vport = vport;
	p->pe = NULL;
	p->pe_data = NULL;
}

struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p);
struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p);

struct ip_vs_conn *ip_vs_conn_in_get_proto(struct netns_ipvs *ipvs, int af,
					   const struct sk_buff *skb,
					   const struct ip_vs_iphdr *iph);

struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p);

struct ip_vs_conn *ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
					    const struct sk_buff *skb,
					    const struct ip_vs_iphdr *iph);

/* Get reference to gain full access to conn.
 * By default, RCU read-side critical sections have access only to
 * conn fields and its PE data, see ip_vs_conn_rcu_free() for reference.
 */
static inline bool __ip_vs_conn_get(struct ip_vs_conn *cp)
{
	return refcount_inc_not_zero(&cp->refcnt);
}

/* put back the conn without restarting its timer */
static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
{
	/* order prior stores to the conn before dropping our reference */
	smp_mb__before_atomic();
	refcount_dec(&cp->refcnt);
}

void ip_vs_conn_put(struct ip_vs_conn *cp);
void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
int ip_vs_conn_desired_size(struct netns_ipvs *ipvs, struct ip_vs_rht *t,
			    int lfactor);
struct ip_vs_rht *ip_vs_conn_tab_alloc(struct netns_ipvs *ipvs, int buckets,
				       int lfactor);

/* Map a hash node embedded at hn0 back to its connection */
static inline struct ip_vs_conn *
ip_vs_hn0_to_conn(struct ip_vs_conn_hnode *hn)
{
	return container_of(hn, struct ip_vs_conn, hn0);
}

/* Map either embedded hash node back to its connection; hn->dir
 * selects between the two embedded nodes (hn1 vs hn0).
 */
static inline struct ip_vs_conn *
ip_vs_hn_to_conn(struct ip_vs_conn_hnode *hn)
{
	return hn->dir ? container_of(hn, struct ip_vs_conn, hn1) :
			 container_of(hn, struct ip_vs_conn, hn0);
}

struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
				  const union nf_inet_addr *daddr,
				  __be16 dport, unsigned int flags,
				  struct ip_vs_dest *dest, __u32 fwmark);
void ip_vs_conn_expire_now(struct ip_vs_conn *cp);

const char *ip_vs_state_name(const struct ip_vs_conn *cp);

void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest);
void ip_vs_random_dropentry(struct netns_ipvs *ipvs);
int ip_vs_conn_init(void);
void ip_vs_conn_cleanup(void);

/* Detach cp from its controlling (template) connection, decrementing
 * the controller's n_control count.  Logs and bails out on the
 * inconsistent cases (no controller, or controller count already 0).
 */
static inline void ip_vs_control_del(struct ip_vs_conn *cp)
{
	struct ip_vs_conn *ctl_cp = cp->control;

	if (!ctl_cp) {
		IP_VS_ERR_BUF("request control DEL for uncontrolled: "
			      "%s:%d to %s:%d\n",
			      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
			      ntohs(cp->cport),
			      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
			      ntohs(cp->vport));

		return;
	}

	IP_VS_DBG_BUF(7, "DELeting control for: "
		      "cp.dst=%s:%d ctl_cp.dst=%s:%d\n",
		      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
		      ntohs(cp->cport),
		      IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr),
		      ntohs(ctl_cp->cport));

	cp->control = NULL;
	if (atomic_read(&ctl_cp->n_control) == 0) {
		IP_VS_ERR_BUF("BUG control DEL with n=0 : "
			      "%s:%d to %s:%d\n",
			      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
			      ntohs(cp->cport),
			      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
			      ntohs(cp->vport));

		return;
	}
	atomic_dec(&ctl_cp->n_control);
}

/* Attach cp to controlling (template) connection ctl_cp; an existing
 * controller is released first.
 */
static inline void
ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
{
	if (cp->control) {
		IP_VS_ERR_BUF("request control ADD for already controlled: "
			      "%s:%d to %s:%d\n",
			      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
			      ntohs(cp->cport),
			      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
			      ntohs(cp->vport));

		ip_vs_control_del(cp);
	}

	IP_VS_DBG_BUF(7, "ADDing control for: "
		      "cp.dst=%s:%d ctl_cp.dst=%s:%d\n",
		      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
		      ntohs(cp->cport),
		      IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr),
		      ntohs(ctl_cp->cport));

	cp->control = ctl_cp;
	atomic_inc(&ctl_cp->n_control);
}

/* Mark our template as assured */
static inline void
ip_vs_control_assure_ct(struct ip_vs_conn *cp)
{
	struct ip_vs_conn *ct = cp->control;

	if (ct && !(ct->state & IP_VS_CTPL_S_ASSURED) &&
	    (ct->flags & IP_VS_CONN_F_TEMPLATE))
		ct->state |= IP_VS_CTPL_S_ASSURED;
}

/* IPVS netns init & cleanup functions */
int ip_vs_estimator_net_init(struct netns_ipvs *ipvs);
int ip_vs_control_net_init(struct netns_ipvs *ipvs);
int ip_vs_protocol_net_init(struct netns_ipvs *ipvs);
int ip_vs_app_net_init(struct netns_ipvs *ipvs);
int ip_vs_conn_net_init(struct netns_ipvs *ipvs);
int ip_vs_sync_net_init(struct netns_ipvs *ipvs);
void ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_app_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_protocol_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_control_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_estimator_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_service_nets_cleanup(struct list_head *net_list);

/* IPVS application functions
 * (from ip_vs_app.c)
 */
#define IP_VS_APP_MAX_PORTS	8
struct ip_vs_app *register_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app);
void unregister_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app);
int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
void ip_vs_unbind_app(struct ip_vs_conn *cp);
int register_ip_vs_app_inc(struct netns_ipvs *ipvs, struct ip_vs_app *app, __u16
			   proto,
			   __u16 port);
int ip_vs_app_inc_get(struct ip_vs_app *inc);
void ip_vs_app_inc_put(struct ip_vs_app *inc);

int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb,
		      struct ip_vs_iphdr *ipvsh);
int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb,
		     struct ip_vs_iphdr *ipvsh);

int register_ip_vs_pe(struct ip_vs_pe *pe);
int unregister_ip_vs_pe(struct ip_vs_pe *pe);
struct ip_vs_pe *ip_vs_pe_getbyname(const char *name);
struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);

/* Use a #define to avoid all of module.h just for these trivial ops */
#define ip_vs_pe_get(pe)			\
	if (pe && pe->module)			\
		__module_get(pe->module);

#define ip_vs_pe_put(pe)			\
	if (pe && pe->module)			\
		module_put(pe->module);

/* IPVS protocol functions (from ip_vs_proto.c) */
int ip_vs_protocol_init(void);
void ip_vs_protocol_cleanup(void);
void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
int *ip_vs_create_timeout_table(int *table, int size);
void ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
			       const struct sk_buff *skb, int offset,
			       const char *msg);

extern struct ip_vs_protocol ip_vs_protocol_tcp;
extern struct ip_vs_protocol ip_vs_protocol_udp;
extern struct ip_vs_protocol ip_vs_protocol_icmp;
extern struct ip_vs_protocol ip_vs_protocol_esp;
extern struct ip_vs_protocol ip_vs_protocol_ah;
extern struct ip_vs_protocol ip_vs_protocol_sctp;

/* Registering/unregistering scheduler functions
 * (from ip_vs_sched.c)
 */
int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
int ip_vs_bind_scheduler(struct ip_vs_service *svc,
			 struct ip_vs_scheduler *scheduler);
void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
			    struct ip_vs_scheduler *sched);
struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
struct ip_vs_conn *
ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
	       struct ip_vs_proto_data *pd, int *ignored,
	       struct ip_vs_iphdr *iph);
int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
		struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph);

void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);

/* IPVS control data and functions (from ip_vs_ctl.c) */
extern struct ip_vs_stats ip_vs_stats;
extern int sysctl_ip_vs_sync_ver;

struct ip_vs_service *
ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark, __u16 protocol,
		   const union nf_inet_addr *vaddr, __be16 vport);

bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
			    const union nf_inet_addr *daddr, __be16 dport);

struct ip_vs_dest *
ip_vs_find_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
			const union nf_inet_addr *daddr, __be16 dport);
struct ip_vs_dest *ip_vs_find_tunnel(struct netns_ipvs *ipvs, int af,
				     const union nf_inet_addr *daddr,
				     __be16 tun_port);

int ip_vs_use_count_inc(void);
void ip_vs_use_count_dec(void);
int ip_vs_register_nl_ioctl(void);
void ip_vs_unregister_nl_ioctl(void);
int ip_vs_control_init(void);
void ip_vs_control_cleanup(void);
struct ip_vs_dest *
ip_vs_find_dest(struct netns_ipvs *ipvs, int svc_af, int dest_af,
		const union nf_inet_addr *daddr, __be16 dport,
		const union nf_inet_addr *vaddr, __be16 vport,
		__u16 protocol, __u32 fwmark, __u32 flags);
void ip_vs_try_bind_dest(struct ip_vs_conn *cp);

static inline void ip_vs_dest_hold(struct ip_vs_dest *dest)
{
	refcount_inc(&dest->refcnt);
}

static inline void ip_vs_dest_put(struct ip_vs_dest *dest)
{
	/* order prior stores to the dest before dropping our reference */
	smp_mb__before_atomic();
	refcount_dec(&dest->refcnt);
}

/* Drop a reference and free the dest when it was the last one */
static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
{
	if (refcount_dec_and_test(&dest->refcnt))
		kfree(dest);
}

/* IPVS sync daemon data and function prototypes
 * (from ip_vs_sync.c)
 */
int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *cfg,
		      int state);
int stop_sync_thread(struct netns_ipvs *ipvs, int state);
void ip_vs_sync_conn(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, int pkts);

/* IPVS rate estimator prototypes (from ip_vs_est.c) */
int ip_vs_start_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats);
void ip_vs_stop_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats);
void ip_vs_zero_estimator(struct ip_vs_stats *stats);
void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats);
void ip_vs_est_reload_start(struct netns_ipvs *ipvs);
int ip_vs_est_kthread_start(struct netns_ipvs *ipvs,
			    struct ip_vs_est_kt_data *kd);
void ip_vs_est_kthread_stop(struct ip_vs_est_kt_data *kd);

static inline void ip_vs_est_stopped_recalc(struct netns_ipvs *ipvs)
{
#ifdef CONFIG_SYSCTL
	/* Stop tasks while cpulist is empty or if disabled with flag */
	ipvs->est_stopped = !sysctl_run_estimation(ipvs) ||
			    (ipvs->est_cpulist_valid &&
			     cpumask_empty(sysctl_est_cpulist(ipvs)));
#endif
}

static inline bool ip_vs_est_stopped(struct netns_ipvs *ipvs)
{
#ifdef CONFIG_SYSCTL
	return ipvs->est_stopped;
#else
	return false;
#endif
}

/* Upper bound of estimator kthreads, scaled by the allowed CPUs but
 * never below 1.
 */
static inline int ip_vs_est_max_threads(struct netns_ipvs *ipvs)
{
	unsigned int limit = IPVS_EST_CPU_KTHREADS *
			     cpumask_weight(sysctl_est_cpulist(ipvs));

	return max(1U, limit);
}

/* Various IPVS packet transmitters (from ip_vs_xmit.c) */
int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		    struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		      struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		   struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		      struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		  struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		    struct ip_vs_protocol *pp, int offset,
		    unsigned int hooknum, struct ip_vs_iphdr *iph);
void ip_vs_dest_dst_rcu_free(struct rcu_head *head);

#ifdef CONFIG_IP_VS_IPV6
int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
			 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		      struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
			 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		     struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		       struct ip_vs_protocol *pp, int offset,
		       unsigned int hooknum, struct ip_vs_iphdr *iph);
#endif

#ifdef CONFIG_SYSCTL
/* This is a simple mechanism to ignore packets when
 * we are loaded.
Just set ip_vs_drop_rate to 'n' and 1952 * we start to drop 1/rate of the packets 1953 */ 1954 static inline int ip_vs_todrop(struct netns_ipvs *ipvs) 1955 { 1956 if (!ipvs->drop_rate) 1957 return 0; 1958 if (--ipvs->drop_counter > 0) 1959 return 0; 1960 ipvs->drop_counter = ipvs->drop_rate; 1961 return 1; 1962 } 1963 #else 1964 static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; } 1965 #endif 1966 1967 #ifdef CONFIG_SYSCTL 1968 /* Enqueue delayed work for expiring no dest connections 1969 * Only run when sysctl_expire_nodest=1 1970 */ 1971 static inline void ip_vs_enqueue_expire_nodest_conns(struct netns_ipvs *ipvs) 1972 { 1973 if (sysctl_expire_nodest_conn(ipvs)) 1974 queue_delayed_work(system_long_wq, 1975 &ipvs->expire_nodest_conn_work, 1); 1976 } 1977 1978 void ip_vs_expire_nodest_conn_flush(struct netns_ipvs *ipvs); 1979 #else 1980 static inline void ip_vs_enqueue_expire_nodest_conns(struct netns_ipvs *ipvs) {} 1981 #endif 1982 1983 #define IP_VS_DFWD_METHOD(dest) (atomic_read(&(dest)->conn_flags) & \ 1984 IP_VS_CONN_F_FWD_MASK) 1985 1986 /* ip_vs_fwd_tag returns the forwarding tag of the connection */ 1987 #define IP_VS_FWD_METHOD(cp) (cp->flags & IP_VS_CONN_F_FWD_MASK) 1988 1989 static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp) 1990 { 1991 char fwd; 1992 1993 switch (IP_VS_FWD_METHOD(cp)) { 1994 case IP_VS_CONN_F_MASQ: 1995 fwd = 'M'; break; 1996 case IP_VS_CONN_F_LOCALNODE: 1997 fwd = 'L'; break; 1998 case IP_VS_CONN_F_TUNNEL: 1999 fwd = 'T'; break; 2000 case IP_VS_CONN_F_DROUTE: 2001 fwd = 'R'; break; 2002 case IP_VS_CONN_F_BYPASS: 2003 fwd = 'B'; break; 2004 default: 2005 fwd = '?'; break; 2006 } 2007 return fwd; 2008 } 2009 2010 /* Check if connection uses double hashing */ 2011 static inline bool ip_vs_conn_use_hash2(struct ip_vs_conn *cp) 2012 { 2013 return IP_VS_FWD_METHOD(cp) == IP_VS_CONN_F_MASQ && 2014 !(cp->flags & IP_VS_CONN_F_TEMPLATE); 2015 } 2016 2017 void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp, 
		    struct ip_vs_conn *cp, int dir);

#ifdef CONFIG_IP_VS_IPV6
void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
		       struct ip_vs_conn *cp, int dir);
#endif

__sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);

/* Incrementally update a checksum when a 32-bit word changes from
 * @old to @new: fold ~old and new into @oldsum.
 */
static inline __wsum ip_vs_check_diff4(__be32 old, __be32 new, __wsum oldsum)
{
	__be32 diff[2] = { ~old, new };

	return csum_partial(diff, sizeof(diff), oldsum);
}

#ifdef CONFIG_IP_VS_IPV6
/* As ip_vs_check_diff4() but for a 128-bit (IPv6 address) change;
 * note the four words are folded in reverse order.
 */
static inline __wsum ip_vs_check_diff16(const __be32 *old, const __be32 *new,
					__wsum oldsum)
{
	__be32 diff[8] = { ~old[3], ~old[2], ~old[1], ~old[0],
			   new[3], new[2], new[1], new[0] };

	return csum_partial(diff, sizeof(diff), oldsum);
}
#endif

/* As ip_vs_check_diff4() but for a 16-bit (e.g. port) change */
static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum)
{
	__be16 diff[2] = { ~old, new };

	return csum_partial(diff, sizeof(diff), oldsum);
}

/* Forget current conntrack (unconfirmed) and attach notrack entry */
static inline void ip_vs_notrack(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

	if (ct) {
		/* Drop the skb's ct reference and mark the skb untracked */
		nf_conntrack_put(&ct->ct_general);
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
	}
#endif
}

#ifdef CONFIG_IP_VS_NFCT
/* Netfilter connection tracking
 * (from ip_vs_nfct.c)
 */
static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
{
#ifdef CONFIG_SYSCTL
	return ipvs->sysctl_conntrack;
#else
	return 0;
#endif
}

void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
			    int outin);
int ip_vs_confirm_conntrack(struct sk_buff *skb);
void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
			       struct ip_vs_conn *cp, u_int8_t
proto, 2084 const __be16 port, int from_rs); 2085 void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp); 2086 2087 #else 2088 2089 static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs) 2090 { 2091 return 0; 2092 } 2093 2094 static inline void ip_vs_update_conntrack(struct sk_buff *skb, 2095 struct ip_vs_conn *cp, int outin) 2096 { 2097 } 2098 2099 static inline int ip_vs_confirm_conntrack(struct sk_buff *skb) 2100 { 2101 return NF_ACCEPT; 2102 } 2103 2104 static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp) 2105 { 2106 } 2107 #endif /* CONFIG_IP_VS_NFCT */ 2108 2109 /* Using old conntrack that can not be redirected to another real server? */ 2110 static inline bool ip_vs_conn_uses_old_conntrack(struct ip_vs_conn *cp, 2111 struct sk_buff *skb) 2112 { 2113 #ifdef CONFIG_IP_VS_NFCT 2114 enum ip_conntrack_info ctinfo; 2115 struct nf_conn *ct; 2116 2117 ct = nf_ct_get(skb, &ctinfo); 2118 if (ct && nf_ct_is_confirmed(ct)) 2119 return true; 2120 #endif 2121 return false; 2122 } 2123 2124 static inline int ip_vs_register_conntrack(struct ip_vs_service *svc) 2125 { 2126 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 2127 int afmask = (svc->af == AF_INET6) ? 2 : 1; 2128 int ret = 0; 2129 2130 if (!(svc->conntrack_afmask & afmask)) { 2131 ret = nf_ct_netns_get(svc->ipvs->net, svc->af); 2132 if (ret >= 0) 2133 svc->conntrack_afmask |= afmask; 2134 } 2135 return ret; 2136 #else 2137 return 0; 2138 #endif 2139 } 2140 2141 static inline void ip_vs_unregister_conntrack(struct ip_vs_service *svc) 2142 { 2143 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 2144 int afmask = (svc->af == AF_INET6) ? 
2 : 1;

	if (svc->conntrack_afmask & afmask) {
		nf_ct_netns_put(svc->ipvs->net, svc->af);
		svc->conntrack_afmask &= ~afmask;
	}
#endif
}

int ip_vs_register_hooks(struct netns_ipvs *ipvs, unsigned int af);
void ip_vs_unregister_hooks(struct netns_ipvs *ipvs, unsigned int af);

/* Estimate the scheduling overhead of a dest from its connection counts */
static inline int
ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
{
	/* We think the overhead of processing active connections is 256
	 * times higher than that of inactive connections in average. (This
	 * 256 times might not be accurate, we will change it later) We
	 * use the following formula to estimate the overhead now:
	 *		  dest->activeconns*256 + dest->inactconns
	 */
	return (atomic_read(&dest->activeconns) << 8) +
		atomic_read(&dest->inactconns);
}

/* Declarations for the indirect-call wrappers used on the SNAT path */
#ifdef CONFIG_IP_VS_PROTO_TCP
INDIRECT_CALLABLE_DECLARE(int
	tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
			 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph));
#endif

#ifdef CONFIG_IP_VS_PROTO_UDP
INDIRECT_CALLABLE_DECLARE(int
	udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
			 struct ip_vs_conn *cp, struct ip_vs_iphdr *iph));
#endif
#endif	/* _NET_IP_VS_H */