// SPDX-License-Identifier: GPL-2.0
/* Generic nexthop implementation
 *
 * Copyright (c) 2017-19 Cumulus Networks
 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
 */

#include <linux/nexthop.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/lwtunnel.h>
#include <net/ndisc.h>
#include <net/nexthop.h>
#include <net/route.h>
#include <net/sock.h>

#define NH_RES_DEFAULT_IDLE_TIMER	(120 * HZ)
#define NH_RES_DEFAULT_UNBALANCED_TIMER	0	/* No forced rebalancing. */

static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo);

#define NH_DEV_HASHBITS  8
#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)

#define NHA_OP_FLAGS_DUMP_ALL (NHA_OP_FLAG_DUMP_STATS |		\
			       NHA_OP_FLAG_DUMP_HW_STATS)

static const struct nla_policy rtm_nh_policy_new[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_GROUP]		= { .type = NLA_BINARY },
	[NHA_GROUP_TYPE]	= { .type = NLA_U16 },
	[NHA_BLACKHOLE]		= { .type = NLA_FLAG },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GATEWAY]		= { .type = NLA_BINARY },
	[NHA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[NHA_ENCAP]		= { .type = NLA_NESTED },
	[NHA_FDB]		= { .type = NLA_FLAG },
	[NHA_RES_GROUP]		= { .type = NLA_NESTED },
	[NHA_HW_STATS_ENABLE]	= NLA_POLICY_MAX(NLA_U32, true),
};

static const struct nla_policy rtm_nh_policy_get[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
						  NHA_OP_FLAGS_DUMP_ALL),
};

static const struct nla_policy rtm_nh_policy_del[] = {
	[NHA_ID]		= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump[] = {
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GROUPS]		= { .type = NLA_FLAG },
	[NHA_MASTER]		= { .type = NLA_U32 },
	[NHA_FDB]		= { .type = NLA_FLAG },
	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
						  NHA_OP_FLAGS_DUMP_ALL),
};

static const struct nla_policy rtm_nh_res_policy_new[] = {
	[NHA_RES_GROUP_BUCKETS]			= { .type = NLA_U16 },
	[NHA_RES_GROUP_IDLE_TIMER]		= { .type = NLA_U32 },
	[NHA_RES_GROUP_UNBALANCED_TIMER]	= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_MASTER]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_res_bucket_policy_dump[] = {
	[NHA_RES_BUCKET_NH_ID]	= { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_get_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};

static const struct nla_policy rtm_nh_res_bucket_policy_get[] = {
	[NHA_RES_BUCKET_INDEX]	= { .type = NLA_U16 },
};

static bool nexthop_notifiers_is_empty(struct net *net)
{
	return !net->nexthop.notifier_chain.head;
}

static void
__nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
			       const struct nh_info *nhi)
{
	nh_info->dev = nhi->fib_nhc.nhc_dev;
	nh_info->gw_family = nhi->fib_nhc.nhc_gw_family;
	if (nh_info->gw_family == AF_INET)
		nh_info->ipv4 = nhi->fib_nhc.nhc_gw.ipv4;
	else if (nh_info->gw_family == AF_INET6)
		nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;

	nh_info->id = nhi->nh_parent->id;
	nh_info->is_reject = nhi->reject_nh;
	nh_info->is_fdb = nhi->fdb_nh;
	nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
}

static int nh_notifier_single_info_init(struct nh_notifier_info *info,
					const struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);

	info->type = NH_NOTIFIER_INFO_TYPE_SINGLE;
	info->nh = kzalloc_obj(*info->nh);
	if (!info->nh)
		return -ENOMEM;

	__nh_notifier_single_info_init(info->nh, nhi);

	return 0;
}

static void nh_notifier_single_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh);
}

static int nh_notifier_mpath_info_init(struct nh_notifier_info *info,
				       struct nh_group *nhg)
{
	u16 num_nh = nhg->num_nh;
	int i;

	info->type = NH_NOTIFIER_INFO_TYPE_GRP;
	info->nh_grp = kzalloc_flex(*info->nh_grp, nh_entries, num_nh);
	if (!info->nh_grp)
		return -ENOMEM;

	info->nh_grp->num_nh = num_nh;
	info->nh_grp->is_fdb = nhg->fdb_nh;
	info->nh_grp->hw_stats = nhg->hw_stats;

	for (i = 0; i < num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi;

		nhi = rtnl_dereference(nhge->nh->nh_info);
		info->nh_grp->nh_entries[i].weight = nhge->weight;
		__nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
					       nhi);
	}

	return 0;
}

static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
					   struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	u16 num_nh_buckets = res_table->num_nh_buckets;
	unsigned long size;
	u16 i;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_TABLE;
	size = struct_size(info->nh_res_table, nhs, num_nh_buckets);
	info->nh_res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO |
				       __GFP_NOWARN);
	if (!info->nh_res_table)
		return -ENOMEM;

	info->nh_res_table->num_nh_buckets = num_nh_buckets;
	info->nh_res_table->hw_stats = nhg->hw_stats;

	for (i = 0; i < num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;
		struct nh_info *nhi;

		nhge = rtnl_dereference(bucket->nh_entry);
		nhi = rtnl_dereference(nhge->nh->nh_info);
		__nh_notifier_single_info_init(&info->nh_res_table->nhs[i],
					       nhi);
	}

	return 0;
}

static int nh_notifier_grp_info_init(struct nh_notifier_info *info,
				     const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		return nh_notifier_mpath_info_init(info, nhg);
	else if (nhg->resilient)
		return nh_notifier_res_table_info_init(info, nhg);
	return -EINVAL;
}

static void nh_notifier_grp_info_fini(struct nh_notifier_info *info,
				      const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		kfree(info->nh_grp);
	else if (nhg->resilient)
		vfree(info->nh_res_table);
}

static int nh_notifier_info_init(struct nh_notifier_info *info,
				 const struct nexthop *nh)
{
	info->id = nh->id;

	if (nh->is_group)
		return nh_notifier_grp_info_init(info, nh);
	else
		return nh_notifier_single_info_init(info, nh);
}

static void nh_notifier_info_fini(struct nh_notifier_info *info,
				  const struct nexthop *nh)
{
	if (nh->is_group)
		nh_notifier_grp_info_fini(info, nh);
	else
		nh_notifier_single_info_fini(info);
}

static int call_nexthop_notifiers(struct net *net,
				  enum nexthop_event_type event_type,
				  struct nexthop *nh,
				  struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_info_init(&info, nh);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}

static int
nh_notifier_res_bucket_idle_timer_get(const struct nh_notifier_info *info,
				      bool force, unsigned int *p_idle_timer_ms)
{
	struct nh_res_table *res_table;
	struct nh_group *nhg;
	struct nexthop *nh;
	int err = 0;

	/* When 'force' is false, nexthop bucket replacement is performed
	 * because the bucket was deemed to be idle. In this case, capable
	 * listeners can choose to perform an atomic replacement: The bucket is
	 * only replaced if it is inactive. However, if the idle timer interval
	 * is smaller than the interval in which a listener is querying
	 * buckets' activity from the device, then atomic replacement should
	 * not be tried. Pass the idle timer value to listeners, so that they
	 * could determine which type of replacement to perform.
	 */
	if (force) {
		*p_idle_timer_ms = 0;
		return 0;
	}

	rcu_read_lock();

	nh = nexthop_find_by_id(info->net, info->id);
	if (!nh) {
		err = -EINVAL;
		goto out;
	}

	nhg = rcu_dereference(nh->nh_grp);
	res_table = rcu_dereference(nhg->res_table);
	*p_idle_timer_ms = jiffies_to_msecs(res_table->idle_timer);

out:
	rcu_read_unlock();

	return err;
}
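
/* Illustrative example of the comment above (hypothetical listener): a
 * driver that polls bucket activity from the device once per second
 * should only attempt an atomic replacement when the reported
 * idle_timer_ms exceeds its 1000 ms polling interval; otherwise its
 * activity data may be stale and it should fall back to an
 * unconditional, non-atomic replacement.
 */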

static int nh_notifier_res_bucket_info_init(struct nh_notifier_info *info,
					    u16 bucket_index, bool force,
					    struct nh_info *oldi,
					    struct nh_info *newi)
{
	unsigned int idle_timer_ms;
	int err;

	err = nh_notifier_res_bucket_idle_timer_get(info, force,
						    &idle_timer_ms);
	if (err)
		return err;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_BUCKET;
	info->nh_res_bucket = kzalloc_obj(*info->nh_res_bucket);
	if (!info->nh_res_bucket)
		return -ENOMEM;

	info->nh_res_bucket->bucket_index = bucket_index;
	info->nh_res_bucket->idle_timer_ms = idle_timer_ms;
	info->nh_res_bucket->force = force;
	__nh_notifier_single_info_init(&info->nh_res_bucket->old_nh, oldi);
	__nh_notifier_single_info_init(&info->nh_res_bucket->new_nh, newi);
	return 0;
}

static void nh_notifier_res_bucket_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh_res_bucket);
}

static int __call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					       u16 bucket_index, bool force,
					       struct nh_info *oldi,
					       struct nh_info *newi,
					       struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
		.id = nhg_id,
	};
	int err;

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_res_bucket_info_init(&info, bucket_index, force,
					       oldi, newi);
	if (err)
		return err;

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_BUCKET_REPLACE, &info);
	nh_notifier_res_bucket_info_fini(&info);

	return notifier_to_errno(err);
}

/* There are three users of RES_TABLE, and NHs etc. referenced from there:
 *
 * 1) a collection of callbacks for NH maintenance. This operates under
 *    RTNL,
 * 2) the delayed work that gradually balances the resilient table,
 * 3) and nexthop_select_path(), operating under RCU.
 *
 * Both the delayed work and the RTNL block are writers, and need to
 * maintain mutual exclusion. Since there are only two and well-known
 * writers for each table, the RTNL code can make sure it has exclusive
 * access thus:
 *
 * - Have the DW operate without locking;
 * - synchronously cancel the DW;
 * - do the writing;
 * - if the write was not actually a delete, call upkeep, which schedules
 *   DW again if necessary.
 *
 * The functions that are always called from the RTNL context use
 * rtnl_dereference(). The functions that can also be called from the DW do
 * a raw dereference and rely on the above mutual exclusion scheme.
 */
#define nh_res_dereference(p) (rcu_dereference_raw(p))
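
/* A minimal sketch of the RTNL-side write pattern described above, in
 * terms of helpers defined later in this file:
 *
 *	nh_res_table_cancel_upkeep(res_table);	// synchronously cancel DW
 *	// ...rewrite buckets/entries: now the only writer...
 *	nh_res_table_upkeep(res_table, ...);	// reschedules DW if needed
 */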

static int call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					     u16 bucket_index, bool force,
					     struct nexthop *old_nh,
					     struct nexthop *new_nh,
					     struct netlink_ext_ack *extack)
{
	struct nh_info *oldi = nh_res_dereference(old_nh->nh_info);
	struct nh_info *newi = nh_res_dereference(new_nh->nh_info);

	return __call_nexthop_res_bucket_notifiers(net, nhg_id, bucket_index,
						   force, oldi, newi, extack);
}

static int call_nexthop_res_table_notifiers(struct net *net, struct nexthop *nh,
					    struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
		.id = nh->id,
	};
	struct nh_group *nhg;
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	/* At this point, the nexthop buckets are still not populated. Only
	 * emit a notification with the logical nexthops, so that a listener
	 * could potentially veto it in case of unsupported configuration.
	 */
	nhg = rtnl_dereference(nh->nh_grp);
	err = nh_notifier_mpath_info_init(&info, nhg);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
					   &info);
	kfree(info.nh_grp);

	return notifier_to_errno(err);
}

static int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
				 enum nexthop_event_type event_type,
				 struct nexthop *nh,
				 struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	err = nh_notifier_info_init(&info, nh);
	if (err)
		return err;

	err = nb->notifier_call(nb, event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}

static unsigned int nh_dev_hashfn(unsigned int val)
{
	unsigned int mask = NH_DEV_HASHSIZE - 1;

	return (val ^
		(val >> NH_DEV_HASHBITS) ^
		(val >> (NH_DEV_HASHBITS * 2))) & mask;
}
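
/* Worked example: for ifindex 0x12345 the folded bytes are 0x45, 0x23
 * and 0x01, so the bucket is 0x45 ^ 0x23 ^ 0x01 = 0x67.  XOR-ing the
 * higher-order bits in lets ifindexes that differ only above the low
 * NH_DEV_HASHBITS bits still land in different buckets.
 */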

static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
{
	struct net_device *dev = nhi->fib_nhc.nhc_dev;
	struct hlist_head *head;
	unsigned int hash;

	WARN_ON(!dev);

	hash = nh_dev_hashfn(dev->ifindex);
	head = &net->nexthop.devhash[hash];
	hlist_add_head(&nhi->dev_hash, head);
}

static void nexthop_free_group(struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	nhg = rcu_dereference_raw(nh->nh_grp);
	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		WARN_ON(!list_empty(&nhge->nh_list));
		free_percpu(nhge->stats);
		nexthop_put(nhge->nh);
	}

	WARN_ON(nhg->spare == nhg);

	if (nhg->resilient)
		vfree(rcu_dereference_raw(nhg->res_table));

	kfree(nhg->spare);
	kfree(nhg);
}

static void nexthop_free_single(struct nexthop *nh)
{
	struct nh_info *nhi;

	nhi = rcu_dereference_raw(nh->nh_info);
	switch (nhi->family) {
	case AF_INET:
		fib_nh_release(nh->net, &nhi->fib_nh);
		break;
	case AF_INET6:
		ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
		break;
	}
	kfree(nhi);
}

void nexthop_free_rcu(struct rcu_head *head)
{
	struct nexthop *nh = container_of(head, struct nexthop, rcu);

	if (nh->is_group)
		nexthop_free_group(nh);
	else
		nexthop_free_single(nh);

	kfree(nh);
}
EXPORT_SYMBOL_GPL(nexthop_free_rcu);

static struct nexthop *nexthop_alloc(void)
{
	struct nexthop *nh;

	nh = kzalloc_obj(struct nexthop);
	if (nh) {
		INIT_LIST_HEAD(&nh->fi_list);
		INIT_LIST_HEAD(&nh->f6i_list);
		INIT_LIST_HEAD(&nh->grp_list);
		INIT_LIST_HEAD(&nh->fdb_list);
		spin_lock_init(&nh->lock);
	}
	return nh;
}

static struct nh_group *nexthop_grp_alloc(u16 num_nh)
{
	struct nh_group *nhg;

	nhg = kzalloc_flex(*nhg, nh_entries, num_nh);
	if (nhg)
		nhg->num_nh = num_nh;

	return nhg;
}

static void nh_res_table_upkeep_dw(struct work_struct *work);

static struct nh_res_table *
nexthop_res_table_alloc(struct net *net, u32 nhg_id, struct nh_config *cfg)
{
	const u16 num_nh_buckets = cfg->nh_grp_res_num_buckets;
	struct nh_res_table *res_table;
	unsigned long size;

	size = struct_size(res_table, nh_buckets, num_nh_buckets);
	res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
	if (!res_table)
		return NULL;

	res_table->net = net;
	res_table->nhg_id = nhg_id;
	INIT_DELAYED_WORK(&res_table->upkeep_dw, &nh_res_table_upkeep_dw);
	INIT_LIST_HEAD(&res_table->uw_nh_entries);
	res_table->idle_timer = cfg->nh_grp_res_idle_timer;
	res_table->unbalanced_timer = cfg->nh_grp_res_unbalanced_timer;
	res_table->num_nh_buckets = num_nh_buckets;
	return res_table;
}

static void nh_base_seq_inc(struct net *net)
{
	while (++net->nexthop.seq == 0)
		;
}

/* no reference taken; rcu lock or rtnl must be held */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
{
	struct rb_node **pp, *parent = NULL, *next;

	pp = &net->nexthop.rb_root.rb_node;
	while (1) {
		struct nexthop *nh;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (id < nh->id)
			pp = &next->rb_left;
		else if (id > nh->id)
			pp = &next->rb_right;
		else
			return nh;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_find_by_id);

/* used for auto id allocation; called with rtnl held */
static u32 nh_find_unused_id(struct net *net)
{
	u32 id_start = net->nexthop.last_id_allocated;

	while (1) {
		net->nexthop.last_id_allocated++;
		if (net->nexthop.last_id_allocated == id_start)
			break;

		if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
			return net->nexthop.last_id_allocated;
	}
	return 0;
}
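
/* E.g. with last_id_allocated == 5 and ids 6 and 7 already in use, the
 * loop above hands out 8.  Only if the search wraps all the way around
 * to its starting point without finding a free id does it return 0,
 * which is not a valid nexthop id.
 */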

static void nh_res_time_set_deadline(unsigned long next_time,
				     unsigned long *deadline)
{
	if (time_before(next_time, *deadline))
		*deadline = next_time;
}

static clock_t nh_res_table_unbalanced_time(struct nh_res_table *res_table)
{
	if (list_empty(&res_table->uw_nh_entries))
		return 0;
	return jiffies_delta_to_clock_t(jiffies - res_table->unbalanced_since);
}

static int nla_put_nh_group_res(struct sk_buff *skb, struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	struct nlattr *nest;

	nest = nla_nest_start(skb, NHA_RES_GROUP);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u16(skb, NHA_RES_GROUP_BUCKETS,
			res_table->num_nh_buckets) ||
	    nla_put_u32(skb, NHA_RES_GROUP_IDLE_TIMER,
			jiffies_to_clock_t(res_table->idle_timer)) ||
	    nla_put_u32(skb, NHA_RES_GROUP_UNBALANCED_TIMER,
			jiffies_to_clock_t(res_table->unbalanced_timer)) ||
	    nla_put_u64_64bit(skb, NHA_RES_GROUP_UNBALANCED_TIME,
			      nh_res_table_unbalanced_time(res_table),
			      NHA_RES_GROUP_PAD))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void nh_grp_entry_stats_inc(struct nh_grp_entry *nhge)
{
	struct nh_grp_entry_stats *cpu_stats;

	cpu_stats = get_cpu_ptr(nhge->stats);
	u64_stats_update_begin(&cpu_stats->syncp);
	u64_stats_inc(&cpu_stats->packets);
	u64_stats_update_end(&cpu_stats->syncp);
	put_cpu_ptr(cpu_stats);
}

static void nh_grp_entry_stats_read(struct nh_grp_entry *nhge,
				    u64 *ret_packets)
{
	int i;

	*ret_packets = 0;

	for_each_possible_cpu(i) {
		struct nh_grp_entry_stats *cpu_stats;
		unsigned int start;
		u64 packets;

		cpu_stats = per_cpu_ptr(nhge->stats, i);
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			packets = u64_stats_read(&cpu_stats->packets);
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		*ret_packets += packets;
	}
}

static int nh_notifier_grp_hw_stats_init(struct nh_notifier_info *info,
					 const struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	ASSERT_RTNL();
	nhg = rtnl_dereference(nh->nh_grp);

	info->id = nh->id;
	info->type = NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS;
	info->nh_grp_hw_stats = kzalloc_flex(*info->nh_grp_hw_stats, stats,
					     nhg->num_nh);
	if (!info->nh_grp_hw_stats)
		return -ENOMEM;

	info->nh_grp_hw_stats->num_nh = nhg->num_nh;
	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		info->nh_grp_hw_stats->stats[i].id = nhge->nh->id;
	}

	return 0;
}

static void nh_notifier_grp_hw_stats_fini(struct nh_notifier_info *info)
{
	kfree(info->nh_grp_hw_stats);
}

void nh_grp_hw_stats_report_delta(struct nh_notifier_grp_hw_stats_info *info,
				  unsigned int nh_idx,
				  u64 delta_packets)
{
	info->hw_stats_used = true;
	info->stats[nh_idx].packets += delta_packets;
}
EXPORT_SYMBOL(nh_grp_hw_stats_report_delta);
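
/* Listener-side sketch (hypothetical driver): in its handler for
 * NEXTHOP_EVENT_HW_STATS_REPORT_DELTA, a driver reads its hardware
 * counters and feeds back each entry's packet delta since the last
 * report:
 *
 *	for (i = 0; i < info->nh_grp_hw_stats->num_nh; i++)
 *		nh_grp_hw_stats_report_delta(info->nh_grp_hw_stats, i,
 *					     delta_packets[i]);
 */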

static void nh_grp_hw_stats_apply_update(struct nexthop *nh,
					 struct nh_notifier_info *info)
{
	struct nh_group *nhg;
	int i;

	ASSERT_RTNL();
	nhg = rtnl_dereference(nh->nh_grp);

	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		nhge->packets_hw += info->nh_grp_hw_stats->stats[i].packets;
	}
}

static int nh_grp_hw_stats_update(struct nexthop *nh, bool *hw_stats_used)
{
	struct nh_notifier_info info = {
		.net = nh->net,
	};
	struct net *net = nh->net;
	int err;

	if (nexthop_notifiers_is_empty(net)) {
		*hw_stats_used = false;
		return 0;
	}

	err = nh_notifier_grp_hw_stats_init(&info, nh);
	if (err)
		return err;

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_HW_STATS_REPORT_DELTA,
					   &info);

	/* Cache whatever we got, even if there was an error, otherwise the
	 * successful stats retrievals would get lost.
	 */
	nh_grp_hw_stats_apply_update(nh, &info);
	*hw_stats_used = info.nh_grp_hw_stats->hw_stats_used;

	nh_notifier_grp_hw_stats_fini(&info);
	return notifier_to_errno(err);
}

static int nla_put_nh_group_stats_entry(struct sk_buff *skb,
					struct nh_grp_entry *nhge,
					u32 op_flags)
{
	struct nlattr *nest;
	u64 packets;

	nh_grp_entry_stats_read(nhge, &packets);

	nest = nla_nest_start(skb, NHA_GROUP_STATS_ENTRY);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, NHA_GROUP_STATS_ENTRY_ID, nhge->nh->id) ||
	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS,
			 packets + nhge->packets_hw))
		goto nla_put_failure;

	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS_HW,
			 nhge->packets_hw))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int nla_put_nh_group_stats(struct sk_buff *skb, struct nexthop *nh,
				  u32 op_flags)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	struct nlattr *nest;
	bool hw_stats_used;
	int err;
	int i;

	if (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats))
		goto err_out;

	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
	    nhg->hw_stats) {
		err = nh_grp_hw_stats_update(nh, &hw_stats_used);
		if (err)
			goto out;

		if (nla_put_u32(skb, NHA_HW_STATS_USED, hw_stats_used))
			goto err_out;
	}

	nest = nla_nest_start(skb, NHA_GROUP_STATS);
	if (!nest)
		goto err_out;

	for (i = 0; i < nhg->num_nh; i++)
		if (nla_put_nh_group_stats_entry(skb, &nhg->nh_entries[i],
						 op_flags))
			goto cancel_out;

	nla_nest_end(skb, nest);
	return 0;

cancel_out:
	nla_nest_cancel(skb, nest);
err_out:
	err = -EMSGSIZE;
out:
	return err;
}

static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
			    u32 op_flags, u32 *resp_op_flags)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	struct nexthop_grp *p;
	size_t len = nhg->num_nh * sizeof(*p);
	struct nlattr *nla;
	u16 group_type = 0;
	u16 weight;
	int i;

	*resp_op_flags |= NHA_OP_FLAG_RESP_GRP_RESVD_0;

	if (nhg->hash_threshold)
		group_type = NEXTHOP_GRP_TYPE_MPATH;
	else if (nhg->resilient)
		group_type = NEXTHOP_GRP_TYPE_RES;

	if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
		goto nla_put_failure;

	nla = nla_reserve(skb, NHA_GROUP, len);
	if (!nla)
		goto nla_put_failure;

	p = nla_data(nla);
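
	/* On the wire, each entry's weight is stored zero-based and, with
	 * 16-bit weights, split across the legacy 8-bit "weight" field
	 * and "weight_high": e.g. a configured weight of 300 is encoded
	 * as 299 == 0x12b, i.e. weight = 0x2b and weight_high = 0x01.
	 */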
	for (i = 0; i < nhg->num_nh; ++i) {
		weight = nhg->nh_entries[i].weight - 1;

		*p++ = (struct nexthop_grp) {
			.id = nhg->nh_entries[i].nh->id,
			.weight = weight,
			.weight_high = weight >> 8,
		};
	}

	if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
		goto nla_put_failure;

	if (op_flags & NHA_OP_FLAG_DUMP_STATS &&
	    nla_put_nh_group_stats(skb, nh, op_flags))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
			int event, u32 portid, u32 seq, unsigned int nlflags,
			u32 op_flags)
{
	struct fib6_nh *fib6_nh;
	struct fib_nh *fib_nh;
	struct nlmsghdr *nlh;
	struct nh_info *nhi;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = nh->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
		u32 resp_op_flags = 0;

		if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
		if (nla_put_nh_group(skb, nh, op_flags, &resp_op_flags) ||
		    nla_put_u32(skb, NHA_OP_FLAGS, resp_op_flags))
			goto nla_put_failure;
		goto out;
	}

	nhi = rtnl_dereference(nh->nh_info);
	nhm->nh_family = nhi->family;
	if (nhi->reject_nh) {
		if (nla_put_flag(skb, NHA_BLACKHOLE))
			goto nla_put_failure;
		goto out;
	} else if (nhi->fdb_nh) {
		if (nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
	} else {
		const struct net_device *dev;

		dev = nhi->fib_nhc.nhc_dev;
		if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
			goto nla_put_failure;
	}

	nhm->nh_scope = nhi->fib_nhc.nhc_scope;
	switch (nhi->family) {
	case AF_INET:
		fib_nh = &nhi->fib_nh;
		if (fib_nh->fib_nh_gw_family &&
		    nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
			goto nla_put_failure;
		break;

	case AF_INET6:
		fib6_nh = &nhi->fib6_nh;
		if (fib6_nh->fib_nh_gw_family &&
		    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
			goto nla_put_failure;
		break;
	}

	if (lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
				NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
		goto nla_put_failure;

out:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static size_t nh_nlmsg_size_grp_res(struct nh_group *nhg)
{
	return nla_total_size(0) +	/* NHA_RES_GROUP */
		nla_total_size(2) +	/* NHA_RES_GROUP_BUCKETS */
		nla_total_size(4) +	/* NHA_RES_GROUP_IDLE_TIMER */
		nla_total_size(4) +	/* NHA_RES_GROUP_UNBALANCED_TIMER */
		nla_total_size_64bit(8);/* NHA_RES_GROUP_UNBALANCED_TIME */
}

static size_t nh_nlmsg_size_grp(struct nexthop *nh, u32 op_flags)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;
	size_t tot = nla_total_size(sz) +
		nla_total_size(2) +	/* NHA_GROUP_TYPE */
		nla_total_size(0);	/* NHA_FDB */

	if (nhg->resilient)
		tot += nh_nlmsg_size_grp_res(nhg);

	if (op_flags & NHA_OP_FLAG_DUMP_STATS) {
		tot += nla_total_size(0) +	/* NHA_GROUP_STATS */
		       nla_total_size(4);	/* NHA_HW_STATS_ENABLE */
		tot += nhg->num_nh *
		       (nla_total_size(0) +	/* NHA_GROUP_STATS_ENTRY */
			nla_total_size(4) +	/* NHA_GROUP_STATS_ENTRY_ID */
			nla_total_size_64bit(8)); /* NHA_GROUP_STATS_ENTRY_PACKETS */

		if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS) {
			tot += nhg->num_nh *
			       nla_total_size_64bit(8); /* NHA_GROUP_STATS_ENTRY_PACKETS_HW */
			tot += nla_total_size(4); /* NHA_HW_STATS_USED */
		}
	}

	return tot;
}

static size_t nh_nlmsg_size_single(struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
	size_t sz;

	/* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
	 * are mutually exclusive
	 */
	sz = nla_total_size(4);  /* NHA_OIF */

	switch (nhi->family) {
	case AF_INET:
		if (nhi->fib_nh.fib_nh_gw_family)
			sz += nla_total_size(4);  /* NHA_GATEWAY */
		break;

	case AF_INET6:
		/* NHA_GATEWAY */
		if (nhi->fib6_nh.fib_nh_gw_family)
			sz += nla_total_size(sizeof(const struct in6_addr));
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate) {
		sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
		sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
	}

	return sz;
}

static size_t nh_nlmsg_size(struct nexthop *nh, u32 op_flags)
{
	size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));

	sz += nla_total_size(4); /* NHA_ID */

	if (nh->is_group)
		sz += nh_nlmsg_size_grp(nh, op_flags) +
		      nla_total_size(4) +	/* NHA_OP_FLAGS */
		      0;
	else
		sz += nh_nlmsg_size_single(nh);

	return sz;
}

static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
{
	unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(nh_nlmsg_size(nh, 0), gfp_any());
	if (!skb)
		goto errout;

	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
		    info->nlh, gfp_any());
	return;
errout:
	rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
}

static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket)
{
	return (unsigned long)atomic_long_read(&bucket->used_time);
}

static unsigned long
nh_res_bucket_idle_point(const struct nh_res_table *res_table,
			 const struct nh_res_bucket *bucket,
			 unsigned long now)
{
	unsigned long time = nh_res_bucket_used_time(bucket);

	/* Bucket was not used since it was migrated. The idle time is now. */
	if (time == bucket->migrated_time)
		return now;

	return time + res_table->idle_timer;
}

static unsigned long
nh_res_table_unb_point(const struct nh_res_table *res_table)
{
	return res_table->unbalanced_since + res_table->unbalanced_timer;
}

static void nh_res_bucket_set_idle(const struct nh_res_table *res_table,
				   struct nh_res_bucket *bucket)
{
	unsigned long now = jiffies;

	atomic_long_set(&bucket->used_time, (long)now);
	bucket->migrated_time = now;
}

static void nh_res_bucket_set_busy(struct nh_res_bucket *bucket)
{
	atomic_long_set(&bucket->used_time, (long)jiffies);
}

static clock_t nh_res_bucket_idle_time(const struct nh_res_bucket *bucket)
{
	unsigned long used_time = nh_res_bucket_used_time(bucket);

	return jiffies_delta_to_clock_t(jiffies - used_time);
}

static int nh_fill_res_bucket(struct sk_buff *skb, struct nexthop *nh,
			      struct nh_res_bucket *bucket, u16 bucket_index,
			      int event, u32 portid, u32 seq,
			      unsigned int nlflags,
			      struct netlink_ext_ack *extack)
{
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nlmsghdr *nlh;
	struct nlattr *nest;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = bucket->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	nest = nla_nest_start(skb, NHA_RES_BUCKET);
	if (!nest)
		goto nla_put_failure;

	if (nla_put_u16(skb, NHA_RES_BUCKET_INDEX, bucket_index) ||
	    nla_put_u32(skb, NHA_RES_BUCKET_NH_ID, nhge->nh->id) ||
	    nla_put_u64_64bit(skb, NHA_RES_BUCKET_IDLE_TIME,
			      nh_res_bucket_idle_time(bucket),
			      NHA_RES_BUCKET_PAD))
		goto nla_put_failure_nest;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_nest:
	nla_nest_cancel(skb, nest);
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static void nexthop_bucket_notify(struct nh_res_table *res_table,
				  u16 bucket_index)
{
	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nexthop *nh = nhge->nh_parent;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		goto errout;

	err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
				 RTM_NEWNEXTHOPBUCKET, 0, 0, NLM_F_REPLACE,
				 NULL);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, nh->net, 0, RTNLGRP_NEXTHOP, NULL, GFP_KERNEL);
	return;
errout:
	rtnl_set_sk_err(nh->net, RTNLGRP_NEXTHOP, err);
}

static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
			   bool *is_fdb, struct netlink_ext_ack *extack)
{
	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		/* Nesting groups within groups is not supported. */
		if (nhg->hash_threshold) {
			NL_SET_ERR_MSG(extack,
				       "Hash-threshold group can not be a nexthop within a group");
			return false;
		}
		if (nhg->resilient) {
			NL_SET_ERR_MSG(extack,
				       "Resilient group can not be a nexthop within a group");
			return false;
		}
		*is_fdb = nhg->fdb_nh;
	} else {
		struct nh_info *nhi = rtnl_dereference(nh->nh_info);

		if (nhi->reject_nh && npaths > 1) {
			NL_SET_ERR_MSG(extack,
				       "Blackhole nexthop can not be used in a group with more than 1 path");
			return false;
		}
		*is_fdb = nhi->fdb_nh;
	}

	return true;
}

static int nh_check_attr_fdb_group(struct nexthop *nh, u8 *nh_family,
				   struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;

	nhi = rtnl_dereference(nh->nh_info);

	if (!nhi->fdb_nh) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group can only have fdb nexthops");
		return -EINVAL;
	}

	if (*nh_family == AF_UNSPEC) {
		*nh_family = nhi->family;
	} else if (*nh_family != nhi->family) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group cannot have mixed family nexthops");
		return -EINVAL;
	}

	return 0;
}

static int nh_check_attr_group(struct net *net,
			       struct nlattr *tb[], size_t tb_size,
			       u16 nh_grp_type, struct netlink_ext_ack *extack)
{
	unsigned int len = nla_len(tb[NHA_GROUP]);
	struct nexthop_grp *nhg;
	unsigned int i, j;

	if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
		NL_SET_ERR_MSG(extack,
			       "Invalid length for nexthop group attribute");
		return -EINVAL;
	}

	/* convert len to number of nexthop ids */
	len /= sizeof(*nhg);

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		if (nhg[i].resvd2) {
			NL_SET_ERR_MSG(extack, "Reserved field in nexthop_grp must be 0");
			return -EINVAL;
		}
		if (nexthop_grp_weight(&nhg[i]) == 0) {
			/* 0xffff got passed in, representing weight of 0x10000,
			 * which is too heavy.
			 */
			NL_SET_ERR_MSG(extack, "Invalid value for weight");
			return -EINVAL;
		}
		for (j = i + 1; j < len; ++j) {
			if (nhg[i].id == nhg[j].id) {
				NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group");
				return -EINVAL;
			}
		}
	}

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = NHA_GROUP_TYPE + 1; i < tb_size; ++i) {
		if (!tb[i])
			continue;
		switch (i) {
		case NHA_HW_STATS_ENABLE:
		case NHA_FDB:
			continue;
		case NHA_RES_GROUP:
			if (nh_grp_type == NEXTHOP_GRP_TYPE_RES)
				continue;
			break;
		}
		NL_SET_ERR_MSG(extack,
			       "No other attributes can be set in nexthop groups");
		return -EINVAL;
	}

	return 0;
}

static int nh_check_attr_group_rtnl(struct net *net, struct nlattr *tb[],
				    struct netlink_ext_ack *extack)
{
	u8 nh_family = AF_UNSPEC;
	struct nexthop_grp *nhg;
	unsigned int len;
	unsigned int i;
	u8 nhg_fdb;

	len = nla_len(tb[NHA_GROUP]) / sizeof(*nhg);
	nhg = nla_data(tb[NHA_GROUP]);
	nhg_fdb = !!tb[NHA_FDB];

	for (i = 0; i < len; i++) {
		struct nexthop *nh;
		bool is_fdb_nh;

		nh = nexthop_find_by_id(net, nhg[i].id);
		if (!nh) {
			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
			return -EINVAL;
		}
		if (!valid_group_nh(nh, len, &is_fdb_nh, extack))
			return -EINVAL;

		if (nhg_fdb && nh_check_attr_fdb_group(nh, &nh_family, extack))
			return -EINVAL;

		if (!nhg_fdb && is_fdb_nh) {
			NL_SET_ERR_MSG(extack, "Non FDB nexthop group cannot have fdb nexthops");
			return -EINVAL;
		}
	}

	return 0;
}

static bool ipv6_good_nh(const struct fib6_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock();

	n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
	if (n)
		state = READ_ONCE(n->nud_state);

	rcu_read_unlock();

	return !!(state & NUD_VALID);
}

static bool ipv4_good_nh(const struct fib_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock();

	n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
				      (__force u32)nh->fib_nh_gw4);
	if (n)
		state = READ_ONCE(n->nud_state);

	rcu_read_unlock();

	return !!(state & NUD_VALID);
}

static bool nexthop_is_good_nh(const struct nexthop *nh)
{
	struct nh_info *nhi = rcu_dereference(nh->nh_info);

	switch (nhi->family) {
	case AF_INET:
		return ipv4_good_nh(&nhi->fib_nh);
	case AF_INET6:
		return ipv6_good_nh(&nhi->fib6_nh);
	}

	return false;
}

static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash)
{
	int i;

	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		if (hash > atomic_read(&nhge->hthr.upper_bound))
			continue;

		nh_grp_entry_stats_inc(nhge);
		return nhge->nh;
	}

	WARN_ON_ONCE(1);
	return NULL;
}

static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
{
	struct nh_grp_entry *nhge0 = NULL;
	int i;

	if (nhg->fdb_nh)
		return nexthop_select_path_fdb(nhg, hash);

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		/* nexthops always check whether the neighbour is good and
		 * do not rely on a sysctl for this behavior
		 */
		if (!nexthop_is_good_nh(nhge->nh))
			continue;

		if (!nhge0)
			nhge0 = nhge;

		if (hash > atomic_read(&nhge->hthr.upper_bound))
			continue;

		nh_grp_entry_stats_inc(nhge);
		return nhge->nh;
	}

	if (!nhge0)
		nhge0 = &nhg->nh_entries[0];
	nh_grp_entry_stats_inc(nhge0);
	return nhge0->nh;
}
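
/* In the hash-threshold scheme each live entry owns a slice of the hash
 * space proportional to its weight, with hthr.upper_bound marking the
 * end of its slice (maintained by the group rebalance code elsewhere in
 * this file).  E.g. with two entries of weights 1 and 3, a hash in
 * roughly the lowest quarter of the space selects the first entry and
 * anything above it the second, so selection is a single pass over the
 * array.
 */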

static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
{
	struct nh_res_table *res_table = rcu_dereference(nhg->res_table);
	u16 bucket_index = hash % res_table->num_nh_buckets;
	struct nh_res_bucket *bucket;
	struct nh_grp_entry *nhge;

	/* nexthop_select_path() is expected to return a non-NULL value, so
	 * skip protocol validation and just hand out whatever there is.
	 */
	bucket = &res_table->nh_buckets[bucket_index];
	nh_res_bucket_set_busy(bucket);
	nhge = rcu_dereference(bucket->nh_entry);
	nh_grp_entry_stats_inc(nhge);
	return nhge->nh;
}

struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
{
	struct nh_group *nhg;

	if (!nh->is_group)
		return nh;

	nhg = rcu_dereference(nh->nh_grp);
	if (nhg->hash_threshold)
		return nexthop_select_path_hthr(nhg, hash);
	else if (nhg->resilient)
		return nexthop_select_path_res(nhg, hash);

	/* Unreachable. */
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_select_path);
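
/* Typical caller sketch (IPv4 multipath forwarding, under RCU; see
 * fib_select_multipath() for the real call site):
 *
 *	hash = fib_multipath_hash(net, fl4, skb, NULL);
 *	nh = nexthop_select_path(fi->nh, hash);
 */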

int nexthop_for_each_fib6_nh(struct nexthop *nh,
			     int (*cb)(struct fib6_nh *nh, void *arg),
			     void *arg)
{
	struct nh_info *nhi;
	int err;

	if (nh->is_group) {
		struct nh_group *nhg;
		int i;

		nhg = rcu_dereference_rtnl(nh->nh_grp);
		for (i = 0; i < nhg->num_nh; i++) {
			struct nh_grp_entry *nhge = &nhg->nh_entries[i];

			nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
			err = cb(&nhi->fib6_nh, arg);
			if (err)
				return err;
		}
	} else {
		nhi = rcu_dereference_rtnl(nh->nh_info);
		err = cb(&nhi->fib6_nh, arg);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);

static int check_src_addr(const struct in6_addr *saddr,
			  struct netlink_ext_ack *extack)
{
	if (!ipv6_addr_any(saddr)) {
		NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
		return -EINVAL;
	}
	return 0;
}

int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
		       struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	bool is_fdb_nh;

	/* fib6_src is unique to a fib6_info and limits the ability to cache
	 * routes in fib6_nh within a nexthop that is potentially shared
	 * across multiple fib entries. If the config wants to use source
	 * routing it can not use nexthop objects. mlxsw also does not allow
	 * fib6_src on routes.
	 */
	if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
		return -EINVAL;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rcu_dereference_rtnl(nh->nh_grp);
		if (nhg->has_v4)
			goto no_v4_nh;
		is_fdb_nh = nhg->fdb_nh;
	} else {
		nhi = rcu_dereference_rtnl(nh->nh_info);
		if (nhi->family == AF_INET)
			goto no_v4_nh;
		is_fdb_nh = nhi->fdb_nh;
	}

	if (is_fdb_nh) {
		NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
		return -EINVAL;
	}

	return 0;
no_v4_nh:
	NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(fib6_check_nexthop);

/* if existing nexthop has ipv6 routes linked to it, need
 * to verify this new spec works with ipv6
 */
static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new,
			      struct netlink_ext_ack *extack)
{
	struct fib6_info *f6i;

	if (list_empty(&old->f6i_list))
		return 0;

	list_for_each_entry(f6i, &old->f6i_list, nh_list) {
		if (check_src_addr(&f6i->fib6_src.addr, extack) < 0)
			return -EINVAL;
	}

	return fib6_check_nexthop(new, NULL, extack);
}

static int nexthop_check_scope(struct nh_info *nhi, u8 scope,
			       struct netlink_ext_ack *extack)
{
	if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) {
		NL_SET_ERR_MSG(extack,
			       "Route with host scope can not have a gateway");
		return -EINVAL;
	}

	if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) {
		NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop");
		return -EINVAL;
	}

	return 0;
}

/* Invoked by fib add code to verify nexthop by id is ok with
 * config for prefix; parts of fib_check_nh not done when nexthop
 * object is used.
 */
int fib_check_nexthop(struct nexthop *nh, u8 scope,
		      struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	int err = 0;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->fdb_nh) {
			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
			err = -EINVAL;
			goto out;
		}

		if (scope == RT_SCOPE_HOST) {
			NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops");
			err = -EINVAL;
			goto out;
		}

		/* all nexthops in a group have the same scope */
		nhi = rtnl_dereference(nhg->nh_entries[0].nh->nh_info);
		err = nexthop_check_scope(nhi, scope, extack);
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->fdb_nh) {
			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
			err = -EINVAL;
			goto out;
		}
		err = nexthop_check_scope(nhi, scope, extack);
	}

out:
	return err;
}

static int fib_check_nh_list(struct nexthop *old, struct nexthop *new,
			     struct netlink_ext_ack *extack)
{
	struct fib_info *fi;

	list_for_each_entry(fi, &old->fi_list, nh_list) {
		int err;

		err = fib_check_nexthop(new, fi->fib_scope, extack);
		if (err)
			return err;
	}
	return 0;
}

static bool nh_res_nhge_is_balanced(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets == nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_ow(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets > nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_uw(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets < nhge->res.wants_buckets;
}

static bool nh_res_table_is_balanced(const struct nh_res_table *res_table)
{
	return list_empty(&res_table->uw_nh_entries);
}

static void nh_res_bucket_unset_nh(struct nh_res_bucket *bucket)
{
	struct nh_grp_entry *nhge;

	if (bucket->occupied) {
		nhge = nh_res_dereference(bucket->nh_entry);
		nhge->res.count_buckets--;
		bucket->occupied = false;
	}
}

static void nh_res_bucket_set_nh(struct nh_res_bucket *bucket,
				 struct nh_grp_entry *nhge)
{
	nh_res_bucket_unset_nh(bucket);

	bucket->occupied = true;
	rcu_assign_pointer(bucket->nh_entry, nhge);
	nhge->res.count_buckets++;
}

static bool nh_res_bucket_should_migrate(struct nh_res_table *res_table,
					 struct nh_res_bucket *bucket,
					 unsigned long *deadline, bool *force)
{
	unsigned long now = jiffies;
	struct nh_grp_entry *nhge;
	unsigned long idle_point;

	if (!bucket->occupied) {
		/* The bucket is not occupied, its NHGE pointer is either
		 * NULL or obsolete. We _have to_ migrate: set force.
		 */
		*force = true;
		return true;
	}

	nhge = nh_res_dereference(bucket->nh_entry);

	/* If the bucket is populated by an underweight or balanced
	 * nexthop, do not migrate.
	 */
	if (!nh_res_nhge_is_ow(nhge))
		return false;

	/* At this point we know that the bucket is populated with an
	 * overweight nexthop. It needs to be migrated to a new nexthop if
	 * the idle timer or unbalanced timer expired.
	 */

	idle_point = nh_res_bucket_idle_point(res_table, bucket, now);
	if (time_after_eq(now, idle_point)) {
		/* The bucket is idle. We _can_ migrate: unset force. */
		*force = false;
		return true;
	}

	/* Unbalanced timer of 0 means "never force". */
	if (res_table->unbalanced_timer) {
		unsigned long unb_point;

		unb_point = nh_res_table_unb_point(res_table);
		if (time_after(now, unb_point)) {
			/* The bucket is not idle, but the unbalanced timer
			 * expired. We _can_ migrate, but set force anyway,
			 * so that drivers know to ignore activity reports
			 * from the HW.
			 */
			*force = true;
			return true;
		}

		nh_res_time_set_deadline(unb_point, deadline);
	}

	nh_res_time_set_deadline(idle_point, deadline);
	return false;
}

static bool nh_res_bucket_migrate(struct nh_res_table *res_table,
				  u16 bucket_index, bool notify,
				  bool notify_nl, bool force)
{
	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
	struct nh_grp_entry *new_nhge;
	struct netlink_ext_ack extack;
	int err;

	new_nhge = list_first_entry_or_null(&res_table->uw_nh_entries,
					    struct nh_grp_entry,
					    res.uw_nh_entry);
	if (WARN_ON_ONCE(!new_nhge))
		/* If this function is called, "bucket" is either not
		 * occupied, or it belongs to a next hop that is
		 * overweight. In either case, there ought to be a
		 * corresponding underweight next hop.
		 */
		return false;

	if (notify) {
		struct nh_grp_entry *old_nhge;

		old_nhge = nh_res_dereference(bucket->nh_entry);
		err = call_nexthop_res_bucket_notifiers(res_table->net,
							res_table->nhg_id,
							bucket_index, force,
							old_nhge->nh,
							new_nhge->nh, &extack);
		if (err) {
			pr_err_ratelimited("%s\n", extack._msg);
			if (!force)
				return false;
			/* It is not possible to veto a forced replacement, so
			 * just clear the hardware flags from the nexthop
			 * bucket to indicate to user space that this bucket is
			 * not correctly populated in hardware.
			 */
			bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
		}
	}

	nh_res_bucket_set_nh(bucket, new_nhge);
	nh_res_bucket_set_idle(res_table, bucket);

	if (notify_nl)
		nexthop_bucket_notify(res_table, bucket_index);

	if (nh_res_nhge_is_balanced(new_nhge))
		list_del(&new_nhge->res.uw_nh_entry);
	return true;
}
1838
1839 #define NH_RES_UPKEEP_DW_MINIMUM_INTERVAL (HZ / 2)
1840
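/* Illustrative timing example (not taken from the code): with the default
 * idle_timer of 120 * HZ and a bucket last used 30 seconds ago,
 * nh_res_bucket_idle_point() falls 90 seconds from now. Upkeep starts from
 * the pessimistic "now + unbalanced_timer" (or "now + idle_timer")
 * estimate and lets nh_res_time_set_deadline() pull the deadline in to the
 * earliest such point, never rescheduling itself sooner than
 * NH_RES_UPKEEP_DW_MINIMUM_INTERVAL.
 */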
1841 static void nh_res_table_upkeep(struct nh_res_table *res_table,
1842 bool notify, bool notify_nl)
1843 {
1844 unsigned long now = jiffies;
1845 unsigned long deadline;
1846 u16 i;
1847
1848 /* Deadline is the next time that upkeep should be run. It is the
1849 * earliest time at which one of the buckets might be migrated.
1850 * Start at the most pessimistic estimate: either unbalanced_timer
1851 * from now, or if there is none, idle_timer from now. For each
1852 * encountered time point, call nh_res_time_set_deadline() to
1853 * refine the estimate.
1854 */
1855 if (res_table->unbalanced_timer)
1856 deadline = now + res_table->unbalanced_timer;
1857 else
1858 deadline = now + res_table->idle_timer;
1859
1860 for (i = 0; i < res_table->num_nh_buckets; i++) {
1861 struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
1862 bool force;
1863
1864 if (nh_res_bucket_should_migrate(res_table, bucket,
1865 &deadline, &force)) {
1866 if (!nh_res_bucket_migrate(res_table, i, notify,
1867 notify_nl, force)) {
1868 unsigned long idle_point;
1869
1870 /* A driver can override the migration
1871 * decision if the HW reports that the
1872 * bucket is actually not idle. Therefore
1873 * remark the bucket as busy again and
1874 * update the deadline.
1875 */
1876 nh_res_bucket_set_busy(bucket);
1877 idle_point = nh_res_bucket_idle_point(res_table,
1878 bucket,
1879 now);
1880 nh_res_time_set_deadline(idle_point, &deadline);
1881 }
1882 }
1883 }
1884
1885 /* If the group is still unbalanced, schedule the next upkeep to
1886 * either the deadline computed above, or the minimum deadline,
1887 * whichever comes later.
1888 */
1889 if (!nh_res_table_is_balanced(res_table)) {
1890 unsigned long now = jiffies;
1891 unsigned long min_deadline;
1892
1893 min_deadline = now + NH_RES_UPKEEP_DW_MINIMUM_INTERVAL;
1894 if (time_before(deadline, min_deadline))
1895 deadline = min_deadline;
1896
1897 queue_delayed_work(system_power_efficient_wq,
1898 &res_table->upkeep_dw, deadline - now);
1899 }
1900 }
1901
1902 static void nh_res_table_upkeep_dw(struct work_struct *work)
1903 {
1904 struct delayed_work *dw = to_delayed_work(work);
1905 struct nh_res_table *res_table;
1906
1907 res_table = container_of(dw, struct nh_res_table, upkeep_dw);
1908 nh_res_table_upkeep(res_table, true, true);
1909 }
1910
1911 static void nh_res_table_cancel_upkeep(struct nh_res_table *res_table)
1912 {
1913 cancel_delayed_work_sync(&res_table->upkeep_dw);
1914 }
1915
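/* Worked example (illustrative): with num_nh_buckets = 8 and two entries of
 * weight 1 and 3, total = 4. The running weight w is 1, then 4, giving
 * upper bounds DIV_ROUND_CLOSEST(8 * 1, 4) = 2 and
 * DIV_ROUND_CLOSEST(8 * 4, 4) = 8, so the entries want 2 and 6 buckets
 * respectively. Entries currently holding fewer buckets than they want are
 * placed on uw_nh_entries, and buckets are migrated to them during upkeep.
 */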
1916 static void nh_res_group_rebalance(struct nh_group *nhg,
1917 struct nh_res_table *res_table)
1918 {
1919 u16 prev_upper_bound = 0;
1920 u32 total = 0;
1921 u32 w = 0;
1922 int i;
1923
1924 INIT_LIST_HEAD(&res_table->uw_nh_entries);
1925
1926 for (i = 0; i < nhg->num_nh; ++i)
1927 total += nhg->nh_entries[i].weight;
1928
1929 for (i = 0; i < nhg->num_nh; ++i) {
1930 struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1931 u16 upper_bound;
1932 u64 btw;
1933
1934 w += nhge->weight;
1935 btw = ((u64)res_table->num_nh_buckets) * w;
1936 upper_bound = DIV_ROUND_CLOSEST_ULL(btw, total);
1937 nhge->res.wants_buckets = upper_bound - prev_upper_bound;
1938 prev_upper_bound = upper_bound;
1939
1940 if (nh_res_nhge_is_uw(nhge)) {
1941 if (list_empty(&res_table->uw_nh_entries))
1942 res_table->unbalanced_since = jiffies;
1943 list_add(&nhge->res.uw_nh_entry,
1944 &res_table->uw_nh_entries);
1945 }
1946 }
1947 }
1948
1949 /* Migrate buckets in res_table so that they reference NHGE's from NHG with
1950 * the right NH ID. Set those buckets that do not have a corresponding NHGE
1951 * entry in NHG as not occupied.
1952 */
1953 static void nh_res_table_migrate_buckets(struct nh_res_table *res_table,
1954 struct nh_group *nhg)
1955 {
1956 u16 i;
1957
1958 for (i = 0; i < res_table->num_nh_buckets; i++) {
1959 struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
1960 u32 id = rtnl_dereference(bucket->nh_entry)->nh->id;
1961 bool found = false;
1962 int j;
1963
1964 for (j = 0; j < nhg->num_nh; j++) {
1965 struct nh_grp_entry *nhge = &nhg->nh_entries[j];
1966
1967 if (nhge->nh->id == id) {
1968 nh_res_bucket_set_nh(bucket, nhge);
1969 found = true;
1970 break;
1971 }
1972 }
1973
1974 if (!found)
1975 nh_res_bucket_unset_nh(bucket);
1976 }
1977 }
1978
1979 static void replace_nexthop_grp_res(struct nh_group *oldg,
1980 struct nh_group *newg)
1981 {
1982 /* For NH group replacement, the new NHG might only have a stub
1983 * hash table with 0 buckets, because the number of buckets was not
1984 * specified. For NH removal, oldg and newg both reference the same
1985 * res_table. So in any case, in the following, we want to work
1986 * with oldg->res_table.
1987 */
1988 struct nh_res_table *old_res_table = rtnl_dereference(oldg->res_table);
1989 unsigned long prev_unbalanced_since = old_res_table->unbalanced_since;
1990 bool prev_has_uw = !list_empty(&old_res_table->uw_nh_entries);
1991
1992 nh_res_table_cancel_upkeep(old_res_table);
1993 nh_res_table_migrate_buckets(old_res_table, newg);
1994 nh_res_group_rebalance(newg, old_res_table);
1995 if (prev_has_uw && !list_empty(&old_res_table->uw_nh_entries))
1996 old_res_table->unbalanced_since = prev_unbalanced_since;
1997 nh_res_table_upkeep(old_res_table, true, false);
1998 }
1999
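/* Hash-threshold groups divide the 31-bit flow-hash space in proportion to
 * weight. Worked example (illustrative): entries of weight 1 and 3 yield
 * upper bounds of (1 << 31) / 4 - 1 and (4 << 31) / 4 - 1, i.e. roughly
 * the first 25% and the remaining 75% of hash values. At forwarding time,
 * the first entry whose upper_bound is >= the packet's hash is selected
 * (see nexthop_select_path_hthr()).
 */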
2000 static void nh_hthr_group_rebalance(struct nh_group *nhg)
2001 {
2002 u32 total = 0;
2003 u32 w = 0;
2004 int i;
2005
2006 for (i = 0; i < nhg->num_nh; ++i)
2007 total += nhg->nh_entries[i].weight;
2008
2009 for (i = 0; i < nhg->num_nh; ++i) {
2010 struct nh_grp_entry *nhge = &nhg->nh_entries[i];
2011 u32 upper_bound;
2012
2013 w += nhge->weight;
2014 upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
2015 atomic_set(&nhge->hthr.upper_bound, upper_bound);
2016 }
2017 }
2018
2019 static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
2020 struct nl_info *nlinfo,
2021 struct list_head *deferred_free)
2022 {
2023 struct nh_grp_entry *nhges, *new_nhges;
2024 struct nexthop *nhp = nhge->nh_parent;
2025 struct netlink_ext_ack extack;
2026 struct nexthop *nh = nhge->nh;
2027 struct nh_group *nhg, *newg;
2028 int i, j, err;
2029
2030 WARN_ON(!nh);
2031
2032 nhg = rtnl_dereference(nhp->nh_grp);
2033 newg = nhg->spare;
2034
2035 /* last entry, keep it visible and remove the parent */
2036 if (nhg->num_nh == 1) {
2037 remove_nexthop(net, nhp, nlinfo);
2038 return;
2039 }
2040
2041 newg->has_v4 = false;
2042 newg->is_multipath = nhg->is_multipath;
2043 newg->hash_threshold = nhg->hash_threshold;
2044 newg->resilient = nhg->resilient;
2045 newg->fdb_nh = nhg->fdb_nh;
2046 newg->num_nh = nhg->num_nh;
2047
2048 /* copy old entries to new except the one getting removed */
2049 nhges = nhg->nh_entries;
2050 new_nhges = newg->nh_entries;
2051 for (i = 0, j = 0; i < nhg->num_nh; ++i) {
2052 struct nh_info *nhi;
2053
2054 /* current nexthop getting removed */
2055 if (nhg->nh_entries[i].nh == nh) {
2056 newg->num_nh--;
2057 continue;
2058 }
2059
2060 nhi = rtnl_dereference(nhges[i].nh->nh_info);
2061 if (nhi->family == AF_INET)
2062 newg->has_v4 = true;
2063
2064 list_del(&nhges[i].nh_list);
2065 new_nhges[j].stats = nhges[i].stats;
2066 new_nhges[j].nh_parent = nhges[i].nh_parent;
2067 new_nhges[j].nh = nhges[i].nh;
2068 new_nhges[j].weight = nhges[i].weight;
2069 list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
2070 j++;
2071 }
2072
2073 if (newg->hash_threshold)
2074 nh_hthr_group_rebalance(newg);
2075 else if (newg->resilient)
2076 replace_nexthop_grp_res(nhg, newg);
2077
2078 rcu_assign_pointer(nhp->nh_grp, newg);
2079
2080 list_del(&nhge->nh_list);
2081 nexthop_put(nhge->nh);
2082 list_add(&nhge->nh_list, deferred_free);
2083
2084 /* Removal of a NH from a resilient group is notified through
2085 * bucket notifications.
2086 */
2087 if (newg->hash_threshold) {
2088 err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, nhp,
2089 &extack);
2090 if (err)
2091 pr_err("%s\n", extack._msg);
2092 }
2093
2094 if (nlinfo)
2095 nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
2096 }
2097
2098 static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
2099 struct nl_info *nlinfo)
2100 {
2101 struct nh_grp_entry *nhge, *tmp;
2102 LIST_HEAD(deferred_free);
2103
2104 /* If there is nothing to do, let's avoid the costly call to
2105 * synchronize_net()
2106 */
2107 if (list_empty(&nh->grp_list))
2108 return;
2109
2110 list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
2111 remove_nh_grp_entry(net, nhge, nlinfo, &deferred_free);
2112
2113 /* make sure all see the newly published array before releasing rtnl */
2114 synchronize_net();
2115
2116 /* Now that the RCU grace period has elapsed, it is safe to free the
2116 * percpu stats.
2116 */
2117 list_for_each_entry_safe(nhge, tmp, &deferred_free, nh_list) {
2118 list_del(&nhge->nh_list);
2119 free_percpu(nhge->stats);
2120 }
2121 }
2122
2123 static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
2124 {
2125 struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
2126 struct nh_res_table *res_table;
2127 int i, num_nh = nhg->num_nh;
2128
2129 for (i = 0; i < num_nh; ++i) {
2130 struct nh_grp_entry *nhge = &nhg->nh_entries[i];
2131
2132 if (WARN_ON(!nhge->nh))
2133 continue;
2134
2135 list_del_init(&nhge->nh_list);
2136 }
2137
2138 if (nhg->resilient) {
2139 res_table = rtnl_dereference(nhg->res_table);
2140 nh_res_table_cancel_upkeep(res_table);
2141 }
2142 }
2143
2144 /* not called for nexthop replace */
2145 static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
2146 {
2147 struct fib6_info *f6i;
2148 bool do_flush = false;
2149 struct fib_info *fi;
2150
2151 list_for_each_entry(fi, &nh->fi_list, nh_list) {
2152 fi->fib_flags |= RTNH_F_DEAD;
2153 do_flush = true;
2154 }
2155 if (do_flush)
2156 fib_flush(net);
2157
2158 spin_lock_bh(&nh->lock);
2159
2160 nh->dead = true;
2161
2162 while (!list_empty(&nh->f6i_list)) {
2163 f6i = list_first_entry(&nh->f6i_list, typeof(*f6i), nh_list);
2164
2165 /* __ip6_del_rt does a release, so do a hold here */
2166 fib6_info_hold(f6i);
2167
2168 spin_unlock_bh(&nh->lock);
2169 ipv6_stub->ip6_del_rt(net, f6i,
2170 !READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode));
2171
2172 spin_lock_bh(&nh->lock);
2173 }
2174
2175 spin_unlock_bh(&nh->lock);
2176 }
2177
2178 static void __remove_nexthop(struct net *net, struct nexthop *nh,
2179 struct nl_info *nlinfo)
2180 {
2181 __remove_nexthop_fib(net, nh);
2182
2183 if (nh->is_group) {
2184 remove_nexthop_group(nh, nlinfo);
2185 } else {
2186 struct nh_info *nhi;
2187
2188 nhi = rtnl_dereference(nh->nh_info);
2189 if (nhi->fib_nhc.nhc_dev)
2190 hlist_del(&nhi->dev_hash);
2191
2192 remove_nexthop_from_groups(net, nh, nlinfo);
2193 }
2194 }
2195
2196 static void remove_nexthop(struct net *net, struct nexthop *nh,
2197 struct nl_info *nlinfo)
2198 {
2199 call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh, NULL);
2200
2201 /* remove from the tree */
2202 rb_erase(&nh->rb_node, &net->nexthop.rb_root);
2203
2204 if (nlinfo)
2205 nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo);
2206
2207 __remove_nexthop(net, nh, nlinfo);
2208 nh_base_seq_inc(net);
2209
2210 nexthop_put(nh);
2211 }
2212
2213 /* if any FIB entries reference this nexthop, any dst entries
2214 * need to be regenerated
2215 */
2216 static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
2217 struct nexthop *replaced_nh)
2218 {
2219 struct fib6_info *f6i;
2220 struct nh_group *nhg;
2221 int i;
2222
2223 if (!list_empty(&nh->fi_list))
2224 rt_cache_flush(net);
2225
2226 list_for_each_entry(f6i, &nh->f6i_list, nh_list)
2227 ipv6_stub->fib6_update_sernum(net, f6i);
2228
2229 /* if an IPv6 group was replaced, we have to release all old
2230 * dsts to make sure all refcounts are released
2231 */
2232 if (!replaced_nh->is_group)
2233 return;
2234
2235 nhg = rtnl_dereference(replaced_nh->nh_grp);
2236 for (i = 0; i < nhg->num_nh; i++) {
2237 struct nh_grp_entry *nhge = &nhg->nh_entries[i];
2238 struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info);
2239
2240 if (nhi->family == AF_INET6)
2241 ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh);
2242 }
2243 }
2244
2245 static int replace_nexthop_grp(struct net *net, struct nexthop *old,
2246 struct nexthop *new, const struct nh_config *cfg,
2247 struct netlink_ext_ack *extack)
2248 {
2249 struct nh_res_table *tmp_table = NULL;
2250 struct nh_res_table *new_res_table;
2251 struct nh_res_table *old_res_table;
2252 struct nh_group *oldg, *newg;
2253 int i, err;
2254
2255 if (!new->is_group) {
2256 NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
2257 return -EINVAL;
2258 }
2259
2260 oldg = rtnl_dereference(old->nh_grp);
2261 newg = rtnl_dereference(new->nh_grp);
2262
2263 if (newg->hash_threshold != oldg->hash_threshold) {
2264 NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with one of a different type.");
2265 return -EINVAL;
2266 }
2267
2268 if (newg->hash_threshold) {
2269 err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new,
2270 extack);
2271 if (err)
2272 return err;
2273 } else if (newg->resilient) {
2274 new_res_table = rtnl_dereference(newg->res_table);
2275 old_res_table = rtnl_dereference(oldg->res_table);
2276
2277 /* Accept if num_nh_buckets was not given, but if it was
2278 * given, demand that the value be correct.
2279 */
2280 if (cfg->nh_grp_res_has_num_buckets &&
2281 cfg->nh_grp_res_num_buckets !=
2282 old_res_table->num_nh_buckets) {
2283 NL_SET_ERR_MSG(extack, "Can not change number of buckets of a resilient nexthop group.");
2284 return -EINVAL;
2285 }
2286
2287 /* Emit a pre-replace notification so that listeners could veto
2288 * a potentially unsupported configuration. Otherwise,
2289 * individual bucket replacement notifications would need to be
2290 * vetoed, which is something that should only happen if the
2291 * bucket is currently active.
2292 */
2293 err = call_nexthop_res_table_notifiers(net, new, extack);
2294 if (err)
2295 return err;
2296
2297 if (cfg->nh_grp_res_has_idle_timer)
2298 old_res_table->idle_timer = cfg->nh_grp_res_idle_timer;
2299 if (cfg->nh_grp_res_has_unbalanced_timer)
2300 old_res_table->unbalanced_timer =
2301 cfg->nh_grp_res_unbalanced_timer;
2302
2303 replace_nexthop_grp_res(oldg, newg);
2304
2305 tmp_table = new_res_table;
2306 rcu_assign_pointer(newg->res_table, old_res_table);
2307 rcu_assign_pointer(newg->spare->res_table, old_res_table);
2308 }
2309
2310 /* update parents - used by nexthop code for cleanup */
2311 for (i = 0; i < newg->num_nh; i++)
2312 newg->nh_entries[i].nh_parent = old;
2313
2314 rcu_assign_pointer(old->nh_grp, newg);
2315
2316 /* Make sure concurrent readers are not using 'oldg' anymore. */
2317 synchronize_net();
2318
2319 if (newg->resilient) {
2320 rcu_assign_pointer(oldg->res_table, tmp_table);
2321 rcu_assign_pointer(oldg->spare->res_table, tmp_table);
2322 }
2323
2324 for (i = 0; i < oldg->num_nh; i++)
2325 oldg->nh_entries[i].nh_parent = new;
2326
2327 rcu_assign_pointer(new->nh_grp, oldg);
2328
2329 return 0;
2330 }
2331
2332 static void nh_group_v4_update(struct nh_group *nhg)
2333 {
2334 struct nh_grp_entry *nhges;
2335 bool has_v4 = false;
2336 int i;
2337
2338 nhges = nhg->nh_entries;
2339 for (i = 0; i < nhg->num_nh; i++) {
2340 struct nh_info *nhi;
2341
2342 nhi = rtnl_dereference(nhges[i].nh->nh_info);
2343 if (nhi->family == AF_INET)
2344 has_v4 = true;
2345 }
2346 nhg->has_v4 = has_v4;
2347 }
2348
2349 static int replace_nexthop_single_notify_res(struct net *net,
2350 struct nh_res_table *res_table,
2351 struct nexthop *old,
2352 struct nh_info *oldi,
2353 struct nh_info *newi,
2354 struct netlink_ext_ack *extack)
2355 {
2356 u32 nhg_id = res_table->nhg_id;
2357 int err;
2358 u16 i;
2359
2360 for (i = 0; i < res_table->num_nh_buckets; i++) {
2361 struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
2362 struct nh_grp_entry *nhge;
2363
2364 nhge = rtnl_dereference(bucket->nh_entry);
2365 if (nhge->nh == old) {
2366 err = __call_nexthop_res_bucket_notifiers(net, nhg_id,
2367 i, true,
2368 oldi, newi,
2369 extack);
2370 if (err)
2371 goto err_notify;
2372 }
2373 }
2374
2375 return 0;
2376
2377 err_notify:
2378 while (i-- > 0) {
2379 struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
2380 struct nh_grp_entry *nhge;
2381
2382 nhge = rtnl_dereference(bucket->nh_entry);
2383 if (nhge->nh == old)
2384 __call_nexthop_res_bucket_notifiers(net, nhg_id, i,
2385 true, newi, oldi,
2386 extack);
2387 }
2388 return err;
2389 }
2390
2391 static int replace_nexthop_single_notify(struct net *net,
2392 struct nexthop *group_nh,
2393 struct nexthop *old,
2394 struct nh_info *oldi,
2395 struct nh_info *newi,
2396 struct netlink_ext_ack *extack)
2397 {
2398 struct nh_group *nhg = rtnl_dereference(group_nh->nh_grp);
2399 struct nh_res_table *res_table;
2400
2401 if (nhg->hash_threshold) {
2402 return call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE,
2403 group_nh, extack);
2404 } else if (nhg->resilient) {
2405 res_table = rtnl_dereference(nhg->res_table);
2406 return replace_nexthop_single_notify_res(net, res_table,
2407 old, oldi, newi,
2408 extack);
2409 }
2410
2411 return -EINVAL;
2412 }
2413
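/* Single-nexthop replacement works by swapping the nh_info payloads of
 * 'old' and 'new' under RTNL rather than replacing the tree node, so the
 * nexthop ID, refcounts and tree position all stay stable. If notifying a
 * dependent group fails, the swap is unwound in reverse order below.
 */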
2414 static int replace_nexthop_single(struct net *net, struct nexthop *old,
2415 struct nexthop *new,
2416 struct netlink_ext_ack *extack)
2417 {
2418 u8 old_protocol, old_nh_flags;
2419 struct nh_info *oldi, *newi;
2420 struct nh_grp_entry *nhge;
2421 int err;
2422
2423 if (new->is_group) {
2424 NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
2425 return -EINVAL;
2426 }
2427
2428 if (!list_empty(&old->grp_list) &&
2429 rtnl_dereference(new->nh_info)->fdb_nh !=
2430 rtnl_dereference(old->nh_info)->fdb_nh) {
2431 NL_SET_ERR_MSG(extack, "Cannot change nexthop FDB status while in a group");
2432 return -EINVAL;
2433 }
2434
2435 err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack);
2436 if (err)
2437 return err;
2438
2439 /* Hardware flags were set on 'old' as 'new' is not in the red-black
2440 * tree. Therefore, inherit the flags from 'old' to 'new'.
2441 */
2442 new->nh_flags |= old->nh_flags & (RTNH_F_OFFLOAD | RTNH_F_TRAP);
2443
2444 oldi = rtnl_dereference(old->nh_info);
2445 newi = rtnl_dereference(new->nh_info);
2446
2447 newi->nh_parent = old;
2448 oldi->nh_parent = new;
2449
2450 old_protocol = old->protocol;
2451 old_nh_flags = old->nh_flags;
2452
2453 old->protocol = new->protocol;
2454 old->nh_flags = new->nh_flags;
2455
2456 rcu_assign_pointer(old->nh_info, newi);
2457 rcu_assign_pointer(new->nh_info, oldi);
2458
2459 /* Send a replace notification for all the groups using the nexthop. */
2460 list_for_each_entry(nhge, &old->grp_list, nh_list) {
2461 struct nexthop *nhp = nhge->nh_parent;
2462
2463 err = replace_nexthop_single_notify(net, nhp, old, oldi, newi,
2464 extack);
2465 if (err)
2466 goto err_notify;
2467 }
2468
2469 /* When replacing an IPv4 nexthop with an IPv6 nexthop, potentially
2470 * update IPv4 indication in all the groups using the nexthop.
2471 */
2472 if (oldi->family == AF_INET && newi->family == AF_INET6) {
2473 list_for_each_entry(nhge, &old->grp_list, nh_list) {
2474 struct nexthop *nhp = nhge->nh_parent;
2475 struct nh_group *nhg;
2476
2477 nhg = rtnl_dereference(nhp->nh_grp);
2478 nh_group_v4_update(nhg);
2479 }
2480 }
2481
2482 return 0;
2483
2484 err_notify:
2485 rcu_assign_pointer(new->nh_info, newi);
2486 rcu_assign_pointer(old->nh_info, oldi);
2487 old->nh_flags = old_nh_flags;
2488 old->protocol = old_protocol;
2489 oldi->nh_parent = old;
2490 newi->nh_parent = new;
2491 list_for_each_entry_continue_reverse(nhge, &old->grp_list, nh_list) {
2492 struct nexthop *nhp = nhge->nh_parent;
2493
2494 replace_nexthop_single_notify(net, nhp, old, newi, oldi, NULL);
2495 }
2496 call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, old, extack);
2497 return err;
2498 }
2499
2500 static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
2501 struct nl_info *info)
2502 {
2503 struct fib6_info *f6i;
2504
2505 if (!list_empty(&nh->fi_list)) {
2506 struct fib_info *fi;
2507
2508 /* expectation is a few fib_info per nexthop and then
2509 * a lot of routes per fib_info. So mark the fib_info
2510 * and then walk the fib tables once
2511 */
2512 list_for_each_entry(fi, &nh->fi_list, nh_list)
2513 fi->nh_updated = true;
2514
2515 fib_info_notify_update(net, info);
2516
2517 list_for_each_entry(fi, &nh->fi_list, nh_list)
2518 fi->nh_updated = false;
2519 }
2520
2521 list_for_each_entry(f6i, &nh->f6i_list, nh_list)
2522 ipv6_stub->fib6_rt_update(net, f6i, info);
2523 }
2524
2525 /* send RTM_NEWROUTE with REPLACE flag set for all FIB entries
2526 * linked to this nexthop and for all groups that the nexthop
2527 * is a member of
2528 */
2529 static void nexthop_replace_notify(struct net *net, struct nexthop *nh,
2530 struct nl_info *info)
2531 {
2532 struct nh_grp_entry *nhge;
2533
2534 __nexthop_replace_notify(net, nh, info);
2535
2536 list_for_each_entry(nhge, &nh->grp_list, nh_list)
2537 __nexthop_replace_notify(net, nhge->nh_parent, info);
2538 }
2539
2540 static int replace_nexthop(struct net *net, struct nexthop *old,
2541 struct nexthop *new, const struct nh_config *cfg,
2542 struct netlink_ext_ack *extack)
2543 {
2544 bool new_is_reject = false;
2545 struct nh_grp_entry *nhge;
2546 int err;
2547
2548 /* check that existing FIB entries are ok with the
2549 * new nexthop definition
2550 */
2551 err = fib_check_nh_list(old, new, extack);
2552 if (err)
2553 return err;
2554
2555 err = fib6_check_nh_list(old, new, extack);
2556 if (err)
2557 return err;
2558
2559 if (!new->is_group) {
2560 struct nh_info *nhi = rtnl_dereference(new->nh_info);
2561
2562 new_is_reject = nhi->reject_nh;
2563 }
2564
2565 list_for_each_entry(nhge, &old->grp_list, nh_list) {
2566 /* if new nexthop is a blackhole, any groups using this
2567 * nexthop cannot have more than 1 path
2568 */
2569 if (new_is_reject &&
2570 nexthop_num_path(nhge->nh_parent) > 1) {
2571 NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path");
2572 return -EINVAL;
2573 }
2574
2575 err = fib_check_nh_list(nhge->nh_parent, new, extack);
2576 if (err)
2577 return err;
2578
2579 err = fib6_check_nh_list(nhge->nh_parent, new, extack);
2580 if (err)
2581 return err;
2582 }
2583
2584 if (old->is_group)
2585 err = replace_nexthop_grp(net, old, new, cfg, extack);
2586 else
2587 err = replace_nexthop_single(net, old, new, extack);
2588
2589 if (!err) {
2590 nh_rt_cache_flush(net, old, new);
2591
2592 __remove_nexthop(net, new, NULL);
2593 nexthop_put(new);
2594 }
2595
2596 return err;
2597 }
2598
2599 /* called with rtnl_lock held */
2600 static int insert_nexthop(struct net *net, struct nexthop *new_nh,
2601 struct nh_config *cfg, struct netlink_ext_ack *extack)
2602 {
2603 struct rb_node **pp, *parent = NULL, *next;
2604 struct rb_root *root = &net->nexthop.rb_root;
2605 bool replace = !!(cfg->nlflags & NLM_F_REPLACE);
2606 bool create = !!(cfg->nlflags & NLM_F_CREATE);
2607 u32 new_id = new_nh->id;
2608 int replace_notify = 0;
2609 int rc = -EEXIST;
2610
2611 pp = &root->rb_node;
2612 while (1) {
2613 struct nexthop *nh;
2614
2615 next = *pp;
2616 if (!next)
2617 break;
2618
2619 parent = next;
2620
2621 nh = rb_entry(parent, struct nexthop, rb_node);
2622 if (new_id < nh->id) {
2623 pp = &next->rb_left;
2624 } else if (new_id > nh->id) {
2625 pp = &next->rb_right;
2626 } else if (replace) {
2627 rc = replace_nexthop(net, nh, new_nh, cfg, extack);
2628 if (!rc) {
2629 new_nh = nh; /* send notification with old nh */
2630 replace_notify = 1;
2631 }
2632 goto out;
2633 } else {
2634 /* id already exists and not a replace */
2635 goto out;
2636 }
2637 }
2638
2639 if (replace && !create) {
2640 NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists");
2641 rc = -ENOENT;
2642 goto out;
2643 }
2644
2645 if (new_nh->is_group) {
2646 struct nh_group *nhg = rtnl_dereference(new_nh->nh_grp);
2647 struct nh_res_table *res_table;
2648
2649 if (nhg->resilient) {
2650 res_table = rtnl_dereference(nhg->res_table);
2651
2652 /* Not passing the number of buckets is OK when
2653 * replacing, but not when creating a new group.
2654 */
2655 if (!cfg->nh_grp_res_has_num_buckets) {
2656 NL_SET_ERR_MSG(extack, "Number of buckets not specified for nexthop group insertion");
2657 rc = -EINVAL;
2658 goto out;
2659 }
2660
2661 nh_res_group_rebalance(nhg, res_table);
2662
2663 /* Do not send bucket notifications, we do full
2664 * notification below.
2665 */
2666 nh_res_table_upkeep(res_table, false, false);
2667 }
2668 }
2669
2670 rb_link_node_rcu(&new_nh->rb_node, parent, pp);
2671 rb_insert_color(&new_nh->rb_node, root);
2672
2673 /* The initial insertion is a full notification for hash-threshold as
2674 * well as resilient groups.
2675 */
2676 rc = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new_nh, extack);
2677 if (rc)
2678 rb_erase(&new_nh->rb_node, &net->nexthop.rb_root);
2679
2680 out:
2681 if (!rc) {
2682 nh_base_seq_inc(net);
2683 nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
2684 if (replace_notify &&
2685 READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode))
2686 nexthop_replace_notify(net, new_nh, &cfg->nlinfo);
2687 }
2688
2689 return rc;
2690 }
2691
2692 /* rtnl */
2693 /* remove all nexthops tied to a device being deleted */
2694 static void nexthop_flush_dev(struct net_device *dev, unsigned long event)
2695 {
2696 unsigned int hash = nh_dev_hashfn(dev->ifindex);
2697 struct net *net = dev_net(dev);
2698 struct hlist_head *head = &net->nexthop.devhash[hash];
2699 struct hlist_node *n;
2700 struct nh_info *nhi;
2701
2702 hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
2703 if (nhi->fib_nhc.nhc_dev != dev)
2704 continue;
2705
2706 if (nhi->reject_nh &&
2707 (event == NETDEV_DOWN || event == NETDEV_CHANGE))
2708 continue;
2709
2710 remove_nexthop(net, nhi->nh_parent, NULL);
2711 }
2712 }
2713
2714 /* rtnl; called when net namespace is deleted */
2715 static void flush_all_nexthops(struct net *net)
2716 {
2717 struct rb_root *root = &net->nexthop.rb_root;
2718 struct rb_node *node;
2719 struct nexthop *nh;
2720
2721 while ((node = rb_first(root))) {
2722 nh = rb_entry(node, struct nexthop, rb_node);
2723 remove_nexthop(net, nh, NULL);
2724 cond_resched();
2725 }
2726 }
2727
2728 static struct nexthop *nexthop_create_group(struct net *net,
2729 struct nh_config *cfg)
2730 {
2731 struct nlattr *grps_attr = cfg->nh_grp;
2732 struct nexthop_grp *entry = nla_data(grps_attr);
2733 u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
2734 struct nh_group *nhg;
2735 struct nexthop *nh;
2736 int err;
2737 int i;
2738
2739 nh = nexthop_alloc();
2740 if (!nh)
2741 return ERR_PTR(-ENOMEM);
2742
2743 nh->is_group = 1;
2744
2745 nhg = nexthop_grp_alloc(num_nh);
2746 if (!nhg) {
2747 kfree(nh);
2748 return ERR_PTR(-ENOMEM);
2749 }
2750
2751 /* spare group used for removals */
2752 nhg->spare = nexthop_grp_alloc(num_nh);
2753 if (!nhg->spare) {
2754 kfree(nhg);
2755 kfree(nh);
2756 return ERR_PTR(-ENOMEM);
2757 }
2758 nhg->spare->spare = nhg;
2759
2760 for (i = 0; i < nhg->num_nh; ++i) {
2761 struct nexthop *nhe;
2762 struct nh_info *nhi;
2763
2764 nhe = nexthop_find_by_id(net, entry[i].id);
2765 if (!nexthop_get(nhe)) {
2766 err = -ENOENT;
2767 goto out_no_nh;
2768 }
2769
2770 nhi = rtnl_dereference(nhe->nh_info);
2771 if (nhi->family == AF_INET)
2772 nhg->has_v4 = true;
2773
2774 nhg->nh_entries[i].stats =
2775 netdev_alloc_pcpu_stats(struct nh_grp_entry_stats);
2776 if (!nhg->nh_entries[i].stats) {
2777 err = -ENOMEM;
2778 nexthop_put(nhe);
2779 goto out_no_nh;
2780 }
2781 nhg->nh_entries[i].nh = nhe;
2782 nhg->nh_entries[i].weight = nexthop_grp_weight(&entry[i]);
2783
2784 list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
2785 nhg->nh_entries[i].nh_parent = nh;
2786 }
2787
2788 if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) {
2789 nhg->hash_threshold = 1;
2790 nhg->is_multipath = true;
2791 } else if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) {
2792 struct nh_res_table *res_table;
2793
2794 res_table = nexthop_res_table_alloc(net, cfg->nh_id, cfg);
2795 if (!res_table) {
2796 err = -ENOMEM;
2797 goto out_no_nh;
2798 }
2799
2800 rcu_assign_pointer(nhg->spare->res_table, res_table);
2801 rcu_assign_pointer(nhg->res_table, res_table);
2802 nhg->resilient = true;
2803 nhg->is_multipath = true;
2804 }
2805
2806 WARN_ON_ONCE(nhg->hash_threshold + nhg->resilient != 1);
2807
2808 if (nhg->hash_threshold)
2809 nh_hthr_group_rebalance(nhg);
2810
2811 if (cfg->nh_fdb)
2812 nhg->fdb_nh = 1;
2813
2814 if (cfg->nh_hw_stats)
2815 nhg->hw_stats = true;
2816
2817 rcu_assign_pointer(nh->nh_grp, nhg);
2818
2819 return nh;
2820
2821 out_no_nh:
2822 for (i--; i >= 0; --i) {
2823 list_del(&nhg->nh_entries[i].nh_list);
2824 free_percpu(nhg->nh_entries[i].stats);
2825 nexthop_put(nhg->nh_entries[i].nh);
2826 }
2827
2828 kfree(nhg->spare);
2829 kfree(nhg);
2830 kfree(nh);
2831
2832 return ERR_PTR(err);
2833 }
2834
2835 static int nh_create_ipv4(struct net *net, struct nexthop *nh,
2836 struct nh_info *nhi, struct nh_config *cfg,
2837 struct netlink_ext_ack *extack)
2838 {
2839 struct fib_nh *fib_nh = &nhi->fib_nh;
2840 struct fib_config fib_cfg = {
2841 .fc_oif = cfg->nh_ifindex,
2842 .fc_gw4 = cfg->gw.ipv4,
2843 .fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
2844 .fc_flags = cfg->nh_flags,
2845 .fc_nlinfo = cfg->nlinfo,
2846 .fc_encap = cfg->nh_encap,
2847 .fc_encap_type = cfg->nh_encap_type,
2848 };
2849 u32 tb_id = (cfg->dev ? l3mdev_fib_table(cfg->dev) : RT_TABLE_MAIN);
2850 int err;
2851
2852 err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
2853 if (err) {
2854 fib_nh_release(net, fib_nh);
2855 goto out;
2856 }
2857
2858 if (nhi->fdb_nh)
2859 goto out;
2860
2861 /* sets nh_dev if successful */
2862 err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
2863 if (!err) {
2864 nh->nh_flags = fib_nh->fib_nh_flags;
2865 fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
2866 !fib_nh->fib_nh_scope ? 0 : fib_nh->fib_nh_scope - 1);
2867 } else {
2868 fib_nh_release(net, fib_nh);
2869 }
2870 out:
2871 return err;
2872 }
2873
2874 static int nh_create_ipv6(struct net *net, struct nexthop *nh,
2875 struct nh_info *nhi, struct nh_config *cfg,
2876 struct netlink_ext_ack *extack)
2877 {
2878 struct fib6_nh *fib6_nh = &nhi->fib6_nh;
2879 struct fib6_config fib6_cfg = {
2880 .fc_table = l3mdev_fib_table(cfg->dev),
2881 .fc_ifindex = cfg->nh_ifindex,
2882 .fc_gateway = cfg->gw.ipv6,
2883 .fc_flags = cfg->nh_flags,
2884 .fc_nlinfo = cfg->nlinfo,
2885 .fc_encap = cfg->nh_encap,
2886 .fc_encap_type = cfg->nh_encap_type,
2887 .fc_is_fdb = cfg->nh_fdb,
2888 };
2889 int err;
2890
2891 if (!ipv6_addr_any(&cfg->gw.ipv6))
2892 fib6_cfg.fc_flags |= RTF_GATEWAY;
2893
2894 /* sets nh_dev if successful */
2895 err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
2896 extack);
2897 if (err) {
2898 /* IPv6 is not enabled, don't call fib6_nh_release */
2899 if (err == -EAFNOSUPPORT)
2900 goto out;
2901 ipv6_stub->fib6_nh_release(fib6_nh);
2902 } else {
2903 nh->nh_flags = fib6_nh->fib_nh_flags;
2904 }
2905 out:
2906 return err;
2907 }
2908
2909 static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
2910 struct netlink_ext_ack *extack)
2911 {
2912 struct nh_info *nhi;
2913 struct nexthop *nh;
2914 int err = 0;
2915
2916 nh = nexthop_alloc();
2917 if (!nh)
2918 return ERR_PTR(-ENOMEM);
2919
2920 nhi = kzalloc(sizeof(*nhi), GFP_KERNEL);
2921 if (!nhi) {
2922 kfree(nh);
2923 return ERR_PTR(-ENOMEM);
2924 }
2925
2926 nh->nh_flags = cfg->nh_flags;
2927 nh->net = net;
2928
2929 nhi->nh_parent = nh;
2930 nhi->family = cfg->nh_family;
2931 nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;
2932
2933 if (cfg->nh_fdb)
2934 nhi->fdb_nh = 1;
2935
2936 if (cfg->nh_blackhole) {
2937 nhi->reject_nh = 1;
2938 cfg->nh_ifindex = net->loopback_dev->ifindex;
2939 }
2940
2941 switch (cfg->nh_family) {
2942 case AF_INET:
2943 err = nh_create_ipv4(net, nh, nhi, cfg, extack);
2944 break;
2945 case AF_INET6:
2946 err = nh_create_ipv6(net, nh, nhi, cfg, extack);
2947 break;
2948 }
2949
2950 if (err) {
2951 kfree(nhi);
2952 kfree(nh);
2953 return ERR_PTR(err);
2954 }
2955
2956 /* add the entry to the device based hash */
2957 if (!nhi->fdb_nh)
2958 nexthop_devhash_add(net, nhi);
2959
2960 rcu_assign_pointer(nh->nh_info, nhi);
2961
2962 return nh;
2963 }
2964
2965 /* called with rtnl lock held */
2966 static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg,
2967 struct netlink_ext_ack *extack)
2968 {
2969 struct nexthop *nh;
2970 int err;
2971
2972 if (!cfg->nh_id) {
2973 cfg->nh_id = nh_find_unused_id(net);
2974 if (!cfg->nh_id) {
2975 NL_SET_ERR_MSG(extack, "No unused id");
2976 return ERR_PTR(-EINVAL);
2977 }
2978 }
2979
2980 if (cfg->nh_grp)
2981 nh = nexthop_create_group(net, cfg);
2982 else
2983 nh = nexthop_create(net, cfg, extack);
2984
2985 if (IS_ERR(nh))
2986 return nh;
2987
2988 refcount_set(&nh->refcnt, 1);
2989 nh->id = cfg->nh_id;
2990 nh->protocol = cfg->nh_protocol;
2991 nh->net = net;
2992
2993 err = insert_nexthop(net, nh, cfg, extack);
2994 if (err) {
2995 __remove_nexthop(net, nh, NULL);
2996 nexthop_put(nh);
2997 nh = ERR_PTR(err);
2998 }
2999
3000 return nh;
3001 }
3002
3003 static int rtm_nh_get_timer(struct nlattr *attr, unsigned long fallback,
3004 unsigned long *timer_p, bool *has_p,
3005 struct netlink_ext_ack *extack)
3006 {
3007 unsigned long timer;
3008 u32 value;
3009
3010 if (!attr) {
3011 *timer_p = fallback;
3012 *has_p = false;
3013 return 0;
3014 }
3015
3016 value = nla_get_u32(attr);
3017 timer = clock_t_to_jiffies(value);
3018 if (timer == ~0UL) {
3019 NL_SET_ERR_MSG(extack, "Timer value too large");
3020 return -EINVAL;
3021 }
3022
3023 *timer_p = timer;
3024 *has_p = true;
3025 return 0;
3026 }
3027
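/* Parses the NHA_RES_GROUP nest. On the wire, both timers are expressed in
 * clock_t (USER_HZ) units and converted to jiffies by rtm_nh_get_timer()
 * above. For reference, user space typically configures this along these
 * lines (iproute2 syntax, abbreviated):
 *
 *   ip nexthop add id 10 group 1/2 type resilient \
 *           buckets 8 idle_timer 120 unbalanced_timer 300
 */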
3028 static int rtm_to_nh_config_grp_res(struct nlattr *res, struct nh_config *cfg,
3029 struct netlink_ext_ack *extack)
3030 {
3031 struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_policy_new)] = {};
3032 int err;
3033
3034 if (res) {
3035 err = nla_parse_nested(tb,
3036 ARRAY_SIZE(rtm_nh_res_policy_new) - 1,
3037 res, rtm_nh_res_policy_new, extack);
3038 if (err < 0)
3039 return err;
3040 }
3041
3042 if (tb[NHA_RES_GROUP_BUCKETS]) {
3043 cfg->nh_grp_res_num_buckets =
3044 nla_get_u16(tb[NHA_RES_GROUP_BUCKETS]);
3045 cfg->nh_grp_res_has_num_buckets = true;
3046 if (!cfg->nh_grp_res_num_buckets) {
3047 NL_SET_ERR_MSG(extack, "Number of buckets needs to be non-0");
3048 return -EINVAL;
3049 }
3050 }
3051
3052 err = rtm_nh_get_timer(tb[NHA_RES_GROUP_IDLE_TIMER],
3053 NH_RES_DEFAULT_IDLE_TIMER,
3054 &cfg->nh_grp_res_idle_timer,
3055 &cfg->nh_grp_res_has_idle_timer,
3056 extack);
3057 if (err)
3058 return err;
3059
3060 return rtm_nh_get_timer(tb[NHA_RES_GROUP_UNBALANCED_TIMER],
3061 NH_RES_DEFAULT_UNBALANCED_TIMER,
3062 &cfg->nh_grp_res_unbalanced_timer,
3063 &cfg->nh_grp_res_has_unbalanced_timer,
3064 extack);
3065 }
3066
3067 static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
3068 struct nlmsghdr *nlh, struct nlattr **tb,
3069 struct nh_config *cfg,
3070 struct netlink_ext_ack *extack)
3071 {
3072 struct nhmsg *nhm = nlmsg_data(nlh);
3073 int err;
3074
3075 err = -EINVAL;
3076 if (nhm->resvd || nhm->nh_scope) {
3077 NL_SET_ERR_MSG(extack, "Invalid values in ancillary header");
3078 goto out;
3079 }
3080 if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) {
3081 NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header");
3082 goto out;
3083 }
3084
3085 switch (nhm->nh_family) {
3086 case AF_INET:
3087 case AF_INET6:
3088 break;
3089 case AF_UNSPEC:
3090 if (tb[NHA_GROUP])
3091 break;
3092 fallthrough;
3093 default:
3094 NL_SET_ERR_MSG(extack, "Invalid address family");
3095 goto out;
3096 }
3097
3098 memset(cfg, 0, sizeof(*cfg));
3099 cfg->nlflags = nlh->nlmsg_flags;
3100 cfg->nlinfo.portid = NETLINK_CB(skb).portid;
3101 cfg->nlinfo.nlh = nlh;
3102 cfg->nlinfo.nl_net = net;
3103
3104 cfg->nh_family = nhm->nh_family;
3105 cfg->nh_protocol = nhm->nh_protocol;
3106 cfg->nh_flags = nhm->nh_flags;
3107
3108 if (tb[NHA_ID])
3109 cfg->nh_id = nla_get_u32(tb[NHA_ID]);
3110
3111 if (tb[NHA_FDB]) {
3112 if (tb[NHA_OIF] || tb[NHA_BLACKHOLE] ||
3113 tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE]) {
3114 NL_SET_ERR_MSG(extack, "Fdb attribute can not be used with encap, oif or blackhole");
3115 goto out;
3116 }
3117 if (nhm->nh_flags) {
3118 NL_SET_ERR_MSG(extack, "Unsupported nexthop flags in ancillary header");
3119 goto out;
3120 }
3121 cfg->nh_fdb = nla_get_flag(tb[NHA_FDB]);
3122 }
3123
3124 if (tb[NHA_GROUP]) {
3125 if (nhm->nh_family != AF_UNSPEC) {
3126 NL_SET_ERR_MSG(extack, "Invalid family for group");
3127 goto out;
3128 }
3129 cfg->nh_grp = tb[NHA_GROUP];
3130
3131 cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH;
3132 if (tb[NHA_GROUP_TYPE])
3133 cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);
3134
3135 if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) {
3136 NL_SET_ERR_MSG(extack, "Invalid group type");
3137 goto out;
3138 }
3139
3140 err = nh_check_attr_group(net, tb, ARRAY_SIZE(rtm_nh_policy_new),
3141 cfg->nh_grp_type, extack);
3142 if (err)
3143 goto out;
3144
3145 if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES)
3146 err = rtm_to_nh_config_grp_res(tb[NHA_RES_GROUP],
3147 cfg, extack);
3148
3149 if (tb[NHA_HW_STATS_ENABLE])
3150 cfg->nh_hw_stats = nla_get_u32(tb[NHA_HW_STATS_ENABLE]);
3151
3152 /* no other attributes should be set */
3153 goto out;
3154 }
3155
3156 if (tb[NHA_BLACKHOLE]) {
3157 if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
3158 tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE] || tb[NHA_FDB]) {
3159 NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway, oif, encap or fdb");
3160 goto out;
3161 }
3162
3163 cfg->nh_blackhole = 1;
3164 err = 0;
3165 goto out;
3166 }
3167
3168 if (!cfg->nh_fdb && !tb[NHA_OIF]) {
3169 NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole and non-fdb nexthops");
3170 goto out;
3171 }
3172
3173 err = -EINVAL;
3174 if (tb[NHA_GATEWAY]) {
3175 struct nlattr *gwa = tb[NHA_GATEWAY];
3176
3177 switch (cfg->nh_family) {
3178 case AF_INET:
3179 if (nla_len(gwa) != sizeof(u32)) {
3180 NL_SET_ERR_MSG(extack, "Invalid gateway");
3181 goto out;
3182 }
3183 cfg->gw.ipv4 = nla_get_be32(gwa);
3184 break;
3185 case AF_INET6:
3186 if (nla_len(gwa) != sizeof(struct in6_addr)) {
3187 NL_SET_ERR_MSG(extack, "Invalid gateway");
3188 goto out;
3189 }
3190 cfg->gw.ipv6 = nla_get_in6_addr(gwa);
3191 break;
3192 default:
3193 NL_SET_ERR_MSG(extack,
3194 "Unknown address family for gateway");
3195 goto out;
3196 }
3197 } else {
3198 /* device only nexthop (no gateway) */
3199 if (cfg->nh_flags & RTNH_F_ONLINK) {
3200 NL_SET_ERR_MSG(extack,
3201 "ONLINK flag can not be set for nexthop without a gateway");
3202 goto out;
3203 }
3204 }
3205
3206 if (tb[NHA_ENCAP]) {
3207 cfg->nh_encap = tb[NHA_ENCAP];
3208
3209 if (!tb[NHA_ENCAP_TYPE]) {
3210 NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing");
3211 goto out;
3212 }
3213
3214 cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
3215 err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack);
3216 if (err < 0)
3217 goto out;
3218
3219 } else if (tb[NHA_ENCAP_TYPE]) {
3220 NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing");
3221 goto out;
3222 }
3223
3224 if (tb[NHA_HW_STATS_ENABLE]) {
3225 NL_SET_ERR_MSG(extack, "Cannot enable nexthop hardware statistics for non-group nexthops");
3226 goto out;
3227 }
3228
3229 err = 0;
3230 out:
3231 return err;
3232 }
3233
3234 static int rtm_to_nh_config_rtnl(struct net *net, struct nlattr **tb,
3235 struct nh_config *cfg,
3236 struct netlink_ext_ack *extack)
3237 {
3238 if (tb[NHA_GROUP])
3239 return nh_check_attr_group_rtnl(net, tb, extack);
3240
3241 if (tb[NHA_OIF]) {
3242 cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
3243 if (cfg->nh_ifindex)
3244 cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);
3245
3246 if (!cfg->dev) {
3247 NL_SET_ERR_MSG(extack, "Invalid device index");
3248 return -EINVAL;
3249 }
3250
3251 if (!(cfg->dev->flags & IFF_UP)) {
3252 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3253 return -ENETDOWN;
3254 }
3255
3256 if (!netif_carrier_ok(cfg->dev)) {
3257 NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
3258 return -ENETDOWN;
3259 }
3260 }
3261
3262 return 0;
3263 }
3264
3265 /* rtnl */
3266 static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
3267 struct netlink_ext_ack *extack)
3268 {
3269 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_new)];
3270 struct net *net = sock_net(skb->sk);
3271 struct nh_config cfg;
3272 struct nexthop *nh;
3273 int err;
3274
3275 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3276 ARRAY_SIZE(rtm_nh_policy_new) - 1,
3277 rtm_nh_policy_new, extack);
3278 if (err < 0)
3279 goto out;
3280
3281 err = rtm_to_nh_config(net, skb, nlh, tb, &cfg, extack);
3282 if (err)
3283 goto out;
3284
3285 if (cfg.nlflags & NLM_F_REPLACE && !cfg.nh_id) {
3286 NL_SET_ERR_MSG(extack, "Replace requires nexthop id");
3287 err = -EINVAL;
3288 goto out;
3289 }
3290
3291 rtnl_net_lock(net);
3292
3293 err = rtm_to_nh_config_rtnl(net, tb, &cfg, extack);
3294 if (err)
3295 goto unlock;
3296
3297 nh = nexthop_add(net, &cfg, extack);
3298 if (IS_ERR(nh))
3299 err = PTR_ERR(nh);
3300
3301 unlock:
3302 rtnl_net_unlock(net);
3303 out:
3304 return err;
3305 }
3306
3307 static int nh_valid_get_del_req(const struct nlmsghdr *nlh,
3308 struct nlattr **tb, u32 *id, u32 *op_flags,
3309 struct netlink_ext_ack *extack)
3310 {
3311 struct nhmsg *nhm = nlmsg_data(nlh);
3312
3313 if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
3314 NL_SET_ERR_MSG(extack, "Invalid values in header");
3315 return -EINVAL;
3316 }
3317
3318 if (!tb[NHA_ID]) {
3319 NL_SET_ERR_MSG(extack, "Nexthop id is missing");
3320 return -EINVAL;
3321 }
3322
3323 *id = nla_get_u32(tb[NHA_ID]);
3324 if (!(*id)) {
3325 NL_SET_ERR_MSG(extack, "Invalid nexthop id");
3326 return -EINVAL;
3327 }
3328
3329 if (op_flags)
3330 *op_flags = nla_get_u32_default(tb[NHA_OP_FLAGS], 0);
3331
3332 return 0;
3333 }
3334
3335 /* rtnl */
3336 static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
3337 struct netlink_ext_ack *extack)
3338 {
3339 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_del)];
3340 struct net *net = sock_net(skb->sk);
3341 struct nl_info nlinfo = {
3342 .nlh = nlh,
3343 .nl_net = net,
3344 .portid = NETLINK_CB(skb).portid,
3345 };
3346 struct nexthop *nh;
3347 int err;
3348 u32 id;
3349
3350 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3351 ARRAY_SIZE(rtm_nh_policy_del) - 1, rtm_nh_policy_del,
3352 extack);
3353 if (err < 0)
3354 return err;
3355
3356 err = nh_valid_get_del_req(nlh, tb, &id, NULL, extack);
3357 if (err)
3358 return err;
3359
3360 rtnl_net_lock(net);
3361
3362 nh = nexthop_find_by_id(net, id);
3363 if (nh)
3364 remove_nexthop(net, nh, &nlinfo);
3365 else
3366 err = -ENOENT;
3367
3368 rtnl_net_unlock(net);
3369
3370 return err;
3371 }
3372
3373 /* rtnl */
3374 static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3375 struct netlink_ext_ack *extack)
3376 {
3377 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
3378 struct net *net = sock_net(in_skb->sk);
3379 struct sk_buff *skb = NULL;
3380 struct nexthop *nh;
3381 u32 op_flags;
3382 int err;
3383 u32 id;
3384
3385 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3386 ARRAY_SIZE(rtm_nh_policy_get) - 1, rtm_nh_policy_get,
3387 extack);
3388 if (err < 0)
3389 return err;
3390
3391 err = nh_valid_get_del_req(nlh, tb, &id, &op_flags, extack);
3392 if (err)
3393 return err;
3394
3395 err = -ENOENT;
3396 nh = nexthop_find_by_id(net, id);
3397 if (!nh)
3398 goto out;
3399
3400 err = -ENOBUFS;
3401 skb = nlmsg_new(nh_nlmsg_size(nh, op_flags), GFP_KERNEL);
3402 if (!skb)
3403 goto out;
3404
3405 err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
3406 nlh->nlmsg_seq, 0, op_flags);
3407 if (err < 0) {
3408 WARN_ON(err == -EMSGSIZE);
3409 goto errout_free;
3410 }
3411
3412 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3413 out:
3414 return err;
3415 errout_free:
3416 kfree_skb(skb);
3417 goto out;
3418 }
3419
3420 struct nh_dump_filter {
3421 u32 nh_id;
3422 int dev_idx;
3423 int master_idx;
3424 bool group_filter;
3425 bool fdb_filter;
3426 u32 res_bucket_nh_id;
3427 u32 op_flags;
3428 };
3429
3430 static bool nh_dump_filtered(struct nexthop *nh,
3431 struct nh_dump_filter *filter, u8 family)
3432 {
3433 const struct net_device *dev;
3434 const struct nh_info *nhi;
3435
3436 if (filter->group_filter && !nh->is_group)
3437 return true;
3438
3439 if (!filter->dev_idx && !filter->master_idx && !family)
3440 return false;
3441
3442 if (nh->is_group)
3443 return true;
3444
3445 nhi = rtnl_dereference(nh->nh_info);
3446 if (family && nhi->family != family)
3447 return true;
3448
3449 dev = nhi->fib_nhc.nhc_dev;
3450 if (filter->dev_idx && (!dev || dev->ifindex != filter->dev_idx))
3451 return true;
3452
3453 if (filter->master_idx) {
3454 struct net_device *master;
3455
3456 if (!dev)
3457 return true;
3458
3459 master = netdev_master_upper_dev_get((struct net_device *)dev);
3460 if (!master || master->ifindex != filter->master_idx)
3461 return true;
3462 }
3463
3464 return false;
3465 }
3466
3467 static int __nh_valid_dump_req(const struct nlmsghdr *nlh, struct nlattr **tb,
3468 struct nh_dump_filter *filter,
3469 struct netlink_ext_ack *extack)
3470 {
3471 struct nhmsg *nhm;
3472 u32 idx;
3473
3474 if (tb[NHA_OIF]) {
3475 idx = nla_get_u32(tb[NHA_OIF]);
3476 if (idx > INT_MAX) {
3477 NL_SET_ERR_MSG(extack, "Invalid device index");
3478 return -EINVAL;
3479 }
3480 filter->dev_idx = idx;
3481 }
3482 if (tb[NHA_MASTER]) {
3483 idx = nla_get_u32(tb[NHA_MASTER]);
3484 if (idx > INT_MAX) {
3485 NL_SET_ERR_MSG(extack, "Invalid master device index");
3486 return -EINVAL;
3487 }
3488 filter->master_idx = idx;
3489 }
3490 filter->group_filter = nla_get_flag(tb[NHA_GROUPS]);
3491 filter->fdb_filter = nla_get_flag(tb[NHA_FDB]);
3492
3493 nhm = nlmsg_data(nlh);
3494 if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
3495 NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request");
3496 return -EINVAL;
3497 }
3498
3499 return 0;
3500 }
3501
3502 static int nh_valid_dump_req(const struct nlmsghdr *nlh,
3503 struct nh_dump_filter *filter,
3504 struct netlink_callback *cb)
3505 {
3506 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump)];
3507 int err;
3508
3509 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3510 ARRAY_SIZE(rtm_nh_policy_dump) - 1,
3511 rtm_nh_policy_dump, cb->extack);
3512 if (err < 0)
3513 return err;
3514
3515 filter->op_flags = nla_get_u32_default(tb[NHA_OP_FLAGS], 0);
3516
3517 return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
3518 }
3519
3520 struct rtm_dump_nh_ctx {
3521 u32 idx;
3522 };
3523
3524 static struct rtm_dump_nh_ctx *
3525 rtm_dump_nh_ctx(struct netlink_callback *cb)
3526 {
3527 struct rtm_dump_nh_ctx *ctx = (void *)cb->ctx;
3528
3529 BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
3530 return ctx;
3531 }
3532
3533 static int rtm_dump_walk_nexthops(struct sk_buff *skb,
3534 struct netlink_callback *cb,
3535 struct rb_root *root,
3536 struct rtm_dump_nh_ctx *ctx,
3537 int (*nh_cb)(struct sk_buff *skb,
3538 struct netlink_callback *cb,
3539 struct nexthop *nh, void *data),
3540 void *data)
3541 {
3542 struct rb_node *node;
3543 int s_idx;
3544 int err;
3545
3546 s_idx = ctx->idx;
3547
3548 /* If this is not the first invocation, ctx->idx will contain the id of
3549 * the last nexthop we processed. Instead of starting from the very
3550 * first element of the red/black tree again and linearly skipping the
3551 * (potentially large) set of nodes with an id smaller than s_idx, walk
3552 * the tree and find the left-most node whose id is >= s_idx. This
3553 * provides an efficient O(log n) starting point for the dump
3554 * continuation.
3555 */
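/* Example (illustrative): if the tree holds ids {1, 5, 9} and s_idx = 3,
 * the descent settles on id 5, the leftmost node with id >= s_idx, and
 * the dump resumes there instead of rescanning from id 1.
 */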
3556 if (s_idx != 0) {
3557 struct rb_node *tmp = root->rb_node;
3558
3559 node = NULL;
3560 while (tmp) {
3561 struct nexthop *nh;
3562
3563 nh = rb_entry(tmp, struct nexthop, rb_node);
3564 if (nh->id < s_idx) {
3565 tmp = tmp->rb_right;
3566 } else {
3567 /* Track current candidate and keep looking on
3568 * the left side to find the left-most
3569 * (smallest id) that is still >= s_idx.
3570 */
3571 node = tmp;
3572 tmp = tmp->rb_left;
3573 }
3574 }
3575 } else {
3576 node = rb_first(root);
3577 }
3578
3579 for (; node; node = rb_next(node)) {
3580 struct nexthop *nh;
3581
3582 nh = rb_entry(node, struct nexthop, rb_node);
3583
3584 ctx->idx = nh->id;
3585 err = nh_cb(skb, cb, nh, data);
3586 if (err)
3587 return err;
3588 }
3589
3590 return 0;
3591 }
3592
3593 static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
3594 struct nexthop *nh, void *data)
3595 {
3596 struct nhmsg *nhm = nlmsg_data(cb->nlh);
3597 struct nh_dump_filter *filter = data;
3598
3599 if (nh_dump_filtered(nh, filter, nhm->nh_family))
3600 return 0;
3601
3602 return nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
3603 NETLINK_CB(cb->skb).portid,
3604 cb->nlh->nlmsg_seq, NLM_F_MULTI, filter->op_flags);
3605 }
3606
3607 /* rtnl */
3608 static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
3609 {
3610 struct rtm_dump_nh_ctx *ctx = rtm_dump_nh_ctx(cb);
3611 struct net *net = sock_net(skb->sk);
3612 struct rb_root *root = &net->nexthop.rb_root;
3613 struct nh_dump_filter filter = {};
3614 int err;
3615
3616 err = nh_valid_dump_req(cb->nlh, &filter, cb);
3617 if (err < 0)
3618 return err;
3619
3620 err = rtm_dump_walk_nexthops(skb, cb, root, ctx,
3621 &rtm_dump_nexthop_cb, &filter);
3622
3623 cb->seq = net->nexthop.seq;
3624 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3625 return err;
3626 }
3627
3628 static struct nexthop *
3629 nexthop_find_group_resilient(struct net *net, u32 id,
3630 struct netlink_ext_ack *extack)
3631 {
3632 struct nh_group *nhg;
3633 struct nexthop *nh;
3634
3635 nh = nexthop_find_by_id(net, id);
3636 if (!nh)
3637 return ERR_PTR(-ENOENT);
3638
3639 if (!nh->is_group) {
3640 NL_SET_ERR_MSG(extack, "Not a nexthop group");
3641 return ERR_PTR(-EINVAL);
3642 }
3643
3644 nhg = rtnl_dereference(nh->nh_grp);
3645 if (!nhg->resilient) {
3646 NL_SET_ERR_MSG(extack, "Nexthop group not of type resilient");
3647 return ERR_PTR(-EINVAL);
3648 }
3649
3650 return nh;
3651 }
3652
3653 static int nh_valid_dump_nhid(struct nlattr *attr, u32 *nh_id_p,
3654 struct netlink_ext_ack *extack)
3655 {
3656 u32 idx;
3657
3658 if (attr) {
3659 idx = nla_get_u32(attr);
3660 if (!idx) {
3661 NL_SET_ERR_MSG(extack, "Invalid nexthop id");
3662 return -EINVAL;
3663 }
3664 *nh_id_p = idx;
3665 } else {
3666 *nh_id_p = 0;
3667 }
3668
3669 return 0;
3670 }
3671
3672 static int nh_valid_dump_bucket_req(const struct nlmsghdr *nlh,
3673 struct nh_dump_filter *filter,
3674 struct netlink_callback *cb)
3675 {
3676 struct nlattr *res_tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_dump)];
3677 struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump_bucket)];
3678 int err;
3679
3680 err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
3681 ARRAY_SIZE(rtm_nh_policy_dump_bucket) - 1,
3682 rtm_nh_policy_dump_bucket, NULL);
3683 if (err < 0)
3684 return err;
3685
3686 err = nh_valid_dump_nhid(tb[NHA_ID], &filter->nh_id, cb->extack);
3687 if (err)
3688 return err;
3689
3690 if (tb[NHA_RES_BUCKET]) {
3691 size_t max = ARRAY_SIZE(rtm_nh_res_bucket_policy_dump) - 1;
3692
3693 err = nla_parse_nested(res_tb, max,
3694 tb[NHA_RES_BUCKET],
3695 rtm_nh_res_bucket_policy_dump,
3696 cb->extack);
3697 if (err < 0)
3698 return err;
3699
3700 err = nh_valid_dump_nhid(res_tb[NHA_RES_BUCKET_NH_ID],
3701 &filter->res_bucket_nh_id,
3702 cb->extack);
3703 if (err)
3704 return err;
3705 }
3706
3707 return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
3708 }
3709
3710 struct rtm_dump_res_bucket_ctx {
3711 struct rtm_dump_nh_ctx nh;
3712 u16 bucket_index;
3713 };
3714
3715 static struct rtm_dump_res_bucket_ctx *
3716 rtm_dump_res_bucket_ctx(struct netlink_callback *cb)
3717 {
3718 struct rtm_dump_res_bucket_ctx *ctx = (void *)cb->ctx;
3719
3720 BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
3721 return ctx;
3722 }
3723
3724 struct rtm_dump_nexthop_bucket_data {
3725 struct rtm_dump_res_bucket_ctx *ctx;
3726 struct nh_dump_filter filter;
3727 };
3728
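/* Dump the buckets of one resilient group. Iteration starts at the saved
 * ctx->bucket_index, so a dump interrupted by a full skb resumes at the
 * right bucket; once the whole group has been emitted, the index is reset
 * to 0 for the next group.
 */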
static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb,
				      struct netlink_callback *cb,
				      struct nexthop *nh,
				      struct rtm_dump_nexthop_bucket_data *dd)
{
	u32 portid = NETLINK_CB(cb->skb).portid;
	struct nhmsg *nhm = nlmsg_data(cb->nlh);
	struct nh_res_table *res_table;
	struct nh_group *nhg;
	u16 bucket_index;
	int err;

	nhg = rtnl_dereference(nh->nh_grp);
	res_table = rtnl_dereference(nhg->res_table);
	for (bucket_index = dd->ctx->bucket_index;
	     bucket_index < res_table->num_nh_buckets;
	     bucket_index++) {
		struct nh_res_bucket *bucket;
		struct nh_grp_entry *nhge;

		bucket = &res_table->nh_buckets[bucket_index];
		nhge = rtnl_dereference(bucket->nh_entry);
		if (nh_dump_filtered(nhge->nh, &dd->filter, nhm->nh_family))
			continue;

		if (dd->filter.res_bucket_nh_id &&
		    dd->filter.res_bucket_nh_id != nhge->nh->id)
			continue;

		dd->ctx->bucket_index = bucket_index;
		err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
					 RTM_NEWNEXTHOPBUCKET, portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 cb->extack);
		if (err)
			return err;
	}

	dd->ctx->bucket_index = 0;

	return 0;
}

static int rtm_dump_nexthop_bucket_cb(struct sk_buff *skb,
				      struct netlink_callback *cb,
				      struct nexthop *nh, void *data)
{
	struct rtm_dump_nexthop_bucket_data *dd = data;
	struct nh_group *nhg;

	if (!nh->is_group)
		return 0;

	nhg = rtnl_dereference(nh->nh_grp);
	if (!nhg->resilient)
		return 0;

	return rtm_dump_nexthop_bucket_nh(skb, cb, nh, dd);
}

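/* Dump handler for RTM_GETNEXTHOPBUCKET (roughly what "ip nexthop bucket
 * show" issues in iproute2; command name is indicative). With an NHA_ID
 * filter, only that group is dumped, after checking that it really is a
 * resilient group; otherwise the whole rb-tree is walked, and
 * non-resilient nexthops are skipped by the callback above.
 */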
/* rtnl */
static int rtm_dump_nexthop_bucket(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct rtm_dump_res_bucket_ctx *ctx = rtm_dump_res_bucket_ctx(cb);
	struct rtm_dump_nexthop_bucket_data dd = { .ctx = ctx };
	struct net *net = sock_net(skb->sk);
	struct nexthop *nh;
	int err;

	err = nh_valid_dump_bucket_req(cb->nlh, &dd.filter, cb);
	if (err)
		return err;

	if (dd.filter.nh_id) {
		nh = nexthop_find_group_resilient(net, dd.filter.nh_id,
						  cb->extack);
		if (IS_ERR(nh))
			return PTR_ERR(nh);
		err = rtm_dump_nexthop_bucket_nh(skb, cb, nh, &dd);
	} else {
		struct rb_root *root = &net->nexthop.rb_root;

		err = rtm_dump_walk_nexthops(skb, cb, root, &ctx->nh,
					     &rtm_dump_nexthop_bucket_cb, &dd);
	}

	cb->seq = net->nexthop.seq;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
	return err;
}

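/* Request validation for a single-bucket get: the nested NHA_RES_BUCKET
 * attribute must carry NHA_RES_BUCKET_INDEX, and the top-level message
 * must name exactly one nexthop group via NHA_ID.
 */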
static int nh_valid_get_bucket_req_res_bucket(struct nlattr *res,
					      u16 *bucket_index,
					      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_get)];
	int err;

	err = nla_parse_nested(tb, ARRAY_SIZE(rtm_nh_res_bucket_policy_get) - 1,
			       res, rtm_nh_res_bucket_policy_get, extack);
	if (err < 0)
		return err;

	if (!tb[NHA_RES_BUCKET_INDEX]) {
		NL_SET_ERR_MSG(extack, "Bucket index is missing");
		return -EINVAL;
	}

	*bucket_index = nla_get_u16(tb[NHA_RES_BUCKET_INDEX]);
	return 0;
}

static int nh_valid_get_bucket_req(const struct nlmsghdr *nlh,
				   u32 *id, u16 *bucket_index,
				   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get_bucket)];
	int err;

	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
			  ARRAY_SIZE(rtm_nh_policy_get_bucket) - 1,
			  rtm_nh_policy_get_bucket, extack);
	if (err < 0)
		return err;

	err = nh_valid_get_del_req(nlh, tb, id, NULL, extack);
	if (err)
		return err;

	if (!tb[NHA_RES_BUCKET]) {
		NL_SET_ERR_MSG(extack, "Bucket information is missing");
		return -EINVAL;
	}

	err = nh_valid_get_bucket_req_res_bucket(tb[NHA_RES_BUCKET],
						 bucket_index, extack);
	if (err)
		return err;

	return 0;
}

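/* Fetch a single bucket of a resilient group and unicast it back to the
 * requester (roughly "ip nexthop bucket get id <ID> index <IDX>" in
 * iproute2; the exact syntax is indicative). The reply is built into a
 * freshly allocated skb; a bucket index past the end of the group's
 * bucket table yields -ENOENT.
 */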
/* rtnl */
static int rtm_get_nexthop_bucket(struct sk_buff *in_skb, struct nlmsghdr *nlh,
				  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nh_res_table *res_table;
	struct sk_buff *skb = NULL;
	struct nh_group *nhg;
	struct nexthop *nh;
	u16 bucket_index;
	int err;
	u32 id;

	err = nh_valid_get_bucket_req(nlh, &id, &bucket_index, extack);
	if (err)
		return err;

	nh = nexthop_find_group_resilient(net, id, extack);
	if (IS_ERR(nh))
		return PTR_ERR(nh);

	nhg = rtnl_dereference(nh->nh_grp);
	res_table = rtnl_dereference(nhg->res_table);
	if (bucket_index >= res_table->num_nh_buckets) {
		NL_SET_ERR_MSG(extack, "Bucket index out of bounds");
		return -ENOENT;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	err = nh_fill_res_bucket(skb, nh, &res_table->nh_buckets[bucket_index],
				 bucket_index, RTM_NEWNEXTHOPBUCKET,
				 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
				 0, extack);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		goto errout_free;
	}

	return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);

errout_free:
	kfree_skb(skb);
	return err;
}

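/* A device MTU change can invalidate MTU state cached on routes through
 * that device. Walk the per-device hash chain and let
 * fib_nhc_update_mtu() refresh every IPv4 nexthop bound to @dev.
 */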
static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu)
{
	unsigned int hash = nh_dev_hashfn(dev->ifindex);
	struct net *net = dev_net(dev);
	struct hlist_head *head = &net->nexthop.devhash[hash];
	struct hlist_node *n;
	struct nh_info *nhi;

	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
		if (nhi->fib_nhc.nhc_dev == dev) {
			if (nhi->family == AF_INET)
				fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu,
						   orig_mtu);
		}
	}
}

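/* Netdevice notifier: going down or unregistering flushes all nexthops
 * using the device; NETDEV_CHANGE flushes as well once the device is
 * neither RUNNING nor LOWER_UP; an MTU change propagates the new MTU and
 * flushes the IPv4 route cache.
 */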
/* rtnl */
static int nh_netdev_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_info_ext *info_ext;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		nexthop_flush_dev(dev, event);
		break;
	case NETDEV_CHANGE:
		if (!(netif_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
			nexthop_flush_dev(dev, event);
		break;
	case NETDEV_CHANGEMTU:
		info_ext = ptr;
		nexthop_sync_mtu(dev, info_ext->ext.mtu);
		rt_cache_flush(dev_net(dev));
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block nh_netdev_notifier = {
	.notifier_call = nh_netdev_event,
};

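/* Replay every nexthop in the netns to @nb as @event_type. Used to bring
 * a newly registered listener up to date (REPLACE) and to let it clean up
 * on unregistration (DEL).
 */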
static int nexthops_dump(struct net *net, struct notifier_block *nb,
			 enum nexthop_event_type event_type,
			 struct netlink_ext_ack *extack)
{
	struct rb_root *root = &net->nexthop.rb_root;
	struct rb_node *node;
	int err = 0;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct nexthop *nh;

		nh = rb_entry(node, struct nexthop, rb_node);
		err = call_nexthop_notifier(nb, net, event_type, nh, extack);
		if (err)
			break;
	}

	return err;
}

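/* Register a listener for nexthop events. Existing nexthops are replayed
 * as NEXTHOP_EVENT_REPLACE before the block is added to the chain, both
 * under RTNL, so the listener sees a consistent snapshot plus updates.
 *
 * A minimal caller sketch (names are illustrative, not from this file):
 *
 *	static struct notifier_block my_nh_nb = {
 *		.notifier_call = my_nh_event_cb,
 *	};
 *
 *	err = register_nexthop_notifier(net, &my_nh_nb, extack);
 *	...
 *	unregister_nexthop_notifier(net, &my_nh_nb);
 */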
int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
			      struct netlink_ext_ack *extack)
{
	int err;

	rtnl_lock();
	err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack);
	if (err)
		goto unlock;
	err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
					       nb);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_nexthop_notifier);

int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
	int err;

	err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
						 nb);
	if (!err)
		nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
	return err;
}
EXPORT_SYMBOL(__unregister_nexthop_notifier);

int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = __unregister_nexthop_notifier(net, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_nexthop_notifier);

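/* Driver API: reflect hardware state onto a nexthop's RTNH_F_OFFLOAD and
 * RTNH_F_TRAP flags. Runs under RCU only; an ID that no longer resolves
 * is silently ignored.
 */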
void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap)
{
	struct nexthop *nexthop;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop)
		goto out;

	nexthop->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
	if (offload)
		nexthop->nh_flags |= RTNH_F_OFFLOAD;
	if (trap)
		nexthop->nh_flags |= RTNH_F_TRAP;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_set_hw_flags);

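/* As above, but for a single bucket of a resilient group: update the
 * bucket's offload/trap flags as reported by the driver.
 */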
void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
				 bool offload, bool trap)
{
	struct nh_res_table *res_table;
	struct nh_res_bucket *bucket;
	struct nexthop *nexthop;
	struct nh_group *nhg;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop || !nexthop->is_group)
		goto out;

	nhg = rcu_dereference(nexthop->nh_grp);
	if (!nhg->resilient)
		goto out;

	/* Dereference the table once and bounds-check against it, rather
	 * than peeking at the __rcu pointer directly.
	 */
	res_table = rcu_dereference(nhg->res_table);
	if (bucket_index >= res_table->num_nh_buckets)
		goto out;

	bucket = &res_table->nh_buckets[bucket_index];
	bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
	if (offload)
		bucket->nh_flags |= RTNH_F_OFFLOAD;
	if (trap)
		bucket->nh_flags |= RTNH_F_TRAP;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_bucket_set_hw_flags);

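/* Driver API: fold a hardware activity bitmap into the group's buckets.
 * Every set bit marks the corresponding bucket busy, which keeps the
 * idle timer from treating it as eligible for migration.
 */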
void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
				     unsigned long *activity)
{
	struct nh_res_table *res_table;
	struct nexthop *nexthop;
	struct nh_group *nhg;
	u16 i;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop || !nexthop->is_group)
		goto out;

	nhg = rcu_dereference(nexthop->nh_grp);
	if (!nhg->resilient)
		goto out;

	/* Instead of silently ignoring some buckets, demand that the sizes
	 * be the same.
	 */
	res_table = rcu_dereference(nhg->res_table);
	if (num_buckets != res_table->num_nh_buckets)
		goto out;

	for (i = 0; i < num_buckets; i++) {
		if (test_bit(i, activity))
			nh_res_bucket_set_busy(&res_table->nh_buckets[i]);
	}

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_res_grp_activity_update);

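/* Per-netns setup and teardown: the rb-tree of nexthops, the per-device
 * hash used by the netdevice notifier, and the notifier chain. The
 * nexthops themselves are flushed under RTNL via the exit_rtnl hook.
 */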
static void __net_exit nexthop_net_exit_rtnl(struct net *net,
					     struct list_head *dev_to_kill)
{
	ASSERT_RTNL_NET(net);
	flush_all_nexthops(net);
}

static void __net_exit nexthop_net_exit(struct net *net)
{
	kfree(net->nexthop.devhash);
	net->nexthop.devhash = NULL;
}

static int __net_init nexthop_net_init(struct net *net)
{
	size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE;

	net->nexthop.rb_root = RB_ROOT;
	net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
	if (!net->nexthop.devhash)
		return -ENOMEM;
	BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);

	return 0;
}

static struct pernet_operations nexthop_net_ops = {
	.init = nexthop_net_init,
	.exit = nexthop_net_exit,
	.exit_rtnl = nexthop_net_exit_rtnl,
};

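/* rtnetlink message handlers: NEWNEXTHOP/DELNEXTHOP take a doit handler,
 * GETNEXTHOP and GETNEXTHOPBUCKET take both doit and dumpit. The PF_INET
 * and PF_INET6 rows route family-qualified requests to the same handlers
 * as the PF_UNSPEC ones, and RTNL_FLAG_DOIT_PERNET opts those doit
 * handlers into per-netns RTNL locking.
 */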
static const struct rtnl_msg_handler nexthop_rtnl_msg_handlers[] __initconst = {
	{.msgtype = RTM_NEWNEXTHOP, .doit = rtm_new_nexthop,
	 .flags = RTNL_FLAG_DOIT_PERNET},
	{.msgtype = RTM_DELNEXTHOP, .doit = rtm_del_nexthop,
	 .flags = RTNL_FLAG_DOIT_PERNET},
	{.msgtype = RTM_GETNEXTHOP, .doit = rtm_get_nexthop,
	 .dumpit = rtm_dump_nexthop},
	{.msgtype = RTM_GETNEXTHOPBUCKET, .doit = rtm_get_nexthop_bucket,
	 .dumpit = rtm_dump_nexthop_bucket},
	{.protocol = PF_INET, .msgtype = RTM_NEWNEXTHOP,
	 .doit = rtm_new_nexthop, .flags = RTNL_FLAG_DOIT_PERNET},
	{.protocol = PF_INET, .msgtype = RTM_GETNEXTHOP,
	 .dumpit = rtm_dump_nexthop},
	{.protocol = PF_INET6, .msgtype = RTM_NEWNEXTHOP,
	 .doit = rtm_new_nexthop, .flags = RTNL_FLAG_DOIT_PERNET},
	{.protocol = PF_INET6, .msgtype = RTM_GETNEXTHOP,
	 .dumpit = rtm_dump_nexthop},
};

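/* Subsystem init: pernet state first, then the netdevice notifier, then
 * the rtnetlink handlers that expose the UAPI.
 */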
static int __init nexthop_init(void)
{
	register_pernet_subsys(&nexthop_net_ops);

	register_netdevice_notifier(&nh_netdev_notifier);

	rtnl_register_many(nexthop_rtnl_msg_handlers);

	return 0;
}
subsys_initcall(nexthop_init);