Lines Matching refs:idev
80 static void mld_ifc_event(struct inet6_dev *idev);
81 static bool mld_in_v1_mode(const struct inet6_dev *idev);
85 static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
88 static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
92 struct inet6_dev *idev);
111 #define mc_assert_locked(idev) \
112 lockdep_assert_held(&(idev)->mc_lock)
114 #define mc_dereference(e, idev) \
115 rcu_dereference_protected(e, lockdep_is_held(&(idev)->mc_lock))
131 for (psf = mc_dereference((mc)->mca_sources, mc->idev); \
133 psf = mc_dereference(psf->sf_next, mc->idev))
141 for (psf = mc_dereference((mc)->mca_tomb, mc->idev); \
143 psf = mc_dereference(psf->sf_next, mc->idev))
145 #define for_each_mc_mclock(idev, mc) \
146 for (mc = mc_dereference((idev)->mc_list, idev); \
148 mc = mc_dereference(mc->next, idev))
150 #define for_each_mc_rcu(idev, mc) \
151 for (mc = rcu_dereference((idev)->mc_list); \
155 #define for_each_mc_tomb(idev, mc) \
156 for (mc = mc_dereference((idev)->mc_tomb, idev); \
158 mc = mc_dereference(mc->next, idev))
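
All of these helpers revolve around the per-device idev->mc_lock: mc_assert_locked() is a lockdep annotation, mc_dereference() justifies an RCU-protected pointer load by holding that mutex, and the for_each_* iterators are built on top of them (for_each_mc_rcu() being the exception that runs under rcu_read_lock() instead). A minimal sketch of the intended calling pattern, assuming this file's definitions and headers; the walk function name is hypothetical, not from the source:

	/* Hypothetical example: walk idev->mc_list the way for_each_mc_mclock()
	 * expects, i.e. with idev->mc_lock held for the whole traversal.
	 */
	static void mc_walk_example(struct inet6_dev *idev)
	{
		struct ifmcaddr6 *mc;

		mutex_lock(&idev->mc_lock);
		mc_assert_locked(idev);	/* lockdep_assert_held(&idev->mc_lock) */
		for_each_mc_mclock(idev, mc) {
			/* mc came from mc_dereference(), i.e. an
			 * rcu_dereference_protected() made legal by mc_lock.
			 */
		}
		mutex_unlock(&idev->mc_lock);
	}
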
160 static int unsolicited_report_interval(struct inet6_dev *idev)
164 if (mld_in_v1_mode(idev))
165 iv = READ_ONCE(idev->cnf.mldv1_unsolicited_report_interval);
167 iv = READ_ONCE(idev->cnf.mldv2_unsolicited_report_interval);
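
The value picked here is only ever an upper bound: callers such as the join path at line 2621 above draw a random delay below it. A minimal usage sketch, assuming the helpers shown in this listing:

	/* Pick a random delay in jiffies, uniformly below the configured
	 * unsolicited report interval for the current MLD mode.
	 */
	unsigned long delay;

	delay = get_random_u32_below(unsolicited_report_interval(idev));
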
263 struct inet6_dev *idev = in6_dev_get(dev);
265 ip6_mc_leave_src(sk, mc_lst, idev);
267 if (idev) {
268 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
269 in6_dev_put(idev);
310 struct inet6_dev *idev;
329 idev = in6_dev_get(dev);
332 return idev;
366 struct inet6_dev *idev;
377 idev = ip6_mc_find_dev(net, group, pgsr->gsr_interface);
378 if (!idev)
381 mutex_lock(&idev->mc_lock);
383 if (idev->dead) {
408 ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
409 ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
433 ip6_mc_del_src(idev, group, omode, 1, source, 1);
483 ip6_mc_add_src(idev, group, omode, 1, source, 1);
485 mutex_unlock(&idev->mc_lock);
486 in6_dev_put(idev);
500 struct inet6_dev *idev;
512 idev = ip6_mc_find_dev(net, group, gsf->gf_interface);
513 if (!idev)
516 mutex_lock(&idev->mc_lock);
518 if (idev->dead) {
556 err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
565 ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
570 ip6_mc_del_src(idev, group, pmc->sfmode,
575 ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
583 mutex_unlock(&idev->mc_lock);
584 in6_dev_put(idev);
674 struct net_device *dev = mc->idev->dev;
677 mc_assert_locked(mc->idev);
692 if (mld_in_v1_mode(mc->idev)) {
703 mc->mca_crcount = mc->idev->mc_qrv;
705 mld_ifc_event(mc->idev);
710 struct net_device *dev = mc->idev->dev;
713 mc_assert_locked(mc->idev);
728 if (!mc->idev->dead)
736 static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
740 mc_assert_locked(idev);
752 pmc->idev = im->idev;
753 in6_dev_hold(idev);
755 pmc->mca_crcount = idev->mc_qrv;
761 mc_dereference(im->mca_tomb, idev));
763 mc_dereference(im->mca_sources, idev));
771 rcu_assign_pointer(pmc->next, idev->mc_tomb);
772 rcu_assign_pointer(idev->mc_tomb, pmc);
775 static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
781 mc_assert_locked(idev);
784 for_each_mc_tomb(idev, pmc) {
794 rcu_assign_pointer(idev->mc_tomb, pmc->next);
796 im->idev = pmc->idev;
799 mc_dereference(pmc->mca_tomb, pmc->idev),
800 lockdep_is_held(&im->idev->mc_lock));
804 mc_dereference(pmc->mca_sources, pmc->idev),
805 lockdep_is_held(&im->idev->mc_lock));
808 psf->sf_crcount = idev->mc_qrv;
810 im->mca_crcount = idev->mc_qrv;
813 in6_dev_put(pmc->idev);
817 static void mld_clear_delrec(struct inet6_dev *idev)
821 mc_assert_locked(idev);
823 pmc = mc_dereference(idev->mc_tomb, idev);
824 RCU_INIT_POINTER(idev->mc_tomb, NULL);
827 nextpmc = mc_dereference(pmc->next, idev);
829 in6_dev_put(pmc->idev);
834 for_each_mc_mclock(idev, pmc) {
837 psf = mc_dereference(pmc->mca_tomb, idev);
840 psf_next = mc_dereference(psf->sf_next, idev);
846 static void mld_clear_query(struct inet6_dev *idev)
848 spin_lock_bh(&idev->mc_query_lock);
849 __skb_queue_purge(&idev->mc_query_queue);
850 spin_unlock_bh(&idev->mc_query_lock);
853 static void mld_clear_report(struct inet6_dev *idev)
855 spin_lock_bh(&idev->mc_report_lock);
856 __skb_queue_purge(&idev->mc_report_queue);
857 spin_unlock_bh(&idev->mc_report_lock);
863 in6_dev_put(mc->idev);
868 static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
874 mc_assert_locked(idev);
883 mc->idev = idev; /* reference taken by caller */
940 struct inet6_dev *idev;
943 /* we need to take a reference on idev */
944 idev = in6_dev_get(dev);
945 if (!idev)
948 mutex_lock(&idev->mc_lock);
950 if (READ_ONCE(idev->dead)) {
951 mutex_unlock(&idev->mc_lock);
952 in6_dev_put(idev);
956 for_each_mc_mclock(idev, mc) {
959 ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0);
960 mutex_unlock(&idev->mc_lock);
961 in6_dev_put(idev);
966 mc = mca_alloc(idev, addr, mode);
968 mutex_unlock(&idev->mc_lock);
969 in6_dev_put(idev);
973 rcu_assign_pointer(mc->next, idev->mc_list);
974 rcu_assign_pointer(idev->mc_list, mc);
976 mld_del_delrec(idev, mc);
979 mutex_unlock(&idev->mc_lock);
993 int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
997 mutex_lock(&idev->mc_lock);
999 for (map = &idev->mc_list;
1000 (ma = mc_dereference(*map, idev));
1007 inet6_ifmcaddr_notify(idev->dev, ma,
1010 mutex_unlock(&idev->mc_lock);
1015 mutex_unlock(&idev->mc_lock);
1020 mutex_unlock(&idev->mc_lock);
1026 struct inet6_dev *idev;
1029 idev = in6_dev_get(dev);
1030 if (!idev)
1033 err = __ipv6_dev_mc_dec(idev, addr);
1034 in6_dev_put(idev);
1046 struct inet6_dev *idev;
1051 idev = __in6_dev_get(dev);
1052 if (!idev)
1054 for_each_mc_rcu(idev, mc) {
1081 static void mld_gq_start_work(struct inet6_dev *idev)
1083 unsigned long tv = get_random_u32_below(idev->mc_maxdelay);
1085 mc_assert_locked(idev);
1087 idev->mc_gq_running = 1;
1088 if (!mod_delayed_work(mld_wq, &idev->mc_gq_work, tv + 2))
1089 in6_dev_hold(idev);
1092 static void mld_gq_stop_work(struct inet6_dev *idev)
1094 mc_assert_locked(idev);
1096 idev->mc_gq_running = 0;
1097 if (cancel_delayed_work(&idev->mc_gq_work))
1098 __in6_dev_put(idev);
1101 static void mld_ifc_start_work(struct inet6_dev *idev, unsigned long delay)
1105 mc_assert_locked(idev);
1107 if (!mod_delayed_work(mld_wq, &idev->mc_ifc_work, tv + 2))
1108 in6_dev_hold(idev);
1111 static void mld_ifc_stop_work(struct inet6_dev *idev)
1113 mc_assert_locked(idev);
1115 idev->mc_ifc_count = 0;
1116 if (cancel_delayed_work(&idev->mc_ifc_work))
1117 __in6_dev_put(idev);
1120 static void mld_dad_start_work(struct inet6_dev *idev, unsigned long delay)
1124 mc_assert_locked(idev);
1126 if (!mod_delayed_work(mld_wq, &idev->mc_dad_work, tv + 2))
1127 in6_dev_hold(idev);
1130 static void mld_dad_stop_work(struct inet6_dev *idev)
1132 if (cancel_delayed_work(&idev->mc_dad_work))
1133 __in6_dev_put(idev);
1136 static void mld_query_stop_work(struct inet6_dev *idev)
1138 spin_lock_bh(&idev->mc_query_lock);
1139 if (cancel_delayed_work(&idev->mc_query_work))
1140 __in6_dev_put(idev);
1141 spin_unlock_bh(&idev->mc_query_lock);
1144 static void mld_report_stop_work(struct inet6_dev *idev)
1146 if (cancel_delayed_work_sync(&idev->mc_report_work))
1147 __in6_dev_put(idev);
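
The start/stop helpers above all follow the same reference-counting discipline around the shared mld_wq workqueue: mod_delayed_work() returns false when the work item was idle and has just been queued, so a fresh idev reference is taken for the handler to drop later; cancel_delayed_work() returns true when a pending item was removed before it could run, so that reference is released on the spot (mld_report_stop_work() alone uses cancel_delayed_work_sync() to also wait out a handler that is already executing). A sketch of the pairing, using mc_ifc_work as the example and hypothetical function names:

	static void example_start(struct inet6_dev *idev, unsigned long delay)
	{
		/* false => the work was idle and is now queued: hold idev so
		 * the handler has a reference to drop when it completes.
		 */
		if (!mod_delayed_work(mld_wq, &idev->mc_ifc_work, delay))
			in6_dev_hold(idev);
	}

	static void example_stop(struct inet6_dev *idev)
	{
		/* true => a pending item was removed before running: drop the
		 * reference it would otherwise have released itself.
		 */
		if (cancel_delayed_work(&idev->mc_ifc_work))
			__in6_dev_put(idev);
	}
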
1155 mc_assert_locked(ma->idev);
1182 mc_assert_locked(pmc->idev);
1212 mc_assert_locked(pmc->idev);
1239 static int mld_force_mld_version(const struct inet6_dev *idev)
1241 const struct net *net = dev_net(idev->dev);
1249 return all_force ?: READ_ONCE(idev->cnf.force_mld_version);
1252 static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
1254 return mld_force_mld_version(idev) == 2;
1257 static bool mld_in_v1_mode_only(const struct inet6_dev *idev)
1259 return mld_force_mld_version(idev) == 1;
1262 static bool mld_in_v1_mode(const struct inet6_dev *idev)
1264 if (mld_in_v2_mode_only(idev))
1266 if (mld_in_v1_mode_only(idev))
1268 if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
1274 static void mld_set_v1_mode(struct inet6_dev *idev)
1284 switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;
1286 idev->mc_v1_seen = jiffies + switchback;
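
The timeout computed here is the "Older Version Querier Present" timeout of RFC 2710/RFC 3810: robustness variable times query interval plus query response interval, after which mld_in_v1_mode() (line 1268 above) stops reporting in MLDv1 compatibility mode. A worked sketch; the default values below are the protocol's defaults, assumed here rather than shown in the listing:

	/* With qrv = 2, mc_qi = 125 s and mc_qri = 10 s:
	 *   switchback = 2 * 125 s + 10 s = 260 s
	 * so after hearing an MLDv1 query the interface keeps answering in
	 * MLDv1 mode for roughly 260 seconds before reverting to MLDv2.
	 */
	unsigned long switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;

	idev->mc_v1_seen = jiffies + switchback;	/* checked via time_before() */
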
1289 static void mld_update_qrv(struct inet6_dev *idev,
1302 WARN_ON(idev->mc_qrv == 0);
1305 idev->mc_qrv = mlh2->mld2q_qrv;
1307 if (unlikely(idev->mc_qrv < min_qrv)) {
1309 idev->mc_qrv, min_qrv);
1310 idev->mc_qrv = min_qrv;
1314 static void mld_update_qi(struct inet6_dev *idev,
1336 idev->mc_qi = mc_qqi * HZ;
1339 static void mld_update_qri(struct inet6_dev *idev,
1346 idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));
1349 static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
1355 if (mld_in_v2_mode_only(idev))
1383 mld_set_v1_mode(idev);
1386 mld_gq_stop_work(idev);
1388 mld_ifc_stop_work(idev);
1390 mld_clear_delrec(idev);
1395 static void mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
1400 mld_update_qrv(idev, mld);
1401 mld_update_qi(idev, mld);
1402 mld_update_qri(idev, mld);
1404 idev->mc_maxdelay = *max_delay;
1412 struct inet6_dev *idev = __in6_dev_get(skb->dev);
1414 if (!idev || idev->dead)
1417 spin_lock_bh(&idev->mc_query_lock);
1418 if (skb_queue_len(&idev->mc_query_queue) < MLD_MAX_SKBS) {
1419 __skb_queue_tail(&idev->mc_query_queue, skb);
1420 if (!mod_delayed_work(mld_wq, &idev->mc_query_work, 0))
1421 in6_dev_hold(idev);
1424 spin_unlock_bh(&idev->mc_query_lock);
1434 struct inet6_dev *idev;
1461 idev = in6_dev_get(skb->dev);
1462 if (!idev)
1475 } else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) {
1476 err = mld_process_v1(idev, mld, &max_delay,
1489 mld_process_v2(idev, mlh2, &max_delay);
1495 mld_gq_start_work(idev);
1512 for_each_mc_mclock(idev, ma) {
1516 for_each_mc_mclock(idev, ma) {
1538 in6_dev_put(idev);
1545 struct inet6_dev *idev = container_of(to_delayed_work(work),
1555 spin_lock_bh(&idev->mc_query_lock);
1556 while ((skb = __skb_dequeue(&idev->mc_query_queue))) {
1564 spin_unlock_bh(&idev->mc_query_lock);
1566 mutex_lock(&idev->mc_lock);
1569 mutex_unlock(&idev->mc_lock);
1571 if (rework && queue_delayed_work(mld_wq, &idev->mc_query_work, 0))
1574 in6_dev_put(idev);
1580 struct inet6_dev *idev = __in6_dev_get(skb->dev);
1582 if (!idev || idev->dead)
1585 spin_lock_bh(&idev->mc_report_lock);
1586 if (skb_queue_len(&idev->mc_report_queue) < MLD_MAX_SKBS) {
1587 __skb_queue_tail(&idev->mc_report_queue, skb);
1588 if (!mod_delayed_work(mld_wq, &idev->mc_report_work, 0))
1589 in6_dev_hold(idev);
1592 spin_unlock_bh(&idev->mc_report_lock);
1599 struct inet6_dev *idev;
1624 idev = in6_dev_get(skb->dev);
1625 if (!idev)
1632 for_each_mc_mclock(idev, ma) {
1642 in6_dev_put(idev);
1649 struct inet6_dev *idev = container_of(to_delayed_work(work),
1658 spin_lock_bh(&idev->mc_report_lock);
1659 while ((skb = __skb_dequeue(&idev->mc_report_queue))) {
1667 spin_unlock_bh(&idev->mc_report_lock);
1669 mutex_lock(&idev->mc_lock);
1672 mutex_unlock(&idev->mc_lock);
1674 if (rework && queue_delayed_work(mld_wq, &idev->mc_report_work, 0))
1677 in6_dev_put(idev);
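
Both the query and the report receive paths defer the real work to mld_wq: the softirq side queues the skb on a bounded, spinlock-protected queue and kicks the delayed work, while the worker drains the queue and processes each packet under idev->mc_lock, rescheduling itself if entries remain. A condensed sketch of the enqueue side, simplified from the fragments above (the function name is hypothetical, and dropping a packet when the queue is full is an assumption of this sketch):

	static void report_enqueue_example(struct inet6_dev *idev,
					   struct sk_buff *skb)
	{
		spin_lock_bh(&idev->mc_report_lock);
		if (skb_queue_len(&idev->mc_report_queue) < MLD_MAX_SKBS) {
			__skb_queue_tail(&idev->mc_report_queue, skb);
			/* hold idev for the work item that will drain the queue */
			if (!mod_delayed_work(mld_wq, &idev->mc_report_work, 0))
				in6_dev_hold(idev);
		} else {
			kfree_skb(skb);	/* queue bounded at MLD_MAX_SKBS */
		}
		spin_unlock_bh(&idev->mc_report_lock);
	}
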
1761 static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
1765 struct net_device *dev = idev->dev;
1826 struct inet6_dev *idev;
1833 idev = __in6_dev_get(skb->dev);
1834 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
1865 ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
1866 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1868 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1891 skb = mld_newpack(pmc->idev, mtu);
1915 struct inet6_dev *idev = pmc->idev;
1916 struct net_device *dev = idev->dev;
1921 mc_assert_locked(idev);
1950 skb = mld_newpack(idev, mtu);
1955 for (psf = mc_dereference(*psf_list, idev);
1960 psf_next = mc_dereference(psf->sf_next, idev);
1988 skb = mld_newpack(idev, mtu);
2008 mc_dereference(psf->sf_next, idev));
2011 mc_dereference(psf->sf_next, idev));
2041 static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
2046 mc_assert_locked(idev);
2049 for_each_mc_mclock(idev, pmc) {
2070 static void mld_clear_zeros(struct ip6_sf_list __rcu **ppsf, struct inet6_dev *idev)
2075 for (psf = mc_dereference(*ppsf, idev);
2078 psf_next = mc_dereference(psf->sf_next, idev);
2082 mc_dereference(psf->sf_next, idev));
2085 mc_dereference(psf->sf_next, idev));
2093 static void mld_send_cr(struct inet6_dev *idev)
2101 for (pmc = mc_dereference(idev->mc_tomb, idev);
2104 pmc_next = mc_dereference(pmc->next, idev);
2118 mld_clear_zeros(&pmc->mca_tomb, idev);
2119 mld_clear_zeros(&pmc->mca_sources, idev);
2128 rcu_assign_pointer(idev->mc_tomb, pmc_next);
2129 in6_dev_put(pmc->idev);
2136 for_each_mc_mclock(idev, pmc) {
2167 struct inet6_dev *idev;
2194 idev = __in6_dev_get(dev);
2195 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
2197 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
2243 ICMP6MSGOUT_INC_STATS(net, idev, type);
2244 ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
2246 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
2256 static void mld_send_initial_cr(struct inet6_dev *idev)
2262 mc_assert_locked(idev);
2264 if (mld_in_v1_mode(idev))
2268 for_each_mc_mclock(idev, pmc) {
2279 void ipv6_mc_dad_complete(struct inet6_dev *idev)
2281 mutex_lock(&idev->mc_lock);
2282 idev->mc_dad_count = idev->mc_qrv;
2283 if (idev->mc_dad_count) {
2284 mld_send_initial_cr(idev);
2285 idev->mc_dad_count--;
2286 if (idev->mc_dad_count)
2287 mld_dad_start_work(idev,
2288 unsolicited_report_interval(idev));
2290 mutex_unlock(&idev->mc_lock);
2295 struct inet6_dev *idev = container_of(to_delayed_work(work),
2298 mutex_lock(&idev->mc_lock);
2299 mld_send_initial_cr(idev);
2300 if (idev->mc_dad_count) {
2301 idev->mc_dad_count--;
2302 if (idev->mc_dad_count)
2303 mld_dad_start_work(idev,
2304 unsolicited_report_interval(idev));
2306 mutex_unlock(&idev->mc_lock);
2307 in6_dev_put(idev);
2316 mc_assert_locked(pmc->idev);
2330 struct inet6_dev *idev = pmc->idev;
2335 mc_dereference(psf->sf_next, idev));
2338 mc_dereference(psf->sf_next, idev));
2341 !mld_in_v1_mode(idev)) {
2342 psf->sf_crcount = idev->mc_qrv;
2344 mc_dereference(pmc->mca_tomb, idev));
2354 static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2362 if (!idev)
2365 mc_assert_locked(idev);
2367 for_each_mc_mclock(idev, pmc) {
2396 pmc->mca_crcount = idev->mc_qrv;
2397 idev->mc_ifc_count = pmc->mca_crcount;
2400 mld_ifc_event(pmc->idev);
2402 mld_ifc_event(pmc->idev);
2414 mc_assert_locked(pmc->idev);
2443 mc_assert_locked(pmc->idev);
2460 int qrv = pmc->idev->mc_qrv;
2463 mc_assert_locked(pmc->idev);
2486 pmc->idev));
2490 pmc->idev));
2513 mc_dereference(pmc->mca_tomb, pmc->idev));
2524 static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2532 if (!idev)
2535 mc_assert_locked(idev);
2537 for_each_mc_mclock(idev, pmc) {
2573 pmc->mca_crcount = idev->mc_qrv;
2574 idev->mc_ifc_count = pmc->mca_crcount;
2577 mld_ifc_event(idev);
2579 mld_ifc_event(idev);
2588 mc_assert_locked(pmc->idev);
2590 for (psf = mc_dereference(pmc->mca_tomb, pmc->idev);
2593 nextpsf = mc_dereference(psf->sf_next, pmc->idev);
2597 for (psf = mc_dereference(pmc->mca_sources, pmc->idev);
2600 nextpsf = mc_dereference(psf->sf_next, pmc->idev);
2614 mc_assert_locked(ma->idev);
2619 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2621 delay = get_random_u32_below(unsolicited_report_interval(ma->idev));
2634 struct inet6_dev *idev)
2641 if (idev)
2642 mutex_lock(&idev->mc_lock);
2646 err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
2648 err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
2656 if (idev)
2657 mutex_unlock(&idev->mc_lock);
2664 mc_assert_locked(ma->idev);
2666 if (mld_in_v1_mode(ma->idev)) {
2668 igmp6_send(&ma->mca_addr, ma->idev->dev,
2672 mld_add_delrec(ma->idev, ma);
2673 mld_ifc_event(ma->idev);
2679 struct inet6_dev *idev = container_of(to_delayed_work(work),
2683 mutex_lock(&idev->mc_lock);
2684 mld_send_report(idev, NULL);
2685 idev->mc_gq_running = 0;
2686 mutex_unlock(&idev->mc_lock);
2688 in6_dev_put(idev);
2693 struct inet6_dev *idev = container_of(to_delayed_work(work),
2697 mutex_lock(&idev->mc_lock);
2698 mld_send_cr(idev);
2700 if (idev->mc_ifc_count) {
2701 idev->mc_ifc_count--;
2702 if (idev->mc_ifc_count)
2703 mld_ifc_start_work(idev,
2704 unsolicited_report_interval(idev));
2706 mutex_unlock(&idev->mc_lock);
2707 in6_dev_put(idev);
2710 static void mld_ifc_event(struct inet6_dev *idev)
2712 mc_assert_locked(idev);
2714 if (mld_in_v1_mode(idev))
2717 idev->mc_ifc_count = idev->mc_qrv;
2718 mld_ifc_start_work(idev, 1);
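
mld_ifc_event() arms the state-change retransmission machinery: the filter/source change report is sent mc_qrv times in total, with mld_ifc_work() (lines 2693-2707 above) decrementing mc_ifc_count and rescheduling itself at the unsolicited report interval between transmissions. A condensed sketch reassembled from the fragments above:

	/* In mld_ifc_event(), after a source/filter-mode change: */
	idev->mc_ifc_count = idev->mc_qrv;	/* number of reports to send */
	mld_ifc_start_work(idev, 1);		/* first one almost immediately */

	/* In mld_ifc_work(), under idev->mc_lock: */
	mld_send_cr(idev);			/* emit the change records */
	if (idev->mc_ifc_count) {
		idev->mc_ifc_count--;
		if (idev->mc_ifc_count)
			mld_ifc_start_work(idev,
					   unsolicited_report_interval(idev));
	}
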
2726 mutex_lock(&ma->idev->mc_lock);
2727 if (mld_in_v1_mode(ma->idev))
2728 igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2730 mld_send_report(ma->idev, ma);
2733 mutex_unlock(&ma->idev->mc_lock);
2740 void ipv6_mc_unmap(struct inet6_dev *idev)
2746 mutex_lock(&idev->mc_lock);
2747 for_each_mc_mclock(idev, i)
2749 mutex_unlock(&idev->mc_lock);
2752 void ipv6_mc_remap(struct inet6_dev *idev)
2754 ipv6_mc_up(idev);
2758 void ipv6_mc_down(struct inet6_dev *idev)
2762 mutex_lock(&idev->mc_lock);
2764 for_each_mc_mclock(idev, i)
2766 mutex_unlock(&idev->mc_lock);
2771 mld_query_stop_work(idev);
2772 mld_report_stop_work(idev);
2774 mutex_lock(&idev->mc_lock);
2775 mld_ifc_stop_work(idev);
2776 mld_gq_stop_work(idev);
2777 mutex_unlock(&idev->mc_lock);
2779 mld_dad_stop_work(idev);
2782 static void ipv6_mc_reset(struct inet6_dev *idev)
2784 idev->mc_qrv = sysctl_mld_qrv;
2785 idev->mc_qi = MLD_QI_DEFAULT;
2786 idev->mc_qri = MLD_QRI_DEFAULT;
2787 idev->mc_v1_seen = 0;
2788 idev->mc_maxdelay = unsolicited_report_interval(idev);
2793 void ipv6_mc_up(struct inet6_dev *idev)
2799 ipv6_mc_reset(idev);
2800 mutex_lock(&idev->mc_lock);
2801 for_each_mc_mclock(idev, i) {
2802 mld_del_delrec(idev, i);
2805 mutex_unlock(&idev->mc_lock);
2810 void ipv6_mc_init_dev(struct inet6_dev *idev)
2812 idev->mc_gq_running = 0;
2813 INIT_DELAYED_WORK(&idev->mc_gq_work, mld_gq_work);
2814 RCU_INIT_POINTER(idev->mc_tomb, NULL);
2815 idev->mc_ifc_count = 0;
2816 INIT_DELAYED_WORK(&idev->mc_ifc_work, mld_ifc_work);
2817 INIT_DELAYED_WORK(&idev->mc_dad_work, mld_dad_work);
2818 INIT_DELAYED_WORK(&idev->mc_query_work, mld_query_work);
2819 INIT_DELAYED_WORK(&idev->mc_report_work, mld_report_work);
2820 skb_queue_head_init(&idev->mc_query_queue);
2821 skb_queue_head_init(&idev->mc_report_queue);
2822 spin_lock_init(&idev->mc_query_lock);
2823 spin_lock_init(&idev->mc_report_lock);
2824 mutex_init(&idev->mc_lock);
2825 ipv6_mc_reset(idev);
2832 void ipv6_mc_destroy_dev(struct inet6_dev *idev)
2837 ipv6_mc_down(idev);
2838 mutex_lock(&idev->mc_lock);
2839 mld_clear_delrec(idev);
2840 mutex_unlock(&idev->mc_lock);
2841 mld_clear_query(idev);
2842 mld_clear_report(idev);
2849 __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);
2851 if (idev->cnf.forwarding)
2852 __ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);
2854 mutex_lock(&idev->mc_lock);
2855 while ((i = mc_dereference(idev->mc_list, idev))) {
2856 rcu_assign_pointer(idev->mc_list, mc_dereference(i->next, idev));
2861 mutex_unlock(&idev->mc_lock);
2864 static void ipv6_mc_rejoin_groups(struct inet6_dev *idev)
2868 mutex_lock(&idev->mc_lock);
2869 if (mld_in_v1_mode(idev)) {
2870 for_each_mc_mclock(idev, pmc)
2873 mld_send_report(idev, NULL);
2875 mutex_unlock(&idev->mc_lock);
2883 struct inet6_dev *idev = __in6_dev_get(dev);
2887 if (idev)
2888 ipv6_mc_rejoin_groups(idev);
2905 struct inet6_dev *idev;
2916 state->idev = NULL;
2918 struct inet6_dev *idev;
2919 idev = __in6_dev_get(state->dev);
2920 if (!idev)
2923 im = rcu_dereference(idev->mc_list);
2925 state->idev = idev;
2940 state->idev = NULL;
2943 state->idev = __in6_dev_get(state->dev);
2944 if (!state->idev)
2946 im = rcu_dereference(state->idev->mc_list);
2980 if (likely(state->idev))
2981 state->idev = NULL;
3011 struct inet6_dev *idev;
3024 state->idev = NULL;
3027 struct inet6_dev *idev;
3028 idev = __in6_dev_get(state->dev);
3029 if (unlikely(idev == NULL))
3032 im = rcu_dereference(idev->mc_list);
3037 state->idev = idev;
3055 state->idev = NULL;
3058 state->idev = __in6_dev_get(state->dev);
3059 if (!state->idev)
3061 state->im = rcu_dereference(state->idev->mc_list);
3103 if (likely(state->idev))
3104 state->idev = NULL;