Lines Matching full:bond

266 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
293 * @bond: bond device that got this skb for tx.
297 netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, in bond_dev_queue_xmit() argument
306 if (unlikely(netpoll_tx_running(bond->dev))) in bond_dev_queue_xmit()
307 return bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); in bond_dev_queue_xmit()
312 static bool bond_sk_check(struct bonding *bond) in bond_sk_check() argument
314 switch (BOND_MODE(bond)) { in bond_sk_check()
317 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34) in bond_sk_check()
325 static bool bond_xdp_check(struct bonding *bond) in bond_xdp_check() argument
327 switch (BOND_MODE(bond)) { in bond_xdp_check()
336 if (bond->params.xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC) in bond_xdp_check()
371 struct bonding *bond = netdev_priv(bond_dev); in bond_vlan_rx_add_vid() local
376 bond_for_each_slave(bond, slave, iter) { in bond_vlan_rx_add_vid()
386 bond_for_each_slave(bond, rollback_slave, iter) { in bond_vlan_rx_add_vid()
405 struct bonding *bond = netdev_priv(bond_dev); in bond_vlan_rx_kill_vid() local
409 bond_for_each_slave(bond, slave, iter) in bond_vlan_rx_kill_vid()
412 if (bond_is_lb(bond)) in bond_vlan_rx_kill_vid()
413 bond_alb_clear_vlan(bond, vid); in bond_vlan_rx_kill_vid()
432 struct bonding *bond; in bond_ipsec_dev() local
438 bond = netdev_priv(bond_dev); in bond_ipsec_dev()
439 if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) in bond_ipsec_dev()
442 slave = rcu_dereference(bond->curr_active_slave); in bond_ipsec_dev()
468 struct bonding *bond; in bond_ipsec_add_sa() local
476 bond = netdev_priv(bond_dev); in bond_ipsec_add_sa()
477 slave = rcu_dereference(bond->curr_active_slave); in bond_ipsec_add_sa()
505 mutex_lock(&bond->ipsec_lock); in bond_ipsec_add_sa()
506 list_add(&ipsec->list, &bond->ipsec_list); in bond_ipsec_add_sa()
507 mutex_unlock(&bond->ipsec_lock); in bond_ipsec_add_sa()
516 static void bond_ipsec_add_sa_all(struct bonding *bond) in bond_ipsec_add_sa_all() argument
518 struct net_device *bond_dev = bond->dev; in bond_ipsec_add_sa_all()
523 slave = rtnl_dereference(bond->curr_active_slave); in bond_ipsec_add_sa_all()
528 mutex_lock(&bond->ipsec_lock); in bond_ipsec_add_sa_all()
532 if (!list_empty(&bond->ipsec_list)) in bond_ipsec_add_sa_all()
539 list_for_each_entry(ipsec, &bond->ipsec_list, list) { in bond_ipsec_add_sa_all()
551 mutex_unlock(&bond->ipsec_lock); in bond_ipsec_add_sa_all()
564 struct bonding *bond; in bond_ipsec_del_sa() local
571 bond = netdev_priv(bond_dev); in bond_ipsec_del_sa()
572 slave = rcu_dereference(bond->curr_active_slave); in bond_ipsec_del_sa()
595 mutex_lock(&bond->ipsec_lock); in bond_ipsec_del_sa()
596 list_for_each_entry(ipsec, &bond->ipsec_list, list) { in bond_ipsec_del_sa()
603 mutex_unlock(&bond->ipsec_lock); in bond_ipsec_del_sa()
606 static void bond_ipsec_del_sa_all(struct bonding *bond) in bond_ipsec_del_sa_all() argument
608 struct net_device *bond_dev = bond->dev; in bond_ipsec_del_sa_all()
613 slave = rtnl_dereference(bond->curr_active_slave); in bond_ipsec_del_sa_all()
618 mutex_lock(&bond->ipsec_lock); in bond_ipsec_del_sa_all()
619 list_for_each_entry(ipsec, &bond->ipsec_list, list) { in bond_ipsec_del_sa_all()
635 mutex_unlock(&bond->ipsec_lock); in bond_ipsec_del_sa_all()
643 struct bonding *bond; in bond_ipsec_free_sa() local
650 bond = netdev_priv(bond_dev); in bond_ipsec_free_sa()
651 slave = rcu_dereference(bond->curr_active_slave); in bond_ipsec_free_sa()
763 int bond_set_carrier(struct bonding *bond) in bond_set_carrier() argument
768 if (!bond_has_slaves(bond)) in bond_set_carrier()
771 if (BOND_MODE(bond) == BOND_MODE_8023AD) in bond_set_carrier()
772 return bond_3ad_set_carrier(bond); in bond_set_carrier()
774 bond_for_each_slave(bond, slave, iter) { in bond_set_carrier()
776 if (!netif_carrier_ok(bond->dev)) { in bond_set_carrier()
777 netif_carrier_on(bond->dev); in bond_set_carrier()
785 if (netif_carrier_ok(bond->dev)) { in bond_set_carrier()
786 netif_carrier_off(bond->dev); in bond_set_carrier()
857 static int bond_check_dev_link(struct bonding *bond, in bond_check_dev_link() argument
868 if (bond->params.use_carrier) in bond_check_dev_link()
912 static int bond_set_promiscuity(struct bonding *bond, int inc) in bond_set_promiscuity() argument
917 if (bond_uses_primary(bond)) { in bond_set_promiscuity()
918 struct slave *curr_active = rtnl_dereference(bond->curr_active_slave); in bond_set_promiscuity()
925 bond_for_each_slave(bond, slave, iter) { in bond_set_promiscuity()
935 static int bond_set_allmulti(struct bonding *bond, int inc) in bond_set_allmulti() argument
940 if (bond_uses_primary(bond)) { in bond_set_allmulti()
941 struct slave *curr_active = rtnl_dereference(bond->curr_active_slave); in bond_set_allmulti()
948 bond_for_each_slave(bond, slave, iter) { in bond_set_allmulti()
963 struct bonding *bond = container_of(work, struct bonding, in bond_resend_igmp_join_requests_delayed() local
967 queue_delayed_work(bond->wq, &bond->mcast_work, 1); in bond_resend_igmp_join_requests_delayed()
970 call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev); in bond_resend_igmp_join_requests_delayed()
972 if (bond->igmp_retrans > 1) { in bond_resend_igmp_join_requests_delayed()
973 bond->igmp_retrans--; in bond_resend_igmp_join_requests_delayed()
974 queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); in bond_resend_igmp_join_requests_delayed()
979 /* Flush bond's hardware addresses from slave */
983 struct bonding *bond = netdev_priv(bond_dev); in bond_hw_addr_flush() local
988 if (BOND_MODE(bond) == BOND_MODE_8023AD) in bond_hw_addr_flush()
999 static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, in bond_hw_addr_swap() argument
1003 if (bond->dev->flags & IFF_PROMISC) in bond_hw_addr_swap()
1006 if (bond->dev->flags & IFF_ALLMULTI) in bond_hw_addr_swap()
1009 if (bond->dev->flags & IFF_UP) in bond_hw_addr_swap()
1010 bond_hw_addr_flush(bond->dev, old_active->dev); in bond_hw_addr_swap()
1012 bond_slave_ns_maddrs_add(bond, old_active); in bond_hw_addr_swap()
1017 if (bond->dev->flags & IFF_PROMISC) in bond_hw_addr_swap()
1020 if (bond->dev->flags & IFF_ALLMULTI) in bond_hw_addr_swap()
1023 if (bond->dev->flags & IFF_UP) { in bond_hw_addr_swap()
1024 netif_addr_lock_bh(bond->dev); in bond_hw_addr_swap()
1025 dev_uc_sync(new_active->dev, bond->dev); in bond_hw_addr_swap()
1026 dev_mc_sync(new_active->dev, bond->dev); in bond_hw_addr_swap()
1027 netif_addr_unlock_bh(bond->dev); in bond_hw_addr_swap()
1030 bond_slave_ns_maddrs_del(bond, new_active); in bond_hw_addr_swap()
1035 * bond_set_dev_addr - clone slave's address to bond
1036 * @bond_dev: bond net device
1058 static struct slave *bond_get_old_active(struct bonding *bond, in bond_get_old_active() argument
1064 bond_for_each_slave(bond, slave, iter) { in bond_get_old_active()
1068 if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr)) in bond_get_old_active()
1081 static void bond_do_fail_over_mac(struct bonding *bond, in bond_do_fail_over_mac() argument
1089 switch (bond->params.fail_over_mac) { in bond_do_fail_over_mac()
1092 rv = bond_set_dev_addr(bond->dev, new_active->dev); in bond_do_fail_over_mac()
1094 slave_err(bond->dev, new_active->dev, "Error %d setting bond MAC from slave\n", in bond_do_fail_over_mac()
1101 * if just new_active, set new_active to bond's MAC in bond_do_fail_over_mac()
1107 old_active = bond_get_old_active(bond, new_active); in bond_do_fail_over_mac()
1117 bond_hw_addr_copy(ss.__data, bond->dev->dev_addr, in bond_do_fail_over_mac()
1118 bond->dev->addr_len); in bond_do_fail_over_mac()
1119 ss.ss_family = bond->dev->type; in bond_do_fail_over_mac()
1125 slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n", in bond_do_fail_over_mac()
1140 slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n", in bond_do_fail_over_mac()
1145 netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n", in bond_do_fail_over_mac()
1146 bond->params.fail_over_mac); in bond_do_fail_over_mac()
1154 * @bond: our bonding struct
1163 static struct slave *bond_choose_primary_or_current(struct bonding *bond) in bond_choose_primary_or_current() argument
1165 struct slave *prim = rtnl_dereference(bond->primary_slave); in bond_choose_primary_or_current()
1166 struct slave *curr = rtnl_dereference(bond->curr_active_slave); in bond_choose_primary_or_current()
1171 bond_for_each_slave(bond, slave, iter) { in bond_choose_primary_or_current()
1189 if (bond->force_primary) { in bond_choose_primary_or_current()
1190 bond->force_primary = false; in bond_choose_primary_or_current()
1199 switch (bond->params.primary_reselect) { in bond_choose_primary_or_current()
1211 netdev_err(bond->dev, "impossible primary_reselect %d\n", in bond_choose_primary_or_current()
1212 bond->params.primary_reselect); in bond_choose_primary_or_current()
1219 * @bond: our bonding struct
1221 static struct slave *bond_find_best_slave(struct bonding *bond) in bond_find_best_slave() argument
1225 int mintime = bond->params.updelay; in bond_find_best_slave()
1227 slave = bond_choose_primary_or_current(bond); in bond_find_best_slave()
1231 bond_for_each_slave(bond, slave, iter) { in bond_find_best_slave()
1245 static bool bond_should_notify_peers(struct bonding *bond) in bond_should_notify_peers() argument
1247 struct slave *slave = rcu_dereference_rtnl(bond->curr_active_slave); in bond_should_notify_peers()
1249 if (!slave || !bond->send_peer_notif || in bond_should_notify_peers()
1250 bond->send_peer_notif % in bond_should_notify_peers()
1251 max(1, bond->params.peer_notif_delay) != 0 || in bond_should_notify_peers()
1252 !netif_carrier_ok(bond->dev) || in bond_should_notify_peers()
1256 netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n", in bond_should_notify_peers()
1264 * @bond: our bonding struct
1267 * Set the new slave to the bond's settings and unset them on the old
1277 void bond_change_active_slave(struct bonding *bond, struct slave *new_active) in bond_change_active_slave() argument
1283 old_active = rtnl_dereference(bond->curr_active_slave); in bond_change_active_slave()
1289 bond_ipsec_del_sa_all(bond); in bond_change_active_slave()
1296 if (bond_uses_primary(bond)) { in bond_change_active_slave()
1297 slave_info(bond->dev, new_active->dev, "making interface the new active one %d ms earlier\n", in bond_change_active_slave()
1298 (bond->params.updelay - new_active->delay) * bond->params.miimon); in bond_change_active_slave()
1305 if (BOND_MODE(bond) == BOND_MODE_8023AD) in bond_change_active_slave()
1308 if (bond_is_lb(bond)) in bond_change_active_slave()
1309 bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP); in bond_change_active_slave()
1311 if (bond_uses_primary(bond)) in bond_change_active_slave()
1312 slave_info(bond->dev, new_active->dev, "making interface the new active one\n"); in bond_change_active_slave()
1316 if (bond_uses_primary(bond)) in bond_change_active_slave()
1317 bond_hw_addr_swap(bond, new_active, old_active); in bond_change_active_slave()
1319 if (bond_is_lb(bond)) { in bond_change_active_slave()
1320 bond_alb_handle_active_change(bond, new_active); in bond_change_active_slave()
1328 rcu_assign_pointer(bond->curr_active_slave, new_active); in bond_change_active_slave()
1331 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) { in bond_change_active_slave()
1342 if (bond->params.fail_over_mac) in bond_change_active_slave()
1343 bond_do_fail_over_mac(bond, new_active, in bond_change_active_slave()
1346 if (netif_running(bond->dev)) { in bond_change_active_slave()
1347 bond->send_peer_notif = in bond_change_active_slave()
1348 bond->params.num_peer_notif * in bond_change_active_slave()
1349 max(1, bond->params.peer_notif_delay); in bond_change_active_slave()
1351 bond_should_notify_peers(bond); in bond_change_active_slave()
1354 call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev); in bond_change_active_slave()
1356 bond->send_peer_notif--; in bond_change_active_slave()
1358 bond->dev); in bond_change_active_slave()
1364 bond_ipsec_add_sa_all(bond); in bond_change_active_slave()
1369 * resend only if bond is brought up with the affected in bond_change_active_slave()
1372 if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) && in bond_change_active_slave()
1373 ((bond_uses_primary(bond) && new_active) || in bond_change_active_slave()
1374 BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) { in bond_change_active_slave()
1375 bond->igmp_retrans = bond->params.resend_igmp; in bond_change_active_slave()
1376 queue_delayed_work(bond->wq, &bond->mcast_work, 1); in bond_change_active_slave()
1382 * @bond: our bonding struct
1391 void bond_select_active_slave(struct bonding *bond) in bond_select_active_slave() argument
1398 best_slave = bond_find_best_slave(bond); in bond_select_active_slave()
1399 if (best_slave != rtnl_dereference(bond->curr_active_slave)) { in bond_select_active_slave()
1400 bond_change_active_slave(bond, best_slave); in bond_select_active_slave()
1401 rv = bond_set_carrier(bond); in bond_select_active_slave()
1405 if (netif_carrier_ok(bond->dev)) in bond_select_active_slave()
1406 netdev_info(bond->dev, "active interface up!\n"); in bond_select_active_slave()
1408 netdev_info(bond->dev, "now running without any active interface!\n"); in bond_select_active_slave()
1446 struct bonding *bond = netdev_priv(bond_dev); in bond_poll_controller() local
1451 if (BOND_MODE(bond) == BOND_MODE_8023AD) in bond_poll_controller()
1452 if (bond_3ad_get_active_agg_info(bond, &ad_info)) in bond_poll_controller()
1455 bond_for_each_slave_rcu(bond, slave, iter) { in bond_poll_controller()
1459 if (BOND_MODE(bond) == BOND_MODE_8023AD) { in bond_poll_controller()
1474 struct bonding *bond = netdev_priv(bond_dev); in bond_netpoll_cleanup() local
1478 bond_for_each_slave(bond, slave, iter) in bond_netpoll_cleanup()
1485 struct bonding *bond = netdev_priv(dev); in bond_netpoll_setup() local
1490 bond_for_each_slave(bond, slave, iter) { in bond_netpoll_setup()
1517 struct bonding *bond = netdev_priv(dev); in bond_fix_features() local
1527 bond_for_each_slave(bond, slave, iter) { in bond_fix_features()
1548 static void bond_compute_features(struct bonding *bond) in bond_compute_features() argument
1558 struct net_device *bond_dev = bond->dev; in bond_compute_features()
1565 if (!bond_has_slaves(bond)) in bond_compute_features()
1570 bond_for_each_slave(bond, slave, iter) { in bond_compute_features()
1647 struct bonding *bond) in bond_should_deliver_exact_match() argument
1650 if (BOND_MODE(bond) == BOND_MODE_ALB && in bond_should_deliver_exact_match()
1663 struct bonding *bond; in bond_handle_frame() local
1675 bond = slave->bond; in bond_handle_frame()
1677 recv_probe = READ_ONCE(bond->recv_probe); in bond_handle_frame()
1679 ret = recv_probe(skb, bond, slave); in bond_handle_frame()
1697 if (bond_should_deliver_exact_match(skb, slave, bond)) { in bond_handle_frame()
1703 skb->dev = bond->dev; in bond_handle_frame()
1705 if (BOND_MODE(bond) == BOND_MODE_ALB && in bond_handle_frame()
1706 netif_is_bridge_port(bond->dev) && in bond_handle_frame()
1714 bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, in bond_handle_frame()
1715 bond->dev->addr_len); in bond_handle_frame()
1721 static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond) in bond_lag_tx_type() argument
1723 switch (BOND_MODE(bond)) { in bond_lag_tx_type()
1738 static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond, in bond_lag_hash_type() argument
1744 switch (bond->params.xmit_policy) { in bond_lag_hash_type()
1762 static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave, in bond_master_upper_dev_link() argument
1769 type = bond_lag_tx_type(bond); in bond_master_upper_dev_link()
1771 lag_upper_info.hash_type = bond_lag_hash_type(bond, type); in bond_master_upper_dev_link()
1773 err = netdev_master_upper_dev_link(slave->dev, bond->dev, slave, in bond_master_upper_dev_link()
1782 static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave) in bond_upper_dev_unlink() argument
1784 netdev_upper_dev_unlink(slave->dev, bond->dev); in bond_upper_dev_unlink()
1791 struct bonding *bond = bond_get_bond_by_slave(slave); in slave_kobj_release() local
1794 if (BOND_MODE(bond) == BOND_MODE_8023AD) in slave_kobj_release()
1819 static struct slave *bond_alloc_slave(struct bonding *bond, in bond_alloc_slave() argument
1828 slave->bond = bond; in bond_alloc_slave()
1835 if (BOND_MODE(bond) == BOND_MODE_8023AD) { in bond_alloc_slave()
1847 static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info) in bond_fill_ifbond() argument
1849 info->bond_mode = BOND_MODE(bond); in bond_fill_ifbond()
1850 info->miimon = bond->params.miimon; in bond_fill_ifbond()
1851 info->num_slaves = bond->slave_cnt; in bond_fill_ifbond()
1871 bond_fill_ifbond(slave->bond, &binfo.master); in bond_netdev_notify_work()
1875 queue_delayed_work(slave->bond->wq, &slave->notify_work, 1); in bond_netdev_notify_work()
1881 queue_delayed_work(slave->bond->wq, &slave->notify_work, 0); in bond_queue_slave_event()
1908 /* The bonding driver uses ether_setup() to convert a master bond device
1924 struct bonding *bond = netdev_priv(bond_dev); in bond_xdp_set_features() local
1931 if (!bond_xdp_check(bond) || !bond_has_slaves(bond)) { in bond_xdp_set_features()
1936 bond_for_each_slave(bond, slave, iter) in bond_xdp_set_features()
1944 /* enslave device <slave> to bond device <master> */
1948 struct bonding *bond = netdev_priv(bond_dev); in bond_enslave() local
1962 if (!bond->params.use_carrier && in bond_enslave()
1976 BOND_NL_ERR(bond_dev, extack, "Cannot enslave bond to itself."); in bond_enslave()
1986 "Can not enslave VLAN challenged device to VLAN enabled bond"); in bond_enslave()
1989 …, "enslaved VLAN challenged slave. Adding VLANs will be blocked as long as it is part of bond.\n"); in bond_enslave()
2013 * bond ether type mutual exclusion - don't allow slaves of dissimilar in bond_enslave()
2014 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond in bond_enslave()
2016 if (!bond_has_slaves(bond)) { in bond_enslave()
2048 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { in bond_enslave()
2058 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP && in bond_enslave()
2059 bond->params.fail_over_mac != BOND_FOM_ACTIVE) { in bond_enslave()
2060 if (!bond_has_slaves(bond)) { in bond_enslave()
2061 bond->params.fail_over_mac = BOND_FOM_ACTIVE; in bond_enslave()
2077 if (!bond_has_slaves(bond) && in bond_enslave()
2078 bond->dev->addr_assign_type == NET_ADDR_RANDOM) { in bond_enslave()
2079 res = bond_set_dev_addr(bond->dev, slave_dev); in bond_enslave()
2084 new_slave = bond_alloc_slave(bond, slave_dev); in bond_enslave()
2095 /* Save slave's original mtu and then set it to match the bond */ in bond_enslave()
2097 res = dev_set_mtu(slave_dev, bond->dev->mtu); in bond_enslave()
2110 if (!bond->params.fail_over_mac || in bond_enslave()
2111 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { in bond_enslave()
2139 if (bond_is_lb(bond)) { in bond_enslave()
2143 res = bond_alb_init_slave(bond, new_slave); in bond_enslave()
2150 slave_err(bond_dev, slave_dev, "Couldn't add bond vlan ids\n"); in bond_enslave()
2154 prev_slave = bond_last_slave(bond); in bond_enslave()
2160 bond_needs_speed_duplex(bond)) in bond_enslave()
2164 (msecs_to_jiffies(bond->params.arp_interval) + 1); in bond_enslave()
2170 if (bond->params.miimon && !bond->params.use_carrier) { in bond_enslave()
2171 link_reporting = bond_check_dev_link(bond, slave_dev, 1); in bond_enslave()
2173 if ((link_reporting == -1) && !bond->params.arp_interval) { in bond_enslave()
2191 if (bond->params.miimon) { in bond_enslave()
2192 if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) { in bond_enslave()
2193 if (bond->params.updelay) { in bond_enslave()
2197 new_slave->delay = bond->params.updelay; in bond_enslave()
2207 } else if (bond->params.arp_interval) { in bond_enslave()
2223 if (bond_uses_primary(bond) && bond->params.primary[0]) { in bond_enslave()
2225 if (strcmp(bond->params.primary, new_slave->dev->name) == 0) { in bond_enslave()
2226 rcu_assign_pointer(bond->primary_slave, new_slave); in bond_enslave()
2227 bond->force_primary = true; in bond_enslave()
2231 switch (BOND_MODE(bond)) { in bond_enslave()
2246 * can be called only after the mac address of the bond is set in bond_enslave()
2248 bond_3ad_initialize(bond); in bond_enslave()
2268 * anyway (it holds no special properties of the bond device), in bond_enslave()
2271 if (!rcu_access_pointer(bond->curr_active_slave) && in bond_enslave()
2273 rcu_assign_pointer(bond->curr_active_slave, new_slave); in bond_enslave()
2279 if (bond->dev->npinfo) { in bond_enslave()
2298 res = bond_master_upper_dev_link(bond, new_slave, extack); in bond_enslave()
2315 if (!bond_uses_primary(bond)) { in bond_enslave()
2339 if (BOND_MODE(bond) == BOND_MODE_8023AD) in bond_enslave()
2344 bond->slave_cnt++; in bond_enslave()
2345 bond_compute_features(bond); in bond_enslave()
2346 bond_set_carrier(bond); in bond_enslave()
2351 bond_slave_ns_maddrs_add(bond, new_slave); in bond_enslave()
2353 if (bond_uses_primary(bond)) { in bond_enslave()
2355 bond_select_active_slave(bond); in bond_enslave()
2359 if (bond_mode_can_use_xmit_hash(bond)) in bond_enslave()
2360 bond_update_slave_arr(bond, NULL); in bond_enslave()
2364 if (bond->xdp_prog) { in bond_enslave()
2370 } else if (bond->xdp_prog) { in bond_enslave()
2374 .prog = bond->xdp_prog, in bond_enslave()
2391 if (bond->xdp_prog) in bond_enslave()
2392 bpf_prog_inc(bond->xdp_prog); in bond_enslave()
2410 bond_upper_dev_unlink(bond, new_slave); in bond_enslave()
2417 if (rcu_access_pointer(bond->primary_slave) == new_slave) in bond_enslave()
2418 RCU_INIT_POINTER(bond->primary_slave, NULL); in bond_enslave()
2419 if (rcu_access_pointer(bond->curr_active_slave) == new_slave) { in bond_enslave()
2421 bond_change_active_slave(bond, NULL); in bond_enslave()
2422 bond_select_active_slave(bond); in bond_enslave()
2436 if (!bond->params.fail_over_mac || in bond_enslave()
2437 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { in bond_enslave()
2439 * MAC if this slave's MAC is in use by the bond, or at in bond_enslave()
2456 if (!bond_has_slaves(bond)) { in bond_enslave()
2469 /* Try to release the slave device <slave> from the bond device <master>
2472 * while destroying a bond interface and all slaves are being released.
2484 struct bonding *bond = netdev_priv(bond_dev); in __bond_release_one() local
2499 slave = bond_get_slave_by_dev(bond, slave_dev); in __bond_release_one()
2501 /* not a slave of this bond */ in __bond_release_one()
2512 bond_get_stats(bond->dev, &bond->bond_stats); in __bond_release_one()
2514 if (bond->xdp_prog) { in __bond_release_one()
2530 if (BOND_MODE(bond) == BOND_MODE_8023AD) in __bond_release_one()
2533 bond_upper_dev_unlink(bond, slave); in __bond_release_one()
2535 if (bond_mode_can_use_xmit_hash(bond)) in __bond_release_one()
2536 bond_update_slave_arr(bond, slave); in __bond_release_one()
2541 oldcurrent = rcu_access_pointer(bond->curr_active_slave); in __bond_release_one()
2543 RCU_INIT_POINTER(bond->current_arp_slave, NULL); in __bond_release_one()
2545 if (!all && (!bond->params.fail_over_mac || in __bond_release_one()
2546 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) { in __bond_release_one()
2548 bond_has_slaves(bond)) in __bond_release_one()
2549 …slave_warn(bond_dev, slave_dev, "the permanent HWaddr of slave - %pM - is still in use by bond - s… in __bond_release_one()
2553 if (rtnl_dereference(bond->primary_slave) == slave) in __bond_release_one()
2554 RCU_INIT_POINTER(bond->primary_slave, NULL); in __bond_release_one()
2557 bond_change_active_slave(bond, NULL); in __bond_release_one()
2563 bond_slave_ns_maddrs_del(bond, slave); in __bond_release_one()
2565 if (bond_is_lb(bond)) { in __bond_release_one()
2571 bond_alb_deinit_slave(bond, slave); in __bond_release_one()
2575 RCU_INIT_POINTER(bond->curr_active_slave, NULL); in __bond_release_one()
2581 bond_select_active_slave(bond); in __bond_release_one()
2584 bond_set_carrier(bond); in __bond_release_one()
2585 if (!bond_has_slaves(bond)) in __bond_release_one()
2590 bond->slave_cnt--; in __bond_release_one()
2592 if (!bond_has_slaves(bond)) { in __bond_release_one()
2593 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); in __bond_release_one()
2594 call_netdevice_notifiers(NETDEV_RELEASE, bond->dev); in __bond_release_one()
2597 bond_compute_features(bond); in __bond_release_one()
2600 …slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n… in __bond_release_one()
2607 if (!bond_uses_primary(bond)) { in __bond_release_one()
2633 if (bond->params.fail_over_mac != BOND_FOM_ACTIVE || in __bond_release_one()
2634 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { in __bond_release_one()
2662 /* First release a slave and then destroy the bond if no more slaves are left.
2668 struct bonding *bond = netdev_priv(bond_dev); in bond_release_and_destroy() local
2672 if (ret == 0 && !bond_has_slaves(bond) && in bond_release_and_destroy()
2675 netdev_info(bond_dev, "Destroying bond\n"); in bond_release_and_destroy()
2676 bond_remove_proc_entry(bond); in bond_release_and_destroy()
2684 struct bonding *bond = netdev_priv(bond_dev); in bond_info_query() local
2686 bond_fill_ifbond(bond, info); in bond_info_query()
2691 struct bonding *bond = netdev_priv(bond_dev); in bond_slave_info_query() local
2696 bond_for_each_slave(bond, slave, iter) { in bond_slave_info_query()
2710 static int bond_miimon_inspect(struct bonding *bond) in bond_miimon_inspect() argument
2717 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) { in bond_miimon_inspect()
2718 ignore_updelay = !rcu_dereference(bond->curr_active_slave); in bond_miimon_inspect()
2722 usable_slaves = rcu_dereference(bond->usable_slaves); in bond_miimon_inspect()
2728 bond_for_each_slave_rcu(bond, slave, iter) { in bond_miimon_inspect()
2731 link_state = bond_check_dev_link(bond, slave->dev, 0); in bond_miimon_inspect()
2740 slave->delay = bond->params.downdelay; in bond_miimon_inspect()
2742 slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n", in bond_miimon_inspect()
2743 (BOND_MODE(bond) == in bond_miimon_inspect()
2747 bond->params.downdelay * bond->params.miimon); in bond_miimon_inspect()
2756 slave_info(bond->dev, slave->dev, "link status up again after %d ms\n", in bond_miimon_inspect()
2757 (bond->params.downdelay - slave->delay) * in bond_miimon_inspect()
2758 bond->params.miimon); in bond_miimon_inspect()
2778 slave->delay = bond->params.updelay; in bond_miimon_inspect()
2781 slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n", in bond_miimon_inspect()
2783 bond->params.updelay * in bond_miimon_inspect()
2784 bond->params.miimon); in bond_miimon_inspect()
2791 slave_info(bond->dev, slave->dev, "link status down again after %d ms\n", in bond_miimon_inspect()
2792 (bond->params.updelay - slave->delay) * in bond_miimon_inspect()
2793 bond->params.miimon); in bond_miimon_inspect()
2816 static void bond_miimon_link_change(struct bonding *bond, in bond_miimon_link_change() argument
2820 switch (BOND_MODE(bond)) { in bond_miimon_link_change()
2826 bond_alb_handle_link_change(bond, slave, link); in bond_miimon_link_change()
2829 bond_update_slave_arr(bond, NULL); in bond_miimon_link_change()
2834 static void bond_miimon_commit(struct bonding *bond) in bond_miimon_commit() argument
2842 bond_for_each_slave(bond, slave, iter) { in bond_miimon_commit()
2851 if (BOND_MODE(bond) == BOND_MODE_8023AD && in bond_miimon_commit()
2858 bond_needs_speed_duplex(bond)) { in bond_miimon_commit()
2861 slave_warn(bond->dev, slave->dev, in bond_miimon_commit()
2869 primary = rtnl_dereference(bond->primary_slave); in bond_miimon_commit()
2870 if (BOND_MODE(bond) == BOND_MODE_8023AD) { in bond_miimon_commit()
2873 } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { in bond_miimon_commit()
2878 slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n", in bond_miimon_commit()
2882 bond_miimon_link_change(bond, slave, BOND_LINK_UP); in bond_miimon_commit()
2884 active = rtnl_dereference(bond->curr_active_slave); in bond_miimon_commit()
2897 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP || in bond_miimon_commit()
2898 BOND_MODE(bond) == BOND_MODE_8023AD) in bond_miimon_commit()
2902 slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n"); in bond_miimon_commit()
2904 bond_miimon_link_change(bond, slave, BOND_LINK_DOWN); in bond_miimon_commit()
2906 if (slave == rcu_access_pointer(bond->curr_active_slave)) in bond_miimon_commit()
2912 slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n", in bond_miimon_commit()
2922 bond_select_active_slave(bond); in bond_miimon_commit()
2926 bond_set_carrier(bond); in bond_miimon_commit()
2938 struct bonding *bond = container_of(work, struct bonding, in bond_mii_monitor() local
2946 delay = msecs_to_jiffies(bond->params.miimon); in bond_mii_monitor()
2948 if (!bond_has_slaves(bond)) in bond_mii_monitor()
2952 should_notify_peers = bond_should_notify_peers(bond); in bond_mii_monitor()
2953 commit = !!bond_miimon_inspect(bond); in bond_mii_monitor()
2954 if (bond->send_peer_notif) { in bond_mii_monitor()
2957 bond->send_peer_notif--; in bond_mii_monitor()
2972 bond_for_each_slave(bond, slave, iter) { in bond_mii_monitor()
2975 bond_miimon_commit(bond); in bond_mii_monitor()
2981 if (bond->params.miimon) in bond_mii_monitor()
2982 queue_delayed_work(bond->wq, &bond->mii_work, delay); in bond_mii_monitor()
2987 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); in bond_mii_monitor()
3000 static bool bond_has_this_ip(struct bonding *bond, __be32 ip) in bond_has_this_ip() argument
3007 if (ip == bond_confirm_addr(bond->dev, 0, ip)) in bond_has_this_ip()
3011 if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_upper_dev_walk, &priv)) in bond_has_this_ip()
3023 struct net_device *bond_dev = slave->bond->dev; in bond_handle_vlan()
3068 struct net_device *bond_dev = slave->bond->dev; in bond_arp_send()
3131 static void bond_arp_send_all(struct bonding *bond, struct slave *slave) in bond_arp_send_all() argument
3135 __be32 *targets = bond->params.arp_targets, addr; in bond_arp_send_all()
3139 slave_dbg(bond->dev, slave->dev, "%s: target %pI4\n", in bond_arp_send_all()
3144 rt = ip_route_output(dev_net(bond->dev), targets[i], 0, 0, 0, in bond_arp_send_all()
3150 if (bond->params.arp_validate) in bond_arp_send_all()
3152 bond->dev->name, in bond_arp_send_all()
3159 /* bond device itself */ in bond_arp_send_all()
3160 if (rt->dst.dev == bond->dev) in bond_arp_send_all()
3164 tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0); in bond_arp_send_all()
3171 slave_dbg(bond->dev, slave->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n", in bond_arp_send_all()
3185 static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip) in bond_validate_arp() argument
3189 if (!sip || !bond_has_this_ip(bond, tip)) { in bond_validate_arp()
3190 slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 tip %pI4 not found\n", in bond_validate_arp()
3195 i = bond_get_targets_ip(bond->params.arp_targets, sip); in bond_validate_arp()
3197 slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 not found in targets\n", in bond_validate_arp()
3205 static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, in bond_arp_rcv() argument
3214 alen = arp_hdr_len(bond->dev); in bond_arp_rcv()
3224 if (arp->ar_hln != bond->dev->addr_len || in bond_arp_rcv()
3233 arp_ptr += bond->dev->addr_len; in bond_arp_rcv()
3235 arp_ptr += 4 + bond->dev->addr_len; in bond_arp_rcv()
3238 slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI4 tip %pI4\n", in bond_arp_rcv()
3240 bond->params.arp_validate, slave_do_arp_validate(bond, slave), in bond_arp_rcv()
3243 curr_active_slave = rcu_dereference(bond->curr_active_slave); in bond_arp_rcv()
3244 curr_arp_slave = rcu_dereference(bond->current_arp_slave); in bond_arp_rcv()
3270 bond_validate_arp(bond, slave, sip, tip); in bond_arp_rcv()
3272 time_after(slave_last_rx(bond, curr_active_slave), in bond_arp_rcv()
3274 bond_validate_arp(bond, slave, tip, sip); in bond_arp_rcv()
3276 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1)) in bond_arp_rcv()
3277 bond_validate_arp(bond, slave, sip, tip); in bond_arp_rcv()
3289 struct net_device *bond_dev = slave->bond->dev; in bond_ns_send()
3310 static void bond_ns_send_all(struct bonding *bond, struct slave *slave) in bond_ns_send_all() argument
3312 struct in6_addr *targets = bond->params.ns_targets; in bond_ns_send_all()
3320 slave_dbg(bond->dev, slave->dev, "%s: target %pI6c\n", in bond_ns_send_all()
3327 fl6.flowi6_oif = bond->dev->ifindex; in bond_ns_send_all()
3329 dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6); in bond_ns_send_all()
3335 if (bond->params.arp_validate) in bond_ns_send_all()
3337 bond->dev->name, in bond_ns_send_all()
3343 /* bond device itself */ in bond_ns_send_all()
3344 if (dst->dev == bond->dev) in bond_ns_send_all()
3348 tags = bond_verify_device_path(bond->dev, dst->dev, 0); in bond_ns_send_all()
3355 slave_dbg(bond->dev, slave->dev, "no path to ns_ip6_target %pI6c via dst->dev %s\n", in bond_ns_send_all()
3380 static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr) in bond_has_this_ip6() argument
3387 if (bond_confirm_addr6(bond->dev, &priv)) in bond_has_this_ip6()
3391 if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_confirm_addr6, &priv)) in bond_has_this_ip6()
3398 static void bond_validate_na(struct bonding *bond, struct slave *slave, in bond_validate_na() argument
3406 * exist on bond interface. in bond_validate_na()
3410 !bond_has_this_ip6(bond, daddr))) { in bond_validate_na()
3411 slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n", in bond_validate_na()
3416 i = bond_get_targets_ip6(bond->params.ns_targets, saddr); in bond_validate_na()
3418 slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c not found in targets\n", in bond_validate_na()
3426 static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond, in bond_na_rcv() argument
3449 slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n", in bond_na_rcv()
3451 bond->params.arp_validate, slave_do_arp_validate(bond, slave), in bond_na_rcv()
3454 curr_active_slave = rcu_dereference(bond->curr_active_slave); in bond_na_rcv()
3455 curr_arp_slave = rcu_dereference(bond->current_arp_slave); in bond_na_rcv()
3461 bond_validate_na(bond, slave, saddr, daddr); in bond_na_rcv()
3463 time_after(slave_last_rx(bond, curr_active_slave), in bond_na_rcv()
3465 bond_validate_na(bond, slave, daddr, saddr); in bond_na_rcv()
3467 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1)) in bond_na_rcv()
3468 bond_validate_na(bond, slave, saddr, daddr); in bond_na_rcv()
3475 int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond, in bond_rcv_validate() argument
3483 slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n", in bond_rcv_validate()
3487 if (!slave_do_arp_validate(bond, slave)) { in bond_rcv_validate()
3488 if ((slave_do_arp_validate_only(bond) && is_arp) || in bond_rcv_validate()
3490 (slave_do_arp_validate_only(bond) && is_ipv6) || in bond_rcv_validate()
3492 !slave_do_arp_validate_only(bond)) in bond_rcv_validate()
3496 return bond_arp_rcv(skb, bond, slave); in bond_rcv_validate()
3499 return bond_na_rcv(skb, bond, slave); in bond_rcv_validate()
3506 static void bond_send_validate(struct bonding *bond, struct slave *slave) in bond_send_validate() argument
3508 bond_arp_send_all(bond, slave); in bond_send_validate()
3510 bond_ns_send_all(bond, slave); in bond_send_validate()
3518 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act, in bond_time_in_interval() argument
3521 int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); in bond_time_in_interval()
3534 static void bond_loadbalance_arp_mon(struct bonding *bond) in bond_loadbalance_arp_mon() argument
3540 if (!bond_has_slaves(bond)) in bond_loadbalance_arp_mon()
3545 oldcurrent = rcu_dereference(bond->curr_active_slave); in bond_loadbalance_arp_mon()
3554 bond_for_each_slave_rcu(bond, slave, iter) { in bond_loadbalance_arp_mon()
3560 if (bond_time_in_interval(bond, last_tx, 1) && in bond_loadbalance_arp_mon()
3561 bond_time_in_interval(bond, slave->last_rx, 1)) { in bond_loadbalance_arp_mon()
3572 slave_info(bond->dev, slave->dev, "link status definitely up\n"); in bond_loadbalance_arp_mon()
3575 slave_info(bond->dev, slave->dev, "interface is now up\n"); in bond_loadbalance_arp_mon()
3585 if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) || in bond_loadbalance_arp_mon()
3586 !bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) { in bond_loadbalance_arp_mon()
3594 slave_info(bond->dev, slave->dev, "interface is now down\n"); in bond_loadbalance_arp_mon()
3609 bond_send_validate(bond, slave); in bond_loadbalance_arp_mon()
3618 bond_for_each_slave(bond, slave, iter) { in bond_loadbalance_arp_mon()
3624 bond_slave_state_change(bond); in bond_loadbalance_arp_mon()
3625 if (BOND_MODE(bond) == BOND_MODE_XOR) in bond_loadbalance_arp_mon()
3626 bond_update_slave_arr(bond, NULL); in bond_loadbalance_arp_mon()
3630 bond_select_active_slave(bond); in bond_loadbalance_arp_mon()
3637 if (bond->params.arp_interval) in bond_loadbalance_arp_mon()
3638 queue_delayed_work(bond->wq, &bond->arp_work, in bond_loadbalance_arp_mon()
3639 msecs_to_jiffies(bond->params.arp_interval)); in bond_loadbalance_arp_mon()
3649 static int bond_ab_arp_inspect(struct bonding *bond) in bond_ab_arp_inspect() argument
3656 bond_for_each_slave_rcu(bond, slave, iter) { in bond_ab_arp_inspect()
3658 last_rx = slave_last_rx(bond, slave); in bond_ab_arp_inspect()
3661 if (bond_time_in_interval(bond, last_rx, 1)) { in bond_ab_arp_inspect()
3675 if (bond_time_in_interval(bond, slave->last_link_up, 2)) in bond_ab_arp_inspect()
3681 * - the bond has an IP address in bond_ab_arp_inspect()
3691 !rcu_access_pointer(bond->current_arp_slave) && in bond_ab_arp_inspect()
3692 !bond_time_in_interval(bond, last_rx, bond->params.missed_max + 1)) { in bond_ab_arp_inspect()
3700 * the bond has an IP address) in bond_ab_arp_inspect()
3704 (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) || in bond_ab_arp_inspect()
3705 !bond_time_in_interval(bond, last_rx, bond->params.missed_max))) { in bond_ab_arp_inspect()
3719 static void bond_ab_arp_commit(struct bonding *bond) in bond_ab_arp_commit() argument
3726 bond_for_each_slave(bond, slave, iter) { in bond_ab_arp_commit()
3733 if (rtnl_dereference(bond->curr_active_slave) != slave || in bond_ab_arp_commit()
3734 (!rtnl_dereference(bond->curr_active_slave) && in bond_ab_arp_commit()
3735 bond_time_in_interval(bond, last_tx, 1))) { in bond_ab_arp_commit()
3738 current_arp_slave = rtnl_dereference(bond->current_arp_slave); in bond_ab_arp_commit()
3745 RCU_INIT_POINTER(bond->current_arp_slave, NULL); in bond_ab_arp_commit()
3748 slave_info(bond->dev, slave->dev, "link status definitely up\n"); in bond_ab_arp_commit()
3750 if (!rtnl_dereference(bond->curr_active_slave) || in bond_ab_arp_commit()
3751 slave == rtnl_dereference(bond->primary_slave) || in bond_ab_arp_commit()
3752 slave->prio > rtnl_dereference(bond->curr_active_slave)->prio) in bond_ab_arp_commit()
3768 slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n"); in bond_ab_arp_commit()
3770 if (slave == rtnl_dereference(bond->curr_active_slave)) { in bond_ab_arp_commit()
3771 RCU_INIT_POINTER(bond->current_arp_slave, NULL); in bond_ab_arp_commit()
3786 if (rtnl_dereference(bond->curr_active_slave)) in bond_ab_arp_commit()
3787 RCU_INIT_POINTER(bond->current_arp_slave, NULL); in bond_ab_arp_commit()
3791 slave_err(bond->dev, slave->dev, in bond_ab_arp_commit()
3800 bond_select_active_slave(bond); in bond_ab_arp_commit()
3804 bond_set_carrier(bond); in bond_ab_arp_commit()
3811 static bool bond_ab_arp_probe(struct bonding *bond) in bond_ab_arp_probe() argument
3814 *curr_arp_slave = rcu_dereference(bond->current_arp_slave), in bond_ab_arp_probe()
3815 *curr_active_slave = rcu_dereference(bond->curr_active_slave); in bond_ab_arp_probe()
3821 netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n", in bond_ab_arp_probe()
3826 bond_send_validate(bond, curr_active_slave); in bond_ab_arp_probe()
3836 curr_arp_slave = bond_first_slave_rcu(bond); in bond_ab_arp_probe()
3841 bond_for_each_slave_rcu(bond, slave, iter) { in bond_ab_arp_probe()
3863 slave_info(bond->dev, slave->dev, "backup interface is now down\n"); in bond_ab_arp_probe()
3878 bond_send_validate(bond, new_slave); in bond_ab_arp_probe()
3880 rcu_assign_pointer(bond->current_arp_slave, new_slave); in bond_ab_arp_probe()
3883 bond_for_each_slave_rcu(bond, slave, iter) { in bond_ab_arp_probe()
3892 static void bond_activebackup_arp_mon(struct bonding *bond) in bond_activebackup_arp_mon() argument
3898 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); in bond_activebackup_arp_mon()
3900 if (!bond_has_slaves(bond)) in bond_activebackup_arp_mon()
3905 should_notify_peers = bond_should_notify_peers(bond); in bond_activebackup_arp_mon()
3907 if (bond_ab_arp_inspect(bond)) { in bond_activebackup_arp_mon()
3917 bond_ab_arp_commit(bond); in bond_activebackup_arp_mon()
3923 should_notify_rtnl = bond_ab_arp_probe(bond); in bond_activebackup_arp_mon()
3927 if (bond->params.arp_interval) in bond_activebackup_arp_mon()
3928 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); in bond_activebackup_arp_mon()
3935 bond->send_peer_notif--; in bond_activebackup_arp_mon()
3937 bond->dev); in bond_activebackup_arp_mon()
3940 bond_slave_state_notify(bond); in bond_activebackup_arp_mon()
3941 bond_slave_link_notify(bond); in bond_activebackup_arp_mon()
3950 struct bonding *bond = container_of(work, struct bonding, in bond_arp_monitor() local
3953 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) in bond_arp_monitor()
3954 bond_activebackup_arp_mon(bond); in bond_arp_monitor()
3956 bond_loadbalance_arp_mon(bond); in bond_arp_monitor()
3962 static int bond_event_changename(struct bonding *bond) in bond_event_changename() argument
3964 bond_remove_proc_entry(bond); in bond_event_changename()
3965 bond_create_proc_entry(bond); in bond_event_changename()
3967 bond_debug_reregister(bond); in bond_event_changename()
4002 struct bonding *bond; in bond_slave_netdev_event() local
4014 bond_dev = slave->bond->dev; in bond_slave_netdev_event()
4015 bond = slave->bond; in bond_slave_netdev_event()
4016 primary = rtnl_dereference(bond->primary_slave); in bond_slave_netdev_event()
4037 BOND_MODE(bond) == BOND_MODE_8023AD) { in bond_slave_netdev_event()
4044 if (BOND_MODE(bond) == BOND_MODE_8023AD) in bond_slave_netdev_event()
4056 if (bond_mode_can_use_xmit_hash(bond)) in bond_slave_netdev_event()
4057 bond_update_slave_arr(bond, NULL); in bond_slave_netdev_event()
4062 * an active-backup bond, slaves need in bond_slave_netdev_event()
4074 if (!bond_uses_primary(bond) || in bond_slave_netdev_event()
4075 !bond->params.primary[0]) in bond_slave_netdev_event()
4080 RCU_INIT_POINTER(bond->primary_slave, NULL); in bond_slave_netdev_event()
4081 } else if (!strcmp(slave_dev->name, bond->params.primary)) { in bond_slave_netdev_event()
4083 rcu_assign_pointer(bond->primary_slave, slave); in bond_slave_netdev_event()
4088 netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n", in bond_slave_netdev_event()
4092 bond_select_active_slave(bond); in bond_slave_netdev_event()
4096 if (!bond->notifier_ctx) { in bond_slave_netdev_event()
4097 bond->notifier_ctx = true; in bond_slave_netdev_event()
4098 bond_compute_features(bond); in bond_slave_netdev_event()
4099 bond->notifier_ctx = false; in bond_slave_netdev_event()
4104 call_netdevice_notifiers(event, slave->bond->dev); in bond_slave_netdev_event()
4240 /* Extract the appropriate headers based on bond's xmit policy */
4241 static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const void *data, in bond_flow_dissect() argument
4244 bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34; in bond_flow_dissect()
4247 switch (bond->params.xmit_policy) { in bond_flow_dissect()
4305 static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const void *data, in __bond_xmit_hash() argument
4311 if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC) in __bond_xmit_hash()
4314 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 || in __bond_xmit_hash()
4315 !bond_flow_dissect(bond, skb, data, l2_proto, nhoff, hlen, &flow)) in __bond_xmit_hash()
4318 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 || in __bond_xmit_hash()
4319 bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) { in __bond_xmit_hash()
4328 return bond_ip_hash(hash, &flow, bond->params.xmit_policy); in __bond_xmit_hash()
4333 * @bond: bonding device
4339 u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb) in bond_xmit_hash() argument
4341 if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 && in bond_xmit_hash()
4345 return __bond_xmit_hash(bond, skb, skb->data, skb->protocol, in bond_xmit_hash()
4352 * @bond: bonding device
4357 static u32 bond_xmit_hash_xdp(struct bonding *bond, struct xdp_buff *xdp) in bond_xmit_hash_xdp() argument
4366 return __bond_xmit_hash(bond, NULL, xdp->data, eth->h_proto, 0, in bond_xmit_hash_xdp()
4372 void bond_work_init_all(struct bonding *bond) in bond_work_init_all() argument
4374 INIT_DELAYED_WORK(&bond->mcast_work, in bond_work_init_all()
4376 INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor); in bond_work_init_all()
4377 INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor); in bond_work_init_all()
4378 INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor); in bond_work_init_all()
4379 INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler); in bond_work_init_all()
4380 INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler); in bond_work_init_all()
4383 static void bond_work_cancel_all(struct bonding *bond) in bond_work_cancel_all() argument
4385 cancel_delayed_work_sync(&bond->mii_work); in bond_work_cancel_all()
4386 cancel_delayed_work_sync(&bond->arp_work); in bond_work_cancel_all()
4387 cancel_delayed_work_sync(&bond->alb_work); in bond_work_cancel_all()
4388 cancel_delayed_work_sync(&bond->ad_work); in bond_work_cancel_all()
4389 cancel_delayed_work_sync(&bond->mcast_work); in bond_work_cancel_all()
4390 cancel_delayed_work_sync(&bond->slave_arr_work); in bond_work_cancel_all()
4395 struct bonding *bond = netdev_priv(bond_dev); in bond_open() local
4399 if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) { in bond_open()
4400 bond->rr_tx_counter = alloc_percpu(u32); in bond_open()
4401 if (!bond->rr_tx_counter) in bond_open()
4406 if (bond_has_slaves(bond)) { in bond_open()
4407 bond_for_each_slave(bond, slave, iter) { in bond_open()
4408 if (bond_uses_primary(bond) && in bond_open()
4409 slave != rcu_access_pointer(bond->curr_active_slave)) { in bond_open()
4412 } else if (BOND_MODE(bond) != BOND_MODE_8023AD) { in bond_open()
4419 if (bond_is_lb(bond)) { in bond_open()
4423 if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB))) in bond_open()
4425 if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB) in bond_open()
4426 queue_delayed_work(bond->wq, &bond->alb_work, 0); in bond_open()
4429 if (bond->params.miimon) /* link check interval, in milliseconds. */ in bond_open()
4430 queue_delayed_work(bond->wq, &bond->mii_work, 0); in bond_open()
4432 if (bond->params.arp_interval) { /* arp interval, in milliseconds. */ in bond_open()
4433 queue_delayed_work(bond->wq, &bond->arp_work, 0); in bond_open()
4434 bond->recv_probe = bond_rcv_validate; in bond_open()
4437 if (BOND_MODE(bond) == BOND_MODE_8023AD) { in bond_open()
4438 queue_delayed_work(bond->wq, &bond->ad_work, 0); in bond_open()
4440 bond->recv_probe = bond_3ad_lacpdu_recv; in bond_open()
4441 bond_3ad_initiate_agg_selection(bond, 1); in bond_open()
4443 bond_for_each_slave(bond, slave, iter) in bond_open()
4447 if (bond_mode_can_use_xmit_hash(bond)) in bond_open()
4448 bond_update_slave_arr(bond, NULL); in bond_open()
4455 struct bonding *bond = netdev_priv(bond_dev); in bond_close() local
4458 bond_work_cancel_all(bond); in bond_close()
4459 bond->send_peer_notif = 0; in bond_close()
4460 if (bond_is_lb(bond)) in bond_close()
4461 bond_alb_deinitialize(bond); in bond_close()
4462 bond->recv_probe = NULL; in bond_close()
4464 if (bond_uses_primary(bond)) { in bond_close()
4466 slave = rcu_dereference(bond->curr_active_slave); in bond_close()
4473 bond_for_each_slave(bond, slave, iter) in bond_close()
4553 struct bonding *bond = netdev_priv(bond_dev); in bond_get_stats() local
4565 spin_lock_nested(&bond->stats_lock, nest_level); in bond_get_stats()
4566 memcpy(stats, &bond->bond_stats, sizeof(*stats)); in bond_get_stats()
4568 bond_for_each_slave_rcu(bond, slave, iter) { in bond_get_stats()
4578 memcpy(&bond->bond_stats, stats, sizeof(*stats)); in bond_get_stats()
4579 spin_unlock(&bond->stats_lock); in bond_get_stats()
4585 struct bonding *bond = netdev_priv(bond_dev); in bond_eth_ioctl() local
4608 if (netif_carrier_ok(bond->dev)) in bond_eth_ioctl()
4622 struct bonding *bond = netdev_priv(bond_dev); in bond_do_ioctl() local
4686 res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE, in bond_do_ioctl()
4721 struct bonding *bond = netdev_priv(bond_dev); in bond_change_rx_flags() local
4724 bond_set_promiscuity(bond, in bond_change_rx_flags()
4728 bond_set_allmulti(bond, in bond_change_rx_flags()
4734 struct bonding *bond = netdev_priv(bond_dev); in bond_set_rx_mode() local
4739 if (bond_uses_primary(bond)) { in bond_set_rx_mode()
4740 slave = rcu_dereference(bond->curr_active_slave); in bond_set_rx_mode()
4746 bond_for_each_slave_rcu(bond, slave, iter) { in bond_set_rx_mode()
4756 struct bonding *bond = netdev_priv(n->dev); in bond_neigh_init() local
4763 slave = bond_first_slave_rcu(bond); in bond_neigh_init()
4812 struct bonding *bond = netdev_priv(bond_dev); in bond_change_mtu() local
4817 netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu); in bond_change_mtu()
4819 bond_for_each_slave(bond, slave, iter) { in bond_change_mtu()
4846 bond_for_each_slave(bond, rollback_slave, iter) { in bond_change_mtu()
4869 struct bonding *bond = netdev_priv(bond_dev); in bond_set_mac_address() local
4875 if (BOND_MODE(bond) == BOND_MODE_ALB) in bond_set_mac_address()
4879 netdev_dbg(bond_dev, "%s: bond=%p\n", __func__, bond); in bond_set_mac_address()
4884 if (bond->params.fail_over_mac && in bond_set_mac_address()
4885 BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) in bond_set_mac_address()
4891 bond_for_each_slave(bond, slave, iter) { in bond_set_mac_address()
4917 bond_for_each_slave(bond, rollback_slave, iter) { in bond_set_mac_address()
4936 * @bond: bonding device that is transmitting
4942 static struct slave *bond_get_slave_by_id(struct bonding *bond, in bond_get_slave_by_id() argument
4950 bond_for_each_slave_rcu(bond, slave, iter) { in bond_get_slave_by_id()
4959 bond_for_each_slave_rcu(bond, slave, iter) { in bond_get_slave_by_id()
4971 * @bond: bonding device to use
4977 static u32 bond_rr_gen_slave_id(struct bonding *bond) in bond_rr_gen_slave_id() argument
4981 int packets_per_slave = bond->params.packets_per_slave; in bond_rr_gen_slave_id()
4988 slave_id = this_cpu_inc_return(*bond->rr_tx_counter); in bond_rr_gen_slave_id()
4992 bond->params.reciprocal_packets_per_slave; in bond_rr_gen_slave_id()
4993 slave_id = this_cpu_inc_return(*bond->rr_tx_counter); in bond_rr_gen_slave_id()
5002 static struct slave *bond_xmit_roundrobin_slave_get(struct bonding *bond, in bond_xmit_roundrobin_slave_get() argument
5009 /* Start with the curr_active_slave that joined the bond as the in bond_xmit_roundrobin_slave_get()
5024 slave = rcu_dereference(bond->curr_active_slave); in bond_xmit_roundrobin_slave_get()
5027 return bond_get_slave_by_id(bond, 0); in bond_xmit_roundrobin_slave_get()
5032 slave_cnt = READ_ONCE(bond->slave_cnt); in bond_xmit_roundrobin_slave_get()
5034 slave_id = bond_rr_gen_slave_id(bond) % slave_cnt; in bond_xmit_roundrobin_slave_get()
5035 return bond_get_slave_by_id(bond, slave_id); in bond_xmit_roundrobin_slave_get()
5040 static struct slave *bond_xdp_xmit_roundrobin_slave_get(struct bonding *bond, in bond_xdp_xmit_roundrobin_slave_get() argument
5065 slave = rcu_dereference(bond->curr_active_slave); in bond_xdp_xmit_roundrobin_slave_get()
5068 return bond_get_slave_by_id(bond, 0); in bond_xdp_xmit_roundrobin_slave_get()
5073 slave_cnt = READ_ONCE(bond->slave_cnt); in bond_xdp_xmit_roundrobin_slave_get()
5075 slave_id = bond_rr_gen_slave_id(bond) % slave_cnt; in bond_xdp_xmit_roundrobin_slave_get()
5076 return bond_get_slave_by_id(bond, slave_id); in bond_xdp_xmit_roundrobin_slave_get()
5084 struct bonding *bond = netdev_priv(bond_dev); in bond_xmit_roundrobin() local
5087 slave = bond_xmit_roundrobin_slave_get(bond, skb); in bond_xmit_roundrobin()
5089 return bond_dev_queue_xmit(bond, skb, slave->dev); in bond_xmit_roundrobin()
5094 static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond) in bond_xmit_activebackup_slave_get() argument
5096 return rcu_dereference(bond->curr_active_slave); in bond_xmit_activebackup_slave_get()
5099 /* In active-backup mode, we know that bond->curr_active_slave is always valid if
5100 * the bond has a usable interface.
5105 struct bonding *bond = netdev_priv(bond_dev); in bond_xmit_activebackup() local
5108 slave = bond_xmit_activebackup_slave_get(bond); in bond_xmit_activebackup()
5110 return bond_dev_queue_xmit(bond, skb, slave->dev); in bond_xmit_activebackup()
5119 void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay) in bond_slave_arr_work_rearm() argument
5121 queue_delayed_work(bond->wq, &bond->slave_arr_work, delay); in bond_slave_arr_work_rearm()
5127 struct bonding *bond = container_of(work, struct bonding, in bond_slave_arr_handler() local
5134 ret = bond_update_slave_arr(bond, NULL); in bond_slave_arr_handler()
5143 bond_slave_arr_work_rearm(bond, 1); in bond_slave_arr_handler()
5169 static void bond_set_slave_arr(struct bonding *bond, in bond_set_slave_arr() argument
5175 usable = rtnl_dereference(bond->usable_slaves); in bond_set_slave_arr()
5176 rcu_assign_pointer(bond->usable_slaves, usable_slaves); in bond_set_slave_arr()
5179 all = rtnl_dereference(bond->all_slaves); in bond_set_slave_arr()
5180 rcu_assign_pointer(bond->all_slaves, all_slaves); in bond_set_slave_arr()
5184 static void bond_reset_slave_arr(struct bonding *bond) in bond_reset_slave_arr() argument
5186 bond_set_slave_arr(bond, NULL, NULL); in bond_reset_slave_arr()
5197 int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave) in bond_update_slave_arr() argument
5208 bond->slave_cnt), GFP_KERNEL); in bond_update_slave_arr()
5210 bond->slave_cnt), GFP_KERNEL); in bond_update_slave_arr()
5215 if (BOND_MODE(bond) == BOND_MODE_8023AD) { in bond_update_slave_arr()
5218 spin_lock_bh(&bond->mode_lock); in bond_update_slave_arr()
5219 if (bond_3ad_get_active_agg_info(bond, &ad_info)) { in bond_update_slave_arr()
5220 spin_unlock_bh(&bond->mode_lock); in bond_update_slave_arr()
5225 bond_reset_slave_arr(bond); in bond_update_slave_arr()
5228 spin_unlock_bh(&bond->mode_lock); in bond_update_slave_arr()
5231 bond_for_each_slave(bond, slave, iter) { in bond_update_slave_arr()
5236 if (BOND_MODE(bond) == BOND_MODE_8023AD) { in bond_update_slave_arr()
5246 slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n", in bond_update_slave_arr()
5252 bond_set_slave_arr(bond, usable_slaves, all_slaves); in bond_update_slave_arr()
5256 bond_skip_slave(rtnl_dereference(bond->all_slaves), in bond_update_slave_arr()
5258 bond_skip_slave(rtnl_dereference(bond->usable_slaves), in bond_update_slave_arr()
5267 static struct slave *bond_xmit_3ad_xor_slave_get(struct bonding *bond, in bond_xmit_3ad_xor_slave_get() argument
5275 hash = bond_xmit_hash(bond, skb); in bond_xmit_3ad_xor_slave_get()
5284 static struct slave *bond_xdp_xmit_3ad_xor_slave_get(struct bonding *bond, in bond_xdp_xmit_3ad_xor_slave_get() argument
5291 hash = bond_xmit_hash_xdp(bond, xdp); in bond_xdp_xmit_3ad_xor_slave_get()
5292 slaves = rcu_dereference(bond->usable_slaves); in bond_xdp_xmit_3ad_xor_slave_get()
5307 struct bonding *bond = netdev_priv(dev); in bond_3ad_xor_xmit() local
5311 slaves = rcu_dereference(bond->usable_slaves); in bond_3ad_xor_xmit()
5312 slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves); in bond_3ad_xor_xmit()
5314 return bond_dev_queue_xmit(bond, skb, slave->dev); in bond_3ad_xor_xmit()
5323 struct bonding *bond = netdev_priv(bond_dev); in bond_xmit_broadcast() local
5329 bond_for_each_slave_rcu(bond, slave, iter) { in bond_xmit_broadcast()
5335 if (bond_is_last_slave(bond, slave)) { in bond_xmit_broadcast()
5347 if (bond_dev_queue_xmit(bond, skb2, slave->dev) == NETDEV_TX_OK) in bond_xmit_broadcast()
5364 static inline int bond_slave_override(struct bonding *bond, in bond_slave_override() argument
5374 bond_for_each_slave_rcu(bond, slave, iter) { in bond_slave_override()
5378 bond_dev_queue_xmit(bond, skb, slave->dev); in bond_slave_override()
5415 struct bonding *bond = netdev_priv(master_dev); in bond_xmit_get_slave() local
5419 switch (BOND_MODE(bond)) { in bond_xmit_get_slave()
5421 slave = bond_xmit_roundrobin_slave_get(bond, skb); in bond_xmit_get_slave()
5424 slave = bond_xmit_activebackup_slave_get(bond); in bond_xmit_get_slave()
5429 slaves = rcu_dereference(bond->all_slaves); in bond_xmit_get_slave()
5431 slaves = rcu_dereference(bond->usable_slaves); in bond_xmit_get_slave()
5432 slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves); in bond_xmit_get_slave()
5437 slave = bond_xmit_alb_slave_get(bond, skb); in bond_xmit_get_slave()
5440 slave = bond_xmit_tlb_slave_get(bond, skb); in bond_xmit_get_slave()
5499 static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond, in __bond_sk_get_lower_dev() argument
5507 slaves = rcu_dereference(bond->usable_slaves); in __bond_sk_get_lower_dev()
5521 struct bonding *bond = netdev_priv(dev); in bond_sk_get_lower_dev() local
5525 if (bond_sk_check(bond)) in bond_sk_get_lower_dev()
5526 lower = __bond_sk_get_lower_dev(bond, sk); in bond_sk_get_lower_dev()
5533 static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb, in bond_tls_device_xmit() argument
5542 if (likely(bond_get_slave_by_dev(bond, tls_netdev))) in bond_tls_device_xmit()
5543 return bond_dev_queue_xmit(bond, skb, tls_netdev); in bond_tls_device_xmit()
5550 struct bonding *bond = netdev_priv(dev); in __bond_start_xmit() local
5552 if (bond_should_override_tx_queue(bond) && in __bond_start_xmit()
5553 !bond_slave_override(bond, skb)) in __bond_start_xmit()
5558 return bond_tls_device_xmit(bond, skb, dev); in __bond_start_xmit()
5561 switch (BOND_MODE(bond)) { in __bond_start_xmit()
5577 netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond)); in __bond_start_xmit()
5585 struct bonding *bond = netdev_priv(dev); in bond_start_xmit() local
5595 if (bond_has_slaves(bond)) in bond_start_xmit()
5607 struct bonding *bond = netdev_priv(bond_dev); in bond_xdp_get_xmit_slave() local
5612 switch (BOND_MODE(bond)) { in bond_xdp_get_xmit_slave()
5614 slave = bond_xdp_xmit_roundrobin_slave_get(bond, xdp); in bond_xdp_get_xmit_slave()
5618 slave = bond_xmit_activebackup_slave_get(bond); in bond_xdp_get_xmit_slave()
5623 slave = bond_xdp_xmit_3ad_xor_slave_get(bond, xdp); in bond_xdp_get_xmit_slave()
5629 BOND_MODE(bond)); in bond_xdp_get_xmit_slave()
5679 struct bonding *bond = netdev_priv(dev); in bond_xdp_set() local
5693 if (!bond_xdp_check(bond)) in bond_xdp_set()
5696 old_prog = bond->xdp_prog; in bond_xdp_set()
5697 bond->xdp_prog = prog; in bond_xdp_set()
5699 bond_for_each_slave(bond, slave, iter) { in bond_xdp_set()
5738 bond->xdp_prog = old_prog; in bond_xdp_set()
5742 bond_for_each_slave(bond, rollback_slave, iter) { in bond_xdp_set()
5803 struct bonding *bond = netdev_priv(dev); in bond_hwtstamp_get() local
5807 real_dev = bond_option_active_slave_get_rcu(bond); in bond_hwtstamp_get()
5822 struct bonding *bond = netdev_priv(dev); in bond_hwtstamp_set() local
5829 real_dev = bond_option_active_slave_get_rcu(bond); in bond_hwtstamp_set()
5843 struct bonding *bond = netdev_priv(bond_dev); in bond_ethtool_get_link_ksettings() local
5856 bond_for_each_slave(bond, slave, iter) { in bond_ethtool_get_link_ksettings()
5860 if (BOND_MODE(bond) == BOND_MODE_BROADCAST) in bond_ethtool_get_link_ksettings()
5887 struct bonding *bond = netdev_priv(bond_dev); in bond_ethtool_get_ts_info() local
5896 real_dev = bond_option_active_slave_get_rcu(bond); in bond_ethtool_get_ts_info()
5905 bond_for_each_slave_rcu(bond, slave, iter) { in bond_ethtool_get_ts_info()
5969 .name = "bond",
5974 struct bonding *bond = netdev_priv(bond_dev); in bond_destructor() local
5976 if (bond->wq) in bond_destructor()
5977 destroy_workqueue(bond->wq); in bond_destructor()
5979 free_percpu(bond->rr_tx_counter); in bond_destructor()
5984 struct bonding *bond = netdev_priv(bond_dev); in bond_setup() local
5986 spin_lock_init(&bond->mode_lock); in bond_setup()
5987 bond->params = bonding_defaults; in bond_setup()
5990 bond->dev = bond_dev; in bond_setup()
6011 INIT_LIST_HEAD(&bond->ipsec_list); in bond_setup()
6012 mutex_init(&bond->ipsec_lock); in bond_setup()
6015 /* don't acquire bond device's netif_tx_lock when transmitting */ in bond_setup()
6018 /* Don't allow bond devices to change network namespaces. */ in bond_setup()
6021 /* By default, we declare the bond to be fully in bond_setup()
6040 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) in bond_setup()
6050 struct bonding *bond = netdev_priv(bond_dev); in bond_uninit() local
6057 bond_for_each_slave(bond, slave, iter) in bond_uninit()
6062 mutex_destroy(&bond->ipsec_lock); in bond_uninit()
6065 bond_set_slave_arr(bond, NULL, NULL); in bond_uninit()
6067 list_del_rcu(&bond->bond_list); in bond_uninit()
6069 bond_debug_unregister(bond); in bond_uninit()
6467 struct bonding *bond = netdev_priv(bond_dev); in bond_init() local
6472 bond->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, in bond_init()
6474 if (!bond->wq) in bond_init()
6477 bond->notifier_ctx = false; in bond_init()
6479 spin_lock_init(&bond->stats_lock); in bond_init()
6482 list_add_tail_rcu(&bond->bond_list, &bn->dev_list); in bond_init()
6484 bond_prepare_sysfs_group(bond); in bond_init()
6486 bond_debug_register(bond); in bond_init()
6501 /* Create a new bond based on the specified name and bonding parameters.
6502 * If name is NULL, obtain a suitable "bond%d" name for us.
6509 struct bonding *bond; in bond_create() local
6515 name ? name : "bond%d", NET_NAME_UNKNOWN, in bond_create()
6520 bond = netdev_priv(bond_dev); in bond_create()
6532 bond_work_init_all(bond); in bond_create()
6553 * race condition in bond unloading") we need to remove sysfs files
6569 /* Kill off any bonds created after unregistering bond rtnl ops */ in bond_net_exit_batch_rtnl()
6571 struct bonding *bond, *tmp_bond; in bond_net_exit_batch_rtnl() local
6574 list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list) in bond_net_exit_batch_rtnl()
6575 unregister_netdevice_queue(bond->dev, dev_kill_list); in bond_net_exit_batch_rtnl()