/*
 * drivers/net/bond/bond_netlink.c - Netlink interface for bonding
 * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2013 Scott Feldman <sfeldma@cumulusnetworks.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/module.h>
15 #include <linux/errno.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/if_link.h>
19 #include <linux/if_ether.h>
20 #include <net/netlink.h>
21 #include <net/rtnetlink.h>
24 int bond_get_slave(struct net_device *slave_dev, struct sk_buff *skb)
26 struct slave *slave = bond_slave_get_rtnl(slave_dev);
27 const struct aggregator *agg;
29 if (nla_put_u8(skb, IFLA_SLAVE_STATE, bond_slave_state(slave)))
32 if (nla_put_u8(skb, IFLA_SLAVE_MII_STATUS, slave->link))
35 if (nla_put_u32(skb, IFLA_SLAVE_LINK_FAILURE_COUNT,
36 slave->link_failure_count))
39 if (nla_put(skb, IFLA_SLAVE_PERM_HWADDR,
40 slave_dev->addr_len, slave->perm_hwaddr))
43 if (nla_put_u16(skb, IFLA_SLAVE_QUEUE_ID, slave->queue_id))
46 if (slave->bond->params.mode == BOND_MODE_8023AD) {
47 agg = SLAVE_AD_INFO(slave).port.aggregator;
49 if (nla_put_u16(skb, IFLA_SLAVE_AD_AGGREGATOR_ID,
50 agg->aggregator_identifier))
60 static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
61 [IFLA_BOND_MODE] = { .type = NLA_U8 },
62 [IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
63 [IFLA_BOND_MIIMON] = { .type = NLA_U32 },
64 [IFLA_BOND_UPDELAY] = { .type = NLA_U32 },
65 [IFLA_BOND_DOWNDELAY] = { .type = NLA_U32 },
66 [IFLA_BOND_USE_CARRIER] = { .type = NLA_U8 },
67 [IFLA_BOND_ARP_INTERVAL] = { .type = NLA_U32 },
68 [IFLA_BOND_ARP_IP_TARGET] = { .type = NLA_NESTED },
69 [IFLA_BOND_ARP_VALIDATE] = { .type = NLA_U32 },
70 [IFLA_BOND_ARP_ALL_TARGETS] = { .type = NLA_U32 },
71 [IFLA_BOND_PRIMARY] = { .type = NLA_U32 },
72 [IFLA_BOND_PRIMARY_RESELECT] = { .type = NLA_U8 },
73 [IFLA_BOND_FAIL_OVER_MAC] = { .type = NLA_U8 },
74 [IFLA_BOND_XMIT_HASH_POLICY] = { .type = NLA_U8 },
75 [IFLA_BOND_RESEND_IGMP] = { .type = NLA_U32 },
76 [IFLA_BOND_NUM_PEER_NOTIF] = { .type = NLA_U8 },
77 [IFLA_BOND_ALL_SLAVES_ACTIVE] = { .type = NLA_U8 },
78 [IFLA_BOND_MIN_LINKS] = { .type = NLA_U32 },
79 [IFLA_BOND_LP_INTERVAL] = { .type = NLA_U32 },
80 [IFLA_BOND_PACKETS_PER_SLAVE] = { .type = NLA_U32 },
81 [IFLA_BOND_AD_LACP_RATE] = { .type = NLA_U8 },
82 [IFLA_BOND_AD_SELECT] = { .type = NLA_U8 },
83 [IFLA_BOND_AD_INFO] = { .type = NLA_NESTED },
86 static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
88 if (tb[IFLA_ADDRESS]) {
89 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
91 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
92 return -EADDRNOTAVAIL;
97 static int bond_changelink(struct net_device *bond_dev,
98 struct nlattr *tb[], struct nlattr *data[])
100 struct bonding *bond = netdev_priv(bond_dev);
101 struct bond_opt_value newval;
108 if (data[IFLA_BOND_MODE]) {
109 int mode = nla_get_u8(data[IFLA_BOND_MODE]);
111 bond_opt_initval(&newval, mode);
112 err = __bond_opt_set(bond, BOND_OPT_MODE, &newval);
116 if (data[IFLA_BOND_ACTIVE_SLAVE]) {
117 int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
118 struct net_device *slave_dev;
123 slave_dev = __dev_get_by_index(dev_net(bond_dev),
128 err = bond_option_active_slave_set(bond, slave_dev);
132 if (data[IFLA_BOND_MIIMON]) {
133 miimon = nla_get_u32(data[IFLA_BOND_MIIMON]);
135 err = bond_option_miimon_set(bond, miimon);
139 if (data[IFLA_BOND_UPDELAY]) {
140 int updelay = nla_get_u32(data[IFLA_BOND_UPDELAY]);
142 err = bond_option_updelay_set(bond, updelay);
146 if (data[IFLA_BOND_DOWNDELAY]) {
147 int downdelay = nla_get_u32(data[IFLA_BOND_DOWNDELAY]);
149 err = bond_option_downdelay_set(bond, downdelay);
153 if (data[IFLA_BOND_USE_CARRIER]) {
154 int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
156 err = bond_option_use_carrier_set(bond, use_carrier);
160 if (data[IFLA_BOND_ARP_INTERVAL]) {
161 int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
163 if (arp_interval && miimon) {
164 pr_err("%s: ARP monitoring cannot be used with MII monitoring.\n",
169 bond_opt_initval(&newval, arp_interval);
170 err = __bond_opt_set(bond, BOND_OPT_ARP_INTERVAL, &newval);
174 if (data[IFLA_BOND_ARP_IP_TARGET]) {
175 __be32 targets[BOND_MAX_ARP_TARGETS] = { 0, };
179 nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
180 __be32 target = nla_get_be32(attr);
181 targets[i++] = target;
184 err = bond_option_arp_ip_targets_set(bond, targets, i);
188 if (data[IFLA_BOND_ARP_VALIDATE]) {
189 int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
191 if (arp_validate && miimon) {
192 pr_err("%s: ARP validating cannot be used with MII monitoring.\n",
197 bond_opt_initval(&newval, arp_validate);
198 err = __bond_opt_set(bond, BOND_OPT_ARP_VALIDATE, &newval);
202 if (data[IFLA_BOND_ARP_ALL_TARGETS]) {
203 int arp_all_targets =
204 nla_get_u32(data[IFLA_BOND_ARP_ALL_TARGETS]);
206 bond_opt_initval(&newval, arp_all_targets);
207 err = __bond_opt_set(bond, BOND_OPT_ARP_ALL_TARGETS, &newval);
211 if (data[IFLA_BOND_PRIMARY]) {
212 int ifindex = nla_get_u32(data[IFLA_BOND_PRIMARY]);
213 struct net_device *dev;
216 dev = __dev_get_by_index(dev_net(bond_dev), ifindex);
220 err = bond_option_primary_set(bond, primary);
224 if (data[IFLA_BOND_PRIMARY_RESELECT]) {
225 int primary_reselect =
226 nla_get_u8(data[IFLA_BOND_PRIMARY_RESELECT]);
228 err = bond_option_primary_reselect_set(bond, primary_reselect);
232 if (data[IFLA_BOND_FAIL_OVER_MAC]) {
234 nla_get_u8(data[IFLA_BOND_FAIL_OVER_MAC]);
236 bond_opt_initval(&newval, fail_over_mac);
237 err = __bond_opt_set(bond, BOND_OPT_FAIL_OVER_MAC, &newval);
241 if (data[IFLA_BOND_XMIT_HASH_POLICY]) {
242 int xmit_hash_policy =
243 nla_get_u8(data[IFLA_BOND_XMIT_HASH_POLICY]);
245 bond_opt_initval(&newval, xmit_hash_policy);
246 err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval);
250 if (data[IFLA_BOND_RESEND_IGMP]) {
252 nla_get_u32(data[IFLA_BOND_RESEND_IGMP]);
254 err = bond_option_resend_igmp_set(bond, resend_igmp);
258 if (data[IFLA_BOND_NUM_PEER_NOTIF]) {
260 nla_get_u8(data[IFLA_BOND_NUM_PEER_NOTIF]);
262 err = bond_option_num_peer_notif_set(bond, num_peer_notif);
266 if (data[IFLA_BOND_ALL_SLAVES_ACTIVE]) {
267 int all_slaves_active =
268 nla_get_u8(data[IFLA_BOND_ALL_SLAVES_ACTIVE]);
270 err = bond_option_all_slaves_active_set(bond,
275 if (data[IFLA_BOND_MIN_LINKS]) {
277 nla_get_u32(data[IFLA_BOND_MIN_LINKS]);
279 err = bond_option_min_links_set(bond, min_links);
283 if (data[IFLA_BOND_LP_INTERVAL]) {
285 nla_get_u32(data[IFLA_BOND_LP_INTERVAL]);
287 err = bond_option_lp_interval_set(bond, lp_interval);
291 if (data[IFLA_BOND_PACKETS_PER_SLAVE]) {
292 int packets_per_slave =
293 nla_get_u32(data[IFLA_BOND_PACKETS_PER_SLAVE]);
295 bond_opt_initval(&newval, packets_per_slave);
296 err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval);
300 if (data[IFLA_BOND_AD_LACP_RATE]) {
302 nla_get_u8(data[IFLA_BOND_AD_LACP_RATE]);
304 err = bond_option_lacp_rate_set(bond, lacp_rate);
308 if (data[IFLA_BOND_AD_SELECT]) {
310 nla_get_u8(data[IFLA_BOND_AD_SELECT]);
312 err = bond_option_ad_select_set(bond, ad_select);
/* rtnl_link_ops->newlink: apply the supplied options, then register the
 * freshly allocated bond device.  Returns 0 or a negative errno.
 */
static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	int err;

	err = bond_changelink(bond_dev, tb, data);
	if (err < 0)
		return err;

	return register_netdevice(bond_dev);
}
331 static size_t bond_get_size(const struct net_device *bond_dev)
333 return nla_total_size(sizeof(u8)) + /* IFLA_BOND_MODE */
334 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ACTIVE_SLAVE */
335 nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIIMON */
336 nla_total_size(sizeof(u32)) + /* IFLA_BOND_UPDELAY */
337 nla_total_size(sizeof(u32)) + /* IFLA_BOND_DOWNDELAY */
338 nla_total_size(sizeof(u8)) + /* IFLA_BOND_USE_CARRIER */
339 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_INTERVAL */
340 /* IFLA_BOND_ARP_IP_TARGET */
341 nla_total_size(sizeof(struct nlattr)) +
342 nla_total_size(sizeof(u32)) * BOND_MAX_ARP_TARGETS +
343 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_VALIDATE */
344 nla_total_size(sizeof(u32)) + /* IFLA_BOND_ARP_ALL_TARGETS */
345 nla_total_size(sizeof(u32)) + /* IFLA_BOND_PRIMARY */
346 nla_total_size(sizeof(u8)) + /* IFLA_BOND_PRIMARY_RESELECT */
347 nla_total_size(sizeof(u8)) + /* IFLA_BOND_FAIL_OVER_MAC */
348 nla_total_size(sizeof(u8)) + /* IFLA_BOND_XMIT_HASH_POLICY */
349 nla_total_size(sizeof(u32)) + /* IFLA_BOND_RESEND_IGMP */
350 nla_total_size(sizeof(u8)) + /* IFLA_BOND_NUM_PEER_NOTIF */
351 nla_total_size(sizeof(u8)) + /* IFLA_BOND_ALL_SLAVES_ACTIVE */
352 nla_total_size(sizeof(u32)) + /* IFLA_BOND_MIN_LINKS */
353 nla_total_size(sizeof(u32)) + /* IFLA_BOND_LP_INTERVAL */
354 nla_total_size(sizeof(u32)) + /* IFLA_BOND_PACKETS_PER_SLAVE */
355 nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_LACP_RATE */
356 nla_total_size(sizeof(u8)) + /* IFLA_BOND_AD_SELECT */
357 nla_total_size(sizeof(struct nlattr)) + /* IFLA_BOND_AD_INFO */
358 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_AGGREGATOR */
359 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_NUM_PORTS */
360 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_ACTOR_KEY */
361 nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_INFO_PARTNER_KEY*/
362 nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_INFO_PARTNER_MAC*/
366 static int bond_fill_info(struct sk_buff *skb,
367 const struct net_device *bond_dev)
369 struct bonding *bond = netdev_priv(bond_dev);
370 struct net_device *slave_dev = bond_option_active_slave_get(bond);
371 struct nlattr *targets;
372 unsigned int packets_per_slave;
373 int i, targets_added;
375 if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode))
376 goto nla_put_failure;
379 nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex))
380 goto nla_put_failure;
382 if (nla_put_u32(skb, IFLA_BOND_MIIMON, bond->params.miimon))
383 goto nla_put_failure;
385 if (nla_put_u32(skb, IFLA_BOND_UPDELAY,
386 bond->params.updelay * bond->params.miimon))
387 goto nla_put_failure;
389 if (nla_put_u32(skb, IFLA_BOND_DOWNDELAY,
390 bond->params.downdelay * bond->params.miimon))
391 goto nla_put_failure;
393 if (nla_put_u8(skb, IFLA_BOND_USE_CARRIER, bond->params.use_carrier))
394 goto nla_put_failure;
396 if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
397 goto nla_put_failure;
399 targets = nla_nest_start(skb, IFLA_BOND_ARP_IP_TARGET);
401 goto nla_put_failure;
404 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
405 if (bond->params.arp_targets[i]) {
406 nla_put_be32(skb, i, bond->params.arp_targets[i]);
412 nla_nest_end(skb, targets);
414 nla_nest_cancel(skb, targets);
416 if (nla_put_u32(skb, IFLA_BOND_ARP_VALIDATE, bond->params.arp_validate))
417 goto nla_put_failure;
419 if (nla_put_u32(skb, IFLA_BOND_ARP_ALL_TARGETS,
420 bond->params.arp_all_targets))
421 goto nla_put_failure;
423 if (bond->primary_slave &&
424 nla_put_u32(skb, IFLA_BOND_PRIMARY,
425 bond->primary_slave->dev->ifindex))
426 goto nla_put_failure;
428 if (nla_put_u8(skb, IFLA_BOND_PRIMARY_RESELECT,
429 bond->params.primary_reselect))
430 goto nla_put_failure;
432 if (nla_put_u8(skb, IFLA_BOND_FAIL_OVER_MAC,
433 bond->params.fail_over_mac))
434 goto nla_put_failure;
436 if (nla_put_u8(skb, IFLA_BOND_XMIT_HASH_POLICY,
437 bond->params.xmit_policy))
438 goto nla_put_failure;
440 if (nla_put_u32(skb, IFLA_BOND_RESEND_IGMP,
441 bond->params.resend_igmp))
442 goto nla_put_failure;
444 if (nla_put_u8(skb, IFLA_BOND_NUM_PEER_NOTIF,
445 bond->params.num_peer_notif))
446 goto nla_put_failure;
448 if (nla_put_u8(skb, IFLA_BOND_ALL_SLAVES_ACTIVE,
449 bond->params.all_slaves_active))
450 goto nla_put_failure;
452 if (nla_put_u32(skb, IFLA_BOND_MIN_LINKS,
453 bond->params.min_links))
454 goto nla_put_failure;
456 if (nla_put_u32(skb, IFLA_BOND_LP_INTERVAL,
457 bond->params.lp_interval))
458 goto nla_put_failure;
460 packets_per_slave = bond->params.packets_per_slave;
461 if (nla_put_u32(skb, IFLA_BOND_PACKETS_PER_SLAVE,
463 goto nla_put_failure;
465 if (nla_put_u8(skb, IFLA_BOND_AD_LACP_RATE,
466 bond->params.lacp_fast))
467 goto nla_put_failure;
469 if (nla_put_u8(skb, IFLA_BOND_AD_SELECT,
470 bond->params.ad_select))
471 goto nla_put_failure;
473 if (bond->params.mode == BOND_MODE_8023AD) {
476 if (!bond_3ad_get_active_agg_info(bond, &info)) {
479 nest = nla_nest_start(skb, IFLA_BOND_AD_INFO);
481 goto nla_put_failure;
483 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_AGGREGATOR,
485 goto nla_put_failure;
486 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_NUM_PORTS,
488 goto nla_put_failure;
489 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_ACTOR_KEY,
491 goto nla_put_failure;
492 if (nla_put_u16(skb, IFLA_BOND_AD_INFO_PARTNER_KEY,
494 goto nla_put_failure;
495 if (nla_put(skb, IFLA_BOND_AD_INFO_PARTNER_MAC,
496 sizeof(info.partner_system),
497 &info.partner_system))
498 goto nla_put_failure;
500 nla_nest_end(skb, nest);
510 struct rtnl_link_ops bond_link_ops __read_mostly = {
512 .priv_size = sizeof(struct bonding),
514 .maxtype = IFLA_BOND_MAX,
515 .policy = bond_policy,
516 .validate = bond_validate,
517 .newlink = bond_newlink,
518 .changelink = bond_changelink,
519 .get_size = bond_get_size,
520 .fill_info = bond_fill_info,
521 .get_num_tx_queues = bond_get_num_tx_queues,
522 .get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
526 int __init bond_netlink_init(void)
528 return rtnl_link_register(&bond_link_ops);
531 void bond_netlink_fini(void)
533 rtnl_link_unregister(&bond_link_ops);
536 MODULE_ALIAS_RTNL_LINK("bond");