/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
#include <linux/drbd_genl_api.h>
#include "drbd_nla.h"
#include <linux/genl_magic_func.h>
/* used as the holder argument of blkdev_get_by_path(),
 * to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
        /* assigned from drbd_genlmsghdr */
        unsigned int minor;
        /* assigned from request attributes, if present */
        unsigned int volume;
#define VOLUME_UNSPECIFIED (-1U)
        /* pointer into the request skb,
         * limited lifetime! */
        char *resource_name;
        struct nlattr *my_addr;
        struct nlattr *peer_addr;

        /* reply buffer */
        struct sk_buff *reply_skb;
        /* pointer into reply buffer */
        struct drbd_genlmsghdr *reply_dh;
        /* resolved from attributes, if possible */
        struct drbd_conf *mdev;
        struct drbd_tconn *tconn;
} adm_ctx;
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
        genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
        if (genlmsg_reply(skb, info))
                printk(KERN_ERR "drbd: error sending genl reply\n");
}
/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail would be lack of space in the skb, and there are 4k available. */
int drbd_msg_put_info(const char *info)
{
        struct sk_buff *skb = adm_ctx.reply_skb;
        struct nlattr *nla;
        int err = -EMSGSIZE;

        if (!info || !info[0])
                return 0;

        nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
        if (!nla)
                return err;

        err = nla_put_string(skb, T_info_text, info);
        if (err) {
                nla_nest_cancel(skb, nla);
                return err;
        } else
                nla_nest_end(skb, nla);
        return 0;
}
/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_RESOURCE	2
#define DRBD_ADM_NEED_CONNECTION 4
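
/* A minimal usage sketch (this mirrors the adm handlers further down):
 *
 *	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
 *	if (!adm_ctx.reply_skb)
 *		return retcode;
 *
 * Once NO_ERROR is returned, adm_ctx.mdev and/or adm_ctx.tconn have been
 * resolved according to the flags passed in. */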
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
                unsigned flags)
{
        struct drbd_genlmsghdr *d_in = info->userhdr;
        const u8 cmd = info->genlhdr->cmd;
        int err;

        memset(&adm_ctx, 0, sizeof(adm_ctx));

        /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
        if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
                return -EPERM;

        adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!adm_ctx.reply_skb) {
                err = -ENOMEM;
                goto fail;
        }

        adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
                                        info, &drbd_genl_family, 0, cmd);
        /* put of a few bytes into a fresh skb of >= 4k will always succeed.
         * but anyways */
        if (!adm_ctx.reply_dh) {
                err = -ENOMEM;
                goto fail;
        }

        adm_ctx.reply_dh->minor = d_in->minor;
        adm_ctx.reply_dh->ret_code = NO_ERROR;
        adm_ctx.volume = VOLUME_UNSPECIFIED;
        if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
                struct nlattr *nla;
                /* parse and validate only */
                err = drbd_cfg_context_from_attrs(NULL, info);
                if (err)
                        goto fail;

                /* It was present, and valid,
                 * copy it over to the reply skb. */
                err = nla_put_nohdr(adm_ctx.reply_skb,
                                info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
                                info->attrs[DRBD_NLA_CFG_CONTEXT]);
                if (err)
                        goto fail;

                /* and assign stuff to the global adm_ctx */
                nla = nested_attr_tb[__nla_type(T_ctx_volume)];
                if (nla)
                        adm_ctx.volume = nla_get_u32(nla);
                nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
                if (nla)
                        adm_ctx.resource_name = nla_data(nla);
                adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
                adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
                if ((adm_ctx.my_addr &&
                     nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
                    (adm_ctx.peer_addr &&
                     nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {
                        err = -EINVAL;
                        goto fail;
                }
        }
        adm_ctx.minor = d_in->minor;
        adm_ctx.mdev = minor_to_mdev(d_in->minor);
        adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);

        if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
                drbd_msg_put_info("unknown minor");
                return ERR_MINOR_INVALID;
        }
        if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
                drbd_msg_put_info("unknown resource");
                return ERR_INVALID_REQUEST;
        }
        if (flags & DRBD_ADM_NEED_CONNECTION) {
                if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
                        drbd_msg_put_info("no resource name expected");
                        return ERR_INVALID_REQUEST;
                }
                if (adm_ctx.mdev) {
                        drbd_msg_put_info("no minor number expected");
                        return ERR_INVALID_REQUEST;
                }
                if (adm_ctx.my_addr && adm_ctx.peer_addr)
                        adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
                                                          nla_len(adm_ctx.my_addr),
                                                          nla_data(adm_ctx.peer_addr),
                                                          nla_len(adm_ctx.peer_addr));
                if (!adm_ctx.tconn) {
                        drbd_msg_put_info("unknown connection");
                        return ERR_INVALID_REQUEST;
                }
        }
        /* some more paranoia, if the request was over-determined */
        if (adm_ctx.mdev && adm_ctx.tconn &&
            adm_ctx.mdev->tconn != adm_ctx.tconn) {
                pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
                                adm_ctx.minor, adm_ctx.resource_name,
                                adm_ctx.mdev->tconn->name);
                drbd_msg_put_info("minor exists in different resource");
                return ERR_INVALID_REQUEST;
        }
        if (adm_ctx.mdev &&
            adm_ctx.volume != VOLUME_UNSPECIFIED &&
            adm_ctx.volume != adm_ctx.mdev->vnr) {
                pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
                                adm_ctx.minor, adm_ctx.volume,
                                adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
                drbd_msg_put_info("minor exists as different volume");
                return ERR_INVALID_REQUEST;
        }

        return NO_ERROR;
fail:
        nlmsg_free(adm_ctx.reply_skb);
        adm_ctx.reply_skb = NULL;
        return err;
}
static int drbd_adm_finish(struct genl_info *info, int retcode)
{
        if (adm_ctx.tconn) {
                kref_put(&adm_ctx.tconn->kref, &conn_destroy);
                adm_ctx.tconn = NULL;
        }

        if (!adm_ctx.reply_skb)
                return -ENOMEM;

        adm_ctx.reply_dh->ret_code = retcode;
        drbd_adm_send_reply(adm_ctx.reply_skb, info);
        return 0;
}
static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
{
        char *afs;

        /* FIXME: A future version will not allow this case. */
        if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)
                return;

        switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
        case AF_INET6:
                afs = "ipv6";
                snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
                         &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
                break;
        case AF_INET:
                afs = "ipv4";
                snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
                         &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
                break;
        default:
                afs = "ssocks";
                snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
                         &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
        }
        snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}
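
/* The helpers invoked below thus see, for an IPv4 peer, e.g.:
 *	DRBD_PEER_AF=ipv4
 *	DRBD_PEER_ADDRESS=192.0.2.1
 * (192.0.2.1 is an illustrative documentation address, not a real setup). */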
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
        char *envp[] = { "HOME=/",
                        "TERM=linux",
                        "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
                         (char[20]) { }, /* address family */
                         (char[60]) { }, /* address */
                        NULL };
        char mb[12];
        char *argv[] = {usermode_helper, cmd, mb, NULL };
        struct drbd_tconn *tconn = mdev->tconn;
        struct sib_info sib;
        int ret;

        if (current == tconn->worker.task)
                set_bit(CALLBACK_PENDING, &tconn->flags);

        snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
        setup_khelper_env(tconn, envp);

        /* The helper may take some time.
         * write out any unsynced meta data changes now */
        drbd_md_sync(mdev);

        dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
        sib.sib_reason = SIB_HELPER_PRE;
        sib.helper_name = cmd;
        drbd_bcast_event(mdev, &sib);
        ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
        if (ret)
                dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
                                usermode_helper, cmd, mb,
                                (ret >> 8) & 0xff, ret);
        else
                dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
                                usermode_helper, cmd, mb,
                                (ret >> 8) & 0xff, ret);
        sib.sib_reason = SIB_HELPER_POST;
        sib.helper_exit_code = ret;
        drbd_bcast_event(mdev, &sib);

        if (current == tconn->worker.task)
                clear_bit(CALLBACK_PENDING, &tconn->flags);

        if (ret < 0) /* Ignore any ERRNOs we got. */
                ret = 0;

        return ret;
}
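
/* call_usermodehelper(..., UMH_WAIT_PROC) returns a wait(2)-style status:
 * a helper exiting with code 5 yields ret == 0x0500, so (ret >> 8) & 0xff
 * recovers the 5; negative values are kernel errnos, which the callers
 * here deliberately ignore. */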
int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
        char *envp[] = { "HOME=/",
                        "TERM=linux",
                        "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
                         (char[20]) { }, /* address family */
                         (char[60]) { }, /* address */
                        NULL };
        char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
        int ret;

        setup_khelper_env(tconn, envp);
        conn_md_sync(tconn);

        conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
        /* TODO: conn_bcast_event() ?? */

        ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
        if (ret)
                conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
                          usermode_helper, cmd, tconn->name,
                          (ret >> 8) & 0xff, ret);
        else
                conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
                          usermode_helper, cmd, tconn->name,
                          (ret >> 8) & 0xff, ret);
        /* TODO: conn_bcast_event() ?? */

        if (ret < 0) /* Ignore any ERRNOs we got. */
                ret = 0;

        return ret;
}
static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
        enum drbd_fencing_p fp = FP_NOT_AVAIL;
        struct drbd_conf *mdev;
        int vnr;

        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                if (get_ldev_if_state(mdev, D_CONSISTENT)) {
                        fp = max_t(enum drbd_fencing_p, fp,
                                   rcu_dereference(mdev->ldev->disk_conf)->fencing);
                        put_ldev(mdev);
                }
        }
        rcu_read_unlock();

        return fp;
}
bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
        union drbd_state mask = { };
        union drbd_state val = { };
        enum drbd_fencing_p fp;
        char *ex_to_string;
        int r;

        if (tconn->cstate >= C_WF_REPORT_PARAMS) {
                conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
                return false;
        }

        fp = highest_fencing_policy(tconn);
        switch (fp) {
        case FP_NOT_AVAIL:
                conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
                goto out;
        case FP_DONT_CARE:
                return true;
        default: ;
        }

        r = conn_khelper(tconn, "fence-peer");

        switch ((r>>8) & 0xff) {
        case 3: /* peer is inconsistent */
                ex_to_string = "peer is inconsistent or worse";
                mask.pdsk = D_MASK;
                val.pdsk = D_INCONSISTENT;
                break;
        case 4: /* peer got outdated, or was already outdated */
                ex_to_string = "peer was fenced";
                mask.pdsk = D_MASK;
                val.pdsk = D_OUTDATED;
                break;
        case 5: /* peer was down */
                if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
                        /* we will(have) create(d) a new UUID anyways... */
                        ex_to_string = "peer is unreachable, assumed to be dead";
                        mask.pdsk = D_MASK;
                        val.pdsk = D_OUTDATED;
                } else {
                        ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
                }
                break;
        case 6: /* Peer is primary, voluntarily outdate myself.
                 * This is useful when an unconnected R_SECONDARY is asked to
                 * become R_PRIMARY, but finds the other peer being active. */
                ex_to_string = "peer is active";
                conn_warn(tconn, "Peer is primary, outdating myself.\n");
                mask.disk = D_MASK;
                val.disk = D_OUTDATED;
                break;
        case 7:
                if (fp != FP_STONITH)
                        conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
                ex_to_string = "peer was stonithed";
                mask.pdsk = D_MASK;
                val.pdsk = D_OUTDATED;
                break;
        default:
                /* The script is broken ... */
                conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
                return false; /* Eventually leave IO frozen */
        }

        conn_info(tconn, "fence-peer helper returned %d (%s)\n",
                  (r>>8) & 0xff, ex_to_string);

 out:
        /* Not using
           conn_request_state(tconn, mask, val, CS_VERBOSE);
           here, because we might have been able to re-establish the
           connection in the meantime. */
        spin_lock_irq(&tconn->req_lock);
        if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags))
                _conn_request_state(tconn, mask, val, CS_VERBOSE);
        spin_unlock_irq(&tconn->req_lock);

        return conn_highest_pdsk(tconn) <= D_OUTDATED;
}
static int _try_outdate_peer_async(void *data)
{
        struct drbd_tconn *tconn = (struct drbd_tconn *)data;

        conn_try_outdate_peer(tconn);

        kref_put(&tconn->kref, &conn_destroy);
        return 0;
}
void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
        struct task_struct *opa;

        kref_get(&tconn->kref);
        opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
        if (IS_ERR(opa)) {
                conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
                kref_put(&tconn->kref, &conn_destroy);
        }
}
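
/* Reference discipline: the kref taken above is handed over to the kthread
 * and dropped in _try_outdate_peer_async(); only if spawning the thread
 * fails does this function drop the reference itself. */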
enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
        const int max_tries = 4;
        enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
        struct net_conf *nc;
        int try = 0;
        int forced = 0;
        union drbd_state mask, val;

        if (new_role == R_PRIMARY)
                request_ping(mdev->tconn); /* Detect a dead peer ASAP */

        mutex_lock(mdev->state_mutex);

        mask.i = 0; mask.role = R_MASK;
        val.i = 0; val.role = new_role;

        while (try++ < max_tries) {
                rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

                /* in case we first succeeded to outdate,
                 * but now suddenly could establish a connection */
                if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
                        val.pdsk = 0;
                        mask.pdsk = 0;
                        continue;
                }

                if (rv == SS_NO_UP_TO_DATE_DISK && force &&
                    (mdev->state.disk < D_UP_TO_DATE &&
                     mdev->state.disk >= D_INCONSISTENT)) {
                        mask.disk = D_MASK;
                        val.disk = D_UP_TO_DATE;
                        forced = 1;
                        continue;
                }

                if (rv == SS_NO_UP_TO_DATE_DISK &&
                    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
                        D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

                        if (conn_try_outdate_peer(mdev->tconn)) {
                                val.disk = D_UP_TO_DATE;
                                mask.disk = D_MASK;
                        }
                        continue;
                }

                if (rv == SS_NOTHING_TO_DO)
                        goto out;
                if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
                        if (!conn_try_outdate_peer(mdev->tconn) && force) {
                                dev_warn(DEV, "Forced into split brain situation!\n");
                                mask.pdsk = D_MASK;
                                val.pdsk = D_OUTDATED;
                        }
                        continue;
                }
                if (rv == SS_TWO_PRIMARIES) {
                        /* Maybe the peer is detected as dead very soon...
                           retry at most once more in this case. */
                        int timeo;
                        rcu_read_lock();
                        nc = rcu_dereference(mdev->tconn->net_conf);
                        timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
                        rcu_read_unlock();
                        schedule_timeout_interruptible(timeo);
                        if (try < max_tries)
                                try = max_tries - 1;
                        continue;
                }
                if (rv < SS_SUCCESS) {
                        rv = _drbd_request_state(mdev, mask, val,
                                                CS_VERBOSE + CS_WAIT_COMPLETE);
                        if (rv < SS_SUCCESS)
                                goto out;
                }
                break;
        }

        if (rv < SS_SUCCESS)
                goto out;

        if (forced)
                dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

        /* Wait until nothing is on the fly :) */
        wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

        /* FIXME also wait for all pending P_BARRIER_ACK? */

        if (new_role == R_SECONDARY) {
                set_disk_ro(mdev->vdisk, true);
                if (get_ldev(mdev)) {
                        mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
                        put_ldev(mdev);
                }
        } else {
                mutex_lock(&mdev->tconn->conf_update);
                nc = mdev->tconn->net_conf;
                if (nc)
                        nc->discard_my_data = 0; /* without copy; single bit op is atomic */
                mutex_unlock(&mdev->tconn->conf_update);

                set_disk_ro(mdev->vdisk, false);
                if (get_ldev(mdev)) {
                        if (((mdev->state.conn < C_CONNECTED ||
                               mdev->state.pdsk <= D_FAILED)
                              && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
                                drbd_uuid_new_current(mdev);

                        mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
                        put_ldev(mdev);
                }
        }

        /* writeout of activity log covered areas of the bitmap
         * to stable storage done in after state change already */

        if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
                /* if this was forced, we should consider sync */
                if (forced)
                        drbd_send_uuids(mdev);
                drbd_send_current_state(mdev);
        }

        drbd_md_sync(mdev);

        kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
        mutex_unlock(mdev->state_mutex);
        return rv;
}
static const char *from_attrs_err_to_txt(int err)
{
        return  err == -ENOMSG ? "required attribute missing" :
                err == -EOPNOTSUPP ? "unknown mandatory attribute" :
                err == -EEXIST ? "can not change invariant setting" :
                "invalid attribute value";
}
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
        struct set_role_parms parms;
        int err;
        enum drbd_ret_code retcode;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;

        memset(&parms, 0, sizeof(parms));
        if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
                err = set_role_parms_from_attrs(&parms, info);
                if (err) {
                        retcode = ERR_MANDATORY_TAG;
                        drbd_msg_put_info(from_attrs_err_to_txt(err));
                        goto out;
                }
        }

        if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
                retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
        else
                retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
out:
        drbd_adm_finish(info, retcode);
        return 0;
}
/* Initializes the md.*_offset members, so we are able to find
 * the on disk meta data.
 *
 * We currently have two possible layouts:
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | al_offset == 8 |
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 * internal:
 *            |----------- md_size_sect ------------------|
 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ]
 *                        | al_offset < 0 |
 *            | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 * Activity log size used to be fixed 32kB,
 * but is about to become configurable.
 */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
                                       struct drbd_backing_dev *bdev)
{
        sector_t md_size_sect = 0;
        unsigned int al_size_sect = bdev->md.al_size_4k * 8;

        bdev->md.md_offset = drbd_md_ss(bdev);

        switch (bdev->md.meta_dev_idx) {
        default:
                /* v07 style fixed size indexed meta data */
                bdev->md.md_size_sect = MD_128MB_SECT;
                bdev->md.al_offset = MD_4kB_SECT;
                bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
                break;
        case DRBD_MD_INDEX_FLEX_EXT:
                /* just occupy the full device; unit: sectors */
                bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
                bdev->md.al_offset = MD_4kB_SECT;
                bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
                break;
        case DRBD_MD_INDEX_INTERNAL:
        case DRBD_MD_INDEX_FLEX_INT:
                /* al size is still fixed */
                bdev->md.al_offset = -al_size_sect;
                /* we need (slightly less than) ~ this many bitmap sectors: */
                md_size_sect = drbd_get_capacity(bdev->backing_bdev);
                md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
                md_size_sect = BM_SECT_TO_EXT(md_size_sect);
                md_size_sect = ALIGN(md_size_sect, 8);

                /* plus the "drbd meta data super block",
                 * and the activity log; */
                md_size_sect += MD_4kB_SECT + al_size_sect;

                bdev->md.md_size_sect = md_size_sect;
                /* bitmap offset is adjusted by 'super' block size */
                bdev->md.bm_offset = -md_size_sect + MD_4kB_SECT;
                break;
        }
}
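
/* Rule of thumb for the internal layout above: the bitmap tracks one bit
 * per 4kB block, i.e. roughly 32kB of bitmap per 1GiB of backing storage
 * (about 32MB per TiB), plus the 4kB superblock and the (historically
 * fixed 32kB) activity log reserved at the end of the device. */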
/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
        /* Needs 9 bytes at max including trailing NUL:
         * -1ULL ==> "16384 EB" */
        static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
        int base = 0;
        while (size >= 10000 && base < sizeof(units)-1) {
                /* shift + round */
                size = (size >> 10) + !!(size & (1<<9));
                base++;
        }
        sprintf(buf, "%u %cB", (unsigned)size, units[base]);

        return buf;
}
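
/* Example: ppsize(buf, 1048576) formats a 1 GiB value (given in KB) as
 * "1024 MB"; the !!(size & (1<<9)) term rounds to nearest on each shift. */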
/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
        set_bit(SUSPEND_IO, &mdev->flags);
        if (drbd_suspended(mdev))
                return;
        wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
        clear_bit(SUSPEND_IO, &mdev->flags);
        wake_up(&mdev->misc_wait);
}
/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
        sector_t prev_first_sect, prev_size; /* previous meta location */
        sector_t la_size_sect, u_size;
        sector_t size;
        char ppb[10];

        int md_moved, la_size_changed;
        enum determine_dev_size rv = unchanged;

        /* race:
         * application request passes inc_ap_bio,
         * but then cannot get an AL-reference.
         * this function later may wait on ap_bio_cnt == 0. -> deadlock.
         *
         * to avoid that:
         * Suspend IO right here.
         * still lock the act_log to not trigger ASSERTs there.
         */
        drbd_suspend_io(mdev);

        /* no wait necessary anymore, actually we could assert that */
        wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

        prev_first_sect = drbd_md_first_sector(mdev->ldev);
        prev_size = mdev->ldev->md.md_size_sect;
        la_size_sect = mdev->ldev->md.la_size_sect;

        /* TODO: should only be some assert here, not (re)init... */
        drbd_md_set_sector_offsets(mdev, mdev->ldev);

        rcu_read_lock();
        u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
        rcu_read_unlock();
        size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);

        if (drbd_get_capacity(mdev->this_bdev) != size ||
            drbd_bm_capacity(mdev) != size) {
                int err;
                err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
                if (unlikely(err)) {
                        /* currently there is only one error: ENOMEM! */
                        size = drbd_bm_capacity(mdev)>>1;
                        if (size == 0) {
                                dev_err(DEV, "OUT OF MEMORY! "
                                    "Could not allocate bitmap!\n");
                        } else {
                                dev_err(DEV, "BM resizing failed. "
                                    "Leaving size unchanged at size = %lu KB\n",
                                    (unsigned long)size);
                        }
                        rv = dev_size_error;
                }
                /* racy, see comments above. */
                drbd_set_my_capacity(mdev, size);
                mdev->ldev->md.la_size_sect = size;
                dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
                     (unsigned long long)size>>1);
        }
        if (rv == dev_size_error)
                goto out;

        la_size_changed = (la_size_sect != mdev->ldev->md.la_size_sect);

        md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
                || prev_size       != mdev->ldev->md.md_size_sect;

        if (la_size_changed || md_moved) {
                int err;

                drbd_al_shrink(mdev); /* All extents inactive. */
                dev_info(DEV, "Writing the whole bitmap, %s\n",
                         la_size_changed && md_moved ? "size changed and md moved" :
                         la_size_changed ? "size changed" : "md moved");
                /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
                err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
                                     "size changed", BM_LOCKED_MASK);
                if (err) {
                        rv = dev_size_error;
                        goto out;
                }
                drbd_md_mark_dirty(mdev);
        }

        if (size > la_size_sect)
                rv = grew;
        if (size < la_size_sect)
                rv = shrunk;
out:
        lc_unlock(mdev->act_log);
        wake_up(&mdev->al_wait);
        drbd_resume_io(mdev);

        return rv;
}
sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
                  sector_t u_size, int assume_peer_has_space)
{
        sector_t p_size = mdev->p_size;   /* partner's disk size. */
        sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
        sector_t m_size; /* my size */
        sector_t size = 0;

        m_size = drbd_get_max_capacity(bdev);

        if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
                dev_warn(DEV, "Resize while not connected was forced by the user!\n");
                p_size = m_size;
        }

        if (p_size && m_size) {
                size = min_t(sector_t, p_size, m_size);
        } else {
                if (la_size_sect) {
                        size = la_size_sect;
                        if (m_size && m_size < size)
                                size = m_size;
                        if (p_size && p_size < size)
                                size = p_size;
                } else {
                        if (m_size)
                                size = m_size;
                        if (p_size)
                                size = p_size;
                }
        }

        if (size == 0)
                dev_err(DEV, "Both nodes diskless!\n");

        if (u_size) {
                if (u_size > size)
                        dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
                            (unsigned long)u_size>>1, (unsigned long)size>>1);
                else
                        size = u_size;
        }

        return size;
}
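
/* Sizing precedence in short: if both sizes are known, use the smaller one;
 * otherwise fall back to the last agreed size, clipped by whatever is known;
 * an explicit u_size requested by the user only wins if it does not exceed
 * the result. */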
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
        struct lru_cache *n, *t;
        struct lc_element *e;
        unsigned int in_use;
        int i;

        if (mdev->act_log &&
            mdev->act_log->nr_elements == dc->al_extents)
                return 0;

        in_use = 0;
        t = mdev->act_log;
        n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
                dc->al_extents, sizeof(struct lc_element), 0);

        if (n == NULL) {
                dev_err(DEV, "Cannot allocate act_log lru!\n");
                return -ENOMEM;
        }
        spin_lock_irq(&mdev->al_lock);
        if (t) {
                for (i = 0; i < t->nr_elements; i++) {
                        e = lc_element_by_index(t, i);
                        if (e->refcnt)
                                dev_err(DEV, "refcnt(%d)==%d\n",
                                    e->lc_number, e->refcnt);
                        in_use += e->refcnt;
                }
        }
        if (!in_use)
                mdev->act_log = n;
        spin_unlock_irq(&mdev->al_lock);
        if (in_use) {
                dev_err(DEV, "Activity log still in use!\n");
                lc_destroy(n);
                return -EBUSY;
        } else {
                if (t)
                        lc_destroy(t);
        }
        drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
        return 0;
}
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
        struct request_queue * const q = mdev->rq_queue;
        unsigned int max_hw_sectors = max_bio_size >> 9;
        unsigned int max_segments = 0;

        if (get_ldev_if_state(mdev, D_ATTACHING)) {
                struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

                max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
                rcu_read_lock();
                max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
                rcu_read_unlock();
                put_ldev(mdev);
        }

        blk_queue_logical_block_size(q, 512);
        blk_queue_max_hw_sectors(q, max_hw_sectors);
        /* This is the workaround for "bio would need to, but cannot, be split" */
        blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
        blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

        if (get_ldev_if_state(mdev, D_ATTACHING)) {
                struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

                blk_queue_stack_limits(q, b);

                if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
                        dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
                                 q->backing_dev_info.ra_pages,
                                 b->backing_dev_info.ra_pages);
                        q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
                }
                put_ldev(mdev);
        }
}
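
/* max_bio_size is a byte count: a 1MiB limit, for instance, translates to
 * max_hw_sectors == 2048 512-byte sectors (1MiB >> 9), possibly clipped
 * further by what the backing device's queue reports while attached. */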
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
        unsigned int now, new, local, peer;

        now = queue_max_hw_sectors(mdev->rq_queue) << 9;
        local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
        peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

        if (get_ldev_if_state(mdev, D_ATTACHING)) {
                local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
                mdev->local_max_bio_size = local;
                put_ldev(mdev);
        }
        local = min(local, DRBD_MAX_BIO_SIZE);

        /* We may ignore peer limits if the peer is modern enough.
           From 8.3.8 onwards the peer can use multiple
           BIOs for a single peer_request. */
        if (mdev->state.conn >= C_CONNECTED) {
                if (mdev->tconn->agreed_pro_version < 94)
                        peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
                /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
                else if (mdev->tconn->agreed_pro_version == 94)
                        peer = DRBD_MAX_SIZE_H80_PACKET;
                else if (mdev->tconn->agreed_pro_version < 100)
                        peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */
                else
                        peer = DRBD_MAX_BIO_SIZE;
        }

        new = min(local, peer);

        if (mdev->state.role == R_PRIMARY && new < now)
                dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now);

        if (new != now)
                dev_info(DEV, "max BIO size = %u\n", new);

        drbd_setup_queue_param(mdev, new);
}
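
/* Example of the negotiation above: against an 8.3.7 peer
 * (agreed_pro_version < 94) the limit drops to DRBD_MAX_SIZE_H80_PACKET
 * (the 32KiB mentioned above), while two 8.4 nodes (protocol >= 100)
 * agree on the full DRBD_MAX_BIO_SIZE. */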
/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
        drbd_thread_start(&tconn->worker);
        conn_flush_workqueue(tconn);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
        bool stop_threads;

        spin_lock_irq(&tconn->req_lock);
        stop_threads = conn_all_vols_unconf(tconn) &&
                tconn->cstate == C_STANDALONE;
        spin_unlock_irq(&tconn->req_lock);
        if (stop_threads) {
                /* asender is implicitly stopped by receiver
                 * in conn_disconnect() */
                drbd_thread_stop(&tconn->receiver);
                drbd_thread_stop(&tconn->worker);
        }
}
/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
        int s = 0;

        if (!lc_try_lock(mdev->act_log)) {
                dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
                return;
        }

        drbd_al_shrink(mdev);
        spin_lock_irq(&mdev->tconn->req_lock);
        if (mdev->state.conn < C_CONNECTED)
                s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
        spin_unlock_irq(&mdev->tconn->req_lock);
        lc_unlock(mdev->act_log);

        if (s)
                dev_info(DEV, "Suspended AL updates\n");
}
static bool should_set_defaults(struct genl_info *info)
{
        unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
        return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}
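
/* This is the flag that e.g. "drbdsetup ... --set-defaults" sets in the
 * request header: options not given explicitly are then reset to their
 * defaults instead of being left at their current values. */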
static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
{
        /* This is limited by 16 bit "slot" numbers,
         * and by available on-disk context storage.
         *
         * Also (u16)~0 is special (denotes a "free" extent).
         *
         * One transaction occupies one 4kB on-disk block,
         * we have n such blocks in the on disk ring buffer,
         * the "current" transaction may fail (n-1),
         * and there are 919 slot numbers of context information per transaction.
         *
         * 72 transaction blocks amounts to more than 2**16 context slots,
         * so cap there first.
         */
        const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
        const unsigned int sufficient_on_disk =
                (max_al_nr + AL_CONTEXT_PER_TRANSACTION -1)
                /AL_CONTEXT_PER_TRANSACTION;

        unsigned int al_size_4k = bdev->md.al_size_4k;

        if (al_size_4k > sufficient_on_disk)
                return max_al_nr;

        return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
}
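
/* Worked numbers: with AL_CONTEXT_PER_TRANSACTION == 919 as described
 * above, 72 on-disk blocks provide 72 * 919 == 66168 > 2**16 slots, hence
 * the cap; the historic fixed 32kB AL (al_size_4k == 8) yields
 * (8 - 1) * 919 == 6433 usable extents. */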
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
        enum drbd_ret_code retcode;
        struct drbd_conf *mdev;
        struct disk_conf *new_disk_conf, *old_disk_conf;
        struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
        int err, fifo_size;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;

        mdev = adm_ctx.mdev;

        /* we also need a disk
         * to change the options on */
        if (!get_ldev(mdev)) {
                retcode = ERR_NO_DISK;
                goto out;
        }

        new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
        if (!new_disk_conf) {
                retcode = ERR_NOMEM;
                goto fail;
        }

        mutex_lock(&mdev->tconn->conf_update);
        old_disk_conf = mdev->ldev->disk_conf;
        *new_disk_conf = *old_disk_conf;
        if (should_set_defaults(info))
                set_disk_conf_defaults(new_disk_conf);

        err = disk_conf_from_attrs_for_change(new_disk_conf, info);
        if (err && err != -ENOMSG) {
                retcode = ERR_MANDATORY_TAG;
                drbd_msg_put_info(from_attrs_err_to_txt(err));
        }

        if (!expect(new_disk_conf->resync_rate >= 1))
                new_disk_conf->resync_rate = 1;

        if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
                new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
        if (new_disk_conf->al_extents > drbd_al_extents_max(mdev->ldev))
                new_disk_conf->al_extents = drbd_al_extents_max(mdev->ldev);

        if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
                new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;

        fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
        if (fifo_size != mdev->rs_plan_s->size) {
                new_plan = fifo_alloc(fifo_size);
                if (!new_plan) {
                        dev_err(DEV, "kmalloc of fifo_buffer failed");
                        retcode = ERR_NOMEM;
                        goto fail_unlock;
                }
        }

        drbd_suspend_io(mdev);
        wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
        drbd_al_shrink(mdev);
        err = drbd_check_al_size(mdev, new_disk_conf);
        lc_unlock(mdev->act_log);
        wake_up(&mdev->al_wait);
        drbd_resume_io(mdev);

        if (err) {
                retcode = ERR_NOMEM;
                goto fail_unlock;
        }

        write_lock_irq(&global_state_lock);
        retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
        if (retcode == NO_ERROR) {
                rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
                drbd_resync_after_changed(mdev);
        }
        write_unlock_irq(&global_state_lock);

        if (retcode != NO_ERROR)
                goto fail_unlock;

        if (new_plan) {
                old_plan = mdev->rs_plan_s;
                rcu_assign_pointer(mdev->rs_plan_s, new_plan);
        }

        mutex_unlock(&mdev->tconn->conf_update);

        if (new_disk_conf->al_updates)
                mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
        else
                mdev->ldev->md.flags |= MDF_AL_DISABLED;

        if (new_disk_conf->md_flushes)
                clear_bit(MD_NO_FUA, &mdev->flags);
        else
                set_bit(MD_NO_FUA, &mdev->flags);

        drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);

        drbd_md_sync(mdev);

        if (mdev->state.conn >= C_CONNECTED)
                drbd_send_sync_param(mdev);

        synchronize_rcu();
        kfree(old_disk_conf);
        kfree(old_plan);
        mod_timer(&mdev->request_timer, jiffies + HZ);
        goto success;

 fail_unlock:
        mutex_unlock(&mdev->tconn->conf_update);
 fail:
        kfree(new_disk_conf);
        kfree(new_plan);
 success:
        put_ldev(mdev);
 out:
        drbd_adm_finish(info, retcode);
        return 0;
}
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
        struct drbd_conf *mdev;
        int err;
        enum drbd_ret_code retcode;
        enum determine_dev_size dd;
        sector_t max_possible_sectors;
        sector_t min_md_device_sectors;
        struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
        struct disk_conf *new_disk_conf = NULL;
        struct block_device *bdev;
        struct lru_cache *resync_lru = NULL;
        struct fifo_buffer *new_plan = NULL;
        union drbd_state ns, os;
        enum drbd_state_rv rv;
        struct net_conf *nc;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto finish;

        mdev = adm_ctx.mdev;
        conn_reconfig_start(mdev->tconn);
        /* if you want to reconfigure, please tear down first */
        if (mdev->state.disk > D_DISKLESS) {
                retcode = ERR_DISK_CONFIGURED;
                goto fail;
        }
        /* It may just now have detached because of IO error. Make sure
         * drbd_ldev_destroy is done already, we may end up here very fast,
         * e.g. if someone calls attach from the on-io-error handler,
         * to realize a "hot spare" feature (not that I'd recommend that) */
        wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));

        /* make sure there is no leftover from previous force-detach attempts */
        clear_bit(FORCE_DETACH, &mdev->flags);
        clear_bit(WAS_IO_ERROR, &mdev->flags);
        clear_bit(WAS_READ_ERROR, &mdev->flags);

        /* and no leftover from previously aborted resync or verify, either */
        mdev->rs_total = 0;
        mdev->rs_failed = 0;
        atomic_set(&mdev->rs_pending_cnt, 0);
        /* allocation not in the IO path, drbdsetup context */
        nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
        if (!nbc) {
                retcode = ERR_NOMEM;
                goto fail;
        }
        spin_lock_init(&nbc->md.uuid_lock);

        new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
        if (!new_disk_conf) {
                retcode = ERR_NOMEM;
                goto fail;
        }
        nbc->disk_conf = new_disk_conf;

        set_disk_conf_defaults(new_disk_conf);
        err = disk_conf_from_attrs(new_disk_conf, info);
        if (err) {
                retcode = ERR_MANDATORY_TAG;
                drbd_msg_put_info(from_attrs_err_to_txt(err));
                goto fail;
        }

        if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
                new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;

        new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
        if (!new_plan) {
                retcode = ERR_NOMEM;
                goto fail;
        }

        if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
                retcode = ERR_MD_IDX_INVALID;
                goto fail;
        }

        rcu_read_lock();
        nc = rcu_dereference(mdev->tconn->net_conf);
        if (nc) {
                if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
                        rcu_read_unlock();
                        retcode = ERR_STONITH_AND_PROT_A;
                        goto fail;
                }
        }
        rcu_read_unlock();
        bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
                                  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
        if (IS_ERR(bdev)) {
                dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
                        PTR_ERR(bdev));
                retcode = ERR_OPEN_DISK;
                goto fail;
        }
        nbc->backing_bdev = bdev;

        /*
         * meta_dev_idx >= 0: external fixed size, possibly multiple
         * drbd sharing one meta device. TODO in that case, paranoia
         * check that [md_bdev, meta_dev_idx] is not yet used by some
         * other drbd minor! (if you use drbd.conf + drbdadm, that
         * should check it for you already; but if you don't, or
         * someone fooled it, we need to double check here)
         */
        bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
                                  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
                                  (new_disk_conf->meta_dev_idx < 0) ?
                                  (void *)mdev : (void *)drbd_m_holder);
        if (IS_ERR(bdev)) {
                dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
                        PTR_ERR(bdev));
                retcode = ERR_OPEN_MD_DISK;
                goto fail;
        }
        nbc->md_bdev = bdev;

        if ((nbc->backing_bdev == nbc->md_bdev) !=
            (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
             new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
                retcode = ERR_MD_IDX_INVALID;
                goto fail;
        }

        resync_lru = lc_create("resync", drbd_bm_ext_cache,
                               1, 61, sizeof(struct bm_extent),
                               offsetof(struct bm_extent, lce));
        if (!resync_lru) {
                retcode = ERR_NOMEM;
                goto fail;
        }
        /* Read our meta data super block early.
         * This also sets other on-disk offsets. */
        retcode = drbd_md_read(mdev, nbc);
        if (retcode != NO_ERROR)
                goto fail;

        if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
                new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
        if (new_disk_conf->al_extents > drbd_al_extents_max(nbc))
                new_disk_conf->al_extents = drbd_al_extents_max(nbc);

        if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
                dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
                        (unsigned long long) drbd_get_max_capacity(nbc),
                        (unsigned long long) new_disk_conf->disk_size);
                retcode = ERR_DISK_TOO_SMALL;
                goto fail;
        }

        if (new_disk_conf->meta_dev_idx < 0) {
                max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
                /* at least one MB, otherwise it does not make sense */
                min_md_device_sectors = (2<<10);
        } else {
                max_possible_sectors = DRBD_MAX_SECTORS;
                min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
        }

        if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
                retcode = ERR_MD_DISK_TOO_SMALL;
                dev_warn(DEV, "refusing attach: md-device too small, "
                         "at least %llu sectors needed for this meta-disk type\n",
                         (unsigned long long) min_md_device_sectors);
                goto fail;
        }

        /* Make sure the new disk is big enough
         * (we may currently be R_PRIMARY with no local disk...) */
        if (drbd_get_max_capacity(nbc) <
            drbd_get_capacity(mdev->this_bdev)) {
                retcode = ERR_DISK_TOO_SMALL;
                goto fail;
        }

        nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

        if (nbc->known_size > max_possible_sectors) {
                dev_warn(DEV, "==> truncating very big lower level device "
                         "to currently maximum possible %llu sectors <==\n",
                         (unsigned long long) max_possible_sectors);
                if (new_disk_conf->meta_dev_idx >= 0)
                        dev_warn(DEV, "==>> using internal or flexible "
                                 "meta data may help <<==\n");
        }
        drbd_suspend_io(mdev);
        /* also wait for the last barrier ack. */
        /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
         * We need a way to either ignore barrier acks for barriers sent before a device
         * was attached, or a way to wait for all pending barrier acks to come in.
         * As barriers are counted per resource,
         * we'd need to suspend io on all devices of a resource.
         */
        wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
        /* and for any other previously queued work */
        drbd_flush_workqueue(mdev);

        rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
        retcode = rv; /* FIXME: Type mismatch. */
        drbd_resume_io(mdev);
        if (rv < SS_SUCCESS)
                goto fail;

        if (!get_ldev_if_state(mdev, D_ATTACHING))
                goto force_diskless;
        if (!mdev->bitmap) {
                if (drbd_bm_init(mdev)) {
                        retcode = ERR_NOMEM;
                        goto force_diskless_dec;
                }
        }

        if (mdev->state.conn < C_CONNECTED &&
            mdev->state.role == R_PRIMARY &&
            (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
                dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
                    (unsigned long long)mdev->ed_uuid);
                retcode = ERR_DATA_NOT_CURRENT;
                goto force_diskless_dec;
        }

        /* Since we are diskless, fix the activity log first... */
        if (drbd_check_al_size(mdev, new_disk_conf)) {
                retcode = ERR_NOMEM;
                goto force_diskless_dec;
        }

        /* Prevent shrinking of consistent devices ! */
        if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
            drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
                dev_warn(DEV, "refusing to truncate a consistent device\n");
                retcode = ERR_DISK_TOO_SMALL;
                goto force_diskless_dec;
        }

        /* Reset the "barriers don't work" bits here, then force meta data to
         * be written, to ensure we determine if barriers are supported. */
        if (new_disk_conf->md_flushes)
                clear_bit(MD_NO_FUA, &mdev->flags);
        else
                set_bit(MD_NO_FUA, &mdev->flags);

        /* Point of no return reached.
         * Devices and memory are no longer released by error cleanup below.
         * now mdev takes over responsibility, and the state engine should
         * clean it up somewhere. */
        D_ASSERT(mdev->ldev == NULL);
        mdev->ldev = nbc;
        mdev->resync = resync_lru;
        mdev->rs_plan_s = new_plan;
        nbc = NULL;
        resync_lru = NULL;
        new_disk_conf = NULL;
        new_plan = NULL;

        drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);

        if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
                set_bit(CRASHED_PRIMARY, &mdev->flags);
        else
                clear_bit(CRASHED_PRIMARY, &mdev->flags);

        if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
            !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod))
                set_bit(CRASHED_PRIMARY, &mdev->flags);
        mdev->send_cnt = 0;
        mdev->recv_cnt = 0;
        mdev->read_cnt = 0;
        mdev->writ_cnt = 0;

        drbd_reconsider_max_bio_size(mdev);

        /* If I am currently not R_PRIMARY,
         * but meta data primary indicator is set,
         * I just now recover from a hard crash,
         * and have been R_PRIMARY before that crash.
         *
         * Now, if I had no connection before that crash
         * (have been degraded R_PRIMARY), chances are that
         * I won't find my peer now either.
         *
         * In that case, and _only_ in that case,
         * we use the degr-wfc-timeout instead of the default,
         * so we can automatically recover from a crash of a
         * degraded but active "cluster" after a certain timeout.
         */
        clear_bit(USE_DEGR_WFC_T, &mdev->flags);
        if (mdev->state.role != R_PRIMARY &&
            drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
            !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
                set_bit(USE_DEGR_WFC_T, &mdev->flags);

        dd = drbd_determine_dev_size(mdev, 0);
        if (dd == dev_size_error) {
                retcode = ERR_NOMEM_BITMAP;
                goto force_diskless_dec;
        } else if (dd == grew)
                set_bit(RESYNC_AFTER_NEG, &mdev->flags);

        if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
            (test_bit(CRASHED_PRIMARY, &mdev->flags) &&
             drbd_md_test_flag(mdev->ldev, MDF_AL_DISABLED))) {
                dev_info(DEV, "Assuming that all blocks are out of sync "
                     "(aka FullSync)\n");
                if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
                        "set_n_write from attaching", BM_LOCKED_MASK)) {
                        retcode = ERR_IO_MD_DISK;
                        goto force_diskless_dec;
                }
        } else {
                if (drbd_bitmap_io(mdev, &drbd_bm_read,
                        "read from attaching", BM_LOCKED_MASK)) {
                        retcode = ERR_IO_MD_DISK;
                        goto force_diskless_dec;
                }
        }
        if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
                drbd_suspend_al(mdev); /* IO is still suspended here... */

        spin_lock_irq(&mdev->tconn->req_lock);
        os = drbd_read_state(mdev);
        ns = os;
        /* If MDF_CONSISTENT is not set go into inconsistent state,
           otherwise investigate MDF_WasUpToDate...
           If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
           otherwise into D_CONSISTENT state.
        */
        if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
                if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
                        ns.disk = D_CONSISTENT;
                else
                        ns.disk = D_OUTDATED;
        } else {
                ns.disk = D_INCONSISTENT;
        }

        if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
                ns.pdsk = D_OUTDATED;

        rcu_read_lock();
        if (ns.disk == D_CONSISTENT &&
            (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
                ns.disk = D_UP_TO_DATE;

        /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
           MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
           this point, because drbd_request_state() modifies these
           flags. */

        if (rcu_dereference(mdev->ldev->disk_conf)->al_updates)
                mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
        else
                mdev->ldev->md.flags |= MDF_AL_DISABLED;
        rcu_read_unlock();

        /* In case we are C_CONNECTED postpone any decision on the new disk
           state after the negotiation phase. */
        if (mdev->state.conn == C_CONNECTED) {
                mdev->new_state_tmp.i = ns.i;
                ns.i = os.i;
                ns.disk = D_NEGOTIATING;

                /* We expect to receive up-to-date UUIDs soon.
                   To avoid a race in receive_state, free p_uuid while
                   holding req_lock. I.e. atomic with the state change */
                kfree(mdev->p_uuid);
                mdev->p_uuid = NULL;
        }

        rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
        spin_unlock_irq(&mdev->tconn->req_lock);
        if (rv < SS_SUCCESS)
                goto force_diskless_dec;

        mod_timer(&mdev->request_timer, jiffies + HZ);

        if (mdev->state.role == R_PRIMARY)
                mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
        else
                mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

        drbd_md_mark_dirty(mdev);
        drbd_md_sync(mdev);

        kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
        put_ldev(mdev);
        conn_reconfig_done(mdev->tconn);
        drbd_adm_finish(info, retcode);
        return 0;

 force_diskless_dec:
        put_ldev(mdev);
 force_diskless:
        drbd_force_state(mdev, NS(disk, D_DISKLESS));
        drbd_md_sync(mdev);
 fail:
        conn_reconfig_done(mdev->tconn);
        if (nbc) {
                if (nbc->backing_bdev)
                        blkdev_put(nbc->backing_bdev,
                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
                if (nbc->md_bdev)
                        blkdev_put(nbc->md_bdev,
                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
                kfree(nbc);
        }
        kfree(new_disk_conf);
        lc_destroy(resync_lru);
        kfree(new_plan);

 finish:
        drbd_adm_finish(info, retcode);
        return 0;
}
static int adm_detach(struct drbd_conf *mdev, int force)
{
        enum drbd_state_rv retcode;
        int ret;

        if (force) {
                set_bit(FORCE_DETACH, &mdev->flags);
                drbd_force_state(mdev, NS(disk, D_FAILED));
                retcode = SS_SUCCESS;
                goto out;
        }

        drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
        drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
        retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
        drbd_md_put_buffer(mdev);
        /* D_FAILED will transition to DISKLESS. */
        ret = wait_event_interruptible(mdev->misc_wait,
                        mdev->state.disk != D_FAILED);
        drbd_resume_io(mdev);
        if ((int)retcode == (int)SS_IS_DISKLESS)
                retcode = SS_NOTHING_TO_DO;
        if (ret)
                retcode = ERR_INTR;
out:
        return retcode;
}
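
/* Two paths through adm_detach(): "force" slams the disk state to D_FAILED
 * without draining anything, while the graceful path first blocks new
 * application IO and in-flight meta-data IO, then waits until D_FAILED has
 * transitioned away (towards D_DISKLESS). */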
/* Detaching the disk is a process in multiple stages. First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then we have finally detached. */
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
        enum drbd_ret_code retcode;
        struct detach_parms parms = { };
        int err;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;

        if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
                err = detach_parms_from_attrs(&parms, info);
                if (err) {
                        retcode = ERR_MANDATORY_TAG;
                        drbd_msg_put_info(from_attrs_err_to_txt(err));
                        goto out;
                }
        }

        retcode = adm_detach(adm_ctx.mdev, parms.force_detach);
out:
        drbd_adm_finish(info, retcode);
        return 0;
}
static bool conn_resync_running(struct drbd_tconn *tconn)
{
        struct drbd_conf *mdev;
        bool rv = false;
        int vnr;

        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                if (mdev->state.conn == C_SYNC_SOURCE ||
                    mdev->state.conn == C_SYNC_TARGET ||
                    mdev->state.conn == C_PAUSED_SYNC_S ||
                    mdev->state.conn == C_PAUSED_SYNC_T) {
                        rv = true;
                        break;
                }
        }
        rcu_read_unlock();

        return rv;
}

static bool conn_ov_running(struct drbd_tconn *tconn)
{
        struct drbd_conf *mdev;
        bool rv = false;
        int vnr;

        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                if (mdev->state.conn == C_VERIFY_S ||
                    mdev->state.conn == C_VERIFY_T) {
                        rv = true;
                        break;
                }
        }
        rcu_read_unlock();

        return rv;
}
static enum drbd_ret_code
_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
{
        struct drbd_conf *mdev;
        int i;

        if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
                if (new_conf->wire_protocol != old_conf->wire_protocol)
                        return ERR_NEED_APV_100;

                if (new_conf->two_primaries != old_conf->two_primaries)
                        return ERR_NEED_APV_100;

                if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
                        return ERR_NEED_APV_100;
        }

        if (!new_conf->two_primaries &&
            conn_highest_role(tconn) == R_PRIMARY &&
            conn_highest_peer(tconn) == R_PRIMARY)
                return ERR_NEED_ALLOW_TWO_PRI;

        if (new_conf->two_primaries &&
            (new_conf->wire_protocol != DRBD_PROT_C))
                return ERR_NOT_PROTO_C;

        idr_for_each_entry(&tconn->volumes, mdev, i) {
                if (get_ldev(mdev)) {
                        enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
                        put_ldev(mdev);
                        if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
                                return ERR_STONITH_AND_PROT_A;
                }
                if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
                        return ERR_DISCARD_IMPOSSIBLE;
        }

        if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
                return ERR_CONG_NOT_PROTO_A;

        return NO_ERROR;
}
static enum drbd_ret_code
check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
{
        static enum drbd_ret_code rv;
        struct drbd_conf *mdev;
        int i;

        rcu_read_lock();
        rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
        rcu_read_unlock();

        /* tconn->volumes protected by genl_lock() here */
        idr_for_each_entry(&tconn->volumes, mdev, i) {
                if (!mdev->bitmap) {
                        if (drbd_bm_init(mdev))
                                return ERR_NOMEM;
                }
        }

        return rv;
}

struct crypto {
        struct crypto_hash *verify_tfm;
        struct crypto_hash *csums_tfm;
        struct crypto_hash *cram_hmac_tfm;
        struct crypto_hash *integrity_tfm;
};
static int
alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
{
        if (!tfm_name[0])
                return NO_ERROR;

        *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(*tfm)) {
                *tfm = NULL;
                return err_alg;
        }

        return NO_ERROR;
}
static enum drbd_ret_code
alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
{
        char hmac_name[CRYPTO_MAX_ALG_NAME];
        enum drbd_ret_code rv;

        rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
                        ERR_CSUMS_ALG);
        if (rv != NO_ERROR)
                return rv;
        rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
                        ERR_VERIFY_ALG);
        if (rv != NO_ERROR)
                return rv;
        rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
                        ERR_INTEGRITY_ALG);
        if (rv != NO_ERROR)
                return rv;
        if (new_conf->cram_hmac_alg[0] != 0) {
                snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
                         new_conf->cram_hmac_alg);

                rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
                                ERR_AUTH_ALG);
        }

        return rv;
}
static void free_crypto(struct crypto *crypto)
{
        crypto_free_hash(crypto->cram_hmac_tfm);
        crypto_free_hash(crypto->integrity_tfm);
        crypto_free_hash(crypto->csums_tfm);
        crypto_free_hash(crypto->verify_tfm);
}
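
/* Usage pattern (see drbd_adm_net_opts() and drbd_adm_connect() below):
 * alloc_crypto() fills a local struct crypto; on success the tfm pointers
 * are transferred to the tconn (NULLing the transferred members), and
 * free_crypto() releases whatever was not handed over.
 * crypto_free_hash(NULL) is a no-op, which makes this safe. */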
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{
        enum drbd_ret_code retcode;
        struct drbd_tconn *tconn;
        struct net_conf *old_conf, *new_conf = NULL;
        int err;
        int ovr; /* online verify running */
        int rsr; /* re-sync running */
        struct crypto crypto = { };

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;

        tconn = adm_ctx.tconn;

        new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
        if (!new_conf) {
                retcode = ERR_NOMEM;
                goto out;
        }

        conn_reconfig_start(tconn);

        mutex_lock(&tconn->data.mutex);
        mutex_lock(&tconn->conf_update);
        old_conf = tconn->net_conf;

        if (!old_conf) {
                drbd_msg_put_info("net conf missing, try connect");
                retcode = ERR_INVALID_REQUEST;
                goto fail;
        }

        *new_conf = *old_conf;
        if (should_set_defaults(info))
                set_net_conf_defaults(new_conf);

        err = net_conf_from_attrs_for_change(new_conf, info);
        if (err && err != -ENOMSG) {
                retcode = ERR_MANDATORY_TAG;
                drbd_msg_put_info(from_attrs_err_to_txt(err));
                goto fail;
        }

        retcode = check_net_options(tconn, new_conf);
        if (retcode != NO_ERROR)
                goto fail;

        /* re-sync running */
        rsr = conn_resync_running(tconn);
        if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
                retcode = ERR_CSUMS_RESYNC_RUNNING;
                goto fail;
        }

        /* online verify running */
        ovr = conn_ov_running(tconn);
        if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
                retcode = ERR_VERIFY_RUNNING;
                goto fail;
        }

        retcode = alloc_crypto(&crypto, new_conf);
        if (retcode != NO_ERROR)
                goto fail;

        rcu_assign_pointer(tconn->net_conf, new_conf);

        if (!rsr) {
                crypto_free_hash(tconn->csums_tfm);
                tconn->csums_tfm = crypto.csums_tfm;
                crypto.csums_tfm = NULL;
        }
        if (!ovr) {
                crypto_free_hash(tconn->verify_tfm);
                tconn->verify_tfm = crypto.verify_tfm;
                crypto.verify_tfm = NULL;
        }

        crypto_free_hash(tconn->integrity_tfm);
        tconn->integrity_tfm = crypto.integrity_tfm;
        if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
                /* Do this without trying to take tconn->data.mutex again. */
                __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);

        crypto_free_hash(tconn->cram_hmac_tfm);
        tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;

        mutex_unlock(&tconn->conf_update);
        mutex_unlock(&tconn->data.mutex);
        synchronize_rcu();
        kfree(old_conf);

        if (tconn->cstate >= C_WF_REPORT_PARAMS)
                drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));

        goto done;

 fail:
        mutex_unlock(&tconn->conf_update);
        mutex_unlock(&tconn->data.mutex);
        free_crypto(&crypto);
        kfree(new_conf);
 done:
        conn_reconfig_done(tconn);
 out:
        drbd_adm_finish(info, retcode);
        return 0;
}

int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	struct net_conf *old_conf, *new_conf = NULL;
	struct crypto crypto = { };
	struct drbd_tconn *tconn;
	enum drbd_ret_code retcode;
	int i;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);

	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;
	if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
		drbd_msg_put_info("connection endpoint(s) missing");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* No need for _rcu here. All reconfiguration is
	 * strictly serialized on genl_lock(). We are protected against
	 * concurrent reconfiguration/addition/deletion */
	list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
		if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
		    !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
			retcode = ERR_LOCAL_ADDR;
			goto out;
		}

		if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
		    !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
			retcode = ERR_PEER_ADDR;
			goto out;
		}
	}

	tconn = adm_ctx.tconn;
	conn_reconfig_start(tconn);

	if (tconn->cstate > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, drbdsetup / netlink process context */
	new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	set_net_conf_defaults(new_conf);

	err = net_conf_from_attrs(new_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	retcode = alloc_crypto(&crypto, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	conn_flush_workqueue(tconn);

	mutex_lock(&tconn->conf_update);
	old_conf = tconn->net_conf;
	if (old_conf) {
		retcode = ERR_NET_CONFIGURED;
		mutex_unlock(&tconn->conf_update);
		goto fail;
	}
	rcu_assign_pointer(tconn->net_conf, new_conf);

	conn_free_crypto(tconn);
	tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
	tconn->integrity_tfm = crypto.integrity_tfm;
	tconn->csums_tfm = crypto.csums_tfm;
	tconn->verify_tfm = crypto.verify_tfm;

	tconn->my_addr_len = nla_len(adm_ctx.my_addr);
	memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
	tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
	memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);

	mutex_unlock(&tconn->conf_update);

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, i) {
		mdev->send_cnt = 0;
		mdev->recv_cnt = 0;
	}
	rcu_read_unlock();

	retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);

	conn_reconfig_done(tconn);
	drbd_adm_finish(info, retcode);
	return 0;

fail:
	free_crypto(&crypto);
	kfree(new_conf);

	conn_reconfig_done(tconn);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
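
/*
 * Endpoint uniqueness, illustrated: given an established connection
 * 10.0.0.1:7789 <-> 10.0.0.2:7789, a second connect naming 10.0.0.1:7789
 * as its local address is refused with ERR_LOCAL_ADDR, and one naming
 * 10.0.0.2:7789 as its peer with ERR_PEER_ADDR, regardless of resource.
 * The check above compares the raw sockaddr bytes, so the address length
 * has to match as well.
 */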

static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
{
	enum drbd_state_rv rv;

	rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
			force ? CS_HARD : 0);

	switch (rv) {
	case SS_NOTHING_TO_DO:
		break;
	case SS_ALREADY_STANDALONE:
		return SS_SUCCESS;
	case SS_PRIMARY_NOP:
		/* Our state checking code wants to see the peer outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						pdsk, D_OUTDATED), CS_VERBOSE);
		break;
	case SS_CW_FAILED_BY_PEER:
		/* The peer probably wants to see us outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						disk, D_OUTDATED), 0);
		if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
			rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
					CS_HARD);
		}
		break;
	default:
		/* no special handling necessary */
		break;
	}

	if (rv >= SS_SUCCESS) {
		enum drbd_state_rv rv2;
		/* No one else can reconfigure the network while I am here.
		 * The state handling only uses drbd_thread_stop_nowait();
		 * we really want to wait here until the receiver thread is gone.
		 */
		drbd_thread_stop(&adm_ctx.tconn->receiver);

		/* Race breaker. This additional state change request may be
		 * necessary, if this was a forced disconnect during a receiver
		 * restart. We may have "killed" the receiver thread just
		 * after drbdd_init() returned. Typically, we should be
		 * C_STANDALONE already, now, and this becomes a no-op.
		 */
		rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
				CS_VERBOSE | CS_HARD);
		if (rv2 < SS_SUCCESS)
			conn_err(tconn,
				"unexpected rv2=%d in conn_try_disconnect()\n",
				rv2);
	}
	return rv;
}

int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct disconnect_parms parms;
	struct drbd_tconn *tconn;
	enum drbd_state_rv rv;
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	tconn = adm_ctx.tconn;
	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
		err = disconnect_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	rv = conn_try_disconnect(tconn, parms.force_disconnect);
	if (rv < SS_SUCCESS)
		retcode = rv;  /* FIXME: Type mismatch. */
	else
		retcode = NO_ERROR;
 fail:
	drbd_adm_finish(info, retcode);
	return 0;
}

void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
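
/*
 * Decision table for resync_after_online_grow(), derived from the code:
 *
 *	local role    peer role     sync source?
 *	Primary       Secondary     yes (iass = 1)
 *	Secondary     Primary       no  (iass = 0)
 *	equal         equal         the side with RESOLVE_CONFLICTS set
 */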

int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
	struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
	struct resize_parms rs;
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	enum dds_flags ddsf;
	sector_t u_size;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	memset(&rs, 0, sizeof(struct resize_parms));
	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
		err = resize_parms_from_attrs(&rs, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	mdev = adm_ctx.mdev;
	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		goto fail_ldev;
	}

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	if (u_size != (sector_t)rs.resize_size) {
		new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
		if (!new_disk_conf) {
			retcode = ERR_NOMEM;
			goto fail_ldev;
		}
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	if (new_disk_conf) {
		mutex_lock(&mdev->tconn->conf_update);
		old_disk_conf = mdev->ldev->disk_conf;
		*new_disk_conf = *old_disk_conf;
		new_disk_conf->disk_size = (sector_t)rs.resize_size;
		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
		mutex_unlock(&mdev->tconn->conf_update);
		synchronize_rcu();
		kfree(old_disk_conf);
	}

	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determine_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

 fail:
	drbd_adm_finish(info, retcode);
	return 0;

 fail_ldev:
	put_ldev(mdev);
	goto fail;
}
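
/*
 * Example of the dds_flags composition in drbd_adm_resize(): a forced
 * resize that also skips the subsequent resync arrives with
 * rs.resize_force = 1 and rs.no_resync = 1, so
 *
 *	ddsf == (DDSF_FORCED | DDSF_NO_RESYNC)
 *
 * and drbd_determine_dev_size() reports the outcome ("grew", "shrunk",
 * "unchanged", or dev_size_error) back to the caller.
 */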

int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_tconn *tconn;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;
	tconn = adm_ctx.tconn;

	res_opts = tconn->res_opts;
	if (should_set_defaults(info))
		set_res_opts_defaults(&res_opts);

	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	err = set_resource_options(tconn, &res_opts);
	if (err) {
		retcode = ERR_INVALID_REQUEST;
		if (err == -ENOMEM)
			retcode = ERR_NOMEM;
	}

fail:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code or enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync.
	 * Also wait for its after_state_ch(). */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	drbd_flush_workqueue(mdev);

	/* If we happen to be C_STANDALONE R_SECONDARY, just change to
	 * D_INCONSISTENT, and set all bits in the bitmap. Otherwise,
	 * try to start a resync handshake as sync target for full sync.
	 */
	if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_SECONDARY) {
		retcode = drbd_request_state(mdev, NS(disk, D_INCONSISTENT));
		if (retcode >= SS_SUCCESS) {
			if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
				"set_n_write from invalidate", BM_LOCKED_MASK))
				retcode = ERR_IO_MD_DISK;
		}
	} else
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
		union drbd_state mask, union drbd_state val)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_request_state(adm_ctx.mdev, mask, val);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
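
/*
 * Usage note: the NS(field, value) macro expands to a (mask, val) pair of
 * union drbd_state with only "field" set in the mask, so e.g.
 *
 *	drbd_adm_simple_request_state(skb, info, NS(susp, 1));
 *
 * asks for "susp == 1, everything else unchanged" -- which is exactly how
 * drbd_adm_suspend_io() below is implemented.
 */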

/* Set all bits in the bitmap, then suspend activity log updates. */
static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
	int rv;

	rv = drbd_bmio_set_n_write(mdev);
	drbd_suspend_al(mdev);
	return rv;
}

int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
	int retcode; /* enum drbd_ret_code or enum drbd_state_rv */
	struct drbd_conf *mdev;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync.
	 * Also wait for its after_state_ch(). */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	drbd_flush_workqueue(mdev);

	/* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
	 * in the bitmap. Otherwise, try to start a resync handshake
	 * as sync source for full sync.
	 */
	if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_PRIMARY) {
		/* The peer will get a resync upon connect anyway. Just make that
		   into a full resync. */
		retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
		if (retcode >= SS_SUCCESS) {
			if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
				"set_n_write from invalidate_peer",
				BM_LOCKED_SET_ALLOWED))
				retcode = ERR_IO_MD_DISK;
		}
	} else
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
	union drbd_dev_state s;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = adm_ctx.mdev->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}

int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code or enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
	}
	drbd_suspend_io(mdev);
	retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (retcode == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev->tconn);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
	}
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}

int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
{
	struct nlattr *nla;
	nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		goto nla_put_failure;
	if (vnr != VOLUME_UNSPECIFIED &&
	    nla_put_u32(skb, T_ctx_volume, vnr))
		goto nla_put_failure;
	if (nla_put_string(skb, T_ctx_resource_name, tconn->name))
		goto nla_put_failure;
	if (tconn->my_addr_len &&
	    nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr))
		goto nla_put_failure;
	if (tconn->peer_addr_len &&
	    nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr))
		goto nla_put_failure;
	nla_nest_end(skb, nla);
	return 0;

nla_put_failure:
	if (nla)
		nla_nest_cancel(skb, nla);
	return -EMSGSIZE;
}
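
/*
 * The nest/cancel pattern above is the usual netlink idiom: on any
 * intermediate failure the entire, possibly half-filled nest is rolled
 * back, so a receiver sees either a complete DRBD_NLA_CFG_CONTEXT
 * attribute or none at all:
 *
 *	nla = nla_nest_start(skb, TYPE);
 *	if (!nla || nla_put_u32(skb, ATTR, val))
 *		goto nla_put_failure;	// e.g. out of skb tailroom
 *	nla_nest_end(skb, nla);		// commit the nest
 */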

int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
		const struct sib_info *sib)
{
	struct state_info *si = NULL; /* for sizeof(si->member); */
	struct net_conf *nc;
	struct nlattr *nla;
	int got_ldev;
	int err = 0;
	int exclude_sensitive;

	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
	 * to. So we had better exclude sensitive information.
	 *
	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
	 * in the context of the requesting user process. Exclude sensitive
	 * information, unless the current process has CAP_SYS_ADMIN.
	 *
	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
	 * relies on the current implementation of netlink_dump(), which
	 * executes the dump callback successively from netlink_recvmsg(),
	 * always in the context of the receiving process */
	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

	got_ldev = get_ldev(mdev);

	/* We need to add connection name and volume number information still.
	 * Minor number is in drbd_genlmsghdr. */
	if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
		goto nla_put_failure;

	if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
		goto nla_put_failure;

	rcu_read_lock();
	if (got_ldev)
		if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
			goto nla_put_failure;

	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc)
		err = net_conf_to_skb(skb, nc, exclude_sensitive);
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
	if (!nla)
		goto nla_put_failure;
	if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
	    nla_put_u32(skb, T_current_state, mdev->state.i) ||
	    nla_put_u64(skb, T_ed_uuid, mdev->ed_uuid) ||
	    nla_put_u64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev)) ||
	    nla_put_u64(skb, T_send_cnt, mdev->send_cnt) ||
	    nla_put_u64(skb, T_recv_cnt, mdev->recv_cnt) ||
	    nla_put_u64(skb, T_read_cnt, mdev->read_cnt) ||
	    nla_put_u64(skb, T_writ_cnt, mdev->writ_cnt) ||
	    nla_put_u64(skb, T_al_writ_cnt, mdev->al_writ_cnt) ||
	    nla_put_u64(skb, T_bm_writ_cnt, mdev->bm_writ_cnt) ||
	    nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&mdev->ap_bio_cnt)) ||
	    nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&mdev->ap_pending_cnt)) ||
	    nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&mdev->rs_pending_cnt)))
		goto nla_put_failure;

	if (got_ldev) {
		int err;

		spin_lock_irq(&mdev->ldev->md.uuid_lock);
		err = nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
		spin_unlock_irq(&mdev->ldev->md.uuid_lock);

		if (err)
			goto nla_put_failure;

		if (nla_put_u32(skb, T_disk_flags, mdev->ldev->md.flags) ||
		    nla_put_u64(skb, T_bits_total, drbd_bm_bits(mdev)) ||
		    nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(mdev)))
			goto nla_put_failure;
		if (C_SYNC_SOURCE <= mdev->state.conn &&
		    C_PAUSED_SYNC_T >= mdev->state.conn) {
			if (nla_put_u64(skb, T_bits_rs_total, mdev->rs_total) ||
			    nla_put_u64(skb, T_bits_rs_failed, mdev->rs_failed))
				goto nla_put_failure;
		}
	}

	if (sib) {
		switch (sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
			break;
		case SIB_STATE_CHANGE:
			if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
			    nla_put_u32(skb, T_new_state, sib->ns.i))
				goto nla_put_failure;
			break;
		case SIB_HELPER_POST:
			if (nla_put_u32(skb, T_helper_exit_code,
					sib->helper_exit_code))
				goto nla_put_failure;
			/* fall through */
		case SIB_HELPER_PRE:
			if (nla_put_string(skb, T_helper, sib->helper_name))
				goto nla_put_failure;
			break;
		}
	}
	nla_nest_end(skb, nla);

	if (0)
nla_put_failure:
		err = -EMSGSIZE;
	if (got_ldev)
		put_ldev(mdev);
	return err;
}
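
/*
 * The "if (0) nla_put_failure:" construct above is deliberate: on the
 * normal path the if (0) skips the assignment, while every
 * "goto nla_put_failure" jumps into it and sets err = -EMSGSIZE before
 * falling through to the shared put_ldev() cleanup.
 */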

int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_conf *mdev;
	struct drbd_genlmsghdr *dh;
	struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
	struct drbd_tconn *tconn = NULL;
	struct drbd_tconn *tmp;
	unsigned volume = cb->args[1];

	/* Open coded, deferred, iteration:
	 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
	 *	idr_for_each_entry(&tconn->volumes, mdev, i) {
	 *	  ...
	 *	}
	 * }
	 * where tconn is cb->args[0];
	 * and i is cb->args[1];
	 *
	 * cb->args[2] indicates if we shall loop over all resources,
	 * or just dump all volumes of a single resource.
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the mdev won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */

	/* synchronize with conn_create()/conn_destroy() */
	rcu_read_lock();
	/* revalidate iterator position */
	list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			tconn = pos;
			break;
		}
		if (tmp == pos) {
			tconn = pos;
			break;
		}
	}
	if (tconn) {
next_tconn:
		mdev = idr_get_next(&tconn->volumes, &volume);
		if (!mdev) {
			/* No more volumes to dump on this tconn.
			 * Advance tconn iterator. */
			pos = list_entry_rcu(tconn->all_tconn.next,
					     struct drbd_tconn, all_tconn);
			/* Did we dump any volume on this tconn yet? */
			if (volume != 0) {
				/* If we reached the end of the list,
				 * or only a single resource dump was requested,
				 * we are done. */
				if (&pos->all_tconn == &drbd_tconns || cb->args[2])
					goto out;
				volume = 0;
				tconn = pos;
				goto next_tconn;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, &drbd_genl_family,
				NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;

		if (!mdev) {
			/* This is a tconn without a single volume.
			 * Surprisingly enough, it may have a network
			 * configuration. */
			struct net_conf *nc;
			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
				goto cancel;
			nc = rcu_dereference(tconn->net_conf);
			if (nc && net_conf_to_skb(skb, nc, 1) != 0)
				goto cancel;
			goto done;
		}

		D_ASSERT(mdev->vnr == volume);
		D_ASSERT(mdev->tconn == tconn);

		dh->minor = mdev_to_minor(mdev);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, mdev, NULL)) {
cancel:
			genlmsg_cancel(skb, dh);
			goto out;
		}
done:
		genlmsg_end(skb, dh);
	}

out:
	rcu_read_unlock();
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == tconn) ? volume + 1 : 0;

	/* An empty skb (no more tconns/volumes/minors found)
	 * terminates the dump. */
	return skb->len;
}
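
/*
 * Iterator state carried between successive dump calls, for reference:
 *
 *	cb->args[0]  last tconn position (pointer cast to long)
 *	cb->args[1]  next volume number to dump within that tconn
 *	cb->args[2]  if != 0, restrict the dump to this single tconn
 *
 * Example: a resource with volumes 0 and 2 is dumped as volume 0
 * (args[1] becomes 1), then volume 2 (idr_get_next() rounds 1 up to 2,
 * args[1] becomes 3), then an empty skb ends the dump.
 */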

/*
 * Request status of all resources, or of all volumes within a single
 * resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb otherwise.
 * Which means we cannot use the family->attrbuf or other such members, because
 * dump is NOT protected by the genl_lock(). During dump, we only have access
 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
 *
 * Once things are set up properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	struct nlattr *nla;
	const char *resource_name;
	struct drbd_tconn *tconn;
	int maxtype;

	/* Is this a followup call? */
	if (cb->args[0]) {
		/* ... of a single resource dump,
		 * and the resource iterator has been advanced already? */
		if (cb->args[2] && cb->args[2] != cb->args[0])
			return 0; /* DONE. */
		goto dump;
	}

	/* First call (from netlink_dump_start). We need to figure out
	 * which resource(s) the user wants us to dump. */
	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
			nlmsg_attrlen(cb->nlh, hdrlen),
			DRBD_NLA_CFG_CONTEXT);

	/* No explicit context given. Dump all. */
	if (!nla)
		goto dump;
	maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
	nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
	if (IS_ERR(nla))
		return PTR_ERR(nla);
	/* context given, but no name present? */
	if (!nla)
		return -EINVAL;
	resource_name = nla_data(nla);
	tconn = conn_get_by_name(resource_name);

	if (!tconn)
		return -ENODEV;

	kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */

	/* prime iterators, and set "filter" mode mark:
	 * only dump this tconn. */
	cb->args[0] = (long)tconn;
	/* cb->args[1] = 0; passed in this way. */
	cb->args[2] = (long)tconn;

dump:
	return get_one_status(skb, cb);
}
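
/*
 * Call sequence sketch: netlink_dump_start() invokes this callback once
 * per recvmsg(). On the first call all cb->args[] are zero, so the
 * nlattr payload is parsed and the iterators primed; follow-up calls go
 * straight to get_one_status() until it returns an empty skb. For a
 * single-resource dump, args[2] stays equal to args[0] while that
 * resource is being dumped; once get_one_status() advances args[0] past
 * it, the check at the top returns 0 and the dump is finished.
 */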

int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct timeout_parms tp;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tp.timeout_type =
		adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
		UT_DEFAULT;

	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	struct start_ov_parms parms;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* resume from last known position, if possible */
	parms.ov_start_sector = mdev->ov_start_sector;
	parms.ov_stop_sector = ULLONG_MAX;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		int err = start_ov_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}
	/* w_make_ov_request expects position to be aligned */
	mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
	mdev->ov_stop_sector = parms.ov_stop_sector;

	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
	drbd_resume_io(mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
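
/*
 * Alignment example for the mask above, assuming the usual 4 KiB bitmap
 * granularity (BM_SECT_PER_BIT == 8, i.e. eight 512-byte sectors per
 * bitmap bit): a requested ov_start_sector of 1000005 becomes
 *
 *	1000005 & ~(8 - 1) == 1000000
 *
 * i.e. it is rounded down to the start of its bitmap bit.
 */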

int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	mdev = adm_ctx.mdev;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assumed to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			drbd_print_uuids(mdev, "cleared bitmap UUID");
			spin_lock_irq(&mdev->tconn->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->tconn->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(mdev->state_mutex);
out_nolock:
	drbd_adm_finish(info, retcode);
	return 0;
}
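
/*
 * UUID effect of drbd_adm_new_c_uuid(), sketched: the old bitmap UUID is
 * rotated into the history slots, the previous current UUID moves into
 * the bitmap slot, and a fresh current UUID is generated. With clear_bm
 * on a connected pair speaking protocol >= 90, the bitmap UUID is then
 * zeroed on both sides and both disks jump straight to D_UP_TO_DATE,
 * skipping the initial full sync.
 */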

static enum drbd_ret_code
drbd_check_resource_name(const char *name)
{
	if (!name || !name[0]) {
		drbd_msg_put_info("resource name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info("invalid resource name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}
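
/*
 * Examples: "r0" and "web-data" are accepted; a missing or empty name is
 * refused with ERR_MANDATORY_TAG, and "a/b" with ERR_INVALID_REQUEST, so
 * that a resource name stays usable as a sysfs/configfs/debugfs
 * directory entry later on.
 */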

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	set_res_opts_defaults(&res_opts);
	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto out;
	}

	retcode = drbd_check_resource_name(adm_ctx.resource_name);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.tconn) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info("resource exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	if (!conn_create(adm_ctx.resource_name, &res_opts))
		retcode = ERR_NOMEM;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (dh->minor > MINORMASK) {
		drbd_msg_put_info("requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info("requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* drbd_adm_prepare made sure already
	 * that mdev->tconn and mdev->vnr match the request. */
	if (adm_ctx.mdev) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_EXISTS;
		/* else: still NO_ERROR */
		goto out;
	}

	retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
{
	if (mdev->state.disk == D_DISKLESS &&
	    /* no need to be mdev->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    mdev->state.role == R_SECONDARY) {
		_drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS),
				    CS_VERBOSE + CS_WAIT_COMPLETE);
		idr_remove(&mdev->tconn->volumes, mdev->vnr);
		idr_remove(&minors, mdev_to_minor(mdev));
		destroy_workqueue(mdev->submit.wq);
		del_gendisk(mdev->vdisk);
		synchronize_rcu();
		kref_put(&mdev->kref, &drbd_minor_destroy);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}

int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = adm_delete_minor(adm_ctx.mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	int retcode; /* enum drbd_ret_code or enum drbd_state_rv */
	struct drbd_conf *mdev;
	unsigned i;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (!adm_ctx.tconn) {
		retcode = ERR_RES_NOT_KNOWN;
		goto out;
	}

	/* demote */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = drbd_set_role(mdev, R_SECONDARY, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to demote");
			goto out;
		}
	}

	retcode = conn_try_disconnect(adm_ctx.tconn, 0);
	if (retcode < SS_SUCCESS) {
		drbd_msg_put_info("failed to disconnect");
		goto out;
	}

	/* detach */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_detach(mdev, 0);
		if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
			drbd_msg_put_info("failed to detach");
			goto out;
		}
	}

	/* If we reach this, all volumes (of this tconn) are Secondary,
	 * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
	 * actually stopped, state handling only does drbd_thread_stop_nowait(). */
	drbd_thread_stop(&adm_ctx.tconn->worker);

	/* Now, nothing can fail anymore */

	/* delete volumes */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_delete_minor(mdev);
		if (retcode != NO_ERROR) {
			/* "can not happen" */
			drbd_msg_put_info("failed to delete volume");
			goto out;
		}
	}

	/* delete connection */
	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		/* "can not happen" */
		retcode = ERR_RES_IN_USE;
		drbd_msg_put_info("failed to delete connection");
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		retcode = ERR_RES_IN_USE;
	}

	if (retcode == NO_ERROR)
		drbd_thread_stop(&adm_ctx.tconn->worker);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
{
	static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	if (sib->sib_reason == SIB_SYNC_PROGRESS) {
		if (time_after(jiffies, mdev->rs_last_bcast + HZ))
			mdev->rs_last_bcast = jiffies;
		else
			return;
	}

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyway. */
		goto nla_put_failure;
	d_out->minor = mdev_to_minor(mdev);
	d_out->ret_code = NO_ERROR;

	if (nla_put_status_info(msg, mdev, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, 0);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	dev_err(DEV, "Error %d while broadcasting event. "
			"Event seq:%u sib_reason:%u\n",
			err, seq, sib->sib_reason);
}
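
/*
 * Rate-limit sketch for the SIB_SYNC_PROGRESS branch above: jiffies
 * based, at most one broadcast per second, wraparound safe because
 * time_after() compares via signed subtraction:
 *
 *	if (time_after(jiffies, mdev->rs_last_bcast + HZ))
 *		mdev->rs_last_bcast = jiffies;	// send this event
 *	else
 *		return;				// drop this event
 */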