drbd: cleanup ondisk meta data layout calculations and defines
1 /*
2    drbd_nl.c
3
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10    drbd is free software; you can redistribute it and/or modify
11    it under the terms of the GNU General Public License as published by
12    the Free Software Foundation; either version 2, or (at your option)
13    any later version.
14
15    drbd is distributed in the hope that it will be useful,
16    but WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18    GNU General Public License for more details.
19
20    You should have received a copy of the GNU General Public License
21    along with drbd; see the file COPYING.  If not, write to
22    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23
24  */
25
26 #include <linux/module.h>
27 #include <linux/drbd.h>
28 #include <linux/in.h>
29 #include <linux/fs.h>
30 #include <linux/file.h>
31 #include <linux/slab.h>
32 #include <linux/blkpg.h>
33 #include <linux/cpumask.h>
34 #include "drbd_int.h"
35 #include "drbd_req.h"
36 #include "drbd_wrappers.h"
37 #include <asm/unaligned.h>
38 #include <linux/drbd_limits.h>
39 #include <linux/kthread.h>
40
41 #include <net/genetlink.h>
42
43 /* .doit */
44 // int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
45 // int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
46
47 int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
48 int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);
49
50 int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
51 int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
52 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);
53
54 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
55 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
56 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
57 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
58 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
59 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
60 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
61 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
62 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
63 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
64 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
65 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
66 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
67 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
68 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
69 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
70 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
71 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
72 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
73 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
74 /* .dumpit */
75 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
76
77 #include <linux/drbd_genl_api.h>
78 #include "drbd_nla.h"
79 #include <linux/genl_magic_func.h>
80
81 /* used as the holder argument to blkdev_get_by_path(), to claim our meta data device(s) */
82 static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
83
84 /* Configuration is strictly serialized, because generic netlink message
85  * processing is strictly serialized by the genl_lock().
86  * Which means we can use one static global drbd_config_context struct.
87  */
88 static struct drbd_config_context {
89         /* assigned from drbd_genlmsghdr */
90         unsigned int minor;
91         /* assigned from request attributes, if present */
92         unsigned int volume;
93 #define VOLUME_UNSPECIFIED              (-1U)
94         /* pointer into the request skb,
95          * limited lifetime! */
96         char *resource_name;
97         struct nlattr *my_addr;
98         struct nlattr *peer_addr;
99
100         /* reply buffer */
101         struct sk_buff *reply_skb;
102         /* pointer into reply buffer */
103         struct drbd_genlmsghdr *reply_dh;
104         /* resolved from attributes, if possible */
105         struct drbd_conf *mdev;
106         struct drbd_tconn *tconn;
107 } adm_ctx;
108
109 static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
110 {
111         genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
112         if (genlmsg_reply(skb, info))
113                 printk(KERN_ERR "drbd: error sending genl reply\n");
114 }
115
116 /* Used on a fresh "drbd_adm_prepare"d reply_skb; this cannot fail: the only
117  * reason it could fail would be no space in the skb, and there are 4k available. */
118 int drbd_msg_put_info(const char *info)
119 {
120         struct sk_buff *skb = adm_ctx.reply_skb;
121         struct nlattr *nla;
122         int err = -EMSGSIZE;
123
124         if (!info || !info[0])
125                 return 0;
126
127         nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
128         if (!nla)
129                 return err;
130
131         err = nla_put_string(skb, T_info_text, info);
132         if (err) {
133                 nla_nest_cancel(skb, nla);
134                 return err;
135         } else
136                 nla_nest_end(skb, nla);
137         return 0;
138 }
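
/*
 * Illustrative note (added by the editor, not part of the original file):
 * a successful drbd_msg_put_info("some text") nests
 *
 *     DRBD_NLA_CFG_REPLY
 *         T_info_text = "some text"
 *
 * into the reply skb.  NULL or empty strings are silently skipped, and a
 * failed nla_put_string() cancels the nest again so the reply stays
 * well-formed.
 */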
139
140 /* This would be a good candidate for a "pre_doit" hook,
141  * and per-family private info->pointers.
142  * But we need to stay compatible with older kernels.
143  * If it returns successfully, adm_ctx members are valid.
144  */
145 #define DRBD_ADM_NEED_MINOR     1
146 #define DRBD_ADM_NEED_RESOURCE  2
147 #define DRBD_ADM_NEED_CONNECTION 4
148 static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
149                 unsigned flags)
150 {
151         struct drbd_genlmsghdr *d_in = info->userhdr;
152         const u8 cmd = info->genlhdr->cmd;
153         int err;
154
155         memset(&adm_ctx, 0, sizeof(adm_ctx));
156
157         /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
158         if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
159                return -EPERM;
160
161         adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
162         if (!adm_ctx.reply_skb) {
163                 err = -ENOMEM;
164                 goto fail;
165         }
166
167         adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
168                                         info, &drbd_genl_family, 0, cmd);
169         /* a put of a few bytes into a fresh skb of >= 4k will always succeed,
170          * but check anyway */
171         if (!adm_ctx.reply_dh) {
172                 err = -ENOMEM;
173                 goto fail;
174         }
175
176         adm_ctx.reply_dh->minor = d_in->minor;
177         adm_ctx.reply_dh->ret_code = NO_ERROR;
178
179         adm_ctx.volume = VOLUME_UNSPECIFIED;
180         if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
181                 struct nlattr *nla;
182                 /* parse and validate only */
183                 err = drbd_cfg_context_from_attrs(NULL, info);
184                 if (err)
185                         goto fail;
186
187                 /* It was present, and valid,
188                  * copy it over to the reply skb. */
189                 err = nla_put_nohdr(adm_ctx.reply_skb,
190                                 info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
191                                 info->attrs[DRBD_NLA_CFG_CONTEXT]);
192                 if (err)
193                         goto fail;
194
195                 /* and assign stuff to the global adm_ctx */
196                 nla = nested_attr_tb[__nla_type(T_ctx_volume)];
197                 if (nla)
198                         adm_ctx.volume = nla_get_u32(nla);
199                 nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
200                 if (nla)
201                         adm_ctx.resource_name = nla_data(nla);
202                 adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
203                 adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
204                 if ((adm_ctx.my_addr &&
205                      nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
206                     (adm_ctx.peer_addr &&
207                      nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {
208                         err = -EINVAL;
209                         goto fail;
210                 }
211         }
212
213         adm_ctx.minor = d_in->minor;
214         adm_ctx.mdev = minor_to_mdev(d_in->minor);
215         adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);
216
217         if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
218                 drbd_msg_put_info("unknown minor");
219                 return ERR_MINOR_INVALID;
220         }
221         if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
222                 drbd_msg_put_info("unknown resource");
223                 return ERR_INVALID_REQUEST;
224         }
225
226         if (flags & DRBD_ADM_NEED_CONNECTION) {
227                 if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
228                         drbd_msg_put_info("no resource name expected");
229                         return ERR_INVALID_REQUEST;
230                 }
231                 if (adm_ctx.mdev) {
232                         drbd_msg_put_info("no minor number expected");
233                         return ERR_INVALID_REQUEST;
234                 }
235                 if (adm_ctx.my_addr && adm_ctx.peer_addr)
236                         adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
237                                                           nla_len(adm_ctx.my_addr),
238                                                           nla_data(adm_ctx.peer_addr),
239                                                           nla_len(adm_ctx.peer_addr));
240                 if (!adm_ctx.tconn) {
241                         drbd_msg_put_info("unknown connection");
242                         return ERR_INVALID_REQUEST;
243                 }
244         }
245
246         /* some more paranoia, if the request was over-determined */
247         if (adm_ctx.mdev && adm_ctx.tconn &&
248             adm_ctx.mdev->tconn != adm_ctx.tconn) {
249                 pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
250                                 adm_ctx.minor, adm_ctx.resource_name,
251                                 adm_ctx.mdev->tconn->name);
252                 drbd_msg_put_info("minor exists in different resource");
253                 return ERR_INVALID_REQUEST;
254         }
255         if (adm_ctx.mdev &&
256             adm_ctx.volume != VOLUME_UNSPECIFIED &&
257             adm_ctx.volume != adm_ctx.mdev->vnr) {
258                 pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
259                                 adm_ctx.minor, adm_ctx.volume,
260                                 adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
261                 drbd_msg_put_info("minor exists as different volume");
262                 return ERR_INVALID_REQUEST;
263         }
264
265         return NO_ERROR;
266
267 fail:
268         nlmsg_free(adm_ctx.reply_skb);
269         adm_ctx.reply_skb = NULL;
270         return err;
271 }
272
273 static int drbd_adm_finish(struct genl_info *info, int retcode)
274 {
275         if (adm_ctx.tconn) {
276                 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
277                 adm_ctx.tconn = NULL;
278         }
279
280         if (!adm_ctx.reply_skb)
281                 return -ENOMEM;
282
283         adm_ctx.reply_dh->ret_code = retcode;
284         drbd_adm_send_reply(adm_ctx.reply_skb, info);
285         return 0;
286 }
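
/*
 * Illustrative sketch (added; not part of the original file): the calling
 * convention that the .doit handlers below follow.  drbd_adm_prepare() fills
 * the global adm_ctx, drbd_adm_finish() sends the reply and drops the tconn
 * reference.  "drbd_adm_example_doit" is a made-up name; the real handlers
 * are drbd_adm_set_role(), drbd_adm_disk_opts(), drbd_adm_attach(), etc.
 */
static int drbd_adm_example_doit(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)		/* no reply skb: cannot even report an error */
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	/* ... act on adm_ctx.mdev (and/or adm_ctx.tconn) here ... */
out:
	drbd_adm_finish(info, retcode);	/* sends the reply, releases adm_ctx.tconn */
	return 0;
}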
287
288 static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
289 {
290         char *afs;
291
292         /* FIXME: A future version will not allow this case. */
293         if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)
294                 return;
295
296         switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
297         case AF_INET6:
298                 afs = "ipv6";
299                 snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
300                          &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
301                 break;
302         case AF_INET:
303                 afs = "ipv4";
304                 snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
305                          &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
306                 break;
307         default:
308                 afs = "ssocks";
309                 snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
310                          &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
311         }
312         snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
313 }
314
315 int drbd_khelper(struct drbd_conf *mdev, char *cmd)
316 {
317         char *envp[] = { "HOME=/",
318                         "TERM=linux",
319                         "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
320                          (char[20]) { }, /* address family */
321                          (char[60]) { }, /* address */
322                         NULL };
323         char mb[12];
324         char *argv[] = {usermode_helper, cmd, mb, NULL };
325         struct drbd_tconn *tconn = mdev->tconn;
326         struct sib_info sib;
327         int ret;
328
329         if (current == tconn->worker.task)
330                 set_bit(CALLBACK_PENDING, &tconn->flags);
331
332         snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
333         setup_khelper_env(tconn, envp);
334
335         /* The helper may take some time.
336          * write out any unsynced meta data changes now */
337         drbd_md_sync(mdev);
338
339         dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
340         sib.sib_reason = SIB_HELPER_PRE;
341         sib.helper_name = cmd;
342         drbd_bcast_event(mdev, &sib);
343         ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
344         if (ret)
345                 dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
346                                 usermode_helper, cmd, mb,
347                                 (ret >> 8) & 0xff, ret);
348         else
349                 dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
350                                 usermode_helper, cmd, mb,
351                                 (ret >> 8) & 0xff, ret);
352         sib.sib_reason = SIB_HELPER_POST;
353         sib.helper_exit_code = ret;
354         drbd_bcast_event(mdev, &sib);
355
356         if (current == tconn->worker.task)
357                 clear_bit(CALLBACK_PENDING, &tconn->flags);
358
359         if (ret < 0) /* Ignore any ERRNOs we got. */
360                 ret = 0;
361
362         return ret;
363 }
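
/*
 * Illustrative example (added): for minor 0, a per-device handler <cmd> run
 * through drbd_khelper() above is invoked roughly as
 *
 *     argv: { usermode_helper, <cmd>, "minor-0", NULL }
 *     envp: HOME=/  TERM=linux  PATH=/sbin:/usr/sbin:/bin:/usr/bin
 *           DRBD_PEER_AF=ipv4  DRBD_PEER_ADDRESS=<peer IP>
 *
 * The connection-level twin conn_khelper() below passes the resource name
 * instead of "minor-N"; its "fence-peer" exit status is interpreted in
 * conn_try_outdate_peer().
 */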
364
365 int conn_khelper(struct drbd_tconn *tconn, char *cmd)
366 {
367         char *envp[] = { "HOME=/",
368                         "TERM=linux",
369                         "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
370                          (char[20]) { }, /* address family */
371                          (char[60]) { }, /* address */
372                         NULL };
373         char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
374         int ret;
375
376         setup_khelper_env(tconn, envp);
377         conn_md_sync(tconn);
378
379         conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
380         /* TODO: conn_bcast_event() ?? */
381
382         ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
383         if (ret)
384                 conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
385                           usermode_helper, cmd, tconn->name,
386                           (ret >> 8) & 0xff, ret);
387         else
388                 conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
389                           usermode_helper, cmd, tconn->name,
390                           (ret >> 8) & 0xff, ret);
391         /* TODO: conn_bcast_event() ?? */
392
393         if (ret < 0) /* Ignore any ERRNOs we got. */
394                 ret = 0;
395
396         return ret;
397 }
398
399 static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
400 {
401         enum drbd_fencing_p fp = FP_NOT_AVAIL;
402         struct drbd_conf *mdev;
403         int vnr;
404
405         rcu_read_lock();
406         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
407                 if (get_ldev_if_state(mdev, D_CONSISTENT)) {
408                         fp = max_t(enum drbd_fencing_p, fp,
409                                    rcu_dereference(mdev->ldev->disk_conf)->fencing);
410                         put_ldev(mdev);
411                 }
412         }
413         rcu_read_unlock();
414
415         return fp;
416 }
417
418 bool conn_try_outdate_peer(struct drbd_tconn *tconn)
419 {
420         union drbd_state mask = { };
421         union drbd_state val = { };
422         enum drbd_fencing_p fp;
423         char *ex_to_string;
424         int r;
425
426         if (tconn->cstate >= C_WF_REPORT_PARAMS) {
427                 conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
428                 return false;
429         }
430
431         fp = highest_fencing_policy(tconn);
432         switch (fp) {
433         case FP_NOT_AVAIL:
434                 conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
435                 goto out;
436         case FP_DONT_CARE:
437                 return true;
438         default: ;
439         }
440
441         r = conn_khelper(tconn, "fence-peer");
442
443         switch ((r>>8) & 0xff) {
444         case 3: /* peer is inconsistent */
445                 ex_to_string = "peer is inconsistent or worse";
446                 mask.pdsk = D_MASK;
447                 val.pdsk = D_INCONSISTENT;
448                 break;
449         case 4: /* peer got outdated, or was already outdated */
450                 ex_to_string = "peer was fenced";
451                 mask.pdsk = D_MASK;
452                 val.pdsk = D_OUTDATED;
453                 break;
454         case 5: /* peer was down */
455                 if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
456                         /* we will(have) create(d) a new UUID anyways... */
457                         ex_to_string = "peer is unreachable, assumed to be dead";
458                         mask.pdsk = D_MASK;
459                         val.pdsk = D_OUTDATED;
460                 } else {
461                         ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
462                 }
463                 break;
464         case 6: /* Peer is primary, voluntarily outdate myself.
465                  * This is useful when an unconnected R_SECONDARY is asked to
466                  * become R_PRIMARY, but finds the other peer being active. */
467                 ex_to_string = "peer is active";
468                 conn_warn(tconn, "Peer is primary, outdating myself.\n");
469                 mask.disk = D_MASK;
470                 val.disk = D_OUTDATED;
471                 break;
472         case 7:
473                 if (fp != FP_STONITH)
474                         conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
475                 ex_to_string = "peer was stonithed";
476                 mask.pdsk = D_MASK;
477                 val.pdsk = D_OUTDATED;
478                 break;
479         default:
480                 /* The script is broken ... */
481                 conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
482                 return false; /* Eventually leave IO frozen */
483         }
484
485         conn_info(tconn, "fence-peer helper returned %d (%s)\n",
486                   (r>>8) & 0xff, ex_to_string);
487
488  out:
489
490         /* Not using
491            conn_request_state(tconn, mask, val, CS_VERBOSE);
492            here, because we might have been able to re-establish the connection in the
493            meantime. */
494         spin_lock_irq(&tconn->req_lock);
495         if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags))
496                 _conn_request_state(tconn, mask, val, CS_VERBOSE);
497         spin_unlock_irq(&tconn->req_lock);
498
499         return conn_highest_pdsk(tconn) <= D_OUTDATED;
500 }
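
/*
 * Summary of the fence-peer exit-code contract handled above (added note):
 *
 *   3  peer is inconsistent (or worse)  -> pdsk = Inconsistent
 *   4  peer was fenced/outdated         -> pdsk = Outdated
 *   5  peer was down                    -> pdsk = Outdated only if our disk
 *                                          is UpToDate, otherwise do nothing
 *   6  peer is primary                  -> outdate our own disk instead
 *   7  peer was stonithed               -> pdsk = Outdated (expects the
 *                                          FP_STONITH fencing policy)
 *   anything else: helper considered broken, IO may stay frozen.
 */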
501
502 static int _try_outdate_peer_async(void *data)
503 {
504         struct drbd_tconn *tconn = (struct drbd_tconn *)data;
505
506         conn_try_outdate_peer(tconn);
507
508         kref_put(&tconn->kref, &conn_destroy);
509         return 0;
510 }
511
512 void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
513 {
514         struct task_struct *opa;
515
516         kref_get(&tconn->kref);
517         opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
518         if (IS_ERR(opa)) {
519                 conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
520                 kref_put(&tconn->kref, &conn_destroy);
521         }
522 }
523
524 enum drbd_state_rv
525 drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
526 {
527         const int max_tries = 4;
528         enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
529         struct net_conf *nc;
530         int try = 0;
531         int forced = 0;
532         union drbd_state mask, val;
533
534         if (new_role == R_PRIMARY)
535                 request_ping(mdev->tconn); /* Detect a dead peer ASAP */
536
537         mutex_lock(mdev->state_mutex);
538
539         mask.i = 0; mask.role = R_MASK;
540         val.i  = 0; val.role  = new_role;
541
542         while (try++ < max_tries) {
543                 rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);
544
545                 /* in case we first succeeded in outdating the peer,
546                  * but now suddenly could establish a connection */
547                 if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
548                         val.pdsk = 0;
549                         mask.pdsk = 0;
550                         continue;
551                 }
552
553                 if (rv == SS_NO_UP_TO_DATE_DISK && force &&
554                     (mdev->state.disk < D_UP_TO_DATE &&
555                      mdev->state.disk >= D_INCONSISTENT)) {
556                         mask.disk = D_MASK;
557                         val.disk  = D_UP_TO_DATE;
558                         forced = 1;
559                         continue;
560                 }
561
562                 if (rv == SS_NO_UP_TO_DATE_DISK &&
563                     mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
564                         D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
565
566                         if (conn_try_outdate_peer(mdev->tconn)) {
567                                 val.disk = D_UP_TO_DATE;
568                                 mask.disk = D_MASK;
569                         }
570                         continue;
571                 }
572
573                 if (rv == SS_NOTHING_TO_DO)
574                         goto out;
575                 if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
576                         if (!conn_try_outdate_peer(mdev->tconn) && force) {
577                                 dev_warn(DEV, "Forced into split brain situation!\n");
578                                 mask.pdsk = D_MASK;
579                                 val.pdsk  = D_OUTDATED;
580
581                         }
582                         continue;
583                 }
584                 if (rv == SS_TWO_PRIMARIES) {
585                         /* Maybe the peer is detected as dead very soon...
586                            retry at most once more in this case. */
587                         int timeo;
588                         rcu_read_lock();
589                         nc = rcu_dereference(mdev->tconn->net_conf);
590                         timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
591                         rcu_read_unlock();
592                         schedule_timeout_interruptible(timeo);
593                         if (try < max_tries)
594                                 try = max_tries - 1;
595                         continue;
596                 }
597                 if (rv < SS_SUCCESS) {
598                         rv = _drbd_request_state(mdev, mask, val,
599                                                 CS_VERBOSE + CS_WAIT_COMPLETE);
600                         if (rv < SS_SUCCESS)
601                                 goto out;
602                 }
603                 break;
604         }
605
606         if (rv < SS_SUCCESS)
607                 goto out;
608
609         if (forced)
610                 dev_warn(DEV, "Forced to consider local data as UpToDate!\n");
611
612         /* Wait until nothing is on the fly :) */
613         wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
614
615         /* FIXME also wait for all pending P_BARRIER_ACK? */
616
617         if (new_role == R_SECONDARY) {
618                 set_disk_ro(mdev->vdisk, true);
619                 if (get_ldev(mdev)) {
620                         mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
621                         put_ldev(mdev);
622                 }
623         } else {
624                 mutex_lock(&mdev->tconn->conf_update);
625                 nc = mdev->tconn->net_conf;
626                 if (nc)
627                         nc->discard_my_data = 0; /* without copy; single bit op is atomic */
628                 mutex_unlock(&mdev->tconn->conf_update);
629
630                 set_disk_ro(mdev->vdisk, false);
631                 if (get_ldev(mdev)) {
632                         if (((mdev->state.conn < C_CONNECTED ||
633                                mdev->state.pdsk <= D_FAILED)
634                               && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
635                                 drbd_uuid_new_current(mdev);
636
637                         mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
638                         put_ldev(mdev);
639                 }
640         }
641
642         /* writeout of the bitmap areas covered by the activity log
643          * to stable storage was already done in the after-state-change work */
644
645         if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
646                 /* if this was forced, we should consider sync */
647                 if (forced)
648                         drbd_send_uuids(mdev);
649                 drbd_send_current_state(mdev);
650         }
651
652         drbd_md_sync(mdev);
653
654         kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
655 out:
656         mutex_unlock(mdev->state_mutex);
657         return rv;
658 }
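
/*
 * Added note on the retry loop above: a promotion to R_PRIMARY is retried up
 * to max_tries times, each round reacting to the previous refusal:
 *   - SS_CW_FAILED_BY_PEER while we also asked to outdate: drop the pdsk part
 *     of the request and retry (the connection came back in the meantime).
 *   - SS_NO_UP_TO_DATE_DISK with force and a local disk that is at least
 *     Inconsistent: force the disk UpToDate and retry ("forced" promotion).
 *   - SS_NO_UP_TO_DATE_DISK with a Consistent disk: try to fence/outdate the
 *     peer first, then claim UpToDate.
 *   - SS_PRIMARY_NOP: try to outdate the peer; if that fails and force is
 *     set, proceed anyway (possible split brain).
 *   - SS_TWO_PRIMARIES: wait roughly one ping timeout and retry once more.
 */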
659
660 static const char *from_attrs_err_to_txt(int err)
661 {
662         return  err == -ENOMSG ? "required attribute missing" :
663                 err == -EOPNOTSUPP ? "unknown mandatory attribute" :
664                 err == -EEXIST ? "can not change invariant setting" :
665                 "invalid attribute value";
666 }
667
668 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
669 {
670         struct set_role_parms parms;
671         int err;
672         enum drbd_ret_code retcode;
673
674         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
675         if (!adm_ctx.reply_skb)
676                 return retcode;
677         if (retcode != NO_ERROR)
678                 goto out;
679
680         memset(&parms, 0, sizeof(parms));
681         if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
682                 err = set_role_parms_from_attrs(&parms, info);
683                 if (err) {
684                         retcode = ERR_MANDATORY_TAG;
685                         drbd_msg_put_info(from_attrs_err_to_txt(err));
686                         goto out;
687                 }
688         }
689
690         if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
691                 retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
692         else
693                 retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
694 out:
695         drbd_adm_finish(info, retcode);
696         return 0;
697 }
698
699 /* Initializes the md.*_offset members, so we are able to find
700  * the on disk meta data.
701  *
702  * We currently have two possible layouts:
703  * external:
704  *   |----------- md_size_sect ------------------|
705  *   [ 4k superblock ][ activity log ][  Bitmap  ]
706  *   | al_offset == 8 |
707  *   | bm_offset = al_offset + X      |
708  *  ==> bitmap sectors = md_size_sect - bm_offset
709  *
710  * internal:
711  *            |----------- md_size_sect ------------------|
712  * [data.....][  Bitmap  ][ activity log ][ 4k superblock ]
713  *                        | al_offset < 0 |
714  *            | bm_offset = al_offset - Y |
715  *  ==> bitmap sectors = Y = al_offset - bm_offset
716  *
717  *  Activity log size used to be fixed 32kB,
718  *  but is about to become configurable.
719  */
720 static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
721                                        struct drbd_backing_dev *bdev)
722 {
723         sector_t md_size_sect = 0;
724         unsigned int al_size_sect = MD_32kB_SECT;
725         int meta_dev_idx;
726
727         rcu_read_lock();
728         meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
729
730         switch (meta_dev_idx) {
731         default:
732                 /* v07 style fixed size indexed meta data */
733                 bdev->md.md_size_sect = MD_128MB_SECT;
734                 bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
735                 bdev->md.al_offset = MD_4kB_SECT;
736                 bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
737                 break;
738         case DRBD_MD_INDEX_FLEX_EXT:
739                 /* just occupy the full device; unit: sectors */
740                 bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
741                 bdev->md.md_offset = 0;
742                 bdev->md.al_offset = MD_4kB_SECT;
743                 bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
744                 break;
745         case DRBD_MD_INDEX_INTERNAL:
746         case DRBD_MD_INDEX_FLEX_INT:
747                 bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
748                 /* al size is still fixed */
749                 bdev->md.al_offset = -al_size_sect;
750                 /* we need (slightly less than) ~ this many bitmap sectors: */
751                 md_size_sect = drbd_get_capacity(bdev->backing_bdev);
752                 md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
753                 md_size_sect = BM_SECT_TO_EXT(md_size_sect);
754                 md_size_sect = ALIGN(md_size_sect, 8);
755
756                 /* plus the "drbd meta data super block",
757                  * and the activity log; */
758                 md_size_sect += MD_4kB_SECT + al_size_sect;
759
760                 bdev->md.md_size_sect = md_size_sect;
761                 /* bitmap offset is adjusted by 'super' block size */
762                 bdev->md.bm_offset   = -md_size_sect + MD_4kB_SECT;
763                 break;
764         }
765         rcu_read_unlock();
766 }
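
/*
 * Worked example (added; assumes 512-byte sectors, so MD_4kB_SECT == 8,
 * MD_32kB_SECT == 64 and MD_128MB_SECT == 262144): for the default
 * "v07 style fixed size indexed" case above this yields, relative to
 * md_offset,
 *
 *     superblock   at sector 0            (4 kB)
 *     activity log at sector 8            (al_offset == MD_4kB_SECT)
 *     bitmap       at sector 8 + 64 = 72  (bm_offset)
 *
 * leaving md_size_sect - bm_offset = 262144 - 72 sectors (just under 128 MB)
 * for the bitmap, matching the "external" layout diagram above.
 */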
767
768 /* input size is expected to be in KB */
769 char *ppsize(char *buf, unsigned long long size)
770 {
771         /* Needs 9 bytes at max including trailing NUL:
772          * -1ULL ==> "16384 EB" */
773         static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
774         int base = 0;
775         while (size >= 10000 && base < sizeof(units)-1) {
776                 /* shift + round */
777                 size = (size >> 10) + !!(size & (1<<9));
778                 base++;
779         }
780         sprintf(buf, "%u %cB", (unsigned)size, units[base]);
781
782         return buf;
783 }
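
/*
 * Usage examples (added): the input is in KB, and each step divides by 1024
 * while rounding up on bit 9 (the 512s bit):
 *
 *     ppsize(buf, 2);        ->  "2 KB"
 *     ppsize(buf, 1048576);  ->  "1024 MB"
 *     ppsize(buf, 10752);    ->  "11 MB"   (10752 >> 10 == 10, bit 9 set)
 */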
784
785 /* there is still a theoretical deadlock when called from receiver
786  * on a D_INCONSISTENT R_PRIMARY:
787  *  remote READ does inc_ap_bio, receiver would need to receive answer
788  *  packet from remote to dec_ap_bio again.
789  *  receiver receive_sizes(), comes here,
790  *  waits for ap_bio_cnt == 0. -> deadlock.
791  * but this cannot happen, actually, because:
792  *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
793  *  (not connected, or bad/no disk on peer):
794  *  see drbd_fail_request_early, ap_bio_cnt is zero.
795  *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
796  *  peer may not initiate a resize.
797  */
798 /* Note these are not to be confused with
799  * drbd_adm_suspend_io/drbd_adm_resume_io,
800  * which are (sub) state changes triggered by admin (drbdsetup),
801  * and can be long lived.
802  * This changes an mdev->flag, is triggered by drbd internals,
803  * and should be short-lived. */
804 void drbd_suspend_io(struct drbd_conf *mdev)
805 {
806         set_bit(SUSPEND_IO, &mdev->flags);
807         if (drbd_suspended(mdev))
808                 return;
809         wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
810 }
811
812 void drbd_resume_io(struct drbd_conf *mdev)
813 {
814         clear_bit(SUSPEND_IO, &mdev->flags);
815         wake_up(&mdev->misc_wait);
816 }
817
818 /**
819  * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
820  * @mdev:       DRBD device.
821  *
822  * Returns an enum determine_dev_size: unchanged, grew or shrunk on success, dev_size_error on failure.
823  * You should call drbd_md_sync() after calling this function.
824  */
825 enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
826 {
827         sector_t prev_first_sect, prev_size; /* previous meta location */
828         sector_t la_size, u_size;
829         sector_t size;
830         char ppb[10];
831
832         int md_moved, la_size_changed;
833         enum determine_dev_size rv = unchanged;
834
835         /* race:
836          * application request passes inc_ap_bio,
837          * but then cannot get an AL-reference.
838          * this function later may wait on ap_bio_cnt == 0. -> deadlock.
839          *
840          * to avoid that:
841          * Suspend IO right here.
842          * still lock the act_log to not trigger ASSERTs there.
843          */
844         drbd_suspend_io(mdev);
845
846         /* no wait necessary anymore, actually we could assert that */
847         wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
848
849         prev_first_sect = drbd_md_first_sector(mdev->ldev);
850         prev_size = mdev->ldev->md.md_size_sect;
851         la_size = mdev->ldev->md.la_size_sect;
852
853         /* TODO: should only be some assert here, not (re)init... */
854         drbd_md_set_sector_offsets(mdev, mdev->ldev);
855
856         rcu_read_lock();
857         u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
858         rcu_read_unlock();
859         size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);
860
861         if (drbd_get_capacity(mdev->this_bdev) != size ||
862             drbd_bm_capacity(mdev) != size) {
863                 int err;
864                 err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
865                 if (unlikely(err)) {
866                         /* currently there is only one error: ENOMEM! */
867                         size = drbd_bm_capacity(mdev)>>1;
868                         if (size == 0) {
869                                 dev_err(DEV, "OUT OF MEMORY! "
870                                     "Could not allocate bitmap!\n");
871                         } else {
872                                 dev_err(DEV, "BM resizing failed. "
873                                     "Leaving size unchanged at size = %lu KB\n",
874                                     (unsigned long)size);
875                         }
876                         rv = dev_size_error;
877                 }
878                 /* racy, see comments above. */
879                 drbd_set_my_capacity(mdev, size);
880                 mdev->ldev->md.la_size_sect = size;
881                 dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
882                      (unsigned long long)size>>1);
883         }
884         if (rv == dev_size_error)
885                 goto out;
886
887         la_size_changed = (la_size != mdev->ldev->md.la_size_sect);
888
889         md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
890                 || prev_size       != mdev->ldev->md.md_size_sect;
891
892         if (la_size_changed || md_moved) {
893                 int err;
894
895                 drbd_al_shrink(mdev); /* All extents inactive. */
896                 dev_info(DEV, "Writing the whole bitmap, %s\n",
897                          la_size_changed && md_moved ? "size changed and md moved" :
898                          la_size_changed ? "size changed" : "md moved");
899                 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
900                 err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
901                                      "size changed", BM_LOCKED_MASK);
902                 if (err) {
903                         rv = dev_size_error;
904                         goto out;
905                 }
906                 drbd_md_mark_dirty(mdev);
907         }
908
909         if (size > la_size)
910                 rv = grew;
911         if (size < la_size)
912                 rv = shrunk;
913 out:
914         lc_unlock(mdev->act_log);
915         wake_up(&mdev->al_wait);
916         drbd_resume_io(mdev);
917
918         return rv;
919 }
920
921 sector_t
922 drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
923                   sector_t u_size, int assume_peer_has_space)
924 {
925         sector_t p_size = mdev->p_size;   /* partner's disk size. */
926         sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
927         sector_t m_size; /* my size */
928         sector_t size = 0;
929
930         m_size = drbd_get_max_capacity(bdev);
931
932         if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
933                 dev_warn(DEV, "Resize while not connected was forced by the user!\n");
934                 p_size = m_size;
935         }
936
937         if (p_size && m_size) {
938                 size = min_t(sector_t, p_size, m_size);
939         } else {
940                 if (la_size) {
941                         size = la_size;
942                         if (m_size && m_size < size)
943                                 size = m_size;
944                         if (p_size && p_size < size)
945                                 size = p_size;
946                 } else {
947                         if (m_size)
948                                 size = m_size;
949                         if (p_size)
950                                 size = p_size;
951                 }
952         }
953
954         if (size == 0)
955                 dev_err(DEV, "Both nodes diskless!\n");
956
957         if (u_size) {
958                 if (u_size > size)
959                         dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
960                             (unsigned long)u_size>>1, (unsigned long)size>>1);
961                 else
962                         size = u_size;
963         }
964
965         return size;
966 }
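
/*
 * Worked example (added): with the peer size unknown (p_size == 0), a local
 * capacity m_size of 100 sectors and a last agreed size la_size of 80
 * sectors, the result stays at the last agreed 80 sectors; only once the
 * peer reports its size does min(p_size, m_size) take over.  A user-requested
 * u_size may only select at most that result, otherwise it is rejected with
 * "Requested disk size is too big".
 */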
967
968 /**
969  * drbd_check_al_size() - Ensures that the AL is of the right size
970  * @mdev:       DRBD device.
971  *
972  * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
973  * failed, and 0 on success. You should call drbd_md_sync() after you called
974  * this function.
975  */
976 static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
977 {
978         struct lru_cache *n, *t;
979         struct lc_element *e;
980         unsigned int in_use;
981         int i;
982
983         if (mdev->act_log &&
984             mdev->act_log->nr_elements == dc->al_extents)
985                 return 0;
986
987         in_use = 0;
988         t = mdev->act_log;
989         n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
990                 dc->al_extents, sizeof(struct lc_element), 0);
991
992         if (n == NULL) {
993                 dev_err(DEV, "Cannot allocate act_log lru!\n");
994                 return -ENOMEM;
995         }
996         spin_lock_irq(&mdev->al_lock);
997         if (t) {
998                 for (i = 0; i < t->nr_elements; i++) {
999                         e = lc_element_by_index(t, i);
1000                         if (e->refcnt)
1001                                 dev_err(DEV, "refcnt(%d)==%d\n",
1002                                     e->lc_number, e->refcnt);
1003                         in_use += e->refcnt;
1004                 }
1005         }
1006         if (!in_use)
1007                 mdev->act_log = n;
1008         spin_unlock_irq(&mdev->al_lock);
1009         if (in_use) {
1010                 dev_err(DEV, "Activity log still in use!\n");
1011                 lc_destroy(n);
1012                 return -EBUSY;
1013         } else {
1014                 if (t)
1015                         lc_destroy(t);
1016         }
1017         drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
1018         return 0;
1019 }
1020
1021 static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
1022 {
1023         struct request_queue * const q = mdev->rq_queue;
1024         unsigned int max_hw_sectors = max_bio_size >> 9;
1025         unsigned int max_segments = 0;
1026
1027         if (get_ldev_if_state(mdev, D_ATTACHING)) {
1028                 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
1029
1030                 max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
1031                 rcu_read_lock();
1032                 max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
1033                 rcu_read_unlock();
1034                 put_ldev(mdev);
1035         }
1036
1037         blk_queue_logical_block_size(q, 512);
1038         blk_queue_max_hw_sectors(q, max_hw_sectors);
1039         /* This is the workaround for "bio would need to, but cannot, be split" */
1040         blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
1041         blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
1042
1043         if (get_ldev_if_state(mdev, D_ATTACHING)) {
1044                 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
1045
1046                 blk_queue_stack_limits(q, b);
1047
1048                 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
1049                         dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
1050                                  q->backing_dev_info.ra_pages,
1051                                  b->backing_dev_info.ra_pages);
1052                         q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
1053                 }
1054                 put_ldev(mdev);
1055         }
1056 }
1057
1058 void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
1059 {
1060         unsigned int now, new, local, peer;
1061
1062         now = queue_max_hw_sectors(mdev->rq_queue) << 9;
1063         local = mdev->local_max_bio_size; /* possibly the last known value, from volatile memory */
1064         peer = mdev->peer_max_bio_size; /* possibly the last known value, from meta data */
1065
1066         if (get_ldev_if_state(mdev, D_ATTACHING)) {
1067                 local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
1068                 mdev->local_max_bio_size = local;
1069                 put_ldev(mdev);
1070         }
1071         local = min(local, DRBD_MAX_BIO_SIZE);
1072
1073         /* We may ignore peer limits if the peer is modern enough:
1074            from 8.3.8 onwards the peer can use multiple
1075            BIOs for a single peer_request */
1076         if (mdev->state.conn >= C_CONNECTED) {
1077                 if (mdev->tconn->agreed_pro_version < 94)
1078                         peer = min( mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
1079                         /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
1080                 else if (mdev->tconn->agreed_pro_version == 94)
1081                         peer = DRBD_MAX_SIZE_H80_PACKET;
1082                 else if (mdev->tconn->agreed_pro_version < 100)
1083                         peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
1084                 else
1085                         peer = DRBD_MAX_BIO_SIZE;
1086         }
1087
1088         new = min(local, peer);
1089
1090         if (mdev->state.role == R_PRIMARY && new < now)
1091                 dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
1092
1093         if (new != now)
1094                 dev_info(DEV, "max BIO size = %u\n", new);
1095
1096         drbd_setup_queue_param(mdev, new);
1097 }
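
/*
 * Added summary of the peer limit negotiated above, by agreed protocol
 * version:
 *
 *     < 94    min(peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET)
 *     == 94   DRBD_MAX_SIZE_H80_PACKET
 *     95-99   DRBD_MAX_BIO_SIZE_P95     (drbd 8.3.8 up to before 8.4.0)
 *     >= 100  DRBD_MAX_BIO_SIZE
 *
 * and the queue is then set up for min(local, peer).
 */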
1098
1099 /* Starts the worker thread */
1100 static void conn_reconfig_start(struct drbd_tconn *tconn)
1101 {
1102         drbd_thread_start(&tconn->worker);
1103         conn_flush_workqueue(tconn);
1104 }
1105
1106 /* if still unconfigured, stops worker again. */
1107 static void conn_reconfig_done(struct drbd_tconn *tconn)
1108 {
1109         bool stop_threads;
1110         spin_lock_irq(&tconn->req_lock);
1111         stop_threads = conn_all_vols_unconf(tconn) &&
1112                 tconn->cstate == C_STANDALONE;
1113         spin_unlock_irq(&tconn->req_lock);
1114         if (stop_threads) {
1115                 /* asender is implicitly stopped by receiver
1116                  * in conn_disconnect() */
1117                 drbd_thread_stop(&tconn->receiver);
1118                 drbd_thread_stop(&tconn->worker);
1119         }
1120 }
1121
1122 /* Make sure IO is suspended before calling this function. */
1123 static void drbd_suspend_al(struct drbd_conf *mdev)
1124 {
1125         int s = 0;
1126
1127         if (!lc_try_lock(mdev->act_log)) {
1128                 dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
1129                 return;
1130         }
1131
1132         drbd_al_shrink(mdev);
1133         spin_lock_irq(&mdev->tconn->req_lock);
1134         if (mdev->state.conn < C_CONNECTED)
1135                 s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
1136         spin_unlock_irq(&mdev->tconn->req_lock);
1137         lc_unlock(mdev->act_log);
1138
1139         if (s)
1140                 dev_info(DEV, "Suspended AL updates\n");
1141 }
1142
1143
1144 static bool should_set_defaults(struct genl_info *info)
1145 {
1146         unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
1147         return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
1148 }
1149
1150 static void enforce_disk_conf_limits(struct disk_conf *dc)
1151 {
1152         if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
1153                 dc->al_extents = DRBD_AL_EXTENTS_MIN;
1154         if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
1155                 dc->al_extents = DRBD_AL_EXTENTS_MAX;
1156
1157         if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1158                 dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
1159 }
1160
1161 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1162 {
1163         enum drbd_ret_code retcode;
1164         struct drbd_conf *mdev;
1165         struct disk_conf *new_disk_conf, *old_disk_conf;
1166         struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
1167         int err, fifo_size;
1168
1169         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1170         if (!adm_ctx.reply_skb)
1171                 return retcode;
1172         if (retcode != NO_ERROR)
1173                 goto out;
1174
1175         mdev = adm_ctx.mdev;
1176
1177         /* we also need a disk
1178          * to change the options on */
1179         if (!get_ldev(mdev)) {
1180                 retcode = ERR_NO_DISK;
1181                 goto out;
1182         }
1183
1184         new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
1185         if (!new_disk_conf) {
1186                 retcode = ERR_NOMEM;
1187                 goto fail;
1188         }
1189
1190         mutex_lock(&mdev->tconn->conf_update);
1191         old_disk_conf = mdev->ldev->disk_conf;
1192         *new_disk_conf = *old_disk_conf;
1193         if (should_set_defaults(info))
1194                 set_disk_conf_defaults(new_disk_conf);
1195
1196         err = disk_conf_from_attrs_for_change(new_disk_conf, info);
1197         if (err && err != -ENOMSG) {
1198                 retcode = ERR_MANDATORY_TAG;
1199                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1200         }
1201
1202         if (!expect(new_disk_conf->resync_rate >= 1))
1203                 new_disk_conf->resync_rate = 1;
1204
1205         enforce_disk_conf_limits(new_disk_conf);
1206
1207         fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
1208         if (fifo_size != mdev->rs_plan_s->size) {
1209                 new_plan = fifo_alloc(fifo_size);
1210                 if (!new_plan) {
1211                         dev_err(DEV, "kmalloc of fifo_buffer failed");
1212                         retcode = ERR_NOMEM;
1213                         goto fail_unlock;
1214                 }
1215         }
1216
1217         drbd_suspend_io(mdev);
1218         wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
1219         drbd_al_shrink(mdev);
1220         err = drbd_check_al_size(mdev, new_disk_conf);
1221         lc_unlock(mdev->act_log);
1222         wake_up(&mdev->al_wait);
1223         drbd_resume_io(mdev);
1224
1225         if (err) {
1226                 retcode = ERR_NOMEM;
1227                 goto fail_unlock;
1228         }
1229
1230         write_lock_irq(&global_state_lock);
1231         retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
1232         if (retcode == NO_ERROR) {
1233                 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
1234                 drbd_resync_after_changed(mdev);
1235         }
1236         write_unlock_irq(&global_state_lock);
1237
1238         if (retcode != NO_ERROR)
1239                 goto fail_unlock;
1240
1241         if (new_plan) {
1242                 old_plan = mdev->rs_plan_s;
1243                 rcu_assign_pointer(mdev->rs_plan_s, new_plan);
1244         }
1245
1246         mutex_unlock(&mdev->tconn->conf_update);
1247
1248         if (new_disk_conf->al_updates)
1249                 mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
1250         else
1251                 mdev->ldev->md.flags |= MDF_AL_DISABLED;
1252
1253         if (new_disk_conf->md_flushes)
1254                 clear_bit(MD_NO_FUA, &mdev->flags);
1255         else
1256                 set_bit(MD_NO_FUA, &mdev->flags);
1257
1258         drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
1259
1260         drbd_md_sync(mdev);
1261
1262         if (mdev->state.conn >= C_CONNECTED)
1263                 drbd_send_sync_param(mdev);
1264
1265         synchronize_rcu();
1266         kfree(old_disk_conf);
1267         kfree(old_plan);
1268         mod_timer(&mdev->request_timer, jiffies + HZ);
1269         goto success;
1270
1271 fail_unlock:
1272         mutex_unlock(&mdev->tconn->conf_update);
1273  fail:
1274         kfree(new_disk_conf);
1275         kfree(new_plan);
1276 success:
1277         put_ldev(mdev);
1278  out:
1279         drbd_adm_finish(info, retcode);
1280         return 0;
1281 }
1282
1283 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1284 {
1285         struct drbd_conf *mdev;
1286         int err;
1287         enum drbd_ret_code retcode;
1288         enum determine_dev_size dd;
1289         sector_t max_possible_sectors;
1290         sector_t min_md_device_sectors;
1291         struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
1292         struct disk_conf *new_disk_conf = NULL;
1293         struct block_device *bdev;
1294         struct lru_cache *resync_lru = NULL;
1295         struct fifo_buffer *new_plan = NULL;
1296         union drbd_state ns, os;
1297         enum drbd_state_rv rv;
1298         struct net_conf *nc;
1299
1300         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1301         if (!adm_ctx.reply_skb)
1302                 return retcode;
1303         if (retcode != NO_ERROR)
1304                 goto finish;
1305
1306         mdev = adm_ctx.mdev;
1307         conn_reconfig_start(mdev->tconn);
1308
1309         /* if you want to reconfigure, please tear down first */
1310         if (mdev->state.disk > D_DISKLESS) {
1311                 retcode = ERR_DISK_CONFIGURED;
1312                 goto fail;
1313         }
1314         /* It may just now have detached because of IO error.  Make sure
1315          * drbd_ldev_destroy is done already, we may end up here very fast,
1316          * e.g. if someone calls attach from the on-io-error handler,
1317          * to realize a "hot spare" feature (not that I'd recommend that) */
1318         wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
1319
1320         /* make sure there is no leftover from previous force-detach attempts */
1321         clear_bit(FORCE_DETACH, &mdev->flags);
1322         clear_bit(WAS_IO_ERROR, &mdev->flags);
1323         clear_bit(WAS_READ_ERROR, &mdev->flags);
1324
1325         /* and no leftover from previously aborted resync or verify, either */
1326         mdev->rs_total = 0;
1327         mdev->rs_failed = 0;
1328         atomic_set(&mdev->rs_pending_cnt, 0);
1329
1330         /* allocation not in the IO path, drbdsetup context */
1331         nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1332         if (!nbc) {
1333                 retcode = ERR_NOMEM;
1334                 goto fail;
1335         }
1336         spin_lock_init(&nbc->md.uuid_lock);
1337
1338         new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
1339         if (!new_disk_conf) {
1340                 retcode = ERR_NOMEM;
1341                 goto fail;
1342         }
1343         nbc->disk_conf = new_disk_conf;
1344
1345         set_disk_conf_defaults(new_disk_conf);
1346         err = disk_conf_from_attrs(new_disk_conf, info);
1347         if (err) {
1348                 retcode = ERR_MANDATORY_TAG;
1349                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1350                 goto fail;
1351         }
1352
1353         enforce_disk_conf_limits(new_disk_conf);
1354
1355         new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
1356         if (!new_plan) {
1357                 retcode = ERR_NOMEM;
1358                 goto fail;
1359         }
1360
1361         if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
1362                 retcode = ERR_MD_IDX_INVALID;
1363                 goto fail;
1364         }
1365
1366         rcu_read_lock();
1367         nc = rcu_dereference(mdev->tconn->net_conf);
1368         if (nc) {
1369                 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
1370                         rcu_read_unlock();
1371                         retcode = ERR_STONITH_AND_PROT_A;
1372                         goto fail;
1373                 }
1374         }
1375         rcu_read_unlock();
1376
1377         bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
1378                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
1379         if (IS_ERR(bdev)) {
1380                 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
1381                         PTR_ERR(bdev));
1382                 retcode = ERR_OPEN_DISK;
1383                 goto fail;
1384         }
1385         nbc->backing_bdev = bdev;
1386
1387         /*
1388          * meta_dev_idx >= 0: external fixed size, possibly multiple
1389          * drbd sharing one meta device.  TODO in that case, paranoia
1390          * check that [md_bdev, meta_dev_idx] is not yet used by some
1391          * other drbd minor!  (if you use drbd.conf + drbdadm, that
1392          * should check it for you already; but if you don't, or
1393          * someone fooled it, we need to double check here)
1394          */
1395         bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
1396                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL,
1397                                   (new_disk_conf->meta_dev_idx < 0) ?
1398                                   (void *)mdev : (void *)drbd_m_holder);
1399         if (IS_ERR(bdev)) {
1400                 dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
1401                         PTR_ERR(bdev));
1402                 retcode = ERR_OPEN_MD_DISK;
1403                 goto fail;
1404         }
1405         nbc->md_bdev = bdev;
1406
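/*
 * Consistency check on the meta data index vs. the chosen devices:
 * internal meta data (DRBD_MD_INDEX_INTERNAL or DRBD_MD_INDEX_FLEX_INT)
 * lives on the backing device itself, any other index implies a separate
 * meta device.  The check below rejects configurations where those two
 * notions disagree.
 */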
1407         if ((nbc->backing_bdev == nbc->md_bdev) !=
1408             (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1409              new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
1410                 retcode = ERR_MD_IDX_INVALID;
1411                 goto fail;
1412         }
1413
1414         resync_lru = lc_create("resync", drbd_bm_ext_cache,
1415                         1, 61, sizeof(struct bm_extent),
1416                         offsetof(struct bm_extent, lce));
1417         if (!resync_lru) {
1418                 retcode = ERR_NOMEM;
1419                 goto fail;
1420         }
1421
1422         /* Set md sector offsets first; drbd_get_max_capacity() needs them for DRBD_MD_INDEX_FLEX_INT */
1423         drbd_md_set_sector_offsets(mdev, nbc);
1424
1425         if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
1426                 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
1427                         (unsigned long long) drbd_get_max_capacity(nbc),
1428                         (unsigned long long) new_disk_conf->disk_size);
1429                 retcode = ERR_DISK_TOO_SMALL;
1430                 goto fail;
1431         }
1432
1433         if (new_disk_conf->meta_dev_idx < 0) {
1434                 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1435                 /* at least one MB, otherwise it does not make sense */
1436                 min_md_device_sectors = (2<<10);
1437         } else {
1438                 max_possible_sectors = DRBD_MAX_SECTORS;
1439                 min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
1440         }
1441
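/*
 * Derived size limits: internal/flexible meta data needs at least 1 MB
 * (2<<10 sectors of 512 bytes) on its device, an external fixed-index
 * layout reserves roughly 128 MB per index slot (MD_128MB_SECT per slot),
 * and max_possible_sectors caps how much of a very large lower level
 * device we can actually use.
 */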
1442         if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
1443                 retcode = ERR_MD_DISK_TOO_SMALL;
1444                 dev_warn(DEV, "refusing attach: md-device too small, "
1445                      "at least %llu sectors needed for this meta-disk type\n",
1446                      (unsigned long long) min_md_device_sectors);
1447                 goto fail;
1448         }
1449
1450         /* Make sure the new disk is big enough
1451          * (we may currently be R_PRIMARY with no local disk...) */
1452         if (drbd_get_max_capacity(nbc) <
1453             drbd_get_capacity(mdev->this_bdev)) {
1454                 retcode = ERR_DISK_TOO_SMALL;
1455                 goto fail;
1456         }
1457
1458         nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1459
1460         if (nbc->known_size > max_possible_sectors) {
1461                 dev_warn(DEV, "==> truncating very big lower level device "
1462                         "to currently maximum possible %llu sectors <==\n",
1463                         (unsigned long long) max_possible_sectors);
1464                 if (new_disk_conf->meta_dev_idx >= 0)
1465                         dev_warn(DEV, "==>> using internal or flexible "
1466                                       "meta data may help <<==\n");
1467         }
1468
1469         drbd_suspend_io(mdev);
1470         /* also wait for the last barrier ack. */
1471         /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
1472          * We need a way to either ignore barrier acks for barriers sent before a device
1473          * was attached, or a way to wait for all pending barrier acks to come in.
1474          * As barriers are counted per resource,
1475          * we'd need to suspend io on all devices of a resource.
1476          */
1477         wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
1478         /* and for any other previously queued work */
1479         drbd_flush_workqueue(mdev);
1480
1481         rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
1482         retcode = rv;  /* FIXME: Type mismatch. */
1483         drbd_resume_io(mdev);
1484         if (rv < SS_SUCCESS)
1485                 goto fail;
1486
1487         if (!get_ldev_if_state(mdev, D_ATTACHING))
1488                 goto force_diskless;
1489
1490         drbd_md_set_sector_offsets(mdev, nbc);
1491
1492         if (!mdev->bitmap) {
1493                 if (drbd_bm_init(mdev)) {
1494                         retcode = ERR_NOMEM;
1495                         goto force_diskless_dec;
1496                 }
1497         }
1498
1499         retcode = drbd_md_read(mdev, nbc);
1500         if (retcode != NO_ERROR)
1501                 goto force_diskless_dec;
1502
1503         if (mdev->state.conn < C_CONNECTED &&
1504             mdev->state.role == R_PRIMARY &&
1505             (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1506                 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
1507                     (unsigned long long)mdev->ed_uuid);
1508                 retcode = ERR_DATA_NOT_CURRENT;
1509                 goto force_diskless_dec;
1510         }
1511
1512         /* Since we are diskless, fix the activity log first... */
1513         if (drbd_check_al_size(mdev, new_disk_conf)) {
1514                 retcode = ERR_NOMEM;
1515                 goto force_diskless_dec;
1516         }
1517
1518         /* Prevent shrinking of consistent devices ! */
1519         if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
1520             drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
1521                 dev_warn(DEV, "refusing to truncate a consistent device\n");
1522                 retcode = ERR_DISK_TOO_SMALL;
1523                 goto force_diskless_dec;
1524         }
1525
1526         /* Reset the "barriers don't work" bits here, then force meta data to
1527          * be written, to ensure we determine if barriers are supported. */
1528         if (new_disk_conf->md_flushes)
1529                 clear_bit(MD_NO_FUA, &mdev->flags);
1530         else
1531                 set_bit(MD_NO_FUA, &mdev->flags);
1532
1533         /* Point of no return reached.
1534          * Devices and memory are no longer released by error cleanup below.
1535          * now mdev takes over responsibility, and the state engine should
1536          * clean it up somewhere.  */
1537         D_ASSERT(mdev->ldev == NULL);
1538         mdev->ldev = nbc;
1539         mdev->resync = resync_lru;
1540         mdev->rs_plan_s = new_plan;
1541         nbc = NULL;
1542         resync_lru = NULL;
1543         new_disk_conf = NULL;
1544         new_plan = NULL;
1545
1546         drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
1547
1548         if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
1549                 set_bit(CRASHED_PRIMARY, &mdev->flags);
1550         else
1551                 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1552
1553         if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1554             !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod))
1555                 set_bit(CRASHED_PRIMARY, &mdev->flags);
1556
1557         mdev->send_cnt = 0;
1558         mdev->recv_cnt = 0;
1559         mdev->read_cnt = 0;
1560         mdev->writ_cnt = 0;
1561
1562         drbd_reconsider_max_bio_size(mdev);
1563
1564         /* If I am currently not R_PRIMARY,
1565          * but meta data primary indicator is set,
1566          * I just now recover from a hard crash,
1567          * and have been R_PRIMARY before that crash.
1568          *
1569          * Now, if I had no connection before that crash
1570          * (have been degraded R_PRIMARY), chances are that
1571          * I won't find my peer now either.
1572          *
1573          * In that case, and _only_ in that case,
1574          * we use the degr-wfc-timeout instead of the default,
1575          * so we can automatically recover from a crash of a
1576          * degraded but active "cluster" after a certain timeout.
1577          */
1578         clear_bit(USE_DEGR_WFC_T, &mdev->flags);
1579         if (mdev->state.role != R_PRIMARY &&
1580              drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1581             !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1582                 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1583
1584         dd = drbd_determine_dev_size(mdev, 0);
1585         if (dd == dev_size_error) {
1586                 retcode = ERR_NOMEM_BITMAP;
1587                 goto force_diskless_dec;
1588         } else if (dd == grew)
1589                 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
1590
1591         if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
1592             (test_bit(CRASHED_PRIMARY, &mdev->flags) &&
1593              drbd_md_test_flag(mdev->ldev, MDF_AL_DISABLED))) {
1594                 dev_info(DEV, "Assuming that all blocks are out of sync "
1595                      "(aka FullSync)\n");
1596                 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
1597                         "set_n_write from attaching", BM_LOCKED_MASK)) {
1598                         retcode = ERR_IO_MD_DISK;
1599                         goto force_diskless_dec;
1600                 }
1601         } else {
1602                 if (drbd_bitmap_io(mdev, &drbd_bm_read,
1603                         "read from attaching", BM_LOCKED_MASK)) {
1604                         retcode = ERR_IO_MD_DISK;
1605                         goto force_diskless_dec;
1606                 }
1607         }
1608
1609         if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
1610                 drbd_suspend_al(mdev); /* IO is still suspended here... */
1611
1612         spin_lock_irq(&mdev->tconn->req_lock);
1613         os = drbd_read_state(mdev);
1614         ns = os;
1615         /* If MDF_CONSISTENT is not set go into inconsistent state,
1616            otherwise investigate MDF_WAS_UP_TO_DATE...
1617            If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
1618            otherwise into D_CONSISTENT state.
1619         */
1620         if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
1621                 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
1622                         ns.disk = D_CONSISTENT;
1623                 else
1624                         ns.disk = D_OUTDATED;
1625         } else {
1626                 ns.disk = D_INCONSISTENT;
1627         }
1628
1629         if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
1630                 ns.pdsk = D_OUTDATED;
1631
1632         rcu_read_lock();
1633         if (ns.disk == D_CONSISTENT &&
1634             (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
1635                 ns.disk = D_UP_TO_DATE;
1636
1637         /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1638            MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1639            this point, because drbd_request_state() modifies these
1640            flags. */
1641
1642         if (rcu_dereference(mdev->ldev->disk_conf)->al_updates)
1643                 mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
1644         else
1645                 mdev->ldev->md.flags |= MDF_AL_DISABLED;
1646
1647         rcu_read_unlock();
1648
1649         /* In case we are C_CONNECTED postpone any decision on the new disk
1650            state after the negotiation phase. */
1651         if (mdev->state.conn == C_CONNECTED) {
1652                 mdev->new_state_tmp.i = ns.i;
1653                 ns.i = os.i;
1654                 ns.disk = D_NEGOTIATING;
1655
1656                 /* We expect to receive up-to-date UUIDs soon.
1657                    To avoid a race in receive_state, free p_uuid while
1658                    holding req_lock. I.e. atomic with the state change */
1659                 kfree(mdev->p_uuid);
1660                 mdev->p_uuid = NULL;
1661         }
1662
1663         rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
1664         spin_unlock_irq(&mdev->tconn->req_lock);
1665
1666         if (rv < SS_SUCCESS)
1667                 goto force_diskless_dec;
1668
1669         mod_timer(&mdev->request_timer, jiffies + HZ);
1670
1671         if (mdev->state.role == R_PRIMARY)
1672                 mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
1673         else
1674                 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
1675
1676         drbd_md_mark_dirty(mdev);
1677         drbd_md_sync(mdev);
1678
1679         kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1680         put_ldev(mdev);
1681         conn_reconfig_done(mdev->tconn);
1682         drbd_adm_finish(info, retcode);
1683         return 0;
1684
1685  force_diskless_dec:
1686         put_ldev(mdev);
1687  force_diskless:
1688         drbd_force_state(mdev, NS(disk, D_DISKLESS));
1689         drbd_md_sync(mdev);
1690  fail:
1691         conn_reconfig_done(mdev->tconn);
1692         if (nbc) {
1693                 if (nbc->backing_bdev)
1694                         blkdev_put(nbc->backing_bdev,
1695                                    FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1696                 if (nbc->md_bdev)
1697                         blkdev_put(nbc->md_bdev,
1698                                    FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1699                 kfree(nbc);
1700         }
1701         kfree(new_disk_conf);
1702         lc_destroy(resync_lru);
1703         kfree(new_plan);
1704
1705  finish:
1706         drbd_adm_finish(info, retcode);
1707         return 0;
1708 }
1709
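/*
 * Common helper for detach.  With force set we only flag FORCE_DETACH and
 * force the disk into D_FAILED; cleanup then happens asynchronously.
 * Without force we suspend application IO, grab the meta data buffer so
 * no meta data IO is in flight, request D_FAILED, and wait until the disk
 * state has left D_FAILED again (it transitions on to D_DISKLESS).
 */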
1710 static int adm_detach(struct drbd_conf *mdev, int force)
1711 {
1712         enum drbd_state_rv retcode;
1713         int ret;
1714
1715         if (force) {
1716                 set_bit(FORCE_DETACH, &mdev->flags);
1717                 drbd_force_state(mdev, NS(disk, D_FAILED));
1718                 retcode = SS_SUCCESS;
1719                 goto out;
1720         }
1721
1722         drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
1723         drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
1724         retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
1725         drbd_md_put_buffer(mdev);
1726         /* D_FAILED will transition to DISKLESS. */
1727         ret = wait_event_interruptible(mdev->misc_wait,
1728                         mdev->state.disk != D_FAILED);
1729         drbd_resume_io(mdev);
1730         if ((int)retcode == (int)SS_IS_DISKLESS)
1731                 retcode = SS_NOTHING_TO_DO;
1732         if (ret)
1733                 retcode = ERR_INTR;
1734 out:
1735         return retcode;
1736 }
1737
1738 /* Detaching the disk is a process in multiple stages.  First we need to lock
1739  * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
1740  * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1741  * internal references as well.
1742  * Only then we have finally detached. */
1743 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
1744 {
1745         enum drbd_ret_code retcode;
1746         struct detach_parms parms = { };
1747         int err;
1748
1749         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1750         if (!adm_ctx.reply_skb)
1751                 return retcode;
1752         if (retcode != NO_ERROR)
1753                 goto out;
1754
1755         if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
1756                 err = detach_parms_from_attrs(&parms, info);
1757                 if (err) {
1758                         retcode = ERR_MANDATORY_TAG;
1759                         drbd_msg_put_info(from_attrs_err_to_txt(err));
1760                         goto out;
1761                 }
1762         }
1763
1764         retcode = adm_detach(adm_ctx.mdev, parms.force_detach);
1765 out:
1766         drbd_adm_finish(info, retcode);
1767         return 0;
1768 }
1769
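/*
 * Helpers for drbd_adm_net_opts(): scan all volumes of a connection and
 * report whether a resync (or, below, an online verify) is running.
 * While one is running, the corresponding algorithm (csums-alg resp.
 * verify-alg) must not be changed.
 */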
1770 static bool conn_resync_running(struct drbd_tconn *tconn)
1771 {
1772         struct drbd_conf *mdev;
1773         bool rv = false;
1774         int vnr;
1775
1776         rcu_read_lock();
1777         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1778                 if (mdev->state.conn == C_SYNC_SOURCE ||
1779                     mdev->state.conn == C_SYNC_TARGET ||
1780                     mdev->state.conn == C_PAUSED_SYNC_S ||
1781                     mdev->state.conn == C_PAUSED_SYNC_T) {
1782                         rv = true;
1783                         break;
1784                 }
1785         }
1786         rcu_read_unlock();
1787
1788         return rv;
1789 }
1790
1791 static bool conn_ov_running(struct drbd_tconn *tconn)
1792 {
1793         struct drbd_conf *mdev;
1794         bool rv = false;
1795         int vnr;
1796
1797         rcu_read_lock();
1798         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1799                 if (mdev->state.conn == C_VERIFY_S ||
1800                     mdev->state.conn == C_VERIFY_T) {
1801                         rv = true;
1802                         break;
1803                 }
1804         }
1805         rcu_read_unlock();
1806
1807         return rv;
1808 }
1809
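/*
 * Validate new net options against the current state.  Roughly: once in
 * C_WF_REPORT_PARAMS with an agreed protocol version below 100, the wire
 * protocol, the two-primaries setting and the integrity algorithm can no
 * longer be changed; dual-primary requires protocol C; FP_STONITH fencing
 * does not work with protocol A; and congestion policies other than
 * "block" only make sense with protocol A.
 */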
1810 static enum drbd_ret_code
1811 _check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
1812 {
1813         struct drbd_conf *mdev;
1814         int i;
1815
1816         if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
1817                 if (new_conf->wire_protocol != old_conf->wire_protocol)
1818                         return ERR_NEED_APV_100;
1819
1820                 if (new_conf->two_primaries != old_conf->two_primaries)
1821                         return ERR_NEED_APV_100;
1822
1823                 if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
1824                         return ERR_NEED_APV_100;
1825         }
1826
1827         if (!new_conf->two_primaries &&
1828             conn_highest_role(tconn) == R_PRIMARY &&
1829             conn_highest_peer(tconn) == R_PRIMARY)
1830                 return ERR_NEED_ALLOW_TWO_PRI;
1831
1832         if (new_conf->two_primaries &&
1833             (new_conf->wire_protocol != DRBD_PROT_C))
1834                 return ERR_NOT_PROTO_C;
1835
1836         idr_for_each_entry(&tconn->volumes, mdev, i) {
1837                 if (get_ldev(mdev)) {
1838                         enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
1839                         put_ldev(mdev);
1840                         if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
1841                                 return ERR_STONITH_AND_PROT_A;
1842                 }
1843                 if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
1844                         return ERR_DISCARD_IMPOSSIBLE;
1845         }
1846
1847         if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
1848                 return ERR_CONG_NOT_PROTO_A;
1849
1850         return NO_ERROR;
1851 }
1852
1853 static enum drbd_ret_code
1854 check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
1855 {
1856         enum drbd_ret_code rv;
1857         struct drbd_conf *mdev;
1858         int i;
1859
1860         rcu_read_lock();
1861         rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
1862         rcu_read_unlock();
1863
1864         /* tconn->volumes protected by genl_lock() here */
1865         idr_for_each_entry(&tconn->volumes, mdev, i) {
1866                 if (!mdev->bitmap) {
1867                         if (drbd_bm_init(mdev))
1868                                 return ERR_NOMEM;
1869                 }
1870         }
1871
1872         return rv;
1873 }
1874
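/*
 * All hash transforms a configuration change might need are allocated
 * into this bundle first, so an allocation failure can abort the change
 * before anything has been committed to the tconn.
 */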
1875 struct crypto {
1876         struct crypto_hash *verify_tfm;
1877         struct crypto_hash *csums_tfm;
1878         struct crypto_hash *cram_hmac_tfm;
1879         struct crypto_hash *integrity_tfm;
1880 };
1881
1882 static int
1883 alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
1884 {
1885         if (!tfm_name[0])
1886                 return NO_ERROR;
1887
1888         *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
1889         if (IS_ERR(*tfm)) {
1890                 *tfm = NULL;
1891                 return err_alg;
1892         }
1893
1894         return NO_ERROR;
1895 }
1896
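/*
 * Allocate every transform requested by new_conf.  Callers follow the
 * pattern used in drbd_adm_net_opts() and drbd_adm_connect() below:
 * allocate first, on success move the individual tfm pointers into the
 * tconn, and on any error free whatever was left in the bundle, e.g.
 *
 *	retcode = alloc_crypto(&crypto, new_conf);
 *	if (retcode != NO_ERROR)
 *		goto fail;
 *	...
 * fail:
 *	free_crypto(&crypto);
 */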
1897 static enum drbd_ret_code
1898 alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
1899 {
1900         char hmac_name[CRYPTO_MAX_ALG_NAME];
1901         enum drbd_ret_code rv;
1902
1903         rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
1904                        ERR_CSUMS_ALG);
1905         if (rv != NO_ERROR)
1906                 return rv;
1907         rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
1908                        ERR_VERIFY_ALG);
1909         if (rv != NO_ERROR)
1910                 return rv;
1911         rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
1912                        ERR_INTEGRITY_ALG);
1913         if (rv != NO_ERROR)
1914                 return rv;
1915         if (new_conf->cram_hmac_alg[0] != 0) {
1916                 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
1917                          new_conf->cram_hmac_alg);
1918
1919                 rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
1920                                ERR_AUTH_ALG);
1921         }
1922
1923         return rv;
1924 }
1925
1926 static void free_crypto(struct crypto *crypto)
1927 {
1928         crypto_free_hash(crypto->cram_hmac_tfm);
1929         crypto_free_hash(crypto->integrity_tfm);
1930         crypto_free_hash(crypto->csums_tfm);
1931         crypto_free_hash(crypto->verify_tfm);
1932 }
1933
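/*
 * Change the net options of an established connection.  tconn->net_conf
 * is RCU protected: we build a complete replacement, validate it, publish
 * it with rcu_assign_pointer(), and free the old structure only after
 * synchronize_rcu().  A minimal sketch of that update (locking and error
 * handling omitted):
 *
 *	new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
 *	*new_conf = *old_conf;
 *	... apply netlink attributes to new_conf ...
 *	rcu_assign_pointer(tconn->net_conf, new_conf);
 *	synchronize_rcu();
 *	kfree(old_conf);
 */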
1934 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
1935 {
1936         enum drbd_ret_code retcode;
1937         struct drbd_tconn *tconn;
1938         struct net_conf *old_conf, *new_conf = NULL;
1939         int err;
1940         int ovr; /* online verify running */
1941         int rsr; /* re-sync running */
1942         struct crypto crypto = { };
1943
1944         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
1945         if (!adm_ctx.reply_skb)
1946                 return retcode;
1947         if (retcode != NO_ERROR)
1948                 goto out;
1949
1950         tconn = adm_ctx.tconn;
1951
1952         new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
1953         if (!new_conf) {
1954                 retcode = ERR_NOMEM;
1955                 goto out;
1956         }
1957
1958         conn_reconfig_start(tconn);
1959
1960         mutex_lock(&tconn->data.mutex);
1961         mutex_lock(&tconn->conf_update);
1962         old_conf = tconn->net_conf;
1963
1964         if (!old_conf) {
1965                 drbd_msg_put_info("net conf missing, try connect");
1966                 retcode = ERR_INVALID_REQUEST;
1967                 goto fail;
1968         }
1969
1970         *new_conf = *old_conf;
1971         if (should_set_defaults(info))
1972                 set_net_conf_defaults(new_conf);
1973
1974         err = net_conf_from_attrs_for_change(new_conf, info);
1975         if (err && err != -ENOMSG) {
1976                 retcode = ERR_MANDATORY_TAG;
1977                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1978                 goto fail;
1979         }
1980
1981         retcode = check_net_options(tconn, new_conf);
1982         if (retcode != NO_ERROR)
1983                 goto fail;
1984
1985         /* re-sync running */
1986         rsr = conn_resync_running(tconn);
1987         if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
1988                 retcode = ERR_CSUMS_RESYNC_RUNNING;
1989                 goto fail;
1990         }
1991
1992         /* online verify running */
1993         ovr = conn_ov_running(tconn);
1994         if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
1995                 retcode = ERR_VERIFY_RUNNING;
1996                 goto fail;
1997         }
1998
1999         retcode = alloc_crypto(&crypto, new_conf);
2000         if (retcode != NO_ERROR)
2001                 goto fail;
2002
2003         rcu_assign_pointer(tconn->net_conf, new_conf);
2004
2005         if (!rsr) {
2006                 crypto_free_hash(tconn->csums_tfm);
2007                 tconn->csums_tfm = crypto.csums_tfm;
2008                 crypto.csums_tfm = NULL;
2009         }
2010         if (!ovr) {
2011                 crypto_free_hash(tconn->verify_tfm);
2012                 tconn->verify_tfm = crypto.verify_tfm;
2013                 crypto.verify_tfm = NULL;
2014         }
2015
2016         crypto_free_hash(tconn->integrity_tfm);
2017         tconn->integrity_tfm = crypto.integrity_tfm;
2018         if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
2019                 /* Do this without trying to take tconn->data.mutex again.  */
2020                 __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);
2021
2022         crypto_free_hash(tconn->cram_hmac_tfm);
2023         tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
2024
2025         mutex_unlock(&tconn->conf_update);
2026         mutex_unlock(&tconn->data.mutex);
2027         synchronize_rcu();
2028         kfree(old_conf);
2029
2030         if (tconn->cstate >= C_WF_REPORT_PARAMS)
2031                 drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
2032
2033         goto done;
2034
2035  fail:
2036         mutex_unlock(&tconn->conf_update);
2037         mutex_unlock(&tconn->data.mutex);
2038         free_crypto(&crypto);
2039         kfree(new_conf);
2040  done:
2041         conn_reconfig_done(tconn);
2042  out:
2043         drbd_adm_finish(info, retcode);
2044         return 0;
2045 }
2046
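/*
 * Establish a new connection.  We first make sure no existing connection
 * already uses the requested local or peer address, then allocate and
 * validate the net_conf and the crypto transforms, install both under
 * conf_update, remember the endpoint addresses, and finally request
 * C_UNCONNECTED so the receiver can start connecting.
 */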
2047 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
2048 {
2049         struct drbd_conf *mdev;
2050         struct net_conf *old_conf, *new_conf = NULL;
2051         struct crypto crypto = { };
2052         struct drbd_tconn *tconn;
2053         enum drbd_ret_code retcode;
2054         int i;
2055         int err;
2056
2057         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
2058
2059         if (!adm_ctx.reply_skb)
2060                 return retcode;
2061         if (retcode != NO_ERROR)
2062                 goto out;
2063         if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
2064                 drbd_msg_put_info("connection endpoint(s) missing");
2065                 retcode = ERR_INVALID_REQUEST;
2066                 goto out;
2067         }
2068
2069         /* No need for _rcu here. All reconfiguration is
2070          * strictly serialized on genl_lock(). We are protected against
2071          * concurrent reconfiguration/addition/deletion */
2072         list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
2073                 if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
2074                     !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
2075                         retcode = ERR_LOCAL_ADDR;
2076                         goto out;
2077                 }
2078
2079                 if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
2080                     !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
2081                         retcode = ERR_PEER_ADDR;
2082                         goto out;
2083                 }
2084         }
2085
2086         tconn = adm_ctx.tconn;
2087         conn_reconfig_start(tconn);
2088
2089         if (tconn->cstate > C_STANDALONE) {
2090                 retcode = ERR_NET_CONFIGURED;
2091                 goto fail;
2092         }
2093
2094         /* allocation not in the IO path, drbdsetup / netlink process context */
2095         new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
2096         if (!new_conf) {
2097                 retcode = ERR_NOMEM;
2098                 goto fail;
2099         }
2100
2101         set_net_conf_defaults(new_conf);
2102
2103         err = net_conf_from_attrs(new_conf, info);
2104         if (err && err != -ENOMSG) {
2105                 retcode = ERR_MANDATORY_TAG;
2106                 drbd_msg_put_info(from_attrs_err_to_txt(err));
2107                 goto fail;
2108         }
2109
2110         retcode = check_net_options(tconn, new_conf);
2111         if (retcode != NO_ERROR)
2112                 goto fail;
2113
2114         retcode = alloc_crypto(&crypto, new_conf);
2115         if (retcode != NO_ERROR)
2116                 goto fail;
2117
2118         ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2119
2120         conn_flush_workqueue(tconn);
2121
2122         mutex_lock(&tconn->conf_update);
2123         old_conf = tconn->net_conf;
2124         if (old_conf) {
2125                 retcode = ERR_NET_CONFIGURED;
2126                 mutex_unlock(&tconn->conf_update);
2127                 goto fail;
2128         }
2129         rcu_assign_pointer(tconn->net_conf, new_conf);
2130
2131         conn_free_crypto(tconn);
2132         tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
2133         tconn->integrity_tfm = crypto.integrity_tfm;
2134         tconn->csums_tfm = crypto.csums_tfm;
2135         tconn->verify_tfm = crypto.verify_tfm;
2136
2137         tconn->my_addr_len = nla_len(adm_ctx.my_addr);
2138         memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
2139         tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
2140         memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);
2141
2142         mutex_unlock(&tconn->conf_update);
2143
2144         rcu_read_lock();
2145         idr_for_each_entry(&tconn->volumes, mdev, i) {
2146                 mdev->send_cnt = 0;
2147                 mdev->recv_cnt = 0;
2148         }
2149         rcu_read_unlock();
2150
2151         retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
2152
2153         conn_reconfig_done(tconn);
2154         drbd_adm_finish(info, retcode);
2155         return 0;
2156
2157 fail:
2158         free_crypto(&crypto);
2159         kfree(new_conf);
2160
2161         conn_reconfig_done(tconn);
2162 out:
2163         drbd_adm_finish(info, retcode);
2164         return 0;
2165 }
2166
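/*
 * Try to reach C_DISCONNECTING, retrying with adjusted disk states where
 * the cluster wide state change asks for it: SS_PRIMARY_NOP means the
 * state engine wants to see the peer's disk outdated first,
 * SS_CW_FAILED_BY_PEER means the peer wants to see ours outdated.  On
 * success we also stop the receiver thread and force C_STANDALONE as a
 * race breaker.
 */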
2167 static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
2168 {
2169         enum drbd_state_rv rv;
2170
2171         rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2172                         force ? CS_HARD : 0);
2173
2174         switch (rv) {
2175         case SS_NOTHING_TO_DO:
2176                 break;
2177         case SS_ALREADY_STANDALONE:
2178                 return SS_SUCCESS;
2179         case SS_PRIMARY_NOP:
2180                 /* Our state checking code wants to see the peer outdated. */
2181                 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
2182                                                 pdsk, D_OUTDATED), CS_VERBOSE);
2183                 break;
2184         case SS_CW_FAILED_BY_PEER:
2185                 /* The peer probably wants to see us outdated. */
2186                 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
2187                                                         disk, D_OUTDATED), 0);
2188                 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
2189                         rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2190                                         CS_HARD);
2191                 }
2192                 break;
2193         default:;
2194                 /* no special handling necessary */
2195         }
2196
2197         if (rv >= SS_SUCCESS) {
2198                 enum drbd_state_rv rv2;
2199                 /* No one else can reconfigure the network while I am here.
2200                  * The state handling only uses drbd_thread_stop_nowait();
2201                  * we want to really wait here until the receiver is no more.
2202                  */
2203                 drbd_thread_stop(&adm_ctx.tconn->receiver);
2204
2205                 /* Race breaker.  This additional state change request may be
2206                  * necessary, if this was a forced disconnect during a receiver
2207                  * restart.  We may have "killed" the receiver thread just
2208                  * after drbdd_init() returned.  Typically, we should be
2209                  * C_STANDALONE already, now, and this becomes a no-op.
2210                  */
2211                 rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
2212                                 CS_VERBOSE | CS_HARD);
2213                 if (rv2 < SS_SUCCESS)
2214                         conn_err(tconn,
2215                                 "unexpected rv2=%d in conn_try_disconnect()\n",
2216                                 rv2);
2217         }
2218         return rv;
2219 }
2220
2221 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
2222 {
2223         struct disconnect_parms parms;
2224         struct drbd_tconn *tconn;
2225         enum drbd_state_rv rv;
2226         enum drbd_ret_code retcode;
2227         int err;
2228
2229         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
2230         if (!adm_ctx.reply_skb)
2231                 return retcode;
2232         if (retcode != NO_ERROR)
2233                 goto fail;
2234
2235         tconn = adm_ctx.tconn;
2236         memset(&parms, 0, sizeof(parms));
2237         if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
2238                 err = disconnect_parms_from_attrs(&parms, info);
2239                 if (err) {
2240                         retcode = ERR_MANDATORY_TAG;
2241                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2242                         goto fail;
2243                 }
2244         }
2245
2246         rv = conn_try_disconnect(tconn, parms.force_disconnect);
2247         if (rv < SS_SUCCESS)
2248                 retcode = rv;  /* FIXME: Type mismatch. */
2249         else
2250                 retcode = NO_ERROR;
2251  fail:
2252         drbd_adm_finish(info, retcode);
2253         return 0;
2254 }
2255
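/*
 * After an online grow, decide who resyncs the new area: with different
 * roles the primary becomes sync source, with equal roles the
 * RESOLVE_CONFLICTS flag breaks the tie.  The sync target side simply
 * waits for the source's new UUID (C_WF_SYNC_UUID).
 */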
2256 void resync_after_online_grow(struct drbd_conf *mdev)
2257 {
2258         int iass; /* I am sync source */
2259
2260         dev_info(DEV, "Resync of new storage after online grow\n");
2261         if (mdev->state.role != mdev->state.peer)
2262                 iass = (mdev->state.role == R_PRIMARY);
2263         else
2264                 iass = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
2265
2266         if (iass)
2267                 drbd_start_resync(mdev, C_SYNC_SOURCE);
2268         else
2269                 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
2270 }
2271
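/*
 * Resize handler.  Resizing is refused while a resync runs or when both
 * nodes are secondary, and skipping the resync of the new area
 * (no_resync) needs at least agreed protocol version 93.  A user supplied
 * size is stored in a fresh RCU copy of disk_conf before
 * drbd_determine_dev_size() applies the change and the peer is notified.
 */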
2272 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2273 {
2274         struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
2275         struct resize_parms rs;
2276         struct drbd_conf *mdev;
2277         enum drbd_ret_code retcode;
2278         enum determine_dev_size dd;
2279         enum dds_flags ddsf;
2280         sector_t u_size;
2281         int err;
2282
2283         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2284         if (!adm_ctx.reply_skb)
2285                 return retcode;
2286         if (retcode != NO_ERROR)
2287                 goto fail;
2288
2289         memset(&rs, 0, sizeof(struct resize_parms));
2290         if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
2291                 err = resize_parms_from_attrs(&rs, info);
2292                 if (err) {
2293                         retcode = ERR_MANDATORY_TAG;
2294                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2295                         goto fail;
2296                 }
2297         }
2298
2299         mdev = adm_ctx.mdev;
2300         if (mdev->state.conn > C_CONNECTED) {
2301                 retcode = ERR_RESIZE_RESYNC;
2302                 goto fail;
2303         }
2304
2305         if (mdev->state.role == R_SECONDARY &&
2306             mdev->state.peer == R_SECONDARY) {
2307                 retcode = ERR_NO_PRIMARY;
2308                 goto fail;
2309         }
2310
2311         if (!get_ldev(mdev)) {
2312                 retcode = ERR_NO_DISK;
2313                 goto fail;
2314         }
2315
2316         if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
2317                 retcode = ERR_NEED_APV_93;
2318                 goto fail_ldev;
2319         }
2320
2321         rcu_read_lock();
2322         u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
2323         rcu_read_unlock();
2324         if (u_size != (sector_t)rs.resize_size) {
2325                 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
2326                 if (!new_disk_conf) {
2327                         retcode = ERR_NOMEM;
2328                         goto fail_ldev;
2329                 }
2330         }
2331
2332         if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
2333                 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
2334
2335         if (new_disk_conf) {
2336                 mutex_lock(&mdev->tconn->conf_update);
2337                 old_disk_conf = mdev->ldev->disk_conf;
2338                 *new_disk_conf = *old_disk_conf;
2339                 new_disk_conf->disk_size = (sector_t)rs.resize_size;
2340                 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
2341                 mutex_unlock(&mdev->tconn->conf_update);
2342                 synchronize_rcu();
2343                 kfree(old_disk_conf);
2344         }
2345
2346         ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
2347         dd = drbd_determine_dev_size(mdev, ddsf);
2348         drbd_md_sync(mdev);
2349         put_ldev(mdev);
2350         if (dd == dev_size_error) {
2351                 retcode = ERR_NOMEM_BITMAP;
2352                 goto fail;
2353         }
2354
2355         if (mdev->state.conn == C_CONNECTED) {
2356                 if (dd == grew)
2357                         set_bit(RESIZE_PENDING, &mdev->flags);
2358
2359                 drbd_send_uuids(mdev);
2360                 drbd_send_sizes(mdev, 1, ddsf);
2361         }
2362
2363  fail:
2364         drbd_adm_finish(info, retcode);
2365         return 0;
2366
2367  fail_ldev:
2368         put_ldev(mdev);
2369         goto fail;
2370 }
2371
2372 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
2373 {
2374         enum drbd_ret_code retcode;
2375         struct drbd_tconn *tconn;
2376         struct res_opts res_opts;
2377         int err;
2378
2379         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
2380         if (!adm_ctx.reply_skb)
2381                 return retcode;
2382         if (retcode != NO_ERROR)
2383                 goto fail;
2384         tconn = adm_ctx.tconn;
2385
2386         res_opts = tconn->res_opts;
2387         if (should_set_defaults(info))
2388                 set_res_opts_defaults(&res_opts);
2389
2390         err = res_opts_from_attrs(&res_opts, info);
2391         if (err && err != -ENOMSG) {
2392                 retcode = ERR_MANDATORY_TAG;
2393                 drbd_msg_put_info(from_attrs_err_to_txt(err));
2394                 goto fail;
2395         }
2396
2397         err = set_resource_options(tconn, &res_opts);
2398         if (err) {
2399                 retcode = ERR_INVALID_REQUEST;
2400                 if (err == -ENOMEM)
2401                         retcode = ERR_NOMEM;
2402         }
2403
2404 fail:
2405         drbd_adm_finish(info, retcode);
2406         return 0;
2407 }
2408
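/*
 * "invalidate": throw away the local data and become sync target of a
 * full resync.  Pending bitmap IO from a previous resync is flushed
 * first.  If there is no connection (SS_NEED_CONNECTION), we fall back to
 * marking the local disk D_INCONSISTENT, re-checking the connection state
 * under req_lock so we do not race with an incoming connect.
 */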
2409 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
2410 {
2411         struct drbd_conf *mdev;
2412         int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2413
2414         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2415         if (!adm_ctx.reply_skb)
2416                 return retcode;
2417         if (retcode != NO_ERROR)
2418                 goto out;
2419
2420         mdev = adm_ctx.mdev;
2421
2422         /* If there is still bitmap IO pending, probably because of a previous
2423          * resync just being finished, wait for it before requesting a new resync.
2424          * Also wait for its after_state_ch(). */
2425         drbd_suspend_io(mdev);
2426         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2427         drbd_flush_workqueue(mdev);
2428
2429         retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
2430
2431         if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
2432                 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2433
2434         while (retcode == SS_NEED_CONNECTION) {
2435                 spin_lock_irq(&mdev->tconn->req_lock);
2436                 if (mdev->state.conn < C_CONNECTED)
2437                         retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
2438                 spin_unlock_irq(&mdev->tconn->req_lock);
2439
2440                 if (retcode != SS_NEED_CONNECTION)
2441                         break;
2442
2443                 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2444         }
2445         drbd_resume_io(mdev);
2446
2447 out:
2448         drbd_adm_finish(info, retcode);
2449         return 0;
2450 }
2451
2452 static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2453                 union drbd_state mask, union drbd_state val)
2454 {
2455         enum drbd_ret_code retcode;
2456
2457         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2458         if (!adm_ctx.reply_skb)
2459                 return retcode;
2460         if (retcode != NO_ERROR)
2461                 goto out;
2462
2463         retcode = drbd_request_state(adm_ctx.mdev, mask, val);
2464 out:
2465         drbd_adm_finish(info, retcode);
2466         return 0;
2467 }
2468
2469 static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
2470 {
2471         int rv;
2472
2473         rv = drbd_bmio_set_n_write(mdev);
2474         drbd_suspend_al(mdev);
2475         return rv;
2476 }
2477
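/*
 * Invalidate the peer's data: the counterpart of invalidate above, we
 * become sync source (C_STARTING_SYNC_S).  If there is no connection yet
 * and we are primary, mark the peer disk D_INCONSISTENT and set all bits
 * in our bitmap, so the resync happening on the next connect becomes a
 * full one.
 */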
2478 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
2479 {
2480         int retcode; /* drbd_ret_code, drbd_state_rv */
2481         struct drbd_conf *mdev;
2482
2483         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2484         if (!adm_ctx.reply_skb)
2485                 return retcode;
2486         if (retcode != NO_ERROR)
2487                 goto out;
2488
2489         mdev = adm_ctx.mdev;
2490
2491         /* If there is still bitmap IO pending, probably because of a previous
2492          * resync just being finished, wait for it before requesting a new resync.
2493          * Also wait for its after_state_ch(). */
2494         drbd_suspend_io(mdev);
2495         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2496         drbd_flush_workqueue(mdev);
2497
2498         retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
2499         if (retcode < SS_SUCCESS) {
2500                 if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
2501                         /* The peer will get a resync upon connect anyway.
2502                          * Just make that into a full resync. */
2503                         retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
2504                         if (retcode >= SS_SUCCESS) {
2505                                 if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
2506                                                    "set_n_write from invalidate_peer",
2507                                                    BM_LOCKED_SET_ALLOWED))
2508                                         retcode = ERR_IO_MD_DISK;
2509                         }
2510                 } else
2511                         retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
2512         }
2513         drbd_resume_io(mdev);
2514
2515 out:
2516         drbd_adm_finish(info, retcode);
2517         return 0;
2518 }
2519
2520 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
2521 {
2522         enum drbd_ret_code retcode;
2523
2524         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2525         if (!adm_ctx.reply_skb)
2526                 return retcode;
2527         if (retcode != NO_ERROR)
2528                 goto out;
2529
2530         if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
2531                 retcode = ERR_PAUSE_IS_SET;
2532 out:
2533         drbd_adm_finish(info, retcode);
2534         return 0;
2535 }
2536
2537 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
2538 {
2539         union drbd_dev_state s;
2540         enum drbd_ret_code retcode;
2541
2542         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2543         if (!adm_ctx.reply_skb)
2544                 return retcode;
2545         if (retcode != NO_ERROR)
2546                 goto out;
2547
2548         if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
2549                 s = adm_ctx.mdev->state;
2550                 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
2551                         retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
2552                                   s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
2553                 } else {
2554                         retcode = ERR_PAUSE_IS_CLEAR;
2555                 }
2556         }
2557
2558 out:
2559         drbd_adm_finish(info, retcode);
2560         return 0;
2561 }
2562
2563 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
2564 {
2565         return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
2566 }
2567
2568 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
2569 {
2570         struct drbd_conf *mdev;
2571         int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2572
2573         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2574         if (!adm_ctx.reply_skb)
2575                 return retcode;
2576         if (retcode != NO_ERROR)
2577                 goto out;
2578
2579         mdev = adm_ctx.mdev;
2580         if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
2581                 drbd_uuid_new_current(mdev);
2582                 clear_bit(NEW_CUR_UUID, &mdev->flags);
2583         }
2584         drbd_suspend_io(mdev);
2585         retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
2586         if (retcode == SS_SUCCESS) {
2587                 if (mdev->state.conn < C_CONNECTED)
2588                         tl_clear(mdev->tconn);
2589                 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
2590                         tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
2591         }
2592         drbd_resume_io(mdev);
2593
2594 out:
2595         drbd_adm_finish(info, retcode);
2596         return 0;
2597 }
2598
2599 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
2600 {
2601         return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
2602 }
2603
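/*
 * Emit the DRBD_NLA_CFG_CONTEXT nest: resource name, volume number and,
 * if known, both endpoint addresses.  This follows the usual netlink
 * nesting idiom: nla_nest_start(), a series of nla_put_*() calls, then
 * nla_nest_end() on success or nla_nest_cancel() if the skb ran out of
 * room.
 */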
2604 int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
2605 {
2606         struct nlattr *nla;
2607         nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
2608         if (!nla)
2609                 goto nla_put_failure;
2610         if (vnr != VOLUME_UNSPECIFIED &&
2611             nla_put_u32(skb, T_ctx_volume, vnr))
2612                 goto nla_put_failure;
2613         if (nla_put_string(skb, T_ctx_resource_name, tconn->name))
2614                 goto nla_put_failure;
2615         if (tconn->my_addr_len &&
2616             nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr))
2617                 goto nla_put_failure;
2618         if (tconn->peer_addr_len &&
2619             nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr))
2620                 goto nla_put_failure;
2621         nla_nest_end(skb, nla);
2622         return 0;
2623
2624 nla_put_failure:
2625         if (nla)
2626                 nla_nest_cancel(skb, nla);
2627         return -EMSGSIZE;
2628 }
2629
2630 int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
2631                 const struct sib_info *sib)
2632 {
2633         struct state_info *si = NULL; /* for sizeof(si->member); */
2634         struct net_conf *nc;
2635         struct nlattr *nla;
2636         int got_ldev;
2637         int err = 0;
2638         int exclude_sensitive;
2639
2640         /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
2641          * to.  So we better exclude_sensitive information.
2642          *
2643          * If sib == NULL, this is drbd_adm_get_status, executed synchronously
2644          * in the context of the requesting user process. Exclude sensitive
2645          * information, unless current has superuser.
2646          *
2647          * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
2648          * relies on the current implementation of netlink_dump(), which
2649          * executes the dump callback successively from netlink_recvmsg(),
2650          * always in the context of the receiving process */
2651         exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
2652
2653         got_ldev = get_ldev(mdev);
2654
2655         /* We still need to add connection name and volume number information.
2656          * Minor number is in drbd_genlmsghdr. */
2657         if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
2658                 goto nla_put_failure;
2659
2660         if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
2661                 goto nla_put_failure;
2662
2663         rcu_read_lock();
2664         if (got_ldev)
2665                 if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
2666                         goto nla_put_failure;
2667
2668         nc = rcu_dereference(mdev->tconn->net_conf);
2669         if (nc)
2670                 err = net_conf_to_skb(skb, nc, exclude_sensitive);
2671         rcu_read_unlock();
2672         if (err)
2673                 goto nla_put_failure;
2674
2675         nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
2676         if (!nla)
2677                 goto nla_put_failure;
2678         if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
2679             nla_put_u32(skb, T_current_state, mdev->state.i) ||
2680             nla_put_u64(skb, T_ed_uuid, mdev->ed_uuid) ||
2681             nla_put_u64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev)) ||
2682             nla_put_u64(skb, T_send_cnt, mdev->send_cnt) ||
2683             nla_put_u64(skb, T_recv_cnt, mdev->recv_cnt) ||
2684             nla_put_u64(skb, T_read_cnt, mdev->read_cnt) ||
2685             nla_put_u64(skb, T_writ_cnt, mdev->writ_cnt) ||
2686             nla_put_u64(skb, T_al_writ_cnt, mdev->al_writ_cnt) ||
2687             nla_put_u64(skb, T_bm_writ_cnt, mdev->bm_writ_cnt) ||
2688             nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&mdev->ap_bio_cnt)) ||
2689             nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&mdev->ap_pending_cnt)) ||
2690             nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&mdev->rs_pending_cnt)))
2691                 goto nla_put_failure;
2692
2693         if (got_ldev) {
2694                 int err;
2695
2696                 spin_lock_irq(&mdev->ldev->md.uuid_lock);
2697                 err = nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
2698                 spin_unlock_irq(&mdev->ldev->md.uuid_lock);
2699
2700                 if (err)
2701                         goto nla_put_failure;
2702
2703                 if (nla_put_u32(skb, T_disk_flags, mdev->ldev->md.flags) ||
2704                     nla_put_u64(skb, T_bits_total, drbd_bm_bits(mdev)) ||
2705                     nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(mdev)))
2706                         goto nla_put_failure;
2707                 if (C_SYNC_SOURCE <= mdev->state.conn &&
2708                     C_PAUSED_SYNC_T >= mdev->state.conn) {
2709                         if (nla_put_u64(skb, T_bits_rs_total, mdev->rs_total) ||
2710                             nla_put_u64(skb, T_bits_rs_failed, mdev->rs_failed))
2711                                 goto nla_put_failure;
2712                 }
2713         }
2714
2715         if (sib) {
2716                 switch(sib->sib_reason) {
2717                 case SIB_SYNC_PROGRESS:
2718                 case SIB_GET_STATUS_REPLY:
2719                         break;
2720                 case SIB_STATE_CHANGE:
2721                         if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
2722                             nla_put_u32(skb, T_new_state, sib->ns.i))
2723                                 goto nla_put_failure;
2724                         break;
2725                 case SIB_HELPER_POST:
2726                         if (nla_put_u32(skb, T_helper_exit_code,
2727                                         sib->helper_exit_code))
2728                                 goto nla_put_failure;
2729                         /* fall through */
2730                 case SIB_HELPER_PRE:
2731                         if (nla_put_string(skb, T_helper, sib->helper_name))
2732                                 goto nla_put_failure;
2733                         break;
2734                 }
2735         }
2736         nla_nest_end(skb, nla);
2737
2738         if (0)
2739 nla_put_failure:
2740                 err = -EMSGSIZE;
2741         if (got_ldev)
2742                 put_ldev(mdev);
2743         return err;
2744 }
2745
2746 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
2747 {
2748         enum drbd_ret_code retcode;
2749         int err;
2750
2751         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2752         if (!adm_ctx.reply_skb)
2753                 return retcode;
2754         if (retcode != NO_ERROR)
2755                 goto out;
2756
2757         err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
2758         if (err) {
2759                 nlmsg_free(adm_ctx.reply_skb);
2760                 return err;
2761         }
2762 out:
2763         drbd_adm_finish(info, retcode);
2764         return 0;
2765 }
2766
2767 int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
2768 {
2769         struct drbd_conf *mdev;
2770         struct drbd_genlmsghdr *dh;
2771         struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
2772         struct drbd_tconn *tconn = NULL;
2773         struct drbd_tconn *tmp;
2774         unsigned volume = cb->args[1];
2775
2776         /* Open coded, deferred iteration:
2777          * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
2778          *      idr_for_each_entry(&tconn->volumes, mdev, i) {
2779          *        ...
2780          *      }
2781          * }
2782          * where tconn is cb->args[0];
2783          * and i is cb->args[1];
2784          *
2785          * cb->args[2] indicates if we shall loop over all resources,
2786          * or just dump all volumes of a single resource.
2787          *
2788          * This may miss entries inserted after this dump started,
2789          * or entries deleted before they are reached.
2790          *
2791          * We need to make sure the mdev won't disappear while
2792          * we are looking at it, and revalidate our iterators
2793          * on each iteration.
2794          */
2795
2796         /* synchronize with conn_create()/conn_destroy() */
2797         rcu_read_lock();
2798         /* revalidate iterator position */
2799         list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
2800                 if (pos == NULL) {
2801                         /* first iteration */
2802                         pos = tmp;
2803                         tconn = pos;
2804                         break;
2805                 }
2806                 if (tmp == pos) {
2807                         tconn = pos;
2808                         break;
2809                 }
2810         }
2811         if (tconn) {
2812 next_tconn:
2813                 mdev = idr_get_next(&tconn->volumes, &volume);
2814                 if (!mdev) {
2815                         /* No more volumes to dump on this tconn.
2816                          * Advance tconn iterator. */
2817                         pos = list_entry_rcu(tconn->all_tconn.next,
2818                                              struct drbd_tconn, all_tconn);
2819                         /* Did we dump any volume on this tconn yet? */
2820                         if (volume != 0) {
2821                                 /* If we reached the end of the list,
2822                                  * or only a single resource dump was requested,
2823                                  * we are done. */
2824                                 if (&pos->all_tconn == &drbd_tconns || cb->args[2])
2825                                         goto out;
2826                                 volume = 0;
2827                                 tconn = pos;
2828                                 goto next_tconn;
2829                         }
2830                 }
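                     /* If we fall through here with mdev == NULL, volume was 0:
                      * this tconn has no volumes at all, and we still emit a
                      * single message for it below. */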
2831
2832                 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
2833                                 cb->nlh->nlmsg_seq, &drbd_genl_family,
2834                                 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
2835                 if (!dh)
2836                         goto out;
2837
2838                 if (!mdev) {
2839                         /* This is a tconn without a single volume.
2840                          * Surprisingly enough, it may have a network
2841                          * configuration. */
2842                         struct net_conf *nc;
2843                         dh->minor = -1U;
2844                         dh->ret_code = NO_ERROR;
2845                         if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
2846                                 goto cancel;
2847                         nc = rcu_dereference(tconn->net_conf);
2848                         if (nc && net_conf_to_skb(skb, nc, 1) != 0)
2849                                 goto cancel;
2850                         goto done;
2851                 }
2852
2853                 D_ASSERT(mdev->vnr == volume);
2854                 D_ASSERT(mdev->tconn == tconn);
2855
2856                 dh->minor = mdev_to_minor(mdev);
2857                 dh->ret_code = NO_ERROR;
2858
2859                 if (nla_put_status_info(skb, mdev, NULL)) {
2860 cancel:
2861                         genlmsg_cancel(skb, dh);
2862                         goto out;
2863                 }
2864 done:
2865                 genlmsg_end(skb, dh);
2866         }
2867
2868 out:
2869         rcu_read_unlock();
2870         /* where to start the next iteration */
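             /* args[0] is the tconn to resume at; args[1] is the next volume
              * index if we resume on the same tconn, otherwise 0. */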
2871         cb->args[0] = (long)pos;
2872         cb->args[1] = (pos == tconn) ? volume + 1 : 0;
2873
2874         /* If no more tconns/volumes/minors are found, the skb stays empty,
2875          * which terminates the dump. */
2876         return skb->len;
2877 }
2878
2879 /*
2880  * Request status of all resources, or of all volumes within a single resource.
2881  *
2882  * This is a dump, as the answer may not fit in a single reply skb otherwise.
2883  * That means we cannot use the family->attrbuf or other such members, because
2884  * dump is NOT protected by the genl_lock().  During dump, we only have access
2885  * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
2886  *
2887  * Once things are setup properly, we call into get_one_status().
2888  */
2889 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
2890 {
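             /* attributes start after the genetlink header plus the drbd
              * family header */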
2891         const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
2892         struct nlattr *nla;
2893         const char *resource_name;
2894         struct drbd_tconn *tconn;
2895         int maxtype;
2896
2897         /* Is this a followup call? */
2898         if (cb->args[0]) {
2899                 /* ... of a single resource dump,
2900                  * and the resource iterator has been advanced already? */
2901                 if (cb->args[2] && cb->args[2] != cb->args[0])
2902                         return 0; /* DONE. */
2903                 goto dump;
2904         }
2905
2906         /* First call (from netlink_dump_start).  We need to figure out
2907          * which resource(s) the user wants us to dump. */
2908         nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
2909                         nlmsg_attrlen(cb->nlh, hdrlen),
2910                         DRBD_NLA_CFG_CONTEXT);
2911
2912         /* No explicit context given.  Dump all. */
2913         if (!nla)
2914                 goto dump;
2915         maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
2916         nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
2917         if (IS_ERR(nla))
2918                 return PTR_ERR(nla);
2919         /* context given, but no name present? */
2920         if (!nla)
2921                 return -EINVAL;
2922         resource_name = nla_data(nla);
2923         tconn = conn_get_by_name(resource_name);
2924
2925         if (!tconn)
2926                 return -ENODEV;
2927
2928         kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */
2929
2930         /* prime iterators, and set "filter" mode mark:
2931          * only dump this tconn. */
2932         cb->args[0] = (long)tconn;
2933         /* cb->args[1] = 0; passed in this way. */
2934         cb->args[2] = (long)tconn;
2935
2936 dump:
2937         return get_one_status(skb, cb);
2938 }
2939
2940 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
2941 {
2942         enum drbd_ret_code retcode;
2943         struct timeout_parms tp;
2944         int err;
2945
2946         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2947         if (!adm_ctx.reply_skb)
2948                 return retcode;
2949         if (retcode != NO_ERROR)
2950                 goto out;
2951
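             /* most specific condition first: peer disk outdated, then the
              * degraded wait-for-connection case (USE_DEGR_WFC_T set),
              * otherwise the default */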
2952         tp.timeout_type =
2953                 adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
2954                 test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
2955                 UT_DEFAULT;
2956
2957         err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
2958         if (err) {
2959                 nlmsg_free(adm_ctx.reply_skb);
2960                 return err;
2961         }
2962 out:
2963         drbd_adm_finish(info, retcode);
2964         return 0;
2965 }
2966
2967 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
2968 {
2969         struct drbd_conf *mdev;
2970         enum drbd_ret_code retcode;
2971         struct start_ov_parms parms;
2972
2973         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2974         if (!adm_ctx.reply_skb)
2975                 return retcode;
2976         if (retcode != NO_ERROR)
2977                 goto out;
2978
2979         mdev = adm_ctx.mdev;
2980
2981         /* resume from last known position, if possible */
2982         parms.ov_start_sector = mdev->ov_start_sector;
2983         parms.ov_stop_sector = ULLONG_MAX;
2984         if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
2985                 int err = start_ov_parms_from_attrs(&parms, info);
2986                 if (err) {
2987                         retcode = ERR_MANDATORY_TAG;
2988                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2989                         goto out;
2990                 }
2991         }
2992         /* w_make_ov_request expects position to be aligned */
2993         mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
2994         mdev->ov_stop_sector = parms.ov_stop_sector;
2995
2996         /* If there is still bitmap IO pending, e.g. from a previous resync or
2997          * verify run that is just finishing, wait for it before starting a new one. */
2998         drbd_suspend_io(mdev);
2999         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
3000         retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
3001         drbd_resume_io(mdev);
3002 out:
3003         drbd_adm_finish(info, retcode);
3004         return 0;
3005 }
3006
3007
3008 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
3009 {
3010         struct drbd_conf *mdev;
3011         enum drbd_ret_code retcode;
3012         int skip_initial_sync = 0;
3013         int err;
3014         struct new_c_uuid_parms args;
3015
3016         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
3017         if (!adm_ctx.reply_skb)
3018                 return retcode;
3019         if (retcode != NO_ERROR)
3020                 goto out_nolock;
3021
3022         mdev = adm_ctx.mdev;
3023         memset(&args, 0, sizeof(args));
3024         if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
3025                 err = new_c_uuid_parms_from_attrs(&args, info);
3026                 if (err) {
3027                         retcode = ERR_MANDATORY_TAG;
3028                         drbd_msg_put_info(from_attrs_err_to_txt(err));
3029                         goto out_nolock;
3030                 }
3031         }
3032
3033         mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
3034
3035         if (!get_ldev(mdev)) {
3036                 retcode = ERR_NO_DISK;
3037                 goto out;
3038         }
3039
3040         /* this is "skip initial sync": assume both disks are already clean */
3041         if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
3042             mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
3043                 dev_info(DEV, "Preparing to skip initial sync\n");
3044                 skip_initial_sync = 1;
3045         } else if (mdev->state.conn != C_STANDALONE) {
3046                 retcode = ERR_CONNECTED;
3047                 goto out_dec;
3048         }
3049
3050         drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
3051         drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
3052
3053         if (args.clear_bm) {
3054                 err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3055                         "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
3056                 if (err) {
3057                         dev_err(DEV, "Writing bitmap failed with %d\n", err);
3058                         retcode = ERR_IO_MD_DISK;
3059                 }
3060                 if (skip_initial_sync) {
3061                         drbd_send_uuids_skip_initial_sync(mdev);
3062                         _drbd_uuid_set(mdev, UI_BITMAP, 0);
3063                         drbd_print_uuids(mdev, "cleared bitmap UUID");
3064                         spin_lock_irq(&mdev->tconn->req_lock);
3065                         _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3066                                         CS_VERBOSE, NULL);
3067                         spin_unlock_irq(&mdev->tconn->req_lock);
3068                 }
3069         }
3070
3071         drbd_md_sync(mdev);
3072 out_dec:
3073         put_ldev(mdev);
3074 out:
3075         mutex_unlock(mdev->state_mutex);
3076 out_nolock:
3077         drbd_adm_finish(info, retcode);
3078         return 0;
3079 }
3080
3081 static enum drbd_ret_code
3082 drbd_check_resource_name(const char *name)
3083 {
3084         if (!name || !name[0]) {
3085                 drbd_msg_put_info("resource name missing");
3086                 return ERR_MANDATORY_TAG;
3087         }
3088         /* if we want to use these in sysfs/configfs/debugfs some day,
3089          * we must not allow slashes */
3090         if (strchr(name, '/')) {
3091                 drbd_msg_put_info("invalid resource name");
3092                 return ERR_INVALID_REQUEST;
3093         }
3094         return NO_ERROR;
3095 }
3096
3097 int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
3098 {
3099         enum drbd_ret_code retcode;
3100         struct res_opts res_opts;
3101         int err;
3102
3103         retcode = drbd_adm_prepare(skb, info, 0);
3104         if (!adm_ctx.reply_skb)
3105                 return retcode;
3106         if (retcode != NO_ERROR)
3107                 goto out;
3108
3109         set_res_opts_defaults(&res_opts);
3110         err = res_opts_from_attrs(&res_opts, info);
3111         if (err && err != -ENOMSG) {
3112                 retcode = ERR_MANDATORY_TAG;
3113                 drbd_msg_put_info(from_attrs_err_to_txt(err));
3114                 goto out;
3115         }
3116
3117         retcode = drbd_check_resource_name(adm_ctx.resource_name);
3118         if (retcode != NO_ERROR)
3119                 goto out;
3120
3121         if (adm_ctx.tconn) {
3122                 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
3123                         retcode = ERR_INVALID_REQUEST;
3124                         drbd_msg_put_info("resource exists");
3125                 }
3126                 /* else: still NO_ERROR */
3127                 goto out;
3128         }
3129
3130         if (!conn_create(adm_ctx.resource_name, &res_opts))
3131                 retcode = ERR_NOMEM;
3132 out:
3133         drbd_adm_finish(info, retcode);
3134         return 0;
3135 }
3136
3137 int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
3138 {
3139         struct drbd_genlmsghdr *dh = info->userhdr;
3140         enum drbd_ret_code retcode;
3141
3142         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
3143         if (!adm_ctx.reply_skb)
3144                 return retcode;
3145         if (retcode != NO_ERROR)
3146                 goto out;
3147
3148         if (dh->minor > MINORMASK) {
3149                 drbd_msg_put_info("requested minor out of range");
3150                 retcode = ERR_INVALID_REQUEST;
3151                 goto out;
3152         }
3153         if (adm_ctx.volume > DRBD_VOLUME_MAX) {
3154                 drbd_msg_put_info("requested volume id out of range");
3155                 retcode = ERR_INVALID_REQUEST;
3156                 goto out;
3157         }
3158
3159         /* drbd_adm_prepare made sure already
3160          * that mdev->tconn and mdev->vnr match the request. */
3161         if (adm_ctx.mdev) {
3162                 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
3163                         retcode = ERR_MINOR_EXISTS;
3164                 /* else: still NO_ERROR */
3165                 goto out;
3166         }
3167
3168         retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
3169 out:
3170         drbd_adm_finish(info, retcode);
3171         return 0;
3172 }
3173
3174 static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
3175 {
3176         if (mdev->state.disk == D_DISKLESS &&
3177             /* we deliberately do not require mdev->state.conn == C_STANDALONE;
3178              * we may want to delete a minor from a live replication group.
3179              */
3180             mdev->state.role == R_SECONDARY) {
3181                 _drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS),
3182                                     CS_VERBOSE + CS_WAIT_COMPLETE);
3183                 idr_remove(&mdev->tconn->volumes, mdev->vnr);
3184                 idr_remove(&minors, mdev_to_minor(mdev));
3185                 del_gendisk(mdev->vdisk);
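                     /* let concurrent RCU readers finish with the removed idr
                      * entries before we drop our reference below */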
3186                 synchronize_rcu();
3187                 kref_put(&mdev->kref, &drbd_minor_destroy);
3188                 return NO_ERROR;
3189         } else
3190                 return ERR_MINOR_CONFIGURED;
3191 }
3192
3193 int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
3194 {
3195         enum drbd_ret_code retcode;
3196
3197         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
3198         if (!adm_ctx.reply_skb)
3199                 return retcode;
3200         if (retcode != NO_ERROR)
3201                 goto out;
3202
3203         retcode = adm_delete_minor(adm_ctx.mdev);
3204 out:
3205         drbd_adm_finish(info, retcode);
3206         return 0;
3207 }
3208
3209 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
3210 {
3211         int retcode; /* enum drbd_ret_code or enum drbd_state_rv */
3212         struct drbd_conf *mdev;
3213         unsigned i;
3214
3215         retcode = drbd_adm_prepare(skb, info, 0);
3216         if (!adm_ctx.reply_skb)
3217                 return retcode;
3218         if (retcode != NO_ERROR)
3219                 goto out;
3220
3221         if (!adm_ctx.tconn) {
3222                 retcode = ERR_RES_NOT_KNOWN;
3223                 goto out;
3224         }
3225
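             /* Tear down in order: demote all volumes, disconnect, detach,
              * stop the worker, delete the volumes, then delete the connection. */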
3226         /* demote */
3227         idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3228                 retcode = drbd_set_role(mdev, R_SECONDARY, 0);
3229                 if (retcode < SS_SUCCESS) {
3230                         drbd_msg_put_info("failed to demote");
3231                         goto out;
3232                 }
3233         }
3234
3235         retcode = conn_try_disconnect(adm_ctx.tconn, 0);
3236         if (retcode < SS_SUCCESS) {
3237                 drbd_msg_put_info("failed to disconnect");
3238                 goto out;
3239         }
3240
3241         /* detach */
3242         idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3243                 retcode = adm_detach(mdev, 0);
3244                 if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
3245                         drbd_msg_put_info("failed to detach");
3246                         goto out;
3247                 }
3248         }
3249
3250         /* If we reach this, all volumes (of this tconn) are Secondary,
3251          * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
3252          * actually stopped; state handling only does drbd_thread_stop_nowait(). */
3253         drbd_thread_stop(&adm_ctx.tconn->worker);
3254
3255         /* Now, nothing can fail anymore */
3256
3257         /* delete volumes */
3258         idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3259                 retcode = adm_delete_minor(mdev);
3260                 if (retcode != NO_ERROR) {
3261                         /* "can not happen" */
3262                         drbd_msg_put_info("failed to delete volume");
3263                         goto out;
3264                 }
3265         }
3266
3267         /* delete connection */
3268         if (conn_lowest_minor(adm_ctx.tconn) < 0) {
3269                 list_del_rcu(&adm_ctx.tconn->all_tconn);
3270                 synchronize_rcu();
3271                 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
3272
3273                 retcode = NO_ERROR;
3274         } else {
3275                 /* "can not happen" */
3276                 retcode = ERR_RES_IN_USE;
3277                 drbd_msg_put_info("failed to delete connection");
3278         }
3279         goto out;
3280 out:
3281         drbd_adm_finish(info, retcode);
3282         return 0;
3283 }
3284
3285 int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
3286 {
3287         enum drbd_ret_code retcode;
3288
3289         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
3290         if (!adm_ctx.reply_skb)
3291                 return retcode;
3292         if (retcode != NO_ERROR)
3293                 goto out;
3294
3295         if (conn_lowest_minor(adm_ctx.tconn) < 0) {
3296                 list_del_rcu(&adm_ctx.tconn->all_tconn);
3297                 synchronize_rcu();
3298                 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
3299
3300                 retcode = NO_ERROR;
3301         } else {
3302                 retcode = ERR_RES_IN_USE;
3303         }
3304
3305         if (retcode == NO_ERROR)
3306                 drbd_thread_stop(&adm_ctx.tconn->worker);
3307 out:
3308         drbd_adm_finish(info, retcode);
3309         return 0;
3310 }
3311
3312 void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
3313 {
3314         static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
3315         struct sk_buff *msg;
3316         struct drbd_genlmsghdr *d_out;
3317         unsigned seq;
3318         int err = -ENOMEM;
3319
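             /* rate-limit sync progress broadcasts to at most one per second */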
3320         if (sib->sib_reason == SIB_SYNC_PROGRESS) {
3321                 if (time_after(jiffies, mdev->rs_last_bcast + HZ))
3322                         mdev->rs_last_bcast = jiffies;
3323                 else
3324                         return;
3325         }
3326
3327         seq = atomic_inc_return(&drbd_genl_seq);
3328         msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
3329         if (!msg)
3330                 goto failed;
3331
3332         err = -EMSGSIZE;
3333         d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
3334         if (!d_out) /* cannot happen, but anyway. */
3335                 goto nla_put_failure;
3336         d_out->minor = mdev_to_minor(mdev);
3337         d_out->ret_code = NO_ERROR;
3338
3339         if (nla_put_status_info(msg, mdev, sib))
3340                 goto nla_put_failure;
3341         genlmsg_end(msg, d_out);
3342         err = drbd_genl_multicast_events(msg, 0);
3343         /* msg has been consumed or freed in netlink_broadcast() */
3344         if (err && err != -ESRCH)
3345                 goto failed;
3346
3347         return;
3348
3349 nla_put_failure:
3350         nlmsg_free(msg);
3351 failed:
3352         dev_err(DEV, "Error %d while broadcasting event. "
3353                         "Event seq:%u sib_reason:%u\n",
3354                         err, seq, sib->sib_reason);
3355 }