/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_state_change.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>

/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices_done(struct netlink_callback *cb);
int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_connections_done(struct netlink_callback *cb);
int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb);
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include "drbd_nla.h"
#include <linux/genl_magic_func.h>

static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */

DEFINE_MUTEX(notification_mutex);

/* used by blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		pr_err("error sending genl reply\n");
}

/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail was no space in skb, and there are 4k available. */
static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
{
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}

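/* Illustrative only (editorial comment, not from the original source): a
 * typical call site, given an adm_ctx prepared by drbd_adm_prepare() below:
 *
 *	if (err) {
 *		retcode = ERR_MANDATORY_TAG;
 *		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
 *		goto out;
 *	}
 *
 * The info string travels to userspace (drbdsetup) inside the
 * DRBD_NLA_CFG_REPLY nest of the reply skb.
 */
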
/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 *
 * At this point, we still rely on the global genl_lock().
 * If we want to avoid that, and allow "genl_family.parallel_ops", we may need
 * to add additional synchronization against object destruction/modification.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_RESOURCE	2
#define DRBD_ADM_NEED_CONNECTION 4
static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
	struct sk_buff *skb, struct genl_info *info, unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(adm_ctx, 0, sizeof(*adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
		return -EPERM;

	adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx->reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* put of a few bytes into a fresh skb of >= 4k will always succeed.
	 * but anyways */
	if (!adm_ctx->reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh->minor = d_in->minor;
	adm_ctx->reply_dh->ret_code = NO_ERROR;

	adm_ctx->volume = VOLUME_UNSPECIFIED;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx->reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		if (nla)
			adm_ctx->volume = nla_get_u32(nla);
		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
		if (nla)
			adm_ctx->resource_name = nla_data(nla);
		adm_ctx->my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
		adm_ctx->peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
		if ((adm_ctx->my_addr &&
		     nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) ||
		    (adm_ctx->peer_addr &&
		     nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) {
			err = -EINVAL;
			goto fail;
		}
	}

	adm_ctx->minor = d_in->minor;
	adm_ctx->device = minor_to_device(d_in->minor);

	/* We are protected by the global genl_lock().
	 * But we may explicitly drop it/retake it in drbd_adm_set_role(),
	 * so make sure this object stays around. */
	if (adm_ctx->device)
		kref_get(&adm_ctx->device->kref);

	if (adm_ctx->resource_name) {
		adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name);
	}

	if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource");
		if (adm_ctx->resource_name)
			return ERR_RES_NOT_KNOWN;
		return ERR_INVALID_REQUEST;
	}

	if (flags & DRBD_ADM_NEED_CONNECTION) {
		if (adm_ctx->resource) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->device) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->my_addr && adm_ctx->peer_addr)
			adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr),
							  nla_len(adm_ctx->my_addr),
							  nla_data(adm_ctx->peer_addr),
							  nla_len(adm_ctx->peer_addr));
		if (!adm_ctx->connection) {
			drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection");
			return ERR_INVALID_REQUEST;
		}
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx->device && adm_ctx->resource &&
	    adm_ctx->device->resource != adm_ctx->resource) {
		pr_warning("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
				adm_ctx->minor, adm_ctx->resource->name,
				adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx->device &&
	    adm_ctx->volume != VOLUME_UNSPECIFIED &&
	    adm_ctx->volume != adm_ctx->device->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
				adm_ctx->minor, adm_ctx->volume,
				adm_ctx->device->vnr,
				adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	/* still, provide adm_ctx->resource always, if possible. */
	if (!adm_ctx->resource) {
		adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource
			: adm_ctx->connection ? adm_ctx->connection->resource : NULL;
		if (adm_ctx->resource)
			kref_get(&adm_ctx->resource->kref);
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx->reply_skb);
	adm_ctx->reply_skb = NULL;
	return err;
}

static int drbd_adm_finish(struct drbd_config_context *adm_ctx,
	struct genl_info *info, int retcode)
{
	if (adm_ctx->device) {
		kref_put(&adm_ctx->device->kref, drbd_destroy_device);
		adm_ctx->device = NULL;
	}
	if (adm_ctx->connection) {
		kref_put(&adm_ctx->connection->kref, drbd_destroy_connection);
		adm_ctx->connection = NULL;
	}
	if (adm_ctx->resource) {
		kref_put(&adm_ctx->resource->kref, drbd_destroy_resource);
		adm_ctx->resource = NULL;
	}

	if (!adm_ctx->reply_skb)
		return -ENOMEM;

	adm_ctx->reply_dh->ret_code = retcode;
	drbd_adm_send_reply(adm_ctx->reply_skb, info);
	return 0;
}

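/* Editorial sketch (not from the original source) of the common .doit
 * handler life cycle built from the two helpers above; compare
 * drbd_adm_set_role() and drbd_adm_disk_opts() below:
 *
 *	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
 *	if (!adm_ctx.reply_skb)
 *		return retcode;
 *	if (retcode != NO_ERROR)
 *		goto out;
 *	mutex_lock(&adm_ctx.resource->adm_mutex);
 *	... do the actual work ...
 *	mutex_unlock(&adm_ctx.resource->adm_mutex);
 * out:
 *	drbd_adm_finish(&adm_ctx, info, retcode);
 *	return 0;
 */
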
static void setup_khelper_env(struct drbd_connection *connection, char **envp)
{
	char *afs;

	/* FIXME: A future version will not allow this case. */
	if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)
		return;

	switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
	case AF_INET6:
		afs = "ipv6";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
			 &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
		break;
	case AF_INET:
		afs = "ipv4";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
		break;
	default:
		afs = "ssocks";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
	}
	snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}

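/* Editorial example: for an IPv4 peer at 192.0.2.1 (documentation address,
 * chosen here for illustration), the two prepared slots end up as
 *
 *	envp[3] = "DRBD_PEER_AF=ipv4"
 *	envp[4] = "DRBD_PEER_ADDRESS=192.0.2.1"
 *
 * so handler scripts (e.g. fence-peer) can find the peer without parsing
 * the DRBD configuration themselves.
 */
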
int drbd_khelper(struct drbd_device *device, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[14];
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct sib_info sib;
	int ret;

	if (current == connection->worker.task)
		set_bit(CALLBACK_PENDING, &connection->flags);

	snprintf(mb, 14, "minor-%d", device_to_minor(device));
	setup_khelper_env(connection, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(device);

	drbd_info(device, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_CALL, device, connection, cmd, 0);
	ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_RESPONSE, device, connection, cmd, ret);

	if (current == connection->worker.task)
		clear_bit(CALLBACK_PENDING, &connection->flags);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

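/* Editorial note: call_usermodehelper(..., UMH_WAIT_PROC) returns a
 * wait()-style status word, which is why the logging above and the
 * fence-peer evaluation in conn_try_outdate_peer() extract the helper's
 * exit code as
 *
 *	int exit_code = (ret >> 8) & 0xff;
 *
 * A negative return means the helper could not be run at all and is
 * deliberately ignored here.
 */
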
enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *resource_name = connection->resource->name;
	char *argv[] = {usermode_helper, cmd, resource_name, NULL };
	int ret;

	setup_khelper_env(connection, envp);
	conn_md_sync(connection);

	drbd_info(connection, "helper command: %s %s %s\n", usermode_helper, cmd, resource_name);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0);

	ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	else
		drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (get_ldev_if_state(device, D_CONSISTENT)) {
			struct disk_conf *disk_conf =
				rcu_dereference(peer_device->device->ldev->disk_conf);
			fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing);
			put_ldev(device);
		}
	}
	rcu_read_unlock();

	return fp;
}

static bool resource_is_suspended(struct drbd_resource *resource)
{
	return resource->susp || resource->susp_fen || resource->susp_nod;
}

bool conn_try_outdate_peer(struct drbd_connection *connection)
{
	struct drbd_resource * const resource = connection->resource;
	unsigned int connect_cnt;
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	spin_lock_irq(&resource->req_lock);
	if (connection->cstate >= C_WF_REPORT_PARAMS) {
		drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
		spin_unlock_irq(&resource->req_lock);
		return false;
	}

	connect_cnt = connection->connect_cnt;
	spin_unlock_irq(&resource->req_lock);

	fp = highest_fencing_policy(connection);
	switch (fp) {
	case FP_NOT_AVAIL:
		drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
		spin_lock_irq(&resource->req_lock);
		if (connection->cstate < C_WF_REPORT_PARAMS) {
			_conn_request_state(connection,
					    (union drbd_state) { { .susp_fen = 1 } },
					    (union drbd_state) { { .susp_fen = 0 } },
					    CS_VERBOSE | CS_HARD | CS_DC_SUSP);
			/* We are no longer suspended due to the fencing policy.
			 * We may still be suspended due to the on-no-data-accessible policy.
			 * If that was OND_IO_ERROR, fail pending requests. */
			if (!resource_is_suspended(resource))
				_tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
		}
		/* Else: in case we raced with a connection handshake,
		 * let the handshake figure out if we maybe can RESEND,
		 * and do not resume/fail pending requests here.
		 * Worst case is we stay suspended for now, which may be
		 * resolved by either re-establishing the replication link, or
		 * the next link failure, or eventually the administrator. */
		spin_unlock_irq(&resource->req_lock);
		return false;

	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(connection, "fence-peer");

	switch ((r>>8) & 0xff) {
	case P_INCONSISTENT: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case P_OUTDATED: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case P_DOWN: /* peer was down */
		if (conn_highest_disk(connection) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case P_PRIMARY: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		drbd_warn(connection, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case P_FENCING:
		/* THINK: do we need to handle this
		 * like case 4, or more like case 5? */
		if (fp != FP_STONITH)
			drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	drbd_info(connection, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

	/* Not using
	 *   conn_request_state(connection, mask, val, CS_VERBOSE);
	 * here, because we might have been able to re-establish the
	 * connection in the meantime. */
	spin_lock_irq(&resource->req_lock);
	if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
		if (connection->connect_cnt != connect_cnt)
			/* In case the connection was established and dropped
			   while the fence-peer handler was running, ignore it */
			drbd_info(connection, "Ignoring fence-peer exit code\n");
		else
			_conn_request_state(connection, mask, val, CS_VERBOSE);
	}
	spin_unlock_irq(&resource->req_lock);

	return conn_highest_pdsk(connection) <= D_OUTDATED;
}

static int _try_outdate_peer_async(void *data)
{
	struct drbd_connection *connection = (struct drbd_connection *)data;

	conn_try_outdate_peer(connection);

	kref_put(&connection->kref, drbd_destroy_connection);
	return 0;
}

void conn_try_outdate_peer_async(struct drbd_connection *connection)
{
	struct task_struct *opa;

	kref_get(&connection->kref);
	/* We may just have force_sig()'ed this thread
	 * to get it out of some blocking network function.
	 * Clear signals; otherwise kthread_run(), which internally uses
	 * wait_on_completion_killable(), will mistake our pending signal
	 * for a new fatal signal and fail. */
	flush_signals(current);
	opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
	if (IS_ERR(opa)) {
		drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&connection->kref, drbd_destroy_connection);
	}
}

enum drbd_state_rv
drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force)
{
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY) {
		struct drbd_connection *connection;

		/* Detect dead peers as soon as possible. */

		rcu_read_lock();
		for_each_connection(connection, device->resource)
			request_ping(connection);
		rcu_read_unlock();
	}

	mutex_lock(device->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (device->state.disk < D_UP_TO_DATE &&
		     device->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(device, device->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(connection)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(connection) && force) {
				drbd_warn(device, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk  = D_OUTDATED;
			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			if (try < max_tries) {
				int timeo;
				try = max_tries - 1;
				rcu_read_lock();
				nc = rcu_dereference(connection->net_conf);
				timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
				rcu_read_unlock();
				schedule_timeout_interruptible(timeo);
			}
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(device, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		drbd_warn(device, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0);

	/* FIXME also wait for all pending P_BARRIER_ACK? */

	if (new_role == R_SECONDARY) {
		if (get_ldev(device)) {
			device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(device);
		}
	} else {
		mutex_lock(&device->resource->conf_update);
		nc = connection->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&device->resource->conf_update);

		if (get_ldev(device)) {
			if (((device->state.conn < C_CONNECTED ||
			       device->state.pdsk <= D_FAILED)
			      && device->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(device);

			device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(device);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (device->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(peer_device);
		drbd_send_current_state(peer_device);
	}

	drbd_md_sync(device);
	set_disk_ro(device->vdisk, new_role == R_SECONDARY);
	kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(device->state_mutex);
	return rv;
}

static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out;
		}
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = (enum drbd_ret_code)drbd_set_role(adm_ctx.device,
						R_PRIMARY, parms.assume_uptodate);
	else
		retcode = (enum drbd_ret_code)drbd_set_role(adm_ctx.device,
						R_SECONDARY, 0);

	mutex_unlock(&adm_ctx.resource->adm_mutex);

out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

/* Initializes the md.*_offset members, so we are able to find
 * the on disk meta data.
 *
 * We currently have two possible layouts:
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | al_offset == 8 |
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 * internal:
 *            |----------- md_size_sect ------------------|
 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ]
 *                        | al_offset < 0 |
 *            | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 *  Activity log size used to be fixed 32kB,
 *  but is about to become configurable.
 */
static void drbd_md_set_sector_offsets(struct drbd_device *device,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	unsigned int al_size_sect = bdev->md.al_size_4k * 8;

	bdev->md.md_offset = drbd_md_ss(bdev);

	switch (bdev->md.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_128MB_SECT;
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		/* al size is still fixed */
		bdev->md.al_offset = -al_size_sect;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_4kB_SECT + al_size_sect;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_4kB_SECT;
		break;
	}
}

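/* Editorial example of the internal/flex-internal sizing above: the bitmap
 * tracks the backing device at one bit per 4 kiB block, i.e. roughly
 * 32 KiB of bitmap per 1 GiB of storage:
 *
 *	1 GiB = 2^30 B  ->  2^30 / 2^12 = 2^18 bits = 32 KiB of bitmap
 *
 * rounded up to bitmap-extent and 8-sector alignment, plus MD_4kB_SECT for
 * the superblock and al_size_sect for the activity log.
 */
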
/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}

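/* Editorial example: ppsize(buf, 1048576) -- 1048576 KB is >= 10000, so one
 * iteration shifts by 10 (plus the rounding bit), giving 1024, which is
 * < 10000; the result is "1024 MB". The term !!(size & (1<<9)) adds one if
 * bit 9 was set, i.e. it rounds to nearest instead of truncating.
 */
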
/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 *  remote READ does inc_ap_bio, receiver would need to receive answer
 *  packet from remote to dec_ap_bio again.
 *  receiver receive_sizes(), comes here,
 *  waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 *  (not connected, or bad/no disk on peer):
 *  see drbd_fail_request_early, ap_bio_cnt is zero.
 *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 *  peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes a device flag, is triggered by drbd internals,
 * and should be short-lived. */
/* It needs to be a counter, since multiple threads might
   independently suspend and resume IO. */
void drbd_suspend_io(struct drbd_device *device)
{
	atomic_inc(&device->suspend_cnt);
	if (drbd_suspended(device))
		return;
	wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_device *device)
{
	if (atomic_dec_and_test(&device->suspend_cnt))
		wake_up(&device->misc_wait);
}

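/* Editorial usage sketch: every drbd_suspend_io() must be paired with a
 * drbd_resume_io(), including on error paths, as in
 * drbd_determine_dev_size() below:
 *
 *	drbd_suspend_io(device);
 *	buffer = drbd_md_get_buffer(device, __func__);
 *	if (!buffer) {
 *		drbd_resume_io(device);
 *		return DS_ERROR;
 *	}
 *	...
 *	drbd_md_put_buffer(device);
 *	drbd_resume_io(device);
 *
 * Since suspend_cnt is a counter, nested pairs from independent threads
 * compose correctly.
 */
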
/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @device:	DRBD device.
 *
 * Returns the resulting size classification (enum determine_dev_size);
 * DS_ERROR and its variants indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
{
	struct md_offsets_and_sizes {
		u64 last_agreed_sect;
		u64 md_offset;
		s32 al_offset;
		s32 bm_offset;
		u32 md_size_sect;

		u32 al_stripes;
		u32 al_stripe_size_4k;
	} prev;
	sector_t u_size, size;
	struct drbd_md *md = &device->ldev->md;
	char ppb[10];
	void *buffer;

	int md_moved, la_size_changed;
	enum determine_dev_size rv = DS_UNCHANGED;

	/* We may change the on-disk offsets of our meta data below. Lock out
	 * anything that may cause meta data IO, to avoid acting on incomplete
	 * layout changes or scribbling over meta data that is in the process
	 * of being moved.
	 *
	 * Move is not exactly correct, btw, currently we have all our meta
	 * data in core memory, to "move" it we just write it all out, there
	 * are no reads. */
	drbd_suspend_io(device);
	buffer = drbd_md_get_buffer(device, __func__); /* Lock meta-data IO */
	if (!buffer) {
		drbd_resume_io(device);
		return DS_ERROR;
	}

	/* remember current offset and sizes */
	prev.last_agreed_sect = md->la_size_sect;
	prev.md_offset = md->md_offset;
	prev.al_offset = md->al_offset;
	prev.bm_offset = md->bm_offset;
	prev.md_size_sect = md->md_size_sect;
	prev.al_stripes = md->al_stripes;
	prev.al_stripe_size_4k = md->al_stripe_size_4k;

	if (rs) {
		/* rs is non NULL if we should change the AL layout only */
		md->al_stripes = rs->al_stripes;
		md->al_stripe_size_4k = rs->al_stripe_size / 4;
		md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
	}

	drbd_md_set_sector_offsets(device, device->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED);

	if (size < prev.last_agreed_sect) {
		if (rs && u_size == 0) {
			/* Remove "rs &&" later. This check should always be active, but
			   right now the receiver expects the permissive behavior */
			drbd_warn(device, "Implicit shrink not allowed. "
				 "Use --size=%llus for explicit shrink.\n",
				 (unsigned long long)size);
			rv = DS_ERROR_SHRINK;
		}
		if (u_size > size)
			rv = DS_ERROR_SPACE_MD;
		if (rv != DS_UNCHANGED)
			goto err_out;
	}

	if (drbd_get_capacity(device->this_bdev) != size ||
	    drbd_bm_capacity(device) != size) {
		int err;
		err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(device);
			if (size == 0) {
				drbd_err(device, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				drbd_err(device, "BM resizing failed. "
				    "Leaving size unchanged\n");
			}
			rv = DS_ERROR;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(device, size);
		md->la_size_sect = size;
		drbd_info(device, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
		     (unsigned long long)size>>1);
	}
	if (rv <= DS_ERROR)
		goto err_out;

	la_size_changed = (prev.last_agreed_sect != md->la_size_sect);

	md_moved = prev.md_offset    != md->md_offset
		|| prev.md_size_sect != md->md_size_sect;

	if (la_size_changed || md_moved || rs) {
		u32 prev_flags;

		/* We do some synchronous IO below, which may take some time.
		 * Clear the timer, to avoid scary "timer expired!" messages,
		 * "Superblock" is written out at least twice below, anyways. */
		del_timer(&device->md_sync_timer);

		/* We won't change the "al-extents" setting, we just may need
		 * to move the on-disk location of the activity log ringbuffer.
		 * Lock for transaction is good enough, it may well be "dirty"
		 * or even "starving". */
		wait_event(device->al_wait, lc_try_lock_for_transaction(device->act_log));

		/* mark current on-disk bitmap and activity log as unreliable */
		prev_flags = md->flags;
		md->flags |= MDF_FULL_SYNC | MDF_AL_DISABLED;
		drbd_md_write(device, buffer);

		drbd_al_initialize(device, buffer);

		drbd_info(device, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
			       "size changed", BM_LOCKED_MASK);

		/* on-disk bitmap and activity log is authoritative again
		 * (unless there was an IO error meanwhile...) */
		md->flags = prev_flags;
		drbd_md_write(device, buffer);

		if (rs)
			drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
				  md->al_stripes, md->al_stripe_size_4k * 4);
	}

	if (size > prev.last_agreed_sect)
		rv = prev.last_agreed_sect ? DS_GREW : DS_GREW_FROM_ZERO;
	if (size < prev.last_agreed_sect)
		rv = DS_SHRUNK;

	if (0) {
	err_out:
		/* restore previous offset and sizes */
		md->la_size_sect = prev.last_agreed_sect;
		md->md_offset = prev.md_offset;
		md->al_offset = prev.al_offset;
		md->bm_offset = prev.bm_offset;
		md->md_size_sect = prev.md_size_sect;
		md->al_stripes = prev.al_stripes;
		md->al_stripe_size_4k = prev.al_stripe_size_4k;
		md->al_size_4k = (u64)prev.al_stripes * prev.al_stripe_size_4k;
	}
	lc_unlock(device->act_log);
	wake_up(&device->al_wait);
	drbd_md_put_buffer(device);
	drbd_resume_io(device);

	return rv;
}

sector_t
drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = device->p_size;   /* partner's disk size. */
	sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (device->state.conn < C_CONNECTED && assume_peer_has_space) {
		drbd_warn(device, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size_sect) {
			size = la_size_sect;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		drbd_err(device, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			drbd_err(device, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}

/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @device:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (device->act_log &&
	    device->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = device->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		drbd_err(device, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&device->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				drbd_err(device, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		device->act_log = n;
	spin_unlock_irq(&device->al_lock);
	if (in_use) {
		drbd_err(device, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		lc_destroy(t);
	}
	drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elements */
	return 0;
}

static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity)
{
	q->limits.discard_granularity = granularity;
}

static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
{
	/* when we introduced REQ_WRITE_SAME support, we also bumped
	 * our maximum supported batch bio size used for discards. */
	if (connection->agreed_features & DRBD_FF_WSAME)
		return DRBD_MAX_BBIO_SECTORS;
	/* before, with DRBD <= 8.4.6, we only allowed up to one AL_EXTENT_SIZE. */
	return AL_EXTENT_SIZE >> 9;
}

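/* Editorial note: AL_EXTENT_SIZE is 4 MiB, so the legacy limit above works
 * out to (4 MiB >> 9) = 8192 sectors per discard request, while
 * DRBD_MAX_BBIO_SECTORS allows much larger batches once both sides have
 * agreed on DRBD_FF_WSAME.
 */
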
static void decide_on_discard_support(struct drbd_device *device,
			struct request_queue *q,
			struct request_queue *b,
			bool discard_zeroes_if_aligned)
{
	/* q = drbd device queue (device->rq_queue)
	 * b = backing device queue (device->ldev->backing_bdev->bd_disk->queue),
	 *     or NULL if diskless
	 */
	struct drbd_connection *connection = first_peer_device(device)->connection;
	bool can_do = b ? blk_queue_discard(b) : true;

	if (can_do && b && !b->limits.discard_zeroes_data && !discard_zeroes_if_aligned) {
		can_do = false;
		drbd_info(device, "discard_zeroes_data=0 and discard_zeroes_if_aligned=no: disabling discards\n");
	}
	if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_TRIM)) {
		can_do = false;
		drbd_info(connection, "peer DRBD too old, does not support TRIM: disabling discards\n");
	}
	if (can_do) {
		/* We don't care for the granularity, really.
		 * Stacking limits below should fix it for the local
		 * device. Whether or not it is a suitable granularity
		 * on the remote device is not our problem, really. If
		 * you care, you need to use devices with similar
		 * topology on all peers. */
		blk_queue_discard_granularity(q, 512);
		q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	} else {
		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
		blk_queue_discard_granularity(q, 0);
		q->limits.max_discard_sectors = 0;
	}
}

static void fixup_discard_if_not_supported(struct request_queue *q)
{
	/* To avoid confusion, if this queue does not support discard, clear
	 * max_discard_sectors, which is what lsblk -D reports to the user.
	 * Older kernels got this wrong in "stack limits".
	 */
	if (!blk_queue_discard(q)) {
		blk_queue_max_discard_sectors(q, 0);
		blk_queue_discard_granularity(q, 0);
	}
}

static void decide_on_write_same_support(struct drbd_device *device,
			struct request_queue *q,
			struct request_queue *b, struct o_qlim *o)
{
	struct drbd_peer_device *peer_device = first_peer_device(device);
	struct drbd_connection *connection = peer_device->connection;
	bool can_do = b ? b->limits.max_write_same_sectors : true;

	if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_WSAME)) {
		can_do = false;
		drbd_info(peer_device, "peer does not support WRITE_SAME\n");
	}

	if (o) {
		/* logical block size; queue_logical_block_size(NULL) is 512 */
		unsigned int peer_lbs = be32_to_cpu(o->logical_block_size);
		unsigned int me_lbs_b = queue_logical_block_size(b);
		unsigned int me_lbs = queue_logical_block_size(q);

		if (me_lbs_b != me_lbs) {
			drbd_warn(device,
				"logical block size of local backend does not match (drbd:%u, backend:%u); was this a late attach?\n",
				me_lbs, me_lbs_b);
			/* rather disable write same than trigger some BUG_ON later in the scsi layer. */
			can_do = false;
		}
		if (me_lbs_b != peer_lbs) {
			drbd_warn(peer_device, "logical block sizes do not match (me:%u, peer:%u); this may cause problems.\n",
				me_lbs, peer_lbs);
			if (can_do) {
				drbd_dbg(peer_device, "logical block size mismatch: WRITE_SAME disabled.\n");
				can_do = false;
			}
			me_lbs = max(me_lbs, me_lbs_b);
			/* We cannot change the logical block size of an in-use queue.
			 * We can only hope that access happens to be properly aligned.
			 * If not, the peer will likely produce an IO error, and detach. */
			if (peer_lbs > me_lbs) {
				if (device->state.role != R_PRIMARY) {
					blk_queue_logical_block_size(q, peer_lbs);
					drbd_warn(peer_device, "logical block size set to %u\n", peer_lbs);
				} else {
					drbd_warn(peer_device,
						"current Primary must NOT adjust logical block size (%u -> %u); hope for the best.\n",
						me_lbs, peer_lbs);
				}
			}
		}
		if (can_do && !o->write_same_capable) {
			/* If we introduce an open-coded write-same loop on the receiving side,
			 * the peer would present itself as "capable". */
			drbd_dbg(peer_device, "WRITE_SAME disabled (peer device not capable)\n");
			can_do = false;
		}
	}

	blk_queue_max_write_same_sectors(q, can_do ? DRBD_MAX_BBIO_SECTORS : 0);
}

static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
				   unsigned int max_bio_size, struct o_qlim *o)
{
	struct request_queue * const q = device->rq_queue;
	unsigned int max_hw_sectors = max_bio_size >> 9;
	unsigned int max_segments = 0;
	struct request_queue *b = NULL;
	struct disk_conf *dc;
	bool discard_zeroes_if_aligned = true;

	if (bdev) {
		b = bdev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		rcu_read_lock();
		dc = rcu_dereference(device->ldev->disk_conf);
		max_segments = dc->max_bio_bvecs;
		discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned;
		rcu_read_unlock();

		blk_set_stacking_limits(&q->limits);
	}

	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_SIZE-1);
	decide_on_discard_support(device, q, b, discard_zeroes_if_aligned);
	decide_on_write_same_support(device, q, b, o);

	if (b) {
		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
	}
	fixup_discard_if_not_supported(q);
}

void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev, struct o_qlim *o)
{
	unsigned int now, new, local, peer;

	now = queue_max_hw_sectors(device->rq_queue) << 9;
	local = device->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = device->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (bdev) {
		local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9;
		device->local_max_bio_size = local;
	}
	local = min(local, DRBD_MAX_BIO_SIZE);

	/* We may ignore peer limits if the peer is modern enough.
	   Because new from 8.3.8 onwards the peer can use multiple
	   BIOs for a single peer_request */
	if (device->state.conn >= C_WF_REPORT_PARAMS) {
		if (first_peer_device(device)->connection->agreed_pro_version < 94)
			peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
		/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
		else if (first_peer_device(device)->connection->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else if (first_peer_device(device)->connection->agreed_pro_version < 100)
			peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
		else
			peer = DRBD_MAX_BIO_SIZE;

		/* We may later detach and re-attach on a disconnected Primary.
		 * Avoid this setting to jump back in that case.
		 * We want to store what we know the peer DRBD can handle,
		 * not what the peer IO backend can handle. */
		if (peer > device->peer_max_bio_size)
			device->peer_max_bio_size = peer;
	}
	new = min(local, peer);

	if (device->state.role == R_PRIMARY && new < now)
		drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now);

	if (new != now)
		drbd_info(device, "max BIO size = %u\n", new);

	drbd_setup_queue_param(device, bdev, new, o);
}

/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_connection *connection)
{
	drbd_thread_start(&connection->worker);
	drbd_flush_workqueue(&connection->sender_work);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_connection *connection)
{
	bool stop_threads;
	spin_lock_irq(&connection->resource->req_lock);
	stop_threads = conn_all_vols_unconf(connection) &&
		connection->cstate == C_STANDALONE;
	spin_unlock_irq(&connection->resource->req_lock);
	if (stop_threads) {
		/* ack_receiver thread and ack_sender workqueue are implicitly
		 * stopped by receiver in conn_disconnect() */
		drbd_thread_stop(&connection->receiver);
		drbd_thread_stop(&connection->worker);
	}
}

/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_device *device)
{
	int s = 0;

	if (!lc_try_lock(device->act_log)) {
		drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(device);
	spin_lock_irq(&device->resource->req_lock);
	if (device->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
	spin_unlock_irq(&device->resource->req_lock);
	lc_unlock(device->act_log);

	if (s)
		drbd_info(device, "Suspended AL updates\n");
}

static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}

static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
{
	/* This is limited by 16 bit "slot" numbers,
	 * and by available on-disk context storage.
	 *
	 * Also (u16)~0 is special (denotes a "free" extent).
	 *
	 * One transaction occupies one 4kB on-disk block,
	 * we have n such blocks in the on disk ring buffer,
	 * the "current" transaction may fail (n-1),
	 * and there are 919 context slots per transaction.
	 *
	 * 72 transaction blocks amounts to more than 2**16 context slots,
	 * so cap there first.
	 */
	const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
	const unsigned int sufficient_on_disk =
		(max_al_nr + AL_CONTEXT_PER_TRANSACTION -1)
		/AL_CONTEXT_PER_TRANSACTION;

	unsigned int al_size_4k = bdev->md.al_size_4k;

	if (al_size_4k > sufficient_on_disk)
		return max_al_nr;

	return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
}

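/* Editorial arithmetic check for the comment above: with
 * AL_CONTEXT_PER_TRANSACTION == 919, 72 on-disk transaction blocks hold
 * 72 * 919 = 66168 > 2^16 = 65536 context slots, hence the cap at
 * DRBD_AL_EXTENTS_MAX before the (al_size_4k - 1) * 919 formula applies.
 */
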
static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
{
	return	a->disk_barrier != b->disk_barrier ||
		a->disk_flushes != b->disk_flushes ||
		a->disk_drain != b->disk_drain;
}

static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *disk_conf,
			       struct drbd_backing_dev *nbc)
{
	struct request_queue * const q = nbc->backing_bdev->bd_disk->queue;

	if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
		disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
	if (disk_conf->al_extents > drbd_al_extents_max(nbc))
		disk_conf->al_extents = drbd_al_extents_max(nbc);

	if (!blk_queue_discard(q)
	    || (!q->limits.discard_zeroes_data && !disk_conf->discard_zeroes_if_aligned)) {
		if (disk_conf->rs_discard_granularity) {
			disk_conf->rs_discard_granularity = 0; /* disable feature */
			drbd_info(device, "rs_discard_granularity feature disabled\n");
		}
	}

	if (disk_conf->rs_discard_granularity) {
		int orig_value = disk_conf->rs_discard_granularity;
		int remainder;

		if (q->limits.discard_granularity > disk_conf->rs_discard_granularity)
			disk_conf->rs_discard_granularity = q->limits.discard_granularity;

		remainder = disk_conf->rs_discard_granularity % q->limits.discard_granularity;
		disk_conf->rs_discard_granularity += remainder;

		if (disk_conf->rs_discard_granularity > q->limits.max_discard_sectors << 9)
			disk_conf->rs_discard_granularity = q->limits.max_discard_sectors << 9;

		if (disk_conf->rs_discard_granularity != orig_value)
			drbd_info(device, "rs_discard_granularity changed to %d\n",
				  disk_conf->rs_discard_granularity);
	}
}

static int disk_opts_check_al_size(struct drbd_device *device, struct disk_conf *dc)
{
	int err = -EBUSY;

	if (device->act_log &&
	    device->act_log->nr_elements == dc->al_extents)
		return 0;

	drbd_suspend_io(device);
	/* If IO completion is currently blocked, we would likely wait
	 * "forever" for the activity log to become unused. So we don't. */
	if (atomic_read(&device->ap_bio_cnt))
		goto out;

	wait_event(device->al_wait, lc_try_lock(device->act_log));
	drbd_al_shrink(device);
	err = drbd_check_al_size(device, dc);
	lc_unlock(device->act_log);
	wake_up(&device->al_wait);
out:
	drbd_resume_io(device);
	return err;
}

int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct drbd_device *device;
	struct disk_conf *new_disk_conf, *old_disk_conf;
	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
	int err, fifo_size;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	device = adm_ctx.device;
	mutex_lock(&adm_ctx.resource->adm_mutex);

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	mutex_lock(&device->resource->conf_update);
	old_disk_conf = device->ldev->disk_conf;
	*new_disk_conf = *old_disk_conf;
	if (should_set_defaults(info))
		set_disk_conf_defaults(new_disk_conf);

	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
		goto fail_unlock;
	}

	if (!expect(new_disk_conf->resync_rate >= 1))
		new_disk_conf->resync_rate = 1;

	sanitize_disk_conf(device, new_disk_conf, device->ldev);

	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;

	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != device->rs_plan_s->size) {
		new_plan = fifo_alloc(fifo_size);
		if (!new_plan) {
			drbd_err(device, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail_unlock;
		}
	}

	err = disk_opts_check_al_size(device, new_disk_conf);
	if (err) {
		/* Could be just "busy". Ignore?
		 * Introduce dedicated error code? */
		drbd_msg_put_info(adm_ctx.reply_skb,
			"Try again without changing current al-extents setting");
		retcode = ERR_NOMEM;
		goto fail_unlock;
	}

	lock_all_resources();
	retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
	if (retcode == NO_ERROR) {
		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
		drbd_resync_after_changed(device);
	}
	unlock_all_resources();

	if (retcode != NO_ERROR)
		goto fail_unlock;

	if (new_plan) {
		old_plan = device->rs_plan_s;
		rcu_assign_pointer(device->rs_plan_s, new_plan);
	}

	mutex_unlock(&device->resource->conf_update);

	if (new_disk_conf->al_updates)
		device->ldev->md.flags &= ~MDF_AL_DISABLED;
	else
		device->ldev->md.flags |= MDF_AL_DISABLED;

	if (new_disk_conf->md_flushes)
		clear_bit(MD_NO_FUA, &device->flags);
	else
		set_bit(MD_NO_FUA, &device->flags);

	if (write_ordering_changed(old_disk_conf, new_disk_conf))
		drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);

	if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned)
		drbd_reconsider_queue_parameters(device, device->ldev, NULL);

	drbd_md_sync(device);

	if (device->state.conn >= C_CONNECTED) {
		struct drbd_peer_device *peer_device;

		for_each_peer_device(peer_device, device)
			drbd_send_sync_param(peer_device);
	}

	synchronize_rcu();
	kfree(old_disk_conf);
	kfree(old_plan);
	mod_timer(&device->request_timer, jiffies + HZ);
	goto success;

fail_unlock:
	mutex_unlock(&device->resource->conf_update);
fail:
	kfree(new_disk_conf);
	kfree(new_plan);
success:
	put_ldev(device);
out:
	mutex_unlock(&adm_ctx.resource->adm_mutex);
finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

static struct block_device *open_backing_dev(struct drbd_device *device,
		const char *bdev_path, void *claim_ptr, bool do_bd_link)
{
	struct block_device *bdev;
	int err = 0;

	bdev = blkdev_get_by_path(bdev_path,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, claim_ptr);
	if (IS_ERR(bdev)) {
		drbd_err(device, "open(\"%s\") failed with %ld\n",
				bdev_path, PTR_ERR(bdev));
		return bdev;
	}

	if (!do_bd_link)
		return bdev;

	err = bd_link_disk_holder(bdev, device->vdisk);
	if (err) {
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n",
				bdev_path, err);
		bdev = ERR_PTR(err);
	}
	return bdev;
}

static int open_backing_devices(struct drbd_device *device,
		struct disk_conf *new_disk_conf,
		struct drbd_backing_dev *nbc)
{
	struct block_device *bdev;

	bdev = open_backing_dev(device, new_disk_conf->backing_dev, device, true);
	if (IS_ERR(bdev))
		return ERR_OPEN_DISK;
	nbc->backing_bdev = bdev;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device.  TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = open_backing_dev(device, new_disk_conf->meta_dev,
		/* claim ptr: device, if claimed exclusively; shared drbd_m_holder,
		 * if potentially shared with other drbd minors */
			(new_disk_conf->meta_dev_idx < 0) ? (void*)device : (void*)drbd_m_holder,
		/* avoid double bd_claim_by_disk() for the same (source,target) tuple,
		 * as would happen with internal metadata. */
			(new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_FLEX_INT &&
			 new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_INTERNAL));
	if (IS_ERR(bdev))
		return ERR_OPEN_MD_DISK;
	nbc->md_bdev = bdev;
	return NO_ERROR;
}

static void close_backing_dev(struct drbd_device *device, struct block_device *bdev,
	bool do_bd_unlink)
{
	if (!bdev)
		return;
	if (do_bd_unlink)
		bd_unlink_disk_holder(bdev, device->vdisk);
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}

void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev)
{
	if (ldev == NULL)
		return;

	close_backing_dev(device, ldev->md_bdev, ldev->md_bdev != ldev->backing_bdev);
	close_backing_dev(device, ldev->backing_bdev, true);

	kfree(ldev->disk_conf);
	kfree(ldev);
}

1754 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1756 struct drbd_config_context adm_ctx;
1757 struct drbd_device *device;
1758 struct drbd_peer_device *peer_device;
1759 struct drbd_connection *connection;
1761 enum drbd_ret_code retcode;
1762 enum determine_dev_size dd;
1763 sector_t max_possible_sectors;
1764 sector_t min_md_device_sectors;
1765 struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
1766 struct disk_conf *new_disk_conf = NULL;
1767 struct lru_cache *resync_lru = NULL;
1768 struct fifo_buffer *new_plan = NULL;
1769 union drbd_state ns, os;
1770 enum drbd_state_rv rv;
1771 struct net_conf *nc;
1773 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
1774 if (!adm_ctx.reply_skb)
1776 if (retcode != NO_ERROR)
1779 device = adm_ctx.device;
1780 mutex_lock(&adm_ctx.resource->adm_mutex);
1781 peer_device = first_peer_device(device);
1782 connection = peer_device->connection;
1783 conn_reconfig_start(connection);
1785 /* if you want to reconfigure, please tear down first */
1786 if (device->state.disk > D_DISKLESS) {
1787 retcode = ERR_DISK_CONFIGURED;
1790 /* It may just now have detached because of IO error. Make sure
1791 * drbd_ldev_destroy is done already, we may end up here very fast,
1792 * e.g. if someone calls attach from the on-io-error handler,
1793 * to realize a "hot spare" feature (not that I'd recommend that) */
1794 wait_event(device->misc_wait, !test_bit(GOING_DISKLESS, &device->flags));
1796 /* make sure there is no leftover from previous force-detach attempts */
1797 clear_bit(FORCE_DETACH, &device->flags);
1798 clear_bit(WAS_IO_ERROR, &device->flags);
1799 clear_bit(WAS_READ_ERROR, &device->flags);
1801 /* and no leftover from previously aborted resync or verify, either */
1802 device->rs_total = 0;
1803 device->rs_failed = 0;
1804 atomic_set(&device->rs_pending_cnt, 0);
1806 /* allocation not in the IO path, drbdsetup context */
1807 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1809 retcode = ERR_NOMEM;
1812 spin_lock_init(&nbc->md.uuid_lock);
1814 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
1815 if (!new_disk_conf) {
1816 retcode = ERR_NOMEM;
1819 nbc->disk_conf = new_disk_conf;
1821 set_disk_conf_defaults(new_disk_conf);
1822 err = disk_conf_from_attrs(new_disk_conf, info);
1824 retcode = ERR_MANDATORY_TAG;
1825 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
1829 if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1830 new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
1832 new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
1834 retcode = ERR_NOMEM;
1838 if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
1839 retcode = ERR_MD_IDX_INVALID;
1844 nc = rcu_dereference(connection->net_conf);
1846 if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
1848 retcode = ERR_STONITH_AND_PROT_A;
1854 retcode = open_backing_devices(device, new_disk_conf, nbc);
1855 if (retcode != NO_ERROR)
1858 if ((nbc->backing_bdev == nbc->md_bdev) !=
1859 (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1860 new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
1861 retcode = ERR_MD_IDX_INVALID;
1865 resync_lru = lc_create("resync", drbd_bm_ext_cache,
1866 1, 61, sizeof(struct bm_extent),
1867 offsetof(struct bm_extent, lce));
1869 retcode = ERR_NOMEM;
1873 /* Read our meta data super block early.
1874 * This also sets other on-disk offsets. */
1875 retcode = drbd_md_read(device, nbc);
1876 if (retcode != NO_ERROR)
1879 sanitize_disk_conf(device, new_disk_conf, nbc);
1881 if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
1882 drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
1883 (unsigned long long) drbd_get_max_capacity(nbc),
1884 (unsigned long long) new_disk_conf->disk_size);
1885 retcode = ERR_DISK_TOO_SMALL;
1889 if (new_disk_conf->meta_dev_idx < 0) {
1890 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1891 /* at least one MB, otherwise it does not make sense */
1892 min_md_device_sectors = (2<<10);
1894 max_possible_sectors = DRBD_MAX_SECTORS;
1895 min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
1898 if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
1899 retcode = ERR_MD_DISK_TOO_SMALL;
1900 drbd_warn(device, "refusing attach: md-device too small, "
1901 "at least %llu sectors needed for this meta-disk type\n",
1902 (unsigned long long) min_md_device_sectors);
1906 /* Make sure the new disk is big enough
1907 * (we may currently be R_PRIMARY with no local disk...) */
1908 if (drbd_get_max_capacity(nbc) <
1909 drbd_get_capacity(device->this_bdev)) {
1910 retcode = ERR_DISK_TOO_SMALL;
1914 nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1916 if (nbc->known_size > max_possible_sectors) {
1917 drbd_warn(device, "==> truncating very big lower level device "
1918 "to currently maximum possible %llu sectors <==\n",
1919 (unsigned long long) max_possible_sectors);
1920 if (new_disk_conf->meta_dev_idx >= 0)
1921 drbd_warn(device, "==>> using internal or flexible "
1922 "meta data may help <<==\n");
1925 drbd_suspend_io(device);
1926 /* also wait for the last barrier ack. */
1927 /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
1928 * We need a way to either ignore barrier acks for barriers sent before a device
1929 * was attached, or a way to wait for all pending barrier acks to come in.
1930 * As barriers are counted per resource,
1931 * we'd need to suspend io on all devices of a resource. */
1933 wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device));
1934 /* and for any other previously queued work */
1935 drbd_flush_workqueue(&connection->sender_work);
1937 rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
1938 retcode = (enum drbd_ret_code)rv;
1939 drbd_resume_io(device);
1940 if (rv < SS_SUCCESS)
1943 if (!get_ldev_if_state(device, D_ATTACHING))
1944 goto force_diskless;
1946 if (!device->bitmap) {
1947 if (drbd_bm_init(device)) {
1948 retcode = ERR_NOMEM;
1949 goto force_diskless_dec;
1953 if (device->state.pdsk != D_UP_TO_DATE && device->ed_uuid &&
1954 (device->state.role == R_PRIMARY || device->state.peer == R_PRIMARY) &&
1955 (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1956 drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
1957 (unsigned long long)device->ed_uuid);
1958 retcode = ERR_DATA_NOT_CURRENT;
1959 goto force_diskless_dec;
1962 /* Since we are diskless, fix the activity log first... */
1963 if (drbd_check_al_size(device, new_disk_conf)) {
1964 retcode = ERR_NOMEM;
1965 goto force_diskless_dec;
1968 /* Prevent shrinking of consistent devices! */
1969 if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
1970 drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
1971 drbd_warn(device, "refusing to truncate a consistent device\n");
1972 retcode = ERR_DISK_TOO_SMALL;
1973 goto force_diskless_dec;
1976 lock_all_resources();
1977 retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
1978 if (retcode != NO_ERROR) {
1979 unlock_all_resources();
1980 goto force_diskless_dec;
1983 /* Reset the "barriers don't work" bits here, then force meta data to
1984 * be written, to ensure we determine if barriers are supported. */
1985 if (new_disk_conf->md_flushes)
1986 clear_bit(MD_NO_FUA, &device->flags);
1988 set_bit(MD_NO_FUA, &device->flags);
1990 /* Point of no return reached.
1991 * Devices and memory are no longer released by error cleanup below.
1992 * Now the device takes over responsibility, and the state engine should
1993 * clean it up somewhere. */
1994 D_ASSERT(device, device->ldev == NULL);
1996 device->resync = resync_lru;
1997 device->rs_plan_s = new_plan;
2000 new_disk_conf = NULL;
2003 drbd_resync_after_changed(device);
2004 drbd_bump_write_ordering(device->resource, device->ldev, WO_BDEV_FLUSH);
2005 unlock_all_resources();
2007 if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
2008 set_bit(CRASHED_PRIMARY, &device->flags);
2010 clear_bit(CRASHED_PRIMARY, &device->flags);
2012 if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
2013 !(device->state.role == R_PRIMARY && device->resource->susp_nod))
2014 set_bit(CRASHED_PRIMARY, &device->flags);
2016 device->send_cnt = 0;
2017 device->recv_cnt = 0;
2018 device->read_cnt = 0;
2019 device->writ_cnt = 0;
2021 drbd_reconsider_queue_parameters(device, device->ldev, NULL);
2023 /* If I am currently not R_PRIMARY,
2024 * but meta data primary indicator is set,
2025 * I just now recover from a hard crash,
2026 * and have been R_PRIMARY before that crash.
2028 * Now, if I had no connection before that crash
2029 * (have been degraded R_PRIMARY), chances are that
2030 * I won't find my peer now either.
2032 * In that case, and _only_ in that case,
2033 * we use the degr-wfc-timeout instead of the default,
2034 * so we can automatically recover from a crash of a
2035 * degraded but active "cluster" after a certain timeout. */
2037 clear_bit(USE_DEGR_WFC_T, &device->flags);
2038 if (device->state.role != R_PRIMARY &&
2039 drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
2040 !drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND))
2041 set_bit(USE_DEGR_WFC_T, &device->flags);
2043 dd = drbd_determine_dev_size(device, 0, NULL);
2044 if (dd <= DS_ERROR) {
2045 retcode = ERR_NOMEM_BITMAP;
2046 goto force_diskless_dec;
2047 } else if (dd == DS_GREW)
2048 set_bit(RESYNC_AFTER_NEG, &device->flags);
2050 if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ||
2051 (test_bit(CRASHED_PRIMARY, &device->flags) &&
2052 drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) {
2053 drbd_info(device, "Assuming that all blocks are out of sync "
2054 "(aka FullSync)\n");
2055 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
2056 "set_n_write from attaching", BM_LOCKED_MASK)) {
2057 retcode = ERR_IO_MD_DISK;
2058 goto force_diskless_dec;
2061 if (drbd_bitmap_io(device, &drbd_bm_read,
2062 "read from attaching", BM_LOCKED_MASK)) {
2063 retcode = ERR_IO_MD_DISK;
2064 goto force_diskless_dec;
2068 if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
2069 drbd_suspend_al(device); /* IO is still suspended here... */
2071 spin_lock_irq(&device->resource->req_lock);
2072 os = drbd_read_state(device);
2074 /* If MDF_CONSISTENT is not set, go into D_INCONSISTENT state;
2075 otherwise investigate MDF_WAS_UP_TO_DATE...
2076 If MDF_WAS_UP_TO_DATE is not set, go into D_OUTDATED disk state;
2077 otherwise into D_CONSISTENT state. */
2079 if (drbd_md_test_flag(device->ldev, MDF_CONSISTENT)) {
2080 if (drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE))
2081 ns.disk = D_CONSISTENT;
2083 ns.disk = D_OUTDATED;
2085 ns.disk = D_INCONSISTENT;
2088 if (drbd_md_test_flag(device->ldev, MDF_PEER_OUT_DATED))
2089 ns.pdsk = D_OUTDATED;
2092 if (ns.disk == D_CONSISTENT &&
2093 (ns.pdsk == D_OUTDATED || rcu_dereference(device->ldev->disk_conf)->fencing == FP_DONT_CARE))
2094 ns.disk = D_UP_TO_DATE;
2096 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
2097 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
2098 this point, because drbd_request_state() modifies these flags. */
2101 if (rcu_dereference(device->ldev->disk_conf)->al_updates)
2102 device->ldev->md.flags &= ~MDF_AL_DISABLED;
2104 device->ldev->md.flags |= MDF_AL_DISABLED;
2108 /* In case we are C_CONNECTED postpone any decision on the new disk
2109 state after the negotiation phase. */
2110 if (device->state.conn == C_CONNECTED) {
2111 device->new_state_tmp.i = ns.i;
2113 ns.disk = D_NEGOTIATING;
2115 /* We expect to receive up-to-date UUIDs soon.
2116 To avoid a race in receive_state, free p_uuid while
2117 holding req_lock. I.e. atomic with the state change */
2118 kfree(device->p_uuid);
2119 device->p_uuid = NULL;
2122 rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
2123 spin_unlock_irq(&device->resource->req_lock);
2125 if (rv < SS_SUCCESS)
2126 goto force_diskless_dec;
2128 mod_timer(&device->request_timer, jiffies + HZ);
2130 if (device->state.role == R_PRIMARY)
2131 device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
2133 device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
2135 drbd_md_mark_dirty(device);
2136 drbd_md_sync(device);
2138 kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
2140 conn_reconfig_done(connection);
2141 mutex_unlock(&adm_ctx.resource->adm_mutex);
2142 drbd_adm_finish(&adm_ctx, info, retcode);
2148 drbd_force_state(device, NS(disk, D_DISKLESS));
2149 drbd_md_sync(device);
2151 conn_reconfig_done(connection);
2153 close_backing_dev(device, nbc->md_bdev, nbc->md_bdev != nbc->backing_bdev);
2154 close_backing_dev(device, nbc->backing_bdev, true);
2157 kfree(new_disk_conf);
2158 lc_destroy(resync_lru);
2160 mutex_unlock(&adm_ctx.resource->adm_mutex);
2162 drbd_adm_finish(&adm_ctx, info, retcode);
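/* Bring the local disk down.  With @force set, short-circuit the
 * graceful path and force the disk into D_FAILED right away;
 * otherwise suspend IO, make sure no meta-data IO is in flight,
 * request the D_FAILED transition, and wait until the disk state
 * has left D_FAILED (it continues on to D_DISKLESS). */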
2166 static int adm_detach(struct drbd_device *device, int force)
2168 enum drbd_state_rv retcode;
2173 set_bit(FORCE_DETACH, &device->flags);
2174 drbd_force_state(device, NS(disk, D_FAILED));
2175 retcode = SS_SUCCESS;
2179 drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */
2180 buffer = drbd_md_get_buffer(device, __func__); /* make sure there is no in-flight meta-data IO */
2182 retcode = drbd_request_state(device, NS(disk, D_FAILED));
2183 drbd_md_put_buffer(device);
2184 } else /* already <= D_FAILED */
2185 retcode = SS_NOTHING_TO_DO;
2186 /* D_FAILED will transition to DISKLESS. */
2187 drbd_resume_io(device);
2188 ret = wait_event_interruptible(device->misc_wait,
2189 device->state.disk != D_FAILED);
2190 if ((int)retcode == (int)SS_IS_DISKLESS)
2191 retcode = SS_NOTHING_TO_DO;
2198 /* Detaching the disk is a process in multiple stages. First we need to lock
2199 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
2200 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
2201 * internal references as well.
2202 * Only then have we finally detached. */
2203 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
2205 struct drbd_config_context adm_ctx;
2206 enum drbd_ret_code retcode;
2207 struct detach_parms parms = { };
2210 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2211 if (!adm_ctx.reply_skb)
2213 if (retcode != NO_ERROR)
2216 if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
2217 err = detach_parms_from_attrs(&parms, info);
2219 retcode = ERR_MANDATORY_TAG;
2220 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2225 mutex_lock(&adm_ctx.resource->adm_mutex);
2226 retcode = adm_detach(adm_ctx.device, parms.force_detach);
2227 mutex_unlock(&adm_ctx.resource->adm_mutex);
2229 drbd_adm_finish(&adm_ctx, info, retcode);
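/* True if any volume of @connection is in a (possibly paused) resync,
 * as sync source or sync target. */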
2233 static bool conn_resync_running(struct drbd_connection *connection)
2235 struct drbd_peer_device *peer_device;
2240 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2241 struct drbd_device *device = peer_device->device;
2242 if (device->state.conn == C_SYNC_SOURCE ||
2243 device->state.conn == C_SYNC_TARGET ||
2244 device->state.conn == C_PAUSED_SYNC_S ||
2245 device->state.conn == C_PAUSED_SYNC_T) {
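/* True if any volume of @connection is currently running online verify. */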
2255 static bool conn_ov_running(struct drbd_connection *connection)
2257 struct drbd_peer_device *peer_device;
2262 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2263 struct drbd_device *device = peer_device->device;
2264 if (device->state.conn == C_VERIFY_S ||
2265 device->state.conn == C_VERIFY_T) {
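/* Validate @new_net_conf against the current connection state: before
 * protocol version 100 the wire protocol, dual-primary setting and
 * integrity algorithm must not change on an established connection;
 * dual-primary requires protocol C; fencing and congestion policies
 * constrain the usable wire protocol as well. */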
2275 static enum drbd_ret_code
2276 _check_net_options(struct drbd_connection *connection, struct net_conf *old_net_conf, struct net_conf *new_net_conf)
2278 struct drbd_peer_device *peer_device;
2281 if (old_net_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
2282 if (new_net_conf->wire_protocol != old_net_conf->wire_protocol)
2283 return ERR_NEED_APV_100;
2285 if (new_net_conf->two_primaries != old_net_conf->two_primaries)
2286 return ERR_NEED_APV_100;
2288 if (strcmp(new_net_conf->integrity_alg, old_net_conf->integrity_alg))
2289 return ERR_NEED_APV_100;
2292 if (!new_net_conf->two_primaries &&
2293 conn_highest_role(connection) == R_PRIMARY &&
2294 conn_highest_peer(connection) == R_PRIMARY)
2295 return ERR_NEED_ALLOW_TWO_PRI;
2297 if (new_net_conf->two_primaries &&
2298 (new_net_conf->wire_protocol != DRBD_PROT_C))
2299 return ERR_NOT_PROTO_C;
2301 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2302 struct drbd_device *device = peer_device->device;
2303 if (get_ldev(device)) {
2304 enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
2306 if (new_net_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
2307 return ERR_STONITH_AND_PROT_A;
2309 if (device->state.role == R_PRIMARY && new_net_conf->discard_my_data)
2310 return ERR_DISCARD_IMPOSSIBLE;
2313 if (new_net_conf->on_congestion != OC_BLOCK && new_net_conf->wire_protocol != DRBD_PROT_A)
2314 return ERR_CONG_NOT_PROTO_A;
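/* Wrapper around _check_net_options() that takes the RCU read lock,
 * and also makes sure every volume has its bitmap allocated before we
 * attempt to connect. */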
2319 static enum drbd_ret_code
2320 check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf)
2322 enum drbd_ret_code rv;
2323 struct drbd_peer_device *peer_device;
2327 rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf);
2330 /* connection->peer_devices protected by genl_lock() here */
2331 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2332 struct drbd_device *device = peer_device->device;
2333 if (!device->bitmap) {
2334 if (drbd_bm_init(device))
2343 struct crypto_ahash *verify_tfm;
2344 struct crypto_ahash *csums_tfm;
2345 struct crypto_shash *cram_hmac_tfm;
2346 struct crypto_ahash *integrity_tfm;
2350 alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg)
2355 *tfm = crypto_alloc_shash(tfm_name, 0, 0);
2365 alloc_ahash(struct crypto_ahash **tfm, char *tfm_name, int err_alg)
2370 *tfm = crypto_alloc_ahash(tfm_name, 0, CRYPTO_ALG_ASYNC);
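/* Allocate the crypto transformations named in @new_net_conf: csums,
 * online verify, integrity, and, if a shared secret is set, the
 * "hmac(<alg>)" used for CRAM-HMAC authentication.  On error the
 * caller cleans up via free_crypto(). */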
2379 static enum drbd_ret_code
2380 alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
2382 char hmac_name[CRYPTO_MAX_ALG_NAME];
2383 enum drbd_ret_code rv;
2385 rv = alloc_ahash(&crypto->csums_tfm, new_net_conf->csums_alg,
2389 rv = alloc_ahash(&crypto->verify_tfm, new_net_conf->verify_alg,
2393 rv = alloc_ahash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
2397 if (new_net_conf->cram_hmac_alg[0] != 0) {
2398 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
2399 new_net_conf->cram_hmac_alg);
2401 rv = alloc_shash(&crypto->cram_hmac_tfm, hmac_name,
2408 static void free_crypto(struct crypto *crypto)
2410 crypto_free_shash(crypto->cram_hmac_tfm);
2411 crypto_free_ahash(crypto->integrity_tfm);
2412 crypto_free_ahash(crypto->csums_tfm);
2413 crypto_free_ahash(crypto->verify_tfm);
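/* Change the options of an existing network configuration (e.g. via
 * "drbdsetup net-options").  Changing the csums or verify algorithm
 * is refused while a resync or online verify using it is running. */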
2416 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
2418 struct drbd_config_context adm_ctx;
2419 enum drbd_ret_code retcode;
2420 struct drbd_connection *connection;
2421 struct net_conf *old_net_conf, *new_net_conf = NULL;
2423 int ovr; /* online verify running */
2424 int rsr; /* re-sync running */
2425 struct crypto crypto = { };
2427 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
2428 if (!adm_ctx.reply_skb)
2430 if (retcode != NO_ERROR)
2433 connection = adm_ctx.connection;
2434 mutex_lock(&adm_ctx.resource->adm_mutex);
2436 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
2437 if (!new_net_conf) {
2438 retcode = ERR_NOMEM;
2442 conn_reconfig_start(connection);
2444 mutex_lock(&connection->data.mutex);
2445 mutex_lock(&connection->resource->conf_update);
2446 old_net_conf = connection->net_conf;
2448 if (!old_net_conf) {
2449 drbd_msg_put_info(adm_ctx.reply_skb, "net conf missing, try connect");
2450 retcode = ERR_INVALID_REQUEST;
2454 *new_net_conf = *old_net_conf;
2455 if (should_set_defaults(info))
2456 set_net_conf_defaults(new_net_conf);
2458 err = net_conf_from_attrs_for_change(new_net_conf, info);
2459 if (err && err != -ENOMSG) {
2460 retcode = ERR_MANDATORY_TAG;
2461 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2465 retcode = check_net_options(connection, new_net_conf);
2466 if (retcode != NO_ERROR)
2469 /* re-sync running */
2470 rsr = conn_resync_running(connection);
2471 if (rsr && strcmp(new_net_conf->csums_alg, old_net_conf->csums_alg)) {
2472 retcode = ERR_CSUMS_RESYNC_RUNNING;
2476 /* online verify running */
2477 ovr = conn_ov_running(connection);
2478 if (ovr && strcmp(new_net_conf->verify_alg, old_net_conf->verify_alg)) {
2479 retcode = ERR_VERIFY_RUNNING;
2483 retcode = alloc_crypto(&crypto, new_net_conf);
2484 if (retcode != NO_ERROR)
2487 rcu_assign_pointer(connection->net_conf, new_net_conf);
2490 crypto_free_ahash(connection->csums_tfm);
2491 connection->csums_tfm = crypto.csums_tfm;
2492 crypto.csums_tfm = NULL;
2495 crypto_free_ahash(connection->verify_tfm);
2496 connection->verify_tfm = crypto.verify_tfm;
2497 crypto.verify_tfm = NULL;
2500 crypto_free_ahash(connection->integrity_tfm);
2501 connection->integrity_tfm = crypto.integrity_tfm;
2502 if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
2503 /* Do this without trying to take connection->data.mutex again. */
2504 __drbd_send_protocol(connection, P_PROTOCOL_UPDATE);
2506 crypto_free_shash(connection->cram_hmac_tfm);
2507 connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2509 mutex_unlock(&connection->resource->conf_update);
2510 mutex_unlock(&connection->data.mutex);
2512 kfree(old_net_conf);
2514 if (connection->cstate >= C_WF_REPORT_PARAMS) {
2515 struct drbd_peer_device *peer_device;
2518 idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
2519 drbd_send_sync_param(peer_device);
2525 mutex_unlock(&connection->resource->conf_update);
2526 mutex_unlock(&connection->data.mutex);
2527 free_crypto(&crypto);
2528 kfree(new_net_conf);
2530 conn_reconfig_done(connection);
2532 mutex_unlock(&adm_ctx.resource->adm_mutex);
2534 drbd_adm_finish(&adm_ctx, info, retcode);
2538 static void connection_to_info(struct connection_info *info,
2539 struct drbd_connection *connection)
2541 info->conn_connection_state = connection->cstate;
2542 info->conn_role = conn_highest_peer(connection);
2545 static void peer_device_to_info(struct peer_device_info *info,
2546 struct drbd_peer_device *peer_device)
2548 struct drbd_device *device = peer_device->device;
2550 info->peer_repl_state =
2551 max_t(enum drbd_conns, C_WF_REPORT_PARAMS, device->state.conn);
2552 info->peer_disk_state = device->state.pdsk;
2553 info->peer_resync_susp_user = device->state.user_isp;
2554 info->peer_resync_susp_peer = device->state.peer_isp;
2555 info->peer_resync_susp_dependency = device->state.aftr_isp;
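/* Establish a new network configuration (e.g. "drbdsetup connect"):
 * refuse address pairs already used by another connection, allocate
 * the crypto transformations, publish the new net_conf together with
 * NOTIFY_CREATE events, and request C_UNCONNECTED so the receiver
 * thread starts up. */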
2558 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
2560 struct connection_info connection_info;
2561 enum drbd_notification_type flags;
2562 unsigned int peer_devices = 0;
2563 struct drbd_config_context adm_ctx;
2564 struct drbd_peer_device *peer_device;
2565 struct net_conf *old_net_conf, *new_net_conf = NULL;
2566 struct crypto crypto = { };
2567 struct drbd_resource *resource;
2568 struct drbd_connection *connection;
2569 enum drbd_ret_code retcode;
2573 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
2575 if (!adm_ctx.reply_skb)
2577 if (retcode != NO_ERROR)
2579 if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
2580 drbd_msg_put_info(adm_ctx.reply_skb, "connection endpoint(s) missing");
2581 retcode = ERR_INVALID_REQUEST;
2585 /* No need for _rcu here. All reconfiguration is
2586 * strictly serialized on genl_lock(). We are protected against
2587 * concurrent reconfiguration/addition/deletion */
2588 for_each_resource(resource, &drbd_resources) {
2589 for_each_connection(connection, resource) {
2590 if (nla_len(adm_ctx.my_addr) == connection->my_addr_len &&
2591 !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr,
2592 connection->my_addr_len)) {
2593 retcode = ERR_LOCAL_ADDR;
2597 if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len &&
2598 !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr,
2599 connection->peer_addr_len)) {
2600 retcode = ERR_PEER_ADDR;
2606 mutex_lock(&adm_ctx.resource->adm_mutex);
2607 connection = first_connection(adm_ctx.resource);
2608 conn_reconfig_start(connection);
2610 if (connection->cstate > C_STANDALONE) {
2611 retcode = ERR_NET_CONFIGURED;
2615 /* allocation not in the IO path, drbdsetup / netlink process context */
2616 new_net_conf = kzalloc(sizeof(*new_net_conf), GFP_KERNEL);
2617 if (!new_net_conf) {
2618 retcode = ERR_NOMEM;
2622 set_net_conf_defaults(new_net_conf);
2624 err = net_conf_from_attrs(new_net_conf, info);
2625 if (err && err != -ENOMSG) {
2626 retcode = ERR_MANDATORY_TAG;
2627 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2631 retcode = check_net_options(connection, new_net_conf);
2632 if (retcode != NO_ERROR)
2635 retcode = alloc_crypto(&crypto, new_net_conf);
2636 if (retcode != NO_ERROR)
2639 ((char *)new_net_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2641 drbd_flush_workqueue(&connection->sender_work);
2643 mutex_lock(&adm_ctx.resource->conf_update);
2644 old_net_conf = connection->net_conf;
2646 retcode = ERR_NET_CONFIGURED;
2647 mutex_unlock(&adm_ctx.resource->conf_update);
2650 rcu_assign_pointer(connection->net_conf, new_net_conf);
2652 conn_free_crypto(connection);
2653 connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2654 connection->integrity_tfm = crypto.integrity_tfm;
2655 connection->csums_tfm = crypto.csums_tfm;
2656 connection->verify_tfm = crypto.verify_tfm;
2658 connection->my_addr_len = nla_len(adm_ctx.my_addr);
2659 memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len);
2660 connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
2661 memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);
2663 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2667 connection_to_info(&connection_info, connection);
2668 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
2669 mutex_lock(&notification_mutex);
2670 notify_connection_state(NULL, 0, connection, &connection_info, NOTIFY_CREATE | flags);
2671 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2672 struct peer_device_info peer_device_info;
2674 peer_device_to_info(&peer_device_info, peer_device);
2675 flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
2676 notify_peer_device_state(NULL, 0, peer_device, &peer_device_info, NOTIFY_CREATE | flags);
2678 mutex_unlock(&notification_mutex);
2679 mutex_unlock(&adm_ctx.resource->conf_update);
2682 idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2683 struct drbd_device *device = peer_device->device;
2684 device->send_cnt = 0;
2685 device->recv_cnt = 0;
2689 retcode = (enum drbd_ret_code)conn_request_state(connection,
2690 NS(conn, C_UNCONNECTED), CS_VERBOSE);
2692 conn_reconfig_done(connection);
2693 mutex_unlock(&adm_ctx.resource->adm_mutex);
2694 drbd_adm_finish(&adm_ctx, info, retcode);
2698 free_crypto(&crypto);
2699 kfree(new_net_conf);
2701 conn_reconfig_done(connection);
2702 mutex_unlock(&adm_ctx.resource->adm_mutex);
2704 drbd_adm_finish(&adm_ctx, info, retcode);
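/* Request C_DISCONNECTING, retrying with an outdated peer disk or an
 * outdated local disk where the state checks demand it; on success,
 * wait for the receiver thread to exit and force C_STANDALONE as a
 * race breaker. */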
2708 static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force)
2710 enum drbd_state_rv rv;
2712 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
2713 force ? CS_HARD : 0);
2716 case SS_NOTHING_TO_DO:
2718 case SS_ALREADY_STANDALONE:
2720 case SS_PRIMARY_NOP:
2721 /* Our state checking code wants to see the peer outdated. */
2722 rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
2724 if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
2725 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE);
2728 case SS_CW_FAILED_BY_PEER:
2729 /* The peer probably wants to see us outdated. */
2730 rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING,
2731 disk, D_OUTDATED), 0);
2732 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
2733 rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
2738 /* no special handling necessary */
2741 if (rv >= SS_SUCCESS) {
2742 enum drbd_state_rv rv2;
2743 /* No one else can reconfigure the network while I am here.
2744 * The state handling only uses drbd_thread_stop_nowait();
2745 * here we really want to wait until the receiver thread is gone. */
2747 drbd_thread_stop(&connection->receiver);
2749 /* Race breaker. This additional state change request may be
2750 * necessary, if this was a forced disconnect during a receiver
2751 * restart. We may have "killed" the receiver thread just
2752 * after drbd_receiver() returned. Typically, we should be
2753 * C_STANDALONE already by now, and this becomes a no-op. */
2755 rv2 = conn_request_state(connection, NS(conn, C_STANDALONE),
2756 CS_VERBOSE | CS_HARD);
2757 if (rv2 < SS_SUCCESS)
2758 drbd_err(connection,
2759 "unexpected rv2=%d in conn_try_disconnect()\n",
2761 /* Unlike in DRBD 9, the state engine has generated
2762 * NOTIFY_DESTROY events before clearing connection->net_conf. */
2767 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
2769 struct drbd_config_context adm_ctx;
2770 struct disconnect_parms parms;
2771 struct drbd_connection *connection;
2772 enum drbd_state_rv rv;
2773 enum drbd_ret_code retcode;
2776 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
2777 if (!adm_ctx.reply_skb)
2779 if (retcode != NO_ERROR)
2782 connection = adm_ctx.connection;
2783 memset(&parms, 0, sizeof(parms));
2784 if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
2785 err = disconnect_parms_from_attrs(&parms, info);
2787 retcode = ERR_MANDATORY_TAG;
2788 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2793 mutex_lock(&adm_ctx.resource->adm_mutex);
2794 rv = conn_try_disconnect(connection, parms.force_disconnect);
2795 if (rv < SS_SUCCESS)
2796 retcode = (enum drbd_ret_code)rv;
2799 mutex_unlock(&adm_ctx.resource->adm_mutex);
2801 drbd_adm_finish(&adm_ctx, info, retcode);
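/* After an online grow, pick the sync source for the resync of the
 * new storage: the primary if the roles differ, otherwise whoever
 * holds the RESOLVE_CONFLICTS flag. */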
2805 void resync_after_online_grow(struct drbd_device *device)
2807 int iass; /* I am sync source */
2809 drbd_info(device, "Resync of new storage after online grow\n");
2810 if (device->state.role != device->state.peer)
2811 iass = (device->state.role == R_PRIMARY);
2813 iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
2816 drbd_start_resync(device, C_SYNC_SOURCE);
2818 _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
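/* Resize the device (e.g. "drbdsetup resize"), optionally with a new
 * user-requested size and/or a new activity-log layout; if we grew
 * while connected, a resync of the new area is arranged via
 * RESIZE_PENDING. */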
2821 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2823 struct drbd_config_context adm_ctx;
2824 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
2825 struct resize_parms rs;
2826 struct drbd_device *device;
2827 enum drbd_ret_code retcode;
2828 enum determine_dev_size dd;
2829 bool change_al_layout = false;
2830 enum dds_flags ddsf;
2834 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2835 if (!adm_ctx.reply_skb)
2837 if (retcode != NO_ERROR)
2840 mutex_lock(&adm_ctx.resource->adm_mutex);
2841 device = adm_ctx.device;
2842 if (!get_ldev(device)) {
2843 retcode = ERR_NO_DISK;
2847 memset(&rs, 0, sizeof(struct resize_parms));
2848 rs.al_stripes = device->ldev->md.al_stripes;
2849 rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4;
2850 if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
2851 err = resize_parms_from_attrs(&rs, info);
2853 retcode = ERR_MANDATORY_TAG;
2854 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2859 if (device->state.conn > C_CONNECTED) {
2860 retcode = ERR_RESIZE_RESYNC;
2864 if (device->state.role == R_SECONDARY &&
2865 device->state.peer == R_SECONDARY) {
2866 retcode = ERR_NO_PRIMARY;
2870 if (rs.no_resync && first_peer_device(device)->connection->agreed_pro_version < 93) {
2871 retcode = ERR_NEED_APV_93;
2876 u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
2878 if (u_size != (sector_t)rs.resize_size) {
2879 new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
2880 if (!new_disk_conf) {
2881 retcode = ERR_NOMEM;
2886 if (device->ldev->md.al_stripes != rs.al_stripes ||
2887 device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
2888 u32 al_size_k = rs.al_stripes * rs.al_stripe_size;
2890 if (al_size_k > (16 * 1024 * 1024)) {
2891 retcode = ERR_MD_LAYOUT_TOO_BIG;
2895 if (al_size_k < MD_32kB_SECT/2) {
2896 retcode = ERR_MD_LAYOUT_TOO_SMALL;
2900 if (device->state.conn != C_CONNECTED && !rs.resize_force) {
2901 retcode = ERR_MD_LAYOUT_CONNECTED;
2905 change_al_layout = true;
2908 if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev))
2909 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
2911 if (new_disk_conf) {
2912 mutex_lock(&device->resource->conf_update);
2913 old_disk_conf = device->ldev->disk_conf;
2914 *new_disk_conf = *old_disk_conf;
2915 new_disk_conf->disk_size = (sector_t)rs.resize_size;
2916 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
2917 mutex_unlock(&device->resource->conf_update);
2919 kfree(old_disk_conf);
2920 new_disk_conf = NULL;
2923 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
2924 dd = drbd_determine_dev_size(device, ddsf, change_al_layout ? &rs : NULL);
2925 drbd_md_sync(device);
2927 if (dd == DS_ERROR) {
2928 retcode = ERR_NOMEM_BITMAP;
2930 } else if (dd == DS_ERROR_SPACE_MD) {
2931 retcode = ERR_MD_LAYOUT_NO_FIT;
2933 } else if (dd == DS_ERROR_SHRINK) {
2934 retcode = ERR_IMPLICIT_SHRINK;
2938 if (device->state.conn == C_CONNECTED) {
2940 set_bit(RESIZE_PENDING, &device->flags);
2942 drbd_send_uuids(first_peer_device(device));
2943 drbd_send_sizes(first_peer_device(device), 1, ddsf);
2947 mutex_unlock(&adm_ctx.resource->adm_mutex);
2949 drbd_adm_finish(&adm_ctx, info, retcode);
2954 kfree(new_disk_conf);
2958 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
2960 struct drbd_config_context adm_ctx;
2961 enum drbd_ret_code retcode;
2962 struct res_opts res_opts;
2965 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
2966 if (!adm_ctx.reply_skb)
2968 if (retcode != NO_ERROR)
2971 res_opts = adm_ctx.resource->res_opts;
2972 if (should_set_defaults(info))
2973 set_res_opts_defaults(&res_opts);
2975 err = res_opts_from_attrs(&res_opts, info);
2976 if (err && err != -ENOMSG) {
2977 retcode = ERR_MANDATORY_TAG;
2978 drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2982 mutex_lock(&adm_ctx.resource->adm_mutex);
2983 err = set_resource_options(adm_ctx.resource, &res_opts);
2985 retcode = ERR_INVALID_REQUEST;
2987 retcode = ERR_NOMEM;
2989 mutex_unlock(&adm_ctx.resource->adm_mutex);
2992 drbd_adm_finish(&adm_ctx, info, retcode);
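/* Invalidate the local disk: when C_STANDALONE and R_SECONDARY, just
 * go D_INCONSISTENT and set all bitmap bits; otherwise start a full
 * resync handshake as sync target (C_STARTING_SYNC_T). */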
2996 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
2998 struct drbd_config_context adm_ctx;
2999 struct drbd_device *device;
3000 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
3002 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3003 if (!adm_ctx.reply_skb)
3005 if (retcode != NO_ERROR)
3008 device = adm_ctx.device;
3009 if (!get_ldev(device)) {
3010 retcode = ERR_NO_DISK;
3014 mutex_lock(&adm_ctx.resource->adm_mutex);
3016 /* If there is still bitmap IO pending, probably because of a previous
3017 * resync just being finished, wait for it before requesting a new resync.
3018 * Also wait for its after_state_ch(). */
3019 drbd_suspend_io(device);
3020 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
3021 drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
3023 /* If we happen to be C_STANDALONE R_SECONDARY, just change to
3024 * D_INCONSISTENT, and set all bits in the bitmap. Otherwise,
3025 * try to start a resync handshake as sync target for full sync. */
3027 if (device->state.conn == C_STANDALONE && device->state.role == R_SECONDARY) {
3028 retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT));
3029 if (retcode >= SS_SUCCESS) {
3030 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
3031 "set_n_write from invalidate", BM_LOCKED_MASK))
3032 retcode = ERR_IO_MD_DISK;
3035 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
3036 drbd_resume_io(device);
3037 mutex_unlock(&adm_ctx.resource->adm_mutex);
3040 drbd_adm_finish(&adm_ctx, info, retcode);
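/* For orientation: most of the drbd_adm_* handlers in this file follow
 * the same general skeleton (details vary per handler):
 *
 *	retcode = drbd_adm_prepare(&adm_ctx, skb, info, <what we need>);
 *	if (!adm_ctx.reply_skb)
 *		return retcode;
 *	if (retcode != NO_ERROR)
 *		goto out;
 *	mutex_lock(&adm_ctx.resource->adm_mutex);
 *	... do the actual work ...
 *	mutex_unlock(&adm_ctx.resource->adm_mutex);
 * out:
 *	drbd_adm_finish(&adm_ctx, info, retcode);
 *	return 0;
 *
 * drbd_adm_simple_request_state() below is exactly that skeleton
 * around a single drbd_request_state() call. */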
3044 static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
3045 union drbd_state mask, union drbd_state val)
3047 struct drbd_config_context adm_ctx;
3048 enum drbd_ret_code retcode;
3050 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3051 if (!adm_ctx.reply_skb)
3053 if (retcode != NO_ERROR)
3056 mutex_lock(&adm_ctx.resource->adm_mutex);
3057 retcode = drbd_request_state(adm_ctx.device, mask, val);
3058 mutex_unlock(&adm_ctx.resource->adm_mutex);
3060 drbd_adm_finish(&adm_ctx, info, retcode);
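/* Bitmap IO worker: set all bits (full sync) and, while at it,
 * suspend the activity log. */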
3064 static int drbd_bmio_set_susp_al(struct drbd_device *device) __must_hold(local)
3068 rv = drbd_bmio_set_n_write(device);
3069 drbd_suspend_al(device);
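/* Counterpart of drbd_adm_invalidate(): invalidate the peer's disk
 * and become sync source (C_STARTING_SYNC_S) for a full sync. */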
3073 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
3075 struct drbd_config_context adm_ctx;
3076 int retcode; /* drbd_ret_code, drbd_state_rv */
3077 struct drbd_device *device;
3079 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3080 if (!adm_ctx.reply_skb)
3082 if (retcode != NO_ERROR)
3085 device = adm_ctx.device;
3086 if (!get_ldev(device)) {
3087 retcode = ERR_NO_DISK;
3091 mutex_lock(&adm_ctx.resource->adm_mutex);
3093 /* If there is still bitmap IO pending, probably because of a previous
3094 * resync just being finished, wait for it before requesting a new resync.
3095 * Also wait for its after_state_ch(). */
3096 drbd_suspend_io(device);
3097 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
3098 drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
3100 /* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
3101 * in the bitmap. Otherwise, try to start a resync handshake
3102 * as sync source for full sync. */
3104 if (device->state.conn == C_STANDALONE && device->state.role == R_PRIMARY) {
3105 /* The peer will get a resync upon connect anyway. Just make that
3106 into a full resync. */
3107 retcode = drbd_request_state(device, NS(pdsk, D_INCONSISTENT));
3108 if (retcode >= SS_SUCCESS) {
3109 if (drbd_bitmap_io(device, &drbd_bmio_set_susp_al,
3110 "set_n_write from invalidate_peer",
3111 BM_LOCKED_SET_ALLOWED))
3112 retcode = ERR_IO_MD_DISK;
3115 retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
3116 drbd_resume_io(device);
3117 mutex_unlock(&adm_ctx.resource->adm_mutex);
3120 drbd_adm_finish(&adm_ctx, info, retcode);
3124 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
3126 struct drbd_config_context adm_ctx;
3127 enum drbd_ret_code retcode;
3129 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3130 if (!adm_ctx.reply_skb)
3132 if (retcode != NO_ERROR)
3135 mutex_lock(&adm_ctx.resource->adm_mutex);
3136 if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
3137 retcode = ERR_PAUSE_IS_SET;
3138 mutex_unlock(&adm_ctx.resource->adm_mutex);
3140 drbd_adm_finish(&adm_ctx, info, retcode);
3144 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
3146 struct drbd_config_context adm_ctx;
3147 union drbd_dev_state s;
3148 enum drbd_ret_code retcode;
3150 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3151 if (!adm_ctx.reply_skb)
3153 if (retcode != NO_ERROR)
3156 mutex_lock(&adm_ctx.resource->adm_mutex);
3157 if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
3158 s = adm_ctx.device->state;
3159 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
3160 retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
3161 s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
3163 retcode = ERR_PAUSE_IS_CLEAR;
3166 mutex_unlock(&adm_ctx.resource->adm_mutex);
3168 drbd_adm_finish(&adm_ctx, info, retcode);
3172 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
3174 return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
3177 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
3179 struct drbd_config_context adm_ctx;
3180 struct drbd_device *device;
3181 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
3183 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3184 if (!adm_ctx.reply_skb)
3186 if (retcode != NO_ERROR)
3189 mutex_lock(&adm_ctx.resource->adm_mutex);
3190 device = adm_ctx.device;
3191 if (test_bit(NEW_CUR_UUID, &device->flags)) {
3192 if (get_ldev_if_state(device, D_ATTACHING)) {
3193 drbd_uuid_new_current(device);
3196 /* This is effectively a multi-stage "forced down".
3197 * The NEW_CUR_UUID bit is supposedly only set if we
3198 * lost the replication connection, and are configured
3199 * to freeze IO and wait for some fence-peer handler.
3200 * So we still don't have a replication connection.
3201 * And now we don't have a local disk either. After
3202 * resume, we will fail all pending and new IO, because
3203 * we don't have any data anymore. Which means we will
3204 * eventually be able to terminate all users of this
3205 * device, and then take it down. By bumping the
3206 * "effective" data uuid, we make sure that you really
3207 * need to tear down before you reconfigure, we will
3208 * then refuse to re-connect or re-attach (because no
3209 * matching real data uuid exists). */
3212 get_random_bytes(&val, sizeof(u64));
3213 drbd_set_ed_uuid(device, val);
3214 drbd_warn(device, "Resumed without access to data; please tear down before attempting to re-configure.\n");
3216 clear_bit(NEW_CUR_UUID, &device->flags);
3218 drbd_suspend_io(device);
3219 retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
3220 if (retcode == SS_SUCCESS) {
3221 if (device->state.conn < C_CONNECTED)
3222 tl_clear(first_peer_device(device)->connection);
3223 if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED)
3224 tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
3226 drbd_resume_io(device);
3227 mutex_unlock(&adm_ctx.resource->adm_mutex);
3229 drbd_adm_finish(&adm_ctx, info, retcode);
3233 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
3235 return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
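/* Open a DRBD_NLA_CFG_CONTEXT nest on @skb identifying the object:
 * resource name, volume number (if @device is given), and the
 * connection's local/peer addresses (if @connection is given). */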
3238 static int nla_put_drbd_cfg_context(struct sk_buff *skb,
3239 struct drbd_resource *resource,
3240 struct drbd_connection *connection,
3241 struct drbd_device *device)
3244 nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
3246 goto nla_put_failure;
3248 if (device && nla_put_u32(skb, T_ctx_volume, device->vnr))
3249 goto nla_put_failure;
3250 if (nla_put_string(skb, T_ctx_resource_name, resource->name))
3251 goto nla_put_failure;
3253 if (connection->my_addr_len &&
3254 nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr))
3255 goto nla_put_failure;
3256 if (connection->peer_addr_len &&
3257 nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr))
3258 goto nla_put_failure;
3260 nla_nest_end(skb, nla);
3265 nla_nest_cancel(skb, nla);
3270 * The generic netlink dump callbacks are called outside the genl_lock(), so
3271 * they cannot use the simple attribute parsing code which uses global
3274 static struct nlattr *find_cfg_context_attr(const struct nlmsghdr *nlh, int attr)
3276 const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
3277 const int maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
3280 nla = nla_find(nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen),
3281 DRBD_NLA_CFG_CONTEXT);
3284 return drbd_nla_find_nested(maxtype, nla, __nla_type(attr));
3287 static void resource_to_info(struct resource_info *, struct drbd_resource *);
3289 int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb)
3291 struct drbd_genlmsghdr *dh;
3292 struct drbd_resource *resource;
3293 struct resource_info resource_info;
3294 struct resource_statistics resource_statistics;
3299 for_each_resource_rcu(resource, &drbd_resources)
3300 if (resource == (struct drbd_resource *)cb->args[0])
3301 goto found_resource;
3302 err = 0; /* resource was probably deleted */
3305 resource = list_entry(&drbd_resources,
3306 struct drbd_resource, resources);
3309 list_for_each_entry_continue_rcu(resource, &drbd_resources, resources) {
3316 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3317 cb->nlh->nlmsg_seq, &drbd_genl_family,
3318 NLM_F_MULTI, DRBD_ADM_GET_RESOURCES);
3323 dh->ret_code = NO_ERROR;
3324 err = nla_put_drbd_cfg_context(skb, resource, NULL, NULL);
3327 err = res_opts_to_skb(skb, &resource->res_opts, !capable(CAP_SYS_ADMIN));
3330 resource_to_info(&resource_info, resource);
3331 err = resource_info_to_skb(skb, &resource_info, !capable(CAP_SYS_ADMIN));
3334 resource_statistics.res_stat_write_ordering = resource->write_ordering;
3335 err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
3338 cb->args[0] = (long)resource;
3339 genlmsg_end(skb, dh);
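/* Fill @s with a consistent snapshot of the per-device counters,
 * UUID history and congestion state for the statistics reply. */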
3349 static void device_to_statistics(struct device_statistics *s,
3350 struct drbd_device *device)
3352 memset(s, 0, sizeof(*s));
3353 s->dev_upper_blocked = !may_inc_ap_bio(device);
3354 if (get_ldev(device)) {
3355 struct drbd_md *md = &device->ldev->md;
3356 u64 *history_uuids = (u64 *)s->history_uuids;
3357 struct request_queue *q;
3360 spin_lock_irq(&md->uuid_lock);
3361 s->dev_current_uuid = md->uuid[UI_CURRENT];
3362 BUILD_BUG_ON(sizeof(s->history_uuids) < UI_HISTORY_END - UI_HISTORY_START + 1);
3363 for (n = 0; n < UI_HISTORY_END - UI_HISTORY_START + 1; n++)
3364 history_uuids[n] = md->uuid[UI_HISTORY_START + n];
3365 for (; n < HISTORY_UUIDS; n++)
3366 history_uuids[n] = 0;
3367 s->history_uuids_len = HISTORY_UUIDS;
3368 spin_unlock_irq(&md->uuid_lock);
3370 s->dev_disk_flags = md->flags;
3371 q = bdev_get_queue(device->ldev->backing_bdev);
3372 s->dev_lower_blocked =
3373 bdi_congested(&q->backing_dev_info,
3374 (1 << WB_async_congested) |
3375 (1 << WB_sync_congested));
3378 s->dev_size = drbd_get_capacity(device->this_bdev);
3379 s->dev_read = device->read_cnt;
3380 s->dev_write = device->writ_cnt;
3381 s->dev_al_writes = device->al_writ_cnt;
3382 s->dev_bm_writes = device->bm_writ_cnt;
3383 s->dev_upper_pending = atomic_read(&device->ap_bio_cnt);
3384 s->dev_lower_pending = atomic_read(&device->local_cnt);
3385 s->dev_al_suspended = test_bit(AL_SUSPENDED, &device->flags);
3386 s->dev_exposed_data_uuid = device->ed_uuid;
3389 static int put_resource_in_arg0(struct netlink_callback *cb, int holder_nr)
3392 struct drbd_resource *resource =
3393 (struct drbd_resource *)cb->args[0];
3394 kref_put(&resource->kref, drbd_destroy_resource);
3400 int drbd_adm_dump_devices_done(struct netlink_callback *cb)
{
3401 return put_resource_in_arg0(cb, 7);
3404 static void device_to_info(struct device_info *, struct drbd_device *);
3406 int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb)
3408 struct nlattr *resource_filter;
3409 struct drbd_resource *resource;
3410 struct drbd_device *uninitialized_var(device);
3411 int minor, err, retcode;
3412 struct drbd_genlmsghdr *dh;
3413 struct device_info device_info;
3414 struct device_statistics device_statistics;
3415 struct idr *idr_to_search;
3417 resource = (struct drbd_resource *)cb->args[0];
3418 if (!cb->args[0] && !cb->args[1]) {
3419 resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3420 if (resource_filter) {
3421 retcode = ERR_RES_NOT_KNOWN;
3422 resource = drbd_find_resource(nla_data(resource_filter));
3425 cb->args[0] = (long)resource;
3430 minor = cb->args[1];
3431 idr_to_search = resource ? &resource->devices : &drbd_devices;
3432 device = idr_get_next(idr_to_search, &minor);
3437 idr_for_each_entry_continue(idr_to_search, device, minor) {
3439 goto put_result; /* only one iteration */
3442 goto out; /* no more devices */
3445 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3446 cb->nlh->nlmsg_seq, &drbd_genl_family,
3447 NLM_F_MULTI, DRBD_ADM_GET_DEVICES);
3451 dh->ret_code = retcode;
3453 if (retcode == NO_ERROR) {
3454 dh->minor = device->minor;
3455 err = nla_put_drbd_cfg_context(skb, device->resource, NULL, device);
3458 if (get_ldev(device)) {
3459 struct disk_conf *disk_conf =
3460 rcu_dereference(device->ldev->disk_conf);
3462 err = disk_conf_to_skb(skb, disk_conf, !capable(CAP_SYS_ADMIN));
3467 device_to_info(&device_info, device);
3468 err = device_info_to_skb(skb, &device_info, !capable(CAP_SYS_ADMIN));
3472 device_to_statistics(&device_statistics, device);
3473 err = device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
3476 cb->args[1] = minor + 1;
3478 genlmsg_end(skb, dh);
3488 int drbd_adm_dump_connections_done(struct netlink_callback *cb)
3490 return put_resource_in_arg0(cb, 6);
3493 enum { SINGLE_RESOURCE, ITERATE_RESOURCES };
3495 int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb)
3497 struct nlattr *resource_filter;
3498 struct drbd_resource *resource = NULL, *next_resource;
3499 struct drbd_connection *uninitialized_var(connection);
3500 int err = 0, retcode;
3501 struct drbd_genlmsghdr *dh;
3502 struct connection_info connection_info;
3503 struct connection_statistics connection_statistics;
3506 resource = (struct drbd_resource *)cb->args[0];
3508 resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3509 if (resource_filter) {
3510 retcode = ERR_RES_NOT_KNOWN;
3511 resource = drbd_find_resource(nla_data(resource_filter));
3514 cb->args[0] = (long)resource;
3515 cb->args[1] = SINGLE_RESOURCE;
3519 if (list_empty(&drbd_resources))
3521 resource = list_first_entry(&drbd_resources, struct drbd_resource, resources);
3522 kref_get(&resource->kref);
3523 cb->args[0] = (long)resource;
3524 cb->args[1] = ITERATE_RESOURCES;
3529 mutex_lock(&resource->conf_update);
3532 for_each_connection_rcu(connection, resource)
3533 if (connection == (struct drbd_connection *)cb->args[2])
3534 goto found_connection;
3535 /* connection was probably deleted */
3536 goto no_more_connections;
3538 connection = list_entry(&resource->connections, struct drbd_connection, connections);
3541 list_for_each_entry_continue_rcu(connection, &resource->connections, connections) {
3542 if (!has_net_conf(connection))
3545 goto put_result; /* only one iteration */
3548 no_more_connections:
3549 if (cb->args[1] == ITERATE_RESOURCES) {
3550 for_each_resource_rcu(next_resource, &drbd_resources) {
3551 if (next_resource == resource)
3552 goto found_resource;
3554 /* resource was probably deleted */
3559 list_for_each_entry_continue_rcu(next_resource, &drbd_resources, resources) {
3560 mutex_unlock(&resource->conf_update);
3561 kref_put(&resource->kref, drbd_destroy_resource);
3562 resource = next_resource;
3563 kref_get(&resource->kref);
3564 cb->args[0] = (long)resource;
3568 goto out; /* no more resources */
3571 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3572 cb->nlh->nlmsg_seq, &drbd_genl_family,
3573 NLM_F_MULTI, DRBD_ADM_GET_CONNECTIONS);
3577 dh->ret_code = retcode;
3579 if (retcode == NO_ERROR) {
3580 struct net_conf *net_conf;
3582 err = nla_put_drbd_cfg_context(skb, resource, connection, NULL);
3585 net_conf = rcu_dereference(connection->net_conf);
3587 err = net_conf_to_skb(skb, net_conf, !capable(CAP_SYS_ADMIN));
3591 connection_to_info(&connection_info, connection);
3592 err = connection_info_to_skb(skb, &connection_info, !capable(CAP_SYS_ADMIN));
3595 connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
3596 err = connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
3599 cb->args[2] = (long)connection;
3601 genlmsg_end(skb, dh);
3607 mutex_unlock(&resource->conf_update);
3613 enum mdf_peer_flag {
3614 MDF_PEER_CONNECTED = 1 << 0,
3615 MDF_PEER_OUTDATED = 1 << 1,
3616 MDF_PEER_FENCING = 1 << 2,
3617 MDF_PEER_FULL_SYNC = 1 << 3,
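/* Fill @s with per-peer-device statistics.  The out-of-sync and
 * resync-failed values are converted from bitmap bits to units of
 * 512-byte sectors. */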
3620 static void peer_device_to_statistics(struct peer_device_statistics *s,
3621 struct drbd_peer_device *peer_device)
3623 struct drbd_device *device = peer_device->device;
3625 memset(s, 0, sizeof(*s));
3626 s->peer_dev_received = device->recv_cnt;
3627 s->peer_dev_sent = device->send_cnt;
3628 s->peer_dev_pending = atomic_read(&device->ap_pending_cnt) +
3629 atomic_read(&device->rs_pending_cnt);
3630 s->peer_dev_unacked = atomic_read(&device->unacked_cnt);
3631 s->peer_dev_out_of_sync = drbd_bm_total_weight(device) << (BM_BLOCK_SHIFT - 9);
3632 s->peer_dev_resync_failed = device->rs_failed << (BM_BLOCK_SHIFT - 9);
3633 if (get_ldev(device)) {
3634 struct drbd_md *md = &device->ldev->md;
3636 spin_lock_irq(&md->uuid_lock);
3637 s->peer_dev_bitmap_uuid = md->uuid[UI_BITMAP];
3638 spin_unlock_irq(&md->uuid_lock);
3640 (drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND) ?
3641 MDF_PEER_CONNECTED : 0) +
3642 (drbd_md_test_flag(device->ldev, MDF_CONSISTENT) &&
3643 !drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE) ?
3644 MDF_PEER_OUTDATED : 0) +
3645 /* FIXME: MDF_PEER_FENCING? */
3646 (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ?
3647 MDF_PEER_FULL_SYNC : 0);
3652 int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb)
3654 return put_resource_in_arg0(cb, 9);
3657 int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb)
3659 struct nlattr *resource_filter;
3660 struct drbd_resource *resource;
3661 struct drbd_device *uninitialized_var(device);
3662 struct drbd_peer_device *peer_device = NULL;
3663 int minor, err, retcode;
3664 struct drbd_genlmsghdr *dh;
3665 struct idr *idr_to_search;
3667 resource = (struct drbd_resource *)cb->args[0];
3668 if (!cb->args[0] && !cb->args[1]) {
3669 resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3670 if (resource_filter) {
3671 retcode = ERR_RES_NOT_KNOWN;
3672 resource = drbd_find_resource(nla_data(resource_filter));
3676 cb->args[0] = (long)resource;
3680 minor = cb->args[1];
3681 idr_to_search = resource ? &resource->devices : &drbd_devices;
3682 device = idr_find(idr_to_search, minor);
3687 device = idr_get_next(idr_to_search, &minor);
3694 for_each_peer_device(peer_device, device)
3695 if (peer_device == (struct drbd_peer_device *)cb->args[2])
3696 goto found_peer_device;
3697 /* peer device was probably deleted */
3700 /* Make peer_device point to the list head (not the first entry). */
3701 peer_device = list_entry(&device->peer_devices, struct drbd_peer_device, peer_devices);
3704 list_for_each_entry_continue_rcu(peer_device, &device->peer_devices, peer_devices) {
3705 if (!has_net_conf(peer_device->connection))
3708 goto put_result; /* only one iteration */
3713 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3714 cb->nlh->nlmsg_seq, &drbd_genl_family,
3715 NLM_F_MULTI, DRBD_ADM_GET_PEER_DEVICES);
3719 dh->ret_code = retcode;
3721 if (retcode == NO_ERROR) {
3722 struct peer_device_info peer_device_info;
3723 struct peer_device_statistics peer_device_statistics;
3726 err = nla_put_drbd_cfg_context(skb, device->resource, peer_device->connection, device);
3729 peer_device_to_info(&peer_device_info, peer_device);
3730 err = peer_device_info_to_skb(skb, &peer_device_info, !capable(CAP_SYS_ADMIN));
3733 peer_device_to_statistics(&peer_device_statistics, peer_device);
3734 err = peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
3737 cb->args[1] = minor;
3738 cb->args[2] = (long)peer_device;
3740 genlmsg_end(skb, dh);
3750 * Return the connection of @resource if @resource has exactly one connection.
3752 static struct drbd_connection *the_only_connection(struct drbd_resource *resource)
3754 struct list_head *connections = &resource->connections;
3756 if (list_empty(connections) || connections->next->next != connections)
3758 return list_first_entry(&resource->connections, struct drbd_connection, connections);
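/* Serialize the full status of @device into @skb: config context,
 * resource/disk/net options, and a DRBD_NLA_STATE_INFO nest. */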
3761 static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
3762 const struct sib_info *sib)
3764 struct drbd_resource *resource = device->resource;
3765 struct state_info *si = NULL; /* for sizeof(si->member); */
3769 int exclude_sensitive;
3771 /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
3772 * to. So we had better exclude sensitive information.
3774 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
3775 * in the context of the requesting user process. Exclude sensitive
3776 * information, unless current has superuser.
3778 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
3779 * relies on the current implementation of netlink_dump(), which
3780 * executes the dump callback successively from netlink_recvmsg(),
3781 * always in the context of the receiving process */
3782 exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
3784 got_ldev = get_ldev(device);
3786 /* We still need to add connection name and volume number information.
3787 * Minor number is in drbd_genlmsghdr. */
3788 if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device))
3789 goto nla_put_failure;
3791 if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive))
3792 goto nla_put_failure;
3796 struct disk_conf *disk_conf;
3798 disk_conf = rcu_dereference(device->ldev->disk_conf);
3799 err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
3802 struct net_conf *nc;
3804 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
3806 err = net_conf_to_skb(skb, nc, exclude_sensitive);
3810 goto nla_put_failure;
3812 nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
3814 goto nla_put_failure;
3815 if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
3816 nla_put_u32(skb, T_current_state, device->state.i) ||
3817 nla_put_u64_0pad(skb, T_ed_uuid, device->ed_uuid) ||
3818 nla_put_u64_0pad(skb, T_capacity,
3819 drbd_get_capacity(device->this_bdev)) ||
3820 nla_put_u64_0pad(skb, T_send_cnt, device->send_cnt) ||
3821 nla_put_u64_0pad(skb, T_recv_cnt, device->recv_cnt) ||
3822 nla_put_u64_0pad(skb, T_read_cnt, device->read_cnt) ||
3823 nla_put_u64_0pad(skb, T_writ_cnt, device->writ_cnt) ||
3824 nla_put_u64_0pad(skb, T_al_writ_cnt, device->al_writ_cnt) ||
3825 nla_put_u64_0pad(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
3826 nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
3827 nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
3828 nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
3829 goto nla_put_failure;
3834 spin_lock_irq(&device->ldev->md.uuid_lock);
3835 err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
3836 spin_unlock_irq(&device->ldev->md.uuid_lock);
3839 goto nla_put_failure;
3841 if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
3842 nla_put_u64_0pad(skb, T_bits_total, drbd_bm_bits(device)) ||
3843 nla_put_u64_0pad(skb, T_bits_oos,
3844 drbd_bm_total_weight(device)))
3845 goto nla_put_failure;
3846 if (C_SYNC_SOURCE <= device->state.conn &&
3847 C_PAUSED_SYNC_T >= device->state.conn) {
3848 if (nla_put_u64_0pad(skb, T_bits_rs_total,
3849 device->rs_total) ||
3850 nla_put_u64_0pad(skb, T_bits_rs_failed,
3852 goto nla_put_failure;
3857 switch(sib->sib_reason) {
3858 case SIB_SYNC_PROGRESS:
3859 case SIB_GET_STATUS_REPLY:
3861 case SIB_STATE_CHANGE:
3862 if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
3863 nla_put_u32(skb, T_new_state, sib->ns.i))
3864 goto nla_put_failure;
3866 case SIB_HELPER_POST:
3867 if (nla_put_u32(skb, T_helper_exit_code,
3868 sib->helper_exit_code))
3869 goto nla_put_failure;
3871 case SIB_HELPER_PRE:
3872 if (nla_put_string(skb, T_helper, sib->helper_name))
3873 goto nla_put_failure;
3877 nla_nest_end(skb, nla);
3887 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
3889 struct drbd_config_context adm_ctx;
3890 enum drbd_ret_code retcode;
3893 retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3894 if (!adm_ctx.reply_skb)
3896 if (retcode != NO_ERROR)
3899 err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL);
3901 nlmsg_free(adm_ctx.reply_skb);
3905 drbd_adm_finish(&adm_ctx, info, retcode);
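/* Dump callback backing DRBD_ADM_GET_STATUS: emits one volume (or one
 * volume-less resource) per invocation, keeping iterator state in
 * cb->args[]. */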
3909 static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
3911 struct drbd_device *device;
3912 struct drbd_genlmsghdr *dh;
3913 struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
3914 struct drbd_resource *resource = NULL;
3915 struct drbd_resource *tmp;
3916 unsigned volume = cb->args[1];
3918 /* Open-coded, deferred iteration:
3919 * for_each_resource_safe(resource, tmp, &drbd_resources) {
3920 * connection = "first connection of resource or undefined";
3921 * idr_for_each_entry(&resource->devices, device, i) {
3925 * where resource is cb->args[0];
3926 * and i is cb->args[1];
3928 * cb->args[2] indicates if we shall loop over all resources,
3929 * or just dump all volumes of a single resource.
3931 * This may miss entries inserted after this dump started,
3932 * or entries deleted before they are reached.
3934 * We need to make sure the device won't disappear while
3935 * we are looking at it, and revalidate our iterators
3936 * on each iteration.
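
	/*
	 * Summary of the cb->args[] slots used below (illustrative,
	 * derived from the comment above, not additional state):
	 *   cb->args[0]  current struct drbd_resource * (iterator position)
	 *   cb->args[1]  next volume number (idr index) within that resource
	 *   cb->args[2]  if non-zero, restrict the dump to that one resource
	 */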
	/* synchronize with conn_create()/drbd_destroy_connection() */
	rcu_read_lock();
	/* revalidate iterator position */
	for_each_resource_rcu(tmp, &drbd_resources) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			resource = pos;
			break;
		}
		if (tmp == pos) {
			resource = pos;
			break;
		}
	}
	if (resource) {
next_resource:
		device = idr_get_next(&resource->devices, &volume);
		if (!device) {
			/* No more volumes to dump on this resource.
			 * Advance resource iterator. */
			pos = list_entry_rcu(resource->resources.next,
					     struct drbd_resource, resources);
			/* Did we dump any volume of this resource yet? */
			if (volume != 0) {
				/* If we reached the end of the list,
				 * or only a single resource dump was requested,
				 * we are done. */
				if (&pos->resources == &drbd_resources || cb->args[2])
					goto out;
				volume = 0;
				resource = pos;
				goto next_resource;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, &drbd_genl_family,
				NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;

		if (!device) {
			/* This is a connection without a single volume.
			 * Surprisingly enough, it may have a network
			 * configuration. */
			struct drbd_connection *connection;

			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			connection = the_only_connection(resource);
			if (nla_put_drbd_cfg_context(skb, resource, connection, NULL))
				goto cancel;
			if (connection) {
				struct net_conf *nc;

				nc = rcu_dereference(connection->net_conf);
				if (nc && net_conf_to_skb(skb, nc, 1) != 0)
					goto cancel;
			}
			goto done;
		}

		D_ASSERT(device, device->vnr == volume);
		D_ASSERT(device, device->resource == resource);

		dh->minor = device_to_minor(device);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, device, NULL)) {
cancel:
			genlmsg_cancel(skb, dh);
			goto out;
		}
done:
		genlmsg_end(skb, dh);
	}

out:
	rcu_read_unlock();
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == resource) ? volume + 1 : 0;

	/* No more resources/volumes/minors found results in an empty skb.
	 * Which will terminate the dump. */
	return skb->len;
}
/*
 * Request status of all resources, or of all volumes within a single resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb otherwise.
 * Which means we cannot use the family->attrbuf or other such members, because
 * dump is NOT protected by the genl_lock().  During dump, we only have access
 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
 *
 * Once things are setup properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	struct nlattr *nla;
	const char *resource_name;
	struct drbd_resource *resource;
	int maxtype;

	/* Is this a followup call? */
	if (cb->args[0]) {
		/* ... of a single resource dump,
		 * and the resource iterator has been advanced already? */
		if (cb->args[2] && cb->args[2] != cb->args[0])
			return 0; /* DONE. */
		goto dump;
	}

	/* First call (from netlink_dump_start).  We need to figure out
	 * which resource(s) the user wants us to dump. */
	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
		       nlmsg_attrlen(cb->nlh, hdrlen),
		       DRBD_NLA_CFG_CONTEXT);

	/* No explicit context given.  Dump all. */
	if (!nla)
		goto dump;
	maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
	nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
	if (IS_ERR(nla))
		return PTR_ERR(nla);
	/* context given, but no name present? */
	if (!nla)
		return -EINVAL;
	resource_name = nla_data(nla);
	if (!*resource_name)
		return -ENODEV;
	resource = drbd_find_resource(resource_name);
	if (!resource)
		return -ENODEV;

	kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */

	/* prime iterators, and set "filter" mode mark:
	 * only dump this connection. */
	cb->args[0] = (long)resource;
	/* cb->args[1] = 0; passed in this way. */
	cb->args[2] = (long)resource;

dump:
	return get_one_status(skb, cb);
}
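/*
 * Illustrative only (a sketch of what userspace would send, not code
 * from this file): a GET_STATUS dump request without a
 * DRBD_NLA_CFG_CONTEXT attribute walks all resources; adding the
 * context limits the dump to one resource, roughly:
 *
 *	nest = nla_nest_start(msg, DRBD_NLA_CFG_CONTEXT);
 *	nla_put_string(msg, T_ctx_resource_name, "r0");
 *	nla_nest_end(msg, nest);
 *
 * sent with NLM_F_DUMP set on a DRBD_ADM_GET_STATUS request.
 */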
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct timeout_parms tp;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tp.timeout_type =
		adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED :
		UT_DEFAULT;

	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_device *device;
	enum drbd_ret_code retcode;
	struct start_ov_parms parms;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	device = adm_ctx.device;

	/* resume from last known position, if possible */
	parms.ov_start_sector = device->ov_start_sector;
	parms.ov_stop_sector = ULLONG_MAX;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		int err = start_ov_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out;
		}
	}
	mutex_lock(&adm_ctx.resource->adm_mutex);

	/* w_make_ov_request expects position to be aligned */
	device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
	device->ov_stop_sector = parms.ov_stop_sector;
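
	/*
	 * Worked example (assuming the default 4 KiB per bitmap bit, i.e.
	 * BM_SECT_PER_BIT == 8): a requested ov_start_sector of 1234 is
	 * masked down to 1232 == 154 * 8, so online verify starts on a
	 * bitmap-bit boundary.
	 */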
	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	drbd_suspend_io(device);
	wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
	retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
	drbd_resume_io(device);

	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_device *device;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	device = adm_ctx.device;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (device->state.conn == C_CONNECTED &&
	    first_peer_device(device)->connection->agreed_pro_version >= 90 &&
	    device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		drbd_info(device, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (device->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}
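
	/*
	 * Note (for orientation, not new behavior): the skip_initial_sync
	 * path above is what "drbdadm new-current-uuid --clear-bitmap" on a
	 * just-created, connected resource relies on to reach
	 * UpToDate/UpToDate without a full initial resync.
	 */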
	drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			drbd_err(device, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(first_peer_device(device));
			_drbd_uuid_set(device, UI_BITMAP, 0);
			drbd_print_uuids(device, "cleared bitmap UUID");
			spin_lock_irq(&device->resource->req_lock);
			_drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&device->resource->req_lock);
		}
	}

	drbd_md_sync(device);
out_dec:
	put_ldev(device);
out:
	mutex_unlock(device->state_mutex);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out_nolock:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
static enum drbd_ret_code
drbd_check_resource_name(struct drbd_config_context *adm_ctx)
{
	const char *name = adm_ctx->resource_name;
	if (!name || !name[0]) {
		drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}
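/*
 * Example (illustrative): "r0" passes the checks above, while "" is
 * rejected as missing and "a/b" as invalid, since a slash would become
 * an extra path component in a future sysfs/configfs/debugfs name.
 */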
static void resource_to_info(struct resource_info *info,
			     struct drbd_resource *resource)
{
	info->res_role = conn_highest_role(first_connection(resource));
	info->res_susp = resource->susp;
	info->res_susp_nod = resource->susp_nod;
	info->res_susp_fen = resource->susp_fen;
}
int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_connection *connection;
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	set_res_opts_defaults(&res_opts);
	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
		goto out;
	}

	retcode = drbd_check_resource_name(&adm_ctx);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.resource) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info(adm_ctx.reply_skb, "resource exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	/* not yet safe for genl_family.parallel_ops */
	mutex_lock(&resources_mutex);
	connection = conn_create(adm_ctx.resource_name, &res_opts);
	mutex_unlock(&resources_mutex);

	if (connection) {
		struct resource_info resource_info;

		mutex_lock(&notification_mutex);
		resource_to_info(&resource_info, connection->resource);
		notify_resource_state(NULL, 0, connection->resource,
				      &resource_info, NOTIFY_CREATE);
		mutex_unlock(&notification_mutex);
	} else
		retcode = ERR_NOMEM;

out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
static void device_to_info(struct device_info *info,
			   struct drbd_device *device)
{
	info->dev_disk_state = device->state.disk;
}
int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (dh->minor > MINORMASK) {
		drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* drbd_adm_prepare made sure already
	 * that first_peer_device(device)->connection and device->vnr match the request. */
	if (adm_ctx.device) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_OR_VOLUME_EXISTS;
		/* else: still NO_ERROR */
		goto out;
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	retcode = drbd_create_device(&adm_ctx, dh->minor);
	if (retcode == NO_ERROR) {
		struct drbd_device *device;
		struct drbd_peer_device *peer_device;
		struct device_info info;
		unsigned int peer_devices = 0;
		enum drbd_notification_type flags;
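
		/*
		 * Worked example of the NOTIFY_CONTINUES accounting below:
		 * with peer_devices == 2, the device event and the first
		 * peer-device event carry NOTIFY_CONTINUES; for the last
		 * peer-device event "peer_devices--" evaluates to 0, so the
		 * flag is dropped and the multi-part notification ends.
		 */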
		device = minor_to_device(dh->minor);
		for_each_peer_device(peer_device, device) {
			if (!has_net_conf(peer_device->connection))
				continue;
			peer_devices++;
		}

		device_to_info(&info, device);
		mutex_lock(&notification_mutex);
		flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
		notify_device_state(NULL, 0, device, &info, NOTIFY_CREATE | flags);
		for_each_peer_device(peer_device, device) {
			struct peer_device_info peer_device_info;

			if (!has_net_conf(peer_device->connection))
				continue;
			peer_device_to_info(&peer_device_info, peer_device);
			flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
			notify_peer_device_state(NULL, 0, peer_device, &peer_device_info,
						 NOTIFY_CREATE | flags);
		}
		mutex_unlock(&notification_mutex);
	}
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
static enum drbd_ret_code adm_del_minor(struct drbd_device *device)
{
	struct drbd_peer_device *peer_device;

	if (device->state.disk == D_DISKLESS &&
	    /* no need to be device->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    device->state.role == R_SECONDARY) {
		struct drbd_connection *connection =
			first_connection(device->resource);

		_drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
				    CS_VERBOSE + CS_WAIT_COMPLETE);

		/* If the state engine hasn't stopped the sender thread yet, we
		 * need to flush the sender work queue before generating the
		 * DESTROY events here. */
		if (get_t_state(&connection->worker) == RUNNING)
			drbd_flush_workqueue(&connection->sender_work);

		mutex_lock(&notification_mutex);
		for_each_peer_device(peer_device, device) {
			if (!has_net_conf(peer_device->connection))
				continue;
			notify_peer_device_state(NULL, 0, peer_device, NULL,
						 NOTIFY_DESTROY | NOTIFY_CONTINUES);
		}
		notify_device_state(NULL, 0, device, NULL, NOTIFY_DESTROY);
		mutex_unlock(&notification_mutex);

		drbd_delete_device(device);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}
int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&adm_ctx.resource->adm_mutex);
	retcode = adm_del_minor(adm_ctx.device);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
static int adm_del_resource(struct drbd_resource *resource)
{
	struct drbd_connection *connection;

	for_each_connection(connection, resource) {
		if (connection->cstate > C_STANDALONE)
			return ERR_NET_CONFIGURED;
	}
	if (!idr_is_empty(&resource->devices))
		return ERR_RES_IN_USE;

	/* The state engine has stopped the sender thread, so we don't
	 * need to flush the sender work queue before generating the
	 * DESTROY event here. */
	mutex_lock(&notification_mutex);
	notify_resource_state(NULL, 0, resource, NULL, NOTIFY_DESTROY);
	mutex_unlock(&notification_mutex);

	mutex_lock(&resources_mutex);
	list_del_rcu(&resource->resources);
	mutex_unlock(&resources_mutex);
	/* Make sure all threads have actually stopped: state handling only
	 * does drbd_thread_stop_nowait(). */
	list_for_each_entry(connection, &resource->connections, connections)
		drbd_thread_stop(&connection->worker);
	synchronize_rcu();
	drbd_free_resource(resource);
	return NO_ERROR;
}
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	struct drbd_device *device;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
	unsigned i;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	resource = adm_ctx.resource;
	mutex_lock(&resource->adm_mutex);
	/* demote */
	for_each_connection(connection, resource) {
		struct drbd_peer_device *peer_device;

		idr_for_each_entry(&connection->peer_devices, peer_device, i) {
			retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
			if (retcode < SS_SUCCESS) {
				drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote");
				goto out;
			}
		}

		retcode = conn_try_disconnect(connection, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect");
			goto out;
		}
	}

	/* detach */
	idr_for_each_entry(&resource->devices, device, i) {
		retcode = adm_detach(device, 0);
		if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
			drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach");
			goto out;
		}
	}

	/* delete volumes */
	idr_for_each_entry(&resource->devices, device, i) {
		retcode = adm_del_minor(device);
		if (retcode != NO_ERROR) {
			/* "can not happen" */
			drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume");
			goto out;
		}
	}

	retcode = adm_del_resource(resource);
out:
	mutex_unlock(&resource->adm_mutex);
finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_resource *resource;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;
	resource = adm_ctx.resource;

	mutex_lock(&resource->adm_mutex);
	retcode = adm_del_resource(resource);
	mutex_unlock(&resource->adm_mutex);
finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
{
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyways. */
		goto nla_put_failure;
	d_out->minor = device_to_minor(device);
	d_out->ret_code = NO_ERROR;

	if (nla_put_status_info(msg, device, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, GFP_NOWAIT);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	drbd_err(device, "Error %d while broadcasting event. "
			 "Event seq:%u sib_reason:%u\n",
		 err, seq, sib->sib_reason);
}
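/*
 * Note: -ESRCH from netlink_broadcast() just means "no listeners on the
 * events multicast group"; drbd_bcast_event() above deliberately ignores
 * it, so event broadcasting is best effort.
 */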
static int nla_put_notification_header(struct sk_buff *msg,
				       enum drbd_notification_type type)
{
	struct drbd_notification_header nh = {
		.nh_type = type,
	};

	return drbd_notification_header_to_skb(msg, &nh, true);
}
int notify_resource_state(struct sk_buff *skb,
			  unsigned int seq,
			  struct drbd_resource *resource,
			  struct resource_info *resource_info,
			  enum drbd_notification_type type)
{
	struct resource_statistics resource_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_RESOURCE_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, resource, NULL, NULL) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     resource_info_to_skb(skb, resource_info, true)))
		goto nla_put_failure;
	resource_statistics.res_stat_write_ordering = resource->write_ordering;
	err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
	if (err)
		goto nla_put_failure;
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return 0;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
	return err;
}
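/*
 * The notify_*_state() helpers above and below share one calling
 * convention: with skb == NULL they allocate a fresh message and
 * multicast it to the events group; with a caller-supplied skb (as in
 * the initial state dump) they only append to that buffer and skip the
 * multicast.
 */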
int notify_device_state(struct sk_buff *skb,
			unsigned int seq,
			struct drbd_device *device,
			struct device_info *device_info,
			enum drbd_notification_type type)
{
	struct device_statistics device_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_DEVICE_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = device->minor;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, device->resource, NULL, device) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     device_info_to_skb(skb, device_info, true)))
		goto nla_put_failure;
	device_to_statistics(&device_statistics, device);
	device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return 0;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
	return err;
}
int notify_connection_state(struct sk_buff *skb,
			    unsigned int seq,
			    struct drbd_connection *connection,
			    struct connection_info *connection_info,
			    enum drbd_notification_type type)
{
	struct connection_statistics connection_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_CONNECTION_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, connection->resource, connection, NULL) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     connection_info_to_skb(skb, connection_info, true)))
		goto nla_put_failure;
	connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
	connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return 0;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
	return err;
}
int notify_peer_device_state(struct sk_buff *skb,
			     unsigned int seq,
			     struct drbd_peer_device *peer_device,
			     struct peer_device_info *peer_device_info,
			     enum drbd_notification_type type)
{
	struct peer_device_statistics peer_device_statistics;
	struct drbd_resource *resource = peer_device->device->resource;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_PEER_DEVICE_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, resource, peer_device->connection, peer_device->device) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     peer_device_info_to_skb(skb, peer_device_info, true)))
		goto nla_put_failure;
	peer_device_to_statistics(&peer_device_statistics, peer_device);
	peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return 0;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
	return err;
}
void notify_helper(enum drbd_notification_type type,
		   struct drbd_device *device, struct drbd_connection *connection,
		   const char *name, int status)
{
	struct drbd_resource *resource = device ? device->resource : connection->resource;
	struct drbd_helper_info helper_info;
	unsigned int seq = atomic_inc_return(&notify_genl_seq);
	struct sk_buff *skb = NULL;
	struct drbd_genlmsghdr *dh;
	int err;

	strlcpy(helper_info.helper_name, name, sizeof(helper_info.helper_name));
	helper_info.helper_name_len = min(strlen(name), sizeof(helper_info.helper_name));
	helper_info.helper_status = status;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	err = -ENOMEM;
	if (!skb)
		goto fail;

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_HELPER);
	if (!dh)
		goto fail;
	dh->minor = device ? device->minor : -1;
	dh->ret_code = NO_ERROR;
	mutex_lock(&notification_mutex);
	if (nla_put_drbd_cfg_context(skb, resource, connection, device) ||
	    nla_put_notification_header(skb, type) ||
	    drbd_helper_info_to_skb(skb, &helper_info, true))
		goto unlock_fail;
	genlmsg_end(skb, dh);
	err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
	skb = NULL;
	/* skb has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto unlock_fail;
	mutex_unlock(&notification_mutex);
	return;

unlock_fail:
	mutex_unlock(&notification_mutex);
fail:
	nlmsg_free(skb);
	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}
static int notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
{
	struct drbd_genlmsghdr *dh;
	int err;

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_INITIAL_STATE_DONE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_notification_header(skb, NOTIFY_EXISTS))
		goto nla_put_failure;
	genlmsg_end(skb, dh);
	return 0;

nla_put_failure:
	nlmsg_free(skb);
	pr_err("Error %d sending event. Event seq:%u\n", err, seq);
	return err;
}
static void free_state_changes(struct list_head *list)
{
	while (!list_empty(list)) {
		struct drbd_state_change *state_change =
			list_first_entry(list, struct drbd_state_change, list);
		list_del(&state_change->list);
		forget_state_change(state_change);
	}
}
static unsigned int notifications_for_state_change(struct drbd_state_change *state_change)
{
	return 1 +
	       state_change->n_connections +
	       state_change->n_devices +
	       state_change->n_devices * state_change->n_connections;
}
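/*
 * Example: a state change on a resource with 2 connections and 3 devices
 * accounts for 1 + 2 + 3 + 3*2 = 12 notifications: one for the resource,
 * one per connection, one per device, and one per peer device.
 */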
static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_state_change *state_change = (struct drbd_state_change *)cb->args[0];
	unsigned int seq = cb->args[2];
	unsigned int n;
	enum drbd_notification_type flags = 0;
	int err = 0;

	/* There is no need for taking notification_mutex here:  it doesn't
	   matter if the initial state events mix with later state change
	   events; we can always tell the events apart by the NOTIFY_EXISTS
	   flag. */

	cb->args[5]--;
	if (cb->args[5] == 1) {
		err = notify_initial_state_done(skb, seq);
		goto out;
	}
	n = cb->args[4]++;
	if (cb->args[4] < cb->args[3])
		flags |= NOTIFY_CONTINUES;
	if (n < 1) {
		err = notify_resource_state_change(skb, seq, state_change->resource,
						   NOTIFY_EXISTS | flags);
		goto next;
	}
	n--;
	if (n < state_change->n_connections) {
		err = notify_connection_state_change(skb, seq, &state_change->connections[n],
						     NOTIFY_EXISTS | flags);
		goto next;
	}
	n -= state_change->n_connections;
	if (n < state_change->n_devices) {
		err = notify_device_state_change(skb, seq, &state_change->devices[n],
						 NOTIFY_EXISTS | flags);
		goto next;
	}
	n -= state_change->n_devices;
	if (n < state_change->n_devices * state_change->n_connections) {
		err = notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
						      NOTIFY_EXISTS | flags);
		goto next;
	}

next:
	if (cb->args[4] == cb->args[3]) {
		struct drbd_state_change *next_state_change =
			list_entry(state_change->list.next,
				   struct drbd_state_change, list);
		cb->args[0] = (long)next_state_change;
		cb->args[3] = notifications_for_state_change(next_state_change);
		cb->args[4] = 0;
	}
out:
	return err;
}
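/*
 * Summary of the cb->args[] slots used by the initial-state dump above
 * (illustrative, derived from the code): args[0] the current
 * drbd_state_change, args[2] the netlink sequence number, args[3] the
 * number of notifications in the current state change, args[4] the index
 * of the next one to emit, and args[5] the notifications still to go,
 * plus sentinels for the final "initial state done" message and cleanup.
 */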
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_resource *resource;
	LIST_HEAD(head);

	if (cb->args[5] >= 1) {
		if (cb->args[5] > 1)
			return get_initial_state(skb, cb);
		if (cb->args[0]) {
			struct drbd_state_change *state_change =
				(struct drbd_state_change *)cb->args[0];

			/* connect list to head */
			list_add(&head, &state_change->list);
			free_state_changes(&head);
		}
		return 0;
	}

	cb->args[5] = 2;  /* number of iterations */
	mutex_lock(&resources_mutex);
	for_each_resource(resource, &drbd_resources) {
		struct drbd_state_change *state_change;

		state_change = remember_old_state(resource, GFP_KERNEL);
		if (!state_change) {
			if (!list_empty(&head))
				free_state_changes(&head);
			mutex_unlock(&resources_mutex);
			return -ENOMEM;
		}
		copy_old_to_new_state_change(state_change);
		list_add_tail(&state_change->list, &head);
		cb->args[5] += notifications_for_state_change(state_change);
	}
	mutex_unlock(&resources_mutex);

	if (!list_empty(&head)) {
		struct drbd_state_change *state_change =
			list_entry(head.next, struct drbd_state_change, list);
		cb->args[0] = (long)state_change;
		cb->args[3] = notifications_for_state_change(state_change);
		list_del(&head);	/* detach list from head */
	}

	cb->args[2] = cb->nlh->nlmsg_seq;
	return get_initial_state(skb, cb);
}