/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/drbd.h>
#include <linux/uaccess.h>
#include <asm/types.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
#include "drbd_vli.h"
#include "drbd_debugfs.h"
static DEFINE_MUTEX(drbd_main_mutex);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static void drbd_release(struct gendisk *gd, fmode_t mode);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_work *w, int unused);
MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
#include <linux/moduleparam.h>
/* thanks to these macros, if compiled into the kernel (not-module),
 * these become boot parameters (e.g., drbd.minor_count) */
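/*
 * Illustration (not part of the original source): with the macros below,
 * the same knob can be set either way, assuming the module name "drbd":
 *
 *   loaded as a module:    modprobe drbd minor_count=8
 *   built into the kernel: drbd.minor_count=8 on the kernel command line
 */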
#ifdef CONFIG_DRBD_FAULT_INJECTION
int drbd_enable_faults;
int drbd_fault_rate;
static int drbd_fault_count;
static int drbd_fault_devs;
/* bitmap of enabled faults */
module_param_named(enable_faults, drbd_enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param_named(fault_rate, drbd_fault_rate, int, 0664);
/* count of faults inserted */
module_param_named(fault_count, drbd_fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param_named(fault_devs, drbd_fault_devs, int, 0644);
#endif
/* module parameters we can keep static */
static bool drbd_allow_oos; /* allow_open_on_secondary */
static bool drbd_disable_sendpage;
MODULE_PARM_DESC(allow_oos, "DONT USE!");
module_param_named(allow_oos, drbd_allow_oos, bool, 0);
module_param_named(disable_sendpage, drbd_disable_sendpage, bool, 0644);
/* module parameters we share */
int drbd_proc_details; /* Detail level in proc drbd */
module_param_named(proc_details, drbd_proc_details, int, 0644);
/* module parameters shared with defaults */
unsigned int drbd_minor_count = DRBD_MINOR_COUNT_DEF;
/* Module parameter for setting the user mode helper program
 * to run.  Default is /sbin/drbdadm */
char drbd_usermode_helper[80] = "/sbin/drbdadm";
module_param_named(minor_count, drbd_minor_count, uint, 0444);
module_param_string(usermode_helper, drbd_usermode_helper, sizeof(drbd_usermode_helper), 0644);
/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr drbd_devices;
struct list_head drbd_resources;
struct mutex resources_mutex;
struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
mempool_t *drbd_md_io_page_pool;
struct bio_set *drbd_md_io_bio_set;
struct bio_set *drbd_io_bio_set;
/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a singly linked list, the next pointer is the private
	 member of struct page.  (A minimal sketch of the scheme follows
	 below these declarations.)
 */
struct page *drbd_pp_pool;
spinlock_t drbd_pp_lock;
wait_queue_head_t drbd_pp_wait;
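/*
 * A minimal sketch of the scheme described above, assuming the pool is
 * only touched with drbd_pp_lock held.  These example_* helpers are
 * illustrative only and are not part of the driver.
 */
static inline void example_pp_push(struct page **pool, struct page *page)
{
	/* chain the old head behind the new page via the private member */
	set_page_private(page, (unsigned long)*pool);
	*pool = page;
}

static inline struct page *example_pp_pop(struct page **pool)
{
	struct page *page = *pool;

	if (page)
		*pool = (struct page *)page_private(page);
	return page;
}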
DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner		= THIS_MODULE,
	.open		= drbd_open,
	.release	= drbd_release,
};
struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
	struct bio *bio;

	if (!drbd_md_io_bio_set)
		return bio_alloc(gfp_mask, 1);

	bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
	if (!bio)
		return NULL;
	return bio;
}
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works.
 */
int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&device->local_cnt);
	io_allowed = (device->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&device->local_cnt))
			wake_up(&device->misc_wait);
	}
	return io_allowed;
}
/**
 * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
 * @connection:	DRBD connection.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * epoch of not yet barrier-acked requests, this function will cause a
 * termination of the connection.
 */
void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_request *r;
	struct drbd_request *req = NULL, *tmp = NULL;
	int expect_epoch = 0;
	int expect_size = 0;

	spin_lock_irq(&connection->resource->req_lock);

	/* find oldest not yet barrier-acked write request,
	 * count writes in its epoch. */
	list_for_each_entry(r, &connection->transfer_log, tl_requests) {
		const unsigned s = r->rq_state;

		if (!(s & RQ_NET_MASK))
			continue;
		if (!req) {
			req = r;
			expect_epoch = req->epoch;
		}
		if (r->epoch != expect_epoch)
			break;
		/* if (s & RQ_DONE): not expected */
		/* if (!(s & RQ_NET_MASK)): not expected */
		expect_size++;
	}
	/* first some paranoia code */
	if (req == NULL) {
		drbd_err(connection, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			 barrier_nr);
		goto bail;
	}
	if (expect_epoch != barrier_nr) {
		drbd_err(connection, "BAD! BarrierAck #%u received, expected #%u!\n",
			 barrier_nr, expect_epoch);
		goto bail;
	}
	if (expect_size != set_size) {
		drbd_err(connection, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			 barrier_nr, set_size, expect_size);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch. */
	/* this extra list walk restart is paranoia,
	 * to catch requests being barrier-acked "unexpectedly".
	 * It usually should find the same req again, or some READ preceding it. */
	list_for_each_entry(req, &connection->transfer_log, tl_requests)
		if (req->epoch == expect_epoch) {
			tmp = req;
			break;
		}
	req = list_prepare_entry(tmp, &connection->transfer_log, tl_requests);
	list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
		if (req->epoch != expect_epoch)
			break;
		_req_mod(req, BARRIER_ACKED);
	}
	spin_unlock_irq(&connection->resource->req_lock);

	return;

bail:
	spin_unlock_irq(&connection->resource->req_lock);
	conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @connection:	DRBD connection to operate on.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
/* must hold resource->req_lock */
void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
	struct drbd_request *req, *r;

	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
		_req_mod(req, what);
}

void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
	spin_lock_irq(&connection->resource->req_lock);
	_tl_restart(connection, what);
	spin_unlock_irq(&connection->resource->req_lock);
}
/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @connection:	DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_connection *connection)
{
	tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
}
/**
 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
 * @device:	DRBD device.
 */
void tl_abort_disk_io(struct drbd_device *device)
{
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct drbd_request *req, *r;

	spin_lock_irq(&connection->resource->req_lock);
	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
		if (!(req->rq_state & RQ_LOCAL_PENDING))
			continue;
		if (req->device != device)
			continue;
		_req_mod(req, ABORT_DISK_IO);
	}
	spin_unlock_irq(&connection->resource->req_lock);
}
static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_resource *resource = thi->resource;
	unsigned long flags;
	int retval;

	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
		 thi->name[0], thi->resource->name);

	allow_kernel_signal(DRBD_SIGKILL);
	allow_kernel_signal(SIGXCPU);
restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "EXITING", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "RESTARTING" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees EXITING, and can remap to RESTARTING,
	 * or thread_start sees NONE, and can proceed as normal.
	 */

	if (thi->t_state == RESTARTING) {
		drbd_info(resource, "Restarting %s thread\n", thi->name);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = NONE;
	smp_mb();
	complete_all(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	drbd_info(resource, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */

	if (thi->connection)
		kref_put(&thi->connection->kref, drbd_destroy_connection);
	kref_put(&resource->kref, drbd_destroy_resource);
	module_put(THIS_MODULE);
	return retval;
}
static void drbd_thread_init(struct drbd_resource *resource, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), const char *name)
{
	spin_lock_init(&thi->t_lock);
	thi->task    = NULL;
	thi->t_state = NONE;
	thi->function = func;
	thi->resource = resource;
	thi->connection = NULL;
	thi->name = name;
}
int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_resource *resource = thi->resource;
	struct task_struct *nt;
	unsigned long flags;

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case NONE:
		drbd_info(resource, "Starting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			drbd_err(resource, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return false;
		}

		kref_get(&resource->kref);
		if (thi->connection)
			kref_get(&thi->connection->kref);

		init_completion(&thi->stop);
		thi->reset_cpu_mask = 1;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd_%c_%s", thi->name[0], thi->resource->name);

		if (IS_ERR(nt)) {
			drbd_err(resource, "Couldn't start thread\n");

			if (thi->connection)
				kref_put(&thi->connection->kref, drbd_destroy_connection);
			kref_put(&resource->kref, drbd_destroy_resource);
			module_put(THIS_MODULE);
			return false;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case EXITING:
		thi->t_state = RESTARTING;
		drbd_info(resource, "Restarting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);
		/* fall through */
	case RUNNING:
	case RESTARTING:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return true;
}
void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == NONE) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		smp_mb();
		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}
int conn_lowest_minor(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr = 0, minor = -1;

	rcu_read_lock();
	peer_device = idr_get_next(&connection->peer_devices, &vnr);
	if (peer_device)
		minor = device_to_minor(peer_device->device);
	rcu_read_unlock();
	return minor;
}
#ifdef CONFIG_SMP
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 *
 * Forces all threads of a resource onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by the user's configuration.
 */
static void drbd_calc_cpu_mask(cpumask_var_t *cpu_mask)
{
	unsigned int *resources_per_cpu, min_index = ~0;

	resources_per_cpu = kzalloc(nr_cpu_ids * sizeof(*resources_per_cpu), GFP_KERNEL);
	if (resources_per_cpu) {
		struct drbd_resource *resource;
		unsigned int cpu, min = ~0;

		rcu_read_lock();
		for_each_resource_rcu(resource, &drbd_resources) {
			for_each_cpu(cpu, resource->cpu_mask)
				resources_per_cpu[cpu]++;
		}
		rcu_read_unlock();
		for_each_online_cpu(cpu) {
			if (resources_per_cpu[cpu] < min) {
				min = resources_per_cpu[cpu];
				min_index = cpu;
			}
		}
		kfree(resources_per_cpu);
	}
	if (min_index == ~0) {
		cpumask_setall(*cpu_mask);
		return;
	}
	cpumask_set_cpu(min_index, *cpu_mask);
}
/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi:	drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
	struct drbd_resource *resource = thi->resource;
	struct task_struct *p = current;

	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, resource->cpu_mask);
}
#else
#define drbd_calc_cpu_mask(A) ({})
#define drbd_thread_current_set_cpu(A) ({})
#endif
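/*
 * Hypothetical usage sketch (not a function of this file): the calling
 * convention documented above is that every long-running DRBD thread
 * re-applies its CPU mask at the top of its main loop, so a mask change
 * takes effect on the next iteration without any locking:
 *
 *	while (get_t_state(thi) == RUNNING) {
 *		drbd_thread_current_set_cpu(thi);
 *		// ... handle one work item / packet ...
 *	}
 */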
/**
 * drbd_header_size  -  size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures. (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_connection *connection)
{
	if (connection->agreed_pro_version >= 100) {
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
		return sizeof(struct p_header100);
	} else {
		BUILD_BUG_ON(sizeof(struct p_header80) !=
			     sizeof(struct p_header95));
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
		return sizeof(struct p_header80);
	}
}
static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size);
	return sizeof(struct p_header80);
}

static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(size);
	return sizeof(struct p_header95);
}

static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
				      int size, int vnr)
{
	h->magic = cpu_to_be32(DRBD_MAGIC_100);
	h->volume = cpu_to_be16(vnr);
	h->command = cpu_to_be16(cmd);
	h->length = cpu_to_be32(size);
	h->pad = 0;
	return sizeof(struct p_header100);
}
static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
				   void *buffer, enum drbd_packet cmd, int size)
{
	if (connection->agreed_pro_version >= 100)
		return prepare_header100(buffer, cmd, size, vnr);
	else if (connection->agreed_pro_version >= 95 &&
		 size > DRBD_MAX_SIZE_H80_PACKET)
		return prepare_header95(buffer, cmd, size);
	else
		return prepare_header80(buffer, cmd, size);
}
static void *__conn_prepare_command(struct drbd_connection *connection,
				    struct drbd_socket *sock)
{
	if (!sock->socket)
		return NULL;
	return sock->sbuf + drbd_header_size(connection);
}

void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socket *sock)
{
	void *p;

	mutex_lock(&sock->mutex);
	p = __conn_prepare_command(connection, sock);
	if (!p)
		mutex_unlock(&sock->mutex);

	return p;
}

void *drbd_prepare_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock)
{
	return conn_prepare_command(peer_device->connection, sock);
}
static int __send_command(struct drbd_connection *connection, int vnr,
			  struct drbd_socket *sock, enum drbd_packet cmd,
			  unsigned int header_size, void *data,
			  unsigned int size)
{
	int msg_flags;
	int err;

	/*
	 * Called with @data == NULL and the size of the data blocks in @size
	 * for commands that send data blocks. For those commands, omit the
	 * MSG_MORE flag: this will increase the likelihood that data blocks
	 * which are page aligned on the sender will end up page aligned on the
	 * receiving side.
	 */
	msg_flags = data ? MSG_MORE : 0;

	header_size += prepare_header(connection, vnr, sock->sbuf, cmd,
				      header_size + size);
	err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size,
			    msg_flags);
	if (!err && data)
		err = drbd_send_all(connection, sock->socket, data, size, 0);
	/* DRBD protocol "pings" are latency critical.
	 * This is supposed to trigger tcp_push_pending_frames() */
	if (!err && (cmd == P_PING || cmd == P_PING_ACK))
		drbd_tcp_nodelay(sock->socket);

	return err;
}
static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
			       enum drbd_packet cmd, unsigned int header_size,
			       void *data, unsigned int size)
{
	return __send_command(connection, 0, sock, cmd, header_size, data, size);
}

int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __conn_send_command(connection, sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __send_command(peer_device->connection, peer_device->device->vnr,
			     sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}
int drbd_send_ping(struct drbd_connection *connection)
{
	struct drbd_socket *sock;

	sock = &connection->meta;
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
}

int drbd_send_ping_ack(struct drbd_connection *connection)
{
	struct drbd_socket *sock;

	sock = &connection->meta;
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, P_PING_ACK, 0, NULL, 0);
}
int drbd_send_sync_param(struct drbd_peer_device *peer_device)
{
	struct drbd_socket *sock;
	struct p_rs_param_95 *p;
	int size;
	const int apv = peer_device->connection->agreed_pro_version;
	enum drbd_packet cmd;
	struct net_conf *nc;
	struct disk_conf *dc;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(peer_device->connection->net_conf);

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(nc->verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (get_ldev(peer_device->device)) {
		dc = rcu_dereference(peer_device->device->ldev->disk_conf);
		p->resync_rate = cpu_to_be32(dc->resync_rate);
		p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(dc->c_delay_target);
		p->c_fill_target = cpu_to_be32(dc->c_fill_target);
		p->c_max_rate = cpu_to_be32(dc->c_max_rate);
		put_ldev(peer_device->device);
	} else {
		p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
	}

	if (apv >= 88)
		strcpy(p->verify_alg, nc->verify_alg);
	if (apv >= 89)
		strcpy(p->csums_alg, nc->csums_alg);
	rcu_read_unlock();

	return drbd_send_command(peer_device, sock, cmd, size, NULL, 0);
}
int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_protocol *p;
	struct net_conf *nc;
	int size, cf;

	sock = &connection->data;
	p = __conn_prepare_command(connection, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);

	if (nc->tentative && connection->agreed_pro_version < 92) {
		rcu_read_unlock();
		drbd_err(connection, "--dry-run is not supported by peer");
		return -EOPNOTSUPP;
	}

	size = sizeof(*p);
	if (connection->agreed_pro_version >= 87)
		size += strlen(nc->integrity_alg) + 1;

	p->protocol      = cpu_to_be32(nc->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
	p->two_primaries = cpu_to_be32(nc->two_primaries);
	cf = 0;
	if (nc->discard_my_data)
		cf |= CF_DISCARD_MY_DATA;
	if (nc->tentative)
		cf |= CF_DRY_RUN;
	p->conn_flags    = cpu_to_be32(cf);

	if (connection->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, nc->integrity_alg);
	rcu_read_unlock();

	return __conn_send_command(connection, sock, cmd, size, NULL, 0);
}
int drbd_send_protocol(struct drbd_connection *connection)
{
	int err;

	mutex_lock(&connection->data.mutex);
	err = __drbd_send_protocol(connection, P_PROTOCOL);
	mutex_unlock(&connection->data.mutex);

	return err;
}
static int _drbd_send_uuids(struct drbd_peer_device *peer_device, u64 uuid_flags)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_uuids *p;
	int i;

	if (!get_ldev_if_state(device, D_NEGOTIATING))
		return 0;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p) {
		put_ldev(device);
		return -EIO;
	}
	spin_lock_irq(&device->ldev->md.uuid_lock);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
	spin_unlock_irq(&device->ldev->md.uuid_lock);

	device->comm_bm_set = drbd_bm_total_weight(device);
	p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
	rcu_read_lock();
	uuid_flags |= rcu_dereference(peer_device->connection->net_conf)->discard_my_data ? 1 : 0;
	rcu_read_unlock();
	uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
	uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(device);
	return drbd_send_command(peer_device, sock, P_UUIDS, sizeof(*p), NULL, 0);
}
int drbd_send_uuids(struct drbd_peer_device *peer_device)
{
	return _drbd_send_uuids(peer_device, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *peer_device)
{
	return _drbd_send_uuids(peer_device, 8);
}
void drbd_print_uuids(struct drbd_device *device, const char *text)
{
	if (get_ldev_if_state(device, D_NEGOTIATING)) {
		u64 *uuid = device->ldev->md.uuid;

		drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n",
			  text,
			  (unsigned long long)uuid[UI_CURRENT],
			  (unsigned long long)uuid[UI_BITMAP],
			  (unsigned long long)uuid[UI_HISTORY_START],
			  (unsigned long long)uuid[UI_HISTORY_END]);
		put_ldev(device);
	} else {
		drbd_info(device, "%s effective data uuid: %016llX\n",
			  text,
			  (unsigned long long)device->ed_uuid);
	}
}
void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_rs_uuid *p;
	u64 uuid;

	D_ASSERT(device, device->state.disk == D_UP_TO_DATE);

	uuid = device->ldev->md.uuid[UI_BITMAP];
	if (uuid && uuid != UUID_JUST_CREATED)
		uuid = uuid + UUID_NEW_BM_OFFSET;
	else
		get_random_bytes(&uuid, sizeof(u64));
	drbd_uuid_set(device, UI_BITMAP, uuid);
	drbd_print_uuids(device, "updated sync UUID");
	drbd_md_sync(device);

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (p) {
		p->uuid = cpu_to_be64(uuid);
		drbd_send_command(peer_device, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
	}
}
/* communicated if (agreed_features & DRBD_FF_WSAME) */
static void
assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p,
		    struct request_queue *q)
{
	if (q) {
		p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
		p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
		p->qlim->alignment_offset = cpu_to_be32(queue_alignment_offset(q));
		p->qlim->io_min = cpu_to_be32(queue_io_min(q));
		p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
		p->qlim->discard_enabled = blk_queue_discard(q);
		p->qlim->write_same_capable = !!q->limits.max_write_same_sectors;
	} else {
		q = device->rq_queue;
		p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
		p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
		p->qlim->alignment_offset = 0;
		p->qlim->io_min = cpu_to_be32(queue_io_min(q));
		p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
		p->qlim->discard_enabled = 0;
		p->qlim->write_same_capable = 0;
	}
}
int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_sizes *p;
	sector_t d_size, u_size;
	int q_order_type;
	unsigned int max_bio_size;
	unsigned int packet_size;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;

	packet_size = sizeof(*p);
	if (peer_device->connection->agreed_features & DRBD_FF_WSAME)
		packet_size += sizeof(p->qlim[0]);

	memset(p, 0, packet_size);
	if (get_ldev_if_state(device, D_NEGOTIATING)) {
		struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);

		d_size = drbd_get_max_capacity(device->ldev);
		rcu_read_lock();
		u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
		rcu_read_unlock();
		q_order_type = drbd_queue_order_type(device);
		max_bio_size = queue_max_hw_sectors(q) << 9;
		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
		assign_p_sizes_qlim(device, p, q);
		put_ldev(device);
	} else {
		d_size = 0;
		u_size = 0;
		q_order_type = QUEUE_ORDERED_NONE;
		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
		assign_p_sizes_qlim(device, p, NULL);
	}

	if (peer_device->connection->agreed_pro_version <= 94)
		max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
	else if (peer_device->connection->agreed_pro_version < 100)
		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);

	p->d_size = cpu_to_be64(d_size);
	p->u_size = cpu_to_be64(u_size);
	p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(device->this_bdev));
	p->max_bio_size = cpu_to_be32(max_bio_size);
	p->queue_order_type = cpu_to_be16(q_order_type);
	p->dds_flags = cpu_to_be16(flags);

	return drbd_send_command(peer_device, sock, P_SIZES, packet_size, NULL, 0);
}
/**
 * drbd_send_current_state() - Sends the drbd state to the peer
 * @peer_device:	DRBD peer device.
 */
int drbd_send_current_state(struct drbd_peer_device *peer_device)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(peer_device->device->state.i); /* Within the send mutex */
	return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}
/**
 * drbd_send_state() - After a state change, sends the new state to the peer
 * @peer_device:	DRBD peer device.
 * @state:	the state to send, not necessarily the current state.
 *
 * Each state change queues an "after_state_ch" work, which will eventually
 * send the resulting new state to the peer. If more state changes happen
 * between queuing and processing of the after_state_ch work, we still
 * want to send each intermediary state in the order it occurred.
 */
int drbd_send_state(struct drbd_peer_device *peer_device, union drbd_state state)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(state.i); /* Within the send mutex */
	return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}
int drbd_send_state_req(struct drbd_peer_device *peer_device, union drbd_state mask, union drbd_state val)
{
	struct drbd_socket *sock;
	struct p_req_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return drbd_send_command(peer_device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
}
int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{
	enum drbd_packet cmd;
	struct drbd_socket *sock;
	struct p_req_state *p;

	cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
	sock = &connection->data;
	p = conn_prepare_command(connection, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
}
void drbd_send_sr_reply(struct drbd_peer_device *peer_device, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;

	sock = &peer_device->connection->meta;
	p = drbd_prepare_command(peer_device, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		drbd_send_command(peer_device, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
	}
}

void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;
	enum drbd_packet cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;

	sock = &connection->meta;
	p = conn_prepare_command(connection, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
	}
}
static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
	BUG_ON(code & ~0xf);
	p->encoding = (p->encoding & ~0xf) | code;
}

static void dcbp_set_start(struct p_compressed_bm *p, int set)
{
	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}

static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
{
	BUG_ON(n & ~0x7);
	p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}
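/*
 * For reference, a sketch of how the encoding byte set above is laid out.
 * This is derived from the three setters, not from separate protocol
 * documentation:
 *
 *   bit 7     : "start value" - whether the first run describes set bits
 *   bits 6..4 : number of pad bits at the end of the bit stream
 *   bits 3..0 : bitmap code, e.g. RLE_VLI_Bits
 *
 * A receiver would decode it accordingly:
 *
 *	int code     = p->encoding & 0x0f;
 *	int pad_bits = (p->encoding >> 4) & 0x07;
 *	int start    = !!(p->encoding & 0x80);
 */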
static int fill_bitmap_rle_bits(struct drbd_device *device,
				struct p_compressed_bm *p,
				unsigned int size,
				struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	unsigned long plain_bits;
	unsigned long tmp;
	unsigned long rl;
	unsigned len;
	unsigned toggle;
	int bits, use_rle;

	/* may we use this feature? */
	rcu_read_lock();
	use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
	rcu_read_unlock();
	if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
		return 0;

	if (c->bit_offset >= c->bm_bits)
		return 0; /* nothing to do. */

	/* use at most this many bytes */
	bitstream_init(&bs, p->code, size, 0);
	memset(p->code, 0, size);
	/* plain bits covered in this code string */
	plain_bits = 0;

	/* p->encoding & 0x80 stores whether the first run length is set.
	 * bit offset is implicit.
	 * start with toggle == 2 to be able to tell the first iteration */
	toggle = 2;

	/* see how many plain bits we can stuff into one packet
	 * using RLE and VLI. */
	do {
		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset)
				    : _drbd_bm_find_next(device, c->bit_offset);
		if (tmp == -1UL)
			tmp = c->bm_bits;
		rl = tmp - c->bit_offset;

		if (toggle == 2) { /* first iteration */
			if (rl == 0) {
				/* the first checked bit was set,
				 * store start value, */
				dcbp_set_start(p, 1);
				/* but skip encoding of zero run length */
				toggle = !toggle;
				continue;
			}
			dcbp_set_start(p, 0);
		}

		/* paranoia: catch zero runlength.
		 * can only happen if bitmap is modified while we scan it. */
		if (rl == 0) {
			drbd_err(device, "unexpected zero runlength while encoding bitmap "
				 "t:%u bo:%lu\n", toggle, c->bit_offset);
			return -1;
		}

		bits = vli_encode_bits(&bs, rl);
		if (bits == -ENOBUFS) /* buffer full */
			break;
		if (bits <= 0) {
			drbd_err(device, "error while encoding bitmap: %d\n", bits);
			return 0;
		}

		toggle = !toggle;
		plain_bits += rl;
		c->bit_offset = tmp;
	} while (c->bit_offset < c->bm_bits);

	len = bs.cur.b - p->code + !!bs.cur.bit;

	if (plain_bits < (len << 3)) {
		/* incompressible with this method.
		 * we need to rewind both word and bit position. */
		c->bit_offset -= plain_bits;
		bm_xfer_ctx_bit_to_word_offset(c);
		c->bit_offset = c->word_offset * BITS_PER_LONG;
		return 0;
	}

	/* RLE + VLI was able to compress it just fine.
	 * update c->word_offset. */
	bm_xfer_ctx_bit_to_word_offset(c);

	/* store pad_bits */
	dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

	return len;
}
/*
 * send_bitmap_rle_or_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
{
	struct drbd_socket *sock = &first_peer_device(device)->connection->data;
	unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
	struct p_compressed_bm *p = sock->sbuf + header_size;
	int len, err;

	len = fill_bitmap_rle_bits(device, p,
			DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
	if (len < 0)
		return -EIO;

	if (len) {
		dcbp_set_code(p, RLE_VLI_Bits);
		err = __send_command(first_peer_device(device)->connection, device->vnr, sock,
				     P_COMPRESSED_BITMAP, sizeof(*p) + len,
				     NULL, 0);
		c->packets[0]++;
		c->bytes[0] += header_size + sizeof(*p) + len;

		if (c->bit_offset >= c->bm_bits)
			len = 0; /* DONE */
	} else {
		/* was not compressible.
		 * send a buffer full of plain text bits instead. */
		unsigned int data_size;
		unsigned long num_words;
		unsigned long *p = sock->sbuf + header_size;

		data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
		num_words = min_t(size_t, data_size / sizeof(*p),
				  c->bm_words - c->word_offset);
		len = num_words * sizeof(*p);
		if (len)
			drbd_bm_get_lel(device, c->word_offset, num_words, p);
		err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
		c->word_offset += num_words;
		c->bit_offset = c->word_offset * BITS_PER_LONG;

		c->packets[1]++;
		c->bytes[1] += header_size + len;

		if (c->bit_offset > c->bm_bits)
			c->bit_offset = c->bm_bits;
	}
	if (!err) {
		if (len == 0) {
			INFO_bm_xfer_stats(device, "send", c);
			return 0;
		} else
			return 1;
	}
	return -EIO;
}
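/*
 * Usage sketch for the return convention above (this is, in condensed form,
 * what _drbd_send_bitmap() below does): keep calling until the function
 * reports "done" (0) or an error (< 0).
 *
 *	do {
 *		err = send_bitmap_rle_or_plain(device, &c);
 *	} while (err > 0);
 */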
/* See the comment at receive_bitmap() */
static int _drbd_send_bitmap(struct drbd_device *device)
{
	struct bm_xfer_ctx c;
	int err;

	if (!expect(device->bitmap))
		return false;

	if (get_ldev(device)) {
		if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
			drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
			drbd_bm_set_all(device);
			if (drbd_bm_write(device)) {
				/* write_bm did fail! Leave full sync flag set in Meta P_DATA
				 * but otherwise process as per normal - need to tell other
				 * side that a full resync is required! */
				drbd_err(device, "Failed to write bitmap to disk!\n");
			} else {
				drbd_md_clear_flag(device, MDF_FULL_SYNC);
				drbd_md_sync(device);
			}
		}
		put_ldev(device);
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(device),
		.bm_words = drbd_bm_words(device),
	};

	do {
		err = send_bitmap_rle_or_plain(device, &c);
	} while (err > 0);

	return err == 0;
}
int drbd_send_bitmap(struct drbd_device *device)
{
	struct drbd_socket *sock = &first_peer_device(device)->connection->data;
	int err = -1;

	mutex_lock(&sock->mutex);
	if (sock->socket)
		err = !_drbd_send_bitmap(device);
	mutex_unlock(&sock->mutex);
	return err;
}
void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
{
	struct drbd_socket *sock;
	struct p_barrier_ack *p;

	if (connection->cstate < C_WF_REPORT_PARAMS)
		return;

	sock = &connection->meta;
	p = conn_prepare_command(connection, sock);
	if (!p)
		return;
	p->barrier = barrier_nr;
	p->set_size = cpu_to_be32(set_size);
	conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
}
/**
 * _drbd_send_ack() - Sends an ack packet
 * @peer_device:	DRBD peer device.
 * @cmd:	Packet command code.
 * @sector:	sector, needs to be in big endian byte order
 * @blksize:	size in byte, needs to be in big endian byte order
 * @block_id:	Id, big endian byte order
 */
static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
			  u64 sector, u32 blksize, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_ack *p;

	if (peer_device->device->state.conn < C_CONNECTED)
		return -EIO;

	sock = &peer_device->connection->meta;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = sector;
	p->block_id = block_id;
	p->blksize = blksize;
	p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
}
/* dp->sector and dp->block_id already/still in network byte order,
 * data_size is payload size according to dp->head,
 * and may need to be corrected for digest size. */
void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		      struct p_data *dp, int data_size)
{
	if (peer_device->connection->peer_integrity_tfm)
		data_size -= crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
	_drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
		       dp->block_id);
}

void drbd_send_ack_rp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		      struct p_block_req *rp)
{
	_drbd_send_ack(peer_device, cmd, rp->sector, rp->blksize, rp->block_id);
}
/**
 * drbd_send_ack() - Sends an ack packet
 * @peer_device:	DRBD peer device
 * @cmd:	packet command code
 * @peer_req:	peer request
 */
int drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		  struct drbd_peer_request *peer_req)
{
	return _drbd_send_ack(peer_device, cmd,
			      cpu_to_be64(peer_req->i.sector),
			      cpu_to_be32(peer_req->i.size),
			      peer_req->block_id);
}
/* This function misuses the block_id field to signal if the blocks
 * are in sync or not. */
int drbd_send_ack_ex(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		     sector_t sector, int blksize, u64 block_id)
{
	return _drbd_send_ack(peer_device, cmd,
			      cpu_to_be64(sector),
			      cpu_to_be32(blksize),
			      cpu_to_be64(block_id));
}
int drbd_send_rs_deallocated(struct drbd_peer_device *peer_device,
			     struct drbd_peer_request *peer_req)
{
	struct drbd_socket *sock;
	struct p_block_desc *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(peer_req->i.sector);
	p->blksize = cpu_to_be32(peer_req->i.size);

	return drbd_send_command(peer_device, sock, P_RS_DEALLOCATED, sizeof(*p), NULL, 0);
}
int drbd_send_drequest(struct drbd_peer_device *peer_device, int cmd,
		       sector_t sector, int size, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = block_id;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
}
int drbd_send_drequest_csum(struct drbd_peer_device *peer_device, sector_t sector, int size,
			    void *digest, int digest_size, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	/* FIXME: Put the digest into the preallocated socket buffer. */

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), digest, digest_size);
}
int drbd_send_ov_request(struct drbd_peer_device *peer_device, sector_t sector, int size)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(peer_device, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
}
/* called on sndtimeo
 * returns false if we should retry,
 * true if we think connection is dead
 */
static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
{
	int drop_it;
	/* long elapsed = (long)(jiffies - device->last_received); */

	drop_it = connection->meta.socket == sock
		|| !connection->ack_receiver.task
		|| get_t_state(&connection->ack_receiver) != RUNNING
		|| connection->cstate < C_WF_REPORT_PARAMS;

	if (drop_it)
		return true;

	drop_it = !--connection->ko_count;
	if (!drop_it) {
		drbd_err(connection, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
			 current->comm, current->pid, connection->ko_count);
		request_ping(connection);
	}

	return drop_it; /* && (device->state == R_PRIMARY) */;
}
static void drbd_update_congested(struct drbd_connection *connection)
{
	struct sock *sk = connection->data.socket->sk;

	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
		set_bit(NET_CONGESTED, &connection->flags);
}
/* The idea of sendpage seems to be to put some kind of reference
 * to the page into the skb, and to hand it over to the NIC. In
 * this process get_page() gets called.
 *
 * As soon as the page was really sent over the network put_page()
 * gets called by some part of the network layer. [ NIC driver? ]
 *
 * [ get_page() / put_page() increment/decrement the count. If count
 *   reaches 0 the page will be freed. ]
 *
 * This works nicely with pages from FSs.
 * But this means that in protocol A we might signal IO completion too early!
 *
 * In order not to corrupt data during a resync we must make sure
 * that we do not reuse our own buffer pages (EEs) too early, therefore
 * we have the net_ee list.
 *
 * XFS seems to have problems, still, it submits pages with page_count == 0!
 * As a workaround, we disable sendpage on pages
 * with page_count == 0 or PageSlab.
 */
static int _drbd_no_send_page(struct drbd_peer_device *peer_device, struct page *page,
			      int offset, size_t size, unsigned msg_flags)
{
	struct socket *socket;
	void *addr;
	int err;

	socket = peer_device->connection->data.socket;
	addr = kmap(page) + offset;
	err = drbd_send_all(peer_device->connection, socket, addr, size, msg_flags);
	kunmap(page);
	if (!err)
		peer_device->device->send_cnt += size >> 9;
	return err;
}
static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *page,
			   int offset, size_t size, unsigned msg_flags)
{
	struct socket *socket = peer_device->connection->data.socket;
	int len = size;
	int err = -EIO;

	/* e.g. XFS meta- & log-data is in slab pages, which have a
	 * page_count of 0 and/or have PageSlab() set.
	 * we cannot use send_page for those, as that does get_page();
	 * put_page(); and would cause either a VM_BUG directly, or
	 * __page_cache_release a page that would actually still be referenced
	 * by someone, leading to some obscure delayed Oops somewhere else. */
	if (drbd_disable_sendpage || (page_count(page) < 1) || PageSlab(page))
		return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);

	msg_flags |= MSG_NOSIGNAL;
	drbd_update_congested(peer_device->connection);
	do {
		int sent;

		sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
		if (sent <= 0) {
			if (sent == -EAGAIN) {
				if (we_should_drop_the_connection(peer_device->connection, socket))
					break;
				continue;
			}
			drbd_warn(peer_device->device, "%s: size=%d len=%d sent=%d\n",
				  __func__, (int)size, len, sent);
			if (sent < 0)
				err = sent;
			break;
		}
		len    -= sent;
		offset += sent;
	} while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
	clear_bit(NET_CONGESTED, &peer_device->connection->flags);

	if (len == 0) {
		err = 0;
		peer_device->device->send_cnt += size >> 9;
	}
	return err;
}
static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	/* hint all but last page with MSG_MORE */
	bio_for_each_segment(bvec, bio, iter) {
		int err;

		err = _drbd_no_send_page(peer_device, bvec.bv_page,
					 bvec.bv_offset, bvec.bv_len,
					 bio_iter_last(bvec, iter)
					 ? 0 : MSG_MORE);
		if (err)
			return err;
		/* REQ_OP_WRITE_SAME has only one segment */
		if (bio_op(bio) == REQ_OP_WRITE_SAME)
			break;
	}
	return 0;
}
static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	/* hint all but last page with MSG_MORE */
	bio_for_each_segment(bvec, bio, iter) {
		int err;

		err = _drbd_send_page(peer_device, bvec.bv_page,
				      bvec.bv_offset, bvec.bv_len,
				      bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
		if (err)
			return err;
		/* REQ_OP_WRITE_SAME has only one segment */
		if (bio_op(bio) == REQ_OP_WRITE_SAME)
			break;
	}
	return 0;
}
static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
			    struct drbd_peer_request *peer_req)
{
	struct page *page = peer_req->pages;
	unsigned len = peer_req->i.size;
	int err;

	/* hint all but last page with MSG_MORE */
	page_chain_for_each(page) {
		unsigned l = min_t(unsigned, len, PAGE_SIZE);

		err = _drbd_send_page(peer_device, page, 0, l,
				      page_chain_next(page) ? MSG_MORE : 0);
		if (err)
			return err;
		len -= l;
	}
	return 0;
}
static u32 bio_flags_to_wire(struct drbd_connection *connection,
			     struct bio *bio)
{
	if (connection->agreed_pro_version >= 95)
		return  (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
			(bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
			(bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
			(bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
			(bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0) |
			(bio_op(bio) == REQ_OP_WRITE_ZEROES ? DP_DISCARD : 0);
	else
		return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
}
/* Used to send write or TRIM aka REQ_DISCARD requests
 * R_PRIMARY -> Peer	(P_DATA, P_TRIM)
 */
int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_data *p;
	struct p_wsame *wsame = NULL;
	void *digest_out;
	unsigned int dp_flags = 0;
	int digest_size;
	int err;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	digest_size = peer_device->connection->integrity_tfm ?
		      crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;

	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(req->i.sector);
	p->block_id = (unsigned long)req;
	p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
	dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio);
	if (device->state.conn >= C_SYNC_SOURCE &&
	    device->state.conn <= C_PAUSED_SYNC_T)
		dp_flags |= DP_MAY_SET_IN_SYNC;
	if (peer_device->connection->agreed_pro_version >= 100) {
		if (req->rq_state & RQ_EXP_RECEIVE_ACK)
			dp_flags |= DP_SEND_RECEIVE_ACK;
		/* During resync, request an explicit write ack,
		 * even in protocol != C */
		if (req->rq_state & RQ_EXP_WRITE_ACK
		|| (dp_flags & DP_MAY_SET_IN_SYNC))
			dp_flags |= DP_SEND_WRITE_ACK;
	}
	p->dp_flags = cpu_to_be32(dp_flags);

	if (dp_flags & DP_DISCARD) {
		struct p_trim *t = (struct p_trim*)p;
		t->size = cpu_to_be32(req->i.size);
		err = __send_command(peer_device->connection, device->vnr, sock, P_TRIM, sizeof(*t), NULL, 0);
		goto out;
	}
	if (dp_flags & DP_WSAME) {
		/* this will only work if DRBD_FF_WSAME is set AND the
		 * handshake agreed that all nodes and backend devices are
		 * WRITE_SAME capable and agree on logical_block_size */
		wsame = (struct p_wsame*)p;
		digest_out = wsame + 1;
		wsame->size = cpu_to_be32(req->i.size);
	} else
		digest_out = p + 1;

	/* our digest is still only over the payload.
	 * TRIM does not carry any payload. */
	if (digest_size)
		drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest_out);
	if (wsame)
		err = __send_command(peer_device->connection, device->vnr, sock, P_WSAME,
				     sizeof(*wsame) + digest_size, NULL,
				     bio_iovec(req->master_bio).bv_len);
	else
		err = __send_command(peer_device->connection, device->vnr, sock, P_DATA,
				     sizeof(*p) + digest_size, NULL, req->i.size);
	if (!err) {
		/* For protocol A, we have to memcpy the payload into
		 * socket buffers, as we may complete right away
		 * as soon as we handed it over to tcp, at which point the data
		 * pages may become invalid.
		 *
		 * For data-integrity enabled, we copy it as well, so we can be
		 * sure that even if the bio pages may still be modified, it
		 * won't change the data on the wire, thus if the digest checks
		 * out ok after sending on this side, but does not fit on the
		 * receiving side, we sure have detected corruption elsewhere.
		 */
		if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || digest_size)
			err = _drbd_send_bio(peer_device, req->master_bio);
		else
			err = _drbd_send_zc_bio(peer_device, req->master_bio);

		/* double check digest, sometimes buffers have been modified in flight. */
		if (digest_size > 0 && digest_size <= 64) {
			/* 64 byte, 512 bit, is the largest digest size
			 * currently supported in kernel crypto. */
			unsigned char digest[64];
			drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest);
			if (memcmp(p + 1, digest, digest_size)) {
				drbd_warn(device,
					"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
					(unsigned long long)req->i.sector, req->i.size);
			}
		} /* else if (digest_size > 64) {
		     ... Be noisy about digest too large ...
		} */
	}
out:
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

	return err;
}
/* answer packet, used to send data back for read requests:
 *  Peer          -> (diskless) R_PRIMARY  (P_DATA_REPLY)
 *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
 */
int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		    struct drbd_peer_request *peer_req)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_data *p;
	int err;
	int digest_size;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);

	digest_size = peer_device->connection->integrity_tfm ?
		      crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;

	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(peer_req->i.sector);
	p->block_id = peer_req->block_id;
	p->seq_num = 0;  /* unused */
	p->dp_flags = 0;
	if (digest_size)
		drbd_csum_ee(peer_device->connection->integrity_tfm, peer_req, p + 1);
	err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + digest_size, NULL, peer_req->i.size);
	if (!err)
		err = _drbd_send_zc_ee(peer_device, peer_req);
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

	return err;
}
int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_socket *sock;
	struct p_block_desc *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(req->i.sector);
	p->blksize = cpu_to_be32(req->i.size);
	return drbd_send_command(peer_device, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
}
/*
  drbd_send distinguishes two cases:

  Packets sent via the data socket "sock"
  and packets sent via the meta data socket "msock"

		    sock                      msock
  -----------------+-------------------------+------------------------------
  timeout           conf.timeout / 2          conf.timeout / 2
  timeout action    send a ping via msock     Abort communication
					      and close all sockets
*/

/*
 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
 */
int drbd_send(struct drbd_connection *connection, struct socket *sock,
	      void *buf, size_t size, unsigned msg_flags)
{
	struct kvec iov = {.iov_base = buf, .iov_len = size};
	struct msghdr msg;
	int rv, sent = 0;

	if (!sock)
		return -EBADR;

	/* THINK  if (signal_pending) return ... ? */

	msg.msg_name       = NULL;
	msg.msg_namelen    = 0;
	msg.msg_control    = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags      = msg_flags | MSG_NOSIGNAL;

	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, size);

	if (sock == connection->data.socket) {
		rcu_read_lock();
		connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
		rcu_read_unlock();
		drbd_update_congested(connection);
	}
	do {
		rv = sock_sendmsg(sock, &msg);
		if (rv == -EAGAIN) {
			if (we_should_drop_the_connection(connection, sock))
				break;
			else
				continue;
		}
		if (rv == -EINTR) {
			flush_signals(current);
			rv = 0;
		}
		if (rv < 0)
			break;
		sent += rv;
	} while (sent < size);

	if (sock == connection->data.socket)
		clear_bit(NET_CONGESTED, &connection->flags);

	if (rv <= 0) {
		if (rv != -EAGAIN) {
			drbd_err(connection, "%s_sendmsg returned %d\n",
				 sock == connection->meta.socket ? "msock" : "sock",
				 rv);
			conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
		} else
			conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
	}

	return sent;
}
/**
 * drbd_send_all  -  Send an entire buffer
 *
 * Returns 0 upon success and a negative error value otherwise.
 */
int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer,
		  size_t size, unsigned msg_flags)
{
	int err;

	err = drbd_send(connection, sock, buffer, size, msg_flags);
	if (err < 0)
		return err;
	if (err != size)
		return -EIO;
	return 0;
}
static int drbd_open(struct block_device *bdev, fmode_t mode)
{
	struct drbd_device *device = bdev->bd_disk->private_data;
	unsigned long flags;
	int rv = 0;

	mutex_lock(&drbd_main_mutex);
	spin_lock_irqsave(&device->resource->req_lock, flags);
	/* to have a stable device->state.role
	 * and no race with updating open_cnt */

	if (device->state.role != R_PRIMARY) {
		if (mode & FMODE_WRITE)
			rv = -EROFS;
		else if (!drbd_allow_oos)
			rv = -EMEDIUMTYPE;
	}

	if (!rv)
		device->open_cnt++;
	spin_unlock_irqrestore(&device->resource->req_lock, flags);
	mutex_unlock(&drbd_main_mutex);

	return rv;
}
static void drbd_release(struct gendisk *gd, fmode_t mode)
{
	struct drbd_device *device = gd->private_data;

	mutex_lock(&drbd_main_mutex);
	device->open_cnt--;
	mutex_unlock(&drbd_main_mutex);
}
/* need to hold resource->req_lock */
void drbd_queue_unplug(struct drbd_device *device)
{
	if (device->state.pdsk >= D_INCONSISTENT && device->state.conn >= C_CONNECTED) {
		D_ASSERT(device, device->state.role == R_PRIMARY);
		if (test_and_clear_bit(UNPLUG_REMOTE, &device->flags)) {
			drbd_queue_work_if_unqueued(
				&first_peer_device(device)->connection->sender_work,
				&device->unplug_work);
		}
	}
}
static void drbd_set_defaults(struct drbd_device *device)
{
	/* Beware! The actual layout differs
	 * between big endian and little endian */
	device->state = (union drbd_dev_state) {
		{ .role = R_SECONDARY,
		  .peer = R_UNKNOWN,
		  .conn = C_STANDALONE,
		  .disk = D_DISKLESS,
		  .pdsk = D_UNKNOWN,
		} };
}
void drbd_init_set_defaults(struct drbd_device *device)
{
	/* the memset(,0,) did most of this.
	 * note: only assignments, no allocation in here */

	drbd_set_defaults(device);

	atomic_set(&device->ap_bio_cnt, 0);
	atomic_set(&device->ap_actlog_cnt, 0);
	atomic_set(&device->ap_pending_cnt, 0);
	atomic_set(&device->rs_pending_cnt, 0);
	atomic_set(&device->unacked_cnt, 0);
	atomic_set(&device->local_cnt, 0);
	atomic_set(&device->pp_in_use_by_net, 0);
	atomic_set(&device->rs_sect_in, 0);
	atomic_set(&device->rs_sect_ev, 0);
	atomic_set(&device->ap_in_flight, 0);
	atomic_set(&device->md_io.in_use, 0);

	mutex_init(&device->own_state_mutex);
	device->state_mutex = &device->own_state_mutex;

	spin_lock_init(&device->al_lock);
	spin_lock_init(&device->peer_seq_lock);

	INIT_LIST_HEAD(&device->active_ee);
	INIT_LIST_HEAD(&device->sync_ee);
	INIT_LIST_HEAD(&device->done_ee);
	INIT_LIST_HEAD(&device->read_ee);
	INIT_LIST_HEAD(&device->net_ee);
	INIT_LIST_HEAD(&device->resync_reads);
	INIT_LIST_HEAD(&device->resync_work.list);
	INIT_LIST_HEAD(&device->unplug_work.list);
	INIT_LIST_HEAD(&device->bm_io_work.w.list);
	INIT_LIST_HEAD(&device->pending_master_completion[0]);
	INIT_LIST_HEAD(&device->pending_master_completion[1]);
	INIT_LIST_HEAD(&device->pending_completion[0]);
	INIT_LIST_HEAD(&device->pending_completion[1]);

	device->resync_work.cb  = w_resync_timer;
	device->unplug_work.cb  = w_send_write_hint;
	device->bm_io_work.w.cb = w_bitmap_io;

	setup_timer(&device->resync_timer, resync_timer_fn,
		    (unsigned long)device);
	setup_timer(&device->md_sync_timer, md_sync_timer_fn,
		    (unsigned long)device);
	setup_timer(&device->start_resync_timer, start_resync_timer_fn,
		    (unsigned long)device);
	setup_timer(&device->request_timer, request_timer_fn,
		    (unsigned long)device);

	init_waitqueue_head(&device->misc_wait);
	init_waitqueue_head(&device->state_wait);
	init_waitqueue_head(&device->ee_wait);
	init_waitqueue_head(&device->al_wait);
	init_waitqueue_head(&device->seq_wait);

	device->resync_wenr = LC_FREE;
	device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
	device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
}
2050 void drbd_device_cleanup(struct drbd_device *device)
2053 if (first_peer_device(device)->connection->receiver.t_state != NONE)
2054 drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
2055 first_peer_device(device)->connection->receiver.t_state);
2057 device->al_writ_cnt =
2058 device->bm_writ_cnt =
2066 device->rs_failed = 0;
2067 device->rs_last_events = 0;
2068 device->rs_last_sect_ev = 0;
2069 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2070 device->rs_mark_left[i] = 0;
2071 device->rs_mark_time[i] = 0;
2073 D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);
2075 drbd_set_my_capacity(device, 0);
2076 if (device->bitmap) {
2077 /* maybe never allocated. */
2078 drbd_bm_resize(device, 0, 1);
2079 drbd_bm_cleanup(device);
2082 drbd_backing_dev_free(device, device->ldev);
2083 device->ldev = NULL;
2085 clear_bit(AL_SUSPENDED, &device->flags);
2087 D_ASSERT(device, list_empty(&device->active_ee));
2088 D_ASSERT(device, list_empty(&device->sync_ee));
2089 D_ASSERT(device, list_empty(&device->done_ee));
2090 D_ASSERT(device, list_empty(&device->read_ee));
2091 D_ASSERT(device, list_empty(&device->net_ee));
2092 D_ASSERT(device, list_empty(&device->resync_reads));
2093 D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
2094 D_ASSERT(device, list_empty(&device->resync_work.list));
2095 D_ASSERT(device, list_empty(&device->unplug_work.list));
2097 drbd_set_defaults(device);
2101 static void drbd_destroy_mempools(void)
2105 while (drbd_pp_pool) {
2106 page = drbd_pp_pool;
2107 drbd_pp_pool = (struct page *)page_private(page);
2112 /* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
2114 if (drbd_io_bio_set)
2115 bioset_free(drbd_io_bio_set);
2116 if (drbd_md_io_bio_set)
2117 bioset_free(drbd_md_io_bio_set);
2118 if (drbd_md_io_page_pool)
2119 mempool_destroy(drbd_md_io_page_pool);
2120 if (drbd_ee_mempool)
2121 mempool_destroy(drbd_ee_mempool);
2122 if (drbd_request_mempool)
2123 mempool_destroy(drbd_request_mempool);
2125 kmem_cache_destroy(drbd_ee_cache);
2126 if (drbd_request_cache)
2127 kmem_cache_destroy(drbd_request_cache);
2128 if (drbd_bm_ext_cache)
2129 kmem_cache_destroy(drbd_bm_ext_cache);
2130 if (drbd_al_ext_cache)
2131 kmem_cache_destroy(drbd_al_ext_cache);
2133 drbd_io_bio_set = NULL;
2134 drbd_md_io_bio_set = NULL;
2135 drbd_md_io_page_pool = NULL;
2136 drbd_ee_mempool = NULL;
2137 drbd_request_mempool = NULL;
2138 drbd_ee_cache = NULL;
2139 drbd_request_cache = NULL;
2140 drbd_bm_ext_cache = NULL;
2141 drbd_al_ext_cache = NULL;
2146 static int drbd_create_mempools(void)
2149 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
2152 /* prepare our caches and mempools */
2153 drbd_request_mempool = NULL;
2154 drbd_ee_cache = NULL;
2155 drbd_request_cache = NULL;
2156 drbd_bm_ext_cache = NULL;
2157 drbd_al_ext_cache = NULL;
2158 drbd_pp_pool = NULL;
2159 drbd_md_io_page_pool = NULL;
2160 drbd_md_io_bio_set = NULL;
2161 drbd_io_bio_set = NULL;
2164 drbd_request_cache = kmem_cache_create(
2165 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2166 if (drbd_request_cache == NULL)
2169 drbd_ee_cache = kmem_cache_create(
2170 "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
2171 if (drbd_ee_cache == NULL)
2174 drbd_bm_ext_cache = kmem_cache_create(
2175 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2176 if (drbd_bm_ext_cache == NULL)
2179 drbd_al_ext_cache = kmem_cache_create(
2180 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2181 if (drbd_al_ext_cache == NULL)
2185 drbd_io_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
2186 if (drbd_io_bio_set == NULL)
2189 drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0,
2191 if (drbd_md_io_bio_set == NULL)
2194 drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
2195 if (drbd_md_io_page_pool == NULL)
2198 drbd_request_mempool = mempool_create_slab_pool(number,
2199 drbd_request_cache);
2200 if (drbd_request_mempool == NULL)
2203 drbd_ee_mempool = mempool_create_slab_pool(number, drbd_ee_cache);
2204 if (drbd_ee_mempool == NULL)
2207 /* drbd's page pool */
2208 spin_lock_init(&drbd_pp_lock);
2210 for (i = 0; i < number; i++) {
2211 page = alloc_page(GFP_HIGHUSER);
2214 set_page_private(page, (unsigned long)drbd_pp_pool);
2215 drbd_pp_pool = page;
2217 drbd_pp_vacant = number;
2222 drbd_destroy_mempools(); /* in case we allocated some */
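/*
 * Illustrative sketch, not part of the original source: the drbd page
 * pool built above is a singly linked list threaded through
 * page_private(), as also seen in drbd_destroy_mempools(). Popping the
 * head works like this (the drbd_pp_lock protection is elided here).
 */
#if 0
static struct page *example_pp_pop(void)
{
	struct page *page = drbd_pp_pool;

	if (page) {
		drbd_pp_pool = (struct page *)page_private(page);
		drbd_pp_vacant--;
	}
	return page;
}
#endif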
2226 static void drbd_release_all_peer_reqs(struct drbd_device *device)
2230 rr = drbd_free_peer_reqs(device, &device->active_ee);
2232 drbd_err(device, "%d EEs in active list found!\n", rr);
2234 rr = drbd_free_peer_reqs(device, &device->sync_ee);
2236 drbd_err(device, "%d EEs in sync list found!\n", rr);
2238 rr = drbd_free_peer_reqs(device, &device->read_ee);
2240 drbd_err(device, "%d EEs in read list found!\n", rr);
2242 rr = drbd_free_peer_reqs(device, &device->done_ee);
2244 drbd_err(device, "%d EEs in done list found!\n", rr);
2246 rr = drbd_free_peer_reqs(device, &device->net_ee);
2248 drbd_err(device, "%d EEs in net list found!\n", rr);
2251 /* caution. no locking. */
2252 void drbd_destroy_device(struct kref *kref)
2254 struct drbd_device *device = container_of(kref, struct drbd_device, kref);
2255 struct drbd_resource *resource = device->resource;
2256 struct drbd_peer_device *peer_device, *tmp_peer_device;
2258 del_timer_sync(&device->request_timer);
2260 /* paranoia asserts */
2261 D_ASSERT(device, device->open_cnt == 0);
2262 /* end paranoia asserts */
2264 /* cleanup stuff that may have been allocated during
2265 * device (re-)configuration or state changes */
2267 if (device->this_bdev)
2268 bdput(device->this_bdev);
2270 drbd_backing_dev_free(device, device->ldev);
2271 device->ldev = NULL;
2273 drbd_release_all_peer_reqs(device);
2275 lc_destroy(device->act_log);
2276 lc_destroy(device->resync);
2278 kfree(device->p_uuid);
2279 /* device->p_uuid = NULL; */
2281 if (device->bitmap) /* should no longer be there. */
2282 drbd_bm_cleanup(device);
2283 __free_page(device->md_io.page);
2284 put_disk(device->vdisk);
2285 blk_cleanup_queue(device->rq_queue);
2286 kfree(device->rs_plan_s);
2288 /* not for_each_connection(connection, resource):
2289 * those may have been cleaned up and disassociated already.
2291 for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
2292 kref_put(&peer_device->connection->kref, drbd_destroy_connection);
2295 memset(device, 0xfd, sizeof(*device));
2297 kref_put(&resource->kref, drbd_destroy_resource);
2300 /* One global retry thread, if we need to push back some bio and have it
2301 * reinserted through our make request function.
2303 static struct retry_worker {
2304 struct workqueue_struct *wq;
2305 struct work_struct worker;
2308 struct list_head writes;
2311 static void do_retry(struct work_struct *ws)
2313 struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
2315 struct drbd_request *req, *tmp;
2317 spin_lock_irq(&retry->lock);
2318 list_splice_init(&retry->writes, &writes);
2319 spin_unlock_irq(&retry->lock);
2321 list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
2322 struct drbd_device *device = req->device;
2323 struct bio *bio = req->master_bio;
2324 unsigned long start_jif = req->start_jif;
2328 expect(atomic_read(&req->completion_ref) == 0) &&
2329 expect(req->rq_state & RQ_POSTPONED) &&
2330 expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
2331 (req->rq_state & RQ_LOCAL_ABORTED) != 0);
2334 drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n",
2335 req, atomic_read(&req->completion_ref),
2338 /* We still need to put one kref associated with the
2339 * "completion_ref" going zero in the code path that queued it
2340 * here. The request object may still be referenced by a
2341 * frozen local req->private_bio, in case we force-detached.
2343 kref_put(&req->kref, drbd_req_destroy);
2345 /* A single suspended or otherwise blocking device may stall
2346 * all others as well. Fortunately, this code path is to
2347 * recover from a situation that "should not happen":
2348 * concurrent writes in multi-primary setup.
2349 * In a "normal" lifecycle, this workqueue is supposed to be
2350 * destroyed without ever doing anything.
2351 * If it turns out to be an issue anyway, we can do per
2352 * resource (replication group) or per device (minor) retry
2353 * workqueues instead.
2356 /* We are not just doing generic_make_request(),
2357 * as we want to keep the start_jif information. */
2359 __drbd_make_request(device, bio, start_jif);
2363 /* called via drbd_req_put_completion_ref(),
2364 * holds resource->req_lock */
2365 void drbd_restart_request(struct drbd_request *req)
2367 unsigned long flags;
2368 spin_lock_irqsave(&retry.lock, flags);
2369 list_move_tail(&req->tl_requests, &retry.writes);
2370 spin_unlock_irqrestore(&retry.lock, flags);
2372 /* Drop the extra reference that would otherwise
2373 * have been dropped by complete_master_bio.
2374 * do_retry() needs to grab a new one. */
2375 dec_ap_bio(req->device);
2377 queue_work(retry.wq, &retry.worker);
2380 void drbd_destroy_resource(struct kref *kref)
2382 struct drbd_resource *resource =
2383 container_of(kref, struct drbd_resource, kref);
2385 idr_destroy(&resource->devices);
2386 free_cpumask_var(resource->cpu_mask);
2387 kfree(resource->name);
2388 memset(resource, 0xf2, sizeof(*resource));
2392 void drbd_free_resource(struct drbd_resource *resource)
2394 struct drbd_connection *connection, *tmp;
2396 for_each_connection_safe(connection, tmp, resource) {
2397 list_del(&connection->connections);
2398 drbd_debugfs_connection_cleanup(connection);
2399 kref_put(&connection->kref, drbd_destroy_connection);
2401 drbd_debugfs_resource_cleanup(resource);
2402 kref_put(&resource->kref, drbd_destroy_resource);
2405 static void drbd_cleanup(void)
2408 struct drbd_device *device;
2409 struct drbd_resource *resource, *tmp;
2411 /* first remove proc,
2412 * drbdsetup uses its presence to detect
2413 * whether DRBD is loaded.
2414 * If we get stuck in proc removal
2415 * while netlink is already deregistered,
2416 * some drbdsetup commands may wait forever
2420 remove_proc_entry("drbd", NULL);
2423 destroy_workqueue(retry.wq);
2425 drbd_genl_unregister();
2427 idr_for_each_entry(&drbd_devices, device, i)
2428 drbd_delete_device(device);
2430 /* not _rcu since there is no other updater anymore; genl is already unregistered */
2431 for_each_resource_safe(resource, tmp, &drbd_resources) {
2432 list_del(&resource->resources);
2433 drbd_free_resource(resource);
2436 drbd_debugfs_cleanup();
2438 drbd_destroy_mempools();
2439 unregister_blkdev(DRBD_MAJOR, "drbd");
2441 idr_destroy(&drbd_devices);
2443 pr_info("module cleanup done.\n");
2447 * drbd_congested() - Callback for the flusher thread
2448 * @congested_data: User data
2449 * @bdi_bits: Bits the BDI flusher thread is currently interested in
2451 * Returns 1<<WB_async_congested and/or 1<<WB_sync_congested if we are congested.
2453 static int drbd_congested(void *congested_data, int bdi_bits)
2455 struct drbd_device *device = congested_data;
2456 struct request_queue *q;
2460 if (!may_inc_ap_bio(device)) {
2461 /* DRBD has frozen IO */
2467 if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) {
2468 r |= (1 << WB_async_congested);
2469 /* Without good local data, we would need to read from remote,
2470 * and that would need the worker thread as well, which is
2471 * currently blocked waiting for that usermode helper to
2474 if (!get_ldev_if_state(device, D_UP_TO_DATE))
2475 r |= (1 << WB_sync_congested);
2483 if (get_ldev(device)) {
2484 q = bdev_get_queue(device->ldev->backing_bdev);
2485 r = bdi_congested(q->backing_dev_info, bdi_bits);
2491 if (bdi_bits & (1 << WB_async_congested) &&
2492 test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) {
2493 r |= (1 << WB_async_congested);
2494 reason = reason == 'b' ? 'a' : 'n';
2498 device->congestion_reason = reason;
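/*
 * Illustrative sketch, not part of the original source: writeback invokes
 * the congested_fn wired up in drbd_create_device() roughly like this,
 * with the device as @congested_data.
 */
#if 0
static bool example_write_congested(struct drbd_device *device)
{
	return drbd_congested(device, 1 << WB_async_congested) != 0;
}
#endif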
2502 static void drbd_init_workqueue(struct drbd_work_queue* wq)
2504 spin_lock_init(&wq->q_lock);
2505 INIT_LIST_HEAD(&wq->q);
2506 init_waitqueue_head(&wq->q_wait);
2509 struct completion_work {
2511 struct completion done;
2514 static int w_complete(struct drbd_work *w, int cancel)
2516 struct completion_work *completion_work =
2517 container_of(w, struct completion_work, w);
2519 complete(&completion_work->done);
2523 void drbd_flush_workqueue(struct drbd_work_queue *work_queue)
2525 struct completion_work completion_work;
2527 completion_work.w.cb = w_complete;
2528 init_completion(&completion_work.done);
2529 drbd_queue_work(work_queue, &completion_work.w);
2530 wait_for_completion(&completion_work.done);
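/*
 * Illustrative sketch, not part of the original source: because the
 * sentinel work item only completes after everything queued before it,
 * a hypothetical caller can drain a work queue like this.
 */
#if 0
static void example_drain_sender_work(struct drbd_connection *connection)
{
	drbd_flush_workqueue(&connection->sender_work);
	/* all work queued before this point has now run */
}
#endif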
2533 struct drbd_resource *drbd_find_resource(const char *name)
2535 struct drbd_resource *resource;
2537 if (!name || !name[0])
2541 for_each_resource_rcu(resource, &drbd_resources) {
2542 if (!strcmp(resource->name, name)) {
2543 kref_get(&resource->kref);
2553 struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
2554 void *peer_addr, int peer_addr_len)
2556 struct drbd_resource *resource;
2557 struct drbd_connection *connection;
2560 for_each_resource_rcu(resource, &drbd_resources) {
2561 for_each_connection_rcu(connection, resource) {
2562 if (connection->my_addr_len == my_addr_len &&
2563 connection->peer_addr_len == peer_addr_len &&
2564 !memcmp(&connection->my_addr, my_addr, my_addr_len) &&
2565 !memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) {
2566 kref_get(&connection->kref);
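/*
 * Illustrative sketch, not part of the original source: both lookup
 * helpers above return with a kref held, so a hypothetical caller must
 * drop the reference when done.
 */
#if 0
static void example_lookup(const char *name)
{
	struct drbd_resource *resource = drbd_find_resource(name);

	if (resource) {
		/* ... use resource ... */
		kref_put(&resource->kref, drbd_destroy_resource);
	}
}
#endif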
2577 static int drbd_alloc_socket(struct drbd_socket *socket)
2579 socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
2582 socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
2588 static void drbd_free_socket(struct drbd_socket *socket)
2590 free_page((unsigned long) socket->sbuf);
2591 free_page((unsigned long) socket->rbuf);
2594 void conn_free_crypto(struct drbd_connection *connection)
2596 drbd_free_sock(connection);
2598 crypto_free_ahash(connection->csums_tfm);
2599 crypto_free_ahash(connection->verify_tfm);
2600 crypto_free_shash(connection->cram_hmac_tfm);
2601 crypto_free_ahash(connection->integrity_tfm);
2602 crypto_free_ahash(connection->peer_integrity_tfm);
2603 kfree(connection->int_dig_in);
2604 kfree(connection->int_dig_vv);
2606 connection->csums_tfm = NULL;
2607 connection->verify_tfm = NULL;
2608 connection->cram_hmac_tfm = NULL;
2609 connection->integrity_tfm = NULL;
2610 connection->peer_integrity_tfm = NULL;
2611 connection->int_dig_in = NULL;
2612 connection->int_dig_vv = NULL;
2615 int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts)
2617 struct drbd_connection *connection;
2618 cpumask_var_t new_cpu_mask;
2621 if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
2624 /* silently ignore cpu mask on UP kernel */
2625 if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
2626 err = bitmap_parse(res_opts->cpu_mask, DRBD_CPU_MASK_SIZE,
2627 cpumask_bits(new_cpu_mask), nr_cpu_ids);
2628 if (err == -EOVERFLOW) {
2629 /* So what. mask it out. */
2630 cpumask_var_t tmp_cpu_mask;
2631 if (zalloc_cpumask_var(&tmp_cpu_mask, GFP_KERNEL)) {
2632 cpumask_setall(tmp_cpu_mask);
2633 cpumask_and(new_cpu_mask, new_cpu_mask, tmp_cpu_mask);
2634 drbd_warn(resource, "Overflow in bitmap_parse(%.12s%s), truncating to %u bits\n",
2636 strlen(res_opts->cpu_mask) > 12 ? "..." : "",
2638 free_cpumask_var(tmp_cpu_mask);
2643 drbd_warn(resource, "bitmap_parse() failed with %d\n", err);
2644 /* retcode = ERR_CPU_MASK_PARSE; */
2648 resource->res_opts = *res_opts;
2649 if (cpumask_empty(new_cpu_mask))
2650 drbd_calc_cpu_mask(&new_cpu_mask);
2651 if (!cpumask_equal(resource->cpu_mask, new_cpu_mask)) {
2652 cpumask_copy(resource->cpu_mask, new_cpu_mask);
2653 for_each_connection_rcu(connection, resource) {
2654 connection->receiver.reset_cpu_mask = 1;
2655 connection->ack_receiver.reset_cpu_mask = 1;
2656 connection->worker.reset_cpu_mask = 1;
2662 free_cpumask_var(new_cpu_mask);
2667 struct drbd_resource *drbd_create_resource(const char *name)
2669 struct drbd_resource *resource;
2671 resource = kzalloc(sizeof(struct drbd_resource), GFP_KERNEL);
2674 resource->name = kstrdup(name, GFP_KERNEL);
2675 if (!resource->name)
2676 goto fail_free_resource;
2677 if (!zalloc_cpumask_var(&resource->cpu_mask, GFP_KERNEL))
2678 goto fail_free_name;
2679 kref_init(&resource->kref);
2680 idr_init(&resource->devices);
2681 INIT_LIST_HEAD(&resource->connections);
2682 resource->write_ordering = WO_BDEV_FLUSH;
2683 list_add_tail_rcu(&resource->resources, &drbd_resources);
2684 mutex_init(&resource->conf_update);
2685 mutex_init(&resource->adm_mutex);
2686 spin_lock_init(&resource->req_lock);
2687 drbd_debugfs_resource_add(resource);
2691 kfree(resource->name);
2698 /* caller must be under adm_mutex */
2699 struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
2701 struct drbd_resource *resource;
2702 struct drbd_connection *connection;
2704 connection = kzalloc(sizeof(struct drbd_connection), GFP_KERNEL);
2708 if (drbd_alloc_socket(&connection->data))
2710 if (drbd_alloc_socket(&connection->meta))
2713 connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
2714 if (!connection->current_epoch)
2717 INIT_LIST_HEAD(&connection->transfer_log);
2719 INIT_LIST_HEAD(&connection->current_epoch->list);
2720 connection->epochs = 1;
2721 spin_lock_init(&connection->epoch_lock);
2723 connection->send.seen_any_write_yet = false;
2724 connection->send.current_epoch_nr = 0;
2725 connection->send.current_epoch_writes = 0;
2727 resource = drbd_create_resource(name);
2731 connection->cstate = C_STANDALONE;
2732 mutex_init(&connection->cstate_mutex);
2733 init_waitqueue_head(&connection->ping_wait);
2734 idr_init(&connection->peer_devices);
2736 drbd_init_workqueue(&connection->sender_work);
2737 mutex_init(&connection->data.mutex);
2738 mutex_init(&connection->meta.mutex);
2740 drbd_thread_init(resource, &connection->receiver, drbd_receiver, "receiver");
2741 connection->receiver.connection = connection;
2742 drbd_thread_init(resource, &connection->worker, drbd_worker, "worker");
2743 connection->worker.connection = connection;
2744 drbd_thread_init(resource, &connection->ack_receiver, drbd_ack_receiver, "ack_recv");
2745 connection->ack_receiver.connection = connection;
2747 kref_init(&connection->kref);
2749 connection->resource = resource;
2751 if (set_resource_options(resource, res_opts))
2754 kref_get(&resource->kref);
2755 list_add_tail_rcu(&connection->connections, &resource->connections);
2756 drbd_debugfs_connection_add(connection);
2760 list_del(&resource->resources);
2761 drbd_free_resource(resource);
2763 kfree(connection->current_epoch);
2764 drbd_free_socket(&connection->meta);
2765 drbd_free_socket(&connection->data);
2770 void drbd_destroy_connection(struct kref *kref)
2772 struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
2773 struct drbd_resource *resource = connection->resource;
2775 if (atomic_read(&connection->current_epoch->epoch_size) != 0)
2776 drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
2777 kfree(connection->current_epoch);
2779 idr_destroy(&connection->peer_devices);
2781 drbd_free_socket(&connection->meta);
2782 drbd_free_socket(&connection->data);
2783 kfree(connection->int_dig_in);
2784 kfree(connection->int_dig_vv);
2785 memset(connection, 0xfc, sizeof(*connection));
2787 kref_put(&resource->kref, drbd_destroy_resource);
2790 static int init_submitter(struct drbd_device *device)
2792 /* open-coded create_singlethread_workqueue(),
2793 * to be able to say "drbd%d", ..., minor */
2795 alloc_ordered_workqueue("drbd%u_submit", WQ_MEM_RECLAIM, device->minor);
2796 if (!device->submit.wq)
2799 INIT_WORK(&device->submit.worker, do_submit);
2800 INIT_LIST_HEAD(&device->submit.writes);
2804 enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
2806 struct drbd_resource *resource = adm_ctx->resource;
2807 struct drbd_connection *connection, *n;
2808 struct drbd_device *device;
2809 struct drbd_peer_device *peer_device, *tmp_peer_device;
2810 struct gendisk *disk;
2811 struct request_queue *q;
2813 int vnr = adm_ctx->volume;
2814 enum drbd_ret_code err = ERR_NOMEM;
2816 device = minor_to_device(minor);
2818 return ERR_MINOR_OR_VOLUME_EXISTS;
2820 /* GFP_KERNEL, we are outside of all write-out paths */
2821 device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
2824 kref_init(&device->kref);
2826 kref_get(&resource->kref);
2827 device->resource = resource;
2828 device->minor = minor;
2831 drbd_init_set_defaults(device);
2833 q = blk_alloc_queue(GFP_KERNEL);
2836 device->rq_queue = q;
2837 q->queuedata = device;
2839 disk = alloc_disk(1);
2842 device->vdisk = disk;
2844 set_disk_ro(disk, true);
2847 disk->major = DRBD_MAJOR;
2848 disk->first_minor = minor;
2849 disk->fops = &drbd_ops;
2850 sprintf(disk->disk_name, "drbd%d", minor);
2851 disk->private_data = device;
2853 device->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
2854 /* we have no partitions. we contain only ourselves. */
2855 device->this_bdev->bd_contains = device->this_bdev;
2857 q->backing_dev_info->congested_fn = drbd_congested;
2858 q->backing_dev_info->congested_data = device;
2860 blk_queue_make_request(q, drbd_make_request);
2861 blk_queue_write_cache(q, true, true);
2862 /* Setting the max_hw_sectors to an odd value of 8 KiB here.
2863 This triggers a max_bio_size message upon first attach or connect. */
2864 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
2865 q->queue_lock = &resource->req_lock;
2867 device->md_io.page = alloc_page(GFP_KERNEL);
2868 if (!device->md_io.page)
2869 goto out_no_io_page;
2871 if (drbd_bm_init(device))
2873 device->read_requests = RB_ROOT;
2874 device->write_requests = RB_ROOT;
2876 id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
2879 err = ERR_MINOR_OR_VOLUME_EXISTS;
2880 goto out_no_minor_idr;
2882 kref_get(&device->kref);
2884 id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
2887 err = ERR_MINOR_OR_VOLUME_EXISTS;
2888 goto out_idr_remove_minor;
2890 kref_get(&device->kref);
2892 INIT_LIST_HEAD(&device->peer_devices);
2893 INIT_LIST_HEAD(&device->pending_bitmap_io);
2894 for_each_connection(connection, resource) {
2895 peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
2897 goto out_idr_remove_from_resource;
2898 peer_device->connection = connection;
2899 peer_device->device = device;
2901 list_add(&peer_device->peer_devices, &device->peer_devices);
2902 kref_get(&device->kref);
2904 id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
2907 err = ERR_INVALID_REQUEST;
2908 goto out_idr_remove_from_resource;
2910 kref_get(&connection->kref);
2911 INIT_WORK(&peer_device->send_acks_work, drbd_send_acks_wf);
2914 if (init_submitter(device)) {
2916 goto out_idr_remove_vol;
2921 /* inherit the connection state */
2922 device->state.conn = first_connection(resource)->cstate;
2923 if (device->state.conn == C_WF_REPORT_PARAMS) {
2924 for_each_peer_device(peer_device, device)
2925 drbd_connected(peer_device);
2927 /* move to create_peer_device() */
2928 for_each_peer_device(peer_device, device)
2929 drbd_debugfs_peer_device_add(peer_device);
2930 drbd_debugfs_device_add(device);
2934 idr_remove(&connection->peer_devices, vnr);
2935 out_idr_remove_from_resource:
2936 for_each_connection_safe(connection, n, resource) {
2937 peer_device = idr_remove(&connection->peer_devices, vnr);
2939 kref_put(&connection->kref, drbd_destroy_connection);
2941 for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
2942 list_del(&peer_device->peer_devices);
2945 idr_remove(&resource->devices, vnr);
2946 out_idr_remove_minor:
2947 idr_remove(&drbd_devices, minor);
2950 drbd_bm_cleanup(device);
2952 __free_page(device->md_io.page);
2956 blk_cleanup_queue(q);
2958 kref_put(&resource->kref, drbd_destroy_resource);
2963 void drbd_delete_device(struct drbd_device *device)
2965 struct drbd_resource *resource = device->resource;
2966 struct drbd_connection *connection;
2967 struct drbd_peer_device *peer_device;
2969 /* move to free_peer_device() */
2970 for_each_peer_device(peer_device, device)
2971 drbd_debugfs_peer_device_cleanup(peer_device);
2972 drbd_debugfs_device_cleanup(device);
2973 for_each_connection(connection, resource) {
2974 idr_remove(&connection->peer_devices, device->vnr);
2975 kref_put(&device->kref, drbd_destroy_device);
2977 idr_remove(&resource->devices, device->vnr);
2978 kref_put(&device->kref, drbd_destroy_device);
2979 idr_remove(&drbd_devices, device_to_minor(device));
2980 kref_put(&device->kref, drbd_destroy_device);
2981 del_gendisk(device->vdisk);
2983 kref_put(&device->kref, drbd_destroy_device);
2986 static int __init drbd_init(void)
2990 if (drbd_minor_count < DRBD_MINOR_COUNT_MIN || drbd_minor_count > DRBD_MINOR_COUNT_MAX) {
2991 pr_err("invalid minor_count (%d)\n", drbd_minor_count);
2995 drbd_minor_count = DRBD_MINOR_COUNT_DEF;
2999 err = register_blkdev(DRBD_MAJOR, "drbd");
3001 pr_err("unable to register block device major %d\n",
3007 * allocate all necessary structs
3009 init_waitqueue_head(&drbd_pp_wait);
3011 drbd_proc = NULL; /* play safe for drbd_cleanup */
3012 idr_init(&drbd_devices);
3014 mutex_init(&resources_mutex);
3015 INIT_LIST_HEAD(&drbd_resources);
3017 err = drbd_genl_register();
3019 pr_err("unable to register generic netlink family\n");
3023 err = drbd_create_mempools();
3028 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
3030 pr_err("unable to register proc file\n");
3034 retry.wq = create_singlethread_workqueue("drbd-reissue");
3036 pr_err("unable to create retry workqueue\n");
3039 INIT_WORK(&retry.worker, do_retry);
3040 spin_lock_init(&retry.lock);
3041 INIT_LIST_HEAD(&retry.writes);
3043 if (drbd_debugfs_init())
3044 pr_notice("failed to initialize debugfs -- will not be available\n");
3046 pr_info("initialized. "
3047 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3048 API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3049 pr_info("%s\n", drbd_buildtag());
3050 pr_info("registered as block device major %d\n", DRBD_MAJOR);
3051 return 0; /* Success! */
3056 pr_err("ran out of memory\n");
3058 pr_err("initialization failure\n");
3062 static void drbd_free_one_sock(struct drbd_socket *ds)
3065 mutex_lock(&ds->mutex);
3068 mutex_unlock(&ds->mutex);
3070 /* so debugfs does not need to mutex_lock() */
3072 kernel_sock_shutdown(s, SHUT_RDWR);
3077 void drbd_free_sock(struct drbd_connection *connection)
3079 if (connection->data.socket)
3080 drbd_free_one_sock(&connection->data);
3081 if (connection->meta.socket)
3082 drbd_free_one_sock(&connection->meta);
3085 /* meta data management */
3087 void conn_md_sync(struct drbd_connection *connection)
3089 struct drbd_peer_device *peer_device;
3093 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
3094 struct drbd_device *device = peer_device->device;
3096 kref_get(&device->kref);
3098 drbd_md_sync(device);
3099 kref_put(&device->kref, drbd_destroy_device);
3105 /* aligned 4kByte */
3106 struct meta_data_on_disk {
3107 u64 la_size_sect; /* last agreed size. */
3108 u64 uuid[UI_SIZE]; /* UUIDs. */
3111 u32 flags; /* MDF */
3114 u32 al_offset; /* offset to this block */
3115 u32 al_nr_extents; /* important for restoring the AL (userspace) */
3116 /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
3117 u32 bm_offset; /* offset to the bitmap, from here */
3118 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
3119 u32 la_peer_max_bio_size; /* last peer max_bio_size */
3121 /* see al_tr_number_to_on_disk_sector() */
3123 u32 al_stripe_size_4k;
3125 u8 reserved_u8[4096 - (7*8 + 10*4)];
3130 void drbd_md_write(struct drbd_device *device, void *b)
3132 struct meta_data_on_disk *buffer = b;
3136 memset(buffer, 0, sizeof(*buffer));
3138 buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(device->this_bdev));
3139 for (i = UI_CURRENT; i < UI_SIZE; i++)
3140 buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
3141 buffer->flags = cpu_to_be32(device->ldev->md.flags);
3142 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);
3144 buffer->md_size_sect = cpu_to_be32(device->ldev->md.md_size_sect);
3145 buffer->al_offset = cpu_to_be32(device->ldev->md.al_offset);
3146 buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements);
3147 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3148 buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid);
3150 buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset);
3151 buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size);
3153 buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
3154 buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);
3156 D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
3157 sector = device->ldev->md.md_offset;
3159 if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
3160 /* this was a try anyway ... */
3161 drbd_err(device, "meta data update failed!\n");
3162 drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
3167 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3168 * @device: DRBD device.
3170 void drbd_md_sync(struct drbd_device *device)
3172 struct meta_data_on_disk *buffer;
3174 /* Don't accidentally change the DRBD meta data layout. */
3175 BUILD_BUG_ON(UI_SIZE != 4);
3176 BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
3178 del_timer(&device->md_sync_timer);
3179 /* timer may be rearmed by drbd_md_mark_dirty() now. */
3180 if (!test_and_clear_bit(MD_DIRTY, &device->flags))
3183 /* We use here D_FAILED and not D_ATTACHING because we try to write
3184 * metadata even if we detach due to a disk failure! */
3185 if (!get_ldev_if_state(device, D_FAILED))
3188 buffer = drbd_md_get_buffer(device, __func__);
3192 drbd_md_write(device, buffer);
3194 /* Update device->ldev->md.la_size_sect,
3195 * since we updated it on metadata. */
3196 device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev);
3198 drbd_md_put_buffer(device);
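/*
 * Illustrative sketch, not part of the original source: the usual pattern
 * is to change the in-core meta data (which marks it dirty) and then call
 * drbd_md_sync(), which is a no-op unless MD_DIRTY is set. The helper
 * name is hypothetical.
 */
#if 0
static void example_flag_and_sync(struct drbd_device *device)
{
	if (get_ldev(device)) {	/* md flags live in device->ldev */
		drbd_md_set_flag(device, MDF_FULL_SYNC); /* marks dirty */
		put_ldev(device);
	}
	drbd_md_sync(device);
}
#endif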
3203 static int check_activity_log_stripe_size(struct drbd_device *device,
3204 struct meta_data_on_disk *on_disk,
3205 struct drbd_md *in_core)
3207 u32 al_stripes = be32_to_cpu(on_disk->al_stripes);
3208 u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k);
3211 /* both not set: default to old fixed size activity log */
3212 if (al_stripes == 0 && al_stripe_size_4k == 0) {
3214 al_stripe_size_4k = MD_32kB_SECT/8;
3217 /* some paranoia plausibility checks */
3219 /* we need both values to be set */
3220 if (al_stripes == 0 || al_stripe_size_4k == 0)
3223 al_size_4k = (u64)al_stripes * al_stripe_size_4k;
3225 /* Upper limit of activity log area, to avoid potential overflow
3226 * problems in al_tr_number_to_on_disk_sector(). Right now, more
3227 * than 72 * 4k blocks total only increases the amount of history,
3228 * limiting this arbitrarily to 16 GB is not a real limitation ;-) */
3229 if (al_size_4k > (16 * 1024 * 1024/4))
3232 /* Lower limit: we need at least 8 transaction slots (32kB)
3233 * to not break existing setups */
3234 if (al_size_4k < MD_32kB_SECT/8)
3237 in_core->al_stripe_size_4k = al_stripe_size_4k;
3238 in_core->al_stripes = al_stripes;
3239 in_core->al_size_4k = al_size_4k;
3243 drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
3244 al_stripes, al_stripe_size_4k);
3248 static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
3250 sector_t capacity = drbd_get_capacity(bdev->md_bdev);
3251 struct drbd_md *in_core = &bdev->md;
3252 s32 on_disk_al_sect;
3253 s32 on_disk_bm_sect;
3255 /* The on-disk size of the activity log, calculated from offsets, and
3256 * the size of the activity log calculated from the stripe settings,
3258 * Though we could relax this a bit: it is OK if the striped activity log
3259 * fits in the available on-disk activity log size.
3260 * Right now, that would break how resize is implemented.
3261 * TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware
3262 * of possible unused padding space in the on disk layout. */
3263 if (in_core->al_offset < 0) {
3264 if (in_core->bm_offset > in_core->al_offset)
3266 on_disk_al_sect = -in_core->al_offset;
3267 on_disk_bm_sect = in_core->al_offset - in_core->bm_offset;
3269 if (in_core->al_offset != MD_4kB_SECT)
3271 if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT)
3274 on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT;
3275 on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset;
3278 /* old fixed size meta data is exactly that: fixed. */
3279 if (in_core->meta_dev_idx >= 0) {
3280 if (in_core->md_size_sect != MD_128MB_SECT
3281 || in_core->al_offset != MD_4kB_SECT
3282 || in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT
3283 || in_core->al_stripes != 1
3284 || in_core->al_stripe_size_4k != MD_32kB_SECT/8)
3288 if (capacity < in_core->md_size_sect)
3290 if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev))
3293 /* should be aligned, and at least 32k */
3294 if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT))
3297 /* should fit (for now: exactly) into the available on-disk space;
3298 * overflow prevention is in check_activity_log_stripe_size() above. */
3299 if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT)
3302 /* again, should be aligned */
3303 if (in_core->bm_offset & 7)
3306 /* FIXME check for device grow with flex external meta data? */
3308 /* can the available bitmap space cover the last agreed device size? */
3309 if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512)
3315 drbd_err(device, "meta data offsets don't make sense: idx=%d "
3316 "al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
3317 "md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
3318 in_core->meta_dev_idx,
3319 in_core->al_stripes, in_core->al_stripe_size_4k,
3320 in_core->al_offset, in_core->bm_offset, in_core->md_size_sect,
3321 (unsigned long long)in_core->la_size_sect,
3322 (unsigned long long)capacity);
3329 * drbd_md_read() - Reads in the meta data super block
3330 * @device: DRBD device.
3331 * @bdev: Device from which the meta data should be read in.
3333 * Return NO_ERROR on success, and an enum drbd_ret_code in case
3334 * something goes wrong.
3336 * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
3337 * even before @bdev is assigned to @device->ldev.
3339 int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
3341 struct meta_data_on_disk *buffer;
3343 int i, rv = NO_ERROR;
3345 if (device->state.disk != D_DISKLESS)
3346 return ERR_DISK_CONFIGURED;
3348 buffer = drbd_md_get_buffer(device, __func__);
3352 /* First, figure out where our meta data superblock is located,
3354 bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
3355 bdev->md.md_offset = drbd_md_ss(bdev);
3356 /* Even for (flexible or indexed) external meta data,
3357 * initially restrict ourselves to the 4k superblock for now.
3358 * Affects the paranoia out-of-range access check in drbd_md_sync_page_io(). */
3359 bdev->md.md_size_sect = 8;
3361 if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset,
3363 /* NOTE: can't do normal error processing here as this is
3364 called BEFORE disk is attached */
3365 drbd_err(device, "Error while reading metadata.\n");
3366 rv = ERR_IO_MD_DISK;
3370 magic = be32_to_cpu(buffer->magic);
3371 flags = be32_to_cpu(buffer->flags);
3372 if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
3373 (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
3374 /* btw: that's Activity Log clean, not "all" clean. */
3375 drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
3376 rv = ERR_MD_UNCLEAN;
3380 rv = ERR_MD_INVALID;
3381 if (magic != DRBD_MD_MAGIC_08) {
3382 if (magic == DRBD_MD_MAGIC_07)
3383 drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
3385 drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
3389 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3390 drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3391 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3396 /* convert to in_core endian */
3397 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
3398 for (i = UI_CURRENT; i < UI_SIZE; i++)
3399 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3400 bdev->md.flags = be32_to_cpu(buffer->flags);
3401 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3403 bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
3404 bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
3405 bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);
3407 if (check_activity_log_stripe_size(device, buffer, &bdev->md))
3409 if (check_offsets_and_sizes(device, bdev))
3412 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3413 drbd_err(device, "unexpected bm_offset: %d (expected %d)\n",
3414 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3417 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3418 drbd_err(device, "unexpected md_size: %u (expected %u)\n",
3419 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3425 spin_lock_irq(&device->resource->req_lock);
3426 if (device->state.conn < C_CONNECTED) {
3428 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
3429 peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
3430 device->peer_max_bio_size = peer;
3432 spin_unlock_irq(&device->resource->req_lock);
3435 drbd_md_put_buffer(device);
3441 * drbd_md_mark_dirty() - Mark meta data super block as dirty
3442 * @device: DRBD device.
3444 * Call this function if you change anything that should be written to
3445 * the meta-data super block. This function sets MD_DIRTY, and starts a
3446 * timer that ensures that within five seconds you have to call drbd_md_sync().
3449 void drbd_md_mark_dirty_(struct drbd_device *device, unsigned int line, const char *func)
3451 if (!test_and_set_bit(MD_DIRTY, &device->flags)) {
3452 mod_timer(&device->md_sync_timer, jiffies + HZ);
3453 device->last_md_mark_dirty.line = line;
3454 device->last_md_mark_dirty.func = func;
3458 void drbd_md_mark_dirty(struct drbd_device *device)
3460 if (!test_and_set_bit(MD_DIRTY, &device->flags))
3461 mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
3465 void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
3469 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
3470 device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i];
3473 void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3475 if (idx == UI_CURRENT) {
3476 if (device->state.role == R_PRIMARY)
3481 drbd_set_ed_uuid(device, val);
3484 device->ldev->md.uuid[idx] = val;
3485 drbd_md_mark_dirty(device);
3488 void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3490 unsigned long flags;
3491 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3492 __drbd_uuid_set(device, idx, val);
3493 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3496 void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3498 unsigned long flags;
3499 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3500 if (device->ldev->md.uuid[idx]) {
3501 drbd_uuid_move_history(device);
3502 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx];
3504 __drbd_uuid_set(device, idx, val);
3505 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3509 * drbd_uuid_new_current() - Creates a new current UUID
3510 * @device: DRBD device.
3512 * Creates a new current UUID, and rotates the old current UUID into
3513 * the bitmap slot. Causes an incremental resync upon next connect.
3515 void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
3518 unsigned long long bm_uuid;
3520 get_random_bytes(&val, sizeof(u64));
3522 spin_lock_irq(&device->ldev->md.uuid_lock);
3523 bm_uuid = device->ldev->md.uuid[UI_BITMAP];
3526 drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
3528 device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
3529 __drbd_uuid_set(device, UI_CURRENT, val);
3530 spin_unlock_irq(&device->ldev->md.uuid_lock);
3532 drbd_print_uuids(device, "new current UUID");
3533 /* get it to stable storage _now_ */
3534 drbd_md_sync(device);
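/*
 * Illustrative sketch, not part of the original source: a new current
 * UUID is typically generated when the device starts writing without a
 * connected peer, so the rotated bitmap UUID later identifies what the
 * peer missed. The trigger shown here is hypothetical.
 */
#if 0
static void example_on_disconnected_write(struct drbd_device *device)
{
	if (get_ldev(device)) {
		drbd_uuid_new_current(device);
		put_ldev(device);
	}
}
#endif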
3537 void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
3539 unsigned long flags;
3540 if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3543 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3545 drbd_uuid_move_history(device);
3546 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
3547 device->ldev->md.uuid[UI_BITMAP] = 0;
3549 unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
3551 drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
3553 device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
3555 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3557 drbd_md_mark_dirty(device);
3561 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3562 * @device: DRBD device.
3564 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3566 int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local)
3570 drbd_md_set_flag(device, MDF_FULL_SYNC);
3571 drbd_md_sync(device);
3572 drbd_bm_set_all(device);
3574 rv = drbd_bm_write(device);
3577 drbd_md_clear_flag(device, MDF_FULL_SYNC);
3578 drbd_md_sync(device);
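/*
 * Illustrative sketch, not part of the original source: io_fn's such as
 * drbd_bmio_set_n_write() are not called directly; they are handed to
 * drbd_bitmap_io() or drbd_queue_bitmap_io() below, e.g.:
 */
#if 0
static int example_full_sync_bitmap(struct drbd_device *device)
{
	/* must not run from worker context, see drbd_bitmap_io() */
	return drbd_bitmap_io(device, &drbd_bmio_set_n_write,
			      "set_n_write from example", BM_LOCKED_MASK);
}
#endif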
3585 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3586 * @device: DRBD device.
3588 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3590 int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local)
3592 drbd_resume_al(device);
3593 drbd_bm_clear_all(device);
3594 return drbd_bm_write(device);
3597 static int w_bitmap_io(struct drbd_work *w, int unused)
3599 struct drbd_device *device =
3600 container_of(w, struct drbd_device, bm_io_work.w);
3601 struct bm_io_work *work = &device->bm_io_work;
3604 if (work->flags != BM_LOCKED_CHANGE_ALLOWED) {
3605 int cnt = atomic_read(&device->ap_bio_cnt);
3607 drbd_err(device, "FIXME: ap_bio_cnt %d, expected 0; queued for '%s'\n",
3611 if (get_ldev(device)) {
3612 drbd_bm_lock(device, work->why, work->flags);
3613 rv = work->io_fn(device);
3614 drbd_bm_unlock(device);
3618 clear_bit_unlock(BITMAP_IO, &device->flags);
3619 wake_up(&device->misc_wait);
3622 work->done(device, rv);
3624 clear_bit(BITMAP_IO_QUEUED, &device->flags);
3632 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3633 * @device: DRBD device.
3634 * @io_fn: IO callback to be called when bitmap IO is possible
3635 * @done: callback to be called after the bitmap IO was performed
3636 * @why: Descriptive text of the reason for doing the IO
3638 * While IO on the bitmap happens we freeze application IO, thus ensuring
3639 * that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
3640 * called from worker context. It MUST NOT be used while a previous such
3641 * work is still pending!
3643 * Its worker function encloses the call of io_fn() by get_ldev() and
3646 void drbd_queue_bitmap_io(struct drbd_device *device,
3647 int (*io_fn)(struct drbd_device *),
3648 void (*done)(struct drbd_device *, int),
3649 char *why, enum bm_flag flags)
3651 D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
3653 D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
3654 D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
3655 D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
3656 if (device->bm_io_work.why)
3657 drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
3658 why, device->bm_io_work.why);
3660 device->bm_io_work.io_fn = io_fn;
3661 device->bm_io_work.done = done;
3662 device->bm_io_work.why = why;
3663 device->bm_io_work.flags = flags;
3665 spin_lock_irq(&device->resource->req_lock);
3666 set_bit(BITMAP_IO, &device->flags);
3667 /* don't wait for pending application IO if the caller indicates that
3668 * application IO does not conflict anyway. */
3669 if (flags == BM_LOCKED_CHANGE_ALLOWED || atomic_read(&device->ap_bio_cnt) == 0) {
3670 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
3671 drbd_queue_work(&first_peer_device(device)->connection->sender_work,
3672 &device->bm_io_work.w);
3674 spin_unlock_irq(&device->resource->req_lock);
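/*
 * Illustrative sketch, not part of the original source: queueing
 * asynchronous bitmap IO from worker context; done() later receives the
 * io_fn's return value. Both example callbacks are hypothetical.
 */
#if 0
static void example_done(struct drbd_device *device, int rv)
{
	if (rv)
		drbd_err(device, "bitmap IO failed: %d\n", rv);
}

static void example_queue_bitmap_io(struct drbd_device *device)
{
	drbd_queue_bitmap_io(device, &drbd_bmio_set_n_write, example_done,
			     "set_n_write from example", BM_LOCKED_MASK);
}
#endif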
3678 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
3679 * @device: DRBD device.
3680 * @io_fn: IO callback to be called when bitmap IO is possible
3681 * @why: Descriptive text of the reason for doing the IO
3683 * freezes application IO while the actual IO operation runs. This
3684 * function MAY NOT be called from worker context.
3686 int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *),
3687 char *why, enum bm_flag flags)
3689 /* Only suspend IO if some operation is supposed to be locked out */
3690 const bool do_suspend_io = flags & (BM_DONT_CLEAR|BM_DONT_SET|BM_DONT_TEST);
3693 D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
3696 drbd_suspend_io(device);
3698 drbd_bm_lock(device, why, flags);
3700 drbd_bm_unlock(device);
3703 drbd_resume_io(device);
3708 void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local)
3710 if ((device->ldev->md.flags & flag) != flag) {
3711 drbd_md_mark_dirty(device);
3712 device->ldev->md.flags |= flag;
3716 void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local)
3718 if ((device->ldev->md.flags & flag) != 0) {
3719 drbd_md_mark_dirty(device);
3720 device->ldev->md.flags &= ~flag;
3723 int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
3725 return (bdev->md.flags & flag) != 0;
3728 static void md_sync_timer_fn(unsigned long data)
3730 struct drbd_device *device = (struct drbd_device *) data;
3731 drbd_device_post_work(device, MD_SYNC);
3734 const char *cmdname(enum drbd_packet cmd)
3736 /* THINK may need to become several global tables
3737 * when we want to support more than
3738 * one PRO_VERSION */
3739 static const char *cmdnames[] = {
3741 [P_WSAME] = "WriteSame",
3743 [P_DATA_REPLY] = "DataReply",
3744 [P_RS_DATA_REPLY] = "RSDataReply",
3745 [P_BARRIER] = "Barrier",
3746 [P_BITMAP] = "ReportBitMap",
3747 [P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
3748 [P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
3749 [P_UNPLUG_REMOTE] = "UnplugRemote",
3750 [P_DATA_REQUEST] = "DataRequest",
3751 [P_RS_DATA_REQUEST] = "RSDataRequest",
3752 [P_SYNC_PARAM] = "SyncParam",
3753 [P_SYNC_PARAM89] = "SyncParam89",
3754 [P_PROTOCOL] = "ReportProtocol",
3755 [P_UUIDS] = "ReportUUIDs",
3756 [P_SIZES] = "ReportSizes",
3757 [P_STATE] = "ReportState",
3758 [P_SYNC_UUID] = "ReportSyncUUID",
3759 [P_AUTH_CHALLENGE] = "AuthChallenge",
3760 [P_AUTH_RESPONSE] = "AuthResponse",
3762 [P_PING_ACK] = "PingAck",
3763 [P_RECV_ACK] = "RecvAck",
3764 [P_WRITE_ACK] = "WriteAck",
3765 [P_RS_WRITE_ACK] = "RSWriteAck",
3766 [P_SUPERSEDED] = "Superseded",
3767 [P_NEG_ACK] = "NegAck",
3768 [P_NEG_DREPLY] = "NegDReply",
3769 [P_NEG_RS_DREPLY] = "NegRSDReply",
3770 [P_BARRIER_ACK] = "BarrierAck",
3771 [P_STATE_CHG_REQ] = "StateChgRequest",
3772 [P_STATE_CHG_REPLY] = "StateChgReply",
3773 [P_OV_REQUEST] = "OVRequest",
3774 [P_OV_REPLY] = "OVReply",
3775 [P_OV_RESULT] = "OVResult",
3776 [P_CSUM_RS_REQUEST] = "CsumRSRequest",
3777 [P_RS_IS_IN_SYNC] = "CsumRSIsInSync",
3778 [P_COMPRESSED_BITMAP] = "CBitmap",
3779 [P_DELAY_PROBE] = "DelayProbe",
3780 [P_OUT_OF_SYNC] = "OutOfSync",
3781 [P_RETRY_WRITE] = "RetryWrite",
3782 [P_RS_CANCEL] = "RSCancel",
3783 [P_CONN_ST_CHG_REQ] = "conn_st_chg_req",
3784 [P_CONN_ST_CHG_REPLY] = "conn_st_chg_reply",
3786 [P_PROTOCOL_UPDATE] = "protocol_update",
3787 [P_RS_THIN_REQ] = "rs_thin_req",
3788 [P_RS_DEALLOCATED] = "rs_deallocated",
3790 /* enum drbd_packet, but not commands - obsoleted flags:
3796 /* too big for the array: 0xfffX */
3797 if (cmd == P_INITIAL_META)
3798 return "InitialMeta";
3799 if (cmd == P_INITIAL_DATA)
3800 return "InitialData";
3801 if (cmd == P_CONNECTION_FEATURES)
3802 return "ConnectionFeatures";
3803 if (cmd >= ARRAY_SIZE(cmdnames))
3805 return cmdnames[cmd];
3809 * drbd_wait_misc() - wait for a request to make progress
3810 * @device: device associated with the request
3811 * @i: the struct drbd_interval embedded in struct drbd_request or
3812 * struct drbd_peer_request
3814 int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
3816 struct net_conf *nc;
3821 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
3826 timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
3829 /* Indicate to wake up device->misc_wait on progress. */
3831 prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
3832 spin_unlock_irq(&device->resource->req_lock);
3833 timeout = schedule_timeout(timeout);
3834 finish_wait(&device->misc_wait, &wait);
3835 spin_lock_irq(&device->resource->req_lock);
3836 if (!timeout || device->state.conn < C_CONNECTED)
3838 if (signal_pending(current))
3839 return -ERESTARTSYS;
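/*
 * Illustrative sketch, not part of the original source: callers enter
 * with resource->req_lock held and wait for the interval embedded in a
 * conflicting request; the lock is dropped and re-taken internally.
 */
#if 0
static int example_wait_for_request(struct drbd_device *device,
				    struct drbd_request *req)
{
	return drbd_wait_misc(device, &req->i);
}
#endif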
3843 void lock_all_resources(void)
3845 struct drbd_resource *resource;
3846 int __maybe_unused i = 0;
3848 mutex_lock(&resources_mutex);
3849 local_irq_disable();
3850 for_each_resource(resource, &drbd_resources)
3851 spin_lock_nested(&resource->req_lock, i++);
3854 void unlock_all_resources(void)
3856 struct drbd_resource *resource;
3858 for_each_resource(resource, &drbd_resources)
3859 spin_unlock(&resource->req_lock);
3861 mutex_unlock(&resources_mutex);
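/*
 * Illustrative sketch, not part of the original source: the pair above
 * brackets operations that need a consistent view across all resources;
 * spin_lock_nested() with an increasing subclass keeps lockdep happy
 * about taking several req_locks at once.
 */
#if 0
static void example_global_operation(void)
{
	lock_all_resources();
	/* ... inspect or modify state across all resources ... */
	unlock_all_resources();
}
#endif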
3864 #ifdef CONFIG_DRBD_FAULT_INJECTION
3865 /* Fault insertion support including random number generator shamelessly
3866 * stolen from kernel/rcutorture.c */
3867 struct fault_random_state {
3868 unsigned long state;
3869 unsigned long count;
3872 #define FAULT_RANDOM_MULT 39916801 /* prime */
3873 #define FAULT_RANDOM_ADD 479001701 /* prime */
3874 #define FAULT_RANDOM_REFRESH 10000
3877 * Crude but fast random-number generator. Uses a linear congruential
3878 * generator, with occasional help from get_random_bytes().
3880 static unsigned long
3881 _drbd_fault_random(struct fault_random_state *rsp)
3885 if (!rsp->count--) {
3886 get_random_bytes(&refresh, sizeof(refresh));
3887 rsp->state += refresh;
3888 rsp->count = FAULT_RANDOM_REFRESH;
3890 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
3891 return swahw32(rsp->state);
3895 _drbd_fault_str(unsigned int type) {
3896 static char *_faults[] = {
3897 [DRBD_FAULT_MD_WR] = "Meta-data write",
3898 [DRBD_FAULT_MD_RD] = "Meta-data read",
3899 [DRBD_FAULT_RS_WR] = "Resync write",
3900 [DRBD_FAULT_RS_RD] = "Resync read",
3901 [DRBD_FAULT_DT_WR] = "Data write",
3902 [DRBD_FAULT_DT_RD] = "Data read",
3903 [DRBD_FAULT_DT_RA] = "Data read ahead",
3904 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
3905 [DRBD_FAULT_AL_EE] = "EE allocation",
3906 [DRBD_FAULT_RECEIVE] = "receive data corruption",
3909 return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
3913 _drbd_insert_fault(struct drbd_device *device, unsigned int type)
3915 static struct fault_random_state rrs = {0, 0};
3917 unsigned int ret = (
3918 (drbd_fault_devs == 0 ||
3919 ((1 << device_to_minor(device)) & drbd_fault_devs) != 0) &&
3920 (((_drbd_fault_random(&rrs) % 100) + 1) <= drbd_fault_rate));
3925 if (__ratelimit(&drbd_ratelimit_state))
3926 drbd_warn(device, "***Simulating %s failure\n",
3927 _drbd_fault_str(type));
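/*
 * Illustrative sketch, not part of the original source: IO paths test the
 * fault hook before submitting, e.g. (assuming the drbd_insert_fault()
 * wrapper from drbd_int.h, available under CONFIG_DRBD_FAULT_INJECTION):
 */
#if 0
static bool example_fail_md_write(struct drbd_device *device)
{
	return drbd_insert_fault(device, DRBD_FAULT_MD_WR);
}
#endif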
3934 const char *drbd_buildtag(void)
3936 /* DRBD built from external sources carries a reference to the
3937 git hash of the source code here. */
3939 static char buildtag[38] = "\0uilt-in";
3941 if (buildtag[0] == 0) {
3943 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
3952 module_init(drbd_init)
3953 module_exit(drbd_cleanup)
3955 EXPORT_SYMBOL(drbd_conn_str);
3956 EXPORT_SYMBOL(drbd_role_str);
3957 EXPORT_SYMBOL(drbd_disk_str);
3958 EXPORT_SYMBOL(drbd_set_st_err_str);