4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 #include <linux/module.h>
28 #include <linux/slab.h>
29 #include <linux/drbd.h>
34 static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);
36 /* Update disk stats at start of I/O request */
37 static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
39 struct request_queue *q = device->rq_queue;
41 generic_start_io_acct(q, bio_data_dir(req->master_bio),
42 req->i.size >> 9, &device->vdisk->part0);
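/* i.size is in bytes; ">> 9" converts it to the 512-byte sector count
 * expected by the generic I/O accounting helpers. */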
45 /* Update disk stats when completing request upwards */
46 static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
48 struct request_queue *q = device->rq_queue;
50 generic_end_io_acct(q, bio_data_dir(req->master_bio),
51 &device->vdisk->part0, req->start_jif);
54 static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio *bio_src)
56 struct drbd_request *req;
58 req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
61 memset(req, 0, sizeof(*req));
63 drbd_req_make_private_bio(req, bio_src);
64 req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
65 | (bio_op(bio_src) == REQ_OP_WRITE_SAME ? RQ_WSAME : 0)
66 | (bio_op(bio_src) == REQ_OP_WRITE_ZEROES ? RQ_UNMAP : 0)
67 | (bio_op(bio_src) == REQ_OP_DISCARD ? RQ_UNMAP : 0);
69 req->master_bio = bio_src;
72 drbd_clear_interval(&req->i);
73 req->i.sector = bio_src->bi_iter.bi_sector;
74 req->i.size = bio_src->bi_iter.bi_size;
76 req->i.waiting = false;
78 INIT_LIST_HEAD(&req->tl_requests);
79 INIT_LIST_HEAD(&req->w.list);
80 INIT_LIST_HEAD(&req->req_pending_master_completion);
81 INIT_LIST_HEAD(&req->req_pending_local);
83 /* one reference to be put by __drbd_make_request */
84 atomic_set(&req->completion_ref, 1);
85 /* one kref as long as completion_ref > 0 */
86 kref_init(&req->kref);
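/* Reference counting scheme, roughly:
 * - completion_ref counts reasons why the master bio cannot be completed
 *   to the upper layers yet (local submission pending, network activity
 *   pending, completion suspended, ...);
 * - kref counts reasons why the request object itself must stay allocated
 *   (completion_ref > 0, queued as a work item, barrier ack outstanding).
 * Both are (almost) exclusively manipulated in mod_rq_state() below. */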
90 static void drbd_remove_request_interval(struct rb_root *root,
91 struct drbd_request *req)
93 struct drbd_device *device = req->device;
94 struct drbd_interval *i = &req->i;
96 drbd_remove_interval(root, i);
98 /* Wake up any processes waiting for this request to complete. */
100 wake_up(&device->misc_wait);
103 void drbd_req_destroy(struct kref *kref)
105 struct drbd_request *req = container_of(kref, struct drbd_request, kref);
106 struct drbd_device *device = req->device;
107 const unsigned s = req->rq_state;
109 if ((req->master_bio && !(s & RQ_POSTPONED)) ||
110 atomic_read(&req->completion_ref) ||
111 (s & RQ_LOCAL_PENDING) ||
112 ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
113 drbd_err(device, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
114 s, atomic_read(&req->completion_ref));
118 /* If called from mod_rq_state (expected normal case) or
119 * drbd_send_and_submit (the less likely normal path), this holds the
120 * req_lock, and req->tl_requests will typically be on ->transfer_log,
121 * though it may be still empty (never added to the transfer log).
123 * If called from do_retry(), we do NOT hold the req_lock, but we are
124 * still allowed to unconditionally list_del(&req->tl_requests),
125 * because it will be on a local on-stack list only. */
126 list_del_init(&req->tl_requests);
128 /* finally remove the request from the conflict detection
129 * respective block_id verification interval tree. */
130 if (!drbd_interval_empty(&req->i)) {
131 struct rb_root *root;
134 root = &device->write_requests;
136 root = &device->read_requests;
137 drbd_remove_request_interval(root, req);
138 } else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0)
139 drbd_err(device, "drbd_req_destroy: Logic BUG: interval empty, but: rq_state=0x%x, sect=%llu, size=%u\n",
140 s, (unsigned long long)req->i.sector, req->i.size);
142 /* if it was a write, we may have to set the corresponding
143 * bit(s) out-of-sync first. If it had a local part, we need to
144 * release the reference to the activity log. */
146 /* Set out-of-sync unless both OK flags are set
147 * (local only or remote failed).
148 * Other places where we set out-of-sync:
149 * READ with local io-error */
151 /* There is a special case:
152 * we may notice late that IO was suspended,
153 * and postpone, or schedule for retry, a write,
154 * before it even was submitted or sent.
155 * In that case we do not want to touch the bitmap at all.
157 if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
158 if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
159 drbd_set_out_of_sync(device, req->i.sector, req->i.size);
161 if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
162 drbd_set_in_sync(device, req->i.sector, req->i.size);
165 /* one might be tempted to move the drbd_al_complete_io
166 * to the local io completion callback drbd_request_endio.
167 * but, if this was a mirror write, we may only
168 * drbd_al_complete_io after this is RQ_NET_DONE,
169 * otherwise the extent could be dropped from the al
170 * before it has actually been written on the peer.
171 * if we crash before our peer knows about the request,
172 * but after the extent has been dropped from the al,
173 * we would forget to resync the corresponding extent.
175 if (s & RQ_IN_ACT_LOG) {
176 if (get_ldev_if_state(device, D_FAILED)) {
177 drbd_al_complete_io(device, &req->i);
179 } else if (__ratelimit(&drbd_ratelimit_state)) {
180 drbd_warn(device, "Should have called drbd_al_complete_io(, %llu, %u), "
181 "but my Disk seems to have failed :(\n",
182 (unsigned long long) req->i.sector, req->i.size);
187 mempool_free(req, drbd_request_mempool);
190 static void wake_all_senders(struct drbd_connection *connection)
192 wake_up(&connection->sender_work.q_wait);
195 /* must hold resource->req_lock */
196 void start_new_tl_epoch(struct drbd_connection *connection)
198 /* no point closing an epoch, if it is empty, anyways. */
199 if (connection->current_tle_writes == 0)
202 connection->current_tle_writes = 0;
203 atomic_inc(&connection->current_tle_nr);
204 wake_all_senders(connection);
207 void complete_master_bio(struct drbd_device *device,
208 struct bio_and_error *m)
210 if (unlikely(m->error))
211 m->bio->bi_status = errno_to_blk_status(m->error);
217 /* Helper for __req_mod().
218 * Set m->bio to the master bio, if it is fit to be completed,
219 * or leave it alone (it is initialized to NULL in __req_mod),
220 * if it has already been completed, or cannot be completed yet.
221 * If m->bio is set, the error status to be returned is placed in m->error.
224 void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
226 const unsigned s = req->rq_state;
227 struct drbd_device *device = req->device;
230 /* we must not complete the master bio, while it is
231 * still being processed by _drbd_send_zc_bio (drbd_send_dblock)
232 * not yet acknowledged by the peer
233 * not yet completed by the local io subsystem
234 these flags may get cleared in any order by
the worker,
the receiver,
237 * the bio_endio completion callbacks.
239 if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
240 (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
241 (s & RQ_COMPLETION_SUSP)) {
242 drbd_err(device, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
246 if (!req->master_bio) {
247 drbd_err(device, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
252 * figure out whether to report success or failure.
254 * report success when at least one of the operations succeeded.
255 * or, to put it the other way,
256 * only report failure, when both operations failed.
258 * what to do about the failures is handled elsewhere.
259 * what we need to do here is just: complete the master_bio.
261 * local completion error, if any, has been stored as ERR_PTR
262 * in private_bio within drbd_request_endio.
264 ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
265 error = PTR_ERR(req->private_bio);
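/* Example: a WRITE that failed on the local disk but was acknowledged by the
 * peer still completes the master bio with success here; the local failure is
 * handled elsewhere (io-error policy, out-of-sync bookkeeping). */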
267 /* Before we can signal completion to the upper layers,
268 * we may need to close the current transfer log epoch.
269 * We are within the request lock, so we can simply compare
270 * the request epoch number with the current transfer log
271 * epoch number. If they match, increase the current_tle_nr,
272 * and reset the transfer log epoch write_cnt.
274 if (op_is_write(bio_op(req->master_bio)) &&
275 req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
276 start_new_tl_epoch(first_peer_device(device)->connection);
278 /* Update disk stats */
279 _drbd_end_io_acct(device, req);
282 * If the READ failed, have it be pushed back to the retry work queue,
283 * so it will re-enter __drbd_make_request(),
284 * and be re-assigned to a suitable local or remote path,
285 * or failed if we do not have access to good data anymore.
287 * Unless it was failed early by __drbd_make_request(),
288 * because no path was available, in which case
289 * it was not even added to the transfer_log.
291 * read-ahead may fail, and will not be retried.
293 * WRITE should have used all available paths already.
296 if (!ok && bio_op(req->master_bio) == REQ_OP_READ &&
297 !(req->master_bio->bi_opf & REQ_RAHEAD) &&
298 !list_empty(&req->tl_requests))
299 req->rq_state |= RQ_POSTPONED;
301 if (!(req->rq_state & RQ_POSTPONED)) {
302 m->error = ok ? 0 : (error ?: -EIO);
303 m->bio = req->master_bio;
304 req->master_bio = NULL;
305 /* We leave it in the tree, to be able to verify later
306 * write-acks in protocol != C during resync.
307 * But we mark it as "complete", so it won't be counted as
308 * conflict in a multi-primary setup. */
309 req->i.completed = true;
313 wake_up(&device->misc_wait);
315 /* Either we are about to complete to upper layers,
316 * or we will restart this request.
317 * In either case, the request object will be destroyed soon,
318 * so better remove it from all lists. */
319 list_del_init(&req->req_pending_master_completion);
322 /* still holds resource->req_lock */
323 static void drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
325 struct drbd_device *device = req->device;
326 D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));
331 if (!atomic_sub_and_test(put, &req->completion_ref))
334 drbd_req_complete(req, m);
336 /* local completion may still come in later,
337 * we need to keep the req object around. */
338 if (req->rq_state & RQ_LOCAL_ABORTED)
341 if (req->rq_state & RQ_POSTPONED) {
342 /* don't destroy the req object just yet,
343 * but queue it for retry */
344 drbd_restart_request(req);
348 kref_put(&req->kref, drbd_req_destroy);
351 static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
353 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
356 if (connection->req_next == NULL)
357 connection->req_next = req;
360 static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
362 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
365 if (connection->req_next != req)
367 list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
368 const unsigned s = req->rq_state;
369 if (s & RQ_NET_QUEUED)
372 if (&req->tl_requests == &connection->transfer_log)
374 connection->req_next = req;
377 static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
379 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
382 if (connection->req_ack_pending == NULL)
383 connection->req_ack_pending = req;
386 static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
388 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
391 if (connection->req_ack_pending != req)
393 list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
394 const unsigned s = req->rq_state;
395 if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING))
398 if (&req->tl_requests == &connection->transfer_log)
400 connection->req_ack_pending = req;
403 static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
405 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
408 if (connection->req_not_net_done == NULL)
409 connection->req_not_net_done = req;
412 static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
414 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
417 if (connection->req_not_net_done != req)
419 list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
420 const unsigned s = req->rq_state;
421 if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE))
424 if (&req->tl_requests == &connection->transfer_log)
426 connection->req_not_net_done = req;
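/* The helpers above maintain cached "oldest request" pointers per connection,
 * so we do not have to scan the whole transfer log:
 *   req_next         - oldest request still queued for the sender,
 *   req_ack_pending  - oldest request sent, still waiting for an ack,
 *   req_not_net_done - oldest request sent, but not yet RQ_NET_DONE.
 * They are consulted e.g. by the request timeout handling further below. */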
429 /* I'd like this to be the only place that manipulates
430 * req->completion_ref and req->kref. */
431 static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
434 struct drbd_device *device = req->device;
435 struct drbd_peer_device *peer_device = first_peer_device(device);
436 unsigned s = req->rq_state;
439 if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
440 set |= RQ_COMPLETION_SUSP;
444 req->rq_state &= ~clear;
445 req->rq_state |= set;
448 if (req->rq_state == s)
451 /* intent: get references */
453 kref_get(&req->kref);
455 if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
456 atomic_inc(&req->completion_ref);
458 if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
459 inc_ap_pending(device);
460 atomic_inc(&req->completion_ref);
463 if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED)) {
464 atomic_inc(&req->completion_ref);
465 set_if_null_req_next(peer_device, req);
468 if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
469 kref_get(&req->kref); /* wait for the DONE */
471 if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT)) {
472 /* potentially already completed in the ack_receiver thread */
473 if (!(s & RQ_NET_DONE)) {
474 atomic_add(req->i.size >> 9, &device->ap_in_flight);
475 set_if_null_req_not_net_done(peer_device, req);
477 if (req->rq_state & RQ_NET_PENDING)
478 set_if_null_req_ack_pending(peer_device, req);
481 if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
482 atomic_inc(&req->completion_ref);
484 /* progress: put references */
486 if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP))
489 if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
490 D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
494 if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
495 if (req->rq_state & RQ_LOCAL_ABORTED)
496 kref_put(&req->kref, drbd_req_destroy);
499 list_del_init(&req->req_pending_local);
502 if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
503 dec_ap_pending(device);
505 req->acked_jif = jiffies;
506 advance_conn_req_ack_pending(peer_device, req);
509 if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED)) {
511 advance_conn_req_next(peer_device, req);
514 if (!(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
516 atomic_sub(req->i.size >> 9, &device->ap_in_flight);
517 if (s & RQ_EXP_BARR_ACK)
518 kref_put(&req->kref, drbd_req_destroy);
519 req->net_done_jif = jiffies;
521 /* in ahead/behind mode, or just in case,
522 * before we finally destroy this request,
523 * the caching pointers must not reference it anymore */
524 advance_conn_req_next(peer_device, req);
525 advance_conn_req_ack_pending(peer_device, req);
526 advance_conn_req_not_net_done(peer_device, req);
529 /* potentially complete and destroy */
531 /* If we made progress, retry conflicting peer requests, if any. */
533 wake_up(&device->misc_wait);
535 drbd_req_put_completion_ref(req, m, c_put);
536 kref_put(&req->kref, drbd_req_destroy);
539 static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
541 char b[BDEVNAME_SIZE];
543 if (!__ratelimit(&drbd_ratelimit_state))
546 drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
547 (req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
548 (unsigned long long)req->i.sector,
550 bdevname(device->ldev->backing_bdev, b));
553 /* Helper for HANDED_OVER_TO_NETWORK.
554 * Is this a protocol A write (neither WRITE_ACK nor RECEIVE_ACK expected)?
555 * Is it also still "PENDING"?
556 * --> If so, clear PENDING and set NET_OK below.
557 * If it is a protocol A write, but not RQ_PENDING anymore, neg-ack was faster
558 * (and we must not set RQ_NET_OK) */
559 static inline bool is_pending_write_protocol_A(struct drbd_request *req)
561 return (req->rq_state &
562 (RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK|RQ_EXP_RECEIVE_ACK))
563 == (RQ_WRITE|RQ_NET_PENDING);
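/* Protocol A is asynchronous: such a write is treated as successfully
 * replicated as soon as it was handed to the network, see the
 * HANDED_OVER_TO_NETWORK handling below. */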
566 /* obviously this could be coded as many single functions
567 * instead of one huge switch,
568 * or by putting the code directly in the respective locations
569 * (as it has been before).
571 * but having it this way
572 * enforces that it is all in this one place, where it is easier to audit,
573 * it makes it obvious that whatever "event" "happens" to a request should
574 * happen "atomically" within the req_lock,
575 * and it enforces that we have to think in a very structured manner
576 * about the "events" that may happen to a request during its life time ...
578 int __req_mod(struct drbd_request *req, enum drbd_req_event what,
579 struct bio_and_error *m)
581 struct drbd_device *const device = req->device;
582 struct drbd_peer_device *const peer_device = first_peer_device(device);
583 struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
592 drbd_err(device, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
595 /* does not happen...
596 * initialization done in drbd_req_new
601 case TO_BE_SENT: /* via network */
602 /* reached via __drbd_make_request
603 * and from w_read_retry_remote */
604 D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
606 nc = rcu_dereference(connection->net_conf);
607 p = nc->wire_protocol;
610 p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
611 p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
612 mod_rq_state(req, m, 0, RQ_NET_PENDING);
615 case TO_BE_SUBMITTED: /* locally */
616 /* reached via __drbd_make_request */
617 D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK));
618 mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
622 if (req->rq_state & RQ_WRITE)
623 device->writ_cnt += req->i.size >> 9;
625 device->read_cnt += req->i.size >> 9;
627 mod_rq_state(req, m, RQ_LOCAL_PENDING,
628 RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
632 mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED);
635 case WRITE_COMPLETED_WITH_ERROR:
636 drbd_report_io_error(device, req);
637 __drbd_chk_io_error(device, DRBD_WRITE_ERROR);
638 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
641 case READ_COMPLETED_WITH_ERROR:
642 drbd_set_out_of_sync(device, req->i.sector, req->i.size);
643 drbd_report_io_error(device, req);
644 __drbd_chk_io_error(device, DRBD_READ_ERROR);
646 case READ_AHEAD_COMPLETED_WITH_ERROR:
647 /* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */
648 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
651 case DISCARD_COMPLETED_NOTSUPP:
652 case DISCARD_COMPLETED_WITH_ERROR:
653 /* I'd rather not detach from local disk just because it
654 * failed a REQ_DISCARD. */
655 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
658 case QUEUE_FOR_NET_READ:
661 * or target area marked as invalid,
662 * or just got an io-error. */
663 /* from __drbd_make_request
664 * or from bio_endio during read io-error recovery */
666 /* So we can verify the handle in the answer packet.
667 * Corresponding drbd_remove_request_interval is in
668 * drbd_req_complete() */
669 D_ASSERT(device, drbd_interval_empty(&req->i));
670 drbd_insert_interval(&device->read_requests, &req->i);
672 set_bit(UNPLUG_REMOTE, &device->flags);
674 D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
675 D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
676 mod_rq_state(req, m, 0, RQ_NET_QUEUED);
677 req->w.cb = w_send_read_req;
678 drbd_queue_work(&connection->sender_work,
682 case QUEUE_FOR_NET_WRITE:
683 /* assert something? */
684 /* from __drbd_make_request only */
686 /* Corresponding drbd_remove_request_interval is in
687 * drbd_req_complete() */
688 D_ASSERT(device, drbd_interval_empty(&req->i));
689 drbd_insert_interval(&device->write_requests, &req->i);
692 * In case the req ended up on the transfer log before being
693 * queued on the worker, it could lead to this request being
694 * missed during cleanup after connection loss.
695 * So we have to do both operations here,
696 * within the same lock that protects the transfer log.
698 * _req_add_to_epoch(req); this has to be after the
699 * _maybe_start_new_epoch(req); which happened in
700 * __drbd_make_request, because we now may set the bit
701 * again ourselves to close the current epoch.
703 * Add req to the (now) current epoch (barrier). */
705 /* otherwise we may lose an unplug, which may cause some remote
706 * io-scheduler timeout to expire, increasing maximum latency,
707 * hurting performance. */
708 set_bit(UNPLUG_REMOTE, &device->flags);
710 /* queue work item to send data */
711 D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
712 mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
713 req->w.cb = w_send_dblock;
714 drbd_queue_work(&connection->sender_work,
717 /* close the epoch, in case it outgrew the limit */
719 nc = rcu_dereference(connection->net_conf);
720 p = nc->max_epoch_size;
722 if (connection->current_tle_writes >= p)
723 start_new_tl_epoch(connection);
727 case QUEUE_FOR_SEND_OOS:
728 mod_rq_state(req, m, 0, RQ_NET_QUEUED);
729 req->w.cb = w_send_out_of_sync;
730 drbd_queue_work(&connection->sender_work,
734 case READ_RETRY_REMOTE_CANCELED:
737 /* real cleanup will be done from tl_clear. just update flags
738 * so it is no longer marked as on the worker queue */
739 mod_rq_state(req, m, RQ_NET_QUEUED, 0);
742 case HANDED_OVER_TO_NETWORK:
743 /* assert something? */
744 if (is_pending_write_protocol_A(req))
745 /* this is what is dangerous about protocol A:
746 * pretend it was successfully written on the peer. */
747 mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING,
748 RQ_NET_SENT|RQ_NET_OK);
750 mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
751 /* It is still not yet RQ_NET_DONE until the
752 * corresponding epoch barrier got acked as well,
753 * so we know what to dirty on connection loss. */
756 case OOS_HANDED_TO_NETWORK:
757 /* Was not set PENDING, no longer QUEUED, so is now DONE
758 * as far as this connection is concerned. */
759 mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE);
762 case CONNECTION_LOST_WHILE_PENDING:
763 /* transfer log cleanup after connection loss */
765 RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP,
769 case CONFLICT_RESOLVED:
770 /* for superseded conflicting writes of multiple primaries,
771 * there is no need to keep anything in the tl, potential
772 * node crashes are covered by the activity log.
774 * If this request had been marked as RQ_POSTPONED before,
775 * it will actually not be completed, but "restarted",
776 * resubmitted from the retry worker context. */
777 D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
778 D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
779 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
782 case WRITE_ACKED_BY_PEER_AND_SIS:
783 req->rq_state |= RQ_NET_SIS;
784 case WRITE_ACKED_BY_PEER:
785 /* Normal operation protocol C: successfully written on peer.
786 * During resync, even in protocol != C,
787 * we requested an explicit write ack anyways.
788 * Which means we cannot even assert anything here.
789 * Nothing more to do here.
790 * We want to keep the tl in place for all protocols, to cater
791 * for volatile write-back caches on lower level devices. */
793 case RECV_ACKED_BY_PEER:
794 D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK);
795 /* protocol B; pretends to be successfully written on peer.
796 * see also notes above in HANDED_OVER_TO_NETWORK about
799 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
803 D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
804 /* If this node has already detected the write conflict, the
805 * worker will be waiting on misc_wait. Wake it up once this
806 * request has completed locally.
808 D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
809 req->rq_state |= RQ_POSTPONED;
811 wake_up(&device->misc_wait);
812 /* Do not clear RQ_NET_PENDING. This request will make further
813 * progress via restart_conflicting_writes() or
814 * fail_postponed_requests(). Hopefully. */
818 mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0);
821 case FAIL_FROZEN_DISK_IO:
822 if (!(req->rq_state & RQ_LOCAL_COMPLETED))
824 mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
827 case RESTART_FROZEN_DISK_IO:
828 if (!(req->rq_state & RQ_LOCAL_COMPLETED))
832 RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED,
836 if (bio_data_dir(req->master_bio) == WRITE)
839 get_ldev(device); /* always succeeds in this call path */
840 req->w.cb = w_restart_disk_io;
841 drbd_queue_work(&connection->sender_work,
846 /* Simply complete (local only) READs. */
847 if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
848 mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
852 /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
853 before the connection loss (B&C only); only P_BARRIER_ACK
854 (or the local completion?) was missing when we suspended.
855 Throwing them out of the TL here by pretending we got a BARRIER_ACK.
856 During connection handshake, we ensure that the peer was not rebooted. */
857 if (!(req->rq_state & RQ_NET_OK)) {
858 /* FIXME could this possibly be a req->dw.cb == w_send_out_of_sync?
859 * in that case we must not set RQ_NET_PENDING. */
861 mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
863 /* w.cb expected to be w_send_dblock, or w_send_read_req */
864 drbd_queue_work(&connection->sender_work,
866 rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
867 } /* else: FIXME can this happen? */
870 /* else, fall through to BARRIER_ACKED */
873 /* barrier ack for READ requests does not make sense */
874 if (!(req->rq_state & RQ_WRITE))
877 if (req->rq_state & RQ_NET_PENDING) {
878 /* barrier came in before all requests were acked.
879 * this is bad, because if the connection is lost now,
880 * we won't be able to clean them up... */
881 drbd_err(device, "FIXME (BARRIER_ACKED but pending)\n");
883 /* Allowed to complete requests, even while suspended.
884 * As this is called for all requests within a matching epoch,
885 * we need to filter, and only set RQ_NET_DONE for those that
886 * have actually been on the wire. */
887 mod_rq_state(req, m, RQ_COMPLETION_SUSP,
888 (req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0);
892 D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
893 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
896 case QUEUE_AS_DRBD_BARRIER:
897 start_new_tl_epoch(connection);
898 mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
905 /* we may do a local read if:
906 * - we are consistent (of course),
907 * - or we are generally inconsistent,
908 * BUT we are still/already IN SYNC for this area.
909 * since size may be bigger than BM_BLOCK_SIZE,
910 * we may need to check several bits.
912 static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size)
914 unsigned long sbnr, ebnr;
915 sector_t esector, nr_sectors;
917 if (device->state.disk == D_UP_TO_DATE)
return true;
919 if (device->state.disk != D_INCONSISTENT)
return false;
921 esector = sector + (size >> 9) - 1;
922 nr_sectors = drbd_get_capacity(device->this_bdev);
923 D_ASSERT(device, sector < nr_sectors);
924 D_ASSERT(device, esector < nr_sectors);
926 sbnr = BM_SECT_TO_BIT(sector);
927 ebnr = BM_SECT_TO_BIT(esector);
929 return drbd_bm_count_bits(device, sbnr, ebnr) == 0;
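/* One bitmap bit covers BM_BLOCK_SIZE (4 KiB) of data, so e.g. a 32 KiB read
 * spans 8 bits; drbd_bm_count_bits() must find all of them clear (in sync)
 * before we read locally from an otherwise D_INCONSISTENT disk. */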
932 static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
933 enum drbd_read_balancing rbm)
935 struct backing_dev_info *bdi;
939 case RB_CONGESTED_REMOTE:
940 bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
941 return bdi_read_congested(bdi);
942 case RB_LEAST_PENDING:
943 return atomic_read(&device->local_cnt) >
944 atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
945 case RB_32K_STRIPING: /* stripe_shift = 15 */
946 case RB_64K_STRIPING:
947 case RB_128K_STRIPING:
948 case RB_256K_STRIPING:
949 case RB_512K_STRIPING:
950 case RB_1M_STRIPING: /* stripe_shift = 20 */
951 stripe_shift = (rbm - RB_32K_STRIPING + 15);
952 return (sector >> (stripe_shift - 9)) & 1;
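/* Worked example: RB_32K_STRIPING yields stripe_shift = 15, so the expression
 * above tests bit 6 of the sector number: the first 64 sectors (32 KiB) are
 * served locally, the next 32 KiB remotely, alternating from there on. */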
954 case RB_ROUND_ROBIN: return test_and_change_bit(READ_BALANCE_RR, &device->flags);
955 case RB_PREFER_REMOTE:
return true;
957 case RB_PREFER_LOCAL:
default:
return false;
964 * complete_conflicting_writes - wait for any conflicting write requests
966 * The write_requests tree contains all active write requests which we
967 currently know about. Wait for any requests to complete which conflict with the current request.
970 * Only way out: remove the conflicting intervals from the tree.
972 static void complete_conflicting_writes(struct drbd_request *req)
975 struct drbd_device *device = req->device;
976 struct drbd_interval *i;
977 sector_t sector = req->i.sector;
978 int size = req->i.size;
981 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
982 /* Ignore, if already completed to upper layers. */
985 /* Handle the first found overlap. After the schedule
986 * we have to restart the tree walk. */
992 /* Indicate to wake up device->misc_wait on progress. */
993 prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
995 spin_unlock_irq(&device->resource->req_lock);
997 spin_lock_irq(&device->resource->req_lock);
999 finish_wait(&device->misc_wait, &wait);
1002 /* called within req_lock */
1003 static void maybe_pull_ahead(struct drbd_device *device)
1005 struct drbd_connection *connection = first_peer_device(device)->connection;
1006 struct net_conf *nc;
1007 bool congested = false;
1008 enum drbd_on_congestion on_congestion;
1011 nc = rcu_dereference(connection->net_conf);
1012 on_congestion = nc ? nc->on_congestion : OC_BLOCK;
1014 if (on_congestion == OC_BLOCK ||
1015 connection->agreed_pro_version < 96)
1018 if (on_congestion == OC_PULL_AHEAD && device->state.conn == C_AHEAD)
1019 return; /* nothing to do ... */
1021 /* If I don't even have good local storage, we can not reasonably try
1022 * to pull ahead of the peer. We also need the local reference to make
1023 * sure device->act_log is there.
1025 if (!get_ldev_if_state(device, D_UP_TO_DATE))
1028 if (nc->cong_fill &&
1029 atomic_read(&device->ap_in_flight) >= nc->cong_fill) {
1030 drbd_info(device, "Congestion-fill threshold reached\n");
1034 if (device->act_log->used >= nc->cong_extents) {
1035 drbd_info(device, "Congestion-extents threshold reached\n");
1040 /* start a new epoch for non-mirrored writes */
1041 start_new_tl_epoch(first_peer_device(device)->connection);
1043 if (on_congestion == OC_PULL_AHEAD)
1044 _drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL);
1045 else /*nc->on_congestion == OC_DISCONNECT */
1046 _drbd_set_state(_NS(device, conn, C_DISCONNECTING), 0, NULL);
1051 /* If this returns false, and req->private_bio is still set,
1052 * this should be submitted locally.
1054 * If it returns false, but req->private_bio is not set,
1055 * we do not have access to good data :(
1057 * Otherwise, this destroys req->private_bio, if any, and returns true.
1060 static bool do_remote_read(struct drbd_request *req)
1062 struct drbd_device *device = req->device;
1063 enum drbd_read_balancing rbm;
1065 if (req->private_bio) {
1066 if (!drbd_may_do_local_read(device,
1067 req->i.sector, req->i.size)) {
1068 bio_put(req->private_bio);
1069 req->private_bio = NULL;
1074 if (device->state.pdsk != D_UP_TO_DATE)
1077 if (req->private_bio == NULL)
1080 /* TODO: improve read balancing decisions, take into account drbd
1081 * protocol, pending requests etc. */
1084 rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing;
1087 if (rbm == RB_PREFER_LOCAL && req->private_bio)
1088 return false; /* submit locally */
1090 if (remote_due_to_read_balancing(device, req->i.sector, rbm)) {
1091 if (req->private_bio) {
1092 bio_put(req->private_bio);
1093 req->private_bio = NULL;
1102 bool drbd_should_do_remote(union drbd_dev_state s)
1104 return s.pdsk == D_UP_TO_DATE ||
1105 (s.pdsk >= D_INCONSISTENT &&
1106 s.conn >= C_WF_BITMAP_T &&
1108 /* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T.
1109 That is equivalent since before 96 IO was frozen in the C_WF_BITMAP* states.
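/* Roughly: replicate while the peer disk is at least inconsistent and the
 * connection is in a state where the peer records or applies our writes,
 * but not while we intentionally run ahead of the peer. */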
1113 static bool drbd_should_send_out_of_sync(union drbd_dev_state s)
1115 return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
1116 /* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary
1117 since we enter state C_AHEAD only if proto >= 96 */
1120 /* returns number of connections (== 1, for drbd 8.4)
1121 * expected to actually write this data,
1122 * which does NOT include those that we are L_AHEAD for. */
1123 static int drbd_process_write_request(struct drbd_request *req)
1125 struct drbd_device *device = req->device;
1126 int remote, send_oos;
1128 remote = drbd_should_do_remote(device->state);
1129 send_oos = drbd_should_send_out_of_sync(device->state);
1131 /* Need to replicate writes. Unless it is an empty flush,
1132 * which is better mapped to a DRBD P_BARRIER packet,
1133 * also for drbd wire protocol compatibility reasons.
1134 * If this was a flush, just start a new epoch.
1135 * Unless the current epoch was empty anyways, or we are not currently
1136 * replicating, in which case there is no point. */
1137 if (unlikely(req->i.size == 0)) {
1138 /* The only size==0 bios we expect are empty flushes. */
1139 D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
1141 _req_mod(req, QUEUE_AS_DRBD_BARRIER);
1145 if (!remote && !send_oos)
1148 D_ASSERT(device, !(remote && send_oos));
1151 _req_mod(req, TO_BE_SENT);
1152 _req_mod(req, QUEUE_FOR_NET_WRITE);
1153 } else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size))
1154 _req_mod(req, QUEUE_FOR_SEND_OOS);
1159 static void drbd_process_discard_req(struct drbd_request *req)
1161 struct block_device *bdev = req->device->ldev->backing_bdev;
1163 if (blkdev_issue_zeroout(bdev, req->i.sector, req->i.size >> 9,
1165 req->private_bio->bi_status = BLK_STS_IOERR;
1166 bio_endio(req->private_bio);
1170 drbd_submit_req_private_bio(struct drbd_request *req)
1172 struct drbd_device *device = req->device;
1173 struct bio *bio = req->private_bio;
1176 if (bio_op(bio) != REQ_OP_READ)
1177 type = DRBD_FAULT_DT_WR;
1178 else if (bio->bi_opf & REQ_RAHEAD)
1179 type = DRBD_FAULT_DT_RA;
1181 type = DRBD_FAULT_DT_RD;
1183 bio_set_dev(bio, device->ldev->backing_bdev);
1185 /* State may have changed since we grabbed our reference on the
1186 * ->ldev member. Double check, and short-circuit to endio.
1187 * In case the last activity log transaction failed to get on
1188 stable storage, and this is a WRITE, we may not even submit this bio.
1190 if (get_ldev(device)) {
1191 if (drbd_insert_fault(device, type))
1193 else if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
1194 bio_op(bio) == REQ_OP_DISCARD)
1195 drbd_process_discard_req(req);
1197 generic_make_request(bio);
1203 static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
1205 spin_lock_irq(&device->resource->req_lock);
1206 list_add_tail(&req->tl_requests, &device->submit.writes);
1207 list_add_tail(&req->req_pending_master_completion,
1208 &device->pending_master_completion[1 /* WRITE */]);
1209 spin_unlock_irq(&device->resource->req_lock);
1210 queue_work(device->submit.wq, &device->submit.worker);
1211 /* do_submit() may sleep internally on al_wait, too */
1212 wake_up(&device->al_wait);
1215 /* returns the new drbd_request pointer, if the caller is expected to
1216 * drbd_send_and_submit() it (to save latency), or NULL if we queued the
1217 * request on the submitter thread.
1218 * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
1220 static struct drbd_request *
1221 drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
1223 const int rw = bio_data_dir(bio);
1224 struct drbd_request *req;
1226 /* allocate outside of all locks; */
1227 req = drbd_req_new(device, bio);
1230 /* only pass the error to the upper layers.
1231 * if user cannot handle io errors, that's not our business. */
1232 drbd_err(device, "could not kmalloc() req\n");
1233 bio->bi_status = BLK_STS_RESOURCE;
1235 return ERR_PTR(-ENOMEM);
1237 req->start_jif = start_jif;
1239 if (!get_ldev(device)) {
1240 bio_put(req->private_bio);
1241 req->private_bio = NULL;
1244 /* Update disk stats */
1245 _drbd_start_io_acct(device, req);
1247 /* process discards always from our submitter thread */
1248 if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
1249 bio_op(bio) == REQ_OP_DISCARD)
1250 goto queue_for_submitter_thread;
1252 if (rw == WRITE && req->private_bio && req->i.size
1253 && !test_bit(AL_SUSPENDED, &device->flags)) {
1254 if (!drbd_al_begin_io_fastpath(device, &req->i))
1255 goto queue_for_submitter_thread;
1256 req->rq_state |= RQ_IN_ACT_LOG;
1257 req->in_actlog_jif = jiffies;
1261 queue_for_submitter_thread:
1262 atomic_inc(&device->ap_actlog_cnt);
1263 drbd_queue_write(device, req);
1267 /* Require at least one path to current data.
1268 * We don't want to allow writes on C_STANDALONE D_INCONSISTENT:
1269 * We would not allow to read what was written,
1270 * we would not have bumped the data generation uuids,
1271 * we would cause data divergence for all the wrong reasons.
1273 * If we don't see at least one D_UP_TO_DATE, we will fail this request,
1274 * which either returns EIO, or, if OND_SUSPEND_IO is set, suspends IO,
1275 * and queues for retry later.
1277 static bool may_do_writes(struct drbd_device *device)
1279 const union drbd_dev_state s = device->state;
1280 return s.disk == D_UP_TO_DATE || s.pdsk == D_UP_TO_DATE;
1283 struct drbd_plug_cb {
1284 struct blk_plug_cb cb;
1285 struct drbd_request *most_recent_req;
1286 /* do we need more? */
1289 static void drbd_unplug(struct blk_plug_cb *cb, bool from_schedule)
1291 struct drbd_plug_cb *plug = container_of(cb, struct drbd_plug_cb, cb);
1292 struct drbd_resource *resource = plug->cb.data;
1293 struct drbd_request *req = plug->most_recent_req;
1299 spin_lock_irq(&resource->req_lock);
1300 /* In case the sender did not process it yet, raise the flag to
1301 * have it followed with P_UNPLUG_REMOTE just after. */
1302 req->rq_state |= RQ_UNPLUG;
1303 /* but also queue a generic unplug */
1304 drbd_queue_unplug(req->device);
1305 kref_put(&req->kref, drbd_req_destroy);
1306 spin_unlock_irq(&resource->req_lock);
1309 static struct drbd_plug_cb* drbd_check_plugged(struct drbd_resource *resource)
1311 /* A lot of text to say
1312 * return (struct drbd_plug_cb*)blk_check_plugged(); */
1313 struct drbd_plug_cb *plug;
1314 struct blk_plug_cb *cb = blk_check_plugged(drbd_unplug, resource, sizeof(*plug));
1317 plug = container_of(cb, struct drbd_plug_cb, cb);
1323 static void drbd_update_plug(struct drbd_plug_cb *plug, struct drbd_request *req)
1325 struct drbd_request *tmp = plug->most_recent_req;
1326 /* Will be sent to some peer.
1327 * Remember to tag it with UNPLUG_REMOTE on unplug */
1328 kref_get(&req->kref);
1329 plug->most_recent_req = req;
1331 kref_put(&tmp->kref, drbd_req_destroy);
1334 static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
1336 struct drbd_resource *resource = device->resource;
1337 const int rw = bio_data_dir(req->master_bio);
1338 struct bio_and_error m = { NULL, };
1339 bool no_remote = false;
1340 bool submit_private_bio = false;
1342 spin_lock_irq(&resource->req_lock);
1344 /* This may temporarily give up the req_lock,
1345 * but will re-acquire it before it returns here.
1346 * Needs to be before the check on drbd_suspended() */
1347 complete_conflicting_writes(req);
1348 /* no more giving up req_lock from now on! */
1350 /* check for congestion, and potentially stop sending
1351 * full data updates, but start sending "dirty bits" only. */
1352 maybe_pull_ahead(device);
1356 if (drbd_suspended(device)) {
1357 /* push back and retry: */
1358 req->rq_state |= RQ_POSTPONED;
1359 if (req->private_bio) {
1360 bio_put(req->private_bio);
1361 req->private_bio = NULL;
1367 /* We fail READ early, if we can not serve it.
1368 * We must do this before req is registered on any lists.
1369 * Otherwise, drbd_req_complete() will queue failed READ for retry. */
1371 if (!do_remote_read(req) && !req->private_bio)
1375 /* which transfer log epoch does this belong to? */
1376 req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr);
1378 /* no point in adding empty flushes to the transfer log,
1379 * they are mapped to drbd barriers already. */
1380 if (likely(req->i.size!=0)) {
1382 first_peer_device(device)->connection->current_tle_writes++;
1384 list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log);
1388 if (req->private_bio && !may_do_writes(device)) {
1389 bio_put(req->private_bio);
1390 req->private_bio = NULL;
1394 if (!drbd_process_write_request(req))
1397 /* We either have a private_bio, or we can read from remote.
1398 * Otherwise we had done the goto nodata above. */
1399 if (req->private_bio == NULL) {
1400 _req_mod(req, TO_BE_SENT);
1401 _req_mod(req, QUEUE_FOR_NET_READ);
1406 if (no_remote == false) {
1407 struct drbd_plug_cb *plug = drbd_check_plugged(resource);
1409 drbd_update_plug(plug, req);
1412 /* If it took the fast path in drbd_request_prepare, add it here.
1413 * The slow path has added it already. */
1414 if (list_empty(&req->req_pending_master_completion))
1415 list_add_tail(&req->req_pending_master_completion,
1416 &device->pending_master_completion[rw == WRITE]);
1417 if (req->private_bio) {
1418 /* needs to be marked within the same spinlock */
1419 req->pre_submit_jif = jiffies;
1420 list_add_tail(&req->req_pending_local,
1421 &device->pending_completion[rw == WRITE]);
1422 _req_mod(req, TO_BE_SUBMITTED);
1423 /* but we need to give up the spinlock to submit */
1424 submit_private_bio = true;
1425 } else if (no_remote) {
1427 if (__ratelimit(&drbd_ratelimit_state))
1428 drbd_err(device, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
1429 (unsigned long long)req->i.sector, req->i.size >> 9);
1430 /* A write may have been queued for send_oos, however.
1431 * So we can not simply free it, we must go through drbd_req_put_completion_ref() */
1435 drbd_req_put_completion_ref(req, &m, 1);
1436 spin_unlock_irq(&resource->req_lock);
1438 /* Even though above is a kref_put(), this is safe.
1439 * As long as we still need to submit our private bio,
1440 * we hold a completion ref, and the request cannot disappear.
1441 * If however this request did not even have a private bio to submit
1442 * (e.g. remote read), req may already be invalid now.
1443 * That's why we cannot check on req->private_bio. */
1444 if (submit_private_bio)
1445 drbd_submit_req_private_bio(req);
1447 complete_master_bio(device, &m);
1450 void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
1452 struct drbd_request *req = drbd_request_prepare(device, bio, start_jif);
1453 if (IS_ERR_OR_NULL(req))
1455 drbd_send_and_submit(device, req);
1458 static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
1460 struct blk_plug plug;
1461 struct drbd_request *req, *tmp;
1463 blk_start_plug(&plug);
1464 list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
1465 const int rw = bio_data_dir(req->master_bio);
1467 if (rw == WRITE /* rw != WRITE should not even end up here! */
1468 && req->private_bio && req->i.size
1469 && !test_bit(AL_SUSPENDED, &device->flags)) {
1470 if (!drbd_al_begin_io_fastpath(device, &req->i))
1473 req->rq_state |= RQ_IN_ACT_LOG;
1474 req->in_actlog_jif = jiffies;
1475 atomic_dec(&device->ap_actlog_cnt);
1478 list_del_init(&req->tl_requests);
1479 drbd_send_and_submit(device, req);
1481 blk_finish_plug(&plug);
1484 static bool prepare_al_transaction_nonblock(struct drbd_device *device,
1485 struct list_head *incoming,
1486 struct list_head *pending,
1487 struct list_head *later)
1489 struct drbd_request *req;
1493 spin_lock_irq(&device->al_lock);
1494 while ((req = list_first_entry_or_null(incoming, struct drbd_request, tl_requests))) {
1495 err = drbd_al_begin_io_nonblock(device, &req->i);
1496 if (err == -ENOBUFS)
1501 list_move_tail(&req->tl_requests, later);
1503 list_move_tail(&req->tl_requests, pending);
1505 spin_unlock_irq(&device->al_lock);
1507 wake_up(&device->al_wait);
1508 return !list_empty(pending);
1511 static void send_and_submit_pending(struct drbd_device *device, struct list_head *pending)
1513 struct blk_plug plug;
1514 struct drbd_request *req;
1516 blk_start_plug(&plug);
1517 while ((req = list_first_entry_or_null(pending, struct drbd_request, tl_requests))) {
1518 req->rq_state |= RQ_IN_ACT_LOG;
1519 req->in_actlog_jif = jiffies;
1520 atomic_dec(&device->ap_actlog_cnt);
1521 list_del_init(&req->tl_requests);
1522 drbd_send_and_submit(device, req);
1524 blk_finish_plug(&plug);
1527 void do_submit(struct work_struct *ws)
1529 struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker);
1530 LIST_HEAD(incoming); /* from drbd_make_request() */
1531 LIST_HEAD(pending); /* to be submitted after next AL-transaction commit */
1532 LIST_HEAD(busy); /* blocked by resync requests */
1534 /* grab new incoming requests */
1535 spin_lock_irq(&device->resource->req_lock);
1536 list_splice_tail_init(&device->submit.writes, &incoming);
1537 spin_unlock_irq(&device->resource->req_lock);
1542 /* move used-to-be-busy back to front of incoming */
1543 list_splice_init(&busy, &incoming);
1544 submit_fast_path(device, &incoming);
1545 if (list_empty(&incoming))
1549 prepare_to_wait(&device->al_wait, &wait, TASK_UNINTERRUPTIBLE);
1551 list_splice_init(&busy, &incoming);
1552 prepare_al_transaction_nonblock(device, &incoming, &pending, &busy);
1553 if (!list_empty(&pending))
1558 /* If all currently "hot" activity log extents are kept busy by
1559 * incoming requests, we still must not totally starve new
1560 * requests to "cold" extents.
1561 * Something left on &incoming means there had not been
1562 * enough update slots available, and the activity log
1563 * has been marked as "starving".
1565 * Try again now, without looking for new requests,
1566 * effectively blocking all new requests until we made
1567 * at least _some_ progress with what we currently have.
1569 if (!list_empty(&incoming))
1572 /* Nothing moved to pending, but nothing left
1573 * on incoming: all moved to busy!
1574 * Grab new and iterate. */
1575 spin_lock_irq(&device->resource->req_lock);
1576 list_splice_tail_init(&device->submit.writes, &incoming);
1577 spin_unlock_irq(&device->resource->req_lock);
1579 finish_wait(&device->al_wait, &wait);
1581 /* If the transaction was full, before all incoming requests
1582 * had been processed, skip ahead to commit, and iterate
1583 * without splicing in more incoming requests from upper layers.
1585 * Else, if all incoming have been processed,
1586 * they have become either "pending" (to be submitted after
1587 * next transaction commit) or "busy" (blocked by resync).
1589 * Maybe more was queued, while we prepared the transaction?
1590 * Try to stuff those into this transaction as well.
1591 * Be strictly non-blocking here,
1592 * we already have something to commit.
1594 * Commit if we don't make any more progress.
1597 while (list_empty(&incoming)) {
1598 LIST_HEAD(more_pending);
1599 LIST_HEAD(more_incoming);
1602 /* It is ok to look outside the lock,
1603 * it's only an optimization anyways */
1604 if (list_empty(&device->submit.writes))
1607 spin_lock_irq(&device->resource->req_lock);
1608 list_splice_tail_init(&device->submit.writes, &more_incoming);
1609 spin_unlock_irq(&device->resource->req_lock);
1611 if (list_empty(&more_incoming))
1614 made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending, &busy);
1616 list_splice_tail_init(&more_pending, &pending);
1617 list_splice_tail_init(&more_incoming, &incoming);
1622 drbd_al_begin_io_commit(device);
1623 send_and_submit_pending(device, &pending);
1627 blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio)
1629 struct drbd_device *device = (struct drbd_device *) q->queuedata;
1630 unsigned long start_jif;
1632 blk_queue_split(q, &bio);
1634 start_jif = jiffies;
1637 * what we "blindly" assume:
1639 D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));
1642 __drbd_make_request(device, bio, start_jif);
1643 return BLK_QC_T_NONE;
1646 static bool net_timeout_reached(struct drbd_request *net_req,
1647 struct drbd_connection *connection,
1648 unsigned long now, unsigned long ent,
1649 unsigned int ko_count, unsigned int timeout)
1651 struct drbd_device *device = net_req->device;
1653 if (!time_after(now, net_req->pre_send_jif + ent))
1656 if (time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent))
1659 if (net_req->rq_state & RQ_NET_PENDING) {
1660 drbd_warn(device, "Remote failed to finish a request within %ums > ko-count (%u) * timeout (%u * 0.1s)\n",
1661 jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout);
1665 /* We received an ACK already (or are using protocol A),
1666 * but are waiting for the epoch closing barrier ack.
1667 * Check if we sent the barrier already. We should not blame the peer
1668 * for being unresponsive, if we did not even ask it yet. */
1669 if (net_req->epoch == connection->send.current_epoch_nr) {
1671 "We did not send a P_BARRIER for %ums > ko-count (%u) * timeout (%u * 0.1s); drbd kernel thread blocked?\n",
1672 jiffies_to_msecs(now - net_req->pre_send_jif), ko_count, timeout);
1676 /* Worst case: we may have been blocked for whatever reason, then
1677 * suddenly are able to send a lot of requests (and epoch separating
1678 * barriers) in quick succession.
1679 * The timestamp of the net_req may be much too old and not correspond
1680 * to the sending time of the relevant unack'ed barrier packet, so
1681 * would trigger a spurious timeout. The latest barrier packet may
1682 * have a too recent timestamp to trigger the timeout, potentially miss
1683 a timeout. Right now we don't have a place to conveniently store these timestamps.
1685 * But in this particular situation, the application requests are still
1686 * completed to upper layers, DRBD should still "feel" responsive.
1687 * No need yet to kill this connection, it may still recover.
1688 * If not, eventually we will have queued enough into the network for
1689 * us to block. From that point of view, the timestamp of the last sent
1690 * barrier packet is relevant enough.
1692 if (time_after(now, connection->send.last_sent_barrier_jif + ent)) {
1693 drbd_warn(device, "Remote failed to answer a P_BARRIER (sent at %lu jif; now=%lu jif) within %ums > ko-count (%u) * timeout (%u * 0.1s)\n",
1694 connection->send.last_sent_barrier_jif, now,
1695 jiffies_to_msecs(now - connection->send.last_sent_barrier_jif), ko_count, timeout);
1701 /* A request is considered timed out, if
1702 * - we have some effective timeout from the configuration,
1703 * with some state restrictions applied,
1704 * - the oldest request is waiting for a response from the network
1705 * resp. the local disk,
1706 * - the oldest request is in fact older than the effective timeout,
1707 * - the connection was established (resp. disk was attached)
1708 * for longer than the timeout already.
1709 * Note that for 32bit jiffies and very stable connections/disks,
1710 * we may have a wrap around, which is caught by
1711 * !time_in_range(now, last_..._jif, last_..._jif + timeout).
1713 * Side effect: once per 32bit wrap-around interval, which means every
1714 * ~198 days with 250 HZ, we have a window where the timeout would need
1715 * to expire twice (worst case) to become effective. Good enough.
1718 void request_timer_fn(unsigned long data)
1720 struct drbd_device *device = (struct drbd_device *) data;
1721 struct drbd_connection *connection = first_peer_device(device)->connection;
1722 struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */
1723 struct net_conf *nc;
1724 unsigned long oldest_submit_jif;
1725 unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
1727 unsigned int ko_count = 0, timeout = 0;
1730 nc = rcu_dereference(connection->net_conf);
1731 if (nc && device->state.conn >= C_WF_REPORT_PARAMS) {
1732 ko_count = nc->ko_count;
1733 timeout = nc->timeout;
1736 if (get_ldev(device)) { /* implicit state.disk >= D_INCONSISTENT */
1737 dt = rcu_dereference(device->ldev->disk_conf)->disk_timeout * HZ / 10;
1743 ent = timeout * HZ/10 * ko_count;
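/* Example: with timeout = 60 (i.e. 6.0 seconds, configured in units of 0.1s)
 * and ko_count = 7, the effective network timeout "ent" is 42 seconds. */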
1744 et = min_not_zero(dt, ent);
1747 return; /* Recurring timer stopped */
1752 spin_lock_irq(&device->resource->req_lock);
1753 req_read = list_first_entry_or_null(&device->pending_completion[0], struct drbd_request, req_pending_local);
1754 req_write = list_first_entry_or_null(&device->pending_completion[1], struct drbd_request, req_pending_local);
1756 /* maybe the oldest request waiting for the peer is in fact still
1757 * blocking in tcp sendmsg. That's ok, though, that's handled via the
1758 * socket send timeout, requesting a ping, and bumping ko-count in
1759 * we_should_drop_the_connection().
1762 /* check the oldest request we did successfully sent,
1763 * but which is still waiting for an ACK. */
1764 req_peer = connection->req_ack_pending;
1766 /* if we don't have such a request (e.g. protocol A)
1767 * check the oldest request which is still waiting on its epoch
1768 * closing barrier ack. */
1770 req_peer = connection->req_not_net_done;
1772 /* evaluate the oldest peer request only in one timer! */
1773 if (req_peer && req_peer->device != device)
1776 /* do we have something to evaluate? */
1777 if (req_peer == NULL && req_write == NULL && req_read == NULL)
1781 oldest_submit_jif = (req_write && req_read)
1782 ? ( time_before(req_write->pre_submit_jif, req_read->pre_submit_jif)
1783 ? req_write->pre_submit_jif : req_read->pre_submit_jif )
1784 : req_write ? req_write->pre_submit_jif
1785 : req_read ? req_read->pre_submit_jif : now;
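/* i.e. the older pre_submit_jif of the oldest pending local read and write,
 * or "now" if there is no such request. */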
1787 if (ent && req_peer && net_timeout_reached(req_peer, connection, now, ent, ko_count, timeout))
1788 _conn_request_state(connection, NS(conn, C_TIMEOUT), CS_VERBOSE | CS_HARD);
1790 if (dt && oldest_submit_jif != now &&
1791 time_after(now, oldest_submit_jif + dt) &&
1792 !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
1793 drbd_warn(device, "Local backing device failed to meet the disk-timeout\n");
1794 __drbd_chk_io_error(device, DRBD_FORCE_DETACH);
1797 /* Reschedule timer for the nearest not already expired timeout.
1798 * Fallback to now + min(effective network timeout, disk timeout). */
1799 ent = (ent && req_peer && time_before(now, req_peer->pre_send_jif + ent))
1800 ? req_peer->pre_send_jif + ent : now + et;
1801 dt = (dt && oldest_submit_jif != now && time_before(now, oldest_submit_jif + dt))
1802 ? oldest_submit_jif + dt : now + et;
1803 nt = time_before(ent, dt) ? ent : dt;
1805 spin_unlock_irq(&device->resource->req_lock);
1806 mod_timer(&device->request_timer, nt);