// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OPREPLY_FRONT_LEN	512

static struct kmem_cache	*ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;
/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage objects.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */
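/*
 * A minimal sketch of a read through this layer (error handling
 * omitted, pages/len assumed to be the caller's):
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, 0, 0, false);
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
 *	ceph_osdc_start_request(osdc, req, false);
 *	ret = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 *
 * If the osdmap changes before the reply arrives, the request is
 * re-targeted and resent automatically, per the scheme above.
 */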
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq);
static void clear_backoffs(struct ceph_osd *osd);
#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
	bool wrlocked = true;

	if (unlikely(down_read_trylock(sem))) {
		wrlocked = false;
		up_read(sem);
	}

	return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	WARN_ON(!(mutex_is_locked(&osd->lock) &&
		  rwsem_is_locked(&osdc->lock)) &&
		!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
	WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif
/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
		       u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	int r;

	/* object extent? */
	r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
					  objoff, objlen);
	if (r < 0)
		return r;
	if (*objlen < orig_len) {
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
	return 0;
}
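/*
 * E.g. with a default (non-striped) layout of 4M objects, off=6M and
 * *plen=4M map to objnum=1, objoff=2M, objlen=2M: the extent crosses
 * into object 2, so *plen is shortened to 2M and the caller must issue
 * the remainder as a separate request.
 */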
static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;
}

static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
			struct bio *bio, size_t bio_length)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
	osd_data->bio = bio;
	osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

#define osd_req_op_data(oreq, whch, typ, fld)				\
({									\
	struct ceph_osd_request *__oreq = (oreq);			\
	unsigned int __whch = (whch);					\
	BUG_ON(__whch >= __oreq->r_num_ops);				\
	&__oreq->r_ops[__whch].typ.fld;					\
})
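/*
 * E.g. osd_req_op_data(req, 0, extent, osd_data) evaluates to
 * &req->r_ops[0].extent.osd_data after bounds-checking the op index.
 */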
static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
	BUG_ON(which >= osd_req->r_num_ops);

	return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
			   unsigned int which)
{
	return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_raw_data_in(osd_req, which);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
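/*
 * Typical use, sketched: once op 0 is set up as a read, point it at a
 * freshly allocated page vector to receive the reply data:
 *
 *	pages = ceph_alloc_page_vector(calc_pages_for(0, len), GFP_NOFS);
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, true);
 *
 * Passing own_pages=true makes ceph_osd_data_release() free the vector
 * together with the request.
 */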
void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
			unsigned int which, struct bio *bio, size_t bio_length)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bio_init(osd_data, bio, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */
static void osd_req_op_cls_request_info_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_info);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
	osd_req->r_ops[which].cls.indata_len += pagelist->length;
	osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
	osd_req->r_ops[which].cls.indata_len += length;
	osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
	switch (osd_data->type) {
	case CEPH_OSD_DATA_TYPE_NONE:
		return 0;
	case CEPH_OSD_DATA_TYPE_PAGES:
		return osd_data->length;
	case CEPH_OSD_DATA_TYPE_PAGELIST:
		return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
	case CEPH_OSD_DATA_TYPE_BIO:
		return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
	default:
		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
		return 0;
	}
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
		int num_pages;

		num_pages = calc_pages_for((u64)osd_data->alignment,
						(u64)osd_data->length);
		ceph_release_page_vector(osd_data->pages, num_pages);
	}
	ceph_osd_data_init(osd_data);
}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
				unsigned int which)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];

	switch (op->op) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		ceph_osd_data_release(&op->extent.osd_data);
		break;
	case CEPH_OSD_OP_CALL:
		ceph_osd_data_release(&op->cls.request_info);
		ceph_osd_data_release(&op->cls.request_data);
		ceph_osd_data_release(&op->cls.response_data);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		ceph_osd_data_release(&op->xattr.osd_data);
		break;
	case CEPH_OSD_OP_STAT:
		ceph_osd_data_release(&op->raw_data_in);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		ceph_osd_data_release(&op->notify_ack.request_data);
		break;
	case CEPH_OSD_OP_NOTIFY:
		ceph_osd_data_release(&op->notify.request_data);
		ceph_osd_data_release(&op->notify.response_data);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		ceph_osd_data_release(&op->list_watchers.response_data);
		break;
	default:
		break;
	}
}
/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
	ceph_oid_init(&t->base_oid);
	ceph_oloc_init(&t->base_oloc);
	ceph_oid_init(&t->target_oid);
	ceph_oloc_init(&t->target_oloc);

	ceph_osds_init(&t->acting);
	ceph_osds_init(&t->up);
	t->size = -1;
	t->min_size = -1;

	t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
			const struct ceph_osd_request_target *src)
{
	ceph_oid_copy(&dest->base_oid, &src->base_oid);
	ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
	ceph_oid_copy(&dest->target_oid, &src->target_oid);
	ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

	dest->pgid = src->pgid; /* struct */
	dest->spgid = src->spgid; /* struct */
	dest->pg_num = src->pg_num;
	dest->pg_num_mask = src->pg_num_mask;
	ceph_osds_copy(&dest->acting, &src->acting);
	ceph_osds_copy(&dest->up, &src->up);
	dest->size = src->size;
	dest->min_size = src->min_size;
	dest->sort_bitwise = src->sort_bitwise;
	dest->recovery_deletes = src->recovery_deletes;

	dest->flags = src->flags;
	dest->paused = src->paused;

	dest->epoch = src->epoch;
	dest->last_force_resend = src->last_force_resend;

	dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
	ceph_oid_destroy(&t->base_oid);
	ceph_oloc_destroy(&t->base_oloc);
	ceph_oid_destroy(&t->target_oid);
	ceph_oloc_destroy(&t->target_oloc);
}

/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{
	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
	WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
	WARN_ON(!list_empty(&req->r_unsafe_item));
	WARN_ON(req->r_osd);
}
static void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
					    struct ceph_osd_request, r_kref);
	unsigned int which;

	dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
	     req->r_request, req->r_reply);
	request_release_checks(req);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);

	for (which = 0; which < req->r_num_ops; which++)
		osd_req_op_data_release(req, which);

	target_destroy(&req->r_t);
	ceph_put_snap_context(req->r_snapc);

	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
		kmem_cache_free(ceph_osd_request_cache, req);
	else
		kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
	dout("%s %p (was %d)\n", __func__, req,
	     kref_read(&req->r_kref));
	kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
	if (req) {
		dout("%s %p (was %d)\n", __func__, req,
		     kref_read(&req->r_kref));
		kref_put(&req->r_kref, ceph_osdc_release_request);
	}
}
EXPORT_SYMBOL(ceph_osdc_put_request);
static void request_init(struct ceph_osd_request *req)
{
	/* req only, each op is zeroed in _osd_req_op_init() */
	memset(req, 0, sizeof(*req));

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	RB_CLEAR_NODE(&req->r_node);
	RB_CLEAR_NODE(&req->r_mc_node);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	target_init(&req->r_t);
}

/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	bool mempool = req->r_mempool;
	unsigned int num_ops = req->r_num_ops;
	u64 snapid = req->r_snapid;
	struct ceph_snap_context *snapc = req->r_snapc;
	bool linger = req->r_linger;
	struct ceph_msg *request_msg = req->r_request;
	struct ceph_msg *reply_msg = req->r_reply;

	dout("%s req %p\n", __func__, req);
	WARN_ON(kref_read(&req->r_kref) != 1);
	request_release_checks(req);

	WARN_ON(kref_read(&request_msg->kref) != 1);
	WARN_ON(kref_read(&reply_msg->kref) != 1);
	target_destroy(&req->r_t);

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = snapid;
	req->r_snapc = snapc;
	req->r_linger = linger;
	req->r_request = request_msg;
	req->r_reply = reply_msg;
}
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_ops,
					       bool use_mempool,
					       gfp_t gfp_flags)
{
	struct ceph_osd_request *req;

	if (use_mempool) {
		BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
	} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
	} else {
		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
		req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
			      gfp_flags);
	}
	if (unlikely(!req))
		return NULL;

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = use_mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = CEPH_NOSNAP;
	req->r_snapc = ceph_get_snap_context(snapc);

	dout("%s req %p\n", __func__, req);
	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
{
	return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}
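/*
 * That is 8 (pool) + 4 (preferred) + 4 (key len, always empty) +
 * 4 (namespace len) plus the namespace bytes -- it must stay in sync
 * with encode_oloc() below.
 */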
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_msg *msg;
	int msg_size;

	WARN_ON(ceph_oid_empty(&req->r_base_oid));
	WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

	/* create request message */
	msg_size = CEPH_ENCODING_START_BLK_LEN +
			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
	msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			sizeof(struct ceph_osd_reqid); /* reqid */
	msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
	msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
	msg_size += 4 + req->r_base_oid.name_len; /* oid */
	msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
	msg_size += 8; /* snapid */
	msg_size += 8; /* snap_seq */
	msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
	msg_size += 4 + 8; /* retry_attempt, features */

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
	if (!msg)
		return -ENOMEM;

	memset(msg->front.iov_base, 0, msg->front.iov_len);
	req->r_request = msg;

	/* create reply message */
	msg_size = OSD_OPREPLY_FRONT_LEN;
	msg_size += req->r_base_oid.name_len;
	msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
	if (!msg)
		return -ENOMEM;

	req->r_reply = msg;

	return 0;
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);
static bool osd_req_opcode_valid(u16 opcode)
{
	switch (opcode) {
#define GENERATE_CASE(op, opcode, str)	case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
	default:
		return false;
	}
}
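/*
 * __CEPH_FORALL_OSD_OPS() is an X-macro: it expands GENERATE_CASE()
 * once per known op into "case CEPH_OSD_OP_<op>: return true;", so any
 * opcode missing from the table falls through to false.
 */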
/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
		 u16 opcode, u32 flags)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	BUG_ON(!osd_req_opcode_valid(opcode));

	op = &osd_req->r_ops[which];
	memset(op, 0, sizeof (*op));
	op->op = opcode;
	op->flags = flags;

	return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
		     unsigned int which, u16 opcode, u32 flags)
{
	(void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
			    unsigned int which, u16 opcode,
			    u64 offset, u64 length,
			    u64 truncate_size, u32 truncate_seq)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	size_t payload_len = 0;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
	       opcode != CEPH_OSD_OP_TRUNCATE);

	op->extent.offset = offset;
	op->extent.length = length;
	op->extent.truncate_size = truncate_size;
	op->extent.truncate_seq = truncate_seq;
	if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
		payload_len += length;

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);
void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
			      unsigned int which, u64 length)
{
	struct ceph_osd_req_op *op;
	u64 previous;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];
	previous = op->extent.length;

	if (length == previous)
		return; /* Nothing to do */
	BUG_ON(length > previous);

	op->extent.length = length;
	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
				unsigned int which, u64 offset_inc)
{
	struct ceph_osd_req_op *op, *prev_op;

	BUG_ON(which + 1 >= osd_req->r_num_ops);

	prev_op = &osd_req->r_ops[which];
	op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
	/* dup previous one */
	op->indata_len = prev_op->indata_len;
	op->outdata_len = prev_op->outdata_len;
	op->extent = prev_op->extent;
	/* adjust offset */
	op->extent.offset += offset_inc;
	op->extent.length -= offset_inc;

	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);
void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
			 u16 opcode, const char *class, const char *method)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len = 0;
	size_t size;

	BUG_ON(opcode != CEPH_OSD_OP_CALL);

	pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
	BUG_ON(!pagelist);
	ceph_pagelist_init(pagelist);

	op->cls.class_name = class;
	size = strlen(class);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.class_len = size;
	ceph_pagelist_append(pagelist, class, size);
	payload_len += size;

	op->cls.method_name = method;
	size = strlen(method);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.method_len = size;
	ceph_pagelist_append(pagelist, method, size);
	payload_len += size;

	osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
			  u16 opcode, const char *name, const void *value,
			  size_t size, u8 cmp_op, u8 cmp_mode)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len;

	BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	ceph_pagelist_init(pagelist);

	payload_len = strlen(name);
	op->xattr.name_len = payload_len;
	ceph_pagelist_append(pagelist, name, payload_len);

	op->xattr.value_len = size;
	ceph_pagelist_append(pagelist, value, size);
	payload_len += size;

	op->xattr.cmp_op = cmp_op;
	op->xattr.cmp_mode = cmp_mode;

	ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
	op->indata_len = payload_len;
	return 0;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);
/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u8 watch_opcode)
{
	struct ceph_osd_req_op *op;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
	op->watch.cookie = cookie;
	op->watch.op = watch_opcode;
	op->watch.gen = 0;
}

void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
				unsigned int which,
				u64 expected_object_size,
				u64 expected_write_size)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      CEPH_OSD_OP_SETALLOCHINT,
						      0);

	op->alloc_hint.expected_object_size = expected_object_size;
	op->alloc_hint.expected_write_size = expected_write_size;

	/*
	 * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
	 * not worth a feature bit.  Set FAILOK per-op flag to make
	 * sure older osds don't trip over an unsupported opcode.
	 */
	op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);
static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
				   struct ceph_osd_data *osd_data)
{
	u64 length = ceph_osd_data_length(osd_data);

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		BUG_ON(length > (u64) SIZE_MAX);
		if (length)
			ceph_msg_data_add_pages(msg, osd_data->pages,
					length, osd_data->alignment);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		BUG_ON(!length);
		ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
		ceph_msg_data_add_bio(msg, osd_data->bio, length);
#endif
	} else {
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
	}
}
static u32 osd_req_encode_op(struct ceph_osd_op *dst,
			     const struct ceph_osd_req_op *src)
{
	if (WARN_ON(!osd_req_opcode_valid(src->op))) {
		pr_err("unrecognized osd opcode %d\n", src->op);

		return 0;
	}

	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_TRUNCATE:
		dst->extent.offset = cpu_to_le64(src->extent.offset);
		dst->extent.length = cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		break;
	case CEPH_OSD_OP_CALL:
		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
		break;
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(0);
		dst->watch.op = src->watch.op;
		dst->watch.gen = cpu_to_le32(src->watch.gen);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		break;
	case CEPH_OSD_OP_NOTIFY:
		dst->notify.cookie = cpu_to_le64(src->notify.cookie);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		dst->alloc_hint.expected_object_size =
		    cpu_to_le64(src->alloc_hint.expected_object_size);
		dst->alloc_hint.expected_write_size =
		    cpu_to_le64(src->alloc_hint.expected_write_size);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
		dst->xattr.cmp_op = src->xattr.cmp_op;
		dst->xattr.cmp_mode = src->xattr.cmp_mode;
		break;
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_DELETE:
		break;
	default:
		pr_err("unsupported osd opcode %s\n",
			ceph_osd_op_name(src->op));
		WARN_ON(1);

		return 0;
	}

	dst->op = cpu_to_le16(src->op);
	dst->flags = cpu_to_le32(src->flags);
	dst->payload_len = cpu_to_le32(src->indata_len);

	return src->indata_len;
}
/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       unsigned int which, int num_ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{
	struct ceph_osd_request *req;
	u64 objnum = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	int r;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
					GFP_NOFS);
	if (!req) {
		r = -ENOMEM;
		goto fail;
	}

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
	if (r)
		goto fail;

	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
		osd_req_op_init(req, which, opcode, 0);
	} else {
		u32 object_size = layout->object_size;
		u32 object_base = off - objoff;
		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
			if (truncate_size <= object_base) {
				truncate_size = 0;
			} else {
				truncate_size -= object_base;
				if (truncate_size > object_size)
					truncate_size = object_size;
			}
		}
		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
				       truncate_size, truncate_seq);
	}

	req->r_abort_on_full = true;
	req->r_flags = flags;
	req->r_base_oloc.pool = layout->pool_id;
	req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

	req->r_snapid = vino.snap;
	if (flags & CEPH_OSD_FLAG_WRITE)
		req->r_data_offset = off;

	r = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (r)
		goto fail;

	return req;

fail:
	ceph_osdc_put_request(req);
	return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
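/*
 * A write is set up the same way, plus a snap context and mtime before
 * submission -- a sketch (ts being the caller's struct timespec):
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
 *				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
 *				    snapc, truncate_seq, truncate_size,
 *				    false);
 *	req->r_mtime = ts;
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
 *	ceph_osdc_start_request(osdc, req, false);
 */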
/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
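/*
 * DEFINE_RB_FUNCS(request, ...) generates insert_request(),
 * lookup_request() and erase_request() keyed on r_tid; the _mc variant
 * generates the same helpers for the map-check tree via r_mc_node.
 */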
static bool osd_homeless(struct ceph_osd *osd)
{
	return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
	verify_osdc_locked(osd->o_osdc);

	return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
	refcount_set(&osd->o_ref, 1);
	RB_CLEAR_NODE(&osd->o_node);
	osd->o_requests = RB_ROOT;
	osd->o_linger_requests = RB_ROOT;
	osd->o_backoff_mappings = RB_ROOT;
	osd->o_backoffs_by_id = RB_ROOT;
	INIT_LIST_HEAD(&osd->o_osd_lru);
	INIT_LIST_HEAD(&osd->o_keepalive_item);
	osd->o_incarnation = 1;
	mutex_init(&osd->lock);
}

static void osd_cleanup(struct ceph_osd *osd)
{
	WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
	WARN_ON(!list_empty(&osd->o_osd_lru));
	WARN_ON(!list_empty(&osd->o_keepalive_item));

	if (osd->o_auth.authorizer) {
		WARN_ON(osd_homeless(osd));
		ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
	}
}
/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	WARN_ON(onum == CEPH_HOMELESS_OSD);

	osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
	osd_init(osd);
	osd->o_osdc = osdc;
	osd->o_osd = onum;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (refcount_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
		     refcount_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
	     refcount_read(&osd->o_ref) - 1);
	if (refcount_dec_and_test(&osd->o_ref)) {
		osd_cleanup(osd);
		kfree(osd);
	}
}
DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)

static void __move_osd_to_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));

	spin_lock(&osdc->osd_lru_lock);
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	spin_unlock(&osdc->osd_lru_lock);

	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests))
		__move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	spin_lock(&osdc->osd_lru_lock);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
	spin_unlock(&osdc->osd_lru_lock);
}
/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;

	verify_osdc_wrlocked(osdc);
	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	ceph_con_close(&osd->o_con);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* unlink_request() */

		dout(" reassigning req %p tid %llu\n", req, req->r_tid);
		unlink_request(osd, req);
		link_request(&osdc->homeless_osd, req);
	}
	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		n = rb_next(n); /* unlink_linger() */

		dout(" reassigning lreq %p linger_id %llu\n", lreq,
		     lreq->linger_id);
		unlink_linger(osd, lreq);
		link_linger(&osdc->homeless_osd, lreq);
	}
	clear_backoffs(osd);

	__remove_osd_from_lru(osd);
	erase_osd(&osdc->osds, osd);
	put_osd(osd);
}
static int reopen_osd(struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests)) {
		close_osd(osd);
		return -ENODEV;
	}

	peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
	    !ceph_con_opened(&osd->o_con)) {
		struct rb_node *n;

		dout("osd addr hasn't changed and connection never opened, "
		     "letting msgr retry\n");
		/* touch each r_stamp for handle_timeout()'s benefit */
		for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
			struct ceph_osd_request *req =
			    rb_entry(n, struct ceph_osd_request, r_node);
			req->r_stamp = jiffies;
		}

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}
static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
					  bool wrlocked)
{
	struct ceph_osd *osd;

	if (wrlocked)
		verify_osdc_wrlocked(osdc);
	else
		verify_osdc_locked(osdc);

	if (o != CEPH_HOMELESS_OSD)
		osd = lookup_osd(&osdc->osds, o);
	else
		osd = &osdc->homeless_osd;
	if (!osd) {
		if (!wrlocked)
			return ERR_PTR(-EAGAIN);

		osd = create_osd(osdc, o);
		insert_osd(&osdc->osds, osd);
		ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
			      &osdc->osdmap->osd_addr[osd->o_osd]);
	}

	dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
	return osd;
}
/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(!req->r_tid || req->r_osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_request(&osd->o_requests, req);
	req->r_osd = osd;
}

static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(req->r_osd != osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	req->r_osd = NULL;
	erase_request(&osd->o_requests, req);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __pool_full(struct ceph_pg_pool_info *pi)
{
	return pi->flags & CEPH_POOL_FLAG_FULL;
}

static bool have_pool_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(n, struct ceph_pg_pool_info, node);

		if (__pool_full(pi))
			return true;
	}

	return false;
}
static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return __pool_full(pi);
}

/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
				    const struct ceph_osd_request_target *t,
				    struct ceph_pg_pool_info *pi)
{
	bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		       __pool_full(pi);

	WARN_ON(pi->id != t->target_oloc.pool);
	return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
	       ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
	       (osdc->osdmap->epoch < osdc->epoch_barrier);
}

enum calc_target_result {
	CALC_TARGET_NO_ACTION = 0,
	CALC_TARGET_NEED_RESEND,
	CALC_TARGET_POOL_DNE,
};
static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
					   struct ceph_osd_request_target *t,
					   struct ceph_connection *con,
					   bool any_change)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_pg pgid, last_pgid;
	struct ceph_osds up, acting;
	bool force_resend = false;
	bool unpaused = false;
	bool legacy_change = false;
	bool split = false;
	bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
	bool recovery_deletes = ceph_osdmap_flag(osdc,
						 CEPH_OSDMAP_RECOVERY_DELETES);
	enum calc_target_result ct_res;
	int ret;

	t->epoch = osdc->osdmap->epoch;
	pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
	if (!pi) {
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}

	if (osdc->osdmap->epoch == pi->last_force_request_resend) {
		if (t->last_force_resend < pi->last_force_request_resend) {
			t->last_force_resend = pi->last_force_request_resend;
			force_resend = true;
		} else if (t->last_force_resend == 0) {
			force_resend = true;
		}
	}

	/* apply tiering */
	ceph_oid_copy(&t->target_oid, &t->base_oid);
	ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
	if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
		if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
			t->target_oloc.pool = pi->read_tier;
		if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
			t->target_oloc.pool = pi->write_tier;

		pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
		if (!pi) {
			t->osd = CEPH_HOMELESS_OSD;
			ct_res = CALC_TARGET_POOL_DNE;
			goto out;
		}
	}

	ret = __ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc,
					  &pgid);
	if (ret) {
		WARN_ON(ret != -ENOENT);
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}
	last_pgid.pool = pgid.pool;
	last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);

	ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
	if (any_change &&
	    ceph_is_new_interval(&t->acting,
				 &acting,
				 &t->up,
				 &up,
				 t->size,
				 pi->size,
				 t->min_size,
				 pi->min_size,
				 t->pg_num,
				 pi->pg_num,
				 t->sort_bitwise,
				 sort_bitwise,
				 t->recovery_deletes,
				 recovery_deletes,
				 &last_pgid))
		force_resend = true;

	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
		t->paused = false;
		unpaused = true;
	}
	legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
			ceph_osds_changed(&t->acting, &acting, any_change);

	split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);

	if (legacy_change || force_resend || split) {
		t->pgid = pgid; /* struct */
		ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
		ceph_osds_copy(&t->acting, &acting);
		ceph_osds_copy(&t->up, &up);
		t->size = pi->size;
		t->min_size = pi->min_size;
		t->pg_num = pi->pg_num;
		t->pg_num_mask = pi->pg_num_mask;
		t->sort_bitwise = sort_bitwise;
		t->recovery_deletes = recovery_deletes;

		t->osd = acting.primary;
	}

	if (unpaused || legacy_change || force_resend || split)
		ct_res = CALC_TARGET_NEED_RESEND;
	else
		ct_res = CALC_TARGET_NO_ACTION;

out:
	dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
	     legacy_change, force_resend, split, ct_res, t->osd);
	return ct_res;
}
static struct ceph_spg_mapping *alloc_spg_mapping(void)
{
	struct ceph_spg_mapping *spg;

	spg = kmalloc(sizeof(*spg), GFP_NOIO);
	if (!spg)
		return NULL;

	RB_CLEAR_NODE(&spg->node);
	spg->backoffs = RB_ROOT;
	return spg;
}

static void free_spg_mapping(struct ceph_spg_mapping *spg)
{
	WARN_ON(!RB_EMPTY_NODE(&spg->node));
	WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));

	kfree(spg);
}
/*
 * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
 * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
 * defined only within a specific spgid; it does not pass anything to
 * children on split, or to another primary.
 */
DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
		 RB_BYPTR, const struct ceph_spg *, node)
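/*
 * DEFINE_RB_FUNCS2() is the variant for non-scalar keys: the generated
 * helpers take a const struct ceph_spg * and order nodes with
 * ceph_spg_compare() instead of comparing an integer field directly.
 */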
static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
{
	return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
}

static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
				   void **pkey, size_t *pkey_len)
{
	if (hoid->key_len) {
		*pkey = hoid->key;
		*pkey_len = hoid->key_len;
	} else {
		*pkey = hoid->oid;
		*pkey_len = hoid->oid_len;
	}
}

static int compare_names(const void *name1, size_t name1_len,
			 const void *name2, size_t name2_len)
{
	int ret;

	ret = memcmp(name1, name2, min(name1_len, name2_len));
	if (!ret) {
		if (name1_len < name2_len)
			ret = -1;
		else if (name1_len > name2_len)
			ret = 1;
	}
	return ret;
}
static int hoid_compare(const struct ceph_hobject_id *lhs,
			const struct ceph_hobject_id *rhs)
{
	void *effective_key1, *effective_key2;
	size_t effective_key1_len, effective_key2_len;
	int ret;

	if (lhs->is_max < rhs->is_max)
		return -1;
	if (lhs->is_max > rhs->is_max)
		return 1;

	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;

	if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
		return -1;
	if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
		return 1;

	ret = compare_names(lhs->nspace, lhs->nspace_len,
			    rhs->nspace, rhs->nspace_len);
	if (ret)
		return ret;

	hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
	hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
	ret = compare_names(effective_key1, effective_key1_len,
			    effective_key2, effective_key2_len);
	if (ret)
		return ret;

	ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
	if (ret)
		return ret;

	if (lhs->snapid < rhs->snapid)
		return -1;
	if (lhs->snapid > rhs->snapid)
		return 1;

	return 0;
}
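/*
 * Net effect: backoff ranges sort the way the OSD sorts hobjects --
 * by is_max, then pool, then the bit-reversed hash, then namespace,
 * effective key, oid and finally snapid.
 */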
/*
 * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
 * compat stuff here.
 *
 * Assumes @hoid is zero-initialized.
 */
static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
				  &struct_len);
	if (ret)
		return ret;

	if (struct_v < 4) {
		pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
		goto e_inval;
	}

	hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
						GFP_NOIO);
	if (IS_ERR(hoid->key)) {
		ret = PTR_ERR(hoid->key);
		hoid->key = NULL;
		return ret;
	}

	hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
						GFP_NOIO);
	if (IS_ERR(hoid->oid)) {
		ret = PTR_ERR(hoid->oid);
		hoid->oid = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
	ceph_decode_32_safe(p, end, hoid->hash, e_inval);
	ceph_decode_8_safe(p, end, hoid->is_max, e_inval);

	hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
						   GFP_NOIO);
	if (IS_ERR(hoid->nspace)) {
		ret = PTR_ERR(hoid->nspace);
		hoid->nspace = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->pool, e_inval);

	ceph_hoid_build_hash_cache(hoid);
	return 0;

e_inval:
	return -EINVAL;
}
static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
{
	return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
	       4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
}

static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
{
	ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
	ceph_encode_string(p, end, hoid->key, hoid->key_len);
	ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
	ceph_encode_64(p, hoid->snapid);
	ceph_encode_32(p, hoid->hash);
	ceph_encode_8(p, hoid->is_max);
	ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
	ceph_encode_64(p, hoid->pool);
}

static void free_hoid(struct ceph_hobject_id *hoid)
{
	if (hoid) {
		kfree(hoid->key);
		kfree(hoid->oid);
		kfree(hoid->nspace);
		kfree(hoid);
	}
}
static struct ceph_osd_backoff *alloc_backoff(void)
{
	struct ceph_osd_backoff *backoff;

	backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
	if (!backoff)
		return NULL;

	RB_CLEAR_NODE(&backoff->spg_node);
	RB_CLEAR_NODE(&backoff->id_node);
	return backoff;
}

static void free_backoff(struct ceph_osd_backoff *backoff)
{
	WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
	WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));

	free_hoid(backoff->begin);
	free_hoid(backoff->end);
	kfree(backoff);
}

/*
 * Within a specific spgid, backoffs are managed by ->begin hoid.
 */
DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare,
			RB_BYVAL, spg_node);
static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root,
					const struct ceph_hobject_id *hoid)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct ceph_osd_backoff *cur =
		    rb_entry(n, struct ceph_osd_backoff, spg_node);
		int cmp;

		cmp = hoid_compare(hoid, cur->begin);
		if (cmp < 0) {
			n = n->rb_left;
		} else if (cmp > 0) {
			if (hoid_compare(hoid, cur->end) < 0)
				return cur;

			n = n->rb_right;
		} else {
			return cur;
		}
	}

	return NULL;
}

/*
 * Each backoff has a unique id within its OSD session.
 */
DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node)
static void clear_backoffs(struct ceph_osd *osd)
{
	while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) {
		struct ceph_spg_mapping *spg =
		    rb_entry(rb_first(&osd->o_backoff_mappings),
			     struct ceph_spg_mapping, node);

		while (!RB_EMPTY_ROOT(&spg->backoffs)) {
			struct ceph_osd_backoff *backoff =
			    rb_entry(rb_first(&spg->backoffs),
				     struct ceph_osd_backoff, spg_node);

			erase_backoff(&spg->backoffs, backoff);
			erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
			free_backoff(backoff);
		}
		erase_spg_mapping(&osd->o_backoff_mappings, spg);
		free_spg_mapping(spg);
	}
}
/*
 * Set up a temporary, non-owning view into @t.
 */
static void hoid_fill_from_target(struct ceph_hobject_id *hoid,
				  const struct ceph_osd_request_target *t)
{
	hoid->key = NULL;
	hoid->key_len = 0;
	hoid->oid = t->target_oid.name;
	hoid->oid_len = t->target_oid.name_len;
	hoid->snapid = CEPH_NOSNAP;
	hoid->hash = t->pgid.seed;
	hoid->is_max = false;
	if (t->target_oloc.pool_ns) {
		hoid->nspace = t->target_oloc.pool_ns->str;
		hoid->nspace_len = t->target_oloc.pool_ns->len;
	} else {
		hoid->nspace = NULL;
		hoid->nspace_len = 0;
	}
	hoid->pool = t->target_oloc.pool;
	ceph_hoid_build_hash_cache(hoid);
}
static bool should_plug_request(struct ceph_osd_request *req)
{
	struct ceph_osd *osd = req->r_osd;
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct ceph_hobject_id hoid;

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid);
	if (!spg)
		return false;

	hoid_fill_from_target(&hoid, &req->r_t);
	backoff = lookup_containing_backoff(&spg->backoffs, &hoid);
	if (!backoff)
		return false;

	dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n",
	     __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
	     backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id);
	return true;
}
static void setup_request_data(struct ceph_osd_request *req,
			       struct ceph_msg *msg)
{
	u32 data_len = 0;
	int i;

	if (!list_empty(&msg->data))
		return;

	WARN_ON(msg->data_length);
	for (i = 0; i < req->r_num_ops; i++) {
		struct ceph_osd_req_op *op = &req->r_ops[i];

		switch (op->op) {
		/* request */
		case CEPH_OSD_OP_WRITE:
		case CEPH_OSD_OP_WRITEFULL:
			WARN_ON(op->indata_len != op->extent.length);
			ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
			break;
		case CEPH_OSD_OP_SETXATTR:
		case CEPH_OSD_OP_CMPXATTR:
			WARN_ON(op->indata_len != op->xattr.name_len +
						  op->xattr.value_len);
			ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
			break;
		case CEPH_OSD_OP_NOTIFY_ACK:
			ceph_osdc_msg_data_add(msg,
					       &op->notify_ack.request_data);
			break;

		/* reply */
		case CEPH_OSD_OP_STAT:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->raw_data_in);
			break;
		case CEPH_OSD_OP_READ:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->extent.osd_data);
			break;
		case CEPH_OSD_OP_LIST_WATCHERS:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->list_watchers.response_data);
			break;

		/* both */
		case CEPH_OSD_OP_CALL:
			WARN_ON(op->indata_len != op->cls.class_len +
						  op->cls.method_len +
						  op->cls.indata_len);
			ceph_osdc_msg_data_add(msg, &op->cls.request_info);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(msg, &op->cls.request_data);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->cls.response_data);
			break;
		case CEPH_OSD_OP_NOTIFY:
			ceph_osdc_msg_data_add(msg,
					       &op->notify.request_data);
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->notify.response_data);
			break;
		}

		data_len += op->indata_len;
	}

	WARN_ON(data_len != msg->data_length);
}
static void encode_pgid(void **p, const struct ceph_pg *pgid)
{
	ceph_encode_8(p, 1);
	ceph_encode_64(p, pgid->pool);
	ceph_encode_32(p, pgid->seed);
	ceph_encode_32(p, -1); /* preferred */
}

static void encode_spgid(void **p, const struct ceph_spg *spgid)
{
	ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1);
	encode_pgid(p, &spgid->pgid);
	ceph_encode_8(p, spgid->shard);
}

static void encode_oloc(void **p, void *end,
			const struct ceph_object_locator *oloc)
{
	ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc));
	ceph_encode_64(p, oloc->pool);
	ceph_encode_32(p, -1); /* preferred */
	ceph_encode_32(p, 0); /* key len */
	if (oloc->pool_ns)
		ceph_encode_string(p, end, oloc->pool_ns->str,
				   oloc->pool_ns->len);
	else
		ceph_encode_32(p, 0);
}
static void encode_request_partial(struct ceph_osd_request *req,
				   struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front_alloc_len;
	u32 data_len = 0;
	int i;

	if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
		/* snapshots aren't writeable */
		WARN_ON(req->r_snapid != CEPH_NOSNAP);
	} else {
		WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
			req->r_data_offset || req->r_snapc);
	}

	setup_request_data(req, msg);

	encode_spgid(&p, &req->r_t.spgid); /* actual spg */
	ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
	ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
	ceph_encode_32(&p, req->r_flags);

	/* reqid */
	ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid));
	memset(p, 0, sizeof(struct ceph_osd_reqid));
	p += sizeof(struct ceph_osd_reqid);

	/* trace */
	memset(p, 0, sizeof(struct ceph_blkin_trace_info));
	p += sizeof(struct ceph_blkin_trace_info);

	ceph_encode_32(&p, 0); /* client_inc, always 0 */
	ceph_encode_timespec(p, &req->r_mtime);
	p += sizeof(struct ceph_timespec);

	encode_oloc(&p, end, &req->r_t.target_oloc);
	ceph_encode_string(&p, end, req->r_t.target_oid.name,
			   req->r_t.target_oid.name_len);

	/* ops, can imply data */
	ceph_encode_16(&p, req->r_num_ops);
	for (i = 0; i < req->r_num_ops; i++) {
		data_len += osd_req_encode_op(p, &req->r_ops[i]);
		p += sizeof(struct ceph_osd_op);
	}

	ceph_encode_64(&p, req->r_snapid); /* snapid */
	if (req->r_snapc) {
		ceph_encode_64(&p, req->r_snapc->seq);
		ceph_encode_32(&p, req->r_snapc->num_snaps);
		for (i = 0; i < req->r_snapc->num_snaps; i++)
			ceph_encode_64(&p, req->r_snapc->snaps[i]);
	} else {
		ceph_encode_64(&p, 0); /* snap_seq */
		ceph_encode_32(&p, 0); /* snaps len */
	}

	ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
	BUG_ON(p > end - 8); /* space for features */

	msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */
	/* front_len is finalized in encode_request_finish() */
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	msg->hdr.data_len = cpu_to_le32(data_len);
	/*
	 * The header "data_off" is a hint to the receiver allowing it
	 * to align received data into its buffers such that there's no
	 * need to re-copy it before writing it to disk (direct I/O).
	 */
	msg->hdr.data_off = cpu_to_le16(req->r_data_offset);

	dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg,
	     req->r_t.target_oid.name, req->r_t.target_oid.name_len);
}
static void encode_request_finish(struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const partial_end = p + msg->front.iov_len;
	void *const end = p + msg->front_alloc_len;

	if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) {
		/* luminous OSD -- encode features and be done */
		p = partial_end;
		ceph_encode_64(&p, msg->con->peer_features);
	} else {
		struct {
			char spgid[CEPH_ENCODING_START_BLK_LEN +
				   CEPH_PGID_ENCODING_LEN + 1];
			__le32 hash;
			__le32 epoch;
			__le32 flags;
			char reqid[CEPH_ENCODING_START_BLK_LEN +
				   sizeof(struct ceph_osd_reqid)];
			char trace[sizeof(struct ceph_blkin_trace_info)];
			__le32 client_inc;
			struct ceph_timespec mtime;
		} __packed head;
		struct ceph_pg pgid;
		void *oloc, *oid, *tail;
		int oloc_len, oid_len, tail_len;
		int len;

		/*
		 * Pre-luminous OSD -- reencode v8 into v4 using @head
		 * as a temporary buffer.  Encode the raw PG; the rest
		 * is just a matter of moving oloc, oid and tail blobs
		 * around.
		 */
		memcpy(&head, p, sizeof(head));
		p += sizeof(head);

		oloc = p;
		p += CEPH_ENCODING_START_BLK_LEN;
		pgid.pool = ceph_decode_64(&p);
		p += 4 + 4; /* preferred, key len */
		len = ceph_decode_32(&p);
		p += len; /* nspace */
		oloc_len = p - oloc;

		oid = p;
		len = ceph_decode_32(&p);
		p += len;
		oid_len = p - oid;

		tail = p;
		tail_len = partial_end - p;

		p = msg->front.iov_base;
		ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc));
		ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch));
		ceph_encode_copy(&p, &head.flags, sizeof(head.flags));
		ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime));

		/* reassert_version */
		memset(p, 0, sizeof(struct ceph_eversion));
		p += sizeof(struct ceph_eversion);

		/* oloc */
		memmove(p, oloc, oloc_len);
		p += oloc_len;

		pgid.seed = le32_to_cpu(head.hash);
		encode_pgid(&p, &pgid); /* raw pg */

		/* oid */
		memmove(p, oid, oid_len);
		p += oid_len;

		/* tail -- ops, snapid, snapc, retry_attempt */
		memmove(p, tail, tail_len);
		p += tail_len;

		msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
	}

	BUG_ON(p > end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg,
	     le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len),
	     le16_to_cpu(msg->hdr.version));
}
/*
 * @req has to be assigned a tid and registered.
 */
static void send_request(struct ceph_osd_request *req)
{
	struct ceph_osd *osd = req->r_osd;

	verify_osd_locked(osd);
	WARN_ON(osd->o_osd != req->r_t.osd);

	/* backoff? */
	if (should_plug_request(req))
		return;

	/*
	 * We may have a previously queued request message hanging
	 * around.  Cancel it to avoid corrupting the msgr.
	 */
	if (req->r_sent)
		ceph_msg_revoke(req->r_request);

	req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
	if (req->r_attempts)
		req->r_flags |= CEPH_OSD_FLAG_RETRY;
	else
		WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);

	encode_request_partial(req, req->r_request);

	dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n",
	     __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
	     req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
	     req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags,
	     req->r_attempts);

	req->r_t.paused = false;
	req->r_stamp = jiffies;
	req->r_attempts++;

	req->r_sent = osd->o_incarnation;
	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
	ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
}
static void maybe_request_map(struct ceph_osd_client *osdc)
{
	bool continuous = false;

	verify_osdc_locked(osdc);
	WARN_ON(!osdc->osdmap->epoch);

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
		dout("%s osdc %p continuous\n", __func__, osdc);
		continuous = true;
	} else {
		dout("%s osdc %p onetime\n", __func__, osdc);
	}

	if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
			       osdc->osdmap->epoch + 1, continuous))
		ceph_monc_renew_subs(&osdc->client->monc);
}
static void complete_request(struct ceph_osd_request *req, int err);
static void send_map_check(struct ceph_osd_request *req);

static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd *osd;
	enum calc_target_result ct_res;
	bool need_send = false;
	bool promoted = false;
	bool need_abort = false;

	WARN_ON(req->r_tid);
	dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);

again:
	ct_res = calc_target(osdc, &req->r_t, NULL, false);
	if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
		goto promote;

	osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
	if (IS_ERR(osd)) {
		WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
		goto promote;
	}

	if (osdc->osdmap->epoch < osdc->epoch_barrier) {
		dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
		     osdc->epoch_barrier);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
		   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
		dout("req %p pausewr\n", req);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
		   ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
		dout("req %p pauserd\n", req);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
		   !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
				     CEPH_OSD_FLAG_FULL_FORCE)) &&
		   (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		    pool_full(osdc, req->r_t.base_oloc.pool))) {
		dout("req %p full/pool_full\n", req);
		pr_warn_ratelimited("FULL or reached pool quota\n");
		req->r_t.paused = true;
		maybe_request_map(osdc);
		if (req->r_abort_on_full)
			need_abort = true;
	} else if (!osd_homeless(osd)) {
		need_send = true;
	} else {
		maybe_request_map(osdc);
	}

	mutex_lock(&osd->lock);
	/*
	 * Assign the tid atomically with send_request() to protect
	 * multiple writes to the same object from racing with each
	 * other, resulting in out of order ops on the OSDs.
	 */
	req->r_tid = atomic64_inc_return(&osdc->last_tid);
	link_request(osd, req);
	if (need_send)
		send_request(req);
	else if (need_abort)
		complete_request(req, -ENOSPC);
	mutex_unlock(&osd->lock);

	if (ct_res == CALC_TARGET_POOL_DNE)
		send_map_check(req);

	if (promoted)
		downgrade_write(&osdc->lock);
	return;

promote:
	up_read(&osdc->lock);
	down_write(&osdc->lock);
	wrlocked = true;
	promoted = true;
	goto again;
}
static void account_request(struct ceph_osd_request *req)
{
	WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
	WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));

	req->r_flags |= CEPH_OSD_FLAG_ONDISK;
	atomic_inc(&req->r_osdc->num_requests);

	req->r_start_stamp = jiffies;
}

static void submit_request(struct ceph_osd_request *req, bool wrlocked)
{
	ceph_osdc_get_request(req);
	account_request(req);
	__submit_request(req, wrlocked);
}

static void finish_request(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;

	WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);

	if (req->r_osd)
		unlink_request(req->r_osd, req);
	atomic_dec(&osdc->num_requests);

	/*
	 * If an OSD has failed or returned and a request has been sent
	 * twice, it's possible to get a reply and end up here while the
	 * request message is queued for delivery.  We will ignore the
	 * reply, so not a big deal, but better to try and catch it.
	 */
	ceph_msg_revoke(req->r_request);
	ceph_msg_revoke_incoming(req->r_reply);
}
2223 static void __complete_request(struct ceph_osd_request *req)
2225 if (req->r_callback) {
2226 dout("%s req %p tid %llu cb %pf result %d\n", __func__, req,
2227 req->r_tid, req->r_callback, req->r_result);
2228 req->r_callback(req);
2233 * This is open-coded in handle_reply().
2235 static void complete_request(struct ceph_osd_request *req, int err)
2237 dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2239 req->r_result = err;
2240 finish_request(req);
2241 __complete_request(req);
2242 complete_all(&req->r_completion);
	ceph_osdc_put_request(req);
}
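/*
 * Drop the map-check entry (and its ref on @req), if send_map_check()
 * registered one.
 */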
2246 static void cancel_map_check(struct ceph_osd_request *req)
2248 struct ceph_osd_client *osdc = req->r_osdc;
2249 struct ceph_osd_request *lookup_req;
2251 verify_osdc_wrlocked(osdc);
2253 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
	if (!lookup_req)
		return;

	WARN_ON(lookup_req != req);
2258 erase_request_mc(&osdc->map_checks, req);
2259 ceph_osdc_put_request(req);
2262 static void cancel_request(struct ceph_osd_request *req)
2264 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2266 cancel_map_check(req);
2267 finish_request(req);
2268 complete_all(&req->r_completion);
	ceph_osdc_put_request(req);
}
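/*
 * Unlike cancel_request(), abort_request() completes the request back
 * to its submitter with @err, so waiters see a result instead of a
 * silent unregistration.
 */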
2272 static void abort_request(struct ceph_osd_request *req, int err)
2274 dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2276 cancel_map_check(req);
2277 complete_request(req, err);
2280 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2282 if (likely(eb > osdc->epoch_barrier)) {
2283 dout("updating epoch_barrier from %u to %u\n",
2284 osdc->epoch_barrier, eb);
2285 osdc->epoch_barrier = eb;
2286 /* Request map if we're not to the barrier yet */
2287 if (eb > osdc->osdmap->epoch)
2288 maybe_request_map(osdc);
2292 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2294 down_read(&osdc->lock);
2295 if (unlikely(eb > osdc->epoch_barrier)) {
2296 up_read(&osdc->lock);
2297 down_write(&osdc->lock);
2298 update_epoch_barrier(osdc, eb);
2299 up_write(&osdc->lock);
2301 up_read(&osdc->lock);
2304 EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
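/*
 * Hypothetical usage sketch (not a caller in this file): a client
 * that learns out-of-band that writes were aborted at map epoch E
 * would call
 *
 *	ceph_osdc_update_epoch_barrier(osdc, E);
 *
 * so that requests paused on a full flag are not resent until the
 * local osdmap has caught up to epoch E.
 */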
2307 * Drop all pending requests that are stalled waiting on a full condition to
2308 * clear, and complete them with ENOSPC as the return code. Set the
 * osdc->epoch_barrier to the latest map epoch that we've seen if any were
 * aborted.
 */
2312 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
2315 bool victims = false;
2317 dout("enter abort_on_full\n");
2319 if (!ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) && !have_pool_full(osdc))
2322 /* Scan list and see if there is anything to abort */
2323 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
2324 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
		struct rb_node *m;

		m = rb_first(&osd->o_requests);
		while (m) {
			struct ceph_osd_request *req = rb_entry(m,
					struct ceph_osd_request, r_node);
			m = rb_next(m);

			if (req->r_abort_on_full) {
				victims = true;
				break;
			}
		}
		if (victims)
			break;
	}

	if (!victims)
		goto out;
2346 * Update the barrier to current epoch if it's behind that point,
2347 * since we know we have some calls to be aborted in the tree.
2349 update_epoch_barrier(osdc, osdc->osdmap->epoch);
2351 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
2352 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
		struct rb_node *m;

		m = rb_first(&osd->o_requests);
		while (m) {
			struct ceph_osd_request *req = rb_entry(m,
					struct ceph_osd_request, r_node);
			m = rb_next(m);
2361 if (req->r_abort_on_full &&
2362 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2363 pool_full(osdc, req->r_t.target_oloc.pool)))
				abort_request(req, -ENOSPC);
		}
	}

out:
	dout("return abort_on_full barrier=%u\n", osdc->epoch_barrier);
}
2371 static void check_pool_dne(struct ceph_osd_request *req)
2373 struct ceph_osd_client *osdc = req->r_osdc;
2374 struct ceph_osdmap *map = osdc->osdmap;
2376 verify_osdc_wrlocked(osdc);
2377 WARN_ON(!map->epoch);
2379 if (req->r_attempts) {
2381 * We sent a request earlier, which means that
2382 * previously the pool existed, and now it does not
2383 * (i.e., it was deleted).
2385 req->r_map_dne_bound = map->epoch;
		dout("%s req %p tid %llu pool disappeared\n", __func__, req,
		     req->r_tid);
	} else {
		dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
2390 req, req->r_tid, req->r_map_dne_bound, map->epoch);
2393 if (req->r_map_dne_bound) {
2394 if (map->epoch >= req->r_map_dne_bound) {
2395 /* we had a new enough map */
			pr_info_ratelimited("tid %llu pool does not exist\n",
					    req->r_tid);
			complete_request(req, -ENOENT);
		}
	} else {
		send_map_check(req);
	}
}
2405 static void map_check_cb(struct ceph_mon_generic_request *greq)
2407 struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2408 struct ceph_osd_request *req;
2409 u64 tid = greq->private_data;
2411 WARN_ON(greq->result || !greq->u.newest);
2413 down_write(&osdc->lock);
2414 req = lookup_request_mc(&osdc->map_checks, tid);
	if (!req) {
		dout("%s tid %llu dne\n", __func__, tid);
		goto out_unlock;
	}
2420 dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
2421 req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
2422 if (!req->r_map_dne_bound)
2423 req->r_map_dne_bound = greq->u.newest;
2424 erase_request_mc(&osdc->map_checks, req);
2425 check_pool_dne(req);
2427 ceph_osdc_put_request(req);
out_unlock:
	up_write(&osdc->lock);
}
2432 static void send_map_check(struct ceph_osd_request *req)
2434 struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd_request *lookup_req;
	int ret;

	verify_osdc_wrlocked(osdc);

	lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
	if (lookup_req) {
		WARN_ON(lookup_req != req);
		return;
	}

	ceph_osdc_get_request(req);
	insert_request_mc(&osdc->map_checks, req);
	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
					  map_check_cb, req->r_tid);
	WARN_ON(ret);
}
2454 * lingering requests, watch/notify v2 infrastructure
2456 static void linger_release(struct kref *kref)
2458 struct ceph_osd_linger_request *lreq =
2459 container_of(kref, struct ceph_osd_linger_request, kref);
2461 dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
2462 lreq->reg_req, lreq->ping_req);
2463 WARN_ON(!RB_EMPTY_NODE(&lreq->node));
2464 WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
2465 WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
2466 WARN_ON(!list_empty(&lreq->scan_item));
2467 WARN_ON(!list_empty(&lreq->pending_lworks));
	WARN_ON(lreq->osd);

	if (lreq->reg_req)
		ceph_osdc_put_request(lreq->reg_req);
	if (lreq->ping_req)
		ceph_osdc_put_request(lreq->ping_req);
	target_destroy(&lreq->t);
	kfree(lreq);
}
2478 static void linger_put(struct ceph_osd_linger_request *lreq)
2481 kref_put(&lreq->kref, linger_release);
2484 static struct ceph_osd_linger_request *
2485 linger_get(struct ceph_osd_linger_request *lreq)
2487 kref_get(&lreq->kref);
2491 static struct ceph_osd_linger_request *
2492 linger_alloc(struct ceph_osd_client *osdc)
2494 struct ceph_osd_linger_request *lreq;
2496 lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
2500 kref_init(&lreq->kref);
2501 mutex_init(&lreq->lock);
2502 RB_CLEAR_NODE(&lreq->node);
2503 RB_CLEAR_NODE(&lreq->osdc_node);
2504 RB_CLEAR_NODE(&lreq->mc_node);
2505 INIT_LIST_HEAD(&lreq->scan_item);
2506 INIT_LIST_HEAD(&lreq->pending_lworks);
2507 init_completion(&lreq->reg_commit_wait);
2508 init_completion(&lreq->notify_finish_wait);
	lreq->osdc = osdc;
	target_init(&lreq->t);

	dout("%s lreq %p\n", __func__, lreq);
	return lreq;
}
2517 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
2518 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
2519 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
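/*
 * A linger request can sit in up to three trees at once: its OSD
 * session (node), the client-wide registry (osdc_node) and the
 * pending map checks (mc_node) -- hence the WARN_ONs in
 * linger_release().
 */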
2522 * Create linger request <-> OSD session relation.
2524 * @lreq has to be registered, @osd may be homeless.
2526 static void link_linger(struct ceph_osd *osd,
2527 struct ceph_osd_linger_request *lreq)
2529 verify_osd_locked(osd);
2530 WARN_ON(!lreq->linger_id || lreq->osd);
2531 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2532 osd->o_osd, lreq, lreq->linger_id);
2534 if (!osd_homeless(osd))
2535 __remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_linger(&osd->o_linger_requests, lreq);
	lreq->osd = osd;
}
2544 static void unlink_linger(struct ceph_osd *osd,
2545 struct ceph_osd_linger_request *lreq)
2547 verify_osd_locked(osd);
2548 WARN_ON(lreq->osd != osd);
2549 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2550 osd->o_osd, lreq, lreq->linger_id);
	erase_linger(&osd->o_linger_requests, lreq);
	lreq->osd = NULL;

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);

	put_osd(osd);
}
2562 static bool __linger_registered(struct ceph_osd_linger_request *lreq)
2564 verify_osdc_locked(lreq->osdc);
2566 return !RB_EMPTY_NODE(&lreq->osdc_node);
2569 static bool linger_registered(struct ceph_osd_linger_request *lreq)
2571 struct ceph_osd_client *osdc = lreq->osdc;
2574 down_read(&osdc->lock);
2575 registered = __linger_registered(lreq);
2576 up_read(&osdc->lock);
2581 static void linger_register(struct ceph_osd_linger_request *lreq)
2583 struct ceph_osd_client *osdc = lreq->osdc;
2585 verify_osdc_wrlocked(osdc);
2586 WARN_ON(lreq->linger_id);
2589 lreq->linger_id = ++osdc->last_linger_id;
2590 insert_linger_osdc(&osdc->linger_requests, lreq);
2593 static void linger_unregister(struct ceph_osd_linger_request *lreq)
2595 struct ceph_osd_client *osdc = lreq->osdc;
2597 verify_osdc_wrlocked(osdc);
2599 erase_linger_osdc(&osdc->linger_requests, lreq);
2603 static void cancel_linger_request(struct ceph_osd_request *req)
2605 struct ceph_osd_linger_request *lreq = req->r_priv;
2607 WARN_ON(!req->r_linger);
	cancel_request(req);
	linger_put(lreq);
}
2612 struct linger_work {
2613 struct work_struct work;
2614 struct ceph_osd_linger_request *lreq;
2615 struct list_head pending_item;
2616 unsigned long queued_stamp;
	union {
		struct {
			u64 notify_id;
			u64 notifier_id;
			void *payload; /* points into @msg front */
			size_t payload_len;
			struct ceph_msg *msg; /* for ceph_msg_put() */
		} notify;
		struct {
			int err;
		} error;
	};
};
static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
				       work_func_t workfn)
{
	struct linger_work *lwork;

	lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
	if (!lwork)
		return NULL;

	INIT_WORK(&lwork->work, workfn);
	INIT_LIST_HEAD(&lwork->pending_item);
	lwork->lreq = linger_get(lreq);

	return lwork;
}
2649 static void lwork_free(struct linger_work *lwork)
2651 struct ceph_osd_linger_request *lreq = lwork->lreq;
2653 mutex_lock(&lreq->lock);
2654 list_del(&lwork->pending_item);
	mutex_unlock(&lreq->lock);

	linger_put(lreq);
	kfree(lwork);
}
2661 static void lwork_queue(struct linger_work *lwork)
2663 struct ceph_osd_linger_request *lreq = lwork->lreq;
2664 struct ceph_osd_client *osdc = lreq->osdc;
2666 verify_lreq_locked(lreq);
2667 WARN_ON(!list_empty(&lwork->pending_item));
2669 lwork->queued_stamp = jiffies;
2670 list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
2671 queue_work(osdc->notify_wq, &lwork->work);
2674 static void do_watch_notify(struct work_struct *w)
2676 struct linger_work *lwork = container_of(w, struct linger_work, work);
2677 struct ceph_osd_linger_request *lreq = lwork->lreq;
2679 if (!linger_registered(lreq)) {
		dout("%s lreq %p not registered\n", __func__, lreq);
		goto out;
	}
2684 WARN_ON(!lreq->is_watch);
2685 dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
2686 __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
2687 lwork->notify.payload_len);
2688 lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
2689 lwork->notify.notifier_id, lwork->notify.payload,
2690 lwork->notify.payload_len);
out:
	ceph_msg_put(lwork->notify.msg);
	lwork_free(lwork);
}
2697 static void do_watch_error(struct work_struct *w)
2699 struct linger_work *lwork = container_of(w, struct linger_work, work);
2700 struct ceph_osd_linger_request *lreq = lwork->lreq;
2702 if (!linger_registered(lreq)) {
		dout("%s lreq %p not registered\n", __func__, lreq);
		goto out;
	}
2707 dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
	lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);

out:
	lwork_free(lwork);
}
2714 static void queue_watch_error(struct ceph_osd_linger_request *lreq)
2716 struct linger_work *lwork;
	lwork = lwork_alloc(lreq, do_watch_error);
	if (!lwork) {
		pr_err("failed to allocate error-lwork\n");
		return;
	}

	lwork->error.err = lreq->last_error;
	lwork_queue(lwork);
}
2728 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
2731 if (!completion_done(&lreq->reg_commit_wait)) {
2732 lreq->reg_commit_error = (result <= 0 ? result : 0);
2733 complete_all(&lreq->reg_commit_wait);
2737 static void linger_commit_cb(struct ceph_osd_request *req)
2739 struct ceph_osd_linger_request *lreq = req->r_priv;
2741 mutex_lock(&lreq->lock);
2742 dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
2743 lreq->linger_id, req->r_result);
2744 linger_reg_commit_complete(lreq, req->r_result);
2745 lreq->committed = true;
2747 if (!lreq->is_watch) {
2748 struct ceph_osd_data *osd_data =
2749 osd_req_op_data(req, 0, notify, response_data);
2750 void *p = page_address(osd_data->pages[0]);
2752 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
2753 osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
2755 /* make note of the notify_id */
2756 if (req->r_ops[0].outdata_len >= sizeof(u64)) {
2757 lreq->notify_id = ceph_decode_64(&p);
			dout("lreq %p notify_id %llu\n", lreq,
			     lreq->notify_id);
		} else {
			dout("lreq %p no notify_id\n", lreq);
		}
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}
static int normalize_watch_error(int err)
{
	/*
	 * Translate ENOENT -> ENOTCONN so that a delete->disconnection
	 * notification and a failure to reconnect because we raced with
	 * the delete appear the same to the user.
	 */
	if (err == -ENOENT)
		err = -ENOTCONN;

	return err;
}
2782 static void linger_reconnect_cb(struct ceph_osd_request *req)
2784 struct ceph_osd_linger_request *lreq = req->r_priv;
2786 mutex_lock(&lreq->lock);
2787 dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
2788 lreq, lreq->linger_id, req->r_result, lreq->last_error);
	if (req->r_result < 0) {
		if (!lreq->last_error) {
			lreq->last_error = normalize_watch_error(req->r_result);
			queue_watch_error(lreq);
		}
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}
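/*
 * Send the registration (watch or notify) request for @lreq to its
 * current OSD, reinitializing lreq->reg_req.  An already-committed
 * watch is re-sent as a RECONNECT with a bumped register_gen.
 */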
2800 static void send_linger(struct ceph_osd_linger_request *lreq)
2802 struct ceph_osd_request *req = lreq->reg_req;
2803 struct ceph_osd_req_op *op = &req->r_ops[0];
2805 verify_osdc_wrlocked(req->r_osdc);
2806 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
	if (req->r_osd)
		cancel_linger_request(req);
2811 request_reinit(req);
2812 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
2813 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
2814 req->r_flags = lreq->t.flags;
2815 req->r_mtime = lreq->mtime;
2817 mutex_lock(&lreq->lock);
2818 if (lreq->is_watch && lreq->committed) {
2819 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2820 op->watch.cookie != lreq->linger_id);
2821 op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
2822 op->watch.gen = ++lreq->register_gen;
		dout("lreq %p reconnect register_gen %u\n", lreq,
		     op->watch.gen);
		req->r_callback = linger_reconnect_cb;
	} else {
		if (!lreq->is_watch)
			lreq->notify_id = 0;
		else
			WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
		dout("lreq %p register\n", lreq);
		req->r_callback = linger_commit_cb;
	}
	mutex_unlock(&lreq->lock);
2836 req->r_priv = linger_get(lreq);
2837 req->r_linger = true;
2839 submit_request(req, true);
2842 static void linger_ping_cb(struct ceph_osd_request *req)
2844 struct ceph_osd_linger_request *lreq = req->r_priv;
2846 mutex_lock(&lreq->lock);
2847 dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
	     __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
	     lreq->last_error);
2850 if (lreq->register_gen == req->r_ops[0].watch.gen) {
2851 if (!req->r_result) {
2852 lreq->watch_valid_thru = lreq->ping_sent;
2853 } else if (!lreq->last_error) {
2854 lreq->last_error = normalize_watch_error(req->r_result);
			queue_watch_error(lreq);
		}
	} else {
		dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
2859 lreq->register_gen, req->r_ops[0].watch.gen);
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}
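/*
 * Ping the OSD serving @lreq to confirm the watch is still connected;
 * the pong is matched against register_gen in linger_ping_cb().
 */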
2866 static void send_linger_ping(struct ceph_osd_linger_request *lreq)
2868 struct ceph_osd_client *osdc = lreq->osdc;
2869 struct ceph_osd_request *req = lreq->ping_req;
2870 struct ceph_osd_req_op *op = &req->r_ops[0];
2872 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
		dout("%s PAUSERD\n", __func__);
		return;
	}
2877 lreq->ping_sent = jiffies;
2878 dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
2879 __func__, lreq, lreq->linger_id, lreq->ping_sent,
2880 lreq->register_gen);
	if (req->r_osd)
		cancel_linger_request(req);
2885 request_reinit(req);
2886 target_copy(&req->r_t, &lreq->t);
2888 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2889 op->watch.cookie != lreq->linger_id ||
2890 op->watch.op != CEPH_OSD_WATCH_OP_PING);
2891 op->watch.gen = lreq->register_gen;
2892 req->r_callback = linger_ping_cb;
2893 req->r_priv = linger_get(lreq);
2894 req->r_linger = true;
2896 ceph_osdc_get_request(req);
2897 account_request(req);
2898 req->r_tid = atomic64_inc_return(&osdc->last_tid);
	link_request(lreq->osd, req);
	send_request(req);
}
2903 static void linger_submit(struct ceph_osd_linger_request *lreq)
2905 struct ceph_osd_client *osdc = lreq->osdc;
2906 struct ceph_osd *osd;
2908 calc_target(osdc, &lreq->t, NULL, false);
2909 osd = lookup_create_osd(osdc, lreq->t.osd, true);
	link_linger(osd, lreq);

	send_linger(lreq);
}
2915 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
2917 struct ceph_osd_client *osdc = lreq->osdc;
2918 struct ceph_osd_linger_request *lookup_lreq;
2920 verify_osdc_wrlocked(osdc);
	lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
				       lreq->linger_id);
	if (!lookup_lreq)
		return;

	WARN_ON(lookup_lreq != lreq);
	erase_linger_mc(&osdc->linger_map_checks, lreq);
	linger_put(lreq);
}
2933 * @lreq has to be both registered and linked.
2935 static void __linger_cancel(struct ceph_osd_linger_request *lreq)
2937 if (lreq->is_watch && lreq->ping_req->r_osd)
2938 cancel_linger_request(lreq->ping_req);
2939 if (lreq->reg_req->r_osd)
2940 cancel_linger_request(lreq->reg_req);
2941 cancel_linger_map_check(lreq);
2942 unlink_linger(lreq->osd, lreq);
2943 linger_unregister(lreq);
2946 static void linger_cancel(struct ceph_osd_linger_request *lreq)
2948 struct ceph_osd_client *osdc = lreq->osdc;
2950 down_write(&osdc->lock);
2951 if (__linger_registered(lreq))
2952 __linger_cancel(lreq);
2953 up_write(&osdc->lock);
2956 static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
2958 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
2960 struct ceph_osd_client *osdc = lreq->osdc;
2961 struct ceph_osdmap *map = osdc->osdmap;
2963 verify_osdc_wrlocked(osdc);
2964 WARN_ON(!map->epoch);
2966 if (lreq->register_gen) {
2967 lreq->map_dne_bound = map->epoch;
2968 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
		     lreq, lreq->linger_id);
	} else {
2971 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
2972 __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
2976 if (lreq->map_dne_bound) {
2977 if (map->epoch >= lreq->map_dne_bound) {
2978 /* we had a new enough map */
			pr_info("linger_id %llu pool does not exist\n",
				lreq->linger_id);
			linger_reg_commit_complete(lreq, -ENOENT);
			__linger_cancel(lreq);
		}
	} else {
		send_linger_map_check(lreq);
	}
}
2989 static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
2991 struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2992 struct ceph_osd_linger_request *lreq;
2993 u64 linger_id = greq->private_data;
2995 WARN_ON(greq->result || !greq->u.newest);
2997 down_write(&osdc->lock);
2998 lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
	if (!lreq) {
		dout("%s linger_id %llu dne\n", __func__, linger_id);
		goto out_unlock;
	}
3004 dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
3005 __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3007 if (!lreq->map_dne_bound)
3008 lreq->map_dne_bound = greq->u.newest;
3009 erase_linger_mc(&osdc->linger_map_checks, lreq);
3010 check_linger_pool_dne(lreq);
	linger_put(lreq);

out_unlock:
	up_write(&osdc->lock);
}
3017 static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
3019 struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd_linger_request *lookup_lreq;
	int ret;

	verify_osdc_wrlocked(osdc);

	lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
				       lreq->linger_id);
	if (lookup_lreq) {
		WARN_ON(lookup_lreq != lreq);
		return;
	}

	linger_get(lreq);
	insert_linger_mc(&osdc->linger_map_checks, lreq);
	ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
					  linger_map_check_cb, lreq->linger_id);
	WARN_ON(ret);
}
3039 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
3043 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3044 ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
3045 return ret ?: lreq->reg_commit_error;
3048 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
3052 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3053 ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
3054 return ret ?: lreq->notify_finish_error;
/*
 * Timeout callback, called every N seconds.  When one or more OSD
 * requests have been active for more than N seconds, we send a
 * keepalive (tag + timestamp) to the OSD to ensure that any break in
 * the communications channel is detected.
 */
3063 static void handle_timeout(struct work_struct *work)
3065 struct ceph_osd_client *osdc =
3066 container_of(work, struct ceph_osd_client, timeout_work.work);
3067 struct ceph_options *opts = osdc->client->options;
3068 unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
3069 unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
3070 LIST_HEAD(slow_osds);
3071 struct rb_node *n, *p;
3073 dout("%s osdc %p\n", __func__, osdc);
3074 down_write(&osdc->lock);
3077 * ping osds that are a bit slow. this ensures that if there
3078 * is a break in the TCP connection we will notice, and reopen
3079 * a connection with that osd (from the fault callback).
3081 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
		bool found = false;
3085 for (p = rb_first(&osd->o_requests); p; ) {
3086 struct ceph_osd_request *req =
3087 rb_entry(p, struct ceph_osd_request, r_node);
3089 p = rb_next(p); /* abort_request() */
			if (time_before(req->r_stamp, cutoff)) {
				dout(" req %p tid %llu on osd%d is laggy\n",
				     req, req->r_tid, osd->o_osd);
				found = true;
			}
3096 if (opts->osd_request_timeout &&
3097 time_before(req->r_start_stamp, expiry_cutoff)) {
3098 pr_err_ratelimited("tid %llu on osd%d timeout\n",
3099 req->r_tid, osd->o_osd);
3100 abort_request(req, -ETIMEDOUT);
3103 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
3104 struct ceph_osd_linger_request *lreq =
3105 rb_entry(p, struct ceph_osd_linger_request, node);
3107 dout(" lreq %p linger_id %llu is served by osd%d\n",
3108 lreq, lreq->linger_id, osd->o_osd);
3111 mutex_lock(&lreq->lock);
3112 if (lreq->is_watch && lreq->committed && !lreq->last_error)
3113 send_linger_ping(lreq);
3114 mutex_unlock(&lreq->lock);
		if (found)
			list_move_tail(&osd->o_keepalive_item, &slow_osds);
	}
3121 if (opts->osd_request_timeout) {
3122 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
3123 struct ceph_osd_request *req =
3124 rb_entry(p, struct ceph_osd_request, r_node);
3126 p = rb_next(p); /* abort_request() */
3128 if (time_before(req->r_start_stamp, expiry_cutoff)) {
3129 pr_err_ratelimited("tid %llu on osd%d timeout\n",
3130 req->r_tid, osdc->homeless_osd.o_osd);
3131 abort_request(req, -ETIMEDOUT);
3136 if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
3137 maybe_request_map(osdc);
3139 while (!list_empty(&slow_osds)) {
3140 struct ceph_osd *osd = list_first_entry(&slow_osds,
3143 list_del_init(&osd->o_keepalive_item);
3144 ceph_con_keepalive(&osd->o_con);
3147 up_write(&osdc->lock);
3148 schedule_delayed_work(&osdc->timeout_work,
3149 osdc->client->options->osd_keepalive_timeout);
3152 static void handle_osds_timeout(struct work_struct *work)
3154 struct ceph_osd_client *osdc =
3155 container_of(work, struct ceph_osd_client,
3156 osds_timeout_work.work);
3157 unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
3158 struct ceph_osd *osd, *nosd;
3160 dout("%s osdc %p\n", __func__, osdc);
3161 down_write(&osdc->lock);
3162 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
		if (time_before(jiffies, osd->lru_ttl))
			break;
3166 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
		WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
		close_osd(osd);
	}
3171 up_write(&osdc->lock);
3172 schedule_delayed_work(&osdc->osds_timeout_work,
3173 round_jiffies_relative(delay));
3176 static int ceph_oloc_decode(void **p, void *end,
3177 struct ceph_object_locator *oloc)
	u8 struct_v, struct_cv;
	u32 len;
	void *struct_end;
	int ret = 0;
3184 ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3185 struct_v = ceph_decode_8(p);
	struct_cv = ceph_decode_8(p);
	if (struct_v < 3) {
		pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
			struct_v, struct_cv);
		goto e_inval;
	}
	if (struct_cv > 6) {
		pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
			struct_v, struct_cv);
		goto e_inval;
	}
3197 len = ceph_decode_32(p);
3198 ceph_decode_need(p, end, len, e_inval);
3199 struct_end = *p + len;
3201 oloc->pool = ceph_decode_64(p);
3202 *p += 4; /* skip preferred */
3204 len = ceph_decode_32(p);
3206 pr_warn("ceph_object_locator::key is set\n");
3210 if (struct_v >= 5) {
3211 bool changed = false;
		len = ceph_decode_32(p);
		if (len > 0) {
			ceph_decode_need(p, end, len, e_inval);
			if (!oloc->pool_ns ||
			    ceph_compare_string(oloc->pool_ns, *p, len))
				changed = true;
			*p += len;
		} else {
			if (oloc->pool_ns)
				changed = true;
		}
		if (changed) {
			/* redirect changes namespace */
			pr_warn("ceph_object_locator::nspace is changed\n");
			goto e_inval;
		}
	}

	if (struct_v >= 6) {
		s64 hash = ceph_decode_64(p);

		if (hash != -1) {
			pr_warn("ceph_object_locator::hash is set\n");
			goto e_inval;
		}
	}

	/* skip the rest */
	*p = struct_end;
out:
	return ret;

e_inval:
	ret = -EINVAL;
	goto out;
}
3249 static int ceph_redirect_decode(void **p, void *end,
3250 struct ceph_request_redirect *redir)
3252 u8 struct_v, struct_cv;
3257 ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3258 struct_v = ceph_decode_8(p);
3259 struct_cv = ceph_decode_8(p);
3260 if (struct_cv > 1) {
3261 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
3262 struct_v, struct_cv);
3265 len = ceph_decode_32(p);
3266 ceph_decode_need(p, end, len, e_inval);
3267 struct_end = *p + len;
3269 ret = ceph_oloc_decode(p, end, &redir->oloc);
3273 len = ceph_decode_32(p);
3275 pr_warn("ceph_request_redirect::object_name is set\n");
3279 len = ceph_decode_32(p);
3280 *p += len; /* skip osd_instructions */
3292 struct MOSDOpReply {
	struct ceph_pg pgid;
	u64 flags;
	int result;
	u32 epoch;
	int num_ops;
	u32 outdata_len[CEPH_OSD_MAX_OPS];
	s32 rval[CEPH_OSD_MAX_OPS];
	int retry_attempt;
	struct ceph_eversion replay_version;
	u64 user_version;
	struct ceph_request_redirect redirect;
};
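/*
 * Decode the front of an MOSDOpReply into @m.  Fields that older
 * reply encodings lack (replay_version, user_version, redirect) are
 * synthesized or zeroed so callers can consume @m uniformly.
 */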
3306 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
3308 void *p = msg->front.iov_base;
3309 void *const end = p + msg->front.iov_len;
3310 u16 version = le16_to_cpu(msg->hdr.version);
3311 struct ceph_eversion bad_replay_version;
3317 ceph_decode_32_safe(&p, end, len, e_inval);
3318 ceph_decode_need(&p, end, len, e_inval);
3319 p += len; /* skip oid */
3321 ret = ceph_decode_pgid(&p, end, &m->pgid);
3325 ceph_decode_64_safe(&p, end, m->flags, e_inval);
3326 ceph_decode_32_safe(&p, end, m->result, e_inval);
3327 ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
3328 memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
3329 p += sizeof(bad_replay_version);
3330 ceph_decode_32_safe(&p, end, m->epoch, e_inval);
3332 ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
3333 if (m->num_ops > ARRAY_SIZE(m->outdata_len))
3336 ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
3338 for (i = 0; i < m->num_ops; i++) {
3339 struct ceph_osd_op *op = p;
3341 m->outdata_len[i] = le32_to_cpu(op->payload_len);
3345 ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
3346 for (i = 0; i < m->num_ops; i++)
3347 ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
3350 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
3351 memcpy(&m->replay_version, p, sizeof(m->replay_version));
3352 p += sizeof(m->replay_version);
3353 ceph_decode_64_safe(&p, end, m->user_version, e_inval);
	} else {
		m->replay_version = bad_replay_version; /* struct */
		m->user_version = le64_to_cpu(m->replay_version.version);
	}

	if (version >= 6) {
		if (version >= 7)
			ceph_decode_8_safe(&p, end, decode_redir, e_inval);
		else
			decode_redir = 1;
	} else {
		decode_redir = 0;
	}

	if (decode_redir) {
		ret = ceph_redirect_decode(&p, end, &m->redirect);
		if (ret)
			return ret;
	} else {
		ceph_oloc_init(&m->redirect.oloc);
	}

	return 0;

e_inval:
	return -EINVAL;
}
3383 * Handle MOSDOpReply. Set ->r_result and call the callback if it is
3386 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
3388 struct ceph_osd_client *osdc = osd->o_osdc;
3389 struct ceph_osd_request *req;
3390 struct MOSDOpReply m;
3391 u64 tid = le64_to_cpu(msg->hdr.tid);
3396 dout("%s msg %p tid %llu\n", __func__, msg, tid);
3398 down_read(&osdc->lock);
3399 if (!osd_registered(osd)) {
3400 dout("%s osd%d unknown\n", __func__, osd->o_osd);
3401 goto out_unlock_osdc;
3403 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
3405 mutex_lock(&osd->lock);
3406 req = lookup_request(&osd->o_requests, tid);
	if (!req) {
		dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
		goto out_unlock_session;
	}
3412 m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
3413 ret = decode_MOSDOpReply(msg, &m);
3414 m.redirect.oloc.pool_ns = NULL;
	if (ret) {
		pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
		       tid, ret);
		goto fail_request;
	}
3421 dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
3422 __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
3423 m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
3424 le64_to_cpu(m.replay_version.version), m.user_version);
3426 if (m.retry_attempt >= 0) {
3427 if (m.retry_attempt != req->r_attempts - 1) {
3428 dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
3429 req, req->r_tid, m.retry_attempt,
3430 req->r_attempts - 1);
3431 goto out_unlock_session;
		}
	} else {
		WARN_ON(1); /* MOSDOpReply v4 is assumed */
	}
3437 if (!ceph_oloc_empty(&m.redirect.oloc)) {
3438 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
3439 m.redirect.oloc.pool);
3440 unlink_request(osd, req);
3441 mutex_unlock(&osd->lock);
		/*
		 * Not ceph_oloc_copy() - changing pool_ns is not
		 * supported.
		 */
3447 req->r_t.target_oloc.pool = m.redirect.oloc.pool;
3448 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED |
3449 CEPH_OSD_FLAG_IGNORE_OVERLAY |
3450 CEPH_OSD_FLAG_IGNORE_CACHE;
		req->r_tid = 0;
		__submit_request(req, false);
3453 goto out_unlock_osdc;
3456 if (m.num_ops != req->r_num_ops) {
3457 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
		       req->r_num_ops, req->r_tid);
		goto fail_request;
	}
3461 for (i = 0; i < req->r_num_ops; i++) {
3462 dout(" req %p tid %llu op %d rval %d len %u\n", req,
3463 req->r_tid, i, m.rval[i], m.outdata_len[i]);
3464 req->r_ops[i].rval = m.rval[i];
3465 req->r_ops[i].outdata_len = m.outdata_len[i];
3466 data_len += m.outdata_len[i];
3468 if (data_len != le32_to_cpu(msg->hdr.data_len)) {
3469 pr_err("sum of lens %u != %u for tid %llu\n", data_len,
		       le32_to_cpu(msg->hdr.data_len), req->r_tid);
		goto fail_request;
	}
3473 dout("%s req %p tid %llu result %d data_len %u\n", __func__,
3474 req, req->r_tid, m.result, data_len);
3477 * Since we only ever request ONDISK, we should only ever get
3478 * one (type of) reply back.
3480 WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
3481 req->r_result = m.result ?: data_len;
3482 finish_request(req);
3483 mutex_unlock(&osd->lock);
3484 up_read(&osdc->lock);
3486 __complete_request(req);
3487 complete_all(&req->r_completion);
	ceph_osdc_put_request(req);
	return;

fail_request:
	complete_request(req, -EIO);
out_unlock_session:
	mutex_unlock(&osd->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
}
3499 static void set_pool_was_full(struct ceph_osd_client *osdc)
3503 for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
3504 struct ceph_pg_pool_info *pi =
3505 rb_entry(n, struct ceph_pg_pool_info, node);
3507 pi->was_full = __pool_full(pi);
3511 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
3513 struct ceph_pg_pool_info *pi;
	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return pi->was_full && !__pool_full(pi);
}
3522 static enum calc_target_result
3523 recalc_linger_target(struct ceph_osd_linger_request *lreq)
3525 struct ceph_osd_client *osdc = lreq->osdc;
3526 enum calc_target_result ct_res;
3528 ct_res = calc_target(osdc, &lreq->t, NULL, true);
3529 if (ct_res == CALC_TARGET_NEED_RESEND) {
3530 struct ceph_osd *osd;
3532 osd = lookup_create_osd(osdc, lreq->t.osd, true);
3533 if (osd != lreq->osd) {
3534 unlink_linger(lreq->osd, lreq);
			link_linger(osd, lreq);
		}
	}

	return ct_res;
}
3543 * Requeue requests whose mapping to an OSD has changed.
3545 static void scan_requests(struct ceph_osd *osd,
3548 bool check_pool_cleared_full,
3549 struct rb_root *need_resend,
3550 struct list_head *need_resend_linger)
3552 struct ceph_osd_client *osdc = osd->o_osdc;
3554 bool force_resend_writes;
3556 for (n = rb_first(&osd->o_linger_requests); n; ) {
3557 struct ceph_osd_linger_request *lreq =
3558 rb_entry(n, struct ceph_osd_linger_request, node);
3559 enum calc_target_result ct_res;
3561 n = rb_next(n); /* recalc_linger_target() */
3563 dout("%s lreq %p linger_id %llu\n", __func__, lreq,
3565 ct_res = recalc_linger_target(lreq);
		switch (ct_res) {
		case CALC_TARGET_NO_ACTION:
3568 force_resend_writes = cleared_full ||
3569 (check_pool_cleared_full &&
3570 pool_cleared_full(osdc, lreq->t.base_oloc.pool));
			if (!force_resend && !force_resend_writes)
				break;

			/* fall through */
3575 case CALC_TARGET_NEED_RESEND:
3576 cancel_linger_map_check(lreq);
3578 * scan_requests() for the previous epoch(s)
3579 * may have already added it to the list, since
3580 * it's not unlinked here.
3582 if (list_empty(&lreq->scan_item))
3583 list_add_tail(&lreq->scan_item, need_resend_linger);
3585 case CALC_TARGET_POOL_DNE:
3586 list_del_init(&lreq->scan_item);
			check_linger_pool_dne(lreq);
			break;
		}
	}
3592 for (n = rb_first(&osd->o_requests); n; ) {
3593 struct ceph_osd_request *req =
3594 rb_entry(n, struct ceph_osd_request, r_node);
3595 enum calc_target_result ct_res;
3597 n = rb_next(n); /* unlink_request(), check_pool_dne() */
3599 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3600 ct_res = calc_target(osdc, &req->r_t, &req->r_osd->o_con,
				      false);
		switch (ct_res) {
		case CALC_TARGET_NO_ACTION:
3604 force_resend_writes = cleared_full ||
3605 (check_pool_cleared_full &&
3606 pool_cleared_full(osdc, req->r_t.base_oloc.pool));
3607 if (!force_resend &&
3608 (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
			     !force_resend_writes))
				break;

			/* fall through */
3613 case CALC_TARGET_NEED_RESEND:
3614 cancel_map_check(req);
3615 unlink_request(osd, req);
3616 insert_request(need_resend, req);
3618 case CALC_TARGET_POOL_DNE:
			check_pool_dne(req);
			break;
		}
	}
}
3625 static int handle_one_map(struct ceph_osd_client *osdc,
3626 void *p, void *end, bool incremental,
3627 struct rb_root *need_resend,
3628 struct list_head *need_resend_linger)
3630 struct ceph_osdmap *newmap;
3632 bool skipped_map = false;
3635 was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3636 set_pool_was_full(osdc);
	if (incremental)
		newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
	else
		newmap = ceph_osdmap_decode(&p, end);
	if (IS_ERR(newmap))
		return PTR_ERR(newmap);
3645 if (newmap != osdc->osdmap) {
3647 * Preserve ->was_full before destroying the old map.
3648 * For pools that weren't in the old map, ->was_full
3651 for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
3652 struct ceph_pg_pool_info *pi =
3653 rb_entry(n, struct ceph_pg_pool_info, node);
3654 struct ceph_pg_pool_info *old_pi;
3656 old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
			if (old_pi)
				pi->was_full = old_pi->was_full;
			else
				WARN_ON(pi->was_full);
		}
3663 if (osdc->osdmap->epoch &&
3664 osdc->osdmap->epoch + 1 < newmap->epoch) {
			WARN_ON(incremental);
			skipped_map = true;
		}

		ceph_osdmap_destroy(osdc->osdmap);
		osdc->osdmap = newmap;
	}
3673 was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3674 scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
3675 need_resend, need_resend_linger);
3677 for (n = rb_first(&osdc->osds); n; ) {
3678 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3680 n = rb_next(n); /* close_osd() */
3682 scan_requests(osd, skipped_map, was_full, true, need_resend,
3683 need_resend_linger);
3684 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
3685 memcmp(&osd->o_con.peer_addr,
3686 ceph_osd_addr(osdc->osdmap, osd->o_osd),
			   sizeof(struct ceph_entity_addr)))
			close_osd(osd);
	}

	return 0;
}
3694 static void kick_requests(struct ceph_osd_client *osdc,
3695 struct rb_root *need_resend,
3696 struct list_head *need_resend_linger)
3698 struct ceph_osd_linger_request *lreq, *nlreq;
3699 enum calc_target_result ct_res;
3702 /* make sure need_resend targets reflect latest map */
3703 for (n = rb_first(need_resend); n; ) {
3704 struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n);
3709 if (req->r_t.epoch < osdc->osdmap->epoch) {
3710 ct_res = calc_target(osdc, &req->r_t, NULL, false);
3711 if (ct_res == CALC_TARGET_POOL_DNE) {
3712 erase_request(need_resend, req);
3713 check_pool_dne(req);
3718 for (n = rb_first(need_resend); n; ) {
3719 struct ceph_osd_request *req =
3720 rb_entry(n, struct ceph_osd_request, r_node);
3721 struct ceph_osd *osd;
		n = rb_next(n);
		erase_request(need_resend, req); /* before link_request() */
3726 osd = lookup_create_osd(osdc, req->r_t.osd, true);
3727 link_request(osd, req);
3728 if (!req->r_linger) {
			if (!osd_homeless(osd) && !req->r_t.paused)
				send_request(req);
		} else {
			cancel_linger_request(req);
		}
	}
3736 list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
		if (!osd_homeless(lreq->osd))
			send_linger(lreq);

		list_del_init(&lreq->scan_item);
	}
}
3745 * Process updated osd map.
3747 * The message contains any number of incremental and full maps, normally
3748 * indicating some sort of topology change in the cluster. Kick requests
3749 * off to different OSDs as needed.
3751 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
3753 void *p = msg->front.iov_base;
3754 void *const end = p + msg->front.iov_len;
3755 u32 nr_maps, maplen;
3757 struct ceph_fsid fsid;
3758 struct rb_root need_resend = RB_ROOT;
3759 LIST_HEAD(need_resend_linger);
3760 bool handled_incremental = false;
3761 bool was_pauserd, was_pausewr;
3762 bool pauserd, pausewr;
3765 dout("%s have %u\n", __func__, osdc->osdmap->epoch);
3766 down_write(&osdc->lock);
3769 ceph_decode_need(&p, end, sizeof(fsid), bad);
3770 ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(osdc->client, &fsid) < 0)
		goto bad;
3774 was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3775 was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3776 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3777 have_pool_full(osdc);
3779 /* incremental maps */
3780 ceph_decode_32_safe(&p, end, nr_maps, bad);
3781 dout(" %d inc maps\n", nr_maps);
3782 while (nr_maps > 0) {
3783 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3784 epoch = ceph_decode_32(&p);
3785 maplen = ceph_decode_32(&p);
3786 ceph_decode_need(&p, end, maplen, bad);
3787 if (osdc->osdmap->epoch &&
3788 osdc->osdmap->epoch + 1 == epoch) {
			dout("applying incremental map %u len %d\n",
			     epoch, maplen);
			err = handle_one_map(osdc, p, p + maplen, true,
					     &need_resend, &need_resend_linger);
			if (err)
				goto bad;
			handled_incremental = true;
		} else {
			dout("ignoring incremental map %u len %d\n",
			     epoch, maplen);
		}
		p += maplen;
		nr_maps--;
	}
	if (handled_incremental)
		goto done;

	/* full maps */
3807 ceph_decode_32_safe(&p, end, nr_maps, bad);
	dout(" %d full maps\n", nr_maps);
	while (nr_maps) {
		ceph_decode_need(&p, end, 2*sizeof(u32), bad);
		epoch = ceph_decode_32(&p);
		maplen = ceph_decode_32(&p);
		ceph_decode_need(&p, end, maplen, bad);
		if (nr_maps > 1) {
			dout("skipping non-latest full map %u len %d\n",
			     epoch, maplen);
		} else if (osdc->osdmap->epoch >= epoch) {
			dout("skipping full map %u len %d, "
			     "older than our %u\n", epoch, maplen,
			     osdc->osdmap->epoch);
		} else {
			dout("taking full map %u len %d\n", epoch, maplen);
			err = handle_one_map(osdc, p, p + maplen, false,
					     &need_resend, &need_resend_linger);
			if (err)
				goto bad;
		}
		p += maplen;
		nr_maps--;
	}

done:
	/*
3834 * subscribe to subsequent osdmap updates if full to ensure
	 * we find out when we are no longer full and stop returning
	 * ENOSPC.
	 */
3838 pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3839 pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3840 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3841 have_pool_full(osdc);
3842 if (was_pauserd || was_pausewr || pauserd || pausewr ||
3843 osdc->osdmap->epoch < osdc->epoch_barrier)
3844 maybe_request_map(osdc);
3846 kick_requests(osdc, &need_resend, &need_resend_linger);
3848 ceph_osdc_abort_on_full(osdc);
3849 ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
3850 osdc->osdmap->epoch);
3851 up_write(&osdc->lock);
	wake_up_all(&osdc->client->auth_wq);
	return;

bad:
	pr_err("osdc handle_map corrupt msg\n");
	ceph_msg_dump(msg);
	up_write(&osdc->lock);
}
3862 * Resubmit requests pending on the given osd.
3864 static void kick_osd_requests(struct ceph_osd *osd)
3868 clear_backoffs(osd);
3870 for (n = rb_first(&osd->o_requests); n; ) {
3871 struct ceph_osd_request *req =
3872 rb_entry(n, struct ceph_osd_request, r_node);
3874 n = rb_next(n); /* cancel_linger_request() */
3876 if (!req->r_linger) {
			if (!req->r_t.paused)
				send_request(req);
		} else {
			cancel_linger_request(req);
		}
	}
3883 for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
3884 struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		send_linger(lreq);
	}
}
3892 * If the osd connection drops, we need to resubmit all requests.
3894 static void osd_fault(struct ceph_connection *con)
3896 struct ceph_osd *osd = con->private;
3897 struct ceph_osd_client *osdc = osd->o_osdc;
3899 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
3901 down_write(&osdc->lock);
3902 if (!osd_registered(osd)) {
		dout("%s osd%d unknown\n", __func__, osd->o_osd);
		goto out_unlock;
	}
3907 if (!reopen_osd(osd))
3908 kick_osd_requests(osd);
3909 maybe_request_map(osdc);
out_unlock:
	up_write(&osdc->lock);
}
3915 struct MOSDBackoff {
	struct ceph_spg spgid;
	u32 map_epoch;
	u8 op;
	u64 id;
3920 struct ceph_hobject_id *begin;
3921 struct ceph_hobject_id *end;
3924 static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
3926 void *p = msg->front.iov_base;
3927 void *const end = p + msg->front.iov_len;
	ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
	if (ret)
		return ret;

	ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
	if (ret)
		return ret;
3940 ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);
3941 ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
3942 ceph_decode_8_safe(&p, end, m->op, e_inval);
3943 ceph_decode_64_safe(&p, end, m->id, e_inval);
	m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO);
	if (!m->begin)
		return -ENOMEM;

	ret = decode_hoid(&p, end, m->begin);
	if (ret) {
		free_hoid(m->begin);
		return ret;
	}

	m->end = kzalloc(sizeof(*m->end), GFP_NOIO);
	if (!m->end) {
		free_hoid(m->begin);
		return -ENOMEM;
	}

	ret = decode_hoid(&p, end, m->end);
	if (ret) {
		free_hoid(m->begin);
		free_hoid(m->end);
		return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}
3974 static struct ceph_msg *create_backoff_message(
3975 const struct ceph_osd_backoff *backoff,
3978 struct ceph_msg *msg;
3982 msg_size = CEPH_ENCODING_START_BLK_LEN +
3983 CEPH_PGID_ENCODING_LEN + 1; /* spgid */
3984 msg_size += 4 + 1 + 8; /* map_epoch, op, id */
3985 msg_size += CEPH_ENCODING_START_BLK_LEN +
3986 hoid_encoding_size(backoff->begin);
3987 msg_size += CEPH_ENCODING_START_BLK_LEN +
3988 hoid_encoding_size(backoff->end);
	msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
	if (!msg)
		return NULL;
3994 p = msg->front.iov_base;
3995 end = p + msg->front_alloc_len;
3997 encode_spgid(&p, &backoff->spgid);
3998 ceph_encode_32(&p, map_epoch);
3999 ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
4000 ceph_encode_64(&p, backoff->id);
4001 encode_hoid(&p, end, backoff->begin);
4002 encode_hoid(&p, end, backoff->end);
4005 msg->front.iov_len = p - msg->front.iov_base;
4006 msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	return msg;
}
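/*
 * Install the backoff described by @m on this OSD session and ack it
 * back to the OSD.
 */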
4012 static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
4014 struct ceph_spg_mapping *spg;
4015 struct ceph_osd_backoff *backoff;
4016 struct ceph_msg *msg;
4018 dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4019 m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4021 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid);
	if (!spg) {
		spg = alloc_spg_mapping();
		if (!spg) {
			pr_err("%s failed to allocate spg\n", __func__);
			return;
		}
		spg->spgid = m->spgid; /* struct */
		insert_spg_mapping(&osd->o_backoff_mappings, spg);
	}
4032 backoff = alloc_backoff();
	if (!backoff) {
		pr_err("%s failed to allocate backoff\n", __func__);
		return;
	}
4037 backoff->spgid = m->spgid; /* struct */
4038 backoff->id = m->id;
4039 backoff->begin = m->begin;
4040 m->begin = NULL; /* backoff now owns this */
4041 backoff->end = m->end;
4042 m->end = NULL; /* ditto */
4044 insert_backoff(&spg->backoffs, backoff);
4045 insert_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4048 * Ack with original backoff's epoch so that the OSD can
4049 * discard this if there was a PG split.
4051 msg = create_backoff_message(backoff, m->map_epoch);
	if (!msg) {
		pr_err("%s failed to allocate msg\n", __func__);
		return;
	}
4056 ceph_con_send(&osd->o_con, msg);
4059 static bool target_contained_by(const struct ceph_osd_request_target *t,
4060 const struct ceph_hobject_id *begin,
4061 const struct ceph_hobject_id *end)
4063 struct ceph_hobject_id hoid;
4066 hoid_fill_from_target(&hoid, t);
4067 cmp = hoid_compare(&hoid, begin);
4068 return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0);
4071 static void handle_backoff_unblock(struct ceph_osd *osd,
4072 const struct MOSDBackoff *m)
4074 struct ceph_spg_mapping *spg;
4075 struct ceph_osd_backoff *backoff;
4078 dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4079 m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4081 backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
4083 pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
4084 __func__, osd->o_osd, m->spgid.pgid.pool,
		       m->spgid.pgid.seed, m->spgid.shard, m->id);
		return;
	}
4089 if (hoid_compare(backoff->begin, m->begin) &&
4090 hoid_compare(backoff->end, m->end)) {
4091 pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
4092 __func__, osd->o_osd, m->spgid.pgid.pool,
4093 m->spgid.pgid.seed, m->spgid.shard, m->id);
4094 /* unblock it anyway... */
4097 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
4100 erase_backoff(&spg->backoffs, backoff);
4101 erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4102 free_backoff(backoff);
4104 if (RB_EMPTY_ROOT(&spg->backoffs)) {
4105 erase_spg_mapping(&osd->o_backoff_mappings, spg);
4106 free_spg_mapping(spg);
4109 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
4110 struct ceph_osd_request *req =
4111 rb_entry(n, struct ceph_osd_request, r_node);
4113 if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
4115 * Match against @m, not @backoff -- the PG may
4116 * have split on the OSD.
4118 if (target_contained_by(&req->r_t, m->begin, m->end)) {
				/*
				 * If no other installed backoff applies,
				 * resend.
				 */
				send_request(req);
			}
		}
	}
}
4129 static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
4131 struct ceph_osd_client *osdc = osd->o_osdc;
4132 struct MOSDBackoff m;
4135 down_read(&osdc->lock);
4136 if (!osd_registered(osd)) {
4137 dout("%s osd%d unknown\n", __func__, osd->o_osd);
		up_read(&osdc->lock);
		return;
	}
4141 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
4143 mutex_lock(&osd->lock);
4144 ret = decode_MOSDBackoff(msg, &m);
	if (ret) {
		pr_err("failed to decode MOSDBackoff: %d\n", ret);
		ceph_msg_dump(msg);
		goto out_unlock;
	}

	switch (m.op) {
	case CEPH_OSD_BACKOFF_OP_BLOCK:
		handle_backoff_block(osd, &m);
		break;
	case CEPH_OSD_BACKOFF_OP_UNBLOCK:
		handle_backoff_unblock(osd, &m);
		break;
	default:
		pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
	}

	free_hoid(m.begin);
	free_hoid(m.end);

out_unlock:
	mutex_unlock(&osd->lock);
	up_read(&osdc->lock);
}
4171 * Process osd watch notifications
4173 static void handle_watch_notify(struct ceph_osd_client *osdc,
4174 struct ceph_msg *msg)
4176 void *p = msg->front.iov_base;
4177 void *const end = p + msg->front.iov_len;
4178 struct ceph_osd_linger_request *lreq;
4179 struct linger_work *lwork;
4180 u8 proto_ver, opcode;
4181 u64 cookie, notify_id;
4182 u64 notifier_id = 0;
4183 s32 return_code = 0;
4184 void *payload = NULL;
4185 u32 payload_len = 0;
4187 ceph_decode_8_safe(&p, end, proto_ver, bad);
4188 ceph_decode_8_safe(&p, end, opcode, bad);
4189 ceph_decode_64_safe(&p, end, cookie, bad);
4190 p += 8; /* skip ver */
4191 ceph_decode_64_safe(&p, end, notify_id, bad);
4193 if (proto_ver >= 1) {
4194 ceph_decode_32_safe(&p, end, payload_len, bad);
4195 ceph_decode_need(&p, end, payload_len, bad);
4200 if (le16_to_cpu(msg->hdr.version) >= 2)
4201 ceph_decode_32_safe(&p, end, return_code, bad);
4203 if (le16_to_cpu(msg->hdr.version) >= 3)
4204 ceph_decode_64_safe(&p, end, notifier_id, bad);
4206 down_read(&osdc->lock);
4207 lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
4209 dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
4211 goto out_unlock_osdc;
4214 mutex_lock(&lreq->lock);
4215 dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
4216 opcode, cookie, lreq, lreq->is_watch);
4217 if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
4218 if (!lreq->last_error) {
4219 lreq->last_error = -ENOTCONN;
4220 queue_watch_error(lreq);
4222 } else if (!lreq->is_watch) {
4223 /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
4224 if (lreq->notify_id && lreq->notify_id != notify_id) {
4225 dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
4226 lreq->notify_id, notify_id);
4227 } else if (!completion_done(&lreq->notify_finish_wait)) {
4228 struct ceph_msg_data *data =
4229 list_first_entry_or_null(&msg->data,
4230 struct ceph_msg_data,
4234 if (lreq->preply_pages) {
4235 WARN_ON(data->type !=
4236 CEPH_MSG_DATA_PAGES);
4237 *lreq->preply_pages = data->pages;
4238 *lreq->preply_len = data->length;
4240 ceph_release_page_vector(data->pages,
4241 calc_pages_for(0, data->length));
4244 lreq->notify_finish_error = return_code;
4245 complete_all(&lreq->notify_finish_wait);
4248 /* CEPH_WATCH_EVENT_NOTIFY */
4249 lwork = lwork_alloc(lreq, do_watch_notify);
4251 pr_err("failed to allocate notify-lwork\n");
4252 goto out_unlock_lreq;
4255 lwork->notify.notify_id = notify_id;
4256 lwork->notify.notifier_id = notifier_id;
4257 lwork->notify.payload = payload;
4258 lwork->notify.payload_len = payload_len;
		lwork->notify.msg = ceph_msg_get(msg);
		lwork_queue(lwork);
	}

out_unlock_lreq:
	mutex_unlock(&lreq->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return;

bad:
	pr_err("osdc handle_watch_notify corrupt msg\n");
}
4274 * Register request, send initial attempt.
4276 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
4280 down_read(&osdc->lock);
4281 submit_request(req, false);
	up_read(&osdc->lock);

	return 0;
}
4286 EXPORT_SYMBOL(ceph_osdc_start_request);
4289 * Unregister a registered request. The request is not completed:
4290 * ->r_result isn't set and __complete_request() isn't called.
4292 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
4294 struct ceph_osd_client *osdc = req->r_osdc;
4296 down_write(&osdc->lock);
	if (req->r_osd)
		cancel_request(req);
4299 up_write(&osdc->lock);
4301 EXPORT_SYMBOL(ceph_osdc_cancel_request);
4304 * @timeout: in jiffies, 0 means "wait forever"
4306 static int wait_request_timeout(struct ceph_osd_request *req,
4307 unsigned long timeout)
4311 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
4312 left = wait_for_completion_killable_timeout(&req->r_completion,
4313 ceph_timeout_jiffies(timeout));
	if (left <= 0) {
		left = left ?: -ETIMEDOUT;
		ceph_osdc_cancel_request(req);
	} else {
		left = req->r_result; /* completed */
	}

	return left;
}
4325 * wait for a request to complete
4327 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
4328 struct ceph_osd_request *req)
4330 return wait_request_timeout(req, 0);
4332 EXPORT_SYMBOL(ceph_osdc_wait_request);
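/*
 * Typical synchronous pattern (a sketch; ceph_osdc_notify_ack() below
 * is a real instance):
 *
 *	ceph_osdc_start_request(osdc, req, false);
 *	ret = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 */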
4335 * sync - wait for all in-flight requests to flush. avoid starvation.
4337 void ceph_osdc_sync(struct ceph_osd_client *osdc)
4339 struct rb_node *n, *p;
4340 u64 last_tid = atomic64_read(&osdc->last_tid);
again:
	down_read(&osdc->lock);
4344 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
4345 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
4347 mutex_lock(&osd->lock);
4348 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
4349 struct ceph_osd_request *req =
4350 rb_entry(p, struct ceph_osd_request, r_node);
			if (req->r_tid > last_tid)
				break;

			if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
				continue;
4358 ceph_osdc_get_request(req);
4359 mutex_unlock(&osd->lock);
4360 up_read(&osdc->lock);
4361 dout("%s waiting on req %p tid %llu last_tid %llu\n",
4362 __func__, req, req->r_tid, last_tid);
4363 wait_for_completion(&req->r_completion);
			ceph_osdc_put_request(req);
			goto again;
		}
4368 mutex_unlock(&osd->lock);
4371 up_read(&osdc->lock);
4372 dout("%s done last_tid %llu\n", __func__, last_tid);
4374 EXPORT_SYMBOL(ceph_osdc_sync);
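/*
 * Allocate a one-op request pre-targeted at @lreq's object; used for
 * both the registration and the ping linger requests.
 */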
4376 static struct ceph_osd_request *
4377 alloc_linger_request(struct ceph_osd_linger_request *lreq)
4379 struct ceph_osd_request *req;
	req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return NULL;
4385 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4386 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4388 if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
		ceph_osdc_put_request(req);
		return NULL;
	}

	return req;
}
4397 * Returns a handle, caller owns a ref.
4399 struct ceph_osd_linger_request *
4400 ceph_osdc_watch(struct ceph_osd_client *osdc,
4401 struct ceph_object_id *oid,
4402 struct ceph_object_locator *oloc,
4403 rados_watchcb2_t wcb,
		rados_watcherrcb_t errcb,
		void *data)
{
4407 struct ceph_osd_linger_request *lreq;
4410 lreq = linger_alloc(osdc);
	if (!lreq)
		return ERR_PTR(-ENOMEM);
	lreq->is_watch = true;
	lreq->wcb = wcb;
	lreq->errcb = errcb;
	lreq->data = data;
	lreq->watch_valid_thru = jiffies;
4420 ceph_oid_copy(&lreq->t.base_oid, oid);
4421 ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4422 lreq->t.flags = CEPH_OSD_FLAG_WRITE;
4423 ktime_get_real_ts(&lreq->mtime);
4425 lreq->reg_req = alloc_linger_request(lreq);
	if (!lreq->reg_req) {
		ret = -ENOMEM;
		goto err_put_lreq;
	}
4431 lreq->ping_req = alloc_linger_request(lreq);
	if (!lreq->ping_req) {
		ret = -ENOMEM;
		goto err_put_lreq;
	}
4437 down_write(&osdc->lock);
4438 linger_register(lreq); /* before osd_req_op_* */
4439 osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
4440 CEPH_OSD_WATCH_OP_WATCH);
4441 osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
4442 CEPH_OSD_WATCH_OP_PING);
4443 linger_submit(lreq);
4444 up_write(&osdc->lock);
	ret = linger_reg_commit_wait(lreq);
	if (ret) {
		linger_cancel(lreq);
		goto err_put_lreq;
	}

	return lreq;

err_put_lreq:
	linger_put(lreq);
	return ERR_PTR(ret);
}
4458 EXPORT_SYMBOL(ceph_osdc_watch);
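/*
 * The handle returned by ceph_osdc_watch() is passed back to
 * ceph_osdc_unwatch(), which tears the watch down and drops the ref
 * that ceph_osdc_watch() returned.
 */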
4463 * Times out after mount_timeout to preserve rbd unmap behaviour
4464 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
4465 * with mount_timeout").
4467 int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
4468 struct ceph_osd_linger_request *lreq)
4470 struct ceph_options *opts = osdc->client->options;
4471 struct ceph_osd_request *req;
4474 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4478 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4479 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4480 req->r_flags = CEPH_OSD_FLAG_WRITE;
4481 ktime_get_real_ts(&req->r_mtime);
4482 osd_req_op_watch_init(req, 0, lreq->linger_id,
4483 CEPH_OSD_WATCH_OP_UNWATCH);
	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	linger_cancel(lreq);
	linger_put(lreq);
	ret = wait_request_timeout(req, opts->mount_timeout);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
4498 EXPORT_SYMBOL(ceph_osdc_unwatch);
4500 static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
4501 u64 notify_id, u64 cookie, void *payload,
4504 struct ceph_osd_req_op *op;
4505 struct ceph_pagelist *pl;
4508 op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
4510 pl = kmalloc(sizeof(*pl), GFP_NOIO);
4514 ceph_pagelist_init(pl);
4515 ret = ceph_pagelist_encode_64(pl, notify_id);
4516 ret |= ceph_pagelist_encode_64(pl, cookie);
	if (payload) {
		ret |= ceph_pagelist_encode_32(pl, payload_len);
		ret |= ceph_pagelist_append(pl, payload, payload_len);
	} else {
		ret |= ceph_pagelist_encode_32(pl, 0);
	}
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}
4528 ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
4529 op->indata_len = pl->length;
4533 int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
4534 struct ceph_object_id *oid,
4535 struct ceph_object_locator *oloc,
4541 struct ceph_osd_request *req;
4544 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4548 ceph_oid_copy(&req->r_base_oid, oid);
4549 ceph_oloc_copy(&req->r_base_oloc, oloc);
4550 req->r_flags = CEPH_OSD_FLAG_READ;
4552 ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4556 ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
4561 ceph_osdc_start_request(osdc, req, false);
4562 ret = ceph_osdc_wait_request(osdc, req);
4565 ceph_osdc_put_request(req);
4568 EXPORT_SYMBOL(ceph_osdc_notify_ack);
4570 static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
4571 u64 cookie, u32 prot_ver, u32 timeout,
4572 void *payload, size_t payload_len)
4574 struct ceph_osd_req_op *op;
4575 struct ceph_pagelist *pl;
4578 op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
4579 op->notify.cookie = cookie;
4581 pl = kmalloc(sizeof(*pl), GFP_NOIO);
4585 ceph_pagelist_init(pl);
4586 ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
4587 ret |= ceph_pagelist_encode_32(pl, timeout);
4588 ret |= ceph_pagelist_encode_32(pl, payload_len);
4589 ret |= ceph_pagelist_append(pl, payload, payload_len);
4590 if (ret) {
4591 ceph_pagelist_release(pl);
4592 return -ENOMEM;
4593 }
4595 ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
4596 op->indata_len = pl->length;
4597 return 0;
4601 * @timeout: in seconds
4603 * @preply_{pages,len} are initialized both on success and error.
4604 * The caller is responsible for:
4606 * ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
4608 int ceph_osdc_notify(struct ceph_osd_client *osdc,
4609 struct ceph_object_id *oid,
4610 struct ceph_object_locator *oloc,
4611 void *payload,
4612 u32 payload_len,
4613 u32 timeout,
4614 struct page ***preply_pages,
4615 size_t *preply_len)
4617 struct ceph_osd_linger_request *lreq;
4618 struct page **pages;
4619 int ret;
4621 WARN_ON(!timeout);
4622 if (preply_pages) {
4623 *preply_pages = NULL;
4624 *preply_len = 0;
4625 }
4627 lreq = linger_alloc(osdc);
4628 if (!lreq)
4629 return -ENOMEM;
4631 lreq->preply_pages = preply_pages;
4632 lreq->preply_len = preply_len;
4634 ceph_oid_copy(&lreq->t.base_oid, oid);
4635 ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4636 lreq->t.flags = CEPH_OSD_FLAG_READ;
4638 lreq->reg_req = alloc_linger_request(lreq);
4639 if (!lreq->reg_req) {
4640 ret = -ENOMEM;
4641 goto out_put_lreq;
4642 }
4644 /* for notify_id */
4645 pages = ceph_alloc_page_vector(1, GFP_NOIO);
4646 if (IS_ERR(pages)) {
4647 ret = PTR_ERR(pages);
4648 goto out_put_lreq;
4649 }
4651 down_write(&osdc->lock);
4652 linger_register(lreq); /* before osd_req_op_* */
4653 ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
4654 timeout, payload, payload_len);
4655 if (ret) {
4656 linger_unregister(lreq);
4657 up_write(&osdc->lock);
4658 ceph_release_page_vector(pages, 1);
4659 goto out_put_lreq;
4660 }
4661 ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
4662 response_data),
4663 pages, PAGE_SIZE, 0, false, true);
4664 linger_submit(lreq);
4665 up_write(&osdc->lock);
4667 ret = linger_reg_commit_wait(lreq);
4668 if (!ret)
4669 ret = linger_notify_finish_wait(lreq);
4670 else
4671 dout("lreq %p failed to initiate notify %d\n", lreq, ret);
4673 linger_cancel(lreq);
4674 out_put_lreq:
4675 linger_put(lreq);
4676 return ret;
4678 EXPORT_SYMBOL(ceph_osdc_notify);
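/*
 * Illustrative usage sketch - an editor's addition: sending a notify and
 * releasing the reply page vector exactly as the comment above
 * prescribes.  The payload contents and the 5-second timeout are
 * arbitrary example values.
 */
static int example_notify(struct ceph_osd_client *osdc,
			  struct ceph_object_id *oid,
			  struct ceph_object_locator *oloc)
{
	char payload[8] = "example";
	struct page **reply_pages = NULL;
	size_t reply_len = 0;
	int ret;

	ret = ceph_osdc_notify(osdc, oid, oloc, payload, sizeof(payload),
			       5, &reply_pages, &reply_len);
	/* reply_pages is handed back on success *and* error */
	if (reply_pages)
		ceph_release_page_vector(reply_pages,
					 calc_pages_for(0, reply_len));
	return ret;
}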
4681 * Return the number of milliseconds since the watch was last
4682 * confirmed, or an error. If there is an error, the watch is no
4683 * longer valid, and should be destroyed with ceph_osdc_unwatch().
4685 int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
4686 struct ceph_osd_linger_request *lreq)
4688 unsigned long stamp, age;
4689 int ret;
4691 down_read(&osdc->lock);
4692 mutex_lock(&lreq->lock);
4693 stamp = lreq->watch_valid_thru;
4694 if (!list_empty(&lreq->pending_lworks)) {
4695 struct linger_work *lwork =
4696 list_first_entry(&lreq->pending_lworks,
4697 struct linger_work,
4698 pending_item);
4700 if (time_before(lwork->queued_stamp, stamp))
4701 stamp = lwork->queued_stamp;
4702 }
4703 age = jiffies - stamp;
4704 dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
4705 lreq, lreq->linger_id, age, lreq->last_error);
4706 /* we are truncating to msecs, so return a safe upper bound */
4707 ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
4709 mutex_unlock(&lreq->lock);
4710 up_read(&osdc->lock);
4711 return ret;
4714 static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
4716 u8 struct_v;
4717 u32 struct_len;
4718 int ret;
4720 ret = ceph_start_decoding(p, end, 2, "watch_item_t",
4721 &struct_v, &struct_len);
4722 if (ret)
4723 return ret;
4725 ceph_decode_copy(p, &item->name, sizeof(item->name));
4726 item->cookie = ceph_decode_64(p);
4727 *p += 4; /* skip timeout_seconds */
4728 if (struct_v >= 2) {
4729 ceph_decode_copy(p, &item->addr, sizeof(item->addr));
4730 ceph_decode_addr(&item->addr);
4731 }
4733 dout("%s %s%llu cookie %llu addr %s\n", __func__,
4734 ENTITY_NAME(item->name), item->cookie,
4735 ceph_pr_addr(&item->addr.in_addr));
4736 return 0;
4739 static int decode_watchers(void **p, void *end,
4740 struct ceph_watch_item **watchers,
4741 u32 *num_watchers)
4743 u8 struct_v;
4744 u32 struct_len;
4745 int i;
4746 int ret;
4748 ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
4749 &struct_v, &struct_len);
4750 if (ret)
4751 return ret;
4753 *num_watchers = ceph_decode_32(p);
4754 *watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
4755 if (!*watchers)
4756 return -ENOMEM;
4758 for (i = 0; i < *num_watchers; i++) {
4759 ret = decode_watcher(p, end, *watchers + i);
4760 if (ret) {
4761 kfree(*watchers);
4762 return ret;
4763 }
4764 }
4766 return 0;
4770 * On success, the caller is responsible for:
4771 *
4772 *     kfree(watchers)
4774 int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
4775 struct ceph_object_id *oid,
4776 struct ceph_object_locator *oloc,
4777 struct ceph_watch_item **watchers,
4778 u32 *num_watchers)
4780 struct ceph_osd_request *req;
4781 struct page **pages;
4782 int ret;
4784 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4785 if (!req)
4786 return -ENOMEM;
4788 ceph_oid_copy(&req->r_base_oid, oid);
4789 ceph_oloc_copy(&req->r_base_oloc, oloc);
4790 req->r_flags = CEPH_OSD_FLAG_READ;
4792 ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4793 if (ret)
4794 goto out_put_req;
4796 pages = ceph_alloc_page_vector(1, GFP_NOIO);
4797 if (IS_ERR(pages)) {
4798 ret = PTR_ERR(pages);
4799 goto out_put_req;
4800 }
4802 osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
4803 ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
4804 response_data),
4805 pages, PAGE_SIZE, 0, false, true);
4807 ceph_osdc_start_request(osdc, req, false);
4808 ret = ceph_osdc_wait_request(osdc, req);
4809 if (ret >= 0) {
4810 void *p = page_address(pages[0]);
4811 void *const end = p + req->r_ops[0].outdata_len;
4813 ret = decode_watchers(&p, end, watchers, num_watchers);
4814 }
4816 out_put_req:
4817 ceph_osdc_put_request(req);
4818 return ret;
4820 EXPORT_SYMBOL(ceph_osdc_list_watchers);
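/*
 * Illustrative usage sketch - an editor's addition: listing the watchers
 * on an object and freeing the array afterwards, as the comment above
 * requires.  The example_* name is hypothetical.
 */
static void example_dump_watchers(struct ceph_osd_client *osdc,
				  struct ceph_object_id *oid,
				  struct ceph_object_locator *oloc)
{
	struct ceph_watch_item *watchers;
	u32 num_watchers;
	u32 i;

	if (ceph_osdc_list_watchers(osdc, oid, oloc, &watchers,
				    &num_watchers))
		return;

	for (i = 0; i < num_watchers; i++)
		pr_info("example watcher cookie %llu\n", watchers[i].cookie);
	kfree(watchers);
}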
4823 * Call all pending notify callbacks - for use after a watch is
4824 * unregistered, to make sure no more callbacks for it will be invoked
4826 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
4828 dout("%s osdc %p\n", __func__, osdc);
4829 flush_workqueue(osdc->notify_wq);
4831 EXPORT_SYMBOL(ceph_osdc_flush_notifies);
4833 void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
4835 down_read(&osdc->lock);
4836 maybe_request_map(osdc);
4837 up_read(&osdc->lock);
4839 EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
4842 * Execute an OSD class method on an object.
4844 * @flags: CEPH_OSD_FLAG_*
4845 * @resp_len: in/out param for reply length
4847 int ceph_osdc_call(struct ceph_osd_client *osdc,
4848 struct ceph_object_id *oid,
4849 struct ceph_object_locator *oloc,
4850 const char *class, const char *method,
4851 unsigned int flags,
4852 struct page *req_page, size_t req_len,
4853 struct page *resp_page, size_t *resp_len)
4855 struct ceph_osd_request *req;
4856 int ret;
4858 if (req_len > PAGE_SIZE || (resp_page && *resp_len > PAGE_SIZE))
4859 return -E2BIG;
4861 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4862 if (!req)
4863 return -ENOMEM;
4865 ceph_oid_copy(&req->r_base_oid, oid);
4866 ceph_oloc_copy(&req->r_base_oloc, oloc);
4867 req->r_flags = flags;
4869 ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4870 if (ret)
4871 goto out_put_req;
4873 osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, class, method);
4874 if (req_page)
4875 osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
4876 0, false, false);
4877 if (resp_page)
4878 osd_req_op_cls_response_data_pages(req, 0, &resp_page,
4879 *resp_len, 0, false, false);
4881 ceph_osdc_start_request(osdc, req, false);
4882 ret = ceph_osdc_wait_request(osdc, req);
4883 if (ret >= 0) {
4884 ret = req->r_ops[0].rval;
4885 if (resp_page)
4886 *resp_len = req->r_ops[0].outdata_len;
4887 }
4889 out_put_req:
4890 ceph_osdc_put_request(req);
4891 return ret;
4893 EXPORT_SYMBOL(ceph_osdc_call);
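/*
 * Illustrative usage sketch - an editor's addition: invoking a class
 * method with no request payload and a single-page reply buffer, in the
 * style of rbd's method-sync helper.  The "example_cls"/"example_method"
 * names and the example_* helper are placeholders, not real classes.
 */
static int example_cls_call(struct ceph_osd_client *osdc,
			    struct ceph_object_id *oid,
			    struct ceph_object_locator *oloc)
{
	struct page *reply_page;
	size_t reply_len = PAGE_SIZE;	/* in: buffer size, out: bytes used */
	int ret;

	reply_page = alloc_page(GFP_NOIO);
	if (!reply_page)
		return -ENOMEM;

	ret = ceph_osdc_call(osdc, oid, oloc, "example_cls", "example_method",
			     CEPH_OSD_FLAG_READ, NULL, 0,
			     reply_page, &reply_len);
	if (ret >= 0)
		pr_info("example method returned %zu bytes\n", reply_len);

	__free_page(reply_page);
	return ret;
}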
4898 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
4900 int err;
4902 dout("init\n");
4903 osdc->client = client;
4904 init_rwsem(&osdc->lock);
4905 osdc->osds = RB_ROOT;
4906 INIT_LIST_HEAD(&osdc->osd_lru);
4907 spin_lock_init(&osdc->osd_lru_lock);
4908 osd_init(&osdc->homeless_osd);
4909 osdc->homeless_osd.o_osdc = osdc;
4910 osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
4911 osdc->last_linger_id = CEPH_LINGER_ID_START;
4912 osdc->linger_requests = RB_ROOT;
4913 osdc->map_checks = RB_ROOT;
4914 osdc->linger_map_checks = RB_ROOT;
4915 INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
4916 INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
4918 err = -ENOMEM;
4919 osdc->osdmap = ceph_osdmap_alloc();
4920 if (!osdc->osdmap)
4921 goto out;
4923 osdc->req_mempool = mempool_create_slab_pool(10,
4924 ceph_osd_request_cache);
4925 if (!osdc->req_mempool)
4926 goto out_map;
4928 err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
4929 PAGE_SIZE, 10, true, "osd_op");
4930 if (err < 0)
4931 goto out_mempool;
4932 err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
4933 PAGE_SIZE, 10, true, "osd_op_reply");
4934 if (err < 0)
4935 goto out_msgpool;
4937 err = -ENOMEM;
4938 osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
4939 if (!osdc->notify_wq)
4940 goto out_msgpool_reply;
4942 schedule_delayed_work(&osdc->timeout_work,
4943 osdc->client->options->osd_keepalive_timeout);
4944 schedule_delayed_work(&osdc->osds_timeout_work,
4945 round_jiffies_relative(osdc->client->options->osd_idle_ttl));
4947 return 0;
4949 out_msgpool_reply:
4950 ceph_msgpool_destroy(&osdc->msgpool_op_reply);
4951 out_msgpool:
4952 ceph_msgpool_destroy(&osdc->msgpool_op);
4953 out_mempool:
4954 mempool_destroy(osdc->req_mempool);
4955 out_map:
4956 ceph_osdmap_destroy(osdc->osdmap);
4957 out:
4958 return err;
4961 void ceph_osdc_stop(struct ceph_osd_client *osdc)
4963 flush_workqueue(osdc->notify_wq);
4964 destroy_workqueue(osdc->notify_wq);
4965 cancel_delayed_work_sync(&osdc->timeout_work);
4966 cancel_delayed_work_sync(&osdc->osds_timeout_work);
4968 down_write(&osdc->lock);
4969 while (!RB_EMPTY_ROOT(&osdc->osds)) {
4970 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
4971 struct ceph_osd, o_node);
4972 close_osd(osd);
4973 }
4974 up_write(&osdc->lock);
4975 WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
4976 osd_cleanup(&osdc->homeless_osd);
4978 WARN_ON(!list_empty(&osdc->osd_lru));
4979 WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
4980 WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
4981 WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
4982 WARN_ON(atomic_read(&osdc->num_requests));
4983 WARN_ON(atomic_read(&osdc->num_homeless));
4985 ceph_osdmap_destroy(osdc->osdmap);
4986 mempool_destroy(osdc->req_mempool);
4987 ceph_msgpool_destroy(&osdc->msgpool_op);
4988 ceph_msgpool_destroy(&osdc->msgpool_op_reply);
4992 * Read some contiguous pages. If we cross a stripe boundary, shorten
4993 * *plen. Return number of bytes read, or error.
4995 int ceph_osdc_readpages(struct ceph_osd_client *osdc,
4996 struct ceph_vino vino, struct ceph_file_layout *layout,
4997 u64 off, u64 *plen,
4998 u32 truncate_seq, u64 truncate_size,
4999 struct page **pages, int num_pages, int page_align)
5001 struct ceph_osd_request *req;
5002 int rc = 0;
5004 dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
5005 vino.snap, off, *plen);
5006 req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
5007 CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
5008 NULL, truncate_seq, truncate_size,
5009 false);
5010 if (IS_ERR(req))
5011 return PTR_ERR(req);
5013 /* it may be a short read due to an object boundary */
5014 osd_req_op_extent_osd_data_pages(req, 0,
5015 pages, *plen, page_align, false, false);
5017 dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
5018 off, *plen, *plen, page_align);
5020 rc = ceph_osdc_start_request(osdc, req, false);
5021 if (!rc)
5022 rc = ceph_osdc_wait_request(osdc, req);
5024 ceph_osdc_put_request(req);
5025 dout("readpages result %d\n", rc);
5026 return rc;
5028 EXPORT_SYMBOL(ceph_osdc_readpages);
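/*
 * Illustrative usage sketch - an editor's addition: a synchronous,
 * object-bounded read into a freshly allocated page vector.  In real
 * callers the vino and layout come from the inode; the zero
 * truncate_seq/truncate_size and the example_* name are assumptions.
 */
static int example_sync_read(struct ceph_osd_client *osdc,
			     struct ceph_vino vino,
			     struct ceph_file_layout *layout,
			     u64 off, u64 len)
{
	int num_pages = calc_pages_for(0, len);
	struct page **pages;
	int ret;

	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* len may come back shortened if the range crossed an object */
	ret = ceph_osdc_readpages(osdc, vino, layout, off, &len,
				  0, 0, pages, num_pages, 0);
	ceph_release_page_vector(pages, num_pages);
	return ret;	/* bytes read, or a negative error */
}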
5031 * do a synchronous write on N pages
5033 int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
5034 struct ceph_file_layout *layout,
5035 struct ceph_snap_context *snapc,
5036 u64 off, u64 len,
5037 u32 truncate_seq, u64 truncate_size,
5038 struct timespec *mtime,
5039 struct page **pages, int num_pages)
5041 struct ceph_osd_request *req;
5042 int rc = 0;
5043 int page_align = off & ~PAGE_MASK;
5045 req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
5046 CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
5047 snapc, truncate_seq, truncate_size,
5048 true);
5049 if (IS_ERR(req))
5050 return PTR_ERR(req);
5052 /* it may be a short write due to an object boundary */
5053 osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
5054 false, false);
5055 dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
5057 req->r_mtime = *mtime;
5058 rc = ceph_osdc_start_request(osdc, req, true);
5059 if (!rc)
5060 rc = ceph_osdc_wait_request(osdc, req);
5062 ceph_osdc_put_request(req);
5063 if (rc == 0)
5064 rc = len;
5065 dout("writepages result %d\n", rc);
5066 return rc;
5068 EXPORT_SYMBOL(ceph_osdc_writepages);
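/*
 * Illustrative usage sketch - an editor's addition: a synchronous write
 * of a single page, supplying the mtime the OSD should record.  The
 * snapc would come from the caller's snap context; the example_* name is
 * hypothetical.
 */
static int example_sync_write_page(struct ceph_osd_client *osdc,
				   struct ceph_vino vino,
				   struct ceph_file_layout *layout,
				   struct ceph_snap_context *snapc,
				   u64 off, struct page *page)
{
	struct timespec mtime;

	ktime_get_real_ts(&mtime);
	/* returns bytes written (PAGE_SIZE) or a negative error */
	return ceph_osdc_writepages(osdc, vino, layout, snapc, off,
				    PAGE_SIZE, 0, 0, &mtime, &page, 1);
}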
5070 int ceph_osdc_setup(void)
5072 size_t size = sizeof(struct ceph_osd_request) +
5073 CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);
5075 BUG_ON(ceph_osd_request_cache);
5076 ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
5077 0, 0, NULL);
5079 return ceph_osd_request_cache ? 0 : -ENOMEM;
5081 EXPORT_SYMBOL(ceph_osdc_setup);
5083 void ceph_osdc_cleanup(void)
5085 BUG_ON(!ceph_osd_request_cache);
5086 kmem_cache_destroy(ceph_osd_request_cache);
5087 ceph_osd_request_cache = NULL;
5089 EXPORT_SYMBOL(ceph_osdc_cleanup);
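/*
 * Illustrative usage sketch - an editor's addition: ceph_osdc_setup()
 * and ceph_osdc_cleanup() pair up across module load and unload; libceph
 * drives them from its own init/exit paths roughly like this (the
 * example_* names are hypothetical).
 */
static int __init example_client_init(void)
{
	int ret;

	ret = ceph_osdc_setup();	/* create the request slab */
	if (ret)
		return ret;
	/* ... the rest of client initialization ... */
	return 0;
}

static void __exit example_client_exit(void)
{
	/* ... tear down clients first ... */
	ceph_osdc_cleanup();		/* destroy the request slab */
}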
5092 * handle incoming message
5094 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
5096 struct ceph_osd *osd = con->private;
5097 struct ceph_osd_client *osdc = osd->o_osdc;
5098 int type = le16_to_cpu(msg->hdr.type);
5100 switch (type) {
5101 case CEPH_MSG_OSD_MAP:
5102 ceph_osdc_handle_map(osdc, msg);
5103 break;
5104 case CEPH_MSG_OSD_OPREPLY:
5105 handle_reply(osd, msg);
5106 break;
5107 case CEPH_MSG_OSD_BACKOFF:
5108 handle_backoff(osd, msg);
5109 break;
5110 case CEPH_MSG_WATCH_NOTIFY:
5111 handle_watch_notify(osdc, msg);
5112 break;
5114 default:
5115 pr_err("received unknown message type %d %s\n", type,
5116 ceph_msg_type_name(type));
5117 }
5119 ceph_msg_put(msg);
5123 * Look up and return the message for an incoming reply. We don't try
5124 * to handle a data portion that is larger than preallocated at the
5125 * moment - for now, we just skip the message.
5127 static struct ceph_msg *get_reply(struct ceph_connection *con,
5128 struct ceph_msg_header *hdr,
5129 int *skip)
5131 struct ceph_osd *osd = con->private;
5132 struct ceph_osd_client *osdc = osd->o_osdc;
5133 struct ceph_msg *m = NULL;
5134 struct ceph_osd_request *req;
5135 int front_len = le32_to_cpu(hdr->front_len);
5136 int data_len = le32_to_cpu(hdr->data_len);
5137 u64 tid = le64_to_cpu(hdr->tid);
5139 down_read(&osdc->lock);
5140 if (!osd_registered(osd)) {
5141 dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
5142 *skip = 1;
5143 goto out_unlock_osdc;
5144 }
5145 WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));
5147 mutex_lock(&osd->lock);
5148 req = lookup_request(&osd->o_requests, tid);
5149 if (!req) {
5150 dout("%s osd%d tid %llu unknown, skipping\n", __func__,
5151 osd->o_osd, tid);
5152 *skip = 1;
5153 goto out_unlock_session;
5154 }
5156 ceph_msg_revoke_incoming(req->r_reply);
5158 if (front_len > req->r_reply->front_alloc_len) {
5159 pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
5160 __func__, osd->o_osd, req->r_tid, front_len,
5161 req->r_reply->front_alloc_len);
5162 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
5163 false);
5164 if (!m)
5165 goto out_unlock_session;
5166 ceph_msg_put(req->r_reply);
5167 req->r_reply = m;
5168 }
5170 if (data_len > req->r_reply->data_length) {
5171 pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
5172 __func__, osd->o_osd, req->r_tid, data_len,
5173 req->r_reply->data_length);
5174 m = NULL;
5175 *skip = 1;
5176 goto out_unlock_session;
5177 }
5179 m = ceph_msg_get(req->r_reply);
5180 dout("get_reply tid %lld %p\n", tid, m);
5182 out_unlock_session:
5183 mutex_unlock(&osd->lock);
5184 out_unlock_osdc:
5185 up_read(&osdc->lock);
5186 return m;
5190 * TODO: switch to a msg-owned pagelist
5192 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
5194 struct ceph_msg *m;
5195 int type = le16_to_cpu(hdr->type);
5196 u32 front_len = le32_to_cpu(hdr->front_len);
5197 u32 data_len = le32_to_cpu(hdr->data_len);
5199 m = ceph_msg_new(type, front_len, GFP_NOIO, false);
5200 if (!m)
5201 return NULL;
5203 if (data_len) {
5204 struct page **pages;
5205 struct ceph_osd_data osd_data;
5207 pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
5208 GFP_NOIO);
5209 if (IS_ERR(pages)) {
5210 ceph_msg_put(m);
5211 return NULL;
5212 }
5214 ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
5215 false);
5216 ceph_osdc_msg_data_add(m, &osd_data);
5217 }
5219 return m;
5222 static struct ceph_msg *alloc_msg(struct ceph_connection *con,
5223 struct ceph_msg_header *hdr,
5226 struct ceph_osd *osd = con->private;
5227 int type = le16_to_cpu(hdr->type);
5229 *skip = 0;
5230 switch (type) {
5231 case CEPH_MSG_OSD_MAP:
5232 case CEPH_MSG_OSD_BACKOFF:
5233 case CEPH_MSG_WATCH_NOTIFY:
5234 return alloc_msg_with_page_vector(hdr);
5235 case CEPH_MSG_OSD_OPREPLY:
5236 return get_reply(con, hdr, skip);
5237 default:
5238 pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
5239 osd->o_osd, type);
5240 *skip = 1;
5241 return NULL;
5242 }
5246 * Wrappers to reference-count the containing ceph_osd struct
5248 static struct ceph_connection *get_osd_con(struct ceph_connection *con)
5250 struct ceph_osd *osd = con->private;
5251 if (get_osd(osd))
5252 return con;
5253 return NULL;
5256 static void put_osd_con(struct ceph_connection *con)
5258 struct ceph_osd *osd = con->private;
5259 put_osd(osd);
5266 * Note: returned pointer is the address of a structure that's
5267 * managed separately. Caller must *not* attempt to free it.
5269 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
5270 int *proto, int force_new)
5272 struct ceph_osd *o = con->private;
5273 struct ceph_osd_client *osdc = o->o_osdc;
5274 struct ceph_auth_client *ac = osdc->client->monc.auth;
5275 struct ceph_auth_handshake *auth = &o->o_auth;
5277 if (force_new && auth->authorizer) {
5278 ceph_auth_destroy_authorizer(auth->authorizer);
5279 auth->authorizer = NULL;
5280 }
5281 if (!auth->authorizer) {
5282 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
5283 auth);
5284 if (ret)
5285 return ERR_PTR(ret);
5286 } else {
5287 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
5288 auth);
5289 if (ret)
5290 return ERR_PTR(ret);
5291 }
5292 *proto = ac->protocol;
5294 return auth;
5297 static int add_authorizer_challenge(struct ceph_connection *con,
5298 void *challenge_buf, int challenge_buf_len)
5300 struct ceph_osd *o = con->private;
5301 struct ceph_osd_client *osdc = o->o_osdc;
5302 struct ceph_auth_client *ac = osdc->client->monc.auth;
5304 return ceph_auth_add_authorizer_challenge(ac, o->o_auth.authorizer,
5305 challenge_buf, challenge_buf_len);
5308 static int verify_authorizer_reply(struct ceph_connection *con)
5310 struct ceph_osd *o = con->private;
5311 struct ceph_osd_client *osdc = o->o_osdc;
5312 struct ceph_auth_client *ac = osdc->client->monc.auth;
5314 return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
5317 static int invalidate_authorizer(struct ceph_connection *con)
5319 struct ceph_osd *o = con->private;
5320 struct ceph_osd_client *osdc = o->o_osdc;
5321 struct ceph_auth_client *ac = osdc->client->monc.auth;
5323 ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
5324 return ceph_monc_validate_auth(&osdc->client->monc);
5327 static void osd_reencode_message(struct ceph_msg *msg)
5329 int type = le16_to_cpu(msg->hdr.type);
5331 if (type == CEPH_MSG_OSD_OP)
5332 encode_request_finish(msg);
5335 static int osd_sign_message(struct ceph_msg *msg)
5337 struct ceph_osd *o = msg->con->private;
5338 struct ceph_auth_handshake *auth = &o->o_auth;
5340 return ceph_auth_sign_message(auth, msg);
5343 static int osd_check_message_signature(struct ceph_msg *msg)
5345 struct ceph_osd *o = msg->con->private;
5346 struct ceph_auth_handshake *auth = &o->o_auth;
5348 return ceph_auth_check_message_signature(auth, msg);
5351 static const struct ceph_connection_operations osd_con_ops = {
5352 .get = get_osd_con,
5353 .put = put_osd_con,
5354 .dispatch = dispatch,
5355 .get_authorizer = get_authorizer,
5356 .add_authorizer_challenge = add_authorizer_challenge,
5357 .verify_authorizer_reply = verify_authorizer_reply,
5358 .invalidate_authorizer = invalidate_authorizer,
5359 .alloc_msg = alloc_msg,
5360 .reencode_message = osd_reencode_message,
5361 .sign_message = osd_sign_message,
5362 .check_message_signature = osd_check_message_signature,
5363 .fault = osd_fault,
5364 };