// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/striper.h>

#define OSD_OPREPLY_FRONT_LEN	512

static struct kmem_cache	*ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * object storage within the cluster.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */
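
/*
 * Illustrative usage sketch (not part of this file; error handling is
 * elided and identifiers such as "osdc", "layout", "vino" and "pages"
 * are assumed to exist in the caller):
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, truncate_seq, truncate_size,
 *				    false);
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
 *	ceph_osdc_start_request(osdc, req, false);
 *	ret = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 */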
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq);
static void clear_backoffs(struct ceph_osd *osd);

#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
	bool wrlocked = true;

	if (unlikely(down_read_trylock(sem))) {
		wrlocked = false;
		up_read(sem);
	}

	return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	WARN_ON(!(mutex_is_locked(&osd->lock) &&
		  rwsem_is_locked(&osdc->lock)) &&
		!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
	WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
			u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	u32 xlen;

	/* object extent? */
	ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
				      objoff, &xlen);
	*objlen = xlen;
	if (*objlen < orig_len) {
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
	return 0;
}
static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;
}

static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
				   struct ceph_bio_iter *bio_pos,
				   u32 bio_length)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
	osd_data->bio_pos = *bio_pos;
	osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data,
				     struct ceph_bvec_iter *bvec_pos,
				     u32 num_bvecs)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BVECS;
	osd_data->bvec_pos = *bvec_pos;
	osd_data->num_bvecs = num_bvecs;
}

#define osd_req_op_data(oreq, whch, typ, fld)				\
({									\
	struct ceph_osd_request *__oreq = (oreq);			\
	unsigned int __whch = (whch);					\
	BUG_ON(__whch >= __oreq->r_num_ops);				\
	&__oreq->r_ops[__whch].typ.fld;					\
})

static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
	BUG_ON(which >= osd_req->r_num_ops);

	return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
			   unsigned int which)
{
	return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_raw_data_in(osd_req, which);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
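
/*
 * Example (sketch): to receive the result of a READ extent op at index
 * 0 into a freshly allocated page vector:
 *
 *	num_pages = calc_pages_for(0, len);
 *	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
 *
 * Passing own_pages == true instead would make the request own the
 * vector, so ceph_osd_data_release() frees it on the last put.
 */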
void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
				    unsigned int which,
				    struct ceph_bio_iter *bio_pos,
				    u32 bio_length)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bio_init(osd_data, bio_pos, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
				      unsigned int which,
				      struct bio_vec *bvecs, u32 num_bvecs,
				      u32 bytes)
{
	struct ceph_osd_data *osd_data;
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvecs);

void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
					 unsigned int which,
					 struct ceph_bvec_iter *bvec_pos)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bvecs_init(osd_data, bvec_pos, 0);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos);

static void osd_req_op_cls_request_info_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_info);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
	osd_req->r_ops[which].cls.indata_len += pagelist->length;
	osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
	osd_req->r_ops[which].cls.indata_len += length;
	osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
				       unsigned int which,
				       struct bio_vec *bvecs, u32 num_bvecs,
				       u32 bytes)
{
	struct ceph_osd_data *osd_data;
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
	osd_req->r_ops[which].cls.indata_len += bytes;
	osd_req->r_ops[which].indata_len += bytes;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_bvecs);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
	switch (osd_data->type) {
	case CEPH_OSD_DATA_TYPE_NONE:
		return 0;
	case CEPH_OSD_DATA_TYPE_PAGES:
		return osd_data->length;
	case CEPH_OSD_DATA_TYPE_PAGELIST:
		return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
	case CEPH_OSD_DATA_TYPE_BIO:
		return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
	case CEPH_OSD_DATA_TYPE_BVECS:
		return osd_data->bvec_pos.iter.bi_size;
	default:
		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
		return 0;
	}
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
		int num_pages;

		num_pages = calc_pages_for((u64)osd_data->alignment,
					   (u64)osd_data->length);
		ceph_release_page_vector(osd_data->pages, num_pages);
	}
	ceph_osd_data_init(osd_data);
}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
				    unsigned int which)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];

	switch (op->op) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		ceph_osd_data_release(&op->extent.osd_data);
		break;
	case CEPH_OSD_OP_CALL:
		ceph_osd_data_release(&op->cls.request_info);
		ceph_osd_data_release(&op->cls.request_data);
		ceph_osd_data_release(&op->cls.response_data);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		ceph_osd_data_release(&op->xattr.osd_data);
		break;
	case CEPH_OSD_OP_STAT:
		ceph_osd_data_release(&op->raw_data_in);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		ceph_osd_data_release(&op->notify_ack.request_data);
		break;
	case CEPH_OSD_OP_NOTIFY:
		ceph_osd_data_release(&op->notify.request_data);
		ceph_osd_data_release(&op->notify.response_data);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		ceph_osd_data_release(&op->list_watchers.response_data);
		break;
	default:
		break;
	}
}
/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
	ceph_oid_init(&t->base_oid);
	ceph_oloc_init(&t->base_oloc);
	ceph_oid_init(&t->target_oid);
	ceph_oloc_init(&t->target_oloc);

	ceph_osds_init(&t->acting);
	ceph_osds_init(&t->up);
	t->size = -1;
	t->min_size = -1;

	t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
			const struct ceph_osd_request_target *src)
{
	ceph_oid_copy(&dest->base_oid, &src->base_oid);
	ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
	ceph_oid_copy(&dest->target_oid, &src->target_oid);
	ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

	dest->pgid = src->pgid; /* struct */
	dest->spgid = src->spgid; /* struct */
	dest->pg_num = src->pg_num;
	dest->pg_num_mask = src->pg_num_mask;
	ceph_osds_copy(&dest->acting, &src->acting);
	ceph_osds_copy(&dest->up, &src->up);
	dest->size = src->size;
	dest->min_size = src->min_size;
	dest->sort_bitwise = src->sort_bitwise;
	dest->recovery_deletes = src->recovery_deletes;

	dest->flags = src->flags;
	dest->paused = src->paused;

	dest->epoch = src->epoch;
	dest->last_force_resend = src->last_force_resend;

	dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
	ceph_oid_destroy(&t->base_oid);
	ceph_oloc_destroy(&t->base_oloc);
	ceph_oid_destroy(&t->target_oid);
	ceph_oloc_destroy(&t->target_oloc);
}

/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{
	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
	WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
	WARN_ON(!list_empty(&req->r_unsafe_item));
	WARN_ON(req->r_osd);
}

static void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
					    struct ceph_osd_request, r_kref);
	unsigned int which;

	dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
	     req->r_request, req->r_reply);
	request_release_checks(req);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);

	for (which = 0; which < req->r_num_ops; which++)
		osd_req_op_data_release(req, which);

	target_destroy(&req->r_t);
	ceph_put_snap_context(req->r_snapc);
	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
		kmem_cache_free(ceph_osd_request_cache, req);
	else
		kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
	dout("%s %p (was %d)\n", __func__, req,
	     kref_read(&req->r_kref));
	kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
	if (req) {
		dout("%s %p (was %d)\n", __func__, req,
		     kref_read(&req->r_kref));
		kref_put(&req->r_kref, ceph_osdc_release_request);
	}
}
EXPORT_SYMBOL(ceph_osdc_put_request);
static void request_init(struct ceph_osd_request *req)
{
	/* req only, each op is zeroed in _osd_req_op_init() */
	memset(req, 0, sizeof(*req));

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	RB_CLEAR_NODE(&req->r_node);
	RB_CLEAR_NODE(&req->r_mc_node);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	target_init(&req->r_t);
}

/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	bool mempool = req->r_mempool;
	unsigned int num_ops = req->r_num_ops;
	u64 snapid = req->r_snapid;
	struct ceph_snap_context *snapc = req->r_snapc;
	bool linger = req->r_linger;
	struct ceph_msg *request_msg = req->r_request;
	struct ceph_msg *reply_msg = req->r_reply;

	dout("%s req %p\n", __func__, req);
	WARN_ON(kref_read(&req->r_kref) != 1);
	request_release_checks(req);

	WARN_ON(kref_read(&request_msg->kref) != 1);
	WARN_ON(kref_read(&reply_msg->kref) != 1);
	target_destroy(&req->r_t);

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = snapid;
	req->r_snapc = snapc;
	req->r_linger = linger;
	req->r_request = request_msg;
	req->r_reply = reply_msg;
}

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_ops,
					       bool use_mempool,
					       gfp_t gfp_flags)
{
	struct ceph_osd_request *req;

	if (use_mempool) {
		BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
	} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
	} else {
		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
		req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags);
	}
	if (unlikely(!req))
		return NULL;

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = use_mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = CEPH_NOSNAP;
	req->r_snapc = ceph_get_snap_context(snapc);

	dout("%s req %p\n", __func__, req);
	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);

static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
{
	return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_msg *msg;
	int msg_size;

	WARN_ON(ceph_oid_empty(&req->r_base_oid));
	WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

	/* create request message */
	msg_size = CEPH_ENCODING_START_BLK_LEN +
			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
	msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			sizeof(struct ceph_osd_reqid); /* reqid */
	msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
	msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
	msg_size += 4 + req->r_base_oid.name_len; /* oid */
	msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
	msg_size += 8; /* snapid */
	msg_size += 8; /* snap_seq */
	msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
	msg_size += 4 + 8; /* retry_attempt, features */

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
	if (!msg)
		return -ENOMEM;

	memset(msg->front.iov_base, 0, msg->front.iov_len);
	req->r_request = msg;

	/* create reply message */
	msg_size = OSD_OPREPLY_FRONT_LEN;
	msg_size += req->r_base_oid.name_len;
	msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
	if (!msg)
		return -ENOMEM;

	req->r_reply = msg;

	return 0;
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);

static bool osd_req_opcode_valid(u16 opcode)
{
	switch (opcode) {
#define GENERATE_CASE(op, opcode, str)	case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
	default:
		return false;
	}
}
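
/*
 * __CEPH_FORALL_OSD_OPS() is an X-macro over the opcode table (see
 * rados.h); each GENERATE_CASE invocation expands to a line like
 *
 *	case CEPH_OSD_OP_READ: return true;
 *
 * so exactly the opcodes listed in that table are considered valid.
 */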
/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
		 u16 opcode, u32 flags)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	BUG_ON(!osd_req_opcode_valid(opcode));

	op = &osd_req->r_ops[which];
	memset(op, 0, sizeof (*op));
	op->op = opcode;
	op->flags = flags;

	return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
		     unsigned int which, u16 opcode, u32 flags)
{
	(void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
			    unsigned int which, u16 opcode,
			    u64 offset, u64 length,
			    u64 truncate_size, u32 truncate_seq)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	size_t payload_len = 0;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
	       opcode != CEPH_OSD_OP_TRUNCATE);

	op->extent.offset = offset;
	op->extent.length = length;
	op->extent.truncate_size = truncate_size;
	op->extent.truncate_seq = truncate_seq;
	if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
		payload_len += length;

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
			      unsigned int which, u64 length)
{
	struct ceph_osd_req_op *op;
	u64 previous;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];
	previous = op->extent.length;

	if (length == previous)
		return;		/* Nothing to do */
	BUG_ON(length > previous);

	op->extent.length = length;
	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
				unsigned int which, u64 offset_inc)
{
	struct ceph_osd_req_op *op, *prev_op;

	BUG_ON(which + 1 >= osd_req->r_num_ops);

	prev_op = &osd_req->r_ops[which];
	op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
	/* dup previous one */
	op->indata_len = prev_op->indata_len;
	op->outdata_len = prev_op->outdata_len;
	op->extent = prev_op->extent;
	/* adjust offset */
	op->extent.offset += offset_inc;
	op->extent.length -= offset_inc;

	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);

int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
			u16 opcode, const char *class, const char *method)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len = 0;
	size_t size;

	BUG_ON(opcode != CEPH_OSD_OP_CALL);

	pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	ceph_pagelist_init(pagelist);

	op->cls.class_name = class;
	size = strlen(class);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.class_len = size;
	ceph_pagelist_append(pagelist, class, size);
	payload_len += size;

	op->cls.method_name = method;
	size = strlen(method);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.method_len = size;
	ceph_pagelist_append(pagelist, method, size);
	payload_len += size;

	osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);

	op->indata_len = payload_len;
	return 0;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
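
/*
 * Usage sketch (class and method names below are illustrative):
 * invoking an object class method with input supplied as pages and the
 * result collected into reply pages:
 *
 *	osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, "lock", "get_info");
 *	osd_req_op_cls_request_data_pages(req, 0, in_pages, in_len, 0,
 *					  false, false);
 *	osd_req_op_cls_response_data_pages(req, 0, out_pages, out_len, 0,
 *					   false, false);
 *
 * The class/method strings land in request_info via the pagelist set
 * up above; method input goes in request_data and is accounted into
 * cls.indata_len.
 */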
int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
			  u16 opcode, const char *name, const void *value,
			  size_t size, u8 cmp_op, u8 cmp_mode)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len;

	BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	ceph_pagelist_init(pagelist);

	payload_len = strlen(name);
	op->xattr.name_len = payload_len;
	ceph_pagelist_append(pagelist, name, payload_len);

	op->xattr.value_len = size;
	ceph_pagelist_append(pagelist, value, size);
	payload_len += size;

	op->xattr.cmp_op = cmp_op;
	op->xattr.cmp_mode = cmp_mode;

	ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
	op->indata_len = payload_len;
	return 0;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);

/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u8 watch_opcode)
{
	struct ceph_osd_req_op *op;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
	op->watch.cookie = cookie;
	op->watch.op = watch_opcode;
	op->watch.gen = 0;
}

void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
				unsigned int which,
				u64 expected_object_size,
				u64 expected_write_size)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      CEPH_OSD_OP_SETALLOCHINT,
						      0);

	op->alloc_hint.expected_object_size = expected_object_size;
	op->alloc_hint.expected_write_size = expected_write_size;

	/*
	 * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
	 * not worth a feature bit.  Set FAILOK per-op flag to make
	 * sure older osds don't trip over an unsupported opcode.
	 */
	op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);

static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
				   struct ceph_osd_data *osd_data)
{
	u64 length = ceph_osd_data_length(osd_data);

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		BUG_ON(length > (u64) SIZE_MAX);
		if (length)
			ceph_msg_data_add_pages(msg, osd_data->pages,
					length, osd_data->alignment);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		BUG_ON(!length);
		ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
		ceph_msg_data_add_bio(msg, &osd_data->bio_pos, length);
#endif
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BVECS) {
		ceph_msg_data_add_bvecs(msg, &osd_data->bvec_pos);
	} else {
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
	}
}
static u32 osd_req_encode_op(struct ceph_osd_op *dst,
			     const struct ceph_osd_req_op *src)
{
	if (WARN_ON(!osd_req_opcode_valid(src->op))) {
		pr_err("unrecognized osd opcode %d\n", src->op);

		return 0;
	}

	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_TRUNCATE:
		dst->extent.offset = cpu_to_le64(src->extent.offset);
		dst->extent.length = cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		break;
	case CEPH_OSD_OP_CALL:
		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
		break;
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(0);
		dst->watch.op = src->watch.op;
		dst->watch.gen = cpu_to_le32(src->watch.gen);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		break;
	case CEPH_OSD_OP_NOTIFY:
		dst->notify.cookie = cpu_to_le64(src->notify.cookie);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		dst->alloc_hint.expected_object_size =
		    cpu_to_le64(src->alloc_hint.expected_object_size);
		dst->alloc_hint.expected_write_size =
		    cpu_to_le64(src->alloc_hint.expected_write_size);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
		dst->xattr.cmp_op = src->xattr.cmp_op;
		dst->xattr.cmp_mode = src->xattr.cmp_mode;
		break;
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_DELETE:
		break;
	default:
		pr_err("unsupported osd opcode %s\n",
			ceph_osd_op_name(src->op));
		WARN_ON(1);

		return 0;
	}

	dst->op = cpu_to_le16(src->op);
	dst->flags = cpu_to_le32(src->flags);
	dst->payload_len = cpu_to_le32(src->indata_len);

	return src->indata_len;
}
/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       unsigned int which, int num_ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{
	struct ceph_osd_request *req;
	u64 objnum = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	int r;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
					GFP_NOFS);
	if (!req) {
		r = -ENOMEM;
		goto fail;
	}

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
	if (r)
		goto fail;

	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
		osd_req_op_init(req, which, opcode, 0);
	} else {
		u32 object_size = layout->object_size;
		u32 object_base = off - objoff;
		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
			if (truncate_size <= object_base) {
				truncate_size = 0;
			} else {
				truncate_size -= object_base;
				if (truncate_size > object_size)
					truncate_size = object_size;
			}
		}
		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
				       truncate_size, truncate_seq);
	}

	req->r_flags = flags;
	req->r_base_oloc.pool = layout->pool_id;
	req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

	req->r_snapid = vino.snap;
	if (flags & CEPH_OSD_FLAG_WRITE)
		req->r_data_offset = off;

	r = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (r)
		goto fail;

	return req;

fail:
	ceph_osdc_put_request(req);
	return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
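
/*
 * Truncate adjustment example (illustrative numbers): with 4M objects,
 * a write at off=9M lands in object 2 at objoff=1M, so object_base is
 * 8M.  If the file was recently truncated to truncate_size=10M, the
 * per-object truncate_size sent to the OSD is 10M - 8M = 2M; a
 * truncation point at or below 8M would clamp it to 0, and anything
 * past 12M would clamp it to the 4M object size.
 */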
/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
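
/*
 * DEFINE_RB_FUNCS(request, ...) generates static insert_request(),
 * lookup_request() and erase_request() helpers keyed by ->r_tid, e.g.:
 *
 *	insert_request(&osd->o_requests, req);
 *	req = lookup_request(&osd->o_requests, tid);
 *	erase_request(&osd->o_requests, req);
 *
 * The _mc variant tracks the same requests by ->r_mc_node for the map
 * check machinery.
 */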
/*
 * Call @fn on each OSD request as long as @fn returns 0.
 */
static void for_each_request(struct ceph_osd_client *osdc,
			int (*fn)(struct ceph_osd_request *req, void *arg),
			void *arg)
{
	struct rb_node *n, *p;

	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		for (p = rb_first(&osd->o_requests); p; ) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			p = rb_next(p);
			if (fn(req, arg))
				return;
		}
	}

	for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
		struct ceph_osd_request *req =
		    rb_entry(p, struct ceph_osd_request, r_node);

		p = rb_next(p);
		if (fn(req, arg))
			return;
	}
}

static bool osd_homeless(struct ceph_osd *osd)
{
	return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
	verify_osdc_locked(osd->o_osdc);

	return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
	refcount_set(&osd->o_ref, 1);
	RB_CLEAR_NODE(&osd->o_node);
	osd->o_requests = RB_ROOT;
	osd->o_linger_requests = RB_ROOT;
	osd->o_backoff_mappings = RB_ROOT;
	osd->o_backoffs_by_id = RB_ROOT;
	INIT_LIST_HEAD(&osd->o_osd_lru);
	INIT_LIST_HEAD(&osd->o_keepalive_item);
	osd->o_incarnation = 1;
	mutex_init(&osd->lock);
}

static void osd_cleanup(struct ceph_osd *osd)
{
	WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
	WARN_ON(!list_empty(&osd->o_osd_lru));
	WARN_ON(!list_empty(&osd->o_keepalive_item));

	if (osd->o_auth.authorizer) {
		WARN_ON(osd_homeless(osd));
		ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
	}
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	WARN_ON(onum == CEPH_HOMELESS_OSD);

	osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
	osd_init(osd);
	osd->o_osdc = osdc;
	osd->o_osd = onum;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (refcount_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
		     refcount_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
	     refcount_read(&osd->o_ref) - 1);
	if (refcount_dec_and_test(&osd->o_ref)) {
		osd_cleanup(osd);
		kfree(osd);
	}
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)

static void __move_osd_to_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));

	spin_lock(&osdc->osd_lru_lock);
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	spin_unlock(&osdc->osd_lru_lock);

	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests))
		__move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	spin_lock(&osdc->osd_lru_lock);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
	spin_unlock(&osdc->osd_lru_lock);
}
/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;

	verify_osdc_wrlocked(osdc);
	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	ceph_con_close(&osd->o_con);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* unlink_request() */

		dout(" reassigning req %p tid %llu\n", req, req->r_tid);
		unlink_request(osd, req);
		link_request(&osdc->homeless_osd, req);
	}
	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		n = rb_next(n); /* unlink_linger() */

		dout(" reassigning lreq %p linger_id %llu\n", lreq,
		     lreq->linger_id);
		unlink_linger(osd, lreq);
		link_linger(&osdc->homeless_osd, lreq);
	}
	clear_backoffs(osd);

	__remove_osd_from_lru(osd);
	erase_osd(&osdc->osds, osd);
	put_osd(osd);
}

/*
 * reset osd connect
 */
static int reopen_osd(struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests)) {
		close_osd(osd);
		return -ENODEV;
	}

	peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
	    !ceph_con_opened(&osd->o_con)) {
		struct rb_node *n;

		dout("osd addr hasn't changed and connection never opened, "
		     "letting msgr retry\n");
		/* touch each r_stamp for handle_timeout()'s benefit */
		for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
			struct ceph_osd_request *req =
			    rb_entry(n, struct ceph_osd_request, r_node);
			req->r_stamp = jiffies;
		}

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}

static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
					  bool wrlocked)
{
	struct ceph_osd *osd;

	if (wrlocked)
		verify_osdc_wrlocked(osdc);
	else
		verify_osdc_locked(osdc);

	if (o != CEPH_HOMELESS_OSD)
		osd = lookup_osd(&osdc->osds, o);
	else
		osd = &osdc->homeless_osd;
	if (!osd) {
		if (!wrlocked)
			return ERR_PTR(-EAGAIN);

		osd = create_osd(osdc, o);
		insert_osd(&osdc->osds, osd);
		ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
			      &osdc->osdmap->osd_addr[osd->o_osd]);
	}

	dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
	return osd;
}

/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(!req->r_tid || req->r_osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_request(&osd->o_requests, req);
	req->r_osd = osd;
}

static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(req->r_osd != osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	req->r_osd = NULL;
	erase_request(&osd->o_requests, req);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __pool_full(struct ceph_pg_pool_info *pi)
{
	return pi->flags & CEPH_POOL_FLAG_FULL;
}

static bool have_pool_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(n, struct ceph_pg_pool_info, node);

		if (__pool_full(pi))
			return true;
	}

	return false;
}

static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return __pool_full(pi);
}

/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
				    const struct ceph_osd_request_target *t,
				    struct ceph_pg_pool_info *pi)
{
	bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		       __pool_full(pi);

	WARN_ON(pi->id != t->target_oloc.pool);
	return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
	       ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
	       (osdc->osdmap->epoch < osdc->epoch_barrier);
}

enum calc_target_result {
	CALC_TARGET_NO_ACTION = 0,
	CALC_TARGET_NEED_RESEND,
	CALC_TARGET_POOL_DNE,
};
static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
					   struct ceph_osd_request_target *t,
					   struct ceph_connection *con,
					   bool any_change)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_pg pgid, last_pgid;
	struct ceph_osds up, acting;
	bool force_resend = false;
	bool unpaused = false;
	bool legacy_change = false;
	bool split = false;
	bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
	bool recovery_deletes = ceph_osdmap_flag(osdc,
						 CEPH_OSDMAP_RECOVERY_DELETES);
	enum calc_target_result ct_res;

	t->epoch = osdc->osdmap->epoch;
	pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
	if (!pi) {
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}

	if (osdc->osdmap->epoch == pi->last_force_request_resend) {
		if (t->last_force_resend < pi->last_force_request_resend) {
			t->last_force_resend = pi->last_force_request_resend;
			force_resend = true;
		} else if (t->last_force_resend == 0) {
			force_resend = true;
		}
	}

	/* apply tiering */
	ceph_oid_copy(&t->target_oid, &t->base_oid);
	ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
	if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
		if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
			t->target_oloc.pool = pi->read_tier;
		if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
			t->target_oloc.pool = pi->write_tier;

		pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
		if (!pi) {
			t->osd = CEPH_HOMELESS_OSD;
			ct_res = CALC_TARGET_POOL_DNE;
			goto out;
		}
	}

	__ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc, &pgid);
	last_pgid.pool = pgid.pool;
	last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);

	ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
	if (any_change &&
	    ceph_is_new_interval(&t->acting,
				 &acting,
				 &t->up,
				 &up,
				 t->size,
				 pi->size,
				 t->min_size,
				 pi->min_size,
				 t->pg_num,
				 pi->pg_num,
				 t->sort_bitwise,
				 sort_bitwise,
				 t->recovery_deletes,
				 recovery_deletes,
				 &last_pgid))
		force_resend = true;

	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
		t->paused = false;
		unpaused = true;
	}
	legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
			ceph_osds_changed(&t->acting, &acting, any_change);
	if (t->pg_num)
		split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);

	if (legacy_change || force_resend || split) {
		t->pgid = pgid; /* struct */
		ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
		ceph_osds_copy(&t->acting, &acting);
		ceph_osds_copy(&t->up, &up);
		t->size = pi->size;
		t->min_size = pi->min_size;
		t->pg_num = pi->pg_num;
		t->pg_num_mask = pi->pg_num_mask;
		t->sort_bitwise = sort_bitwise;
		t->recovery_deletes = recovery_deletes;

		t->osd = acting.primary;
	}

	if (unpaused || legacy_change || force_resend || split)
		ct_res = CALC_TARGET_NEED_RESEND;
	else
		ct_res = CALC_TARGET_NO_ACTION;

out:
	dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
	     legacy_change, force_resend, split, ct_res, t->osd);
	return ct_res;
}
static struct ceph_spg_mapping *alloc_spg_mapping(void)
{
	struct ceph_spg_mapping *spg;

	spg = kmalloc(sizeof(*spg), GFP_NOIO);
	if (!spg)
		return NULL;

	RB_CLEAR_NODE(&spg->node);
	spg->backoffs = RB_ROOT;
	return spg;
}

static void free_spg_mapping(struct ceph_spg_mapping *spg)
{
	WARN_ON(!RB_EMPTY_NODE(&spg->node));
	WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));

	kfree(spg);
}

/*
 * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
 * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
 * defined only within a specific spgid; it does not pass anything to
 * children on split, or to another primary.
 */
DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
		 RB_BYPTR, const struct ceph_spg *, node)

static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
{
	return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
}

static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
				   void **pkey, size_t *pkey_len)
{
	if (hoid->key_len) {
		*pkey = hoid->key;
		*pkey_len = hoid->key_len;
	} else {
		*pkey = hoid->oid;
		*pkey_len = hoid->oid_len;
	}
}

static int compare_names(const void *name1, size_t name1_len,
			 const void *name2, size_t name2_len)
{
	int ret;

	ret = memcmp(name1, name2, min(name1_len, name2_len));
	if (!ret) {
		if (name1_len < name2_len)
			ret = -1;
		else if (name1_len > name2_len)
			ret = 1;
	}
	return ret;
}

static int hoid_compare(const struct ceph_hobject_id *lhs,
			const struct ceph_hobject_id *rhs)
{
	void *effective_key1, *effective_key2;
	size_t effective_key1_len, effective_key2_len;
	int ret;

	if (lhs->is_max < rhs->is_max)
		return -1;
	if (lhs->is_max > rhs->is_max)
		return 1;

	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;

	if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
		return -1;
	if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
		return 1;

	ret = compare_names(lhs->nspace, lhs->nspace_len,
			    rhs->nspace, rhs->nspace_len);
	if (ret)
		return ret;

	hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
	hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
	ret = compare_names(effective_key1, effective_key1_len,
			    effective_key2, effective_key2_len);
	if (ret)
		return ret;

	ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
	if (ret)
		return ret;

	if (lhs->snapid < rhs->snapid)
		return -1;
	if (lhs->snapid > rhs->snapid)
		return 1;

	return 0;
}

/*
 * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
 * compat stuff here.
 *
 * Assumes @hoid is zero-initialized.
 */
static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
				  &struct_len);
	if (ret)
		return ret;

	if (struct_v < 4) {
		pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
		goto e_inval;
	}

	hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
						GFP_NOIO);
	if (IS_ERR(hoid->key)) {
		ret = PTR_ERR(hoid->key);
		hoid->key = NULL;
		return ret;
	}

	hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
						GFP_NOIO);
	if (IS_ERR(hoid->oid)) {
		ret = PTR_ERR(hoid->oid);
		hoid->oid = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
	ceph_decode_32_safe(p, end, hoid->hash, e_inval);
	ceph_decode_8_safe(p, end, hoid->is_max, e_inval);

	hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
						   GFP_NOIO);
	if (IS_ERR(hoid->nspace)) {
		ret = PTR_ERR(hoid->nspace);
		hoid->nspace = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->pool, e_inval);

	ceph_hoid_build_hash_cache(hoid);

	return 0;

e_inval:
	return -EINVAL;
}

static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
{
	return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
	       4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
}

static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
{
	ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
	ceph_encode_string(p, end, hoid->key, hoid->key_len);
	ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
	ceph_encode_64(p, hoid->snapid);
	ceph_encode_32(p, hoid->hash);
	ceph_encode_8(p, hoid->is_max);
	ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
	ceph_encode_64(p, hoid->pool);
}

static void free_hoid(struct ceph_hobject_id *hoid)
{
	if (hoid) {
		kfree(hoid->key);
		kfree(hoid->oid);
		kfree(hoid->nspace);
		kfree(hoid);
	}
}

static struct ceph_osd_backoff *alloc_backoff(void)
{
	struct ceph_osd_backoff *backoff;

	backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
	if (!backoff)
		return NULL;

	RB_CLEAR_NODE(&backoff->spg_node);
	RB_CLEAR_NODE(&backoff->id_node);
	return backoff;
}

static void free_backoff(struct ceph_osd_backoff *backoff)
{
	WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
	WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));

	free_hoid(backoff->begin);
	free_hoid(backoff->end);
	kfree(backoff);
}

/*
 * Within a specific spgid, backoffs are managed by ->begin hoid.
 */
DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare,
			RB_BYVAL, spg_node);

static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root,
					    const struct ceph_hobject_id *hoid)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct ceph_osd_backoff *cur =
		    rb_entry(n, struct ceph_osd_backoff, spg_node);
		int cmp;

		cmp = hoid_compare(hoid, cur->begin);
		if (cmp < 0) {
			n = n->rb_left;
		} else if (cmp > 0) {
			if (hoid_compare(hoid, cur->end) < 0)
				return cur;

			n = n->rb_right;
		} else {
			return cur;
		}
	}

	return NULL;
}
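
/*
 * In other words, a backoff covers the half-open range [begin, end):
 * the walk descends by ->begin and, when to the right of a node, a
 * hoid strictly below that node's ->end is contained.  E.g. with
 * backoffs [a, c) and [d, f), looking up b returns the first backoff,
 * while looking up c (or anything in [c, d)) returns NULL.
 */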
/*
 * Each backoff has a unique id within its OSD session.
 */
DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node)

static void clear_backoffs(struct ceph_osd *osd)
{
	while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) {
		struct ceph_spg_mapping *spg =
		    rb_entry(rb_first(&osd->o_backoff_mappings),
			     struct ceph_spg_mapping, node);

		while (!RB_EMPTY_ROOT(&spg->backoffs)) {
			struct ceph_osd_backoff *backoff =
			    rb_entry(rb_first(&spg->backoffs),
				     struct ceph_osd_backoff, spg_node);

			erase_backoff(&spg->backoffs, backoff);
			erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
			free_backoff(backoff);
		}
		erase_spg_mapping(&osd->o_backoff_mappings, spg);
		free_spg_mapping(spg);
	}
}

/*
 * Set up a temporary, non-owning view into @t.
 */
static void hoid_fill_from_target(struct ceph_hobject_id *hoid,
				  const struct ceph_osd_request_target *t)
{
	hoid->key = NULL;
	hoid->key_len = 0;
	hoid->oid = t->target_oid.name;
	hoid->oid_len = t->target_oid.name_len;
	hoid->snapid = CEPH_NOSNAP;
	hoid->hash = t->pgid.seed;
	hoid->is_max = false;
	if (t->target_oloc.pool_ns) {
		hoid->nspace = t->target_oloc.pool_ns->str;
		hoid->nspace_len = t->target_oloc.pool_ns->len;
	} else {
		hoid->nspace = NULL;
		hoid->nspace_len = 0;
	}
	hoid->pool = t->target_oloc.pool;
	ceph_hoid_build_hash_cache(hoid);
}

static bool should_plug_request(struct ceph_osd_request *req)
{
	struct ceph_osd *osd = req->r_osd;
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct ceph_hobject_id hoid;

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid);
	if (!spg)
		return false;

	hoid_fill_from_target(&hoid, &req->r_t);
	backoff = lookup_containing_backoff(&spg->backoffs, &hoid);
	if (!backoff)
		return false;

	dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n",
	     __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
	     backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id);
	return true;
}

static void setup_request_data(struct ceph_osd_request *req,
			       struct ceph_msg *msg)
{
	u32 data_len = 0;
	int i;

	if (!list_empty(&msg->data))
		return;

	WARN_ON(msg->data_length);
	for (i = 0; i < req->r_num_ops; i++) {
		struct ceph_osd_req_op *op = &req->r_ops[i];

		switch (op->op) {
		/* request */
		case CEPH_OSD_OP_WRITE:
		case CEPH_OSD_OP_WRITEFULL:
			WARN_ON(op->indata_len != op->extent.length);
			ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
			break;
		case CEPH_OSD_OP_SETXATTR:
		case CEPH_OSD_OP_CMPXATTR:
			WARN_ON(op->indata_len != op->xattr.name_len +
						  op->xattr.value_len);
			ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
			break;
		case CEPH_OSD_OP_NOTIFY_ACK:
			ceph_osdc_msg_data_add(msg,
					       &op->notify_ack.request_data);
			break;

		/* reply */
		case CEPH_OSD_OP_STAT:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->raw_data_in);
			break;
		case CEPH_OSD_OP_READ:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->extent.osd_data);
			break;
		case CEPH_OSD_OP_LIST_WATCHERS:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->list_watchers.response_data);
			break;

		/* both */
		case CEPH_OSD_OP_CALL:
			WARN_ON(op->indata_len != op->cls.class_len +
						  op->cls.method_len +
						  op->cls.indata_len);
			ceph_osdc_msg_data_add(msg, &op->cls.request_info);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(msg, &op->cls.request_data);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->cls.response_data);
			break;
		case CEPH_OSD_OP_NOTIFY:
			ceph_osdc_msg_data_add(msg,
					       &op->notify.request_data);
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->notify.response_data);
			break;
		}

		data_len += op->indata_len;
	}

	WARN_ON(data_len != msg->data_length);
}

static void encode_pgid(void **p, const struct ceph_pg *pgid)
{
	ceph_encode_8(p, 1);
	ceph_encode_64(p, pgid->pool);
	ceph_encode_32(p, pgid->seed);
	ceph_encode_32(p, -1); /* preferred */
}

static void encode_spgid(void **p, const struct ceph_spg *spgid)
{
	ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1);
	encode_pgid(p, &spgid->pgid);
	ceph_encode_8(p, spgid->shard);
}

static void encode_oloc(void **p, void *end,
			const struct ceph_object_locator *oloc)
{
	ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc));
	ceph_encode_64(p, oloc->pool);
	ceph_encode_32(p, -1); /* preferred */
	ceph_encode_32(p, 0);  /* key len */
	if (oloc->pool_ns)
		ceph_encode_string(p, end, oloc->pool_ns->str,
				   oloc->pool_ns->len);
	else
		ceph_encode_32(p, 0);
}
static void encode_request_partial(struct ceph_osd_request *req,
				   struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front_alloc_len;
	u32 data_len = 0;
	int i;

	if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
		/* snapshots aren't writeable */
		WARN_ON(req->r_snapid != CEPH_NOSNAP);
	} else {
		WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
			req->r_data_offset || req->r_snapc);
	}

	setup_request_data(req, msg);

	encode_spgid(&p, &req->r_t.spgid); /* actual spg */
	ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
	ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
	ceph_encode_32(&p, req->r_flags);

	/* reqid */
	ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid));
	memset(p, 0, sizeof(struct ceph_osd_reqid));
	p += sizeof(struct ceph_osd_reqid);

	/* trace */
	memset(p, 0, sizeof(struct ceph_blkin_trace_info));
	p += sizeof(struct ceph_blkin_trace_info);

	ceph_encode_32(&p, 0); /* client_inc, always 0 */
	ceph_encode_timespec64(p, &req->r_mtime);
	p += sizeof(struct ceph_timespec);

	encode_oloc(&p, end, &req->r_t.target_oloc);
	ceph_encode_string(&p, end, req->r_t.target_oid.name,
			   req->r_t.target_oid.name_len);

	/* ops, can imply data */
	ceph_encode_16(&p, req->r_num_ops);
	for (i = 0; i < req->r_num_ops; i++) {
		data_len += osd_req_encode_op(p, &req->r_ops[i]);
		p += sizeof(struct ceph_osd_op);
	}

	ceph_encode_64(&p, req->r_snapid); /* snapid */
	if (req->r_snapc) {
		ceph_encode_64(&p, req->r_snapc->seq);
		ceph_encode_32(&p, req->r_snapc->num_snaps);
		for (i = 0; i < req->r_snapc->num_snaps; i++)
			ceph_encode_64(&p, req->r_snapc->snaps[i]);
	} else {
		ceph_encode_64(&p, 0); /* snap_seq */
		ceph_encode_32(&p, 0); /* snaps len */
	}

	ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
	BUG_ON(p > end - 8); /* space for features */

	msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */
	/* front_len is finalized in encode_request_finish() */
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	msg->hdr.data_len = cpu_to_le32(data_len);
	/*
	 * The header "data_off" is a hint to the receiver allowing it
	 * to align received data into its buffers such that there's no
	 * need to re-copy it before writing it to disk (direct I/O).
	 */
	msg->hdr.data_off = cpu_to_le16(req->r_data_offset);

	dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg,
	     req->r_t.target_oid.name, req->r_t.target_oid.name_len);
}
static void encode_request_finish(struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const partial_end = p + msg->front.iov_len;
	void *const end = p + msg->front_alloc_len;

	if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) {
		/* luminous OSD -- encode features and be done */
		p = partial_end;
		ceph_encode_64(&p, msg->con->peer_features);
	} else {
		struct {
			char spgid[CEPH_ENCODING_START_BLK_LEN +
				   CEPH_PGID_ENCODING_LEN + 1];
			__le32 hash;
			__le32 epoch;
			__le32 flags;
			char reqid[CEPH_ENCODING_START_BLK_LEN +
				   sizeof(struct ceph_osd_reqid)];
			char trace[sizeof(struct ceph_blkin_trace_info)];
			__le32 client_inc;
			struct ceph_timespec mtime;
		} __packed head;
		struct ceph_pg pgid;
		void *oloc, *oid, *tail;
		int oloc_len, oid_len, tail_len;
		int len;

		/*
		 * Pre-luminous OSD -- reencode v8 into v4 using @head
		 * as a temporary buffer.  Encode the raw PG; the rest
		 * is just a matter of moving oloc, oid and tail blobs
		 * around.
		 */
		memcpy(&head, p, sizeof(head));
		p += sizeof(head);

		oloc = p;
		p += CEPH_ENCODING_START_BLK_LEN;
		pgid.pool = ceph_decode_64(&p);
		p += 4 + 4; /* preferred, key len */
		len = ceph_decode_32(&p);
		p += len;   /* nspace */
		oloc_len = p - oloc;

		oid = p;
		len = ceph_decode_32(&p);
		p += len;
		oid_len = p - oid;

		tail = p;
		tail_len = partial_end - p;

		p = msg->front.iov_base;
		ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc));
		ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch));
		ceph_encode_copy(&p, &head.flags, sizeof(head.flags));
		ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime));

		/* reassert_version */
		memset(p, 0, sizeof(struct ceph_eversion));
		p += sizeof(struct ceph_eversion);

		/* oloc */
		memmove(p, oloc, oloc_len);
		p += oloc_len;

		pgid.seed = le32_to_cpu(head.hash);
		encode_pgid(&p, &pgid); /* raw pg */

		/* oid */
		memmove(p, oid, oid_len);
		p += oid_len;

		/* tail -- ops, snapid, snapc, retry_attempt */
		memmove(p, tail, tail_len);
		p += tail_len;

		msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
	}

	BUG_ON(p > end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg,
	     le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len),
	     le16_to_cpu(msg->hdr.version));
}
2118 * @req has to be assigned a tid and registered.
2120 static void send_request(struct ceph_osd_request *req)
2122 struct ceph_osd *osd = req->r_osd;
2124 verify_osd_locked(osd);
2125 WARN_ON(osd->o_osd != req->r_t.osd);
2128 if (should_plug_request(req))
2132 * We may have a previously queued request message hanging
2133 * around. Cancel it to avoid corrupting the msgr.
2136 ceph_msg_revoke(req->r_request);
2138 req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
2139 if (req->r_attempts)
2140 req->r_flags |= CEPH_OSD_FLAG_RETRY;
2142 WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);
2144 encode_request_partial(req, req->r_request);
2146 dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n",
2147 __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
2148 req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
2149 req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags,
2152 req->r_t.paused = false;
2153 req->r_stamp = jiffies;
2156 req->r_sent = osd->o_incarnation;
2157 req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
2158 ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
2161 static void maybe_request_map(struct ceph_osd_client *osdc)
2163 bool continuous = false;
2165 verify_osdc_locked(osdc);
2166 WARN_ON(!osdc->osdmap->epoch);
2168 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2169 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
2170 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2171 dout("%s osdc %p continuous\n", __func__, osdc);
2174 dout("%s osdc %p onetime\n", __func__, osdc);
2177 if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
2178 osdc->osdmap->epoch + 1, continuous))
2179 ceph_monc_renew_subs(&osdc->client->monc);
2182 static void complete_request(struct ceph_osd_request *req, int err);
2183 static void send_map_check(struct ceph_osd_request *req);
2185 static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
2187 struct ceph_osd_client *osdc = req->r_osdc;
2188 struct ceph_osd *osd;
2189 enum calc_target_result ct_res;
2191 bool need_send = false;
2192 bool promoted = false;
2194 WARN_ON(req->r_tid);
2195 dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
2198 ct_res = calc_target(osdc, &req->r_t, NULL, false);
2199 if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
2202 osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
2204 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
2208 if (osdc->abort_err) {
2209 dout("req %p abort_err %d\n", req, osdc->abort_err);
2210 err = osdc->abort_err;
2211 } else if (osdc->osdmap->epoch < osdc->epoch_barrier) {
2212 dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
2213 osdc->epoch_barrier);
2214 req->r_t.paused = true;
2215 maybe_request_map(osdc);
2216 } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2217 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2218 dout("req %p pausewr\n", req);
2219 req->r_t.paused = true;
2220 maybe_request_map(osdc);
2221 } else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
2222 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2223 dout("req %p pauserd\n", req);
2224 req->r_t.paused = true;
2225 maybe_request_map(osdc);
2226 } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2227 !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
2228 CEPH_OSD_FLAG_FULL_FORCE)) &&
2229 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2230 pool_full(osdc, req->r_t.base_oloc.pool))) {
2231 dout("req %p full/pool_full\n", req);
2232 if (osdc->abort_on_full) {
2233 err = -ENOSPC;
2234 } else {
2235 pr_warn_ratelimited("FULL or reached pool quota\n");
2236 req->r_t.paused = true;
2237 maybe_request_map(osdc);
2239 } else if (!osd_homeless(osd)) {
2240 need_send = true;
2241 } else {
2242 maybe_request_map(osdc);
2243 }
2245 mutex_lock(&osd->lock);
2247 * Assign the tid atomically with send_request() to protect
2248 * multiple writes to the same object from racing with each
2249 * other, resulting in out of order ops on the OSDs.
2251 req->r_tid = atomic64_inc_return(&osdc->last_tid);
2252 link_request(osd, req);
2253 if (need_send)
2254 send_request(req);
2255 else if (err)
2256 complete_request(req, err);
2257 mutex_unlock(&osd->lock);
2259 if (!err && ct_res == CALC_TARGET_POOL_DNE)
2260 send_map_check(req);
2262 if (promoted)
2263 downgrade_write(&osdc->lock);
2264 return;
2266 promote:
2267 up_read(&osdc->lock);
2268 down_write(&osdc->lock);
2269 wrlocked = true;
2270 promoted = true;
2271 goto again;
2272 }
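/*
 * Note (added commentary, not in the original): __submit_request()
 * gates a request on, in order: osdc->abort_err, the epoch barrier,
 * the PAUSEWR/PAUSERD osdmap flags, and cluster/pool fullness.  A
 * paused request stays linked (possibly to the homeless OSD) and is
 * resent by kick_requests() once a new map clears the condition.
 */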
2274 static void account_request(struct ceph_osd_request *req)
2276 WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
2277 WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));
2279 req->r_flags |= CEPH_OSD_FLAG_ONDISK;
2280 atomic_inc(&req->r_osdc->num_requests);
2282 req->r_start_stamp = jiffies;
2285 static void submit_request(struct ceph_osd_request *req, bool wrlocked)
2287 ceph_osdc_get_request(req);
2288 account_request(req);
2289 __submit_request(req, wrlocked);
2292 static void finish_request(struct ceph_osd_request *req)
2294 struct ceph_osd_client *osdc = req->r_osdc;
2296 WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
2297 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2300 unlink_request(req->r_osd, req);
2301 atomic_dec(&osdc->num_requests);
2304 * If an OSD has failed or returned and a request has been sent
2305 * twice, it's possible to get a reply and end up here while the
2306 * request message is queued for delivery. We will ignore the
2307 * reply, so not a big deal, but better to try and catch it.
2309 ceph_msg_revoke(req->r_request);
2310 ceph_msg_revoke_incoming(req->r_reply);
2313 static void __complete_request(struct ceph_osd_request *req)
2315 dout("%s req %p tid %llu cb %pf result %d\n", __func__, req,
2316 req->r_tid, req->r_callback, req->r_result);
2318 if (req->r_callback)
2319 req->r_callback(req);
2320 complete_all(&req->r_completion);
2321 ceph_osdc_put_request(req);
2324 static void complete_request_workfn(struct work_struct *work)
2326 struct ceph_osd_request *req =
2327 container_of(work, struct ceph_osd_request, r_complete_work);
2329 __complete_request(req);
2333 * This is open-coded in handle_reply().
2335 static void complete_request(struct ceph_osd_request *req, int err)
2337 dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2339 req->r_result = err;
2340 finish_request(req);
2342 INIT_WORK(&req->r_complete_work, complete_request_workfn);
2343 queue_work(req->r_osdc->completion_wq, &req->r_complete_work);
2346 static void cancel_map_check(struct ceph_osd_request *req)
2348 struct ceph_osd_client *osdc = req->r_osdc;
2349 struct ceph_osd_request *lookup_req;
2351 verify_osdc_wrlocked(osdc);
2353 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2354 if (!lookup_req)
2355 return;
2357 WARN_ON(lookup_req != req);
2358 erase_request_mc(&osdc->map_checks, req);
2359 ceph_osdc_put_request(req);
2362 static void cancel_request(struct ceph_osd_request *req)
2364 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2366 cancel_map_check(req);
2367 finish_request(req);
2368 complete_all(&req->r_completion);
2369 ceph_osdc_put_request(req);
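/*
 * Note (added commentary, not in the original): cancel_request() above
 * wakes waiters without setting ->r_result, while abort_request() below
 * completes the request with an error through complete_request().
 */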
2372 static void abort_request(struct ceph_osd_request *req, int err)
2374 dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2376 cancel_map_check(req);
2377 complete_request(req, err);
2380 static int abort_fn(struct ceph_osd_request *req, void *arg)
2382 int err = *(int *)arg;
2384 abort_request(req, err);
2385 return 0; /* continue iteration */
2389 * Abort all in-flight requests with @err and arrange for all future
2390 * requests to be failed immediately.
2392 void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err)
2394 dout("%s osdc %p err %d\n", __func__, osdc, err);
2395 down_write(&osdc->lock);
2396 for_each_request(osdc, abort_fn, &err);
2397 osdc->abort_err = err;
2398 up_write(&osdc->lock);
2400 EXPORT_SYMBOL(ceph_osdc_abort_requests);
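/*
 * Illustrative sketch (not in the original): a client being forcibly
 * torn down might fail everything in flight and everything submitted
 * later with a single call, e.g.:
 *
 *	ceph_osdc_abort_requests(osdc, -EIO);
 *
 * Subsequent submissions then complete immediately with -EIO via the
 * osdc->abort_err check in __submit_request().
 */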
2402 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2404 if (likely(eb > osdc->epoch_barrier)) {
2405 dout("updating epoch_barrier from %u to %u\n",
2406 osdc->epoch_barrier, eb);
2407 osdc->epoch_barrier = eb;
2408 /* Request map if we're not to the barrier yet */
2409 if (eb > osdc->osdmap->epoch)
2410 maybe_request_map(osdc);
2414 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2416 down_read(&osdc->lock);
2417 if (unlikely(eb > osdc->epoch_barrier)) {
2418 up_read(&osdc->lock);
2419 down_write(&osdc->lock);
2420 update_epoch_barrier(osdc, eb);
2421 up_write(&osdc->lock);
2422 } else {
2423 up_read(&osdc->lock);
2424 }
2426 EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
2429 * We can end up releasing caps as a result of abort_request().
2430 * In that case, we probably want to ensure that the cap release message
2431 * has an updated epoch barrier in it, so set the epoch barrier prior to
2432 * aborting the first request.
2434 static int abort_on_full_fn(struct ceph_osd_request *req, void *arg)
2436 struct ceph_osd_client *osdc = req->r_osdc;
2437 bool *victims = arg;
2439 if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2440 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2441 pool_full(osdc, req->r_t.base_oloc.pool))) {
2442 if (!*victims) {
2443 update_epoch_barrier(osdc, osdc->osdmap->epoch);
2444 *victims = true;
2445 }
2446 abort_request(req, -ENOSPC);
2449 return 0; /* continue iteration */
2453 * Drop all pending requests that are stalled waiting on a full condition to
2454 * clear, and complete them with ENOSPC as the return code. Set the
2455 * osdc->epoch_barrier to the latest map epoch that we've seen if any were
2456 * aborted.
2458 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
2460 bool victims = false;
2462 if (osdc->abort_on_full &&
2463 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc)))
2464 for_each_request(osdc, abort_on_full_fn, &victims);
2467 static void check_pool_dne(struct ceph_osd_request *req)
2469 struct ceph_osd_client *osdc = req->r_osdc;
2470 struct ceph_osdmap *map = osdc->osdmap;
2472 verify_osdc_wrlocked(osdc);
2473 WARN_ON(!map->epoch);
2475 if (req->r_attempts) {
2477 * We sent a request earlier, which means that
2478 * previously the pool existed, and now it does not
2479 * (i.e., it was deleted).
2481 req->r_map_dne_bound = map->epoch;
2482 dout("%s req %p tid %llu pool disappeared\n", __func__, req,
2483 req->r_tid);
2484 } else {
2485 dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
2486 req, req->r_tid, req->r_map_dne_bound, map->epoch);
2489 if (req->r_map_dne_bound) {
2490 if (map->epoch >= req->r_map_dne_bound) {
2491 /* we had a new enough map */
2492 pr_info_ratelimited("tid %llu pool does not exist\n",
2493 req->r_tid);
2494 complete_request(req, -ENOENT);
2495 }
2496 } else {
2497 send_map_check(req);
2498 }
2501 static void map_check_cb(struct ceph_mon_generic_request *greq)
2503 struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2504 struct ceph_osd_request *req;
2505 u64 tid = greq->private_data;
2507 WARN_ON(greq->result || !greq->u.newest);
2509 down_write(&osdc->lock);
2510 req = lookup_request_mc(&osdc->map_checks, tid);
2511 if (!req) {
2512 dout("%s tid %llu dne\n", __func__, tid);
2513 goto out_unlock;
2514 }
2516 dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
2517 req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
2518 if (!req->r_map_dne_bound)
2519 req->r_map_dne_bound = greq->u.newest;
2520 erase_request_mc(&osdc->map_checks, req);
2521 check_pool_dne(req);
2523 ceph_osdc_put_request(req);
2524 out_unlock:
2525 up_write(&osdc->lock);
2528 static void send_map_check(struct ceph_osd_request *req)
2530 struct ceph_osd_client *osdc = req->r_osdc;
2531 struct ceph_osd_request *lookup_req;
2534 verify_osdc_wrlocked(osdc);
2536 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2537 if (lookup_req) {
2538 WARN_ON(lookup_req != req);
2539 return;
2540 }
2542 ceph_osdc_get_request(req);
2543 insert_request_mc(&osdc->map_checks, req);
2544 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
2545 map_check_cb, req->r_tid);
2546 WARN_ON(ret);
2550 * lingering requests, watch/notify v2 infrastructure
2552 static void linger_release(struct kref *kref)
2554 struct ceph_osd_linger_request *lreq =
2555 container_of(kref, struct ceph_osd_linger_request, kref);
2557 dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
2558 lreq->reg_req, lreq->ping_req);
2559 WARN_ON(!RB_EMPTY_NODE(&lreq->node));
2560 WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
2561 WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
2562 WARN_ON(!list_empty(&lreq->scan_item));
2563 WARN_ON(!list_empty(&lreq->pending_lworks));
2567 ceph_osdc_put_request(lreq->reg_req);
2569 ceph_osdc_put_request(lreq->ping_req);
2570 target_destroy(&lreq->t);
2571 kfree(lreq);
2574 static void linger_put(struct ceph_osd_linger_request *lreq)
2577 kref_put(&lreq->kref, linger_release);
2580 static struct ceph_osd_linger_request *
2581 linger_get(struct ceph_osd_linger_request *lreq)
2583 kref_get(&lreq->kref);
2584 return lreq;
2587 static struct ceph_osd_linger_request *
2588 linger_alloc(struct ceph_osd_client *osdc)
2590 struct ceph_osd_linger_request *lreq;
2592 lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
2593 if (!lreq)
2594 return NULL;
2596 kref_init(&lreq->kref);
2597 mutex_init(&lreq->lock);
2598 RB_CLEAR_NODE(&lreq->node);
2599 RB_CLEAR_NODE(&lreq->osdc_node);
2600 RB_CLEAR_NODE(&lreq->mc_node);
2601 INIT_LIST_HEAD(&lreq->scan_item);
2602 INIT_LIST_HEAD(&lreq->pending_lworks);
2603 init_completion(&lreq->reg_commit_wait);
2604 init_completion(&lreq->notify_finish_wait);
2606 lreq->osdc = osdc;
2607 target_init(&lreq->t);
2609 dout("%s lreq %p\n", __func__, lreq);
2610 return lreq;
2613 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
2614 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
2615 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
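/*
 * Overview (added commentary, not in the original): a linger request is
 * tracked in up to three trees -- per-OSD (node), per-client (osdc_node)
 * and the map-check tree (mc_node).  reg_req (re)establishes the watch
 * or notify on the OSD, ping_req keeps an established watch alive from
 * handle_timeout(), and watch errors are reported asynchronously
 * through queue_watch_error().
 */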
2618 * Create linger request <-> OSD session relation.
2620 * @lreq has to be registered, @osd may be homeless.
2622 static void link_linger(struct ceph_osd *osd,
2623 struct ceph_osd_linger_request *lreq)
2625 verify_osd_locked(osd);
2626 WARN_ON(!lreq->linger_id || lreq->osd);
2627 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2628 osd->o_osd, lreq, lreq->linger_id);
2630 if (!osd_homeless(osd))
2631 __remove_osd_from_lru(osd);
2632 else
2633 atomic_inc(&osd->o_osdc->num_homeless);
2635 get_osd(osd);
2636 insert_linger(&osd->o_linger_requests, lreq);
2637 lreq->osd = osd;
2640 static void unlink_linger(struct ceph_osd *osd,
2641 struct ceph_osd_linger_request *lreq)
2643 verify_osd_locked(osd);
2644 WARN_ON(lreq->osd != osd);
2645 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2646 osd->o_osd, lreq, lreq->linger_id);
2649 erase_linger(&osd->o_linger_requests, lreq);
2650 lreq->osd = NULL;
2652 if (!osd_homeless(osd))
2653 maybe_move_osd_to_lru(osd);
2654 else
2655 atomic_dec(&osd->o_osdc->num_homeless);
2656 put_osd(osd);
2658 static bool __linger_registered(struct ceph_osd_linger_request *lreq)
2660 verify_osdc_locked(lreq->osdc);
2662 return !RB_EMPTY_NODE(&lreq->osdc_node);
2665 static bool linger_registered(struct ceph_osd_linger_request *lreq)
2667 struct ceph_osd_client *osdc = lreq->osdc;
2668 bool registered;
2670 down_read(&osdc->lock);
2671 registered = __linger_registered(lreq);
2672 up_read(&osdc->lock);
2674 return registered;
2677 static void linger_register(struct ceph_osd_linger_request *lreq)
2679 struct ceph_osd_client *osdc = lreq->osdc;
2681 verify_osdc_wrlocked(osdc);
2682 WARN_ON(lreq->linger_id);
2684 linger_get(lreq);
2685 lreq->linger_id = ++osdc->last_linger_id;
2686 insert_linger_osdc(&osdc->linger_requests, lreq);
2689 static void linger_unregister(struct ceph_osd_linger_request *lreq)
2691 struct ceph_osd_client *osdc = lreq->osdc;
2693 verify_osdc_wrlocked(osdc);
2695 erase_linger_osdc(&osdc->linger_requests, lreq);
2696 linger_put(lreq);
2699 static void cancel_linger_request(struct ceph_osd_request *req)
2701 struct ceph_osd_linger_request *lreq = req->r_priv;
2703 WARN_ON(!req->r_linger);
2704 cancel_request(req);
2705 linger_put(lreq);
2708 struct linger_work {
2709 struct work_struct work;
2710 struct ceph_osd_linger_request *lreq;
2711 struct list_head pending_item;
2712 unsigned long queued_stamp;
2714 union {
2715 struct {
2716 u64 notify_id;
2717 u64 notifier_id;
2718 void *payload; /* points into @msg front */
2719 size_t payload_len;
2721 struct ceph_msg *msg; /* for ceph_msg_put() */
2722 } notify;
2723 struct {
2724 int err;
2725 } error;
2726 };
2727 };
2729 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
2732 struct linger_work *lwork;
2734 lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
2735 if (!lwork)
2736 return NULL;
2738 INIT_WORK(&lwork->work, workfn);
2739 INIT_LIST_HEAD(&lwork->pending_item);
2740 lwork->lreq = linger_get(lreq);
2742 return lwork;
2745 static void lwork_free(struct linger_work *lwork)
2747 struct ceph_osd_linger_request *lreq = lwork->lreq;
2749 mutex_lock(&lreq->lock);
2750 list_del(&lwork->pending_item);
2751 mutex_unlock(&lreq->lock);
2753 linger_put(lreq);
2754 kfree(lwork);
2757 static void lwork_queue(struct linger_work *lwork)
2759 struct ceph_osd_linger_request *lreq = lwork->lreq;
2760 struct ceph_osd_client *osdc = lreq->osdc;
2762 verify_lreq_locked(lreq);
2763 WARN_ON(!list_empty(&lwork->pending_item));
2765 lwork->queued_stamp = jiffies;
2766 list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
2767 queue_work(osdc->notify_wq, &lwork->work);
2770 static void do_watch_notify(struct work_struct *w)
2772 struct linger_work *lwork = container_of(w, struct linger_work, work);
2773 struct ceph_osd_linger_request *lreq = lwork->lreq;
2775 if (!linger_registered(lreq)) {
2776 dout("%s lreq %p not registered\n", __func__, lreq);
2777 goto out;
2778 }
2780 WARN_ON(!lreq->is_watch);
2781 dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
2782 __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
2783 lwork->notify.payload_len);
2784 lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
2785 lwork->notify.notifier_id, lwork->notify.payload,
2786 lwork->notify.payload_len);
2788 out:
2789 ceph_msg_put(lwork->notify.msg);
2790 lwork_free(lwork);
2793 static void do_watch_error(struct work_struct *w)
2795 struct linger_work *lwork = container_of(w, struct linger_work, work);
2796 struct ceph_osd_linger_request *lreq = lwork->lreq;
2798 if (!linger_registered(lreq)) {
2799 dout("%s lreq %p not registered\n", __func__, lreq);
2800 goto out;
2801 }
2803 dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
2804 lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
2806 out:
2807 lwork_free(lwork);
2810 static void queue_watch_error(struct ceph_osd_linger_request *lreq)
2812 struct linger_work *lwork;
2814 lwork = lwork_alloc(lreq, do_watch_error);
2815 if (!lwork) {
2816 pr_err("failed to allocate error-lwork\n");
2817 return;
2818 }
2820 lwork->error.err = lreq->last_error;
2821 lwork_queue(lwork);
2824 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
2827 if (!completion_done(&lreq->reg_commit_wait)) {
2828 lreq->reg_commit_error = (result <= 0 ? result : 0);
2829 complete_all(&lreq->reg_commit_wait);
2833 static void linger_commit_cb(struct ceph_osd_request *req)
2835 struct ceph_osd_linger_request *lreq = req->r_priv;
2837 mutex_lock(&lreq->lock);
2838 dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
2839 lreq->linger_id, req->r_result);
2840 linger_reg_commit_complete(lreq, req->r_result);
2841 lreq->committed = true;
2843 if (!lreq->is_watch) {
2844 struct ceph_osd_data *osd_data =
2845 osd_req_op_data(req, 0, notify, response_data);
2846 void *p = page_address(osd_data->pages[0]);
2848 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
2849 osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
2851 /* make note of the notify_id */
2852 if (req->r_ops[0].outdata_len >= sizeof(u64)) {
2853 lreq->notify_id = ceph_decode_64(&p);
2854 dout("lreq %p notify_id %llu\n", lreq,
2855 lreq->notify_id);
2856 } else {
2857 dout("lreq %p no notify_id\n", lreq);
2858 }
2861 mutex_unlock(&lreq->lock);
2862 linger_put(lreq);
2865 static int normalize_watch_error(int err)
2868 * Translate ENOENT -> ENOTCONN so that a delete->disconnection
2869 * notification and a failure to reconnect because we raced with
2870 * the delete appear the same to the user.
2871 */
2872 if (err == -ENOENT)
2873 err = -ENOTCONN;
2875 return err;
2878 static void linger_reconnect_cb(struct ceph_osd_request *req)
2880 struct ceph_osd_linger_request *lreq = req->r_priv;
2882 mutex_lock(&lreq->lock);
2883 dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
2884 lreq, lreq->linger_id, req->r_result, lreq->last_error);
2885 if (req->r_result < 0) {
2886 if (!lreq->last_error) {
2887 lreq->last_error = normalize_watch_error(req->r_result);
2888 queue_watch_error(lreq);
2892 mutex_unlock(&lreq->lock);
2893 linger_put(lreq);
2896 static void send_linger(struct ceph_osd_linger_request *lreq)
2898 struct ceph_osd_request *req = lreq->reg_req;
2899 struct ceph_osd_req_op *op = &req->r_ops[0];
2901 verify_osdc_wrlocked(req->r_osdc);
2902 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2904 if (req->r_osd)
2905 cancel_linger_request(req);
2907 request_reinit(req);
2908 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
2909 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
2910 req->r_flags = lreq->t.flags;
2911 req->r_mtime = lreq->mtime;
2913 mutex_lock(&lreq->lock);
2914 if (lreq->is_watch && lreq->committed) {
2915 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2916 op->watch.cookie != lreq->linger_id);
2917 op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
2918 op->watch.gen = ++lreq->register_gen;
2919 dout("lreq %p reconnect register_gen %u\n", lreq,
2920 lreq->register_gen);
2921 req->r_callback = linger_reconnect_cb;
2922 } else {
2923 if (!lreq->is_watch)
2924 lreq->notify_id = 0;
2925 else
2926 WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
2927 dout("lreq %p register\n", lreq);
2928 req->r_callback = linger_commit_cb;
2930 mutex_unlock(&lreq->lock);
2932 req->r_priv = linger_get(lreq);
2933 req->r_linger = true;
2935 submit_request(req, true);
2938 static void linger_ping_cb(struct ceph_osd_request *req)
2940 struct ceph_osd_linger_request *lreq = req->r_priv;
2942 mutex_lock(&lreq->lock);
2943 dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
2944 __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
2946 if (lreq->register_gen == req->r_ops[0].watch.gen) {
2947 if (!req->r_result) {
2948 lreq->watch_valid_thru = lreq->ping_sent;
2949 } else if (!lreq->last_error) {
2950 lreq->last_error = normalize_watch_error(req->r_result);
2951 queue_watch_error(lreq);
2954 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
2955 lreq->register_gen, req->r_ops[0].watch.gen);
2958 mutex_unlock(&lreq->lock);
2959 linger_put(lreq);
2962 static void send_linger_ping(struct ceph_osd_linger_request *lreq)
2964 struct ceph_osd_client *osdc = lreq->osdc;
2965 struct ceph_osd_request *req = lreq->ping_req;
2966 struct ceph_osd_req_op *op = &req->r_ops[0];
2968 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2969 dout("%s PAUSERD\n", __func__);
2970 return;
2971 }
2973 lreq->ping_sent = jiffies;
2974 dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
2975 __func__, lreq, lreq->linger_id, lreq->ping_sent,
2976 lreq->register_gen);
2978 if (req->r_osd)
2979 cancel_linger_request(req);
2981 request_reinit(req);
2982 target_copy(&req->r_t, &lreq->t);
2984 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2985 op->watch.cookie != lreq->linger_id ||
2986 op->watch.op != CEPH_OSD_WATCH_OP_PING);
2987 op->watch.gen = lreq->register_gen;
2988 req->r_callback = linger_ping_cb;
2989 req->r_priv = linger_get(lreq);
2990 req->r_linger = true;
2992 ceph_osdc_get_request(req);
2993 account_request(req);
2994 req->r_tid = atomic64_inc_return(&osdc->last_tid);
2995 link_request(lreq->osd, req);
2996 send_request(req);
2999 static void linger_submit(struct ceph_osd_linger_request *lreq)
3001 struct ceph_osd_client *osdc = lreq->osdc;
3002 struct ceph_osd *osd;
3004 calc_target(osdc, &lreq->t, NULL, false);
3005 osd = lookup_create_osd(osdc, lreq->t.osd, true);
3006 link_linger(osd, lreq);
3008 send_linger(lreq);
3011 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
3013 struct ceph_osd_client *osdc = lreq->osdc;
3014 struct ceph_osd_linger_request *lookup_lreq;
3016 verify_osdc_wrlocked(osdc);
3018 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3019 lreq->linger_id);
3020 if (!lookup_lreq)
3021 return;
3023 WARN_ON(lookup_lreq != lreq);
3024 erase_linger_mc(&osdc->linger_map_checks, lreq);
3025 linger_put(lreq);
3029 * @lreq has to be both registered and linked.
3031 static void __linger_cancel(struct ceph_osd_linger_request *lreq)
3033 if (lreq->is_watch && lreq->ping_req->r_osd)
3034 cancel_linger_request(lreq->ping_req);
3035 if (lreq->reg_req->r_osd)
3036 cancel_linger_request(lreq->reg_req);
3037 cancel_linger_map_check(lreq);
3038 unlink_linger(lreq->osd, lreq);
3039 linger_unregister(lreq);
3042 static void linger_cancel(struct ceph_osd_linger_request *lreq)
3044 struct ceph_osd_client *osdc = lreq->osdc;
3046 down_write(&osdc->lock);
3047 if (__linger_registered(lreq))
3048 __linger_cancel(lreq);
3049 up_write(&osdc->lock);
3052 static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
3054 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
3056 struct ceph_osd_client *osdc = lreq->osdc;
3057 struct ceph_osdmap *map = osdc->osdmap;
3059 verify_osdc_wrlocked(osdc);
3060 WARN_ON(!map->epoch);
3062 if (lreq->register_gen) {
3063 lreq->map_dne_bound = map->epoch;
3064 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
3065 lreq, lreq->linger_id);
3066 } else {
3067 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
3068 __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3069 map->epoch);
3070 }
3072 if (lreq->map_dne_bound) {
3073 if (map->epoch >= lreq->map_dne_bound) {
3074 /* we had a new enough map */
3075 pr_info("linger_id %llu pool does not exist\n",
3076 lreq->linger_id);
3077 linger_reg_commit_complete(lreq, -ENOENT);
3078 __linger_cancel(lreq);
3079 }
3080 } else {
3081 send_linger_map_check(lreq);
3082 }
3085 static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
3087 struct ceph_osd_client *osdc = &greq->monc->client->osdc;
3088 struct ceph_osd_linger_request *lreq;
3089 u64 linger_id = greq->private_data;
3091 WARN_ON(greq->result || !greq->u.newest);
3093 down_write(&osdc->lock);
3094 lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
3095 if (!lreq) {
3096 dout("%s linger_id %llu dne\n", __func__, linger_id);
3097 goto out_unlock;
3098 }
3100 dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
3101 __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3102 greq->u.newest);
3103 if (!lreq->map_dne_bound)
3104 lreq->map_dne_bound = greq->u.newest;
3105 erase_linger_mc(&osdc->linger_map_checks, lreq);
3106 check_linger_pool_dne(lreq);
3107 linger_put(lreq);
3109 out_unlock:
3110 up_write(&osdc->lock);
3113 static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
3115 struct ceph_osd_client *osdc = lreq->osdc;
3116 struct ceph_osd_linger_request *lookup_lreq;
3119 verify_osdc_wrlocked(osdc);
3121 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3122 lreq->linger_id);
3123 if (lookup_lreq) {
3124 WARN_ON(lookup_lreq != lreq);
3125 return;
3126 }
3128 linger_get(lreq);
3129 insert_linger_mc(&osdc->linger_map_checks, lreq);
3130 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
3131 linger_map_check_cb, lreq->linger_id);
3132 WARN_ON(ret);
3135 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
3137 int ret;
3139 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3140 ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
3141 return ret ?: lreq->reg_commit_error;
3144 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
3146 int ret;
3148 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3149 ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
3150 return ret ?: lreq->notify_finish_error;
3154 * Timeout callback, called every N seconds. When 1 or more OSD
3155 * requests have been active for more than N seconds, we send a keepalive
3156 * (tag + timestamp) to its OSD to ensure any communications channel
3157 * reset is detected.
3159 static void handle_timeout(struct work_struct *work)
3161 struct ceph_osd_client *osdc =
3162 container_of(work, struct ceph_osd_client, timeout_work.work);
3163 struct ceph_options *opts = osdc->client->options;
3164 unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
3165 unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
3166 LIST_HEAD(slow_osds);
3167 struct rb_node *n, *p;
3169 dout("%s osdc %p\n", __func__, osdc);
3170 down_write(&osdc->lock);
3173 * ping osds that are a bit slow. this ensures that if there
3174 * is a break in the TCP connection we will notice, and reopen
3175 * a connection with that osd (from the fault callback).
3177 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3178 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3179 bool found = false;
3181 for (p = rb_first(&osd->o_requests); p; ) {
3182 struct ceph_osd_request *req =
3183 rb_entry(p, struct ceph_osd_request, r_node);
3185 p = rb_next(p); /* abort_request() */
3187 if (time_before(req->r_stamp, cutoff)) {
3188 dout(" req %p tid %llu on osd%d is laggy\n",
3189 req, req->r_tid, osd->o_osd);
3190 found = true;
3191 }
3192 if (opts->osd_request_timeout &&
3193 time_before(req->r_start_stamp, expiry_cutoff)) {
3194 pr_err_ratelimited("tid %llu on osd%d timeout\n",
3195 req->r_tid, osd->o_osd);
3196 abort_request(req, -ETIMEDOUT);
3199 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
3200 struct ceph_osd_linger_request *lreq =
3201 rb_entry(p, struct ceph_osd_linger_request, node);
3203 dout(" lreq %p linger_id %llu is served by osd%d\n",
3204 lreq, lreq->linger_id, osd->o_osd);
3207 mutex_lock(&lreq->lock);
3208 if (lreq->is_watch && lreq->committed && !lreq->last_error)
3209 send_linger_ping(lreq);
3210 mutex_unlock(&lreq->lock);
3213 if (found)
3214 list_move_tail(&osd->o_keepalive_item, &slow_osds);
3217 if (opts->osd_request_timeout) {
3218 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
3219 struct ceph_osd_request *req =
3220 rb_entry(p, struct ceph_osd_request, r_node);
3222 p = rb_next(p); /* abort_request() */
3224 if (time_before(req->r_start_stamp, expiry_cutoff)) {
3225 pr_err_ratelimited("tid %llu on osd%d timeout\n",
3226 req->r_tid, osdc->homeless_osd.o_osd);
3227 abort_request(req, -ETIMEDOUT);
3232 if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
3233 maybe_request_map(osdc);
3235 while (!list_empty(&slow_osds)) {
3236 struct ceph_osd *osd = list_first_entry(&slow_osds,
3237 struct ceph_osd, o_keepalive_item);
3239 list_del_init(&osd->o_keepalive_item);
3240 ceph_con_keepalive(&osd->o_con);
3243 up_write(&osdc->lock);
3244 schedule_delayed_work(&osdc->timeout_work,
3245 osdc->client->options->osd_keepalive_timeout);
3248 static void handle_osds_timeout(struct work_struct *work)
3250 struct ceph_osd_client *osdc =
3251 container_of(work, struct ceph_osd_client,
3252 osds_timeout_work.work);
3253 unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
3254 struct ceph_osd *osd, *nosd;
3256 dout("%s osdc %p\n", __func__, osdc);
3257 down_write(&osdc->lock);
3258 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
3259 if (time_before(jiffies, osd->lru_ttl))
3260 break;
3262 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
3263 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
3264 close_osd(osd);
3267 up_write(&osdc->lock);
3268 schedule_delayed_work(&osdc->osds_timeout_work,
3269 round_jiffies_relative(delay));
3272 static int ceph_oloc_decode(void **p, void *end,
3273 struct ceph_object_locator *oloc)
3275 u8 struct_v, struct_cv;
3276 u32 len;
3277 void *struct_end;
3278 int ret = 0;
3280 ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3281 struct_v = ceph_decode_8(p);
3282 struct_cv = ceph_decode_8(p);
3283 if (struct_v < 3) {
3284 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
3285 struct_v, struct_cv);
3286 goto e_inval;
3287 }
3288 if (struct_cv > 6) {
3289 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
3290 struct_v, struct_cv);
3291 goto e_inval;
3292 }
3293 len = ceph_decode_32(p);
3294 ceph_decode_need(p, end, len, e_inval);
3295 struct_end = *p + len;
3297 oloc->pool = ceph_decode_64(p);
3298 *p += 4; /* skip preferred */
3300 len = ceph_decode_32(p);
3301 if (len > 0) {
3302 pr_warn("ceph_object_locator::key is set\n");
3303 goto e_inval;
3304 }
3306 if (struct_v >= 5) {
3307 bool changed = false;
3309 len = ceph_decode_32(p);
3310 if (len > 0) {
3311 ceph_decode_need(p, end, len, e_inval);
3312 if (!oloc->pool_ns ||
3313 ceph_compare_string(oloc->pool_ns, *p, len))
3314 changed = true;
3315 *p += len;
3316 } else {
3317 if (oloc->pool_ns)
3318 changed = true;
3319 }
3320 if (changed) {
3321 /* redirect changes namespace */
3322 pr_warn("ceph_object_locator::nspace is changed\n");
3323 goto e_inval;
3324 }
3325 }
3327 if (struct_v >= 6) {
3328 s64 hash = ceph_decode_64(p);
3329 if (hash != -1) {
3330 pr_warn("ceph_object_locator::hash is set\n");
3331 goto e_inval;
3332 }
3333 }
3335 /* skip the rest */
3336 *p = struct_end;
3337 out:
3338 return ret;
3340 e_inval:
3341 ret = -EINVAL;
3342 goto out;
3345 static int ceph_redirect_decode(void **p, void *end,
3346 struct ceph_request_redirect *redir)
3348 u8 struct_v, struct_cv;
3349 u32 len;
3350 void *struct_end;
3351 int ret = 0;
3353 ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3354 struct_v = ceph_decode_8(p);
3355 struct_cv = ceph_decode_8(p);
3356 if (struct_cv > 1) {
3357 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
3358 struct_v, struct_cv);
3359 goto e_inval;
3360 }
3361 len = ceph_decode_32(p);
3362 ceph_decode_need(p, end, len, e_inval);
3363 struct_end = *p + len;
3365 ret = ceph_oloc_decode(p, end, &redir->oloc);
3366 if (ret)
3367 goto out;
3369 len = ceph_decode_32(p);
3370 if (len > 0) {
3371 pr_warn("ceph_request_redirect::object_name is set\n");
3372 goto e_inval;
3373 }
3375 len = ceph_decode_32(p);
3376 *p += len; /* skip osd_instructions */
3378 /* skip the rest */
3379 *p = struct_end;
3380 out:
3381 return ret;
3383 e_inval:
3384 ret = -EINVAL;
3385 goto out;
3388 struct MOSDOpReply {
3389 struct ceph_pg pgid;
3390 u64 flags;
3391 int result;
3392 u32 epoch;
3393 int num_ops;
3394 u32 outdata_len[CEPH_OSD_MAX_OPS];
3395 s32 rval[CEPH_OSD_MAX_OPS];
3396 int retry_attempt;
3397 struct ceph_eversion replay_version;
3398 u64 user_version;
3399 struct ceph_request_redirect redirect;
3400 };
3402 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
3404 void *p = msg->front.iov_base;
3405 void *const end = p + msg->front.iov_len;
3406 u16 version = le16_to_cpu(msg->hdr.version);
3407 struct ceph_eversion bad_replay_version;
3408 u8 decode_redir;
3409 u32 len;
3410 int ret;
3411 int i;
3413 ceph_decode_32_safe(&p, end, len, e_inval);
3414 ceph_decode_need(&p, end, len, e_inval);
3415 p += len; /* skip oid */
3417 ret = ceph_decode_pgid(&p, end, &m->pgid);
3418 if (ret)
3419 return ret;
3421 ceph_decode_64_safe(&p, end, m->flags, e_inval);
3422 ceph_decode_32_safe(&p, end, m->result, e_inval);
3423 ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
3424 memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
3425 p += sizeof(bad_replay_version);
3426 ceph_decode_32_safe(&p, end, m->epoch, e_inval);
3428 ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
3429 if (m->num_ops > ARRAY_SIZE(m->outdata_len))
3430 goto e_inval;
3432 ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
3433 e_inval);
3434 for (i = 0; i < m->num_ops; i++) {
3435 struct ceph_osd_op *op = p;
3437 m->outdata_len[i] = le32_to_cpu(op->payload_len);
3438 p += sizeof(*op);
3439 }
3441 ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
3442 for (i = 0; i < m->num_ops; i++)
3443 ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
3445 if (version >= 5) {
3446 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
3447 memcpy(&m->replay_version, p, sizeof(m->replay_version));
3448 p += sizeof(m->replay_version);
3449 ceph_decode_64_safe(&p, end, m->user_version, e_inval);
3450 } else {
3451 m->replay_version = bad_replay_version; /* struct */
3452 m->user_version = le64_to_cpu(m->replay_version.version);
3453 }
3455 if (version >= 6) {
3456 if (version >= 7)
3457 ceph_decode_8_safe(&p, end, decode_redir, e_inval);
3458 else
3459 decode_redir = 1;
3460 } else {
3461 decode_redir = 0;
3462 }
3464 if (decode_redir) {
3465 ret = ceph_redirect_decode(&p, end, &m->redirect);
3466 if (ret)
3467 return ret;
3468 } else {
3469 ceph_oloc_init(&m->redirect.oloc);
3470 }
3472 return 0;
3474 e_inval:
3475 return -EINVAL;
3479 * Handle MOSDOpReply. Set ->r_result and call the callback if it is
3480 * specified.
3482 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
3484 struct ceph_osd_client *osdc = osd->o_osdc;
3485 struct ceph_osd_request *req;
3486 struct MOSDOpReply m;
3487 u64 tid = le64_to_cpu(msg->hdr.tid);
3488 u32 data_len = 0;
3489 int ret;
3490 int i;
3492 dout("%s msg %p tid %llu\n", __func__, msg, tid);
3494 down_read(&osdc->lock);
3495 if (!osd_registered(osd)) {
3496 dout("%s osd%d unknown\n", __func__, osd->o_osd);
3497 goto out_unlock_osdc;
3499 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
3501 mutex_lock(&osd->lock);
3502 req = lookup_request(&osd->o_requests, tid);
3503 if (!req) {
3504 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
3505 goto out_unlock_session;
3506 }
3508 m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
3509 ret = decode_MOSDOpReply(msg, &m);
3510 m.redirect.oloc.pool_ns = NULL;
3511 if (ret) {
3512 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
3513 req->r_tid, ret);
3514 ceph_msg_dump(msg);
3515 goto fail_request;
3516 }
3517 dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
3518 __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
3519 m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
3520 le64_to_cpu(m.replay_version.version), m.user_version);
3522 if (m.retry_attempt >= 0) {
3523 if (m.retry_attempt != req->r_attempts - 1) {
3524 dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
3525 req, req->r_tid, m.retry_attempt,
3526 req->r_attempts - 1);
3527 goto out_unlock_session;
3528 }
3529 } else {
3530 WARN_ON(1); /* MOSDOpReply v4 is assumed */
3531 }
3533 if (!ceph_oloc_empty(&m.redirect.oloc)) {
3534 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
3535 m.redirect.oloc.pool);
3536 unlink_request(osd, req);
3537 mutex_unlock(&osd->lock);
3540 * Not ceph_oloc_copy() - changing pool_ns is not
3541 * supported.
3542 */
3543 req->r_t.target_oloc.pool = m.redirect.oloc.pool;
3544 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED |
3545 CEPH_OSD_FLAG_IGNORE_OVERLAY |
3546 CEPH_OSD_FLAG_IGNORE_CACHE;
3547 req->r_tid = 0;
3548 __submit_request(req, false);
3549 goto out_unlock_osdc;
3552 if (m.num_ops != req->r_num_ops) {
3553 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
3554 req->r_num_ops, req->r_tid);
3555 goto fail_request;
3556 }
3557 for (i = 0; i < req->r_num_ops; i++) {
3558 dout(" req %p tid %llu op %d rval %d len %u\n", req,
3559 req->r_tid, i, m.rval[i], m.outdata_len[i]);
3560 req->r_ops[i].rval = m.rval[i];
3561 req->r_ops[i].outdata_len = m.outdata_len[i];
3562 data_len += m.outdata_len[i];
3564 if (data_len != le32_to_cpu(msg->hdr.data_len)) {
3565 pr_err("sum of lens %u != %u for tid %llu\n", data_len,
3566 le32_to_cpu(msg->hdr.data_len), req->r_tid);
3567 goto fail_request;
3568 }
3569 dout("%s req %p tid %llu result %d data_len %u\n", __func__,
3570 req, req->r_tid, m.result, data_len);
3573 * Since we only ever request ONDISK, we should only ever get
3574 * one (type of) reply back.
3576 WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
3577 req->r_result = m.result ?: data_len;
3578 finish_request(req);
3579 mutex_unlock(&osd->lock);
3580 up_read(&osdc->lock);
3582 __complete_request(req);
3583 return;
3585 fail_request:
3586 complete_request(req, -EIO);
3587 out_unlock_session:
3588 mutex_unlock(&osd->lock);
3589 out_unlock_osdc:
3590 up_read(&osdc->lock);
3593 static void set_pool_was_full(struct ceph_osd_client *osdc)
3595 struct rb_node *n;
3597 for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
3598 struct ceph_pg_pool_info *pi =
3599 rb_entry(n, struct ceph_pg_pool_info, node);
3601 pi->was_full = __pool_full(pi);
3605 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
3607 struct ceph_pg_pool_info *pi;
3609 pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
3610 if (!pi)
3611 return false;
3613 return pi->was_full && !__pool_full(pi);
3616 static enum calc_target_result
3617 recalc_linger_target(struct ceph_osd_linger_request *lreq)
3619 struct ceph_osd_client *osdc = lreq->osdc;
3620 enum calc_target_result ct_res;
3622 ct_res = calc_target(osdc, &lreq->t, NULL, true);
3623 if (ct_res == CALC_TARGET_NEED_RESEND) {
3624 struct ceph_osd *osd;
3626 osd = lookup_create_osd(osdc, lreq->t.osd, true);
3627 if (osd != lreq->osd) {
3628 unlink_linger(lreq->osd, lreq);
3629 link_linger(osd, lreq);
3630 }
3631 }
3633 return ct_res;
3637 * Requeue requests whose mapping to an OSD has changed.
3639 static void scan_requests(struct ceph_osd *osd,
3640 bool force_resend,
3641 bool cleared_full,
3642 bool check_pool_cleared_full,
3643 struct rb_root *need_resend,
3644 struct list_head *need_resend_linger)
3646 struct ceph_osd_client *osdc = osd->o_osdc;
3647 struct rb_node *n;
3648 bool force_resend_writes;
3650 for (n = rb_first(&osd->o_linger_requests); n; ) {
3651 struct ceph_osd_linger_request *lreq =
3652 rb_entry(n, struct ceph_osd_linger_request, node);
3653 enum calc_target_result ct_res;
3655 n = rb_next(n); /* recalc_linger_target() */
3657 dout("%s lreq %p linger_id %llu\n", __func__, lreq,
3659 ct_res = recalc_linger_target(lreq);
3660 switch (ct_res) {
3661 case CALC_TARGET_NO_ACTION:
3662 force_resend_writes = cleared_full ||
3663 (check_pool_cleared_full &&
3664 pool_cleared_full(osdc, lreq->t.base_oloc.pool));
3665 if (!force_resend && !force_resend_writes)
3666 break;
3668 /* fall through */
3669 case CALC_TARGET_NEED_RESEND:
3670 cancel_linger_map_check(lreq);
3672 * scan_requests() for the previous epoch(s)
3673 * may have already added it to the list, since
3674 * it's not unlinked here.
3676 if (list_empty(&lreq->scan_item))
3677 list_add_tail(&lreq->scan_item, need_resend_linger);
3678 break;
3679 case CALC_TARGET_POOL_DNE:
3680 list_del_init(&lreq->scan_item);
3681 check_linger_pool_dne(lreq);
3682 break;
3683 }
3684 }
3686 for (n = rb_first(&osd->o_requests); n; ) {
3687 struct ceph_osd_request *req =
3688 rb_entry(n, struct ceph_osd_request, r_node);
3689 enum calc_target_result ct_res;
3691 n = rb_next(n); /* unlink_request(), check_pool_dne() */
3693 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3694 ct_res = calc_target(osdc, &req->r_t, &req->r_osd->o_con,
3695 false);
3696 switch (ct_res) {
3697 case CALC_TARGET_NO_ACTION:
3698 force_resend_writes = cleared_full ||
3699 (check_pool_cleared_full &&
3700 pool_cleared_full(osdc, req->r_t.base_oloc.pool));
3701 if (!force_resend &&
3702 (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
3703 !force_resend_writes))
3704 break;
3706 /* fall through */
3707 case CALC_TARGET_NEED_RESEND:
3708 cancel_map_check(req);
3709 unlink_request(osd, req);
3710 insert_request(need_resend, req);
3711 break;
3712 case CALC_TARGET_POOL_DNE:
3713 check_pool_dne(req);
3714 break;
3715 }
3716 }
3719 static int handle_one_map(struct ceph_osd_client *osdc,
3720 void *p, void *end, bool incremental,
3721 struct rb_root *need_resend,
3722 struct list_head *need_resend_linger)
3724 struct ceph_osdmap *newmap;
3725 struct rb_node *n;
3726 bool skipped_map = false;
3727 bool was_full;
3729 was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3730 set_pool_was_full(osdc);
3732 if (incremental)
3733 newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
3734 else
3735 newmap = ceph_osdmap_decode(&p, end);
3736 if (IS_ERR(newmap))
3737 return PTR_ERR(newmap);
3739 if (newmap != osdc->osdmap) {
3741 * Preserve ->was_full before destroying the old map.
3742 * For pools that weren't in the old map, ->was_full
3743 * should be false.
3744 */
3745 for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
3746 struct ceph_pg_pool_info *pi =
3747 rb_entry(n, struct ceph_pg_pool_info, node);
3748 struct ceph_pg_pool_info *old_pi;
3750 old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
3751 if (old_pi)
3752 pi->was_full = old_pi->was_full;
3753 else
3754 WARN_ON(pi->was_full);
3757 if (osdc->osdmap->epoch &&
3758 osdc->osdmap->epoch + 1 < newmap->epoch) {
3759 WARN_ON(incremental);
3760 skipped_map = true;
3761 }
3763 ceph_osdmap_destroy(osdc->osdmap);
3764 osdc->osdmap = newmap;
3767 was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3768 scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
3769 need_resend, need_resend_linger);
3771 for (n = rb_first(&osdc->osds); n; ) {
3772 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3774 n = rb_next(n); /* close_osd() */
3776 scan_requests(osd, skipped_map, was_full, true, need_resend,
3777 need_resend_linger);
3778 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
3779 memcmp(&osd->o_con.peer_addr,
3780 ceph_osd_addr(osdc->osdmap, osd->o_osd),
3781 sizeof(struct ceph_entity_addr)))
3782 close_osd(osd);
3783 }
3785 return 0;
3788 static void kick_requests(struct ceph_osd_client *osdc,
3789 struct rb_root *need_resend,
3790 struct list_head *need_resend_linger)
3792 struct ceph_osd_linger_request *lreq, *nlreq;
3793 enum calc_target_result ct_res;
3794 struct rb_node *n;
3796 /* make sure need_resend targets reflect latest map */
3797 for (n = rb_first(need_resend); n; ) {
3798 struct ceph_osd_request *req =
3799 rb_entry(n, struct ceph_osd_request, r_node);
3801 n = rb_next(n);
3803 if (req->r_t.epoch < osdc->osdmap->epoch) {
3804 ct_res = calc_target(osdc, &req->r_t, NULL, false);
3805 if (ct_res == CALC_TARGET_POOL_DNE) {
3806 erase_request(need_resend, req);
3807 check_pool_dne(req);
3812 for (n = rb_first(need_resend); n; ) {
3813 struct ceph_osd_request *req =
3814 rb_entry(n, struct ceph_osd_request, r_node);
3815 struct ceph_osd *osd;
3817 n = rb_next(n);
3818 erase_request(need_resend, req); /* before link_request() */
3820 osd = lookup_create_osd(osdc, req->r_t.osd, true);
3821 link_request(osd, req);
3822 if (!req->r_linger) {
3823 if (!osd_homeless(osd) && !req->r_t.paused)
3824 send_request(req);
3825 } else {
3826 cancel_linger_request(req);
3827 }
3830 list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
3831 if (!osd_homeless(lreq->osd))
3832 send_linger(lreq);
3834 list_del_init(&lreq->scan_item);
3839 * Process updated osd map.
3841 * The message contains any number of incremental and full maps, normally
3842 * indicating some sort of topology change in the cluster. Kick requests
3843 * off to different OSDs as needed.
3845 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
3847 void *p = msg->front.iov_base;
3848 void *const end = p + msg->front.iov_len;
3849 u32 nr_maps, maplen;
3850 u32 epoch;
3851 struct ceph_fsid fsid;
3852 struct rb_root need_resend = RB_ROOT;
3853 LIST_HEAD(need_resend_linger);
3854 bool handled_incremental = false;
3855 bool was_pauserd, was_pausewr;
3856 bool pauserd, pausewr;
3857 int err;
3859 dout("%s have %u\n", __func__, osdc->osdmap->epoch);
3860 down_write(&osdc->lock);
3863 ceph_decode_need(&p, end, sizeof(fsid), bad);
3864 ceph_decode_copy(&p, &fsid, sizeof(fsid));
3865 if (ceph_check_fsid(osdc->client, &fsid) < 0)
3866 goto bad;
3868 was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3869 was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3870 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3871 have_pool_full(osdc);
3873 /* incremental maps */
3874 ceph_decode_32_safe(&p, end, nr_maps, bad);
3875 dout(" %d inc maps\n", nr_maps);
3876 while (nr_maps > 0) {
3877 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3878 epoch = ceph_decode_32(&p);
3879 maplen = ceph_decode_32(&p);
3880 ceph_decode_need(&p, end, maplen, bad);
3881 if (osdc->osdmap->epoch &&
3882 osdc->osdmap->epoch + 1 == epoch) {
3883 dout("applying incremental map %u len %d\n",
3884 epoch, maplen);
3885 err = handle_one_map(osdc, p, p + maplen, true,
3886 &need_resend, &need_resend_linger);
3887 if (err)
3888 goto bad;
3889 handled_incremental = true;
3891 dout("ignoring incremental map %u len %d\n",
3892 epoch, maplen);
3893 }
3894 p += maplen;
3895 nr_maps--;
3896 }
3897 if (handled_incremental)
3898 goto done;
3901 ceph_decode_32_safe(&p, end, nr_maps, bad);
3902 dout(" %d full maps\n", nr_maps);
3903 while (nr_maps) {
3904 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3905 epoch = ceph_decode_32(&p);
3906 maplen = ceph_decode_32(&p);
3907 ceph_decode_need(&p, end, maplen, bad);
3908 if (nr_maps > 1) {
3909 dout("skipping non-latest full map %u len %d\n",
3910 epoch, maplen);
3911 } else if (osdc->osdmap->epoch >= epoch) {
3912 dout("skipping full map %u len %d, "
3913 "older than our %u\n", epoch, maplen,
3914 osdc->osdmap->epoch);
3916 dout("taking full map %u len %d\n", epoch, maplen);
3917 err = handle_one_map(osdc, p, p + maplen, false,
3918 &need_resend, &need_resend_linger);
3919 if (err)
3920 goto bad;
3921 }
3922 p += maplen;
3923 nr_maps--;
3924 }
3926 done:
3927 /*
3928 * subscribe to subsequent osdmap updates if full to ensure
3929 * we find out when we are no longer full and stop returning
3930 * ENOSPC.
3931 */
3932 pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3933 pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3934 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3935 have_pool_full(osdc);
3936 if (was_pauserd || was_pausewr || pauserd || pausewr ||
3937 osdc->osdmap->epoch < osdc->epoch_barrier)
3938 maybe_request_map(osdc);
3940 kick_requests(osdc, &need_resend, &need_resend_linger);
3942 ceph_osdc_abort_on_full(osdc);
3943 ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
3944 osdc->osdmap->epoch);
3945 up_write(&osdc->lock);
3946 wake_up_all(&osdc->client->auth_wq);
3947 return;
3949 bad:
3950 pr_err("osdc handle_map corrupt msg\n");
3951 ceph_msg_dump(msg);
3952 up_write(&osdc->lock);
3956 * Resubmit requests pending on the given osd.
3958 static void kick_osd_requests(struct ceph_osd *osd)
3960 struct rb_node *n;
3962 clear_backoffs(osd);
3964 for (n = rb_first(&osd->o_requests); n; ) {
3965 struct ceph_osd_request *req =
3966 rb_entry(n, struct ceph_osd_request, r_node);
3968 n = rb_next(n); /* cancel_linger_request() */
3970 if (!req->r_linger) {
3971 if (!req->r_t.paused)
3972 send_request(req);
3973 } else {
3974 cancel_linger_request(req);
3975 }
3977 for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
3978 struct ceph_osd_linger_request *lreq =
3979 rb_entry(n, struct ceph_osd_linger_request, node);
3981 send_linger(lreq);
3986 * If the osd connection drops, we need to resubmit all requests.
3988 static void osd_fault(struct ceph_connection *con)
3990 struct ceph_osd *osd = con->private;
3991 struct ceph_osd_client *osdc = osd->o_osdc;
3993 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
3995 down_write(&osdc->lock);
3996 if (!osd_registered(osd)) {
3997 dout("%s osd%d unknown\n", __func__, osd->o_osd);
3998 goto out_unlock;
3999 }
4001 if (!reopen_osd(osd))
4002 kick_osd_requests(osd);
4003 maybe_request_map(osdc);
4005 out_unlock:
4006 up_write(&osdc->lock);
4009 struct MOSDBackoff {
4010 struct ceph_spg spgid;
4011 u32 map_epoch;
4012 u8 op;
4013 u64 id;
4014 struct ceph_hobject_id *begin;
4015 struct ceph_hobject_id *end;
4018 static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
4020 void *p = msg->front.iov_base;
4021 void *const end = p + msg->front.iov_len;
4022 u8 struct_v;
4023 u32 struct_len;
4024 int ret;
4026 ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
4027 if (ret)
4028 return ret;
4030 ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
4031 if (ret)
4032 return ret;
4034 ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);
4035 ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
4036 ceph_decode_8_safe(&p, end, m->op, e_inval);
4037 ceph_decode_64_safe(&p, end, m->id, e_inval);
4039 m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO);
4040 if (!m->begin)
4041 return -ENOMEM;
4043 ret = decode_hoid(&p, end, m->begin);
4044 if (ret) {
4045 free_hoid(m->begin);
4046 return ret;
4047 }
4049 m->end = kzalloc(sizeof(*m->end), GFP_NOIO);
4050 if (!m->end) {
4051 free_hoid(m->begin);
4052 return -ENOMEM;
4053 }
4055 ret = decode_hoid(&p, end, m->end);
4056 if (ret) {
4057 free_hoid(m->begin);
4058 free_hoid(m->end);
4059 return ret;
4060 }
4062 return 0;
4068 static struct ceph_msg *create_backoff_message(
4069 const struct ceph_osd_backoff *backoff,
4072 struct ceph_msg *msg;
4073 void *p, *end;
4074 int msg_size;
4076 msg_size = CEPH_ENCODING_START_BLK_LEN +
4077 CEPH_PGID_ENCODING_LEN + 1; /* spgid */
4078 msg_size += 4 + 1 + 8; /* map_epoch, op, id */
4079 msg_size += CEPH_ENCODING_START_BLK_LEN +
4080 hoid_encoding_size(backoff->begin);
4081 msg_size += CEPH_ENCODING_START_BLK_LEN +
4082 hoid_encoding_size(backoff->end);
4084 msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
4085 if (!msg)
4086 return NULL;
4088 p = msg->front.iov_base;
4089 end = p + msg->front_alloc_len;
4091 encode_spgid(&p, &backoff->spgid);
4092 ceph_encode_32(&p, map_epoch);
4093 ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
4094 ceph_encode_64(&p, backoff->id);
4095 encode_hoid(&p, end, backoff->begin);
4096 encode_hoid(&p, end, backoff->end);
4099 msg->front.iov_len = p - msg->front.iov_base;
4100 msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */
4101 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
4103 return msg;
4106 static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
4108 struct ceph_spg_mapping *spg;
4109 struct ceph_osd_backoff *backoff;
4110 struct ceph_msg *msg;
4112 dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4113 m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4115 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid);
4116 if (!spg) {
4117 spg = alloc_spg_mapping();
4118 if (!spg) {
4119 pr_err("%s failed to allocate spg\n", __func__);
4120 return;
4121 }
4122 spg->spgid = m->spgid; /* struct */
4123 insert_spg_mapping(&osd->o_backoff_mappings, spg);
4124 }
4126 backoff = alloc_backoff();
4127 if (!backoff) {
4128 pr_err("%s failed to allocate backoff\n", __func__);
4129 return;
4130 }
4131 backoff->spgid = m->spgid; /* struct */
4132 backoff->id = m->id;
4133 backoff->begin = m->begin;
4134 m->begin = NULL; /* backoff now owns this */
4135 backoff->end = m->end;
4136 m->end = NULL; /* ditto */
4138 insert_backoff(&spg->backoffs, backoff);
4139 insert_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4142 * Ack with original backoff's epoch so that the OSD can
4143 * discard this if there was a PG split.
4145 msg = create_backoff_message(backoff, m->map_epoch);
4146 if (!msg) {
4147 pr_err("%s failed to allocate msg\n", __func__);
4148 return;
4149 }
4150 ceph_con_send(&osd->o_con, msg);
4153 static bool target_contained_by(const struct ceph_osd_request_target *t,
4154 const struct ceph_hobject_id *begin,
4155 const struct ceph_hobject_id *end)
4157 struct ceph_hobject_id hoid;
4158 int cmp;
4160 hoid_fill_from_target(&hoid, t);
4161 cmp = hoid_compare(&hoid, begin);
4162 return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0);
4165 static void handle_backoff_unblock(struct ceph_osd *osd,
4166 const struct MOSDBackoff *m)
4168 struct ceph_spg_mapping *spg;
4169 struct ceph_osd_backoff *backoff;
4170 struct rb_node *n;
4172 dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4173 m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4175 backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
4176 if (!backoff) {
4177 pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
4178 __func__, osd->o_osd, m->spgid.pgid.pool,
4179 m->spgid.pgid.seed, m->spgid.shard, m->id);
4180 return;
4181 }
4183 if (hoid_compare(backoff->begin, m->begin) &&
4184 hoid_compare(backoff->end, m->end)) {
4185 pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
4186 __func__, osd->o_osd, m->spgid.pgid.pool,
4187 m->spgid.pgid.seed, m->spgid.shard, m->id);
4188 /* unblock it anyway... */
4189 }
4191 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
4192 BUG_ON(!spg);
4194 erase_backoff(&spg->backoffs, backoff);
4195 erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4196 free_backoff(backoff);
4198 if (RB_EMPTY_ROOT(&spg->backoffs)) {
4199 erase_spg_mapping(&osd->o_backoff_mappings, spg);
4200 free_spg_mapping(spg);
4203 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
4204 struct ceph_osd_request *req =
4205 rb_entry(n, struct ceph_osd_request, r_node);
4207 if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
4209 * Match against @m, not @backoff -- the PG may
4210 * have split on the OSD.
4212 if (target_contained_by(&req->r_t, m->begin, m->end)) {
4214 * If no other installed backoff applies,
4215 * resend.
4216 */
4217 send_request(req);
4223 static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
4225 struct ceph_osd_client *osdc = osd->o_osdc;
4226 struct MOSDBackoff m;
4227 int ret;
4229 down_read(&osdc->lock);
4230 if (!osd_registered(osd)) {
4231 dout("%s osd%d unknown\n", __func__, osd->o_osd);
4232 up_read(&osdc->lock);
4233 return;
4234 }
4235 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
4237 mutex_lock(&osd->lock);
4238 ret = decode_MOSDBackoff(msg, &m);
4239 if (ret) {
4240 pr_err("failed to decode MOSDBackoff: %d\n", ret);
4241 ceph_msg_dump(msg);
4242 goto out_unlock;
4243 }
4245 switch (m.op) {
4246 case CEPH_OSD_BACKOFF_OP_BLOCK:
4247 handle_backoff_block(osd, &m);
4248 break;
4249 case CEPH_OSD_BACKOFF_OP_UNBLOCK:
4250 handle_backoff_unblock(osd, &m);
4251 break;
4252 default:
4253 pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
4254 }
4256 free_hoid(m.begin);
4257 free_hoid(m.end);
4259 out_unlock:
4260 mutex_unlock(&osd->lock);
4261 up_read(&osdc->lock);
4265 * Process osd watch notifications
4267 static void handle_watch_notify(struct ceph_osd_client *osdc,
4268 struct ceph_msg *msg)
4270 void *p = msg->front.iov_base;
4271 void *const end = p + msg->front.iov_len;
4272 struct ceph_osd_linger_request *lreq;
4273 struct linger_work *lwork;
4274 u8 proto_ver, opcode;
4275 u64 cookie, notify_id;
4276 u64 notifier_id = 0;
4277 s32 return_code = 0;
4278 void *payload = NULL;
4279 u32 payload_len = 0;
4281 ceph_decode_8_safe(&p, end, proto_ver, bad);
4282 ceph_decode_8_safe(&p, end, opcode, bad);
4283 ceph_decode_64_safe(&p, end, cookie, bad);
4284 p += 8; /* skip ver */
4285 ceph_decode_64_safe(&p, end, notify_id, bad);
4287 if (proto_ver >= 1) {
4288 ceph_decode_32_safe(&p, end, payload_len, bad);
4289 ceph_decode_need(&p, end, payload_len, bad);
4290 payload = p;
4291 p += payload_len;
4292 }
4294 if (le16_to_cpu(msg->hdr.version) >= 2)
4295 ceph_decode_32_safe(&p, end, return_code, bad);
4297 if (le16_to_cpu(msg->hdr.version) >= 3)
4298 ceph_decode_64_safe(&p, end, notifier_id, bad);
4300 down_read(&osdc->lock);
4301 lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
4302 if (!lreq) {
4303 dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
4304 cookie);
4305 goto out_unlock_osdc;
4306 }
4308 mutex_lock(&lreq->lock);
4309 dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
4310 opcode, cookie, lreq, lreq->is_watch);
4311 if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
4312 if (!lreq->last_error) {
4313 lreq->last_error = -ENOTCONN;
4314 queue_watch_error(lreq);
4316 } else if (!lreq->is_watch) {
4317 /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
4318 if (lreq->notify_id && lreq->notify_id != notify_id) {
4319 dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
4320 lreq->notify_id, notify_id);
4321 } else if (!completion_done(&lreq->notify_finish_wait)) {
4322 struct ceph_msg_data *data =
4323 list_first_entry_or_null(&msg->data,
4324 struct ceph_msg_data,
4325 links);
4327 if (data) {
4328 if (lreq->preply_pages) {
4329 WARN_ON(data->type !=
4330 CEPH_MSG_DATA_PAGES);
4331 *lreq->preply_pages = data->pages;
4332 *lreq->preply_len = data->length;
4333 } else {
4334 ceph_release_page_vector(data->pages,
4335 calc_pages_for(0, data->length));
4336 }
4337 }
4338 lreq->notify_finish_error = return_code;
4339 complete_all(&lreq->notify_finish_wait);
4341 } else {
4342 /* CEPH_WATCH_EVENT_NOTIFY */
4343 lwork = lwork_alloc(lreq, do_watch_notify);
4344 if (!lwork) {
4345 pr_err("failed to allocate notify-lwork\n");
4346 goto out_unlock_lreq;
4347 }
4349 lwork->notify.notify_id = notify_id;
4350 lwork->notify.notifier_id = notifier_id;
4351 lwork->notify.payload = payload;
4352 lwork->notify.payload_len = payload_len;
4353 lwork->notify.msg = ceph_msg_get(msg);
4354 lwork_queue(lwork);
4355 }
4357 out_unlock_lreq:
4358 mutex_unlock(&lreq->lock);
4359 out_unlock_osdc:
4360 up_read(&osdc->lock);
4361 return;
4363 bad:
4364 pr_err("osdc handle_watch_notify corrupt msg\n");
4368 * Register request, send initial attempt.
4370 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
4371 struct ceph_osd_request *req,
4372 bool nofail)
4374 down_read(&osdc->lock);
4375 submit_request(req, false);
4376 up_read(&osdc->lock);
4378 return 0;
4380 EXPORT_SYMBOL(ceph_osdc_start_request);
4383 * Unregister a registered request. The request is not completed:
4384 * ->r_result isn't set and __complete_request() isn't called.
4386 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
4388 struct ceph_osd_client *osdc = req->r_osdc;
4390 down_write(&osdc->lock);
4391 if (req->r_osd)
4392 cancel_request(req);
4393 up_write(&osdc->lock);
4395 EXPORT_SYMBOL(ceph_osdc_cancel_request);
4398 * @timeout: in jiffies, 0 means "wait forever"
4400 static int wait_request_timeout(struct ceph_osd_request *req,
4401 unsigned long timeout)
4403 long left;
4405 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
4406 left = wait_for_completion_killable_timeout(&req->r_completion,
4407 ceph_timeout_jiffies(timeout));
4408 if (left <= 0) {
4409 left = left ?: -ETIMEDOUT;
4410 ceph_osdc_cancel_request(req);
4411 } else {
4412 left = req->r_result; /* completed */
4413 }
4415 return left;
4419 * wait for a request to complete
4421 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
4422 struct ceph_osd_request *req)
4424 return wait_request_timeout(req, 0);
4426 EXPORT_SYMBOL(ceph_osdc_wait_request);
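/*
 * Illustrative sketch (not in the original): the typical synchronous
 * pattern built from the helpers above -- allocate and set up a
 * request, then:
 *
 *	ceph_osdc_start_request(osdc, req, false);
 *	ret = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 *
 * On success ret is req->r_result (data length for reads, 0 for
 * writes); a killed waiter cancels the request via
 * wait_request_timeout().
 */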
4429 * sync - wait for all in-flight requests to flush. avoid starvation.
4431 void ceph_osdc_sync(struct ceph_osd_client *osdc)
4433 struct rb_node *n, *p;
4434 u64 last_tid = atomic64_read(&osdc->last_tid);
4436 again:
4437 down_read(&osdc->lock);
4438 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
4439 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
4441 mutex_lock(&osd->lock);
4442 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
4443 struct ceph_osd_request *req =
4444 rb_entry(p, struct ceph_osd_request, r_node);
4446 if (req->r_tid > last_tid)
4447 break;
4449 if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
4450 continue;
4452 ceph_osdc_get_request(req);
4453 mutex_unlock(&osd->lock);
4454 up_read(&osdc->lock);
4455 dout("%s waiting on req %p tid %llu last_tid %llu\n",
4456 __func__, req, req->r_tid, last_tid);
4457 wait_for_completion(&req->r_completion);
4458 ceph_osdc_put_request(req);
4459 goto again;
4462 mutex_unlock(&osd->lock);
4465 up_read(&osdc->lock);
4466 dout("%s done last_tid %llu\n", __func__, last_tid);
4468 EXPORT_SYMBOL(ceph_osdc_sync);
4470 static struct ceph_osd_request *
4471 alloc_linger_request(struct ceph_osd_linger_request *lreq)
4473 struct ceph_osd_request *req;
4475 req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
4476 if (!req)
4477 return NULL;
4479 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4480 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4482 if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
4483 ceph_osdc_put_request(req);
4484 return NULL;
4485 }
4487 return req;
4491 * Returns a handle, caller owns a ref.
4493 struct ceph_osd_linger_request *
4494 ceph_osdc_watch(struct ceph_osd_client *osdc,
4495 struct ceph_object_id *oid,
4496 struct ceph_object_locator *oloc,
4497 rados_watchcb2_t wcb,
4498 rados_watcherrcb_t errcb,
4499 void *data)
4501 struct ceph_osd_linger_request *lreq;
4502 int ret;
4504 lreq = linger_alloc(osdc);
4505 if (!lreq)
4506 return ERR_PTR(-ENOMEM);
4508 lreq->is_watch = true;
4509 lreq->wcb = wcb;
4510 lreq->errcb = errcb;
4511 lreq->data = data;
4512 lreq->watch_valid_thru = jiffies;
4514 ceph_oid_copy(&lreq->t.base_oid, oid);
4515 ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4516 lreq->t.flags = CEPH_OSD_FLAG_WRITE;
4517 ktime_get_real_ts64(&lreq->mtime);
4519 lreq->reg_req = alloc_linger_request(lreq);
4520 if (!lreq->reg_req) {
4521 ret = -ENOMEM;
4522 goto err_put_lreq;
4523 }
4525 lreq->ping_req = alloc_linger_request(lreq);
4526 if (!lreq->ping_req) {
4527 ret = -ENOMEM;
4528 goto err_put_lreq;
4529 }
4531 down_write(&osdc->lock);
4532 linger_register(lreq); /* before osd_req_op_* */
4533 osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
4534 CEPH_OSD_WATCH_OP_WATCH);
4535 osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
4536 CEPH_OSD_WATCH_OP_PING);
4537 linger_submit(lreq);
4538 up_write(&osdc->lock);
4540 ret = linger_reg_commit_wait(lreq);
4541 if (ret) {
4542 linger_cancel(lreq);
4543 goto err_put_lreq;
4544 }
4546 return lreq;
4548 err_put_lreq:
4549 linger_put(lreq);
4550 return ERR_PTR(ret);
4552 EXPORT_SYMBOL(ceph_osdc_watch);
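/*
 * Illustrative sketch (not in the original): rbd-style usage of the
 * watch API, assuming wcb/errcb/data are supplied by the caller:
 *
 *	handle = ceph_osdc_watch(osdc, &oid, &oloc, wcb, errcb, data);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ceph_osdc_unwatch(osdc, handle);
 */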
/*
 * Releases a ref.
 *
 * Times out after mount_timeout to preserve rbd unmap behaviour
 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
 * with mount_timeout").
 */
int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
		      struct ceph_osd_linger_request *lreq)
{
	struct ceph_options *opts = osdc->client->options;
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&req->r_mtime);
	osd_req_op_watch_init(req, 0, lreq->linger_id,
			      CEPH_OSD_WATCH_OP_UNWATCH);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	linger_cancel(lreq);
	linger_put(lreq);
	ret = wait_request_timeout(req, opts->mount_timeout);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_unwatch);

static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
				      u64 notify_id, u64 cookie, void *payload,
				      u32 payload_len)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pl;
	int ret;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);

	pl = kmalloc(sizeof(*pl), GFP_NOIO);
	if (!pl)
		return -ENOMEM;

	ceph_pagelist_init(pl);
	ret = ceph_pagelist_encode_64(pl, notify_id);
	ret |= ceph_pagelist_encode_64(pl, cookie);
	if (payload) {
		ret |= ceph_pagelist_encode_32(pl, payload_len);
		ret |= ceph_pagelist_append(pl, payload, payload_len);
	} else {
		ret |= ceph_pagelist_encode_32(pl, 0);
	}
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}

	ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
	op->indata_len = pl->length;
	return 0;
}

int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
			 struct ceph_object_id *oid,
			 struct ceph_object_locator *oloc,
			 u64 notify_id,
			 u64 cookie,
			 void *payload,
			 u32 payload_len)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
					 payload_len);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify_ack);

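/*
 * Usage sketch (compiled out): a watch callback is expected to ack
 * each notify, or the notifier's ceph_osdc_notify() stalls until its
 * timeout. rbd does the equivalent in its watch callback. The
 * example_dev container and example_* names are hypothetical.
 */
#if 0
struct example_dev {
	struct ceph_osd_client *osdc;
	struct ceph_object_id oid;
	struct ceph_object_locator oloc;
};

static void example_handle_notify(void *arg, u64 notify_id, u64 cookie,
				  u64 notifier_id, void *data, size_t data_len)
{
	struct example_dev *dev = arg;

	/* handle the payload, then ack with an empty reply */
	ceph_osdc_notify_ack(dev->osdc, &dev->oid, &dev->oloc,
			     notify_id, cookie, NULL, 0);
}
#endif
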
static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u32 prot_ver, u32 timeout,
				  void *payload, u32 payload_len)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pl;
	int ret;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
	op->notify.cookie = cookie;

	pl = kmalloc(sizeof(*pl), GFP_NOIO);
	if (!pl)
		return -ENOMEM;

	ceph_pagelist_init(pl);
	ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
	ret |= ceph_pagelist_encode_32(pl, timeout);
	ret |= ceph_pagelist_encode_32(pl, payload_len);
	ret |= ceph_pagelist_append(pl, payload, payload_len);
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}

	ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
	op->indata_len = pl->length;
	return 0;
}

/*
 * @timeout: in seconds
 *
 * @preply_{pages,len} are initialized both on success and error.
 * The caller is responsible for:
 *
 *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
 */
int ceph_osdc_notify(struct ceph_osd_client *osdc,
		     struct ceph_object_id *oid,
		     struct ceph_object_locator *oloc,
		     void *payload,
		     u32 payload_len,
		     u32 timeout,
		     struct page ***preply_pages,
		     size_t *preply_len)
{
	struct ceph_osd_linger_request *lreq;
	struct page **pages;
	int ret;

	WARN_ON(!timeout);
	if (preply_pages) {
		*preply_pages = NULL;
		*preply_len = 0;
	}

	lreq = linger_alloc(osdc);
	if (!lreq)
		return -ENOMEM;

	lreq->preply_pages = preply_pages;
	lreq->preply_len = preply_len;

	ceph_oid_copy(&lreq->t.base_oid, oid);
	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
	lreq->t.flags = CEPH_OSD_FLAG_READ;

	lreq->reg_req = alloc_linger_request(lreq);
	if (!lreq->reg_req) {
		ret = -ENOMEM;
		goto out_put_lreq;
	}

	/* for notify_id */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_lreq;
	}

	down_write(&osdc->lock);
	linger_register(lreq); /* before osd_req_op_* */
	ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
				     timeout, payload, payload_len);
	if (ret) {
		linger_unregister(lreq);
		up_write(&osdc->lock);
		ceph_release_page_vector(pages, 1);
		goto out_put_lreq;
	}
	ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);
	linger_submit(lreq);
	up_write(&osdc->lock);

	ret = linger_reg_commit_wait(lreq);
	if (!ret)
		ret = linger_notify_finish_wait(lreq);
	else
		dout("lreq %p failed to initiate notify %d\n", lreq, ret);

	linger_cancel(lreq);
out_put_lreq:
	linger_put(lreq);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify);

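/*
 * Usage sketch (compiled out): sending a notify and releasing the
 * reply pages. As noted above, *preply_pages is initialized on both
 * success and error, so the release is safe either way. Hypothetical
 * example_* names; the 10 second timeout is arbitrary.
 */
#if 0
static int example_notify(struct ceph_osd_client *osdc,
			  struct ceph_object_id *oid,
			  struct ceph_object_locator *oloc,
			  void *buf, u32 buf_len)
{
	struct page **reply_pages;
	size_t reply_len;
	int ret;

	ret = ceph_osdc_notify(osdc, oid, oloc, buf, buf_len, 10,
			       &reply_pages, &reply_len);
	if (reply_pages)
		ceph_release_page_vector(reply_pages,
					 calc_pages_for(0, reply_len));
	return ret;
}
#endif
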
/*
 * Return the number of milliseconds since the watch was last
 * confirmed, or an error. If there is an error, the watch is no
 * longer valid, and should be destroyed with ceph_osdc_unwatch().
 */
int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
			  struct ceph_osd_linger_request *lreq)
{
	unsigned long stamp, age;
	int ret;

	down_read(&osdc->lock);
	mutex_lock(&lreq->lock);
	stamp = lreq->watch_valid_thru;
	if (!list_empty(&lreq->pending_lworks)) {
		struct linger_work *lwork =
		    list_first_entry(&lreq->pending_lworks,
				     struct linger_work,
				     pending_item);

		if (time_before(lwork->queued_stamp, stamp))
			stamp = lwork->queued_stamp;
	}
	age = jiffies - stamp;
	dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
	     lreq, lreq->linger_id, age, lreq->last_error);
	/* we are truncating to msecs, so return a safe upper bound */
	ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);

	mutex_unlock(&lreq->lock);
	up_read(&osdc->lock);
	return ret;
}

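/*
 * Usage sketch (compiled out): a freshness test a client might run
 * from a periodic worker. A negative return means the watch is dead
 * and must be re-established; a positive return is an upper bound on
 * the age of the last confirmation in milliseconds. Hypothetical
 * example_* names.
 */
#if 0
static bool example_watch_is_fresh(struct ceph_osd_client *osdc,
				   struct ceph_osd_linger_request *handle,
				   int max_age_ms)
{
	int ret = ceph_osdc_watch_check(osdc, handle);

	return ret > 0 && ret <= max_age_ms;
}
#endif
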
static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 2, "watch_item_t",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	ceph_decode_copy(p, &item->name, sizeof(item->name));
	item->cookie = ceph_decode_64(p);
	*p += 4; /* skip timeout_seconds */
	if (struct_v >= 2) {
		ceph_decode_copy(p, &item->addr, sizeof(item->addr));
		ceph_decode_addr(&item->addr);
	}

	dout("%s %s%llu cookie %llu addr %s\n", __func__,
	     ENTITY_NAME(item->name), item->cookie,
	     ceph_pr_addr(&item->addr.in_addr));
	return 0;
}

static int decode_watchers(void **p, void *end,
			   struct ceph_watch_item **watchers,
			   u32 *num_watchers)
{
	u8 struct_v;
	u32 struct_len;
	int i;
	int ret;

	ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	*num_watchers = ceph_decode_32(p);
	*watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
	if (!*watchers)
		return -ENOMEM;

	for (i = 0; i < *num_watchers; i++) {
		ret = decode_watcher(p, end, *watchers + i);
		if (ret) {
			kfree(*watchers);
			return ret;
		}
	}

	return 0;
}

/*
 * On success, the caller is responsible for:
 *
 *     kfree(watchers);
 */
int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
			    struct ceph_object_id *oid,
			    struct ceph_object_locator *oloc,
			    struct ceph_watch_item **watchers,
			    u32 *num_watchers)
{
	struct ceph_osd_request *req;
	struct page **pages;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_req;
	}

	osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
	ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		void *p = page_address(pages[0]);
		void *const end = p + req->r_ops[0].outdata_len;

		ret = decode_watchers(&p, end, watchers, num_watchers);
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_list_watchers);

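/*
 * Usage sketch (compiled out): enumerating the watchers of an object.
 * The watcher array is allocated by decode_watchers() and must be
 * freed by the caller on success. Hypothetical example_* name.
 */
#if 0
static void example_print_watchers(struct ceph_osd_client *osdc,
				   struct ceph_object_id *oid,
				   struct ceph_object_locator *oloc)
{
	struct ceph_watch_item *watchers;
	u32 num_watchers;
	u32 i;

	if (ceph_osdc_list_watchers(osdc, oid, oloc, &watchers,
				    &num_watchers))
		return;

	for (i = 0; i < num_watchers; i++)
		pr_info("watcher %s%llu cookie %llu\n",
			ENTITY_NAME(watchers[i].name), watchers[i].cookie);

	kfree(watchers);
}
#endif
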
/*
 * Call all pending notify callbacks - for use after a watch is
 * unregistered, to make sure no more callbacks for it will be invoked
 */
void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
{
	dout("%s osdc %p\n", __func__, osdc);
	flush_workqueue(osdc->notify_wq);
}
EXPORT_SYMBOL(ceph_osdc_flush_notifies);

void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
{
	down_read(&osdc->lock);
	maybe_request_map(osdc);
	up_read(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_maybe_request_map);

/*
 * Execute an OSD class method on an object.
 *
 * @flags: CEPH_OSD_FLAG_*
 * @resp_len: in/out param for reply length
 */
int ceph_osdc_call(struct ceph_osd_client *osdc,
		   struct ceph_object_id *oid,
		   struct ceph_object_locator *oloc,
		   const char *class, const char *method,
		   unsigned int flags,
		   struct page *req_page, size_t req_len,
		   struct page *resp_page, size_t *resp_len)
{
	struct ceph_osd_request *req;
	int ret;

	if (req_len > PAGE_SIZE || (resp_page && *resp_len > PAGE_SIZE))
		return -E2BIG;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = flags;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ret = osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, class, method);
	if (ret)
		goto out_put_req;

	if (req_page)
		osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
						  0, false, false);
	if (resp_page)
		osd_req_op_cls_response_data_pages(req, 0, &resp_page,
						   *resp_len, 0, false, false);

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		ret = req->r_ops[0].rval;
		if (resp_page)
			*resp_len = req->r_ops[0].outdata_len;
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_call);

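/*
 * Usage sketch (compiled out): invoking an OSD class method, modelled
 * on how rbd calls into its "rbd" class. Request and response are
 * each limited to a single page by the check above. Hypothetical
 * example_* name; "rbd"/"get_size" are shown for flavour only and
 * would normally require a proper request payload.
 */
#if 0
static int example_cls_call(struct ceph_osd_client *osdc,
			    struct ceph_object_id *oid,
			    struct ceph_object_locator *oloc)
{
	struct page *reply_page;
	size_t reply_len = PAGE_SIZE;
	int ret;

	reply_page = alloc_page(GFP_NOIO);
	if (!reply_page)
		return -ENOMEM;

	ret = ceph_osdc_call(osdc, oid, oloc, "rbd", "get_size",
			     CEPH_OSD_FLAG_READ, NULL, 0,
			     reply_page, &reply_len);
	/* on success, ret is the method's rval; decode reply_page here */

	__free_page(reply_page);
	return ret;
}
#endif
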
/*
 * init, shutdown
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
	int err;

	dout("init\n");
	osdc->client = client;
	init_rwsem(&osdc->lock);
	osdc->osds = RB_ROOT;
	INIT_LIST_HEAD(&osdc->osd_lru);
	spin_lock_init(&osdc->osd_lru_lock);
	osd_init(&osdc->homeless_osd);
	osdc->homeless_osd.o_osdc = osdc;
	osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
	osdc->last_linger_id = CEPH_LINGER_ID_START;
	osdc->linger_requests = RB_ROOT;
	osdc->map_checks = RB_ROOT;
	osdc->linger_map_checks = RB_ROOT;
	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);

	err = -ENOMEM;
	osdc->osdmap = ceph_osdmap_alloc();
	if (!osdc->osdmap)
		goto out;

	osdc->req_mempool = mempool_create_slab_pool(10,
						     ceph_osd_request_cache);
	if (!osdc->req_mempool)
		goto out_map;

	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
				PAGE_SIZE, 10, true, "osd_op");
	if (err < 0)
		goto out_mempool;
	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
				PAGE_SIZE, 10, true, "osd_op_reply");
	if (err < 0)
		goto out_msgpool;

	err = -ENOMEM;
	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
	if (!osdc->notify_wq)
		goto out_msgpool_reply;

	osdc->completion_wq = create_singlethread_workqueue("ceph-completion");
	if (!osdc->completion_wq)
		goto out_notify_wq;

	schedule_delayed_work(&osdc->timeout_work,
			      osdc->client->options->osd_keepalive_timeout);
	schedule_delayed_work(&osdc->osds_timeout_work,
	    round_jiffies_relative(osdc->client->options->osd_idle_ttl));

	return 0;

	/* unwind in the reverse order of allocation */
out_notify_wq:
	destroy_workqueue(osdc->notify_wq);
out_msgpool_reply:
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
out_msgpool:
	ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
	mempool_destroy(osdc->req_mempool);
out_map:
	ceph_osdmap_destroy(osdc->osdmap);
out:
	return err;
}

void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
	destroy_workqueue(osdc->completion_wq);
	destroy_workqueue(osdc->notify_wq);
	cancel_delayed_work_sync(&osdc->timeout_work);
	cancel_delayed_work_sync(&osdc->osds_timeout_work);

	down_write(&osdc->lock);
	while (!RB_EMPTY_ROOT(&osdc->osds)) {
		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
						struct ceph_osd, o_node);
		close_osd(osd);
	}
	up_write(&osdc->lock);
	WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
	osd_cleanup(&osdc->homeless_osd);

	WARN_ON(!list_empty(&osdc->osd_lru));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
	WARN_ON(atomic_read(&osdc->num_requests));
	WARN_ON(atomic_read(&osdc->num_homeless));

	ceph_osdmap_destroy(osdc->osdmap);
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}

/*
 * Read some contiguous pages. If we cross a stripe boundary, shorten
 * *plen. Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
			struct ceph_vino vino, struct ceph_file_layout *layout,
			u64 off, u64 *plen,
			u32 truncate_seq, u64 truncate_size,
			struct page **pages, int num_pages, int page_align)
{
	struct ceph_osd_request *req;
	int rc = 0;

	dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
	     vino.snap, off, *plen);
	req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, truncate_seq, truncate_size,
				    false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short read due to an object boundary */
	osd_req_op_extent_osd_data_pages(req, 0,
				pages, *plen, page_align, false, false);

	dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
	     off, *plen, *plen, page_align);

	rc = ceph_osdc_start_request(osdc, req, false);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	dout("readpages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);

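/*
 * Usage sketch (compiled out): a synchronous object read into a
 * caller-provided page vector, with no truncation interval
 * (truncate_seq and truncate_size both 0). *plen may come back
 * shorter than requested if the extent crosses an object boundary.
 * Hypothetical example_* name.
 */
#if 0
static int example_read(struct ceph_osd_client *osdc, struct ceph_vino vino,
			struct ceph_file_layout *layout, u64 off, u64 len,
			struct page **pages, int num_pages)
{
	u64 plen = len;

	return ceph_osdc_readpages(osdc, vino, layout, off, &plen,
				   0, 0, pages, num_pages, 0);
}
#endif
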
/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
			 struct ceph_file_layout *layout,
			 struct ceph_snap_context *snapc,
			 u64 off, u64 len,
			 u32 truncate_seq, u64 truncate_size,
			 struct timespec64 *mtime,
			 struct page **pages, int num_pages)
{
	struct ceph_osd_request *req;
	int rc = 0;
	int page_align = off & ~PAGE_MASK;

	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
				    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
				    snapc, truncate_seq, truncate_size,
				    true);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* it may be a short write due to an object boundary */
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
					 false, false);
	dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);

	req->r_mtime = *mtime;
	rc = ceph_osdc_start_request(osdc, req, true);
	if (!rc)
		rc = ceph_osdc_wait_request(osdc, req);

	ceph_osdc_put_request(req);
	if (rc == 0)
		rc = len;
	dout("writepages result %d\n", rc);
	return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);

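/*
 * Usage sketch (compiled out): the matching synchronous write. On
 * success the return value is the number of bytes written (len).
 * Hypothetical example_* name.
 */
#if 0
static int example_write(struct ceph_osd_client *osdc, struct ceph_vino vino,
			 struct ceph_file_layout *layout,
			 struct ceph_snap_context *snapc, u64 off, u64 len,
			 struct page **pages, int num_pages)
{
	struct timespec64 mtime;

	ktime_get_real_ts64(&mtime);
	return ceph_osdc_writepages(osdc, vino, layout, snapc, off, len,
				    0, 0, &mtime, pages, num_pages);
}
#endif
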
int __init ceph_osdc_setup(void)
{
	size_t size = sizeof(struct ceph_osd_request) +
	    CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);

	BUG_ON(ceph_osd_request_cache);
	ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
						   0, 0, NULL);

	return ceph_osd_request_cache ? 0 : -ENOMEM;
}

void ceph_osdc_cleanup(void)
{
	BUG_ON(!ceph_osd_request_cache);
	kmem_cache_destroy(ceph_osd_request_cache);
	ceph_osd_request_cache = NULL;
}

/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	int type = le16_to_cpu(msg->hdr.type);

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		break;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osd, msg);
		break;
	case CEPH_MSG_OSD_BACKOFF:
		handle_backoff(osd, msg);
		break;
	case CEPH_MSG_WATCH_NOTIFY:
		handle_watch_notify(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}

	ceph_msg_put(msg);
}

/*
 * Lookup and return message for incoming reply. Don't try to do
 * anything about a larger than preallocated data portion of the
 * message at the moment - for now, just skip the message.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_msg *m = NULL;
	struct ceph_osd_request *req;
	int front_len = le32_to_cpu(hdr->front_len);
	int data_len = le32_to_cpu(hdr->data_len);
	u64 tid = le64_to_cpu(hdr->tid);

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
		*skip = 1;
		goto out_unlock_osdc;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));

	mutex_lock(&osd->lock);
	req = lookup_request(&osd->o_requests, tid);
	if (!req) {
		dout("%s osd%d tid %llu unknown, skipping\n", __func__,
		     osd->o_osd, tid);
		*skip = 1;
		goto out_unlock_session;
	}

	ceph_msg_revoke_incoming(req->r_reply);

	if (front_len > req->r_reply->front_alloc_len) {
		pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
			__func__, osd->o_osd, req->r_tid, front_len,
			req->r_reply->front_alloc_len);
		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
				 false);
		if (!m)
			goto out_unlock_session;
		ceph_msg_put(req->r_reply);
		req->r_reply = m;
	}

	if (data_len > req->r_reply->data_length) {
		pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
			__func__, osd->o_osd, req->r_tid, data_len,
			req->r_reply->data_length);
		m = NULL;
		*skip = 1;
		goto out_unlock_session;
	}

	m = ceph_msg_get(req->r_reply);
	dout("get_reply tid %lld %p\n", tid, m);

out_unlock_session:
	mutex_unlock(&osd->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return m;
}

/*
 * TODO: switch to a msg-owned pagelist
 */
static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
{
	struct ceph_msg *m;
	int type = le16_to_cpu(hdr->type);
	u32 front_len = le32_to_cpu(hdr->front_len);
	u32 data_len = le32_to_cpu(hdr->data_len);

	m = ceph_msg_new(type, front_len, GFP_NOIO, false);
	if (!m)
		return NULL;

	if (data_len) {
		struct page **pages;
		struct ceph_osd_data osd_data;

		pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
					       GFP_NOIO);
		if (IS_ERR(pages)) {
			ceph_msg_put(m);
			return NULL;
		}

		ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
					 false);
		ceph_osdc_msg_data_add(m, &osd_data);
	}

	return m;
}

static struct ceph_msg *alloc_msg(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	int type = le16_to_cpu(hdr->type);

	*skip = 0;
	switch (type) {
	case CEPH_MSG_OSD_MAP:
	case CEPH_MSG_OSD_BACKOFF:
	case CEPH_MSG_WATCH_NOTIFY:
		return alloc_msg_with_page_vector(hdr);
	case CEPH_MSG_OSD_OPREPLY:
		return get_reply(con, hdr, skip);
	default:
		pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
			osd->o_osd, type);
		*skip = 1;
		return NULL;
	}
}

/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	if (get_osd(osd))
		return con;
	return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	put_osd(osd);
}

/*
 * authentication
 */
/*
 * Note: returned pointer is the address of a structure that's
 * managed separately. Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
						  int *proto, int force_new)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}

static int add_authorizer_challenge(struct ceph_connection *con,
				    void *challenge_buf, int challenge_buf_len)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	return ceph_auth_add_authorizer_challenge(ac, o->o_auth.authorizer,
					    challenge_buf, challenge_buf_len);
}

static int verify_authorizer_reply(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
	return ceph_monc_validate_auth(&osdc->client->monc);
}

static void osd_reencode_message(struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);

	if (type == CEPH_MSG_OSD_OP)
		encode_request_finish(msg);
}

static int osd_sign_message(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_sign_message(auth, msg);
}

static int osd_check_message_signature(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_check_message_signature(auth, msg);
}

static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,
	.put = put_osd_con,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.add_authorizer_challenge = add_authorizer_challenge,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.alloc_msg = alloc_msg,
	.reencode_message = osd_reencode_message,
	.sign_message = osd_sign_message,
	.check_message_signature = osd_check_message_signature,
};