// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/striper.h>

#define OSD_OPREPLY_FRONT_LEN   512

static struct kmem_cache        *ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

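/*
 * A rough sketch of a typical non-linger request lifecycle, for
 * orientation only (object name, pool id and buffer values here are
 * hypothetical; the real call sites live in fs/ceph and
 * drivers/block/rbd.c):
 *
 *	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
 *	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, off, len, 0, 0);
 *	ceph_oid_printf(&req->r_base_oid, "%s", "some_object");
 *	req->r_base_oloc.pool = pool_id;
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
 *	ceph_osdc_alloc_messages(req, GFP_NOFS);
 *	ceph_osdc_start_request(osdc, req, false);
 *	ret = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 */
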
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
                        struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
                          struct ceph_osd_linger_request *lreq);
static void clear_backoffs(struct ceph_osd *osd);

#if 1
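/*
 * Lock-debugging helpers.  rwsem_is_wrlocked() relies on the fact that
 * down_read_trylock() can only fail while a writer holds (or is queued
 * for) the semaphore, so a failed read trylock is taken as evidence of
 * a write lock.  It is a best-effort check, not an exact "current task
 * holds this for write" test.
 */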
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
        bool wrlocked = true;

        if (unlikely(down_read_trylock(sem))) {
                wrlocked = false;
                up_read(sem);
        }

        return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
        WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
        WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        WARN_ON(!(mutex_is_locked(&osd->lock) &&
                  rwsem_is_locked(&osdc->lock)) &&
                !rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
        WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
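/*
 * Worked example (hypothetical layout: 4M objects, no striping):
 * off=6M and *plen=4M map to objnum=1, objoff=2M, objlen=2M, and
 * *plen is shortened to 2M because the extent crosses the object
 * boundary at 8M.
 */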
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
                        u64 *objnum, u64 *objoff, u64 *objlen)
{
        u64 orig_len = *plen;
        u32 xlen;

        /* object extent? */
        ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
                                          objoff, &xlen);
        *objlen = xlen;
        if (*objlen < orig_len) {
                *plen = *objlen;
                dout(" skipping last %llu, final file extent %llu~%llu\n",
                     orig_len - *plen, off, *plen);
        }

        dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
        return 0;
}

static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
        memset(osd_data, 0, sizeof (*osd_data));
        osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

/*
 * Consumes @pages if @own_pages is true.
 */
static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
                        struct page **pages, u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
        osd_data->pages = pages;
        osd_data->length = length;
        osd_data->alignment = alignment;
        osd_data->pages_from_pool = pages_from_pool;
        osd_data->own_pages = own_pages;
}

/*
 * Consumes a ref on @pagelist.
 */
static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
                        struct ceph_pagelist *pagelist)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
        osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
                                   struct ceph_bio_iter *bio_pos,
                                   u32 bio_length)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
        osd_data->bio_pos = *bio_pos;
        osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data,
                                     struct ceph_bvec_iter *bvec_pos,
                                     u32 num_bvecs)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_BVECS;
        osd_data->bvec_pos = *bvec_pos;
        osd_data->num_bvecs = num_bvecs;
}

static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
        BUG_ON(which >= osd_req->r_num_ops);

        return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
                        unsigned int which)
{
        return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_raw_data_in(osd_req, which);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
                                    unsigned int which,
                                    struct ceph_bio_iter *bio_pos,
                                    u32 bio_length)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_bio_init(osd_data, bio_pos, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
                                      unsigned int which,
                                      struct bio_vec *bvecs, u32 num_bvecs,
                                      u32 bytes)
{
        struct ceph_osd_data *osd_data;
        struct ceph_bvec_iter it = {
                .bvecs = bvecs,
                .iter = { .bi_size = bytes },
        };

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvecs);

void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
                                         unsigned int which,
                                         struct ceph_bvec_iter *bvec_pos)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_bvecs_init(osd_data, bvec_pos, 0);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos);

static void osd_req_op_cls_request_info_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_info);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
        osd_req->r_ops[which].cls.indata_len += pagelist->length;
        osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
        osd_req->r_ops[which].cls.indata_len += length;
        osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
                                       unsigned int which,
                                       struct bio_vec *bvecs, u32 num_bvecs,
                                       u32 bytes)
{
        struct ceph_osd_data *osd_data;
        struct ceph_bvec_iter it = {
                .bvecs = bvecs,
                .iter = { .bi_size = bytes },
        };

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
        osd_req->r_ops[which].cls.indata_len += bytes;
        osd_req->r_ops[which].indata_len += bytes;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_bvecs);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, response_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
        switch (osd_data->type) {
        case CEPH_OSD_DATA_TYPE_NONE:
                return 0;
        case CEPH_OSD_DATA_TYPE_PAGES:
                return osd_data->length;
        case CEPH_OSD_DATA_TYPE_PAGELIST:
                return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
        case CEPH_OSD_DATA_TYPE_BIO:
                return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
        case CEPH_OSD_DATA_TYPE_BVECS:
                return osd_data->bvec_pos.iter.bi_size;
        default:
                WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
                return 0;
        }
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
                int num_pages;

                num_pages = calc_pages_for((u64)osd_data->alignment,
                                                (u64)osd_data->length);
                ceph_release_page_vector(osd_data->pages, num_pages);
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
                ceph_pagelist_release(osd_data->pagelist);
        }
        ceph_osd_data_init(osd_data);
}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
                        unsigned int which)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];

        switch (op->op) {
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_WRITEFULL:
                ceph_osd_data_release(&op->extent.osd_data);
                break;
        case CEPH_OSD_OP_CALL:
                ceph_osd_data_release(&op->cls.request_info);
                ceph_osd_data_release(&op->cls.request_data);
                ceph_osd_data_release(&op->cls.response_data);
                break;
        case CEPH_OSD_OP_SETXATTR:
        case CEPH_OSD_OP_CMPXATTR:
                ceph_osd_data_release(&op->xattr.osd_data);
                break;
        case CEPH_OSD_OP_STAT:
                ceph_osd_data_release(&op->raw_data_in);
                break;
        case CEPH_OSD_OP_NOTIFY_ACK:
                ceph_osd_data_release(&op->notify_ack.request_data);
                break;
        case CEPH_OSD_OP_NOTIFY:
                ceph_osd_data_release(&op->notify.request_data);
                ceph_osd_data_release(&op->notify.response_data);
                break;
        case CEPH_OSD_OP_LIST_WATCHERS:
                ceph_osd_data_release(&op->list_watchers.response_data);
                break;
        case CEPH_OSD_OP_COPY_FROM:
                ceph_osd_data_release(&op->copy_from.osd_data);
                break;
        default:
                break;
        }
}

/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
        ceph_oid_init(&t->base_oid);
        ceph_oloc_init(&t->base_oloc);
        ceph_oid_init(&t->target_oid);
        ceph_oloc_init(&t->target_oloc);

        ceph_osds_init(&t->acting);
        ceph_osds_init(&t->up);
        t->size = -1;
        t->min_size = -1;

        t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
                        const struct ceph_osd_request_target *src)
{
        ceph_oid_copy(&dest->base_oid, &src->base_oid);
        ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
        ceph_oid_copy(&dest->target_oid, &src->target_oid);
        ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

        dest->pgid = src->pgid; /* struct */
        dest->spgid = src->spgid; /* struct */
        dest->pg_num = src->pg_num;
        dest->pg_num_mask = src->pg_num_mask;
        ceph_osds_copy(&dest->acting, &src->acting);
        ceph_osds_copy(&dest->up, &src->up);
        dest->size = src->size;
        dest->min_size = src->min_size;
        dest->sort_bitwise = src->sort_bitwise;
        dest->recovery_deletes = src->recovery_deletes;

        dest->flags = src->flags;
        dest->paused = src->paused;

        dest->epoch = src->epoch;
        dest->last_force_resend = src->last_force_resend;

        dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
        ceph_oid_destroy(&t->base_oid);
        ceph_oloc_destroy(&t->base_oloc);
        ceph_oid_destroy(&t->target_oid);
        ceph_oloc_destroy(&t->target_oloc);
}

/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{
        WARN_ON(!RB_EMPTY_NODE(&req->r_node));
        WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
        WARN_ON(!list_empty(&req->r_private_item));
        WARN_ON(req->r_osd);
}

static void ceph_osdc_release_request(struct kref *kref)
{
        struct ceph_osd_request *req = container_of(kref,
                                            struct ceph_osd_request, r_kref);
        unsigned int which;

        dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
             req->r_request, req->r_reply);
        request_release_checks(req);

        if (req->r_request)
                ceph_msg_put(req->r_request);
        if (req->r_reply)
                ceph_msg_put(req->r_reply);

        for (which = 0; which < req->r_num_ops; which++)
                osd_req_op_data_release(req, which);

        target_destroy(&req->r_t);
        ceph_put_snap_context(req->r_snapc);

        if (req->r_mempool)
                mempool_free(req, req->r_osdc->req_mempool);
        else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
                kmem_cache_free(ceph_osd_request_cache, req);
        else
                kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
        dout("%s %p (was %d)\n", __func__, req,
             kref_read(&req->r_kref));
        kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
        if (req) {
                dout("%s %p (was %d)\n", __func__, req,
                     kref_read(&req->r_kref));
                kref_put(&req->r_kref, ceph_osdc_release_request);
        }
}
EXPORT_SYMBOL(ceph_osdc_put_request);

static void request_init(struct ceph_osd_request *req)
{
        /* req only, each op is zeroed in _osd_req_op_init() */
        memset(req, 0, sizeof(*req));

        kref_init(&req->r_kref);
        init_completion(&req->r_completion);
        RB_CLEAR_NODE(&req->r_node);
        RB_CLEAR_NODE(&req->r_mc_node);
        INIT_LIST_HEAD(&req->r_private_item);

        target_init(&req->r_t);
}

/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        bool mempool = req->r_mempool;
        unsigned int num_ops = req->r_num_ops;
        u64 snapid = req->r_snapid;
        struct ceph_snap_context *snapc = req->r_snapc;
        bool linger = req->r_linger;
        struct ceph_msg *request_msg = req->r_request;
        struct ceph_msg *reply_msg = req->r_reply;

        dout("%s req %p\n", __func__, req);
        WARN_ON(kref_read(&req->r_kref) != 1);
        request_release_checks(req);

        WARN_ON(kref_read(&request_msg->kref) != 1);
        WARN_ON(kref_read(&reply_msg->kref) != 1);
        target_destroy(&req->r_t);

        request_init(req);
        req->r_osdc = osdc;
        req->r_mempool = mempool;
        req->r_num_ops = num_ops;
        req->r_snapid = snapid;
        req->r_snapc = snapc;
        req->r_linger = linger;
        req->r_request = request_msg;
        req->r_reply = reply_msg;
}

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
                                               struct ceph_snap_context *snapc,
                                               unsigned int num_ops,
                                               bool use_mempool,
                                               gfp_t gfp_flags)
{
        struct ceph_osd_request *req;

        if (use_mempool) {
                BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
                req = mempool_alloc(osdc->req_mempool, gfp_flags);
        } else if (num_ops <= CEPH_OSD_SLAB_OPS) {
                req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
        } else {
                BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
                req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags);
        }
        if (unlikely(!req))
                return NULL;

        request_init(req);
        req->r_osdc = osdc;
        req->r_mempool = use_mempool;
        req->r_num_ops = num_ops;
        req->r_snapid = CEPH_NOSNAP;
        req->r_snapc = ceph_get_snap_context(snapc);

        dout("%s req %p\n", __func__, req);
        return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);

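/*
 * On-wire size of an object locator: an 8-byte pool id, two 4-byte
 * legacy fields, and a 4-byte-length-prefixed namespace string.  This
 * is meant to mirror the layout written when the request message is
 * encoded.
 */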
static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
{
        return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}

static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp,
                                      int num_request_data_items,
                                      int num_reply_data_items)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        struct ceph_msg *msg;
        int msg_size;

        WARN_ON(req->r_request || req->r_reply);
        WARN_ON(ceph_oid_empty(&req->r_base_oid));
        WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

        /* create request message */
        msg_size = CEPH_ENCODING_START_BLK_LEN +
                        CEPH_PGID_ENCODING_LEN + 1; /* spgid */
        msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
        msg_size += CEPH_ENCODING_START_BLK_LEN +
                        sizeof(struct ceph_osd_reqid); /* reqid */
        msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
        msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
        msg_size += CEPH_ENCODING_START_BLK_LEN +
                        ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
        msg_size += 4 + req->r_base_oid.name_len; /* oid */
        msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
        msg_size += 8; /* snapid */
        msg_size += 8; /* snap_seq */
        msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
        msg_size += 4 + 8; /* retry_attempt, features */

        if (req->r_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op, msg_size,
                                       num_request_data_items);
        else
                msg = ceph_msg_new2(CEPH_MSG_OSD_OP, msg_size,
                                    num_request_data_items, gfp, true);
        if (!msg)
                return -ENOMEM;

        memset(msg->front.iov_base, 0, msg->front.iov_len);
        req->r_request = msg;

        /* create reply message */
        msg_size = OSD_OPREPLY_FRONT_LEN;
        msg_size += req->r_base_oid.name_len;
        msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

        if (req->r_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op_reply, msg_size,
                                       num_reply_data_items);
        else
                msg = ceph_msg_new2(CEPH_MSG_OSD_OPREPLY, msg_size,
                                    num_reply_data_items, gfp, true);
        if (!msg)
                return -ENOMEM;

        req->r_reply = msg;

        return 0;
}

static bool osd_req_opcode_valid(u16 opcode)
{
        switch (opcode) {
#define GENERATE_CASE(op, opcode, str)  case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
        default:
                return false;
        }
}

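/*
 * Count the data items that the request and reply messages will carry.
 * Note that CEPH_OSD_OP_CALL accounts for two request items: the
 * class/method pagelist set up by osd_req_op_cls_init() (request_info)
 * and the input blob (request_data).
 */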
static void get_num_data_items(struct ceph_osd_request *req,
                               int *num_request_data_items,
                               int *num_reply_data_items)
{
        struct ceph_osd_req_op *op;

        *num_request_data_items = 0;
        *num_reply_data_items = 0;

        for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
                switch (op->op) {
                /* request */
                case CEPH_OSD_OP_WRITE:
                case CEPH_OSD_OP_WRITEFULL:
                case CEPH_OSD_OP_SETXATTR:
                case CEPH_OSD_OP_CMPXATTR:
                case CEPH_OSD_OP_NOTIFY_ACK:
                case CEPH_OSD_OP_COPY_FROM:
                        *num_request_data_items += 1;
                        break;

                /* reply */
                case CEPH_OSD_OP_STAT:
                case CEPH_OSD_OP_READ:
                case CEPH_OSD_OP_LIST_WATCHERS:
                        *num_reply_data_items += 1;
                        break;

                /* both */
                case CEPH_OSD_OP_NOTIFY:
                        *num_request_data_items += 1;
                        *num_reply_data_items += 1;
                        break;
                case CEPH_OSD_OP_CALL:
                        *num_request_data_items += 2;
                        *num_reply_data_items += 1;
                        break;

                default:
                        WARN_ON(!osd_req_opcode_valid(op->op));
                        break;
                }
        }
}

/*
 * oid, oloc and OSD op opcode(s) must be filled in before this function
 * is called.
 */
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
        int num_request_data_items, num_reply_data_items;

        get_num_data_items(req, &num_request_data_items, &num_reply_data_items);
        return __ceph_osdc_alloc_messages(req, gfp, num_request_data_items,
                                          num_reply_data_items);
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
                 u16 opcode, u32 flags)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        BUG_ON(!osd_req_opcode_valid(opcode));

        op = &osd_req->r_ops[which];
        memset(op, 0, sizeof (*op));
        op->op = opcode;
        op->flags = flags;

        return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
                     unsigned int which, u16 opcode, u32 flags)
{
        (void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
                                unsigned int which, u16 opcode,
                                u64 offset, u64 length,
                                u64 truncate_size, u32 truncate_seq)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        size_t payload_len = 0;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
               opcode != CEPH_OSD_OP_TRUNCATE);

        op->extent.offset = offset;
        op->extent.length = length;
        op->extent.truncate_size = truncate_size;
        op->extent.truncate_seq = truncate_seq;
        if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
                payload_len += length;

        op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
                                unsigned int which, u64 length)
{
        struct ceph_osd_req_op *op;
        u64 previous;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];
        previous = op->extent.length;

        if (length == previous)
                return;         /* Nothing to do */
        BUG_ON(length > previous);

        op->extent.length = length;
        if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
                op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

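/*
 * Duplicate the extent op at @which into slot @which + 1, shifted by
 * @offset_inc.  For example (hypothetical values), if r_ops[which] is
 * an 8192-byte write at offset 0, offset_inc=4096 makes the new op a
 * 4096-byte write at offset 4096; the caller is expected to shrink
 * r_ops[which] (e.g. via osd_req_op_extent_update()) so the two ops
 * do not overlap.
 */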
void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
                                unsigned int which, u64 offset_inc)
{
        struct ceph_osd_req_op *op, *prev_op;

        BUG_ON(which + 1 >= osd_req->r_num_ops);

        prev_op = &osd_req->r_ops[which];
        op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
        /* dup previous one */
        op->indata_len = prev_op->indata_len;
        op->outdata_len = prev_op->outdata_len;
        op->extent = prev_op->extent;
        /* adjust offset */
        op->extent.offset += offset_inc;
        op->extent.length -= offset_inc;

        if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
                op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);

int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
                        const char *class, const char *method)
{
        struct ceph_osd_req_op *op;
        struct ceph_pagelist *pagelist;
        size_t payload_len = 0;
        size_t size;
        int ret;

        op = _osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);

        pagelist = ceph_pagelist_alloc(GFP_NOFS);
        if (!pagelist)
                return -ENOMEM;

        op->cls.class_name = class;
        size = strlen(class);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.class_len = size;
        ret = ceph_pagelist_append(pagelist, class, size);
        if (ret)
                goto err_pagelist_free;
        payload_len += size;

        op->cls.method_name = method;
        size = strlen(method);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.method_len = size;
        ret = ceph_pagelist_append(pagelist, method, size);
        if (ret)
                goto err_pagelist_free;
        payload_len += size;

        osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);
        op->indata_len = payload_len;
        return 0;

err_pagelist_free:
        ceph_pagelist_release(pagelist);
        return ret;
}
EXPORT_SYMBOL(osd_req_op_cls_init);

int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
                          u16 opcode, const char *name, const void *value,
                          size_t size, u8 cmp_op, u8 cmp_mode)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        struct ceph_pagelist *pagelist;
        size_t payload_len;
        int ret;

        BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

        pagelist = ceph_pagelist_alloc(GFP_NOFS);
        if (!pagelist)
                return -ENOMEM;

        payload_len = strlen(name);
        op->xattr.name_len = payload_len;
        ret = ceph_pagelist_append(pagelist, name, payload_len);
        if (ret)
                goto err_pagelist_free;

        op->xattr.value_len = size;
        ret = ceph_pagelist_append(pagelist, value, size);
        if (ret)
                goto err_pagelist_free;
        payload_len += size;

        op->xattr.cmp_op = cmp_op;
        op->xattr.cmp_mode = cmp_mode;

        ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
        op->indata_len = payload_len;
        return 0;

err_pagelist_free:
        ceph_pagelist_release(pagelist);
        return ret;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);

/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
                                  u64 cookie, u8 watch_opcode)
{
        struct ceph_osd_req_op *op;

        op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
        op->watch.cookie = cookie;
        op->watch.op = watch_opcode;
        op->watch.gen = 0;
}

void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
                                unsigned int which,
                                u64 expected_object_size,
                                u64 expected_write_size)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      CEPH_OSD_OP_SETALLOCHINT,
                                                      0);

        op->alloc_hint.expected_object_size = expected_object_size;
        op->alloc_hint.expected_write_size = expected_write_size;

        /*
         * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
         * not worth a feature bit.  Set FAILOK per-op flag to make
         * sure older osds don't trip over an unsupported opcode.
         */
        op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);

static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
                                struct ceph_osd_data *osd_data)
{
        u64 length = ceph_osd_data_length(osd_data);

        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
                BUG_ON(length > (u64) SIZE_MAX);
                if (length)
                        ceph_msg_data_add_pages(msg, osd_data->pages,
                                        length, osd_data->alignment, false);
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
                BUG_ON(!length);
                ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
                ceph_msg_data_add_bio(msg, &osd_data->bio_pos, length);
#endif
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BVECS) {
                ceph_msg_data_add_bvecs(msg, &osd_data->bvec_pos);
        } else {
                BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
        }
}

static u32 osd_req_encode_op(struct ceph_osd_op *dst,
                             const struct ceph_osd_req_op *src)
{
        switch (src->op) {
        case CEPH_OSD_OP_STAT:
                break;
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_WRITEFULL:
        case CEPH_OSD_OP_ZERO:
        case CEPH_OSD_OP_TRUNCATE:
                dst->extent.offset = cpu_to_le64(src->extent.offset);
                dst->extent.length = cpu_to_le64(src->extent.length);
                dst->extent.truncate_size =
                        cpu_to_le64(src->extent.truncate_size);
                dst->extent.truncate_seq =
                        cpu_to_le32(src->extent.truncate_seq);
                break;
        case CEPH_OSD_OP_CALL:
                dst->cls.class_len = src->cls.class_len;
                dst->cls.method_len = src->cls.method_len;
                dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
                break;
        case CEPH_OSD_OP_WATCH:
                dst->watch.cookie = cpu_to_le64(src->watch.cookie);
                dst->watch.ver = cpu_to_le64(0);
                dst->watch.op = src->watch.op;
                dst->watch.gen = cpu_to_le32(src->watch.gen);
                break;
        case CEPH_OSD_OP_NOTIFY_ACK:
                break;
        case CEPH_OSD_OP_NOTIFY:
                dst->notify.cookie = cpu_to_le64(src->notify.cookie);
                break;
        case CEPH_OSD_OP_LIST_WATCHERS:
                break;
        case CEPH_OSD_OP_SETALLOCHINT:
                dst->alloc_hint.expected_object_size =
                    cpu_to_le64(src->alloc_hint.expected_object_size);
                dst->alloc_hint.expected_write_size =
                    cpu_to_le64(src->alloc_hint.expected_write_size);
                break;
        case CEPH_OSD_OP_SETXATTR:
        case CEPH_OSD_OP_CMPXATTR:
                dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
                dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
                dst->xattr.cmp_op = src->xattr.cmp_op;
                dst->xattr.cmp_mode = src->xattr.cmp_mode;
                break;
        case CEPH_OSD_OP_CREATE:
        case CEPH_OSD_OP_DELETE:
                break;
        case CEPH_OSD_OP_COPY_FROM:
                dst->copy_from.snapid = cpu_to_le64(src->copy_from.snapid);
                dst->copy_from.src_version =
                        cpu_to_le64(src->copy_from.src_version);
                dst->copy_from.flags = src->copy_from.flags;
                dst->copy_from.src_fadvise_flags =
                        cpu_to_le32(src->copy_from.src_fadvise_flags);
                break;
        default:
                pr_err("unsupported osd opcode %s\n",
                        ceph_osd_op_name(src->op));
                WARN_ON(1);

                return 0;
        }

        dst->op = cpu_to_le16(src->op);
        dst->flags = cpu_to_le32(src->flags);
        dst->payload_len = cpu_to_le32(src->indata_len);

        return src->indata_len;
}

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
                                               struct ceph_file_layout *layout,
                                               struct ceph_vino vino,
                                               u64 off, u64 *plen,
                                               unsigned int which, int num_ops,
                                               int opcode, int flags,
                                               struct ceph_snap_context *snapc,
                                               u32 truncate_seq,
                                               u64 truncate_size,
                                               bool use_mempool)
{
        struct ceph_osd_request *req;
        u64 objnum = 0;
        u64 objoff = 0;
        u64 objlen = 0;
        int r;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
               opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

        req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
                                        GFP_NOFS);
        if (!req) {
                r = -ENOMEM;
                goto fail;
        }

        /* calculate max write size */
        r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
        if (r)
                goto fail;

        if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
                osd_req_op_init(req, which, opcode, 0);
        } else {
                u32 object_size = layout->object_size;
                u32 object_base = off - objoff;
                if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
                        if (truncate_size <= object_base) {
                                truncate_size = 0;
                        } else {
                                truncate_size -= object_base;
                                if (truncate_size > object_size)
                                        truncate_size = object_size;
                        }
                }
                osd_req_op_extent_init(req, which, opcode, objoff, objlen,
                                       truncate_size, truncate_seq);
        }

        req->r_flags = flags;
        req->r_base_oloc.pool = layout->pool_id;
        req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
        ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

        req->r_snapid = vino.snap;
        if (flags & CEPH_OSD_FLAG_WRITE)
                req->r_data_offset = off;

        if (num_ops > 1)
                /*
                 * This is a special case for ceph_writepages_start(), but it
                 * also covers ceph_uninline_data().  If more multi-op request
                 * use cases emerge, we will need a separate helper.
                 */
                r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_ops, 0);
        else
                r = ceph_osdc_alloc_messages(req, GFP_NOFS);
        if (r)
                goto fail;

        return req;

fail:
        ceph_osdc_put_request(req);
        return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
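
/*
 * DEFINE_RB_FUNCS(request, ...) expands into the insert_request(),
 * lookup_request() and erase_request() helpers used throughout this
 * file (and the _mc variants for the map-check tree).
 */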

/*
 * Call @fn on each OSD request as long as @fn returns 0.
 */
static void for_each_request(struct ceph_osd_client *osdc,
                        int (*fn)(struct ceph_osd_request *req, void *arg),
                        void *arg)
{
        struct rb_node *n, *p;

        for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
                struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

                for (p = rb_first(&osd->o_requests); p; ) {
                        struct ceph_osd_request *req =
                            rb_entry(p, struct ceph_osd_request, r_node);

                        p = rb_next(p);
                        if (fn(req, arg))
                                return;
                }
        }

        for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
                struct ceph_osd_request *req =
                    rb_entry(p, struct ceph_osd_request, r_node);

                p = rb_next(p);
                if (fn(req, arg))
                        return;
        }
}

static bool osd_homeless(struct ceph_osd *osd)
{
        return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
        verify_osdc_locked(osd->o_osdc);

        return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
        refcount_set(&osd->o_ref, 1);
        RB_CLEAR_NODE(&osd->o_node);
        osd->o_requests = RB_ROOT;
        osd->o_linger_requests = RB_ROOT;
        osd->o_backoff_mappings = RB_ROOT;
        osd->o_backoffs_by_id = RB_ROOT;
        INIT_LIST_HEAD(&osd->o_osd_lru);
        INIT_LIST_HEAD(&osd->o_keepalive_item);
        osd->o_incarnation = 1;
        mutex_init(&osd->lock);
}

static void osd_cleanup(struct ceph_osd *osd)
{
        WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
        WARN_ON(!list_empty(&osd->o_osd_lru));
        WARN_ON(!list_empty(&osd->o_keepalive_item));

        if (osd->o_auth.authorizer) {
                WARN_ON(osd_homeless(osd));
                ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
        }
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
        struct ceph_osd *osd;

        WARN_ON(onum == CEPH_HOMELESS_OSD);

        osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
        osd_init(osd);
        osd->o_osdc = osdc;
        osd->o_osd = onum;

        ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

        return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
        if (refcount_inc_not_zero(&osd->o_ref)) {
                dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
                     refcount_read(&osd->o_ref));
                return osd;
        } else {
                dout("get_osd %p FAIL\n", osd);
                return NULL;
        }
}

static void put_osd(struct ceph_osd *osd)
{
        dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
             refcount_read(&osd->o_ref) - 1);
        if (refcount_dec_and_test(&osd->o_ref)) {
                osd_cleanup(osd);
                kfree(osd);
        }
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)

static void __move_osd_to_lru(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
        BUG_ON(!list_empty(&osd->o_osd_lru));

        spin_lock(&osdc->osd_lru_lock);
        list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
        spin_unlock(&osdc->osd_lru_lock);

        osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
        if (RB_EMPTY_ROOT(&osd->o_requests) &&
            RB_EMPTY_ROOT(&osd->o_linger_requests))
                __move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        spin_lock(&osdc->osd_lru_lock);
        if (!list_empty(&osd->o_osd_lru))
                list_del_init(&osd->o_osd_lru);
        spin_unlock(&osdc->osd_lru_lock);
}

/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;
        struct rb_node *n;

        verify_osdc_wrlocked(osdc);
        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        ceph_con_close(&osd->o_con);

        for (n = rb_first(&osd->o_requests); n; ) {
                struct ceph_osd_request *req =
                    rb_entry(n, struct ceph_osd_request, r_node);

                n = rb_next(n); /* unlink_request() */

                dout(" reassigning req %p tid %llu\n", req, req->r_tid);
                unlink_request(osd, req);
                link_request(&osdc->homeless_osd, req);
        }
        for (n = rb_first(&osd->o_linger_requests); n; ) {
                struct ceph_osd_linger_request *lreq =
                    rb_entry(n, struct ceph_osd_linger_request, node);

                n = rb_next(n); /* unlink_linger() */

                dout(" reassigning lreq %p linger_id %llu\n", lreq,
                     lreq->linger_id);
                unlink_linger(osd, lreq);
                link_linger(&osdc->homeless_osd, lreq);
        }
        clear_backoffs(osd);

        __remove_osd_from_lru(osd);
        erase_osd(&osdc->osds, osd);
        put_osd(osd);
}

/*
 * reset the OSD connection
 */
static int reopen_osd(struct ceph_osd *osd)
{
        struct ceph_entity_addr *peer_addr;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        if (RB_EMPTY_ROOT(&osd->o_requests) &&
            RB_EMPTY_ROOT(&osd->o_linger_requests)) {
                close_osd(osd);
                return -ENODEV;
        }

        peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
        if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
                        !ceph_con_opened(&osd->o_con)) {
                struct rb_node *n;

                dout("osd addr hasn't changed and connection never opened, "
                     "letting msgr retry\n");
                /* touch each r_stamp for handle_timeout()'s benefit */
1368                 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
1369                         struct ceph_osd_request *req =
1370                             rb_entry(n, struct ceph_osd_request, r_node);
1371                         req->r_stamp = jiffies;
1372                 }
1373
1374                 return -EAGAIN;
1375         }
1376
1377         ceph_con_close(&osd->o_con);
1378         ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
1379         osd->o_incarnation++;
1380
1381         return 0;
1382 }
1383
1384 static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
1385                                           bool wrlocked)
1386 {
1387         struct ceph_osd *osd;
1388
1389         if (wrlocked)
1390                 verify_osdc_wrlocked(osdc);
1391         else
1392                 verify_osdc_locked(osdc);
1393
1394         if (o != CEPH_HOMELESS_OSD)
1395                 osd = lookup_osd(&osdc->osds, o);
1396         else
1397                 osd = &osdc->homeless_osd;
1398         if (!osd) {
1399                 if (!wrlocked)
1400                         return ERR_PTR(-EAGAIN);
1401
1402                 osd = create_osd(osdc, o);
1403                 insert_osd(&osdc->osds, osd);
1404                 ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
1405                               &osdc->osdmap->osd_addr[osd->o_osd]);
1406         }
1407
1408         dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
1409         return osd;
1410 }
1411
1412 /*
1413  * Create request <-> OSD session relation.
1414  *
1415  * @req has to be assigned a tid, @osd may be homeless.
1416  */
1417 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
1418 {
1419         verify_osd_locked(osd);
1420         WARN_ON(!req->r_tid || req->r_osd);
1421         dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
1422              req, req->r_tid);
1423
1424         if (!osd_homeless(osd))
1425                 __remove_osd_from_lru(osd);
1426         else
1427                 atomic_inc(&osd->o_osdc->num_homeless);
1428
1429         get_osd(osd);
1430         insert_request(&osd->o_requests, req);
1431         req->r_osd = osd;
1432 }
1433
1434 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
1435 {
1436         verify_osd_locked(osd);
1437         WARN_ON(req->r_osd != osd);
1438         dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
1439              req, req->r_tid);
1440
1441         req->r_osd = NULL;
1442         erase_request(&osd->o_requests, req);
1443         put_osd(osd);
1444
1445         if (!osd_homeless(osd))
1446                 maybe_move_osd_to_lru(osd);
1447         else
1448                 atomic_dec(&osd->o_osdc->num_homeless);
1449 }
1450
1451 static bool __pool_full(struct ceph_pg_pool_info *pi)
1452 {
1453         return pi->flags & CEPH_POOL_FLAG_FULL;
1454 }
1455
1456 static bool have_pool_full(struct ceph_osd_client *osdc)
1457 {
1458         struct rb_node *n;
1459
1460         for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
1461                 struct ceph_pg_pool_info *pi =
1462                     rb_entry(n, struct ceph_pg_pool_info, node);
1463
1464                 if (__pool_full(pi))
1465                         return true;
1466         }
1467
1468         return false;
1469 }
1470
1471 static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
1472 {
1473         struct ceph_pg_pool_info *pi;
1474
1475         pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
1476         if (!pi)
1477                 return false;
1478
1479         return __pool_full(pi);
1480 }
1481
1482 /*
1483  * Returns whether a request should be blocked from being sent
1484  * based on the current osdmap and osd_client settings.
1485  */
1486 static bool target_should_be_paused(struct ceph_osd_client *osdc,
1487                                     const struct ceph_osd_request_target *t,
1488                                     struct ceph_pg_pool_info *pi)
1489 {
1490         bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
1491         bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
1492                        ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1493                        __pool_full(pi);
1494
1495         WARN_ON(pi->id != t->target_oloc.pool);
1496         return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
1497                ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
1498                (osdc->osdmap->epoch < osdc->epoch_barrier);
1499 }
1500
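/*
 * Example: with only PAUSEWR set on the osdmap, a CEPH_OSD_FLAG_WRITE
 * request is paused via pausewr above, while a read-only request
 * proceeds unless PAUSERD is also set or we are still behind the
 * epoch barrier.
 */
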
1501 enum calc_target_result {
1502         CALC_TARGET_NO_ACTION = 0,
1503         CALC_TARGET_NEED_RESEND,
1504         CALC_TARGET_POOL_DNE,
1505 };
1506
1507 static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1508                                            struct ceph_osd_request_target *t,
1509                                            bool any_change)
1510 {
1511         struct ceph_pg_pool_info *pi;
1512         struct ceph_pg pgid, last_pgid;
1513         struct ceph_osds up, acting;
1514         bool force_resend = false;
1515         bool unpaused = false;
1516         bool legacy_change = false;
1517         bool split = false;
1518         bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
1519         bool recovery_deletes = ceph_osdmap_flag(osdc,
1520                                                  CEPH_OSDMAP_RECOVERY_DELETES);
1521         enum calc_target_result ct_res;
1522
1523         t->epoch = osdc->osdmap->epoch;
1524         pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
1525         if (!pi) {
1526                 t->osd = CEPH_HOMELESS_OSD;
1527                 ct_res = CALC_TARGET_POOL_DNE;
1528                 goto out;
1529         }
1530
1531         if (osdc->osdmap->epoch == pi->last_force_request_resend) {
1532                 if (t->last_force_resend < pi->last_force_request_resend) {
1533                         t->last_force_resend = pi->last_force_request_resend;
1534                         force_resend = true;
1535                 } else if (t->last_force_resend == 0) {
1536                         force_resend = true;
1537                 }
1538         }
1539
1540         /* apply tiering */
1541         ceph_oid_copy(&t->target_oid, &t->base_oid);
1542         ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
1543         if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
1544                 if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
1545                         t->target_oloc.pool = pi->read_tier;
1546                 if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
1547                         t->target_oloc.pool = pi->write_tier;
1548
1549                 pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
1550                 if (!pi) {
1551                         t->osd = CEPH_HOMELESS_OSD;
1552                         ct_res = CALC_TARGET_POOL_DNE;
1553                         goto out;
1554                 }
1555         }
1556
1557         __ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc, &pgid);
1558         last_pgid.pool = pgid.pool;
1559         last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);
1560
1561         ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
1562         if (any_change &&
1563             ceph_is_new_interval(&t->acting,
1564                                  &acting,
1565                                  &t->up,
1566                                  &up,
1567                                  t->size,
1568                                  pi->size,
1569                                  t->min_size,
1570                                  pi->min_size,
1571                                  t->pg_num,
1572                                  pi->pg_num,
1573                                  t->sort_bitwise,
1574                                  sort_bitwise,
1575                                  t->recovery_deletes,
1576                                  recovery_deletes,
1577                                  &last_pgid))
1578                 force_resend = true;
1579
1580         if (t->paused && !target_should_be_paused(osdc, t, pi)) {
1581                 t->paused = false;
1582                 unpaused = true;
1583         }
1584         legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
1585                         ceph_osds_changed(&t->acting, &acting, any_change);
1586         if (t->pg_num)
1587                 split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);
1588
1589         if (legacy_change || force_resend || split) {
1590                 t->pgid = pgid; /* struct */
1591                 ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
1592                 ceph_osds_copy(&t->acting, &acting);
1593                 ceph_osds_copy(&t->up, &up);
1594                 t->size = pi->size;
1595                 t->min_size = pi->min_size;
1596                 t->pg_num = pi->pg_num;
1597                 t->pg_num_mask = pi->pg_num_mask;
1598                 t->sort_bitwise = sort_bitwise;
1599                 t->recovery_deletes = recovery_deletes;
1600
1601                 t->osd = acting.primary;
1602         }
1603
1604         if (unpaused || legacy_change || force_resend || split)
1605                 ct_res = CALC_TARGET_NEED_RESEND;
1606         else
1607                 ct_res = CALC_TARGET_NO_ACTION;
1608
1609 out:
1610         dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
1611              legacy_change, force_resend, split, ct_res, t->osd);
1612         return ct_res;
1613 }
1614
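/*
 * Illustrative sketch, not part of the upstream file: a hypothetical
 * retarget_request() helper showing how a caller acts on the result
 * of calc_target(); __submit_request() below is the real thing.
 */
#if 0 /* example only */
static void retarget_request(struct ceph_osd_client *osdc,
                             struct ceph_osd_request *req)
{
        switch (calc_target(osdc, &req->r_t, false)) {
        case CALC_TARGET_NO_ACTION:
                break;                  /* mapping unchanged, nothing to do */
        case CALC_TARGET_NEED_RESEND:
                /* move @req to the session for req->r_t.osd and resend */
                break;
        case CALC_TARGET_POOL_DNE:
                /* pool gone?  confirm against the newest osdmap, see
                 * send_map_check() below */
                break;
        }
}
#endif
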
1615 static struct ceph_spg_mapping *alloc_spg_mapping(void)
1616 {
1617         struct ceph_spg_mapping *spg;
1618
1619         spg = kmalloc(sizeof(*spg), GFP_NOIO);
1620         if (!spg)
1621                 return NULL;
1622
1623         RB_CLEAR_NODE(&spg->node);
1624         spg->backoffs = RB_ROOT;
1625         return spg;
1626 }
1627
1628 static void free_spg_mapping(struct ceph_spg_mapping *spg)
1629 {
1630         WARN_ON(!RB_EMPTY_NODE(&spg->node));
1631         WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));
1632
1633         kfree(spg);
1634 }
1635
1636 /*
1637  * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
1638  * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
1639  * defined only within a specific spgid; it does not pass anything to
1640  * children on split, or to another primary.
1641  */
1642 DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
1643                  RB_BYPTR, const struct ceph_spg *, node)
1644
1645 static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
1646 {
1647         return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
1648 }
1649
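/*
 * Note: 0x100000000 is one larger than any 32-bit hash_reverse_bits
 * value, so a MAX hoid's bitwise key sorts after every real object's,
 * consistent with the is_max checks in hoid_compare() below.
 */
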
1650 static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
1651                                    void **pkey, size_t *pkey_len)
1652 {
1653         if (hoid->key_len) {
1654                 *pkey = hoid->key;
1655                 *pkey_len = hoid->key_len;
1656         } else {
1657                 *pkey = hoid->oid;
1658                 *pkey_len = hoid->oid_len;
1659         }
1660 }
1661
1662 static int compare_names(const void *name1, size_t name1_len,
1663                          const void *name2, size_t name2_len)
1664 {
1665         int ret;
1666
1667         ret = memcmp(name1, name2, min(name1_len, name2_len));
1668         if (!ret) {
1669                 if (name1_len < name2_len)
1670                         ret = -1;
1671                 else if (name1_len > name2_len)
1672                         ret = 1;
1673         }
1674         return ret;
1675 }
1676
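/*
 * Example: compare_names("ab", 2, "abc", 3) returns -1 -- names that
 * are equal over the shorter length order by length, i.e. memcmp()
 * semantics extended to unequal-length names.
 */
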
1677 static int hoid_compare(const struct ceph_hobject_id *lhs,
1678                         const struct ceph_hobject_id *rhs)
1679 {
1680         void *effective_key1, *effective_key2;
1681         size_t effective_key1_len, effective_key2_len;
1682         int ret;
1683
1684         if (lhs->is_max < rhs->is_max)
1685                 return -1;
1686         if (lhs->is_max > rhs->is_max)
1687                 return 1;
1688
1689         if (lhs->pool < rhs->pool)
1690                 return -1;
1691         if (lhs->pool > rhs->pool)
1692                 return 1;
1693
1694         if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
1695                 return -1;
1696         if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
1697                 return 1;
1698
1699         ret = compare_names(lhs->nspace, lhs->nspace_len,
1700                             rhs->nspace, rhs->nspace_len);
1701         if (ret)
1702                 return ret;
1703
1704         hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
1705         hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
1706         ret = compare_names(effective_key1, effective_key1_len,
1707                             effective_key2, effective_key2_len);
1708         if (ret)
1709                 return ret;
1710
1711         ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
1712         if (ret)
1713                 return ret;
1714
1715         if (lhs->snapid < rhs->snapid)
1716                 return -1;
1717         if (lhs->snapid > rhs->snapid)
1718                 return 1;
1719
1720         return 0;
1721 }
1722
1723 /*
1724  * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
1725  * compat stuff here.
1726  *
1727  * Assumes @hoid is zero-initialized.
1728  */
1729 static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
1730 {
1731         u8 struct_v;
1732         u32 struct_len;
1733         int ret;
1734
1735         ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
1736                                   &struct_len);
1737         if (ret)
1738                 return ret;
1739
1740         if (struct_v < 4) {
1741                 pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
1742                 goto e_inval;
1743         }
1744
1745         hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
1746                                                 GFP_NOIO);
1747         if (IS_ERR(hoid->key)) {
1748                 ret = PTR_ERR(hoid->key);
1749                 hoid->key = NULL;
1750                 return ret;
1751         }
1752
1753         hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
1754                                                 GFP_NOIO);
1755         if (IS_ERR(hoid->oid)) {
1756                 ret = PTR_ERR(hoid->oid);
1757                 hoid->oid = NULL;
1758                 return ret;
1759         }
1760
1761         ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
1762         ceph_decode_32_safe(p, end, hoid->hash, e_inval);
1763         ceph_decode_8_safe(p, end, hoid->is_max, e_inval);
1764
1765         hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
1766                                                    GFP_NOIO);
1767         if (IS_ERR(hoid->nspace)) {
1768                 ret = PTR_ERR(hoid->nspace);
1769                 hoid->nspace = NULL;
1770                 return ret;
1771         }
1772
1773         ceph_decode_64_safe(p, end, hoid->pool, e_inval);
1774
1775         ceph_hoid_build_hash_cache(hoid);
1776         return 0;
1777
1778 e_inval:
1779         return -EINVAL;
1780 }
1781
1782 static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
1783 {
1784         return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
1785                4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
1786 }
1787
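/*
 * Worked example: for key "" (0), oid "rbd_header.abc" (14) and
 * nspace "" (0) this is 8 + 4 + 1 + 8 fixed bytes plus three 4-byte
 * length prefixes plus 14 string bytes = 47.  That value is the
 * struct_len passed to ceph_start_encoding() in encode_hoid() below;
 * the encoding header itself is extra.
 */
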
1788 static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
1789 {
1790         ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
1791         ceph_encode_string(p, end, hoid->key, hoid->key_len);
1792         ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
1793         ceph_encode_64(p, hoid->snapid);
1794         ceph_encode_32(p, hoid->hash);
1795         ceph_encode_8(p, hoid->is_max);
1796         ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
1797         ceph_encode_64(p, hoid->pool);
1798 }
1799
1800 static void free_hoid(struct ceph_hobject_id *hoid)
1801 {
1802         if (hoid) {
1803                 kfree(hoid->key);
1804                 kfree(hoid->oid);
1805                 kfree(hoid->nspace);
1806                 kfree(hoid);
1807         }
1808 }
1809
1810 static struct ceph_osd_backoff *alloc_backoff(void)
1811 {
1812         struct ceph_osd_backoff *backoff;
1813
1814         backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
1815         if (!backoff)
1816                 return NULL;
1817
1818         RB_CLEAR_NODE(&backoff->spg_node);
1819         RB_CLEAR_NODE(&backoff->id_node);
1820         return backoff;
1821 }
1822
1823 static void free_backoff(struct ceph_osd_backoff *backoff)
1824 {
1825         WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
1826         WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));
1827
1828         free_hoid(backoff->begin);
1829         free_hoid(backoff->end);
1830         kfree(backoff);
1831 }
1832
1833 /*
1834  * Within a specific spgid, backoffs are managed by ->begin hoid.
1835  */
1836 DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare,
1837                         RB_BYVAL, spg_node);
1838
1839 static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root,
1840                                             const struct ceph_hobject_id *hoid)
1841 {
1842         struct rb_node *n = root->rb_node;
1843
1844         while (n) {
1845                 struct ceph_osd_backoff *cur =
1846                     rb_entry(n, struct ceph_osd_backoff, spg_node);
1847                 int cmp;
1848
1849                 cmp = hoid_compare(hoid, cur->begin);
1850                 if (cmp < 0) {
1851                         n = n->rb_left;
1852                 } else if (cmp > 0) {
1853                         if (hoid_compare(hoid, cur->end) < 0)
1854                                 return cur;
1855
1856                         n = n->rb_right;
1857                 } else {
1858                         return cur;
1859                 }
1860         }
1861
1862         return NULL;
1863 }
1864
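/*
 * Note: the tree is keyed by ->begin only.  The walk above assumes
 * that backoff ranges within an spgid do not overlap: once @hoid
 * compares greater than a node's begin, checking that node's ->end
 * before descending right is enough to find a containing range.
 */
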
1865 /*
1866  * Each backoff has a unique id within its OSD session.
1867  */
1868 DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node)
1869
1870 static void clear_backoffs(struct ceph_osd *osd)
1871 {
1872         while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) {
1873                 struct ceph_spg_mapping *spg =
1874                     rb_entry(rb_first(&osd->o_backoff_mappings),
1875                              struct ceph_spg_mapping, node);
1876
1877                 while (!RB_EMPTY_ROOT(&spg->backoffs)) {
1878                         struct ceph_osd_backoff *backoff =
1879                             rb_entry(rb_first(&spg->backoffs),
1880                                      struct ceph_osd_backoff, spg_node);
1881
1882                         erase_backoff(&spg->backoffs, backoff);
1883                         erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
1884                         free_backoff(backoff);
1885                 }
1886                 erase_spg_mapping(&osd->o_backoff_mappings, spg);
1887                 free_spg_mapping(spg);
1888         }
1889 }
1890
1891 /*
1892  * Set up a temporary, non-owning view into @t.
1893  */
1894 static void hoid_fill_from_target(struct ceph_hobject_id *hoid,
1895                                   const struct ceph_osd_request_target *t)
1896 {
1897         hoid->key = NULL;
1898         hoid->key_len = 0;
1899         hoid->oid = t->target_oid.name;
1900         hoid->oid_len = t->target_oid.name_len;
1901         hoid->snapid = CEPH_NOSNAP;
1902         hoid->hash = t->pgid.seed;
1903         hoid->is_max = false;
1904         if (t->target_oloc.pool_ns) {
1905                 hoid->nspace = t->target_oloc.pool_ns->str;
1906                 hoid->nspace_len = t->target_oloc.pool_ns->len;
1907         } else {
1908                 hoid->nspace = NULL;
1909                 hoid->nspace_len = 0;
1910         }
1911         hoid->pool = t->target_oloc.pool;
1912         ceph_hoid_build_hash_cache(hoid);
1913 }
1914
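/*
 * Note: the resulting view borrows target_oid.name and pool_ns
 * storage from @t -- it must not outlive @t and must never be passed
 * to free_hoid().
 */
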
1915 static bool should_plug_request(struct ceph_osd_request *req)
1916 {
1917         struct ceph_osd *osd = req->r_osd;
1918         struct ceph_spg_mapping *spg;
1919         struct ceph_osd_backoff *backoff;
1920         struct ceph_hobject_id hoid;
1921
1922         spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid);
1923         if (!spg)
1924                 return false;
1925
1926         hoid_fill_from_target(&hoid, &req->r_t);
1927         backoff = lookup_containing_backoff(&spg->backoffs, &hoid);
1928         if (!backoff)
1929                 return false;
1930
1931         dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n",
1932              __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
1933              backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id);
1934         return true;
1935 }
1936
1937 /*
1938  * Keep get_num_data_items() in sync with this function.
1939  */
1940 static void setup_request_data(struct ceph_osd_request *req)
1941 {
1942         struct ceph_msg *request_msg = req->r_request;
1943         struct ceph_msg *reply_msg = req->r_reply;
1944         struct ceph_osd_req_op *op;
1945
1946         if (request_msg->num_data_items || reply_msg->num_data_items)
1947                 return;
1948
1949         WARN_ON(request_msg->data_length || reply_msg->data_length);
1950         for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
1951                 switch (op->op) {
1952                 /* request */
1953                 case CEPH_OSD_OP_WRITE:
1954                 case CEPH_OSD_OP_WRITEFULL:
1955                         WARN_ON(op->indata_len != op->extent.length);
1956                         ceph_osdc_msg_data_add(request_msg,
1957                                                &op->extent.osd_data);
1958                         break;
1959                 case CEPH_OSD_OP_SETXATTR:
1960                 case CEPH_OSD_OP_CMPXATTR:
1961                         WARN_ON(op->indata_len != op->xattr.name_len +
1962                                                   op->xattr.value_len);
1963                         ceph_osdc_msg_data_add(request_msg,
1964                                                &op->xattr.osd_data);
1965                         break;
1966                 case CEPH_OSD_OP_NOTIFY_ACK:
1967                         ceph_osdc_msg_data_add(request_msg,
1968                                                &op->notify_ack.request_data);
1969                         break;
1970                 case CEPH_OSD_OP_COPY_FROM:
1971                         ceph_osdc_msg_data_add(request_msg,
1972                                                &op->copy_from.osd_data);
1973                         break;
1974
1975                 /* reply */
1976                 case CEPH_OSD_OP_STAT:
1977                         ceph_osdc_msg_data_add(reply_msg,
1978                                                &op->raw_data_in);
1979                         break;
1980                 case CEPH_OSD_OP_READ:
1981                         ceph_osdc_msg_data_add(reply_msg,
1982                                                &op->extent.osd_data);
1983                         break;
1984                 case CEPH_OSD_OP_LIST_WATCHERS:
1985                         ceph_osdc_msg_data_add(reply_msg,
1986                                                &op->list_watchers.response_data);
1987                         break;
1988
1989                 /* both */
1990                 case CEPH_OSD_OP_CALL:
1991                         WARN_ON(op->indata_len != op->cls.class_len +
1992                                                   op->cls.method_len +
1993                                                   op->cls.indata_len);
1994                         ceph_osdc_msg_data_add(request_msg,
1995                                                &op->cls.request_info);
1996                         /* optional, can be NONE */
1997                         ceph_osdc_msg_data_add(request_msg,
1998                                                &op->cls.request_data);
1999                         /* optional, can be NONE */
2000                         ceph_osdc_msg_data_add(reply_msg,
2001                                                &op->cls.response_data);
2002                         break;
2003                 case CEPH_OSD_OP_NOTIFY:
2004                         ceph_osdc_msg_data_add(request_msg,
2005                                                &op->notify.request_data);
2006                         ceph_osdc_msg_data_add(reply_msg,
2007                                                &op->notify.response_data);
2008                         break;
2009                 }
2010         }
2011 }
2012
2013 static void encode_pgid(void **p, const struct ceph_pg *pgid)
2014 {
2015         ceph_encode_8(p, 1);
2016         ceph_encode_64(p, pgid->pool);
2017         ceph_encode_32(p, pgid->seed);
2018         ceph_encode_32(p, -1); /* preferred */
2019 }
2020
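/*
 * Note: the four fields above total 1 + 8 + 4 + 4 = 17 bytes, which
 * is what CEPH_PGID_ENCODING_LEN must cover; the "+ 1" in
 * encode_spgid() below is the trailing shard byte.
 */
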
2021 static void encode_spgid(void **p, const struct ceph_spg *spgid)
2022 {
2023         ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1);
2024         encode_pgid(p, &spgid->pgid);
2025         ceph_encode_8(p, spgid->shard);
2026 }
2027
2028 static void encode_oloc(void **p, void *end,
2029                         const struct ceph_object_locator *oloc)
2030 {
2031         ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc));
2032         ceph_encode_64(p, oloc->pool);
2033         ceph_encode_32(p, -1); /* preferred */
2034         ceph_encode_32(p, 0);  /* key len */
2035         if (oloc->pool_ns)
2036                 ceph_encode_string(p, end, oloc->pool_ns->str,
2037                                    oloc->pool_ns->len);
2038         else
2039                 ceph_encode_32(p, 0);
2040 }
2041
2042 static void encode_request_partial(struct ceph_osd_request *req,
2043                                    struct ceph_msg *msg)
2044 {
2045         void *p = msg->front.iov_base;
2046         void *const end = p + msg->front_alloc_len;
2047         u32 data_len = 0;
2048         int i;
2049
2050         if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
2051                 /* snapshots aren't writeable */
2052                 WARN_ON(req->r_snapid != CEPH_NOSNAP);
2053         } else {
2054                 WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
2055                         req->r_data_offset || req->r_snapc);
2056         }
2057
2058         setup_request_data(req);
2059
2060         encode_spgid(&p, &req->r_t.spgid); /* actual spg */
2061         ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
2062         ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
2063         ceph_encode_32(&p, req->r_flags);
2064
2065         /* reqid */
2066         ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid));
2067         memset(p, 0, sizeof(struct ceph_osd_reqid));
2068         p += sizeof(struct ceph_osd_reqid);
2069
2070         /* trace */
2071         memset(p, 0, sizeof(struct ceph_blkin_trace_info));
2072         p += sizeof(struct ceph_blkin_trace_info);
2073
2074         ceph_encode_32(&p, 0); /* client_inc, always 0 */
2075         ceph_encode_timespec64(p, &req->r_mtime);
2076         p += sizeof(struct ceph_timespec);
2077
2078         encode_oloc(&p, end, &req->r_t.target_oloc);
2079         ceph_encode_string(&p, end, req->r_t.target_oid.name,
2080                            req->r_t.target_oid.name_len);
2081
2082         /* ops, can imply data */
2083         ceph_encode_16(&p, req->r_num_ops);
2084         for (i = 0; i < req->r_num_ops; i++) {
2085                 data_len += osd_req_encode_op(p, &req->r_ops[i]);
2086                 p += sizeof(struct ceph_osd_op);
2087         }
2088
2089         ceph_encode_64(&p, req->r_snapid); /* snapid */
2090         if (req->r_snapc) {
2091                 ceph_encode_64(&p, req->r_snapc->seq);
2092                 ceph_encode_32(&p, req->r_snapc->num_snaps);
2093                 for (i = 0; i < req->r_snapc->num_snaps; i++)
2094                         ceph_encode_64(&p, req->r_snapc->snaps[i]);
2095         } else {
2096                 ceph_encode_64(&p, 0); /* snap_seq */
2097                 ceph_encode_32(&p, 0); /* snaps len */
2098         }
2099
2100         ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
2101         BUG_ON(p > end - 8); /* space for features */
2102
2103         msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */
2104         /* front_len is finalized in encode_request_finish() */
2105         msg->front.iov_len = p - msg->front.iov_base;
2106         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2107         msg->hdr.data_len = cpu_to_le32(data_len);
2108         /*
2109          * The header "data_off" is a hint to the receiver allowing it
2110          * to align received data into its buffers such that there's no
2111          * need to re-copy it before writing it to disk (direct I/O).
2112          */
2113         msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
2114
2115         dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg,
2116              req->r_t.target_oid.name, req->r_t.target_oid.name_len);
2117 }
2118
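/*
 * Summary of the MOSDOp v8 front laid out above:
 *
 *	spgid, raw hash, osdmap epoch, flags,
 *	reqid (zeroed), trace (zeroed), client_inc (0), mtime,
 *	oloc, oid,
 *	num_ops + ops[], snapid, snap_seq, snaps[], retry_attempt
 *
 * encode_request_finish() below appends the peer features for
 * luminous+ OSDs or reencodes the whole front as v4.
 */
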
2119 static void encode_request_finish(struct ceph_msg *msg)
2120 {
2121         void *p = msg->front.iov_base;
2122         void *const partial_end = p + msg->front.iov_len;
2123         void *const end = p + msg->front_alloc_len;
2124
2125         if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) {
2126                 /* luminous OSD -- encode features and be done */
2127                 p = partial_end;
2128                 ceph_encode_64(&p, msg->con->peer_features);
2129         } else {
2130                 struct {
2131                         char spgid[CEPH_ENCODING_START_BLK_LEN +
2132                                    CEPH_PGID_ENCODING_LEN + 1];
2133                         __le32 hash;
2134                         __le32 epoch;
2135                         __le32 flags;
2136                         char reqid[CEPH_ENCODING_START_BLK_LEN +
2137                                    sizeof(struct ceph_osd_reqid)];
2138                         char trace[sizeof(struct ceph_blkin_trace_info)];
2139                         __le32 client_inc;
2140                         struct ceph_timespec mtime;
2141                 } __packed head;
2142                 struct ceph_pg pgid;
2143                 void *oloc, *oid, *tail;
2144                 int oloc_len, oid_len, tail_len;
2145                 int len;
2146
2147                 /*
2148                  * Pre-luminous OSD -- reencode v8 into v4 using @head
2149                  * as a temporary buffer.  Encode the raw PG; the rest
2150                  * is just a matter of moving oloc, oid and tail blobs
2151                  * around.
2152                  */
2153                 memcpy(&head, p, sizeof(head));
2154                 p += sizeof(head);
2155
2156                 oloc = p;
2157                 p += CEPH_ENCODING_START_BLK_LEN;
2158                 pgid.pool = ceph_decode_64(&p);
2159                 p += 4 + 4; /* preferred, key len */
2160                 len = ceph_decode_32(&p);
2161                 p += len;   /* nspace */
2162                 oloc_len = p - oloc;
2163
2164                 oid = p;
2165                 len = ceph_decode_32(&p);
2166                 p += len;
2167                 oid_len = p - oid;
2168
2169                 tail = p;
2170                 tail_len = partial_end - p;
2171
2172                 p = msg->front.iov_base;
2173                 ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc));
2174                 ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch));
2175                 ceph_encode_copy(&p, &head.flags, sizeof(head.flags));
2176                 ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime));
2177
2178                 /* reassert_version */
2179                 memset(p, 0, sizeof(struct ceph_eversion));
2180                 p += sizeof(struct ceph_eversion);
2181
2182                 BUG_ON(p >= oloc);
2183                 memmove(p, oloc, oloc_len);
2184                 p += oloc_len;
2185
2186                 pgid.seed = le32_to_cpu(head.hash);
2187                 encode_pgid(&p, &pgid); /* raw pg */
2188
2189                 BUG_ON(p >= oid);
2190                 memmove(p, oid, oid_len);
2191                 p += oid_len;
2192
2193                 /* tail -- ops, snapid, snapc, retry_attempt */
2194                 BUG_ON(p >= tail);
2195                 memmove(p, tail, tail_len);
2196                 p += tail_len;
2197
2198                 msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
2199         }
2200
2201         BUG_ON(p > end);
2202         msg->front.iov_len = p - msg->front.iov_base;
2203         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2204
2205         dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg,
2206              le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len),
2207              le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len),
2208              le16_to_cpu(msg->hdr.version));
2209 }
2210
2211 /*
2212  * @req has to be assigned a tid and registered.
2213  */
2214 static void send_request(struct ceph_osd_request *req)
2215 {
2216         struct ceph_osd *osd = req->r_osd;
2217
2218         verify_osd_locked(osd);
2219         WARN_ON(osd->o_osd != req->r_t.osd);
2220
2221         /* backoff? */
2222         if (should_plug_request(req))
2223                 return;
2224
2225         /*
2226          * We may have a previously queued request message hanging
2227          * around.  Cancel it to avoid corrupting the msgr.
2228          */
2229         if (req->r_sent)
2230                 ceph_msg_revoke(req->r_request);
2231
2232         req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
2233         if (req->r_attempts)
2234                 req->r_flags |= CEPH_OSD_FLAG_RETRY;
2235         else
2236                 WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);
2237
2238         encode_request_partial(req, req->r_request);
2239
2240         dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n",
2241              __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
2242              req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
2243              req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags,
2244              req->r_attempts);
2245
2246         req->r_t.paused = false;
2247         req->r_stamp = jiffies;
2248         req->r_attempts++;
2249
2250         req->r_sent = osd->o_incarnation;
2251         req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
2252         ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
2253 }
2254
2255 static void maybe_request_map(struct ceph_osd_client *osdc)
2256 {
2257         bool continuous = false;
2258
2259         verify_osdc_locked(osdc);
2260         WARN_ON(!osdc->osdmap->epoch);
2261
2262         if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2263             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
2264             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2265                 dout("%s osdc %p continuous\n", __func__, osdc);
2266                 continuous = true;
2267         } else {
2268                 dout("%s osdc %p onetime\n", __func__, osdc);
2269         }
2270
2271         if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
2272                                osdc->osdmap->epoch + 1, continuous))
2273                 ceph_monc_renew_subs(&osdc->client->monc);
2274 }
2275
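/*
 * Note: while FULL/PAUSERD/PAUSEWR is set we must see every new map
 * to notice the flag clearing, hence the continuous subscription;
 * otherwise a one-shot subscription for the next epoch is enough.
 */
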
2276 static void complete_request(struct ceph_osd_request *req, int err);
2277 static void send_map_check(struct ceph_osd_request *req);
2278
2279 static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
2280 {
2281         struct ceph_osd_client *osdc = req->r_osdc;
2282         struct ceph_osd *osd;
2283         enum calc_target_result ct_res;
2284         int err = 0;
2285         bool need_send = false;
2286         bool promoted = false;
2287
2288         WARN_ON(req->r_tid);
2289         dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
2290
2291 again:
2292         ct_res = calc_target(osdc, &req->r_t, false);
2293         if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
2294                 goto promote;
2295
2296         osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
2297         if (IS_ERR(osd)) {
2298                 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
2299                 goto promote;
2300         }
2301
2302         if (osdc->abort_err) {
2303                 dout("req %p abort_err %d\n", req, osdc->abort_err);
2304                 err = osdc->abort_err;
2305         } else if (osdc->osdmap->epoch < osdc->epoch_barrier) {
2306                 dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
2307                      osdc->epoch_barrier);
2308                 req->r_t.paused = true;
2309                 maybe_request_map(osdc);
2310         } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2311                    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2312                 dout("req %p pausewr\n", req);
2313                 req->r_t.paused = true;
2314                 maybe_request_map(osdc);
2315         } else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
2316                    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2317                 dout("req %p pauserd\n", req);
2318                 req->r_t.paused = true;
2319                 maybe_request_map(osdc);
2320         } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2321                    !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
2322                                      CEPH_OSD_FLAG_FULL_FORCE)) &&
2323                    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2324                     pool_full(osdc, req->r_t.base_oloc.pool))) {
2325                 dout("req %p full/pool_full\n", req);
2326                 if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) {
2327                         err = -ENOSPC;
2328                 } else {
2329                         pr_warn_ratelimited("FULL or reached pool quota\n");
2330                         req->r_t.paused = true;
2331                         maybe_request_map(osdc);
2332                 }
2333         } else if (!osd_homeless(osd)) {
2334                 need_send = true;
2335         } else {
2336                 maybe_request_map(osdc);
2337         }
2338
2339         mutex_lock(&osd->lock);
2340         /*
2341          * Assign the tid atomically with send_request() to protect
2342          * multiple writes to the same object from racing with each
2343          * other, resulting in out of order ops on the OSDs.
2344          */
2345         req->r_tid = atomic64_inc_return(&osdc->last_tid);
2346         link_request(osd, req);
2347         if (need_send)
2348                 send_request(req);
2349         else if (err)
2350                 complete_request(req, err);
2351         mutex_unlock(&osd->lock);
2352
2353         if (!err && ct_res == CALC_TARGET_POOL_DNE)
2354                 send_map_check(req);
2355
2356         if (promoted)
2357                 downgrade_write(&osdc->lock);
2358         return;
2359
2360 promote:
2361         up_read(&osdc->lock);
2362         down_write(&osdc->lock);
2363         wrlocked = true;
2364         promoted = true;
2365         goto again;
2366 }
2367
2368 static void account_request(struct ceph_osd_request *req)
2369 {
2370         WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
2371         WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));
2372
2373         req->r_flags |= CEPH_OSD_FLAG_ONDISK;
2374         atomic_inc(&req->r_osdc->num_requests);
2375
2376         req->r_start_stamp = jiffies;
2377 }
2378
2379 static void submit_request(struct ceph_osd_request *req, bool wrlocked)
2380 {
2381         ceph_osdc_get_request(req);
2382         account_request(req);
2383         __submit_request(req, wrlocked);
2384 }
2385
2386 static void finish_request(struct ceph_osd_request *req)
2387 {
2388         struct ceph_osd_client *osdc = req->r_osdc;
2389
2390         WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
2391         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2392
2393         if (req->r_osd)
2394                 unlink_request(req->r_osd, req);
2395         atomic_dec(&osdc->num_requests);
2396
2397         /*
2398          * If an OSD has failed or returned and a request has been sent
2399          * twice, it's possible to get a reply and end up here while the
2400          * request message is queued for delivery.  We will ignore the
2401          * reply, so not a big deal, but better to try and catch it.
2402          */
2403         ceph_msg_revoke(req->r_request);
2404         ceph_msg_revoke_incoming(req->r_reply);
2405 }
2406
2407 static void __complete_request(struct ceph_osd_request *req)
2408 {
2409         dout("%s req %p tid %llu cb %ps result %d\n", __func__, req,
2410              req->r_tid, req->r_callback, req->r_result);
2411
2412         if (req->r_callback)
2413                 req->r_callback(req);
2414         complete_all(&req->r_completion);
2415         ceph_osdc_put_request(req);
2416 }
2417
2418 static void complete_request_workfn(struct work_struct *work)
2419 {
2420         struct ceph_osd_request *req =
2421             container_of(work, struct ceph_osd_request, r_complete_work);
2422
2423         __complete_request(req);
2424 }
2425
2426 /*
2427  * This is open-coded in handle_reply().
2428  */
2429 static void complete_request(struct ceph_osd_request *req, int err)
2430 {
2431         dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2432
2433         req->r_result = err;
2434         finish_request(req);
2435
2436         INIT_WORK(&req->r_complete_work, complete_request_workfn);
2437         queue_work(req->r_osdc->completion_wq, &req->r_complete_work);
2438 }
2439
2440 static void cancel_map_check(struct ceph_osd_request *req)
2441 {
2442         struct ceph_osd_client *osdc = req->r_osdc;
2443         struct ceph_osd_request *lookup_req;
2444
2445         verify_osdc_wrlocked(osdc);
2446
2447         lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2448         if (!lookup_req)
2449                 return;
2450
2451         WARN_ON(lookup_req != req);
2452         erase_request_mc(&osdc->map_checks, req);
2453         ceph_osdc_put_request(req);
2454 }
2455
2456 static void cancel_request(struct ceph_osd_request *req)
2457 {
2458         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2459
2460         cancel_map_check(req);
2461         finish_request(req);
2462         complete_all(&req->r_completion);
2463         ceph_osdc_put_request(req);
2464 }
2465
2466 static void abort_request(struct ceph_osd_request *req, int err)
2467 {
2468         dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2469
2470         cancel_map_check(req);
2471         complete_request(req, err);
2472 }
2473
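/*
 * Note: cancel_request() above wakes r_completion waiters without
 * invoking ->r_callback, whereas abort_request() goes through
 * complete_request() and therefore reports @err to the callback too.
 */
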
2474 static int abort_fn(struct ceph_osd_request *req, void *arg)
2475 {
2476         int err = *(int *)arg;
2477
2478         abort_request(req, err);
2479         return 0; /* continue iteration */
2480 }
2481
2482 /*
2483  * Abort all in-flight requests with @err and arrange for all future
2484  * requests to be failed immediately.
2485  */
2486 void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err)
2487 {
2488         dout("%s osdc %p err %d\n", __func__, osdc, err);
2489         down_write(&osdc->lock);
2490         for_each_request(osdc, abort_fn, &err);
2491         osdc->abort_err = err;
2492         up_write(&osdc->lock);
2493 }
2494 EXPORT_SYMBOL(ceph_osdc_abort_requests);
2495
2496 void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc)
2497 {
2498         down_write(&osdc->lock);
2499         osdc->abort_err = 0;
2500         up_write(&osdc->lock);
2501 }
2502 EXPORT_SYMBOL(ceph_osdc_clear_abort_err);
2503
2504 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2505 {
2506         if (likely(eb > osdc->epoch_barrier)) {
2507                 dout("updating epoch_barrier from %u to %u\n",
2508                                 osdc->epoch_barrier, eb);
2509                 osdc->epoch_barrier = eb;
2510                 /* Request map if we're not at the barrier yet */
2511                 if (eb > osdc->osdmap->epoch)
2512                         maybe_request_map(osdc);
2513         }
2514 }
2515
2516 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2517 {
2518         down_read(&osdc->lock);
2519         if (unlikely(eb > osdc->epoch_barrier)) {
2520                 up_read(&osdc->lock);
2521                 down_write(&osdc->lock);
2522                 update_epoch_barrier(osdc, eb);
2523                 up_write(&osdc->lock);
2524         } else {
2525                 up_read(&osdc->lock);
2526         }
2527 }
2528 EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
2529
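/*
 * Note: the read-to-write upgrade above drops the lock entirely, so
 * update_epoch_barrier() rechecks @eb against the barrier under the
 * write lock -- another thread may have raised it in the meantime.
 */
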
2530 /*
2531  * We can end up releasing caps as a result of abort_request().
2532  * In that case, we probably want to ensure that the cap release message
2533  * has an updated epoch barrier in it, so set the epoch barrier prior to
2534  * aborting the first request.
2535  */
2536 static int abort_on_full_fn(struct ceph_osd_request *req, void *arg)
2537 {
2538         struct ceph_osd_client *osdc = req->r_osdc;
2539         bool *victims = arg;
2540
2541         if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2542             (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2543              pool_full(osdc, req->r_t.base_oloc.pool))) {
2544                 if (!*victims) {
2545                         update_epoch_barrier(osdc, osdc->osdmap->epoch);
2546                         *victims = true;
2547                 }
2548                 abort_request(req, -ENOSPC);
2549         }
2550
2551         return 0; /* continue iteration */
2552 }
2553
2554 /*
2555  * Drop all pending requests that are stalled waiting on a full condition to
2556  * clear, and complete them with -ENOSPC as the return code. Set the
2557  * osdc->epoch_barrier to the latest map epoch that we've seen if any were
2558  * cancelled.
2559  */
2560 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
2561 {
2562         bool victims = false;
2563
2564         if (ceph_test_opt(osdc->client, ABORT_ON_FULL) &&
2565             (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc)))
2566                 for_each_request(osdc, abort_on_full_fn, &victims);
2567 }
2568
2569 static void check_pool_dne(struct ceph_osd_request *req)
2570 {
2571         struct ceph_osd_client *osdc = req->r_osdc;
2572         struct ceph_osdmap *map = osdc->osdmap;
2573
2574         verify_osdc_wrlocked(osdc);
2575         WARN_ON(!map->epoch);
2576
2577         if (req->r_attempts) {
2578                 /*
2579                  * We sent a request earlier, which means that
2580                  * previously the pool existed, and now it does not
2581                  * (i.e., it was deleted).
2582                  */
2583                 req->r_map_dne_bound = map->epoch;
2584                 dout("%s req %p tid %llu pool disappeared\n", __func__, req,
2585                      req->r_tid);
2586         } else {
2587                 dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
2588                      req, req->r_tid, req->r_map_dne_bound, map->epoch);
2589         }
2590
2591         if (req->r_map_dne_bound) {
2592                 if (map->epoch >= req->r_map_dne_bound) {
2593                         /* we had a new enough map */
2594                         pr_info_ratelimited("tid %llu pool does not exist\n",
2595                                             req->r_tid);
2596                         complete_request(req, -ENOENT);
2597                 }
2598         } else {
2599                 send_map_check(req);
2600         }
2601 }
2602
2603 static void map_check_cb(struct ceph_mon_generic_request *greq)
2604 {
2605         struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2606         struct ceph_osd_request *req;
2607         u64 tid = greq->private_data;
2608
2609         WARN_ON(greq->result || !greq->u.newest);
2610
2611         down_write(&osdc->lock);
2612         req = lookup_request_mc(&osdc->map_checks, tid);
2613         if (!req) {
2614                 dout("%s tid %llu dne\n", __func__, tid);
2615                 goto out_unlock;
2616         }
2617
2618         dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
2619              req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
2620         if (!req->r_map_dne_bound)
2621                 req->r_map_dne_bound = greq->u.newest;
2622         erase_request_mc(&osdc->map_checks, req);
2623         check_pool_dne(req);
2624
2625         ceph_osdc_put_request(req);
2626 out_unlock:
2627         up_write(&osdc->lock);
2628 }
2629
2630 static void send_map_check(struct ceph_osd_request *req)
2631 {
2632         struct ceph_osd_client *osdc = req->r_osdc;
2633         struct ceph_osd_request *lookup_req;
2634         int ret;
2635
2636         verify_osdc_wrlocked(osdc);
2637
2638         lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2639         if (lookup_req) {
2640                 WARN_ON(lookup_req != req);
2641                 return;
2642         }
2643
2644         ceph_osdc_get_request(req);
2645         insert_request_mc(&osdc->map_checks, req);
2646         ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
2647                                           map_check_cb, req->r_tid);
2648         WARN_ON(ret);
2649 }
2650
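/*
 * Note: the map-check flow is send_map_check() ->
 * ceph_monc_get_version_async("osdmap") -> map_check_cb(), which
 * records the newest epoch in r_map_dne_bound and re-runs
 * check_pool_dne() so that the request fails with -ENOENT once our
 * map is new enough.
 */
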
2651 /*
2652  * lingering requests, watch/notify v2 infrastructure
2653  */
2654 static void linger_release(struct kref *kref)
2655 {
2656         struct ceph_osd_linger_request *lreq =
2657             container_of(kref, struct ceph_osd_linger_request, kref);
2658
2659         dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
2660              lreq->reg_req, lreq->ping_req);
2661         WARN_ON(!RB_EMPTY_NODE(&lreq->node));
2662         WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
2663         WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
2664         WARN_ON(!list_empty(&lreq->scan_item));
2665         WARN_ON(!list_empty(&lreq->pending_lworks));
2666         WARN_ON(lreq->osd);
2667
2668         if (lreq->reg_req)
2669                 ceph_osdc_put_request(lreq->reg_req);
2670         if (lreq->ping_req)
2671                 ceph_osdc_put_request(lreq->ping_req);
2672         target_destroy(&lreq->t);
2673         kfree(lreq);
2674 }
2675
2676 static void linger_put(struct ceph_osd_linger_request *lreq)
2677 {
2678         if (lreq)
2679                 kref_put(&lreq->kref, linger_release);
2680 }
2681
2682 static struct ceph_osd_linger_request *
2683 linger_get(struct ceph_osd_linger_request *lreq)
2684 {
2685         kref_get(&lreq->kref);
2686         return lreq;
2687 }
2688
2689 static struct ceph_osd_linger_request *
2690 linger_alloc(struct ceph_osd_client *osdc)
2691 {
2692         struct ceph_osd_linger_request *lreq;
2693
2694         lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
2695         if (!lreq)
2696                 return NULL;
2697
2698         kref_init(&lreq->kref);
2699         mutex_init(&lreq->lock);
2700         RB_CLEAR_NODE(&lreq->node);
2701         RB_CLEAR_NODE(&lreq->osdc_node);
2702         RB_CLEAR_NODE(&lreq->mc_node);
2703         INIT_LIST_HEAD(&lreq->scan_item);
2704         INIT_LIST_HEAD(&lreq->pending_lworks);
2705         init_completion(&lreq->reg_commit_wait);
2706         init_completion(&lreq->notify_finish_wait);
2707
2708         lreq->osdc = osdc;
2709         target_init(&lreq->t);
2710
2711         dout("%s lreq %p\n", __func__, lreq);
2712         return lreq;
2713 }
2714
2715 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
2716 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
2717 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
2718
2719 /*
2720  * Create linger request <-> OSD session relation.
2721  *
2722  * @lreq has to be registered, @osd may be homeless.
2723  */
2724 static void link_linger(struct ceph_osd *osd,
2725                         struct ceph_osd_linger_request *lreq)
2726 {
2727         verify_osd_locked(osd);
2728         WARN_ON(!lreq->linger_id || lreq->osd);
2729         dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2730              osd->o_osd, lreq, lreq->linger_id);
2731
2732         if (!osd_homeless(osd))
2733                 __remove_osd_from_lru(osd);
2734         else
2735                 atomic_inc(&osd->o_osdc->num_homeless);
2736
2737         get_osd(osd);
2738         insert_linger(&osd->o_linger_requests, lreq);
2739         lreq->osd = osd;
2740 }
2741
2742 static void unlink_linger(struct ceph_osd *osd,
2743                           struct ceph_osd_linger_request *lreq)
2744 {
2745         verify_osd_locked(osd);
2746         WARN_ON(lreq->osd != osd);
2747         dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2748              osd->o_osd, lreq, lreq->linger_id);
2749
2750         lreq->osd = NULL;
2751         erase_linger(&osd->o_linger_requests, lreq);
2752         put_osd(osd);
2753
2754         if (!osd_homeless(osd))
2755                 maybe_move_osd_to_lru(osd);
2756         else
2757                 atomic_dec(&osd->o_osdc->num_homeless);
2758 }
2759
2760 static bool __linger_registered(struct ceph_osd_linger_request *lreq)
2761 {
2762         verify_osdc_locked(lreq->osdc);
2763
2764         return !RB_EMPTY_NODE(&lreq->osdc_node);
2765 }
2766
2767 static bool linger_registered(struct ceph_osd_linger_request *lreq)
2768 {
2769         struct ceph_osd_client *osdc = lreq->osdc;
2770         bool registered;
2771
2772         down_read(&osdc->lock);
2773         registered = __linger_registered(lreq);
2774         up_read(&osdc->lock);
2775
2776         return registered;
2777 }
2778
2779 static void linger_register(struct ceph_osd_linger_request *lreq)
2780 {
2781         struct ceph_osd_client *osdc = lreq->osdc;
2782
2783         verify_osdc_wrlocked(osdc);
2784         WARN_ON(lreq->linger_id);
2785
2786         linger_get(lreq);
2787         lreq->linger_id = ++osdc->last_linger_id;
2788         insert_linger_osdc(&osdc->linger_requests, lreq);
2789 }
2790
2791 static void linger_unregister(struct ceph_osd_linger_request *lreq)
2792 {
2793         struct ceph_osd_client *osdc = lreq->osdc;
2794
2795         verify_osdc_wrlocked(osdc);
2796
2797         erase_linger_osdc(&osdc->linger_requests, lreq);
2798         linger_put(lreq);
2799 }
2800
2801 static void cancel_linger_request(struct ceph_osd_request *req)
2802 {
2803         struct ceph_osd_linger_request *lreq = req->r_priv;
2804
2805         WARN_ON(!req->r_linger);
2806         cancel_request(req);
2807         linger_put(lreq);
2808 }
2809
2810 struct linger_work {
2811         struct work_struct work;
2812         struct ceph_osd_linger_request *lreq;
2813         struct list_head pending_item;
2814         unsigned long queued_stamp;
2815
2816         union {
2817                 struct {
2818                         u64 notify_id;
2819                         u64 notifier_id;
2820                         void *payload; /* points into @msg front */
2821                         size_t payload_len;
2822
2823                         struct ceph_msg *msg; /* for ceph_msg_put() */
2824                 } notify;
2825                 struct {
2826                         int err;
2827                 } error;
2828         };
2829 };
2830
2831 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
2832                                        work_func_t workfn)
2833 {
2834         struct linger_work *lwork;
2835
2836         lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
2837         if (!lwork)
2838                 return NULL;
2839
2840         INIT_WORK(&lwork->work, workfn);
2841         INIT_LIST_HEAD(&lwork->pending_item);
2842         lwork->lreq = linger_get(lreq);
2843
2844         return lwork;
2845 }
2846
2847 static void lwork_free(struct linger_work *lwork)
2848 {
2849         struct ceph_osd_linger_request *lreq = lwork->lreq;
2850
2851         mutex_lock(&lreq->lock);
2852         list_del(&lwork->pending_item);
2853         mutex_unlock(&lreq->lock);
2854
2855         linger_put(lreq);
2856         kfree(lwork);
2857 }
2858
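/*
 * Note: a linger_work pins its lreq from lwork_alloc() until
 * lwork_free().  The handlers below always finish with lwork_free(),
 * so the kref and the pending_item linkage are balanced on every
 * path.
 */
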
2859 static void lwork_queue(struct linger_work *lwork)
2860 {
2861         struct ceph_osd_linger_request *lreq = lwork->lreq;
2862         struct ceph_osd_client *osdc = lreq->osdc;
2863
2864         verify_lreq_locked(lreq);
2865         WARN_ON(!list_empty(&lwork->pending_item));
2866
2867         lwork->queued_stamp = jiffies;
2868         list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
2869         queue_work(osdc->notify_wq, &lwork->work);
2870 }
2871
2872 static void do_watch_notify(struct work_struct *w)
2873 {
2874         struct linger_work *lwork = container_of(w, struct linger_work, work);
2875         struct ceph_osd_linger_request *lreq = lwork->lreq;
2876
2877         if (!linger_registered(lreq)) {
2878                 dout("%s lreq %p not registered\n", __func__, lreq);
2879                 goto out;
2880         }
2881
2882         WARN_ON(!lreq->is_watch);
2883         dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
2884              __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
2885              lwork->notify.payload_len);
2886         lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
2887                   lwork->notify.notifier_id, lwork->notify.payload,
2888                   lwork->notify.payload_len);
2889
2890 out:
2891         ceph_msg_put(lwork->notify.msg);
2892         lwork_free(lwork);
2893 }
2894
2895 static void do_watch_error(struct work_struct *w)
2896 {
2897         struct linger_work *lwork = container_of(w, struct linger_work, work);
2898         struct ceph_osd_linger_request *lreq = lwork->lreq;
2899
2900         if (!linger_registered(lreq)) {
2901                 dout("%s lreq %p not registered\n", __func__, lreq);
2902                 goto out;
2903         }
2904
2905         dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
2906         lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
2907
2908 out:
2909         lwork_free(lwork);
2910 }
2911
2912 static void queue_watch_error(struct ceph_osd_linger_request *lreq)
2913 {
2914         struct linger_work *lwork;
2915
2916         lwork = lwork_alloc(lreq, do_watch_error);
2917         if (!lwork) {
2918                 pr_err("failed to allocate error-lwork\n");
2919                 return;
2920         }
2921
2922         lwork->error.err = lreq->last_error;
2923         lwork_queue(lwork);
2924 }
2925
2926 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
2927                                        int result)
2928 {
2929         if (!completion_done(&lreq->reg_commit_wait)) {
2930                 lreq->reg_commit_error = (result <= 0 ? result : 0);
2931                 complete_all(&lreq->reg_commit_wait);
2932         }
2933 }
2934
2935 static void linger_commit_cb(struct ceph_osd_request *req)
2936 {
2937         struct ceph_osd_linger_request *lreq = req->r_priv;
2938
2939         mutex_lock(&lreq->lock);
2940         dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
2941              lreq->linger_id, req->r_result);
2942         linger_reg_commit_complete(lreq, req->r_result);
2943         lreq->committed = true;
2944
2945         if (!lreq->is_watch) {
2946                 struct ceph_osd_data *osd_data =
2947                     osd_req_op_data(req, 0, notify, response_data);
2948                 void *p = page_address(osd_data->pages[0]);
2949
2950                 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
2951                         osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
2952
2953                 /* make note of the notify_id */
2954                 if (req->r_ops[0].outdata_len >= sizeof(u64)) {
2955                         lreq->notify_id = ceph_decode_64(&p);
2956                         dout("lreq %p notify_id %llu\n", lreq,
2957                              lreq->notify_id);
2958                 } else {
2959                         dout("lreq %p no notify_id\n", lreq);
2960                 }
2961         }
2962
2963         mutex_unlock(&lreq->lock);
2964         linger_put(lreq);
2965 }
2966
2967 static int normalize_watch_error(int err)
2968 {
2969         /*
2970          * Translate ENOENT -> ENOTCONN so that a delete->disconnection
2971          * notification and a failure to reconnect because we raced with
2972          * the delete appear the same to the user.
2973          */
2974         if (err == -ENOENT)
2975                 err = -ENOTCONN;
2976
2977         return err;
2978 }
2979
2980 static void linger_reconnect_cb(struct ceph_osd_request *req)
2981 {
2982         struct ceph_osd_linger_request *lreq = req->r_priv;
2983
2984         mutex_lock(&lreq->lock);
2985         dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
2986              lreq, lreq->linger_id, req->r_result, lreq->last_error);
2987         if (req->r_result < 0) {
2988                 if (!lreq->last_error) {
2989                         lreq->last_error = normalize_watch_error(req->r_result);
2990                         queue_watch_error(lreq);
2991                 }
2992         }
2993
2994         mutex_unlock(&lreq->lock);
2995         linger_put(lreq);
2996 }
2997
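/*
 * (Re)send the registration request for @lreq.  A watch that has
 * already been committed is reconnected (CEPH_OSD_WATCH_OP_RECONNECT
 * with a bumped register_gen); otherwise the initial registration is
 * sent and completed in linger_commit_cb().
 */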
2998 static void send_linger(struct ceph_osd_linger_request *lreq)
2999 {
3000         struct ceph_osd_request *req = lreq->reg_req;
3001         struct ceph_osd_req_op *op = &req->r_ops[0];
3002
3003         verify_osdc_wrlocked(req->r_osdc);
3004         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3005
3006         if (req->r_osd)
3007                 cancel_linger_request(req);
3008
3009         request_reinit(req);
3010         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
3011         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
3012         req->r_flags = lreq->t.flags;
3013         req->r_mtime = lreq->mtime;
3014
3015         mutex_lock(&lreq->lock);
3016         if (lreq->is_watch && lreq->committed) {
3017                 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
3018                         op->watch.cookie != lreq->linger_id);
3019                 op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
3020                 op->watch.gen = ++lreq->register_gen;
3021                 dout("lreq %p reconnect register_gen %u\n", lreq,
3022                      op->watch.gen);
3023                 req->r_callback = linger_reconnect_cb;
3024         } else {
3025                 if (!lreq->is_watch)
3026                         lreq->notify_id = 0;
3027                 else
3028                         WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
3029                 dout("lreq %p register\n", lreq);
3030                 req->r_callback = linger_commit_cb;
3031         }
3032         mutex_unlock(&lreq->lock);
3033
3034         req->r_priv = linger_get(lreq);
3035         req->r_linger = true;
3036
3037         submit_request(req, true);
3038 }
3039
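/*
 * Completion callback for a watch ping: on success advance
 * watch_valid_thru, on error queue a watch error.  Pongs for an older
 * register_gen are ignored.
 */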
3040 static void linger_ping_cb(struct ceph_osd_request *req)
3041 {
3042         struct ceph_osd_linger_request *lreq = req->r_priv;
3043
3044         mutex_lock(&lreq->lock);
3045         dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
3046              __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
3047              lreq->last_error);
3048         if (lreq->register_gen == req->r_ops[0].watch.gen) {
3049                 if (!req->r_result) {
3050                         lreq->watch_valid_thru = lreq->ping_sent;
3051                 } else if (!lreq->last_error) {
3052                         lreq->last_error = normalize_watch_error(req->r_result);
3053                         queue_watch_error(lreq);
3054                 }
3055         } else {
3056                 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
3057                      lreq->register_gen, req->r_ops[0].watch.gen);
3058         }
3059
3060         mutex_unlock(&lreq->lock);
3061         linger_put(lreq);
3062 }
3063
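/*
 * Send a CEPH_OSD_WATCH_OP_PING for @lreq to verify that the watch is
 * still connected.  Skipped while the osdmap has PAUSERD set.
 */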
3064 static void send_linger_ping(struct ceph_osd_linger_request *lreq)
3065 {
3066         struct ceph_osd_client *osdc = lreq->osdc;
3067         struct ceph_osd_request *req = lreq->ping_req;
3068         struct ceph_osd_req_op *op = &req->r_ops[0];
3069
3070         if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
3071                 dout("%s PAUSERD\n", __func__);
3072                 return;
3073         }
3074
3075         lreq->ping_sent = jiffies;
3076         dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
3077              __func__, lreq, lreq->linger_id, lreq->ping_sent,
3078              lreq->register_gen);
3079
3080         if (req->r_osd)
3081                 cancel_linger_request(req);
3082
3083         request_reinit(req);
3084         target_copy(&req->r_t, &lreq->t);
3085
3086         WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
3087                 op->watch.cookie != lreq->linger_id ||
3088                 op->watch.op != CEPH_OSD_WATCH_OP_PING);
3089         op->watch.gen = lreq->register_gen;
3090         req->r_callback = linger_ping_cb;
3091         req->r_priv = linger_get(lreq);
3092         req->r_linger = true;
3093
3094         ceph_osdc_get_request(req);
3095         account_request(req);
3096         req->r_tid = atomic64_inc_return(&osdc->last_tid);
3097         link_request(lreq->osd, req);
3098         send_request(req);
3099 }
3100
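/*
 * Register @lreq and send the initial watch or notify request.  The
 * linger_id doubles as the watch/notify cookie.
 */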
3101 static void linger_submit(struct ceph_osd_linger_request *lreq)
3102 {
3103         struct ceph_osd_client *osdc = lreq->osdc;
3104         struct ceph_osd *osd;
3105
3106         down_write(&osdc->lock);
3107         linger_register(lreq);
3108         if (lreq->is_watch) {
3109                 lreq->reg_req->r_ops[0].watch.cookie = lreq->linger_id;
3110                 lreq->ping_req->r_ops[0].watch.cookie = lreq->linger_id;
3111         } else {
3112                 lreq->reg_req->r_ops[0].notify.cookie = lreq->linger_id;
3113         }
3114
3115         calc_target(osdc, &lreq->t, false);
3116         osd = lookup_create_osd(osdc, lreq->t.osd, true);
3117         link_linger(osd, lreq);
3118
3119         send_linger(lreq);
3120         up_write(&osdc->lock);
3121 }
3122
3123 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
3124 {
3125         struct ceph_osd_client *osdc = lreq->osdc;
3126         struct ceph_osd_linger_request *lookup_lreq;
3127
3128         verify_osdc_wrlocked(osdc);
3129
3130         lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3131                                        lreq->linger_id);
3132         if (!lookup_lreq)
3133                 return;
3134
3135         WARN_ON(lookup_lreq != lreq);
3136         erase_linger_mc(&osdc->linger_map_checks, lreq);
3137         linger_put(lreq);
3138 }
3139
3140 /*
3141  * @lreq has to be both registered and linked.
3142  */
3143 static void __linger_cancel(struct ceph_osd_linger_request *lreq)
3144 {
3145         if (lreq->is_watch && lreq->ping_req->r_osd)
3146                 cancel_linger_request(lreq->ping_req);
3147         if (lreq->reg_req->r_osd)
3148                 cancel_linger_request(lreq->reg_req);
3149         cancel_linger_map_check(lreq);
3150         unlink_linger(lreq->osd, lreq);
3151         linger_unregister(lreq);
3152 }
3153
3154 static void linger_cancel(struct ceph_osd_linger_request *lreq)
3155 {
3156         struct ceph_osd_client *osdc = lreq->osdc;
3157
3158         down_write(&osdc->lock);
3159         if (__linger_registered(lreq))
3160                 __linger_cancel(lreq);
3161         up_write(&osdc->lock);
3162 }
3163
3164 static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
3165
3166 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
3167 {
3168         struct ceph_osd_client *osdc = lreq->osdc;
3169         struct ceph_osdmap *map = osdc->osdmap;
3170
3171         verify_osdc_wrlocked(osdc);
3172         WARN_ON(!map->epoch);
3173
3174         if (lreq->register_gen) {
3175                 lreq->map_dne_bound = map->epoch;
3176                 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
3177                      lreq, lreq->linger_id);
3178         } else {
3179                 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
3180                      __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3181                      map->epoch);
3182         }
3183
3184         if (lreq->map_dne_bound) {
3185                 if (map->epoch >= lreq->map_dne_bound) {
3186                         /* we had a new enough map */
3187                         pr_info("linger_id %llu pool does not exist\n",
3188                                 lreq->linger_id);
3189                         linger_reg_commit_complete(lreq, -ENOENT);
3190                         __linger_cancel(lreq);
3191                 }
3192         } else {
3193                 send_linger_map_check(lreq);
3194         }
3195 }
3196
3197 static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
3198 {
3199         struct ceph_osd_client *osdc = &greq->monc->client->osdc;
3200         struct ceph_osd_linger_request *lreq;
3201         u64 linger_id = greq->private_data;
3202
3203         WARN_ON(greq->result || !greq->u.newest);
3204
3205         down_write(&osdc->lock);
3206         lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
3207         if (!lreq) {
3208                 dout("%s linger_id %llu dne\n", __func__, linger_id);
3209                 goto out_unlock;
3210         }
3211
3212         dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
3213              __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3214              greq->u.newest);
3215         if (!lreq->map_dne_bound)
3216                 lreq->map_dne_bound = greq->u.newest;
3217         erase_linger_mc(&osdc->linger_map_checks, lreq);
3218         check_linger_pool_dne(lreq);
3219
3220         linger_put(lreq);
3221 out_unlock:
3222         up_write(&osdc->lock);
3223 }
3224
3225 static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
3226 {
3227         struct ceph_osd_client *osdc = lreq->osdc;
3228         struct ceph_osd_linger_request *lookup_lreq;
3229         int ret;
3230
3231         verify_osdc_wrlocked(osdc);
3232
3233         lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3234                                        lreq->linger_id);
3235         if (lookup_lreq) {
3236                 WARN_ON(lookup_lreq != lreq);
3237                 return;
3238         }
3239
3240         linger_get(lreq);
3241         insert_linger_mc(&osdc->linger_map_checks, lreq);
3242         ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
3243                                           linger_map_check_cb, lreq->linger_id);
3244         WARN_ON(ret);
3245 }
3246
3247 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
3248 {
3249         int ret;
3250
3251         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3252         ret = wait_for_completion_killable(&lreq->reg_commit_wait);
3253         return ret ?: lreq->reg_commit_error;
3254 }
3255
3256 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq,
3257                                      unsigned long timeout)
3258 {
3259         long left;
3260
3261         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3262         left = wait_for_completion_killable_timeout(&lreq->notify_finish_wait,
3263                                                 ceph_timeout_jiffies(timeout));
3264         if (left <= 0)
3265                 left = left ?: -ETIMEDOUT;
3266         else
3267                 left = lreq->notify_finish_error; /* completed */
3268
3269         return left;
3270 }
3271
3272 /*
3273  * Timeout callback, called every N seconds.  When one or more OSD
3274  * requests have been active for more than N seconds, we send a
3275  * keepalive (tag + timestamp) to the OSD serving them to ensure that
3276  * any communications channel reset is detected.
3277  */
3278 static void handle_timeout(struct work_struct *work)
3279 {
3280         struct ceph_osd_client *osdc =
3281                 container_of(work, struct ceph_osd_client, timeout_work.work);
3282         struct ceph_options *opts = osdc->client->options;
3283         unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
3284         unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
3285         LIST_HEAD(slow_osds);
3286         struct rb_node *n, *p;
3287
3288         dout("%s osdc %p\n", __func__, osdc);
3289         down_write(&osdc->lock);
3290
3291         /*
3292          * Ping OSDs with laggy or lingering requests, so that a break
3293          * in the TCP connection is noticed and the connection to that
3294          * OSD is reopened (from the fault callback).
3295          */
3296         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3297                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3298                 bool found = false;
3299
3300                 for (p = rb_first(&osd->o_requests); p; ) {
3301                         struct ceph_osd_request *req =
3302                             rb_entry(p, struct ceph_osd_request, r_node);
3303
3304                         p = rb_next(p); /* abort_request() */
3305
3306                         if (time_before(req->r_stamp, cutoff)) {
3307                                 dout(" req %p tid %llu on osd%d is laggy\n",
3308                                      req, req->r_tid, osd->o_osd);
3309                                 found = true;
3310                         }
3311                         if (opts->osd_request_timeout &&
3312                             time_before(req->r_start_stamp, expiry_cutoff)) {
3313                                 pr_err_ratelimited("tid %llu on osd%d timeout\n",
3314                                        req->r_tid, osd->o_osd);
3315                                 abort_request(req, -ETIMEDOUT);
3316                         }
3317                 }
3318                 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
3319                         struct ceph_osd_linger_request *lreq =
3320                             rb_entry(p, struct ceph_osd_linger_request, node);
3321
3322                         dout(" lreq %p linger_id %llu is served by osd%d\n",
3323                              lreq, lreq->linger_id, osd->o_osd);
3324                         found = true;
3325
3326                         mutex_lock(&lreq->lock);
3327                         if (lreq->is_watch && lreq->committed && !lreq->last_error)
3328                                 send_linger_ping(lreq);
3329                         mutex_unlock(&lreq->lock);
3330                 }
3331
3332                 if (found)
3333                         list_move_tail(&osd->o_keepalive_item, &slow_osds);
3334         }
3335
3336         if (opts->osd_request_timeout) {
3337                 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
3338                         struct ceph_osd_request *req =
3339                             rb_entry(p, struct ceph_osd_request, r_node);
3340
3341                         p = rb_next(p); /* abort_request() */
3342
3343                         if (time_before(req->r_start_stamp, expiry_cutoff)) {
3344                                 pr_err_ratelimited("tid %llu on osd%d timeout\n",
3345                                        req->r_tid, osdc->homeless_osd.o_osd);
3346                                 abort_request(req, -ETIMEDOUT);
3347                         }
3348                 }
3349         }
3350
3351         if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
3352                 maybe_request_map(osdc);
3353
3354         while (!list_empty(&slow_osds)) {
3355                 struct ceph_osd *osd = list_first_entry(&slow_osds,
3356                                                         struct ceph_osd,
3357                                                         o_keepalive_item);
3358                 list_del_init(&osd->o_keepalive_item);
3359                 ceph_con_keepalive(&osd->o_con);
3360         }
3361
3362         up_write(&osdc->lock);
3363         schedule_delayed_work(&osdc->timeout_work,
3364                               osdc->client->options->osd_keepalive_timeout);
3365 }
3366
3367 static void handle_osds_timeout(struct work_struct *work)
3368 {
3369         struct ceph_osd_client *osdc =
3370                 container_of(work, struct ceph_osd_client,
3371                              osds_timeout_work.work);
3372         unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
3373         struct ceph_osd *osd, *nosd;
3374
3375         dout("%s osdc %p\n", __func__, osdc);
3376         down_write(&osdc->lock);
3377         list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
3378                 if (time_before(jiffies, osd->lru_ttl))
3379                         break;
3380
3381                 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
3382                 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
3383                 close_osd(osd);
3384         }
3385
3386         up_write(&osdc->lock);
3387         schedule_delayed_work(&osdc->osds_timeout_work,
3388                               round_jiffies_relative(delay));
3389 }
3390
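/*
 * Decode a ceph_object_locator.  The kernel client doesn't support
 * locator keys, namespace changes on redirect or explicit hash values,
 * so any of those being set is treated as an error.
 */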
3391 static int ceph_oloc_decode(void **p, void *end,
3392                             struct ceph_object_locator *oloc)
3393 {
3394         u8 struct_v, struct_cv;
3395         u32 len;
3396         void *struct_end;
3397         int ret = 0;
3398
3399         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3400         struct_v = ceph_decode_8(p);
3401         struct_cv = ceph_decode_8(p);
3402         if (struct_v < 3) {
3403                 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
3404                         struct_v, struct_cv);
3405                 goto e_inval;
3406         }
3407         if (struct_cv > 6) {
3408                 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
3409                         struct_v, struct_cv);
3410                 goto e_inval;
3411         }
3412         len = ceph_decode_32(p);
3413         ceph_decode_need(p, end, len, e_inval);
3414         struct_end = *p + len;
3415
3416         oloc->pool = ceph_decode_64(p);
3417         *p += 4; /* skip preferred */
3418
3419         len = ceph_decode_32(p);
3420         if (len > 0) {
3421                 pr_warn("ceph_object_locator::key is set\n");
3422                 goto e_inval;
3423         }
3424
3425         if (struct_v >= 5) {
3426                 bool changed = false;
3427
3428                 len = ceph_decode_32(p);
3429                 if (len > 0) {
3430                         ceph_decode_need(p, end, len, e_inval);
3431                         if (!oloc->pool_ns ||
3432                             ceph_compare_string(oloc->pool_ns, *p, len))
3433                                 changed = true;
3434                         *p += len;
3435                 } else {
3436                         if (oloc->pool_ns)
3437                                 changed = true;
3438                 }
3439                 if (changed) {
3440                         /* redirect changes namespace */
3441                         pr_warn("ceph_object_locator::nspace is changed\n");
3442                         goto e_inval;
3443                 }
3444         }
3445
3446         if (struct_v >= 6) {
3447                 s64 hash = ceph_decode_64(p);
3448                 if (hash != -1) {
3449                         pr_warn("ceph_object_locator::hash is set\n");
3450                         goto e_inval;
3451                 }
3452         }
3453
3454         /* skip the rest */
3455         *p = struct_end;
3456 out:
3457         return ret;
3458
3459 e_inval:
3460         ret = -EINVAL;
3461         goto out;
3462 }
3463
3464 static int ceph_redirect_decode(void **p, void *end,
3465                                 struct ceph_request_redirect *redir)
3466 {
3467         u8 struct_v, struct_cv;
3468         u32 len;
3469         void *struct_end;
3470         int ret;
3471
3472         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3473         struct_v = ceph_decode_8(p);
3474         struct_cv = ceph_decode_8(p);
3475         if (struct_cv > 1) {
3476                 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
3477                         struct_v, struct_cv);
3478                 goto e_inval;
3479         }
3480         len = ceph_decode_32(p);
3481         ceph_decode_need(p, end, len, e_inval);
3482         struct_end = *p + len;
3483
3484         ret = ceph_oloc_decode(p, end, &redir->oloc);
3485         if (ret)
3486                 goto out;
3487
3488         len = ceph_decode_32(p);
3489         if (len > 0) {
3490                 pr_warn("ceph_request_redirect::object_name is set\n");
3491                 goto e_inval;
3492         }
3493
3494         len = ceph_decode_32(p);
3495         *p += len; /* skip osd_instructions */
3496
3497         /* skip the rest */
3498         *p = struct_end;
3499 out:
3500         return ret;
3501
3502 e_inval:
3503         ret = -EINVAL;
3504         goto out;
3505 }
3506
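/*
 * Decoded form of an MOSDOpReply message, filled in by
 * decode_MOSDOpReply().
 */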
3507 struct MOSDOpReply {
3508         struct ceph_pg pgid;
3509         u64 flags;
3510         int result;
3511         u32 epoch;
3512         int num_ops;
3513         u32 outdata_len[CEPH_OSD_MAX_OPS];
3514         s32 rval[CEPH_OSD_MAX_OPS];
3515         int retry_attempt;
3516         struct ceph_eversion replay_version;
3517         u64 user_version;
3518         struct ceph_request_redirect redirect;
3519 };
3520
3521 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
3522 {
3523         void *p = msg->front.iov_base;
3524         void *const end = p + msg->front.iov_len;
3525         u16 version = le16_to_cpu(msg->hdr.version);
3526         struct ceph_eversion bad_replay_version;
3527         u8 decode_redir;
3528         u32 len;
3529         int ret;
3530         int i;
3531
3532         ceph_decode_32_safe(&p, end, len, e_inval);
3533         ceph_decode_need(&p, end, len, e_inval);
3534         p += len; /* skip oid */
3535
3536         ret = ceph_decode_pgid(&p, end, &m->pgid);
3537         if (ret)
3538                 return ret;
3539
3540         ceph_decode_64_safe(&p, end, m->flags, e_inval);
3541         ceph_decode_32_safe(&p, end, m->result, e_inval);
3542         ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
3543         memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
3544         p += sizeof(bad_replay_version);
3545         ceph_decode_32_safe(&p, end, m->epoch, e_inval);
3546
3547         ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
3548         if (m->num_ops > ARRAY_SIZE(m->outdata_len))
3549                 goto e_inval;
3550
3551         ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
3552                          e_inval);
3553         for (i = 0; i < m->num_ops; i++) {
3554                 struct ceph_osd_op *op = p;
3555
3556                 m->outdata_len[i] = le32_to_cpu(op->payload_len);
3557                 p += sizeof(*op);
3558         }
3559
3560         ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
3561         for (i = 0; i < m->num_ops; i++)
3562                 ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
3563
3564         if (version >= 5) {
3565                 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
3566                 memcpy(&m->replay_version, p, sizeof(m->replay_version));
3567                 p += sizeof(m->replay_version);
3568                 ceph_decode_64_safe(&p, end, m->user_version, e_inval);
3569         } else {
3570                 m->replay_version = bad_replay_version; /* struct */
3571                 m->user_version = le64_to_cpu(m->replay_version.version);
3572         }
3573
3574         if (version >= 6) {
3575                 if (version >= 7)
3576                         ceph_decode_8_safe(&p, end, decode_redir, e_inval);
3577                 else
3578                         decode_redir = 1;
3579         } else {
3580                 decode_redir = 0;
3581         }
3582
3583         if (decode_redir) {
3584                 ret = ceph_redirect_decode(&p, end, &m->redirect);
3585                 if (ret)
3586                         return ret;
3587         } else {
3588                 ceph_oloc_init(&m->redirect.oloc);
3589         }
3590
3591         return 0;
3592
3593 e_inval:
3594         return -EINVAL;
3595 }
3596
3597 /*
3598  * Handle MOSDOpReply.  Set ->r_result and call the callback if it is
3599  * specified.
3600  */
3601 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
3602 {
3603         struct ceph_osd_client *osdc = osd->o_osdc;
3604         struct ceph_osd_request *req;
3605         struct MOSDOpReply m;
3606         u64 tid = le64_to_cpu(msg->hdr.tid);
3607         u32 data_len = 0;
3608         int ret;
3609         int i;
3610
3611         dout("%s msg %p tid %llu\n", __func__, msg, tid);
3612
3613         down_read(&osdc->lock);
3614         if (!osd_registered(osd)) {
3615                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
3616                 goto out_unlock_osdc;
3617         }
3618         WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
3619
3620         mutex_lock(&osd->lock);
3621         req = lookup_request(&osd->o_requests, tid);
3622         if (!req) {
3623                 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
3624                 goto out_unlock_session;
3625         }
3626
3627         m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
3628         ret = decode_MOSDOpReply(msg, &m);
3629         m.redirect.oloc.pool_ns = NULL;
3630         if (ret) {
3631                 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
3632                        req->r_tid, ret);
3633                 ceph_msg_dump(msg);
3634                 goto fail_request;
3635         }
3636         dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
3637              __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
3638              m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
3639              le64_to_cpu(m.replay_version.version), m.user_version);
3640
3641         if (m.retry_attempt >= 0) {
3642                 if (m.retry_attempt != req->r_attempts - 1) {
3643                         dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
3644                              req, req->r_tid, m.retry_attempt,
3645                              req->r_attempts - 1);
3646                         goto out_unlock_session;
3647                 }
3648         } else {
3649                 WARN_ON(1); /* MOSDOpReply v4 is assumed */
3650         }
3651
3652         if (!ceph_oloc_empty(&m.redirect.oloc)) {
3653                 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
3654                      m.redirect.oloc.pool);
3655                 unlink_request(osd, req);
3656                 mutex_unlock(&osd->lock);
3657
3658                 /*
3659                  * Not ceph_oloc_copy() - changing pool_ns is not
3660                  * supported.
3661                  */
3662                 req->r_t.target_oloc.pool = m.redirect.oloc.pool;
3663                 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED |
3664                                 CEPH_OSD_FLAG_IGNORE_OVERLAY |
3665                                 CEPH_OSD_FLAG_IGNORE_CACHE;
3666                 req->r_tid = 0;
3667                 __submit_request(req, false);
3668                 goto out_unlock_osdc;
3669         }
3670
3671         if (m.num_ops != req->r_num_ops) {
3672                 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
3673                        req->r_num_ops, req->r_tid);
3674                 goto fail_request;
3675         }
3676         for (i = 0; i < req->r_num_ops; i++) {
3677                 dout(" req %p tid %llu op %d rval %d len %u\n", req,
3678                      req->r_tid, i, m.rval[i], m.outdata_len[i]);
3679                 req->r_ops[i].rval = m.rval[i];
3680                 req->r_ops[i].outdata_len = m.outdata_len[i];
3681                 data_len += m.outdata_len[i];
3682         }
3683         if (data_len != le32_to_cpu(msg->hdr.data_len)) {
3684                 pr_err("sum of lens %u != %u for tid %llu\n", data_len,
3685                        le32_to_cpu(msg->hdr.data_len), req->r_tid);
3686                 goto fail_request;
3687         }
3688         dout("%s req %p tid %llu result %d data_len %u\n", __func__,
3689              req, req->r_tid, m.result, data_len);
3690
3691         /*
3692          * Since we only ever request ONDISK, we should only ever get
3693          * one (type of) reply back.
3694          */
3695         WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
3696         req->r_result = m.result ?: data_len;
3697         finish_request(req);
3698         mutex_unlock(&osd->lock);
3699         up_read(&osdc->lock);
3700
3701         __complete_request(req);
3702         return;
3703
3704 fail_request:
3705         complete_request(req, -EIO);
3706 out_unlock_session:
3707         mutex_unlock(&osd->lock);
3708 out_unlock_osdc:
3709         up_read(&osdc->lock);
3710 }
3711
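/*
 * Record the current per-pool full flag before a new map is applied so
 * that pool_cleared_full() can detect full -> not-full transitions.
 */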
3712 static void set_pool_was_full(struct ceph_osd_client *osdc)
3713 {
3714         struct rb_node *n;
3715
3716         for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
3717                 struct ceph_pg_pool_info *pi =
3718                     rb_entry(n, struct ceph_pg_pool_info, node);
3719
3720                 pi->was_full = __pool_full(pi);
3721         }
3722 }
3723
3724 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
3725 {
3726         struct ceph_pg_pool_info *pi;
3727
3728         pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
3729         if (!pi)
3730                 return false;
3731
3732         return pi->was_full && !__pool_full(pi);
3733 }
3734
3735 static enum calc_target_result
3736 recalc_linger_target(struct ceph_osd_linger_request *lreq)
3737 {
3738         struct ceph_osd_client *osdc = lreq->osdc;
3739         enum calc_target_result ct_res;
3740
3741         ct_res = calc_target(osdc, &lreq->t, true);
3742         if (ct_res == CALC_TARGET_NEED_RESEND) {
3743                 struct ceph_osd *osd;
3744
3745                 osd = lookup_create_osd(osdc, lreq->t.osd, true);
3746                 if (osd != lreq->osd) {
3747                         unlink_linger(lreq->osd, lreq);
3748                         link_linger(osd, lreq);
3749                 }
3750         }
3751
3752         return ct_res;
3753 }
3754
3755 /*
3756  * Requeue requests whose mapping to an OSD has changed.
3757  */
3758 static void scan_requests(struct ceph_osd *osd,
3759                           bool force_resend,
3760                           bool cleared_full,
3761                           bool check_pool_cleared_full,
3762                           struct rb_root *need_resend,
3763                           struct list_head *need_resend_linger)
3764 {
3765         struct ceph_osd_client *osdc = osd->o_osdc;
3766         struct rb_node *n;
3767         bool force_resend_writes;
3768
3769         for (n = rb_first(&osd->o_linger_requests); n; ) {
3770                 struct ceph_osd_linger_request *lreq =
3771                     rb_entry(n, struct ceph_osd_linger_request, node);
3772                 enum calc_target_result ct_res;
3773
3774                 n = rb_next(n); /* recalc_linger_target() */
3775
3776                 dout("%s lreq %p linger_id %llu\n", __func__, lreq,
3777                      lreq->linger_id);
3778                 ct_res = recalc_linger_target(lreq);
3779                 switch (ct_res) {
3780                 case CALC_TARGET_NO_ACTION:
3781                         force_resend_writes = cleared_full ||
3782                             (check_pool_cleared_full &&
3783                              pool_cleared_full(osdc, lreq->t.base_oloc.pool));
3784                         if (!force_resend && !force_resend_writes)
3785                                 break;
3786
3787                         /* fall through */
3788                 case CALC_TARGET_NEED_RESEND:
3789                         cancel_linger_map_check(lreq);
3790                         /*
3791                          * scan_requests() for the previous epoch(s)
3792                          * may have already added it to the list, since
3793                          * it's not unlinked here.
3794                          */
3795                         if (list_empty(&lreq->scan_item))
3796                                 list_add_tail(&lreq->scan_item, need_resend_linger);
3797                         break;
3798                 case CALC_TARGET_POOL_DNE:
3799                         list_del_init(&lreq->scan_item);
3800                         check_linger_pool_dne(lreq);
3801                         break;
3802                 }
3803         }
3804
3805         for (n = rb_first(&osd->o_requests); n; ) {
3806                 struct ceph_osd_request *req =
3807                     rb_entry(n, struct ceph_osd_request, r_node);
3808                 enum calc_target_result ct_res;
3809
3810                 n = rb_next(n); /* unlink_request(), check_pool_dne() */
3811
3812                 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3813                 ct_res = calc_target(osdc, &req->r_t, false);
3814                 switch (ct_res) {
3815                 case CALC_TARGET_NO_ACTION:
3816                         force_resend_writes = cleared_full ||
3817                             (check_pool_cleared_full &&
3818                              pool_cleared_full(osdc, req->r_t.base_oloc.pool));
3819                         if (!force_resend &&
3820                             (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
3821                              !force_resend_writes))
3822                                 break;
3823
3824                         /* fall through */
3825                 case CALC_TARGET_NEED_RESEND:
3826                         cancel_map_check(req);
3827                         unlink_request(osd, req);
3828                         insert_request(need_resend, req);
3829                         break;
3830                 case CALC_TARGET_POOL_DNE:
3831                         check_pool_dne(req);
3832                         break;
3833                 }
3834         }
3835 }
3836
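/*
 * Decode and install a single incremental or full osdmap.  Preserves
 * per-pool ->was_full state across the swap, closes OSDs that went
 * down or changed address, and collects requests whose mapping may
 * have changed into @need_resend and @need_resend_linger.
 */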
3837 static int handle_one_map(struct ceph_osd_client *osdc,
3838                           void *p, void *end, bool incremental,
3839                           struct rb_root *need_resend,
3840                           struct list_head *need_resend_linger)
3841 {
3842         struct ceph_osdmap *newmap;
3843         struct rb_node *n;
3844         bool skipped_map = false;
3845         bool was_full;
3846
3847         was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3848         set_pool_was_full(osdc);
3849
3850         if (incremental)
3851                 newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
3852         else
3853                 newmap = ceph_osdmap_decode(&p, end);
3854         if (IS_ERR(newmap))
3855                 return PTR_ERR(newmap);
3856
3857         if (newmap != osdc->osdmap) {
3858                 /*
3859                  * Preserve ->was_full before destroying the old map.
3860                  * For pools that weren't in the old map, ->was_full
3861                  * should be false.
3862                  */
3863                 for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
3864                         struct ceph_pg_pool_info *pi =
3865                             rb_entry(n, struct ceph_pg_pool_info, node);
3866                         struct ceph_pg_pool_info *old_pi;
3867
3868                         old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
3869                         if (old_pi)
3870                                 pi->was_full = old_pi->was_full;
3871                         else
3872                                 WARN_ON(pi->was_full);
3873                 }
3874
3875                 if (osdc->osdmap->epoch &&
3876                     osdc->osdmap->epoch + 1 < newmap->epoch) {
3877                         WARN_ON(incremental);
3878                         skipped_map = true;
3879                 }
3880
3881                 ceph_osdmap_destroy(osdc->osdmap);
3882                 osdc->osdmap = newmap;
3883         }
3884
3885         was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3886         scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
3887                       need_resend, need_resend_linger);
3888
3889         for (n = rb_first(&osdc->osds); n; ) {
3890                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3891
3892                 n = rb_next(n); /* close_osd() */
3893
3894                 scan_requests(osd, skipped_map, was_full, true, need_resend,
3895                               need_resend_linger);
3896                 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
3897                     memcmp(&osd->o_con.peer_addr,
3898                            ceph_osd_addr(osdc->osdmap, osd->o_osd),
3899                            sizeof(struct ceph_entity_addr)))
3900                         close_osd(osd);
3901         }
3902
3903         return 0;
3904 }
3905
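/*
 * Resend the requests collected by scan_requests(), recalculating any
 * targets that predate the newly installed map.  Linger requests are
 * resent via send_linger().
 */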
3906 static void kick_requests(struct ceph_osd_client *osdc,
3907                           struct rb_root *need_resend,
3908                           struct list_head *need_resend_linger)
3909 {
3910         struct ceph_osd_linger_request *lreq, *nlreq;
3911         enum calc_target_result ct_res;
3912         struct rb_node *n;
3913
3914         /* make sure need_resend targets reflect latest map */
3915         for (n = rb_first(need_resend); n; ) {
3916                 struct ceph_osd_request *req =
3917                     rb_entry(n, struct ceph_osd_request, r_node);
3918
3919                 n = rb_next(n);
3920
3921                 if (req->r_t.epoch < osdc->osdmap->epoch) {
3922                         ct_res = calc_target(osdc, &req->r_t, false);
3923                         if (ct_res == CALC_TARGET_POOL_DNE) {
3924                                 erase_request(need_resend, req);
3925                                 check_pool_dne(req);
3926                         }
3927                 }
3928         }
3929
3930         for (n = rb_first(need_resend); n; ) {
3931                 struct ceph_osd_request *req =
3932                     rb_entry(n, struct ceph_osd_request, r_node);
3933                 struct ceph_osd *osd;
3934
3935                 n = rb_next(n);
3936                 erase_request(need_resend, req); /* before link_request() */
3937
3938                 osd = lookup_create_osd(osdc, req->r_t.osd, true);
3939                 link_request(osd, req);
3940                 if (!req->r_linger) {
3941                         if (!osd_homeless(osd) && !req->r_t.paused)
3942                                 send_request(req);
3943                 } else {
3944                         cancel_linger_request(req);
3945                 }
3946         }
3947
3948         list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
3949                 if (!osd_homeless(lreq->osd))
3950                         send_linger(lreq);
3951
3952                 list_del_init(&lreq->scan_item);
3953         }
3954 }
3955
3956 /*
3957  * Process updated osd map.
3958  *
3959  * The message contains any number of incremental and full maps, normally
3960  * indicating some sort of topology change in the cluster.  Kick requests
3961  * off to different OSDs as needed.
3962  */
3963 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
3964 {
3965         void *p = msg->front.iov_base;
3966         void *const end = p + msg->front.iov_len;
3967         u32 nr_maps, maplen;
3968         u32 epoch;
3969         struct ceph_fsid fsid;
3970         struct rb_root need_resend = RB_ROOT;
3971         LIST_HEAD(need_resend_linger);
3972         bool handled_incremental = false;
3973         bool was_pauserd, was_pausewr;
3974         bool pauserd, pausewr;
3975         int err;
3976
3977         dout("%s have %u\n", __func__, osdc->osdmap->epoch);
3978         down_write(&osdc->lock);
3979
3980         /* verify fsid */
3981         ceph_decode_need(&p, end, sizeof(fsid), bad);
3982         ceph_decode_copy(&p, &fsid, sizeof(fsid));
3983         if (ceph_check_fsid(osdc->client, &fsid) < 0)
3984                 goto bad;
3985
3986         was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3987         was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3988                       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3989                       have_pool_full(osdc);
3990
3991         /* incremental maps */
3992         ceph_decode_32_safe(&p, end, nr_maps, bad);
3993         dout(" %d inc maps\n", nr_maps);
3994         while (nr_maps > 0) {
3995                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3996                 epoch = ceph_decode_32(&p);
3997                 maplen = ceph_decode_32(&p);
3998                 ceph_decode_need(&p, end, maplen, bad);
3999                 if (osdc->osdmap->epoch &&
4000                     osdc->osdmap->epoch + 1 == epoch) {
4001                         dout("applying incremental map %u len %d\n",
4002                              epoch, maplen);
4003                         err = handle_one_map(osdc, p, p + maplen, true,
4004                                              &need_resend, &need_resend_linger);
4005                         if (err)
4006                                 goto bad;
4007                         handled_incremental = true;
4008                 } else {
4009                         dout("ignoring incremental map %u len %d\n",
4010                              epoch, maplen);
4011                 }
4012                 p += maplen;
4013                 nr_maps--;
4014         }
4015         if (handled_incremental)
4016                 goto done;
4017
4018         /* full maps */
4019         ceph_decode_32_safe(&p, end, nr_maps, bad);
4020         dout(" %d full maps\n", nr_maps);
4021         while (nr_maps) {
4022                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
4023                 epoch = ceph_decode_32(&p);
4024                 maplen = ceph_decode_32(&p);
4025                 ceph_decode_need(&p, end, maplen, bad);
4026                 if (nr_maps > 1) {
4027                         dout("skipping non-latest full map %u len %d\n",
4028                              epoch, maplen);
4029                 } else if (osdc->osdmap->epoch >= epoch) {
4030                         dout("skipping full map %u len %d, "
4031                              "older than our %u\n", epoch, maplen,
4032                              osdc->osdmap->epoch);
4033                 } else {
4034                         dout("taking full map %u len %d\n", epoch, maplen);
4035                         err = handle_one_map(osdc, p, p + maplen, false,
4036                                              &need_resend, &need_resend_linger);
4037                         if (err)
4038                                 goto bad;
4039                 }
4040                 p += maplen;
4041                 nr_maps--;
4042         }
4043
4044 done:
4045         /*
4046          * Subscribe to subsequent osdmap updates while full or paused,
4047          * to ensure we find out when we are no longer full and can
4048          * stop returning ENOSPC.
4049          */
4050         pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
4051         pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
4052                   ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
4053                   have_pool_full(osdc);
4054         if (was_pauserd || was_pausewr || pauserd || pausewr ||
4055             osdc->osdmap->epoch < osdc->epoch_barrier)
4056                 maybe_request_map(osdc);
4057
4058         kick_requests(osdc, &need_resend, &need_resend_linger);
4059
4060         ceph_osdc_abort_on_full(osdc);
4061         ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
4062                           osdc->osdmap->epoch);
4063         up_write(&osdc->lock);
4064         wake_up_all(&osdc->client->auth_wq);
4065         return;
4066
4067 bad:
4068         pr_err("osdc handle_map corrupt msg\n");
4069         ceph_msg_dump(msg);
4070         up_write(&osdc->lock);
4071 }
4072
4073 /*
4074  * Resubmit requests pending on the given osd.
4075  */
4076 static void kick_osd_requests(struct ceph_osd *osd)
4077 {
4078         struct rb_node *n;
4079
4080         clear_backoffs(osd);
4081
4082         for (n = rb_first(&osd->o_requests); n; ) {
4083                 struct ceph_osd_request *req =
4084                     rb_entry(n, struct ceph_osd_request, r_node);
4085
4086                 n = rb_next(n); /* cancel_linger_request() */
4087
4088                 if (!req->r_linger) {
4089                         if (!req->r_t.paused)
4090                                 send_request(req);
4091                 } else {
4092                         cancel_linger_request(req);
4093                 }
4094         }
4095         for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
4096                 struct ceph_osd_linger_request *lreq =
4097                     rb_entry(n, struct ceph_osd_linger_request, node);
4098
4099                 send_linger(lreq);
4100         }
4101 }
4102
4103 /*
4104  * If the osd connection drops, we need to resubmit all requests.
4105  */
4106 static void osd_fault(struct ceph_connection *con)
4107 {
4108         struct ceph_osd *osd = con->private;
4109         struct ceph_osd_client *osdc = osd->o_osdc;
4110
4111         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
4112
4113         down_write(&osdc->lock);
4114         if (!osd_registered(osd)) {
4115                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
4116                 goto out_unlock;
4117         }
4118
4119         if (!reopen_osd(osd))
4120                 kick_osd_requests(osd);
4121         maybe_request_map(osdc);
4122
4123 out_unlock:
4124         up_write(&osdc->lock);
4125 }
4126
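/*
 * Decoded form of an MOSDBackoff message.  ->begin and ->end are
 * allocated by decode_MOSDBackoff() and are either freed by the caller
 * or handed over to the installed backoff.
 */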
4127 struct MOSDBackoff {
4128         struct ceph_spg spgid;
4129         u32 map_epoch;
4130         u8 op;
4131         u64 id;
4132         struct ceph_hobject_id *begin;
4133         struct ceph_hobject_id *end;
4134 };
4135
4136 static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
4137 {
4138         void *p = msg->front.iov_base;
4139         void *const end = p + msg->front.iov_len;
4140         u8 struct_v;
4141         u32 struct_len;
4142         int ret;
4143
4144         ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
4145         if (ret)
4146                 return ret;
4147
4148         ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
4149         if (ret)
4150                 return ret;
4151
4152         ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);
4153         ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
4154         ceph_decode_8_safe(&p, end, m->op, e_inval);
4155         ceph_decode_64_safe(&p, end, m->id, e_inval);
4156
4157         m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO);
4158         if (!m->begin)
4159                 return -ENOMEM;
4160
4161         ret = decode_hoid(&p, end, m->begin);
4162         if (ret) {
4163                 free_hoid(m->begin);
4164                 return ret;
4165         }
4166
4167         m->end = kzalloc(sizeof(*m->end), GFP_NOIO);
4168         if (!m->end) {
4169                 free_hoid(m->begin);
4170                 return -ENOMEM;
4171         }
4172
4173         ret = decode_hoid(&p, end, m->end);
4174         if (ret) {
4175                 free_hoid(m->begin);
4176                 free_hoid(m->end);
4177                 return ret;
4178         }
4179
4180         return 0;
4181
4182 e_inval:
4183         return -EINVAL;
4184 }
4185
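/*
 * Build a CEPH_MSG_OSD_BACKOFF message acknowledging @backoff
 * (CEPH_OSD_BACKOFF_OP_ACK_BLOCK).
 */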
4186 static struct ceph_msg *create_backoff_message(
4187                                 const struct ceph_osd_backoff *backoff,
4188                                 u32 map_epoch)
4189 {
4190         struct ceph_msg *msg;
4191         void *p, *end;
4192         int msg_size;
4193
4194         msg_size = CEPH_ENCODING_START_BLK_LEN +
4195                         CEPH_PGID_ENCODING_LEN + 1; /* spgid */
4196         msg_size += 4 + 1 + 8; /* map_epoch, op, id */
4197         msg_size += CEPH_ENCODING_START_BLK_LEN +
4198                         hoid_encoding_size(backoff->begin);
4199         msg_size += CEPH_ENCODING_START_BLK_LEN +
4200                         hoid_encoding_size(backoff->end);
4201
4202         msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
4203         if (!msg)
4204                 return NULL;
4205
4206         p = msg->front.iov_base;
4207         end = p + msg->front_alloc_len;
4208
4209         encode_spgid(&p, &backoff->spgid);
4210         ceph_encode_32(&p, map_epoch);
4211         ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
4212         ceph_encode_64(&p, backoff->id);
4213         encode_hoid(&p, end, backoff->begin);
4214         encode_hoid(&p, end, backoff->end);
4215         BUG_ON(p != end);
4216
4217         msg->front.iov_len = p - msg->front.iov_base;
4218         msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */
4219         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
4220
4221         return msg;
4222 }
4223
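/*
 * Install a backoff for the given hobject range of the PG and ack it
 * so that the OSD knows we are holding off.
 */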
4224 static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
4225 {
4226         struct ceph_spg_mapping *spg;
4227         struct ceph_osd_backoff *backoff;
4228         struct ceph_msg *msg;
4229
4230         dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4231              m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4232
4233         spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid);
4234         if (!spg) {
4235                 spg = alloc_spg_mapping();
4236                 if (!spg) {
4237                         pr_err("%s failed to allocate spg\n", __func__);
4238                         return;
4239                 }
4240                 spg->spgid = m->spgid; /* struct */
4241                 insert_spg_mapping(&osd->o_backoff_mappings, spg);
4242         }
4243
4244         backoff = alloc_backoff();
4245         if (!backoff) {
4246                 pr_err("%s failed to allocate backoff\n", __func__);
4247                 return;
4248         }
4249         backoff->spgid = m->spgid; /* struct */
4250         backoff->id = m->id;
4251         backoff->begin = m->begin;
4252         m->begin = NULL; /* backoff now owns this */
4253         backoff->end = m->end;
4254         m->end = NULL;   /* ditto */
4255
4256         insert_backoff(&spg->backoffs, backoff);
4257         insert_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4258
4259         /*
4260          * Ack with original backoff's epoch so that the OSD can
4261          * discard this if there was a PG split.
4262          */
4263         msg = create_backoff_message(backoff, m->map_epoch);
4264         if (!msg) {
4265                 pr_err("%s failed to allocate msg\n", __func__);
4266                 return;
4267         }
4268         ceph_con_send(&osd->o_con, msg);
4269 }
4270
4271 static bool target_contained_by(const struct ceph_osd_request_target *t,
4272                                 const struct ceph_hobject_id *begin,
4273                                 const struct ceph_hobject_id *end)
4274 {
4275         struct ceph_hobject_id hoid;
4276         int cmp;
4277
4278         hoid_fill_from_target(&hoid, t);
4279         cmp = hoid_compare(&hoid, begin);
4280         return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0);
4281 }
4282
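/*
 * Remove the backoff identified by @m->id and resend any requests that
 * fall into the now unblocked range.
 */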
4283 static void handle_backoff_unblock(struct ceph_osd *osd,
4284                                    const struct MOSDBackoff *m)
4285 {
4286         struct ceph_spg_mapping *spg;
4287         struct ceph_osd_backoff *backoff;
4288         struct rb_node *n;
4289
4290         dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4291              m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4292
4293         backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
4294         if (!backoff) {
4295                 pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
4296                        __func__, osd->o_osd, m->spgid.pgid.pool,
4297                        m->spgid.pgid.seed, m->spgid.shard, m->id);
4298                 return;
4299         }
4300
4301         if (hoid_compare(backoff->begin, m->begin) &&
4302             hoid_compare(backoff->end, m->end)) {
4303                 pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
4304                        __func__, osd->o_osd, m->spgid.pgid.pool,
4305                        m->spgid.pgid.seed, m->spgid.shard, m->id);
4306                 /* unblock it anyway... */
4307         }
4308
4309         spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
4310         BUG_ON(!spg);
4311
4312         erase_backoff(&spg->backoffs, backoff);
4313         erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4314         free_backoff(backoff);
4315
4316         if (RB_EMPTY_ROOT(&spg->backoffs)) {
4317                 erase_spg_mapping(&osd->o_backoff_mappings, spg);
4318                 free_spg_mapping(spg);
4319         }
4320
4321         for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
4322                 struct ceph_osd_request *req =
4323                     rb_entry(n, struct ceph_osd_request, r_node);
4324
4325                 if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
4326                         /*
4327                          * Match against @m, not @backoff -- the PG may
4328                          * have split on the OSD.
4329                          */
4330                         if (target_contained_by(&req->r_t, m->begin, m->end)) {
4331                                 /*
4332                                  * If no other installed backoff applies,
4333                                  * resend.
4334                                  */
4335                                 send_request(req);
4336                         }
4337                 }
4338         }
4339 }
4340
4341 static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
4342 {
4343         struct ceph_osd_client *osdc = osd->o_osdc;
4344         struct MOSDBackoff m;
4345         int ret;
4346
4347         down_read(&osdc->lock);
4348         if (!osd_registered(osd)) {
4349                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
4350                 up_read(&osdc->lock);
4351                 return;
4352         }
4353         WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
4354
4355         mutex_lock(&osd->lock);
4356         ret = decode_MOSDBackoff(msg, &m);
4357         if (ret) {
4358                 pr_err("failed to decode MOSDBackoff: %d\n", ret);
4359                 ceph_msg_dump(msg);
4360                 goto out_unlock;
4361         }
4362
4363         switch (m.op) {
4364         case CEPH_OSD_BACKOFF_OP_BLOCK:
4365                 handle_backoff_block(osd, &m);
4366                 break;
4367         case CEPH_OSD_BACKOFF_OP_UNBLOCK:
4368                 handle_backoff_unblock(osd, &m);
4369                 break;
4370         default:
4371                 pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
4372         }
4373
4374         free_hoid(m.begin);
4375         free_hoid(m.end);
4376
4377 out_unlock:
4378         mutex_unlock(&osd->lock);
4379         up_read(&osdc->lock);
4380 }
4381
4382 /*
4383  * Process osd watch notifications
4384  */
4385 static void handle_watch_notify(struct ceph_osd_client *osdc,
4386                                 struct ceph_msg *msg)
4387 {
4388         void *p = msg->front.iov_base;
4389         void *const end = p + msg->front.iov_len;
4390         struct ceph_osd_linger_request *lreq;
4391         struct linger_work *lwork;
4392         u8 proto_ver, opcode;
4393         u64 cookie, notify_id;
4394         u64 notifier_id = 0;
4395         s32 return_code = 0;
4396         void *payload = NULL;
4397         u32 payload_len = 0;
4398
4399         ceph_decode_8_safe(&p, end, proto_ver, bad);
4400         ceph_decode_8_safe(&p, end, opcode, bad);
4401         ceph_decode_64_safe(&p, end, cookie, bad);
4402         p += 8; /* skip ver */
4403         ceph_decode_64_safe(&p, end, notify_id, bad);
4404
4405         if (proto_ver >= 1) {
4406                 ceph_decode_32_safe(&p, end, payload_len, bad);
4407                 ceph_decode_need(&p, end, payload_len, bad);
4408                 payload = p;
4409                 p += payload_len;
4410         }
4411
4412         if (le16_to_cpu(msg->hdr.version) >= 2)
4413                 ceph_decode_32_safe(&p, end, return_code, bad);
4414
4415         if (le16_to_cpu(msg->hdr.version) >= 3)
4416                 ceph_decode_64_safe(&p, end, notifier_id, bad);
4417
4418         down_read(&osdc->lock);
4419         lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
4420         if (!lreq) {
4421                 dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
4422                      cookie);
4423                 goto out_unlock_osdc;
4424         }
4425
4426         mutex_lock(&lreq->lock);
4427         dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
4428              opcode, cookie, lreq, lreq->is_watch);
4429         if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
4430                 if (!lreq->last_error) {
4431                         lreq->last_error = -ENOTCONN;
4432                         queue_watch_error(lreq);
4433                 }
4434         } else if (!lreq->is_watch) {
4435                 /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
4436                 if (lreq->notify_id && lreq->notify_id != notify_id) {
4437                         dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
4438                              lreq->notify_id, notify_id);
4439                 } else if (!completion_done(&lreq->notify_finish_wait)) {
4440                         struct ceph_msg_data *data =
4441                             msg->num_data_items ? &msg->data[0] : NULL;
4442
4443                         if (data) {
4444                                 if (lreq->preply_pages) {
4445                                         WARN_ON(data->type !=
4446                                                         CEPH_MSG_DATA_PAGES);
4447                                         *lreq->preply_pages = data->pages;
4448                                         *lreq->preply_len = data->length;
4449                                         data->own_pages = false;
4450                                 }
4451                         }
4452                         lreq->notify_finish_error = return_code;
4453                         complete_all(&lreq->notify_finish_wait);
4454                 }
4455         } else {
4456                 /* CEPH_WATCH_EVENT_NOTIFY */
4457                 lwork = lwork_alloc(lreq, do_watch_notify);
4458                 if (!lwork) {
4459                         pr_err("failed to allocate notify-lwork\n");
4460                         goto out_unlock_lreq;
4461                 }
4462
4463                 lwork->notify.notify_id = notify_id;
4464                 lwork->notify.notifier_id = notifier_id;
4465                 lwork->notify.payload = payload;
4466                 lwork->notify.payload_len = payload_len;
4467                 lwork->notify.msg = ceph_msg_get(msg);
4468                 lwork_queue(lwork);
4469         }
4470
4471 out_unlock_lreq:
4472         mutex_unlock(&lreq->lock);
4473 out_unlock_osdc:
4474         up_read(&osdc->lock);
4475         return;
4476
4477 bad:
4478         pr_err("osdc handle_watch_notify corrupt msg\n");
4479 }
4480
4481 /*
4482  * Register request, send initial attempt.  @nofail is currently unused.
4483  */
4484 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
4485                             struct ceph_osd_request *req,
4486                             bool nofail)
4487 {
4488         down_read(&osdc->lock);
4489         submit_request(req, false);
4490         up_read(&osdc->lock);
4491
4492         return 0;
4493 }
4494 EXPORT_SYMBOL(ceph_osdc_start_request);
4495
4496 /*
4497  * Unregister a registered request.  The request is not completed:
4498  * ->r_result isn't set and __complete_request() isn't called.
4499  */
4500 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
4501 {
4502         struct ceph_osd_client *osdc = req->r_osdc;
4503
4504         down_write(&osdc->lock);
4505         if (req->r_osd)
4506                 cancel_request(req);
4507         up_write(&osdc->lock);
4508 }
4509 EXPORT_SYMBOL(ceph_osdc_cancel_request);
4510
4511 /*
4512  * @timeout: in jiffies, 0 means "wait forever"
4513  */
4514 static int wait_request_timeout(struct ceph_osd_request *req,
4515                                 unsigned long timeout)
4516 {
4517         long left;
4518
4519         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
4520         left = wait_for_completion_killable_timeout(&req->r_completion,
4521                                                 ceph_timeout_jiffies(timeout));
4522         if (left <= 0) {
4523                 left = left ?: -ETIMEDOUT;
4524                 ceph_osdc_cancel_request(req);
4525         } else {
4526                 left = req->r_result; /* completed */
4527         }
4528
4529         return left;
4530 }
4531
4532 /*
4533  * wait for a request to complete
4534  */
4535 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
4536                            struct ceph_osd_request *req)
4537 {
4538         return wait_request_timeout(req, 0);
4539 }
4540 EXPORT_SYMBOL(ceph_osdc_wait_request);
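
/*
 * Editor's note: a minimal usage sketch (not part of the original file)
 * of the alloc/start/wait/put pattern that the synchronous helpers
 * below follow.  The helper name, pool id and object name are
 * hypothetical; error handling is abbreviated.
 */
static int __maybe_unused example_sync_create(struct ceph_osd_client *osdc)
{
        struct ceph_osd_request *req;
        int ret;

        req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
        if (!req)
                return -ENOMEM;

        ret = ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, "%s", "foo");
        if (ret)
                goto out_put_req;
        req->r_base_oloc.pool = 0;              /* hypothetical pool id */
        req->r_flags = CEPH_OSD_FLAG_WRITE;
        ktime_get_real_ts64(&req->r_mtime);

        osd_req_op_init(req, 0, CEPH_OSD_OP_CREATE, 0);

        ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
        if (ret)
                goto out_put_req;

        ceph_osdc_start_request(osdc, req, false);      /* always returns 0 */
        ret = ceph_osdc_wait_request(osdc, req);        /* r_result or error */

out_put_req:
        ceph_osdc_put_request(req);
        return ret;
}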
4541
4542 /*
4543  * sync - wait for in-flight writes up to entry-time last_tid to flush; avoids starvation.
4544  */
4545 void ceph_osdc_sync(struct ceph_osd_client *osdc)
4546 {
4547         struct rb_node *n, *p;
4548         u64 last_tid = atomic64_read(&osdc->last_tid);
4549
4550 again:
4551         down_read(&osdc->lock);
4552         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
4553                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
4554
4555                 mutex_lock(&osd->lock);
4556                 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
4557                         struct ceph_osd_request *req =
4558                             rb_entry(p, struct ceph_osd_request, r_node);
4559
4560                         if (req->r_tid > last_tid)
4561                                 break;
4562
4563                         if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
4564                                 continue;
4565
4566                         ceph_osdc_get_request(req);
4567                         mutex_unlock(&osd->lock);
4568                         up_read(&osdc->lock);
4569                         dout("%s waiting on req %p tid %llu last_tid %llu\n",
4570                              __func__, req, req->r_tid, last_tid);
4571                         wait_for_completion(&req->r_completion);
4572                         ceph_osdc_put_request(req);
4573                         goto again;
4574                 }
4575
4576                 mutex_unlock(&osd->lock);
4577         }
4578
4579         up_read(&osdc->lock);
4580         dout("%s done last_tid %llu\n", __func__, last_tid);
4581 }
4582 EXPORT_SYMBOL(ceph_osdc_sync);
4583
4584 static struct ceph_osd_request *
4585 alloc_linger_request(struct ceph_osd_linger_request *lreq)
4586 {
4587         struct ceph_osd_request *req;
4588
4589         req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
4590         if (!req)
4591                 return NULL;
4592
4593         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4594         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4595         return req;
4596 }
4597
4598 static struct ceph_osd_request *
4599 alloc_watch_request(struct ceph_osd_linger_request *lreq, u8 watch_opcode)
4600 {
4601         struct ceph_osd_request *req;
4602
4603         req = alloc_linger_request(lreq);
4604         if (!req)
4605                 return NULL;
4606
4607         /*
4608          * Pass 0 for cookie because we don't know it yet; it will be
4609          * filled in by linger_submit().
4610          */
4611         osd_req_op_watch_init(req, 0, 0, watch_opcode);
4612
4613         if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
4614                 ceph_osdc_put_request(req);
4615                 return NULL;
4616         }
4617
4618         return req;
4619 }
4620
4621 /*
4622  * Returns a handle, caller owns a ref.
4623  */
4624 struct ceph_osd_linger_request *
4625 ceph_osdc_watch(struct ceph_osd_client *osdc,
4626                 struct ceph_object_id *oid,
4627                 struct ceph_object_locator *oloc,
4628                 rados_watchcb2_t wcb,
4629                 rados_watcherrcb_t errcb,
4630                 void *data)
4631 {
4632         struct ceph_osd_linger_request *lreq;
4633         int ret;
4634
4635         lreq = linger_alloc(osdc);
4636         if (!lreq)
4637                 return ERR_PTR(-ENOMEM);
4638
4639         lreq->is_watch = true;
4640         lreq->wcb = wcb;
4641         lreq->errcb = errcb;
4642         lreq->data = data;
4643         lreq->watch_valid_thru = jiffies;
4644
4645         ceph_oid_copy(&lreq->t.base_oid, oid);
4646         ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4647         lreq->t.flags = CEPH_OSD_FLAG_WRITE;
4648         ktime_get_real_ts64(&lreq->mtime);
4649
4650         lreq->reg_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_WATCH);
4651         if (!lreq->reg_req) {
4652                 ret = -ENOMEM;
4653                 goto err_put_lreq;
4654         }
4655
4656         lreq->ping_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_PING);
4657         if (!lreq->ping_req) {
4658                 ret = -ENOMEM;
4659                 goto err_put_lreq;
4660         }
4661
4662         linger_submit(lreq);
4663         ret = linger_reg_commit_wait(lreq);
4664         if (ret) {
4665                 linger_cancel(lreq);
4666                 goto err_put_lreq;
4667         }
4668
4669         return lreq;
4670
4671 err_put_lreq:
4672         linger_put(lreq);
4673         return ERR_PTR(ret);
4674 }
4675 EXPORT_SYMBOL(ceph_osdc_watch);
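
/*
 * Editor's note: a hedged sketch (not in the original file) of
 * registering a watch, modeled on rbd's usage of this API.  The
 * callback bodies and the helper name are hypothetical.
 */
static void example_watch_cb(void *arg, u64 notify_id, u64 cookie,
                             u64 notifier_id, void *data, size_t data_len)
{
        /*
         * Runs from osdc->notify_wq.  A notify should eventually be
         * acked with ceph_osdc_notify_ack() (see below).
         */
}

static void example_watch_errcb(void *arg, u64 cookie, int err)
{
        pr_err("watch cookie %llu failed: %d\n", cookie, err);
}

static struct ceph_osd_linger_request *__maybe_unused
example_register_watch(struct ceph_osd_client *osdc,
                       struct ceph_object_id *oid,
                       struct ceph_object_locator *oloc)
{
        /* on success the returned handle owns a ref; keep it for unwatch */
        return ceph_osdc_watch(osdc, oid, oloc, example_watch_cb,
                               example_watch_errcb, NULL);
}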
4676
4677 /*
4678  * Releases a ref.
4679  *
4680  * Times out after mount_timeout to preserve rbd unmap behaviour
4681  * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
4682  * with mount_timeout").
4683  */
4684 int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
4685                       struct ceph_osd_linger_request *lreq)
4686 {
4687         struct ceph_options *opts = osdc->client->options;
4688         struct ceph_osd_request *req;
4689         int ret;
4690
4691         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4692         if (!req)
4693                 return -ENOMEM;
4694
4695         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4696         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4697         req->r_flags = CEPH_OSD_FLAG_WRITE;
4698         ktime_get_real_ts64(&req->r_mtime);
4699         osd_req_op_watch_init(req, 0, lreq->linger_id,
4700                               CEPH_OSD_WATCH_OP_UNWATCH);
4701
4702         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4703         if (ret)
4704                 goto out_put_req;
4705
4706         ceph_osdc_start_request(osdc, req, false);
4707         linger_cancel(lreq);
4708         linger_put(lreq);
4709         ret = wait_request_timeout(req, opts->mount_timeout);
4710
4711 out_put_req:
4712         ceph_osdc_put_request(req);
4713         return ret;
4714 }
4715 EXPORT_SYMBOL(ceph_osdc_unwatch);
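
/*
 * Editor's note: a hypothetical teardown sketch.  rbd pairs
 * ceph_osdc_unwatch() with ceph_osdc_flush_notifies() (defined later
 * in this file) so that no notify callback is still running once the
 * watch is gone.
 */
static void __maybe_unused
example_unregister_watch(struct ceph_osd_client *osdc,
                         struct ceph_osd_linger_request *handle)
{
        int ret;

        ret = ceph_osdc_unwatch(osdc, handle);
        if (ret)
                pr_warn("failed to unwatch: %d\n", ret);
        ceph_osdc_flush_notifies(osdc);         /* drain pending callbacks */
}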
4716
4717 static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
4718                                       u64 notify_id, u64 cookie, void *payload,
4719                                       u32 payload_len)
4720 {
4721         struct ceph_osd_req_op *op;
4722         struct ceph_pagelist *pl;
4723         int ret;
4724
4725         op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
4726
4727         pl = ceph_pagelist_alloc(GFP_NOIO);
4728         if (!pl)
4729                 return -ENOMEM;
4730
4731         ret = ceph_pagelist_encode_64(pl, notify_id);
4732         ret |= ceph_pagelist_encode_64(pl, cookie);
4733         if (payload) {
4734                 ret |= ceph_pagelist_encode_32(pl, payload_len);
4735                 ret |= ceph_pagelist_append(pl, payload, payload_len);
4736         } else {
4737                 ret |= ceph_pagelist_encode_32(pl, 0);
4738         }
4739         if (ret) {
4740                 ceph_pagelist_release(pl);
4741                 return -ENOMEM;
4742         }
4743
4744         ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
4745         op->indata_len = pl->length;
4746         return 0;
4747 }
4748
4749 int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
4750                          struct ceph_object_id *oid,
4751                          struct ceph_object_locator *oloc,
4752                          u64 notify_id,
4753                          u64 cookie,
4754                          void *payload,
4755                          u32 payload_len)
4756 {
4757         struct ceph_osd_request *req;
4758         int ret;
4759
4760         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4761         if (!req)
4762                 return -ENOMEM;
4763
4764         ceph_oid_copy(&req->r_base_oid, oid);
4765         ceph_oloc_copy(&req->r_base_oloc, oloc);
4766         req->r_flags = CEPH_OSD_FLAG_READ;
4767
4768         ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
4769                                          payload_len);
4770         if (ret)
4771                 goto out_put_req;
4772
4773         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4774         if (ret)
4775                 goto out_put_req;
4776
4777         ceph_osdc_start_request(osdc, req, false);
4778         ret = ceph_osdc_wait_request(osdc, req);
4779
4780 out_put_req:
4781         ceph_osdc_put_request(req);
4782         return ret;
4783 }
4784 EXPORT_SYMBOL(ceph_osdc_notify_ack);
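
/*
 * Editor's note: sketch (not in the original file) of acking a notify
 * from a watch callback, as rbd does.  @oid/@oloc identify the watched
 * object; @notify_id and @cookie come straight from the callback
 * arguments.  An empty payload is sent here.
 */
static void __maybe_unused
example_ack_notify(struct ceph_osd_client *osdc,
                   struct ceph_object_id *oid,
                   struct ceph_object_locator *oloc,
                   u64 notify_id, u64 cookie)
{
        int ret;

        ret = ceph_osdc_notify_ack(osdc, oid, oloc, notify_id, cookie,
                                   NULL, 0);
        if (ret)
                pr_err("notify_ack failed: %d\n", ret);
}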
4785
4786 static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
4787                                   u64 cookie, u32 prot_ver, u32 timeout,
4788                                   void *payload, u32 payload_len)
4789 {
4790         struct ceph_osd_req_op *op;
4791         struct ceph_pagelist *pl;
4792         int ret;
4793
4794         op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
4795         op->notify.cookie = cookie;
4796
4797         pl = ceph_pagelist_alloc(GFP_NOIO);
4798         if (!pl)
4799                 return -ENOMEM;
4800
4801         ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
4802         ret |= ceph_pagelist_encode_32(pl, timeout);
4803         ret |= ceph_pagelist_encode_32(pl, payload_len);
4804         ret |= ceph_pagelist_append(pl, payload, payload_len);
4805         if (ret) {
4806                 ceph_pagelist_release(pl);
4807                 return -ENOMEM;
4808         }
4809
4810         ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
4811         op->indata_len = pl->length;
4812         return 0;
4813 }
4814
4815 /*
4816  * @timeout: in seconds
4817  *
4818  * @preply_{pages,len} are initialized both on success and error.
4819  * The caller is responsible for:
4820  *
4821  *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
4822  */
4823 int ceph_osdc_notify(struct ceph_osd_client *osdc,
4824                      struct ceph_object_id *oid,
4825                      struct ceph_object_locator *oloc,
4826                      void *payload,
4827                      u32 payload_len,
4828                      u32 timeout,
4829                      struct page ***preply_pages,
4830                      size_t *preply_len)
4831 {
4832         struct ceph_osd_linger_request *lreq;
4833         struct page **pages;
4834         int ret;
4835
4836         WARN_ON(!timeout);
4837         if (preply_pages) {
4838                 *preply_pages = NULL;
4839                 *preply_len = 0;
4840         }
4841
4842         lreq = linger_alloc(osdc);
4843         if (!lreq)
4844                 return -ENOMEM;
4845
4846         lreq->preply_pages = preply_pages;
4847         lreq->preply_len = preply_len;
4848
4849         ceph_oid_copy(&lreq->t.base_oid, oid);
4850         ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4851         lreq->t.flags = CEPH_OSD_FLAG_READ;
4852
4853         lreq->reg_req = alloc_linger_request(lreq);
4854         if (!lreq->reg_req) {
4855                 ret = -ENOMEM;
4856                 goto out_put_lreq;
4857         }
4858
4859         /*
4860          * Pass 0 for cookie because we don't know it yet; it will be
4861          * filled in by linger_submit().
4862          */
4863         ret = osd_req_op_notify_init(lreq->reg_req, 0, 0, 1, timeout,
4864                                      payload, payload_len);
4865         if (ret)
4866                 goto out_put_lreq;
4867
4868         /* for notify_id */
4869         pages = ceph_alloc_page_vector(1, GFP_NOIO);
4870         if (IS_ERR(pages)) {
4871                 ret = PTR_ERR(pages);
4872                 goto out_put_lreq;
4873         }
4874         ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
4875                                                  response_data),
4876                                  pages, PAGE_SIZE, 0, false, true);
4877
4878         ret = ceph_osdc_alloc_messages(lreq->reg_req, GFP_NOIO);
4879         if (ret)
4880                 goto out_put_lreq;
4881
4882         linger_submit(lreq);
4883         ret = linger_reg_commit_wait(lreq);
4884         if (!ret)
4885                 ret = linger_notify_finish_wait(lreq,
4886                                  msecs_to_jiffies(2 * timeout * MSEC_PER_SEC));
4887         else
4888                 dout("lreq %p failed to initiate notify %d\n", lreq, ret);
4889
4890         linger_cancel(lreq);
4891 out_put_lreq:
4892         linger_put(lreq);
4893         return ret;
4894 }
4895 EXPORT_SYMBOL(ceph_osdc_notify);
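
/*
 * Editor's note: sketch of sending a notify and releasing the reply
 * per the doc comment above.  The payload and the 5 second timeout are
 * hypothetical.
 */
static int __maybe_unused
example_notify(struct ceph_osd_client *osdc,
               struct ceph_object_id *oid,
               struct ceph_object_locator *oloc)
{
        struct page **reply_pages = NULL;
        size_t reply_len = 0;
        char payload[] = "hello";
        int ret;

        ret = ceph_osdc_notify(osdc, oid, oloc, payload, sizeof(payload),
                               5, &reply_pages, &reply_len);
        /* reply_pages is initialized on both success and error */
        if (reply_pages)
                ceph_release_page_vector(reply_pages,
                                         calc_pages_for(0, reply_len));
        return ret;
}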
4896
4897 /*
4898  * Return the number of milliseconds since the watch was last
4899  * confirmed, or an error.  If there is an error, the watch is no
4900  * longer valid, and should be destroyed with ceph_osdc_unwatch().
4901  */
4902 int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
4903                           struct ceph_osd_linger_request *lreq)
4904 {
4905         unsigned long stamp, age;
4906         int ret;
4907
4908         down_read(&osdc->lock);
4909         mutex_lock(&lreq->lock);
4910         stamp = lreq->watch_valid_thru;
4911         if (!list_empty(&lreq->pending_lworks)) {
4912                 struct linger_work *lwork =
4913                     list_first_entry(&lreq->pending_lworks,
4914                                      struct linger_work,
4915                                      pending_item);
4916
4917                 if (time_before(lwork->queued_stamp, stamp))
4918                         stamp = lwork->queued_stamp;
4919         }
4920         age = jiffies - stamp;
4921         dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
4922              lreq, lreq->linger_id, age, lreq->last_error);
4923         /* we are truncating to msecs, so return a safe upper bound */
4924         ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
4925
4926         mutex_unlock(&lreq->lock);
4927         up_read(&osdc->lock);
4928         return ret;
4929 }
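
/*
 * Editor's note: sketch of a periodic validity check, similar to what
 * rbd does from delayed work.  A negative return means the watch is
 * dead and must be torn down and re-established by the caller.
 */
static void __maybe_unused
example_check_watch(struct ceph_osd_client *osdc,
                    struct ceph_osd_linger_request *handle)
{
        int ret = ceph_osdc_watch_check(osdc, handle);

        if (ret < 0)
                pr_warn("watch is invalid: %d\n", ret);
        else
                dout("watch confirmed within the last %d ms\n", ret);
}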
4930
4931 static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
4932 {
4933         u8 struct_v;
4934         u32 struct_len;
4935         int ret;
4936
4937         ret = ceph_start_decoding(p, end, 2, "watch_item_t",
4938                                   &struct_v, &struct_len);
4939         if (ret)
4940                 goto bad;
4941
4942         ret = -EINVAL;
4943         ceph_decode_copy_safe(p, end, &item->name, sizeof(item->name), bad);
4944         ceph_decode_64_safe(p, end, item->cookie, bad);
4945         ceph_decode_skip_32(p, end, bad); /* skip timeout seconds */
4946
4947         if (struct_v >= 2) {
4948                 ret = ceph_decode_entity_addr(p, end, &item->addr);
4949                 if (ret)
4950                         goto bad;
4951         } else {
4952                 ret = 0;
4953         }
4954
4955         dout("%s %s%llu cookie %llu addr %s\n", __func__,
4956              ENTITY_NAME(item->name), item->cookie,
4957              ceph_pr_addr(&item->addr));
4958 bad:
4959         return ret;
4960 }
4961
4962 static int decode_watchers(void **p, void *end,
4963                            struct ceph_watch_item **watchers,
4964                            u32 *num_watchers)
4965 {
4966         u8 struct_v;
4967         u32 struct_len;
4968         int i;
4969         int ret;
4970
4971         ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
4972                                   &struct_v, &struct_len);
4973         if (ret)
4974                 return ret;
4975
4976         *num_watchers = ceph_decode_32(p);
4977         *watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
4978         if (!*watchers)
4979                 return -ENOMEM;
4980
4981         for (i = 0; i < *num_watchers; i++) {
4982                 ret = decode_watcher(p, end, *watchers + i);
4983                 if (ret) {
4984                         kfree(*watchers);
4985                         return ret;
4986                 }
4987         }
4988
4989         return 0;
4990 }
4991
4992 /*
4993  * On success, the caller is responsible for:
4994  *
4995  *     kfree(watchers);
4996  */
4997 int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
4998                             struct ceph_object_id *oid,
4999                             struct ceph_object_locator *oloc,
5000                             struct ceph_watch_item **watchers,
5001                             u32 *num_watchers)
5002 {
5003         struct ceph_osd_request *req;
5004         struct page **pages;
5005         int ret;
5006
5007         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
5008         if (!req)
5009                 return -ENOMEM;
5010
5011         ceph_oid_copy(&req->r_base_oid, oid);
5012         ceph_oloc_copy(&req->r_base_oloc, oloc);
5013         req->r_flags = CEPH_OSD_FLAG_READ;
5014
5015         pages = ceph_alloc_page_vector(1, GFP_NOIO);
5016         if (IS_ERR(pages)) {
5017                 ret = PTR_ERR(pages);
5018                 goto out_put_req;
5019         }
5020
5021         osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
5022         ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
5023                                                  response_data),
5024                                  pages, PAGE_SIZE, 0, false, true);
5025
5026         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
5027         if (ret)
5028                 goto out_put_req;
5029
5030         ceph_osdc_start_request(osdc, req, false);
5031         ret = ceph_osdc_wait_request(osdc, req);
5032         if (ret >= 0) {
5033                 void *p = page_address(pages[0]);
5034                 void *const end = p + req->r_ops[0].outdata_len;
5035
5036                 ret = decode_watchers(&p, end, watchers, num_watchers);
5037         }
5038
5039 out_put_req:
5040         ceph_osdc_put_request(req);
5041         return ret;
5042 }
5043 EXPORT_SYMBOL(ceph_osdc_list_watchers);
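
/*
 * Editor's note: sketch of listing watchers and freeing the result as
 * the doc comment above requires.
 */
static void __maybe_unused
example_list_watchers(struct ceph_osd_client *osdc,
                      struct ceph_object_id *oid,
                      struct ceph_object_locator *oloc)
{
        struct ceph_watch_item *watchers;
        u32 num_watchers;
        u32 i;

        if (ceph_osdc_list_watchers(osdc, oid, oloc, &watchers,
                                    &num_watchers))
                return;

        for (i = 0; i < num_watchers; i++)
                dout("watcher %s%llu cookie %llu\n",
                     ENTITY_NAME(watchers[i].name), watchers[i].cookie);
        kfree(watchers);
}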
5044
5045 /*
5046  * Call all pending notify callbacks - for use after a watch is
5047  * unregistered, to make sure no more callbacks for it will be invoked.
5048  */
5049 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
5050 {
5051         dout("%s osdc %p\n", __func__, osdc);
5052         flush_workqueue(osdc->notify_wq);
5053 }
5054 EXPORT_SYMBOL(ceph_osdc_flush_notifies);
5055
5056 void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
5057 {
5058         down_read(&osdc->lock);
5059         maybe_request_map(osdc);
5060         up_read(&osdc->lock);
5061 }
5062 EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
5063
5064 /*
5065  * Execute an OSD class method on an object.
5066  *
5067  * @flags: CEPH_OSD_FLAG_*
5068  * @resp_len: in/out param for reply length
5069  */
5070 int ceph_osdc_call(struct ceph_osd_client *osdc,
5071                    struct ceph_object_id *oid,
5072                    struct ceph_object_locator *oloc,
5073                    const char *class, const char *method,
5074                    unsigned int flags,
5075                    struct page *req_page, size_t req_len,
5076                    struct page **resp_pages, size_t *resp_len)
5077 {
5078         struct ceph_osd_request *req;
5079         int ret;
5080
5081         if (req_len > PAGE_SIZE)
5082                 return -E2BIG;
5083
5084         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
5085         if (!req)
5086                 return -ENOMEM;
5087
5088         ceph_oid_copy(&req->r_base_oid, oid);
5089         ceph_oloc_copy(&req->r_base_oloc, oloc);
5090         req->r_flags = flags;
5091
5092         ret = osd_req_op_cls_init(req, 0, class, method);
5093         if (ret)
5094                 goto out_put_req;
5095
5096         if (req_page)
5097                 osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
5098                                                   0, false, false);
5099         if (resp_pages)
5100                 osd_req_op_cls_response_data_pages(req, 0, resp_pages,
5101                                                    *resp_len, 0, false, false);
5102
5103         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
5104         if (ret)
5105                 goto out_put_req;
5106
5107         ceph_osdc_start_request(osdc, req, false);
5108         ret = ceph_osdc_wait_request(osdc, req);
5109         if (ret >= 0) {
5110                 ret = req->r_ops[0].rval;
5111                 if (resp_pages)
5112                         *resp_len = req->r_ops[0].outdata_len;
5113         }
5114
5115 out_put_req:
5116         ceph_osdc_put_request(req);
5117         return ret;
5118 }
5119 EXPORT_SYMBOL(ceph_osdc_call);
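
/*
 * Editor's note: sketch of invoking an OSD class method.  The
 * "rbd"/"get_size" class/method pair and the 8-byte snapid request
 * mirror rbd's usage but are assumptions here, as is the helper
 * itself.
 */
static int __maybe_unused
example_cls_call(struct ceph_osd_client *osdc,
                 struct ceph_object_id *oid,
                 struct ceph_object_locator *oloc)
{
        struct page *req_page, *reply_page;
        size_t reply_len = PAGE_SIZE;   /* in: capacity, out: actual */
        __le64 snapid = cpu_to_le64(CEPH_NOSNAP);
        int ret = -ENOMEM;

        req_page = alloc_page(GFP_NOIO);
        reply_page = alloc_page(GFP_NOIO);
        if (!req_page || !reply_page)
                goto out;

        memcpy(page_address(req_page), &snapid, sizeof(snapid));
        ret = ceph_osdc_call(osdc, oid, oloc, "rbd", "get_size",
                             CEPH_OSD_FLAG_READ, req_page, sizeof(snapid),
                             &reply_page, &reply_len);
        /* on success ret is the method's rval, reply_page holds the data */
out:
        if (req_page)
                __free_page(req_page);
        if (reply_page)
                __free_page(reply_page);
        return ret;
}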
5120
5121 /*
5122  * reset all osd connections
5123  */
5124 void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc)
5125 {
5126         struct rb_node *n;
5127
5128         down_write(&osdc->lock);
5129         for (n = rb_first(&osdc->osds); n; ) {
5130                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
5131
5132                 n = rb_next(n);
5133                 if (!reopen_osd(osd))
5134                         kick_osd_requests(osd);
5135         }
5136         up_write(&osdc->lock);
5137 }
5138
5139 /*
5140  * init, shutdown
5141  */
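/*
 * Editor's note: ceph_osdc_init() and ceph_osdc_stop() are driven by
 * the ceph_client lifecycle (ceph_create_client() and
 * ceph_destroy_client() in net/ceph/ceph_common.c); filesystem and
 * block consumers do not call them directly.
 */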
5142 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
5143 {
5144         int err;
5145
5146         dout("init\n");
5147         osdc->client = client;
5148         init_rwsem(&osdc->lock);
5149         osdc->osds = RB_ROOT;
5150         INIT_LIST_HEAD(&osdc->osd_lru);
5151         spin_lock_init(&osdc->osd_lru_lock);
5152         osd_init(&osdc->homeless_osd);
5153         osdc->homeless_osd.o_osdc = osdc;
5154         osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
5155         osdc->last_linger_id = CEPH_LINGER_ID_START;
5156         osdc->linger_requests = RB_ROOT;
5157         osdc->map_checks = RB_ROOT;
5158         osdc->linger_map_checks = RB_ROOT;
5159         INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
5160         INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
5161
5162         err = -ENOMEM;
5163         osdc->osdmap = ceph_osdmap_alloc();
5164         if (!osdc->osdmap)
5165                 goto out;
5166
5167         osdc->req_mempool = mempool_create_slab_pool(10,
5168                                                      ceph_osd_request_cache);
5169         if (!osdc->req_mempool)
5170                 goto out_map;
5171
5172         err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
5173                                 PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, "osd_op");
5174         if (err < 0)
5175                 goto out_mempool;
5176         err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
5177                                 PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10,
5178                                 "osd_op_reply");
5179         if (err < 0)
5180                 goto out_msgpool;
5181
5182         err = -ENOMEM;
5183         osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
5184         if (!osdc->notify_wq)
5185                 goto out_msgpool_reply;
5186
5187         osdc->completion_wq = create_singlethread_workqueue("ceph-completion");
5188         if (!osdc->completion_wq)
5189                 goto out_notify_wq;
5190
5191         schedule_delayed_work(&osdc->timeout_work,
5192                               osdc->client->options->osd_keepalive_timeout);
5193         schedule_delayed_work(&osdc->osds_timeout_work,
5194             round_jiffies_relative(osdc->client->options->osd_idle_ttl));
5195
5196         return 0;
5197
5198 out_notify_wq:
5199         destroy_workqueue(osdc->notify_wq);
5200 out_msgpool_reply:
5201         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
5202 out_msgpool:
5203         ceph_msgpool_destroy(&osdc->msgpool_op);
5204 out_mempool:
5205         mempool_destroy(osdc->req_mempool);
5206 out_map:
5207         ceph_osdmap_destroy(osdc->osdmap);
5208 out:
5209         return err;
5210 }
5211
5212 void ceph_osdc_stop(struct ceph_osd_client *osdc)
5213 {
5214         destroy_workqueue(osdc->completion_wq);
5215         destroy_workqueue(osdc->notify_wq);
5216         cancel_delayed_work_sync(&osdc->timeout_work);
5217         cancel_delayed_work_sync(&osdc->osds_timeout_work);
5218
5219         down_write(&osdc->lock);
5220         while (!RB_EMPTY_ROOT(&osdc->osds)) {
5221                 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
5222                                                 struct ceph_osd, o_node);
5223                 close_osd(osd);
5224         }
5225         up_write(&osdc->lock);
5226         WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
5227         osd_cleanup(&osdc->homeless_osd);
5228
5229         WARN_ON(!list_empty(&osdc->osd_lru));
5230         WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
5231         WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
5232         WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
5233         WARN_ON(atomic_read(&osdc->num_requests));
5234         WARN_ON(atomic_read(&osdc->num_homeless));
5235
5236         ceph_osdmap_destroy(osdc->osdmap);
5237         mempool_destroy(osdc->req_mempool);
5238         ceph_msgpool_destroy(&osdc->msgpool_op);
5239         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
5240 }
5241
5242 /*
5243  * Read some contiguous pages.  If we cross a stripe boundary, shorten
5244  * *plen.  Return number of bytes read, or error.
5245  */
5246 int ceph_osdc_readpages(struct ceph_osd_client *osdc,
5247                         struct ceph_vino vino, struct ceph_file_layout *layout,
5248                         u64 off, u64 *plen,
5249                         u32 truncate_seq, u64 truncate_size,
5250                         struct page **pages, int num_pages, int page_align)
5251 {
5252         struct ceph_osd_request *req;
5253         int rc = 0;
5254
5255         dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
5256              vino.snap, off, *plen);
5257         req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
5258                                     CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
5259                                     NULL, truncate_seq, truncate_size,
5260                                     false);
5261         if (IS_ERR(req))
5262                 return PTR_ERR(req);
5263
5264         /* it may be a short read due to an object boundary */
5265         osd_req_op_extent_osd_data_pages(req, 0,
5266                                 pages, *plen, page_align, false, false);
5267
5268         dout("readpages  final extent is %llu~%llu (%llu bytes align %d)\n",
5269              off, *plen, *plen, page_align);
5270
5271         rc = ceph_osdc_start_request(osdc, req, false);
5272         if (!rc)
5273                 rc = ceph_osdc_wait_request(osdc, req);
5274
5275         ceph_osdc_put_request(req);
5276         dout("readpages result %d\n", rc);
5277         return rc;
5278 }
5279 EXPORT_SYMBOL(ceph_osdc_readpages);
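
/*
 * Editor's note: sketch of a single-page synchronous read.  The vino
 * (typically with snap == CEPH_NOSNAP) and layout come from the
 * caller; zero truncate state is an assumption.
 */
static int __maybe_unused
example_readpage(struct ceph_osd_client *osdc, struct ceph_vino vino,
                 struct ceph_file_layout *layout, u64 off, struct page *page)
{
        u64 len = PAGE_SIZE;

        /* may return less than PAGE_SIZE if an object boundary intervenes */
        return ceph_osdc_readpages(osdc, vino, layout, off, &len, 0, 0,
                                   &page, 1, 0);
}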
5280
5281 /*
5282  * do a synchronous write on N pages
5283  */
5284 int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
5285                          struct ceph_file_layout *layout,
5286                          struct ceph_snap_context *snapc,
5287                          u64 off, u64 len,
5288                          u32 truncate_seq, u64 truncate_size,
5289                          struct timespec64 *mtime,
5290                          struct page **pages, int num_pages)
5291 {
5292         struct ceph_osd_request *req;
5293         int rc = 0;
5294         int page_align = off & ~PAGE_MASK;
5295
5296         req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
5297                                     CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
5298                                     snapc, truncate_seq, truncate_size,
5299                                     true);
5300         if (IS_ERR(req))
5301                 return PTR_ERR(req);
5302
5303         /* it may be a short write due to an object boundary */
5304         osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
5305                                 false, false);
5306         dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
5307
5308         req->r_mtime = *mtime;
5309         rc = ceph_osdc_start_request(osdc, req, true);
5310         if (!rc)
5311                 rc = ceph_osdc_wait_request(osdc, req);
5312
5313         ceph_osdc_put_request(req);
5314         if (rc == 0)
5315                 rc = len;
5316         dout("writepages result %d\n", rc);
5317         return rc;
5318 }
5319 EXPORT_SYMBOL(ceph_osdc_writepages);
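
/*
 * Editor's note: sketch of a single-page synchronous write.  The NULL
 * snap context (a plain RADOS write) and zero truncate state are
 * assumptions.
 */
static int __maybe_unused
example_writepage(struct ceph_osd_client *osdc, struct ceph_vino vino,
                  struct ceph_file_layout *layout, u64 off, struct page *page)
{
        struct timespec64 mtime;

        ktime_get_real_ts64(&mtime);
        /* returns the number of bytes written, or an error */
        return ceph_osdc_writepages(osdc, vino, layout, NULL, off, PAGE_SIZE,
                                    0, 0, &mtime, &page, 1);
}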
5320
5321 static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
5322                                      u64 src_snapid, u64 src_version,
5323                                      struct ceph_object_id *src_oid,
5324                                      struct ceph_object_locator *src_oloc,
5325                                      u32 src_fadvise_flags,
5326                                      u32 dst_fadvise_flags,
5327                                      u8 copy_from_flags)
5328 {
5329         struct ceph_osd_req_op *op;
5330         struct page **pages;
5331         void *p, *end;
5332
5333         pages = ceph_alloc_page_vector(1, GFP_KERNEL);
5334         if (IS_ERR(pages))
5335                 return PTR_ERR(pages);
5336
5337         op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM, dst_fadvise_flags);
5338         op->copy_from.snapid = src_snapid;
5339         op->copy_from.src_version = src_version;
5340         op->copy_from.flags = copy_from_flags;
5341         op->copy_from.src_fadvise_flags = src_fadvise_flags;
5342
5343         p = page_address(pages[0]);
5344         end = p + PAGE_SIZE;
5345         ceph_encode_string(&p, end, src_oid->name, src_oid->name_len);
5346         encode_oloc(&p, end, src_oloc);
5347         op->indata_len = PAGE_SIZE - (end - p);
5348
5349         ceph_osd_data_pages_init(&op->copy_from.osd_data, pages,
5350                                  op->indata_len, 0, false, true);
5351         return 0;
5352 }
5353
5354 int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
5355                         u64 src_snapid, u64 src_version,
5356                         struct ceph_object_id *src_oid,
5357                         struct ceph_object_locator *src_oloc,
5358                         u32 src_fadvise_flags,
5359                         struct ceph_object_id *dst_oid,
5360                         struct ceph_object_locator *dst_oloc,
5361                         u32 dst_fadvise_flags,
5362                         u8 copy_from_flags)
5363 {
5364         struct ceph_osd_request *req;
5365         int ret;
5366
5367         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
5368         if (!req)
5369                 return -ENOMEM;
5370
5371         req->r_flags = CEPH_OSD_FLAG_WRITE;
5372
5373         ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
5374         ceph_oid_copy(&req->r_t.base_oid, dst_oid);
5375
5376         ret = osd_req_op_copy_from_init(req, src_snapid, src_version, src_oid,
5377                                         src_oloc, src_fadvise_flags,
5378                                         dst_fadvise_flags, copy_from_flags);
5379         if (ret)
5380                 goto out;
5381
5382         ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
5383         if (ret)
5384                 goto out;
5385
5386         ceph_osdc_start_request(osdc, req, false);
5387         ret = ceph_osdc_wait_request(osdc, req);
5388
5389 out:
5390         ceph_osdc_put_request(req);
5391         return ret;
5392 }
5393 EXPORT_SYMBOL(ceph_osdc_copy_from);
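
/*
 * Editor's note: sketch of a whole-object copy between two objects in
 * possibly different pools.  CEPH_NOSNAP, src_version 0 and the
 * all-zero fadvise/copy-from flags are assumptions.
 */
static int __maybe_unused
example_copy_object(struct ceph_osd_client *osdc,
                    struct ceph_object_id *src_oid,
                    struct ceph_object_locator *src_oloc,
                    struct ceph_object_id *dst_oid,
                    struct ceph_object_locator *dst_oloc)
{
        return ceph_osdc_copy_from(osdc, CEPH_NOSNAP, 0, src_oid, src_oloc,
                                   0, dst_oid, dst_oloc, 0, 0);
}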
5394
5395 int __init ceph_osdc_setup(void)
5396 {
5397         size_t size = sizeof(struct ceph_osd_request) +
5398             CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);
5399
5400         BUG_ON(ceph_osd_request_cache);
5401         ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
5402                                                    0, 0, NULL);
5403
5404         return ceph_osd_request_cache ? 0 : -ENOMEM;
5405 }
5406
5407 void ceph_osdc_cleanup(void)
5408 {
5409         BUG_ON(!ceph_osd_request_cache);
5410         kmem_cache_destroy(ceph_osd_request_cache);
5411         ceph_osd_request_cache = NULL;
5412 }
5413
5414 /*
5415  * handle incoming message
5416  */
5417 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
5418 {
5419         struct ceph_osd *osd = con->private;
5420         struct ceph_osd_client *osdc = osd->o_osdc;
5421         int type = le16_to_cpu(msg->hdr.type);
5422
5423         switch (type) {
5424         case CEPH_MSG_OSD_MAP:
5425                 ceph_osdc_handle_map(osdc, msg);
5426                 break;
5427         case CEPH_MSG_OSD_OPREPLY:
5428                 handle_reply(osd, msg);
5429                 break;
5430         case CEPH_MSG_OSD_BACKOFF:
5431                 handle_backoff(osd, msg);
5432                 break;
5433         case CEPH_MSG_WATCH_NOTIFY:
5434                 handle_watch_notify(osdc, msg);
5435                 break;
5436
5437         default:
5438                 pr_err("received unknown message type %d %s\n", type,
5439                        ceph_msg_type_name(type));
5440         }
5441
5442         ceph_msg_put(msg);
5443 }
5444
5445 /*
5446  * Look up and return the message for an incoming reply.  We don't yet
5447  * handle a data portion larger than the preallocated one; for now,
5448  * such a message is simply skipped.
5449  */
5450 static struct ceph_msg *get_reply(struct ceph_connection *con,
5451                                   struct ceph_msg_header *hdr,
5452                                   int *skip)
5453 {
5454         struct ceph_osd *osd = con->private;
5455         struct ceph_osd_client *osdc = osd->o_osdc;
5456         struct ceph_msg *m = NULL;
5457         struct ceph_osd_request *req;
5458         int front_len = le32_to_cpu(hdr->front_len);
5459         int data_len = le32_to_cpu(hdr->data_len);
5460         u64 tid = le64_to_cpu(hdr->tid);
5461
5462         down_read(&osdc->lock);
5463         if (!osd_registered(osd)) {
5464                 dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
5465                 *skip = 1;
5466                 goto out_unlock_osdc;
5467         }
5468         WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));
5469
5470         mutex_lock(&osd->lock);
5471         req = lookup_request(&osd->o_requests, tid);
5472         if (!req) {
5473                 dout("%s osd%d tid %llu unknown, skipping\n", __func__,
5474                      osd->o_osd, tid);
5475                 *skip = 1;
5476                 goto out_unlock_session;
5477         }
5478
5479         ceph_msg_revoke_incoming(req->r_reply);
5480
5481         if (front_len > req->r_reply->front_alloc_len) {
5482                 pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
5483                         __func__, osd->o_osd, req->r_tid, front_len,
5484                         req->r_reply->front_alloc_len);
5485                 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
5486                                  false);
5487                 if (!m)
5488                         goto out_unlock_session;
5489                 ceph_msg_put(req->r_reply);
5490                 req->r_reply = m;
5491         }
5492
5493         if (data_len > req->r_reply->data_length) {
5494                 pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
5495                         __func__, osd->o_osd, req->r_tid, data_len,
5496                         req->r_reply->data_length);
5497                 m = NULL;
5498                 *skip = 1;
5499                 goto out_unlock_session;
5500         }
5501
5502         m = ceph_msg_get(req->r_reply);
5503         dout("get_reply tid %lld %p\n", tid, m);
5504
5505 out_unlock_session:
5506         mutex_unlock(&osd->lock);
5507 out_unlock_osdc:
5508         up_read(&osdc->lock);
5509         return m;
5510 }
5511
5512 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
5513 {
5514         struct ceph_msg *m;
5515         int type = le16_to_cpu(hdr->type);
5516         u32 front_len = le32_to_cpu(hdr->front_len);
5517         u32 data_len = le32_to_cpu(hdr->data_len);
5518
5519         m = ceph_msg_new2(type, front_len, 1, GFP_NOIO, false);
5520         if (!m)
5521                 return NULL;
5522
5523         if (data_len) {
5524                 struct page **pages;
5525
5526                 pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
5527                                                GFP_NOIO);
5528                 if (IS_ERR(pages)) {
5529                         ceph_msg_put(m);
5530                         return NULL;
5531                 }
5532
5533                 ceph_msg_data_add_pages(m, pages, data_len, 0, true);
5534         }
5535
5536         return m;
5537 }
5538
5539 static struct ceph_msg *alloc_msg(struct ceph_connection *con,
5540                                   struct ceph_msg_header *hdr,
5541                                   int *skip)
5542 {
5543         struct ceph_osd *osd = con->private;
5544         int type = le16_to_cpu(hdr->type);
5545
5546         *skip = 0;
5547         switch (type) {
5548         case CEPH_MSG_OSD_MAP:
5549         case CEPH_MSG_OSD_BACKOFF:
5550         case CEPH_MSG_WATCH_NOTIFY:
5551                 return alloc_msg_with_page_vector(hdr);
5552         case CEPH_MSG_OSD_OPREPLY:
5553                 return get_reply(con, hdr, skip);
5554         default:
5555                 pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
5556                         osd->o_osd, type);
5557                 *skip = 1;
5558                 return NULL;
5559         }
5560 }
5561
5562 /*
5563  * Wrappers to get/put a reference on the ceph_osd containing the connection
5564  */
5565 static struct ceph_connection *get_osd_con(struct ceph_connection *con)
5566 {
5567         struct ceph_osd *osd = con->private;
5568         if (get_osd(osd))
5569                 return con;
5570         return NULL;
5571 }
5572
5573 static void put_osd_con(struct ceph_connection *con)
5574 {
5575         struct ceph_osd *osd = con->private;
5576         put_osd(osd);
5577 }
5578
5579 /*
5580  * authentication
5581  */
5582 /*
5583  * Note: returned pointer is the address of a structure that's
5584  * managed separately.  Caller must *not* attempt to free it.
5585  */
5586 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
5587                                         int *proto, int force_new)
5588 {
5589         struct ceph_osd *o = con->private;
5590         struct ceph_osd_client *osdc = o->o_osdc;
5591         struct ceph_auth_client *ac = osdc->client->monc.auth;
5592         struct ceph_auth_handshake *auth = &o->o_auth;
5593
5594         if (force_new && auth->authorizer) {
5595                 ceph_auth_destroy_authorizer(auth->authorizer);
5596                 auth->authorizer = NULL;
5597         }
5598         if (!auth->authorizer) {
5599                 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
5600                                                       auth);
5601                 if (ret)
5602                         return ERR_PTR(ret);
5603         } else {
5604                 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
5605                                                      auth);
5606                 if (ret)
5607                         return ERR_PTR(ret);
5608         }
5609         *proto = ac->protocol;
5610
5611         return auth;
5612 }
5613
5614 static int add_authorizer_challenge(struct ceph_connection *con,
5615                                     void *challenge_buf, int challenge_buf_len)
5616 {
5617         struct ceph_osd *o = con->private;
5618         struct ceph_osd_client *osdc = o->o_osdc;
5619         struct ceph_auth_client *ac = osdc->client->monc.auth;
5620
5621         return ceph_auth_add_authorizer_challenge(ac, o->o_auth.authorizer,
5622                                             challenge_buf, challenge_buf_len);
5623 }
5624
5625 static int verify_authorizer_reply(struct ceph_connection *con)
5626 {
5627         struct ceph_osd *o = con->private;
5628         struct ceph_osd_client *osdc = o->o_osdc;
5629         struct ceph_auth_client *ac = osdc->client->monc.auth;
5630
5631         return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
5632 }
5633
5634 static int invalidate_authorizer(struct ceph_connection *con)
5635 {
5636         struct ceph_osd *o = con->private;
5637         struct ceph_osd_client *osdc = o->o_osdc;
5638         struct ceph_auth_client *ac = osdc->client->monc.auth;
5639
5640         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
5641         return ceph_monc_validate_auth(&osdc->client->monc);
5642 }
5643
5644 static void osd_reencode_message(struct ceph_msg *msg)
5645 {
5646         int type = le16_to_cpu(msg->hdr.type);
5647
5648         if (type == CEPH_MSG_OSD_OP)
5649                 encode_request_finish(msg);
5650 }
5651
5652 static int osd_sign_message(struct ceph_msg *msg)
5653 {
5654         struct ceph_osd *o = msg->con->private;
5655         struct ceph_auth_handshake *auth = &o->o_auth;
5656
5657         return ceph_auth_sign_message(auth, msg);
5658 }
5659
5660 static int osd_check_message_signature(struct ceph_msg *msg)
5661 {
5662         struct ceph_osd *o = msg->con->private;
5663         struct ceph_auth_handshake *auth = &o->o_auth;
5664
5665         return ceph_auth_check_message_signature(auth, msg);
5666 }
5667
5668 static const struct ceph_connection_operations osd_con_ops = {
5669         .get = get_osd_con,
5670         .put = put_osd_con,
5671         .dispatch = dispatch,
5672         .get_authorizer = get_authorizer,
5673         .add_authorizer_challenge = add_authorizer_challenge,
5674         .verify_authorizer_reply = verify_authorizer_reply,
5675         .invalidate_authorizer = invalidate_authorizer,
5676         .alloc_msg = alloc_msg,
5677         .reencode_message = osd_reencode_message,
5678         .sign_message = osd_sign_message,
5679         .check_message_signature = osd_check_message_signature,
5680         .fault = osd_fault,
5681 };