/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/sec_plain.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_SEC

#include <obd_support.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <lustre_net.h>
#include <lustre_sec.h>
#include "ptlrpc_internal.h"

struct plain_sec {
        struct ptlrpc_sec       pls_base;
        rwlock_t                pls_lock;
        struct ptlrpc_cli_ctx  *pls_ctx;
};

static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
{
        return container_of(sec, struct plain_sec, pls_base);
}

static struct ptlrpc_sec_policy plain_policy;
static struct ptlrpc_ctx_ops    plain_ctx_ops;
static struct ptlrpc_svc_ctx    plain_svc_ctx;

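/*
 * Size of a plain-wrapped early reply; the final reply is placed at this
 * offset in the reply buffer so that an early reply can be packed in front
 * of it. Computed once in sptlrpc_plain_init() below.
 */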
static unsigned int plain_at_offset;

/*
 * For simplicity, plain policy RPCs use a fixed four-segment layout.
 */
#define PLAIN_PACK_SEGMENTS     (4)

#define PLAIN_PACK_HDR_OFF      (0)
#define PLAIN_PACK_MSG_OFF      (1)
#define PLAIN_PACK_USER_OFF     (2)
#define PLAIN_PACK_BULK_OFF     (3)

#define PLAIN_FL_USER           (0x01)
#define PLAIN_FL_BULK           (0x02)
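
/*
 * Illustration (commentary only, not compiled): every plain message packed
 * by this policy carries the four segments above, in order:
 *
 *   seg 0 (HDR):  struct plain_header
 *   seg 1 (MSG):  the embedded lustre_msg payload
 *   seg 2 (USER): user descriptor; zero length unless PLAIN_FL_USER is set
 *   seg 3 (BULK): bulk security descriptor; zero length unless
 *                 PLAIN_FL_BULK is set
 */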

struct plain_header {
        __u8 ph_ver;            /* 0 */
        __u8 ph_flags;
        __u8 ph_sp;             /* source */
        __u8 ph_bulk_hash_alg;  /* complete flavor desc */
        __u8 ph_pad[4];
};

struct plain_bulk_token {
        __u8 pbt_hash[8];
};

#define PLAIN_BSD_SIZE \
        (sizeof(struct ptlrpc_bulk_sec_desc) + sizeof(struct plain_bulk_token))

/****************************************
 * bulk checksum helpers                *
 ****************************************/

static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed)
{
        struct ptlrpc_bulk_sec_desc *bsd;

        if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF, swabbed))
                return -EPROTO;

        bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE);
        if (!bsd) {
                CERROR("bulk sec desc has short size %d\n",
                       lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF));
                return -EPROTO;
        }

        if (bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
            bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG) {
                CERROR("invalid bulk svc %u\n", bsd->bsd_svc);
                return -EPROTO;
        }

        return 0;
}

static int plain_generate_bulk_csum(struct ptlrpc_bulk_desc *desc,
                                    __u8 hash_alg,
                                    struct plain_bulk_token *token)
{
        if (hash_alg == BULK_HASH_ALG_NULL)
                return 0;

        memset(token->pbt_hash, 0, sizeof(token->pbt_hash));
        return sptlrpc_get_bulk_checksum(desc, hash_alg, token->pbt_hash,
                                         sizeof(token->pbt_hash));
}

static int plain_verify_bulk_csum(struct ptlrpc_bulk_desc *desc,
                                  __u8 hash_alg,
                                  struct plain_bulk_token *tokenr)
{
        struct plain_bulk_token tokenv;
        int rc;

        if (hash_alg == BULK_HASH_ALG_NULL)
                return 0;

        memset(&tokenv.pbt_hash, 0, sizeof(tokenv.pbt_hash));
        rc = sptlrpc_get_bulk_checksum(desc, hash_alg, tokenv.pbt_hash,
                                       sizeof(tokenv.pbt_hash));
        if (rc)
                return rc;

        if (memcmp(tokenr->pbt_hash, tokenv.pbt_hash, sizeof(tokenr->pbt_hash)))
                return -EACCES;
        return 0;
}
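
/*
 * Round trip, in sketch form: the sender fills token->pbt_hash over the
 * bulk pages with plain_generate_bulk_csum(); the receiver recomputes the
 * checksum over the pages it actually got and memcmp()s the two in
 * plain_verify_bulk_csum(), returning -EACCES on mismatch.
 */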

static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
{
        char *ptr;
        unsigned int off, i;

        LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));

        for (i = 0; i < desc->bd_iov_count; i++) {
                if (!BD_GET_KIOV(desc, i).bv_len)
                        continue;

                ptr = kmap(BD_GET_KIOV(desc, i).bv_page);
                off = BD_GET_KIOV(desc, i).bv_offset & ~PAGE_MASK;
                ptr[off] ^= 0x1;
                kunmap(BD_GET_KIOV(desc, i).bv_page);
                return;
        }
}
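
/*
 * Note: corrupt_bulk_data() flips a single bit in the first non-empty page
 * of the descriptor. It is reached only via the
 * OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE) fault-injection hook in
 * plain_svc_wrap_bulk() below, to exercise the client's bulk checksum
 * verification path.
 */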

/****************************************
 * cli_ctx apis                         *
 ****************************************/

static
int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
        /* should never reach here */
        LBUG();
        return 0;
}

static
int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
{
        return 0;
}

static
int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
        struct lustre_msg *msg = req->rq_reqbuf;
        struct plain_header *phdr;

        msg->lm_secflvr = req->rq_flvr.sf_rpc;

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
        phdr->ph_ver = 0;
        phdr->ph_flags = 0;
        phdr->ph_sp = ctx->cc_sec->ps_part;
        phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

        if (req->rq_pack_udesc)
                phdr->ph_flags |= PLAIN_FL_USER;
        if (req->rq_pack_bulk)
                phdr->ph_flags |= PLAIN_FL_BULK;

        req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
                                                 msg->lm_buflens);
        return 0;
}

static
int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
        struct lustre_msg *msg = req->rq_repdata;
        struct plain_header *phdr;
        __u32 cksum;
        int swabbed;

        if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
                CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
                return -EPROTO;
        }

        swabbed = ptlrpc_rep_need_swab(req);

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
        if (!phdr) {
                CERROR("missing plain header\n");
                return -EPROTO;
        }

        if (phdr->ph_ver != 0) {
                CERROR("Invalid header version\n");
                return -EPROTO;
        }

        /* expect no user desc in reply */
        if (phdr->ph_flags & PLAIN_FL_USER) {
                CERROR("Unexpected udesc flag in reply\n");
                return -EPROTO;
        }

        if (phdr->ph_bulk_hash_alg != req->rq_flvr.u_bulk.hash.hash_alg) {
                CERROR("reply bulk flavor %u != %u\n", phdr->ph_bulk_hash_alg,
                       req->rq_flvr.u_bulk.hash.hash_alg);
                return -EPROTO;
        }

        if (unlikely(req->rq_early)) {
                unsigned int hsize = 4;

                cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
                                       lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF,
                                                      0),
                                       lustre_msg_buflen(msg,
                                                         PLAIN_PACK_MSG_OFF),
                                       NULL, 0, (unsigned char *)&cksum,
                                       &hsize);
                if (cksum != msg->lm_cksum) {
                        CDEBUG(D_SEC,
                               "early reply checksum mismatch: %08x != %08x\n",
                               cpu_to_le32(cksum), msg->lm_cksum);
                        return -EINVAL;
                }
        } else {
                /*
                 * Whether we sent with bulk or not, we expect the same in
                 * the reply, except for early replies.
                 */
                if (!req->rq_early &&
                    !equi(req->rq_pack_bulk == 1,
                          phdr->ph_flags & PLAIN_FL_BULK)) {
                        CERROR("%s bulk checksum in reply\n",
                               req->rq_pack_bulk ? "Missing" : "Unexpected");
                        return -EPROTO;
                }

                if (phdr->ph_flags & PLAIN_FL_BULK) {
                        if (plain_unpack_bsd(msg, swabbed))
                                return -EPROTO;
                }
        }

        req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
        req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF);
        return 0;
}

static
int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
                        struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_bulk_sec_desc *bsd;
        struct plain_bulk_token *token;
        int rc;

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

        bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
        token = (struct plain_bulk_token *)bsd->bsd_data;

        bsd->bsd_version = 0;
        bsd->bsd_flags = 0;
        bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);

        if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
                return 0;

        if (req->rq_bulk_read)
                return 0;

        rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                      token);
        if (rc) {
                CERROR("bulk write: failed to compute checksum: %d\n", rc);
        } else {
                /*
                 * For sending we only compute a wrong checksum instead of
                 * corrupting the data, so it is still correct on a resend.
                 */
                if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
                    req->rq_flvr.u_bulk.hash.hash_alg != BULK_HASH_ALG_NULL)
                        token->pbt_hash[0] ^= 0x1;
        }

        return rc;
}

static
int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
                          struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_bulk_sec_desc *bsdv;
        struct plain_bulk_token *tokenv;
        int rc;
        int i, nob;

        LASSERT(req->rq_pack_bulk);
        LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
        LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);

        bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0);
        tokenv = (struct plain_bulk_token *)bsdv->bsd_data;

        if (req->rq_bulk_write) {
                if (bsdv->bsd_flags & BSD_FL_ERR)
                        return -EIO;
                return 0;
        }
        /*
         * Fix the actual data size. Note that this must modify the
         * descriptor's bio_vec in place: taking a by-value copy here
         * would trim only the stack copy and leave the descriptor's
         * lengths untouched.
         */
        for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
                struct bio_vec *bv_desc = &BD_GET_KIOV(desc, i);

                if (bv_desc->bv_len + nob > desc->bd_nob_transferred)
                        bv_desc->bv_len = desc->bd_nob_transferred - nob;
                nob += bv_desc->bv_len;
        }

        rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                    tokenv);
        if (rc)
                CERROR("bulk read: client verify failed: %d\n", rc);

        return rc;
}

/****************************************
 * sec apis                             *
 ****************************************/

static
struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
{
        struct ptlrpc_cli_ctx *ctx, *ctx_new;

        ctx_new = kzalloc(sizeof(*ctx_new), GFP_NOFS);

        write_lock(&plsec->pls_lock);

        ctx = plsec->pls_ctx;
        if (ctx) {
                atomic_inc(&ctx->cc_refcount);

                kfree(ctx_new);
        } else if (ctx_new) {
                ctx = ctx_new;

                atomic_set(&ctx->cc_refcount, 1); /* for cache */
                ctx->cc_sec = &plsec->pls_base;
                ctx->cc_ops = &plain_ctx_ops;
                ctx->cc_expire = 0;
                ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
                ctx->cc_vcred.vc_uid = 0;
                spin_lock_init(&ctx->cc_lock);
                INIT_LIST_HEAD(&ctx->cc_req_list);
                INIT_LIST_HEAD(&ctx->cc_gc_chain);

                plsec->pls_ctx = ctx;
                atomic_inc(&plsec->pls_base.ps_nctx);
                atomic_inc(&plsec->pls_base.ps_refcount);

                atomic_inc(&ctx->cc_refcount); /* for caller */
        }

        write_unlock(&plsec->pls_lock);

        return ctx;
}
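
/*
 * The function above follows the usual optimistic-allocation idiom:
 * allocate the candidate context outside the write lock, then either
 * discard it (another thread won the race and plsec->pls_ctx is already
 * set) or install it. If kzalloc() fails while the cache is empty, NULL
 * is returned and the caller must cope.
 */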

static
void plain_destroy_sec(struct ptlrpc_sec *sec)
{
        struct plain_sec *plsec = sec2plsec(sec);

        LASSERT(sec->ps_policy == &plain_policy);
        LASSERT(sec->ps_import);
        LASSERT(atomic_read(&sec->ps_refcount) == 0);
        LASSERT(atomic_read(&sec->ps_nctx) == 0);
        LASSERT(!plsec->pls_ctx);

        class_import_put(sec->ps_import);

        kfree(plsec);
}

static
void plain_kill_sec(struct ptlrpc_sec *sec)
{
        sec->ps_dying = 1;
}

static
struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
                                    struct ptlrpc_svc_ctx *svc_ctx,
                                    struct sptlrpc_flavor *sf)
{
        struct plain_sec *plsec;
        struct ptlrpc_sec *sec;
        struct ptlrpc_cli_ctx *ctx;

        LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);

        plsec = kzalloc(sizeof(*plsec), GFP_NOFS);
        if (!plsec)
                return NULL;

        /*
         * initialize plain_sec
         */
        rwlock_init(&plsec->pls_lock);
        plsec->pls_ctx = NULL;

        sec = &plsec->pls_base;
        sec->ps_policy = &plain_policy;
        atomic_set(&sec->ps_refcount, 0);
        atomic_set(&sec->ps_nctx, 0);
        sec->ps_id = sptlrpc_get_next_secid();
        sec->ps_import = class_import_get(imp);
        sec->ps_flvr = *sf;
        spin_lock_init(&sec->ps_lock);
        INIT_LIST_HEAD(&sec->ps_gc_list);
        sec->ps_gc_interval = 0;
        sec->ps_gc_next = 0;

        /* install ctx immediately if this is a reverse sec */
        if (svc_ctx) {
                ctx = plain_sec_install_ctx(plsec);
                if (!ctx) {
                        plain_destroy_sec(sec);
                        return NULL;
                }
                sptlrpc_cli_ctx_put(ctx, 1);
        }

        return sec;
}

static
struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
                                        struct vfs_cred *vcred,
                                        int create, int remove_dead)
{
        struct plain_sec *plsec = sec2plsec(sec);
        struct ptlrpc_cli_ctx *ctx;

        read_lock(&plsec->pls_lock);
        ctx = plsec->pls_ctx;
        if (ctx)
                atomic_inc(&ctx->cc_refcount);
        read_unlock(&plsec->pls_lock);

        if (unlikely(!ctx))
                ctx = plain_sec_install_ctx(plsec);

        return ctx;
}

static
void plain_release_ctx(struct ptlrpc_sec *sec,
                       struct ptlrpc_cli_ctx *ctx, int sync)
{
        LASSERT(atomic_read(&sec->ps_refcount) > 0);
        LASSERT(atomic_read(&sec->ps_nctx) > 0);
        LASSERT(atomic_read(&ctx->cc_refcount) == 0);
        LASSERT(ctx->cc_sec == sec);

        kfree(ctx);

        atomic_dec(&sec->ps_nctx);
        sptlrpc_sec_put(sec);
}

static
int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
                          uid_t uid, int grace, int force)
{
        struct plain_sec *plsec = sec2plsec(sec);
        struct ptlrpc_cli_ctx *ctx;

        /* do nothing unless the caller wants to flush for 'all' */
        if (uid != -1)
                return 0;

        write_lock(&plsec->pls_lock);
        ctx = plsec->pls_ctx;
        plsec->pls_ctx = NULL;
        write_unlock(&plsec->pls_lock);

        if (ctx)
                sptlrpc_cli_ctx_put(ctx, 1);
        return 0;
}

static
int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req,
                       int msgsize)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int alloc_len;

        buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_udesc)
                buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();

        if (req->rq_pack_bulk) {
                LASSERT(req->rq_bulk_read || req->rq_bulk_write);
                buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
        }

        alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        if (!req->rq_reqbuf) {
                LASSERT(!req->rq_pool);

                alloc_len = size_roundup_power2(alloc_len);
                req->rq_reqbuf = libcfs_kvzalloc(alloc_len, GFP_NOFS);
                if (!req->rq_reqbuf)
                        return -ENOMEM;

                req->rq_reqbuf_len = alloc_len;
        } else {
                LASSERT(req->rq_pool);
                LASSERT(req->rq_reqbuf_len >= alloc_len);
                memset(req->rq_reqbuf, 0, alloc_len);
        }

        lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
        req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0);

        if (req->rq_pack_udesc) {
                int rc = sptlrpc_pack_user_desc(req->rq_reqbuf,
                                                PLAIN_PACK_USER_OFF);
                if (rc < 0)
                        return rc;
        }

        return 0;
}

static
void plain_free_reqbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req)
{
        if (!req->rq_pool) {
                kvfree(req->rq_reqbuf);
                req->rq_reqbuf = NULL;
                req->rq_reqbuf_len = 0;
        }
}

static
int plain_alloc_repbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req,
                       int msgsize)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int alloc_len;

        buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_bulk) {
                LASSERT(req->rq_bulk_read || req->rq_bulk_write);
                buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
        }

        alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        /* add space for early reply */
        alloc_len += plain_at_offset;

        alloc_len = size_roundup_power2(alloc_len);

        req->rq_repbuf = libcfs_kvzalloc(alloc_len, GFP_NOFS);
        if (!req->rq_repbuf)
                return -ENOMEM;

        req->rq_repbuf_len = alloc_len;
        return 0;
}

static
void plain_free_repbuf(struct ptlrpc_sec *sec,
                       struct ptlrpc_request *req)
{
        kvfree(req->rq_repbuf);
        req->rq_repbuf = NULL;
        req->rq_repbuf_len = 0;
}

static
int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
                         struct ptlrpc_request *req,
                         int segment, int newsize)
{
        struct lustre_msg *newbuf;
        int oldsize;
        int newmsg_size, newbuf_size;

        LASSERT(req->rq_reqbuf);
        LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
        LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
                req->rq_reqmsg);

        /* compute new embedded msg size */
        oldsize = req->rq_reqmsg->lm_buflens[segment];
        req->rq_reqmsg->lm_buflens[segment] = newsize;
        newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
                                         req->rq_reqmsg->lm_buflens);
        req->rq_reqmsg->lm_buflens[segment] = oldsize;

        /* compute new wrapper msg size */
        oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
        req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
        newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
                                         req->rq_reqbuf->lm_buflens);
        req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;

        /* request from pool should always have enough buffer */
        LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

        if (req->rq_reqbuf_len < newbuf_size) {
                newbuf_size = size_roundup_power2(newbuf_size);

                newbuf = libcfs_kvzalloc(newbuf_size, GFP_NOFS);
                if (!newbuf)
                        return -ENOMEM;

                /*
                 * Must take the lock so that an otherwise unprotected
                 * change of rq_reqmsg does not race with threads
                 * traversing imp_replay_list in parallel; see LU-3333.
                 * This is a band-aid at best: it really needs to be
                 * handled in the request-enlarging code before the
                 * unpacking that has already happened by this point.
                 */
                if (req->rq_import)
                        spin_lock(&req->rq_import->imp_lock);

                memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

                kvfree(req->rq_reqbuf);
                req->rq_reqbuf = newbuf;
                req->rq_reqbuf_len = newbuf_size;
                req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
                                                PLAIN_PACK_MSG_OFF, 0);

                if (req->rq_import)
                        spin_unlock(&req->rq_import->imp_lock);
        }

        _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
                                     newmsg_size);
        _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

        req->rq_reqlen = newmsg_size;
        return 0;
}
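
/*
 * Worked example (hypothetical numbers): to grow segment 2 of the embedded
 * message from 64 to 200 bytes, plain_enlarge_reqbuf() first recomputes the
 * embedded message size with lm_buflens[2] = 200, then recomputes the
 * wrapper size with that new value in its PLAIN_PACK_MSG_OFF slot. Only if
 * the existing rq_reqbuf is too small for the new wrapper is a larger
 * buffer allocated and the old contents copied across.
 */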

/****************************************
 * service apis                         *
 ****************************************/

static struct ptlrpc_svc_ctx plain_svc_ctx = {
        .sc_refcount    = ATOMIC_INIT(1),
        .sc_policy      = &plain_policy,
};

static
int plain_accept(struct ptlrpc_request *req)
{
        struct lustre_msg *msg = req->rq_reqbuf;
        struct plain_header *phdr;
        int swabbed;

        LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
                SPTLRPC_POLICY_PLAIN);

        if (SPTLRPC_FLVR_BASE(req->rq_flvr.sf_rpc) !=
            SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN) ||
            SPTLRPC_FLVR_BULK_TYPE(req->rq_flvr.sf_rpc) !=
            SPTLRPC_FLVR_BULK_TYPE(SPTLRPC_FLVR_PLAIN)) {
                CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
                return SECSVC_DROP;
        }

        if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
                CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
                return SECSVC_DROP;
        }

        swabbed = ptlrpc_req_need_swab(req);

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
        if (!phdr) {
                CERROR("missing plain header\n");
                return -EPROTO;
        }

        if (phdr->ph_ver != 0) {
                CERROR("Invalid header version\n");
                return -EPROTO;
        }

        if (phdr->ph_bulk_hash_alg >= BULK_HASH_ALG_MAX) {
                CERROR("invalid hash algorithm: %u\n", phdr->ph_bulk_hash_alg);
                return -EPROTO;
        }

        req->rq_sp_from = phdr->ph_sp;
        req->rq_flvr.u_bulk.hash.hash_alg = phdr->ph_bulk_hash_alg;

        if (phdr->ph_flags & PLAIN_FL_USER) {
                if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF,
                                             swabbed)) {
                        CERROR("Mal-formed user descriptor\n");
                        return SECSVC_DROP;
                }

                req->rq_pack_udesc = 1;
                req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
        }

        if (phdr->ph_flags & PLAIN_FL_BULK) {
                if (plain_unpack_bsd(msg, swabbed))
                        return SECSVC_DROP;

                req->rq_pack_bulk = 1;
        }

        req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
        req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];

        req->rq_svc_ctx = &plain_svc_ctx;
        atomic_inc(&req->rq_svc_ctx->sc_refcount);

        return SECSVC_OK;
}

static
int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
{
        struct ptlrpc_reply_state *rs;
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int rs_size = sizeof(*rs);

        LASSERT(msgsize % 8 == 0);

        buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
        buflens[PLAIN_PACK_MSG_OFF] = msgsize;

        if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write))
                buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;

        rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        rs = req->rq_reply_state;

        if (rs) {
                /* pre-allocated */
                LASSERT(rs->rs_size >= rs_size);
        } else {
                rs = libcfs_kvzalloc(rs_size, GFP_NOFS);
                if (!rs)
                        return -ENOMEM;

                rs->rs_size = rs_size;
        }

        rs->rs_svc_ctx = req->rq_svc_ctx;
        atomic_inc(&req->rq_svc_ctx->sc_refcount);
        rs->rs_repbuf = (struct lustre_msg *)(rs + 1);
        rs->rs_repbuf_len = rs_size - sizeof(*rs);

        lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
        rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);

        req->rq_reply_state = rs;
        return 0;
}

static
void plain_free_rs(struct ptlrpc_reply_state *rs)
{
        LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
        atomic_dec(&rs->rs_svc_ctx->sc_refcount);

        if (!rs->rs_prealloc)
                kvfree(rs);
}

static
int plain_authorize(struct ptlrpc_request *req)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct lustre_msg_v2 *msg = rs->rs_repbuf;
        struct plain_header *phdr;
        int len;

        LASSERT(rs);
        LASSERT(msg);

        if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
                len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
                                        req->rq_replen, 1);
        else
                len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);

        msg->lm_secflvr = req->rq_flvr.sf_rpc;

        phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
        phdr->ph_ver = 0;
        phdr->ph_flags = 0;
        phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

        if (req->rq_pack_bulk)
                phdr->ph_flags |= PLAIN_FL_BULK;

        rs->rs_repdata_len = len;

        if (likely(req->rq_packed_final)) {
                if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
                        req->rq_reply_off = plain_at_offset;
                else
                        req->rq_reply_off = 0;
        } else {
                unsigned int hsize = 4;

                cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
                                       lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF,
                                                      0),
                                       lustre_msg_buflen(msg,
                                                         PLAIN_PACK_MSG_OFF),
                                       NULL, 0, (unsigned char *)&msg->lm_cksum,
                                       &hsize);
                req->rq_reply_off = 0;
        }

        return 0;
}
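
/*
 * Note the symmetry with plain_ctx_verify(): when the reply cannot be
 * placed at its final offset (!rq_packed_final), plain_authorize() stores
 * a CRC32 of the embedded message in lm_cksum, and the client recomputes
 * and compares it on the early-reply path.
 */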

static
int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
        struct plain_bulk_token *tokenr;
        int rc;

        LASSERT(req->rq_bulk_write);
        LASSERT(req->rq_pack_bulk);

        bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
        tokenr = (struct plain_bulk_token *)bsdr->bsd_data;
        bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);

        bsdv->bsd_version = 0;
        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsdv->bsd_svc = bsdr->bsd_svc;
        bsdv->bsd_flags = 0;

        if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
                return 0;

        rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                    tokenr);
        if (rc) {
                bsdv->bsd_flags |= BSD_FL_ERR;
                CERROR("bulk write: server verify failed: %d\n", rc);
        }

        return rc;
}

static
int plain_svc_wrap_bulk(struct ptlrpc_request *req,
                        struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_reply_state *rs = req->rq_reply_state;
        struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
        struct plain_bulk_token *tokenv;
        int rc;

        LASSERT(req->rq_bulk_read);
        LASSERT(req->rq_pack_bulk);

        bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
        bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
        tokenv = (struct plain_bulk_token *)bsdv->bsd_data;

        bsdv->bsd_version = 0;
        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
        bsdv->bsd_svc = bsdr->bsd_svc;
        bsdv->bsd_flags = 0;

        if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
                return 0;

        rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
                                      tokenv);
        if (rc) {
                CERROR("bulk read: server failed to compute checksum: %d\n",
                       rc);
        } else {
                if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
                        corrupt_bulk_data(desc);
        }

        return rc;
}

static struct ptlrpc_ctx_ops plain_ctx_ops = {
        .refresh                = plain_ctx_refresh,
        .validate               = plain_ctx_validate,
        .sign                   = plain_ctx_sign,
        .verify                 = plain_ctx_verify,
        .wrap_bulk              = plain_cli_wrap_bulk,
        .unwrap_bulk            = plain_cli_unwrap_bulk,
};

static struct ptlrpc_sec_cops plain_sec_cops = {
        .create_sec             = plain_create_sec,
        .destroy_sec            = plain_destroy_sec,
        .kill_sec               = plain_kill_sec,
        .lookup_ctx             = plain_lookup_ctx,
        .release_ctx            = plain_release_ctx,
        .flush_ctx_cache        = plain_flush_ctx_cache,
        .alloc_reqbuf           = plain_alloc_reqbuf,
        .free_reqbuf            = plain_free_reqbuf,
        .alloc_repbuf           = plain_alloc_repbuf,
        .free_repbuf            = plain_free_repbuf,
        .enlarge_reqbuf         = plain_enlarge_reqbuf,
};

static struct ptlrpc_sec_sops plain_sec_sops = {
        .accept                 = plain_accept,
        .alloc_rs               = plain_alloc_rs,
        .authorize              = plain_authorize,
        .free_rs                = plain_free_rs,
        .unwrap_bulk            = plain_svc_unwrap_bulk,
        .wrap_bulk              = plain_svc_wrap_bulk,
};

static struct ptlrpc_sec_policy plain_policy = {
        .sp_owner               = THIS_MODULE,
        .sp_name                = "plain",
        .sp_policy              = SPTLRPC_POLICY_PLAIN,
        .sp_cops                = &plain_sec_cops,
        .sp_sops                = &plain_sec_sops,
};

int sptlrpc_plain_init(void)
{
        __u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
        int rc;

        buflens[PLAIN_PACK_MSG_OFF] = lustre_msg_early_size();
        plain_at_offset = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

        rc = sptlrpc_register_policy(&plain_policy);
        if (rc)
                CERROR("failed to register: %d\n", rc);

        return rc;
}

void sptlrpc_plain_fini(void)
{
        int rc;

        rc = sptlrpc_unregister_policy(&plain_policy);
        if (rc)
                CERROR("cannot unregister: %d\n", rc);
}