GNU Linux-libre 4.4.283-gnu1
[releases.git] / fs / nfsd / nfs4state.c
1 /*
2 *  Copyright (c) 2001 The Regents of the University of Michigan.
3 *  All rights reserved.
4 *
5 *  Kendrick Smith <kmsmith@umich.edu>
6 *  Andy Adamson <kandros@umich.edu>
7 *
8 *  Redistribution and use in source and binary forms, with or without
9 *  modification, are permitted provided that the following conditions
10 *  are met:
11 *
12 *  1. Redistributions of source code must retain the above copyright
13 *     notice, this list of conditions and the following disclaimer.
14 *  2. Redistributions in binary form must reproduce the above copyright
15 *     notice, this list of conditions and the following disclaimer in the
16 *     documentation and/or other materials provided with the distribution.
17 *  3. Neither the name of the University nor the names of its
18 *     contributors may be used to endorse or promote products derived
19 *     from this software without specific prior written permission.
20 *
21 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35 #include <linux/file.h>
36 #include <linux/fs.h>
37 #include <linux/slab.h>
38 #include <linux/namei.h>
39 #include <linux/swap.h>
40 #include <linux/pagemap.h>
41 #include <linux/ratelimit.h>
42 #include <linux/sunrpc/svcauth_gss.h>
43 #include <linux/sunrpc/addr.h>
44 #include <linux/jhash.h>
45 #include "xdr4.h"
46 #include "xdr4cb.h"
47 #include "vfs.h"
48 #include "current_stateid.h"
49
50 #include "netns.h"
51 #include "pnfs.h"
52
53 #define NFSDDBG_FACILITY                NFSDDBG_PROC
54
55 #define all_ones {{~0,~0},~0}
56 static const stateid_t one_stateid = {
57         .si_generation = ~0,
58         .si_opaque = all_ones,
59 };
60 static const stateid_t zero_stateid = {
61         /* all fields zero */
62 };
63 static const stateid_t currentstateid = {
64         .si_generation = 1,
65 };
66 static const stateid_t close_stateid = {
67         .si_generation = 0xffffffffU,
68 };
69
70 static u64 current_sessionid = 1;
71
72 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
73 #define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
74 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
75 #define CLOSE_STATEID(stateid)  (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
76
77 /* forward declarations */
78 static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
79 static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
80
81 /* Locking: */
82
83 /*
84  * Currently used for the del_recall_lru and file hash table.  In an
85  * effort to decrease the scope of the client_mutex, this spinlock may
86  * eventually cover more:
87  */
88 static DEFINE_SPINLOCK(state_lock);
89
90 /*
91  * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
92  * the refcount on the open stateid to drop.
93  */
94 static DECLARE_WAIT_QUEUE_HEAD(close_wq);
95
96 static struct kmem_cache *openowner_slab;
97 static struct kmem_cache *lockowner_slab;
98 static struct kmem_cache *file_slab;
99 static struct kmem_cache *stateid_slab;
100 static struct kmem_cache *deleg_slab;
101 static struct kmem_cache *odstate_slab;
102
103 static void free_session(struct nfsd4_session *);
104
105 static struct nfsd4_callback_ops nfsd4_cb_recall_ops;
106
107 static bool is_session_dead(struct nfsd4_session *ses)
108 {
109         return ses->se_flags & NFS4_SESSION_DEAD;
110 }
111
112 static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
113 {
114         if (atomic_read(&ses->se_ref) > ref_held_by_me)
115                 return nfserr_jukebox;
116         ses->se_flags |= NFS4_SESSION_DEAD;
117         return nfs_ok;
118 }
119
120 static bool is_client_expired(struct nfs4_client *clp)
121 {
122         return clp->cl_time == 0;
123 }
124
125 static __be32 get_client_locked(struct nfs4_client *clp)
126 {
127         struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
128
129         lockdep_assert_held(&nn->client_lock);
130
131         if (is_client_expired(clp))
132                 return nfserr_expired;
133         atomic_inc(&clp->cl_refcount);
134         return nfs_ok;
135 }
136
137 /* must be called under the client_lock */
/*
 * Record a lease renewal: move the client to the fresh end of the
 * per-net client LRU and update its renewal timestamp.  Caller must
 * hold the client_lock (see comment above).
 */
138 static inline void
139 renew_client_locked(struct nfs4_client *clp)
140 {
141         struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
142
        /* Renewing an already-expired client indicates a refcount bug. */
143         if (is_client_expired(clp)) {
144                 WARN_ON(1);
145                 printk("%s: client (clientid %08x/%08x) already expired\n",
146                         __func__,
147                         clp->cl_clientid.cl_boot,
148                         clp->cl_clientid.cl_id);
149                 return;
150         }
151
152         dprintk("renewing client (clientid %08x/%08x)\n",
153                         clp->cl_clientid.cl_boot,
154                         clp->cl_clientid.cl_id);
155         list_move_tail(&clp->cl_lru, &nn->client_lru);
156         clp->cl_time = get_seconds();
157 }
158
159 static void put_client_renew_locked(struct nfs4_client *clp)
160 {
161         struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
162
163         lockdep_assert_held(&nn->client_lock);
164
165         if (!atomic_dec_and_test(&clp->cl_refcount))
166                 return;
167         if (!is_client_expired(clp))
168                 renew_client_locked(clp);
169 }
170
171 static void put_client_renew(struct nfs4_client *clp)
172 {
173         struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
174
175         if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
176                 return;
177         if (!is_client_expired(clp))
178                 renew_client_locked(clp);
179         spin_unlock(&nn->client_lock);
180 }
181
182 static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
183 {
184         __be32 status;
185
186         if (is_session_dead(ses))
187                 return nfserr_badsession;
188         status = get_client_locked(ses->se_client);
189         if (status)
190                 return status;
191         atomic_inc(&ses->se_ref);
192         return nfs_ok;
193 }
194
195 static void nfsd4_put_session_locked(struct nfsd4_session *ses)
196 {
197         struct nfs4_client *clp = ses->se_client;
198         struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
199
200         lockdep_assert_held(&nn->client_lock);
201
202         if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
203                 free_session(ses);
204         put_client_renew_locked(clp);
205 }
206
207 static void nfsd4_put_session(struct nfsd4_session *ses)
208 {
209         struct nfs4_client *clp = ses->se_client;
210         struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
211
212         spin_lock(&nn->client_lock);
213         nfsd4_put_session_locked(ses);
214         spin_unlock(&nn->client_lock);
215 }
216
217 static inline struct nfs4_stateowner *
218 nfs4_get_stateowner(struct nfs4_stateowner *sop)
219 {
220         atomic_inc(&sop->so_count);
221         return sop;
222 }
223
224 static int
225 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
226 {
227         return (sop->so_owner.len == owner->len) &&
228                 0 == memcmp(sop->so_owner.data, owner->data, owner->len);
229 }
230
231 static struct nfs4_openowner *
232 find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
233                         struct nfs4_client *clp)
234 {
235         struct nfs4_stateowner *so;
236
237         lockdep_assert_held(&clp->cl_lock);
238
239         list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
240                             so_strhash) {
241                 if (!so->so_is_open_owner)
242                         continue;
243                 if (same_owner_str(so, &open->op_owner))
244                         return openowner(nfs4_get_stateowner(so));
245         }
246         return NULL;
247 }
248
249 static struct nfs4_openowner *
250 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
251                         struct nfs4_client *clp)
252 {
253         struct nfs4_openowner *oo;
254
255         spin_lock(&clp->cl_lock);
256         oo = find_openstateowner_str_locked(hashval, open, clp);
257         spin_unlock(&clp->cl_lock);
258         return oo;
259 }
260
261 static inline u32
262 opaque_hashval(const void *ptr, int nbytes)
263 {
264         unsigned char *cptr = (unsigned char *) ptr;
265
266         u32 x = 0;
267         while (nbytes--) {
268                 x *= 37;
269                 x += *cptr++;
270         }
271         return x;
272 }
273
274 static void nfsd4_free_file_rcu(struct rcu_head *rcu)
275 {
276         struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
277
278         kmem_cache_free(file_slab, fp);
279 }
280
281 void
282 put_nfs4_file(struct nfs4_file *fi)
283 {
284         might_lock(&state_lock);
285
286         if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
287                 hlist_del_rcu(&fi->fi_hash);
288                 spin_unlock(&state_lock);
289                 WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
290                 WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
291                 call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
292         }
293 }
294
295 static struct file *
296 __nfs4_get_fd(struct nfs4_file *f, int oflag)
297 {
298         if (f->fi_fds[oflag])
299                 return get_file(f->fi_fds[oflag]);
300         return NULL;
301 }
302
303 static struct file *
304 find_writeable_file_locked(struct nfs4_file *f)
305 {
306         struct file *ret;
307
308         lockdep_assert_held(&f->fi_lock);
309
310         ret = __nfs4_get_fd(f, O_WRONLY);
311         if (!ret)
312                 ret = __nfs4_get_fd(f, O_RDWR);
313         return ret;
314 }
315
316 static struct file *
317 find_writeable_file(struct nfs4_file *f)
318 {
319         struct file *ret;
320
321         spin_lock(&f->fi_lock);
322         ret = find_writeable_file_locked(f);
323         spin_unlock(&f->fi_lock);
324
325         return ret;
326 }
327
328 static struct file *find_readable_file_locked(struct nfs4_file *f)
329 {
330         struct file *ret;
331
332         lockdep_assert_held(&f->fi_lock);
333
334         ret = __nfs4_get_fd(f, O_RDONLY);
335         if (!ret)
336                 ret = __nfs4_get_fd(f, O_RDWR);
337         return ret;
338 }
339
340 static struct file *
341 find_readable_file(struct nfs4_file *f)
342 {
343         struct file *ret;
344
345         spin_lock(&f->fi_lock);
346         ret = find_readable_file_locked(f);
347         spin_unlock(&f->fi_lock);
348
349         return ret;
350 }
351
352 struct file *
353 find_any_file(struct nfs4_file *f)
354 {
355         struct file *ret;
356
357         spin_lock(&f->fi_lock);
358         ret = __nfs4_get_fd(f, O_RDWR);
359         if (!ret) {
360                 ret = __nfs4_get_fd(f, O_WRONLY);
361                 if (!ret)
362                         ret = __nfs4_get_fd(f, O_RDONLY);
363         }
364         spin_unlock(&f->fi_lock);
365         return ret;
366 }
367
368 static atomic_long_t num_delegations;
369 unsigned long max_delegations;
370
371 /*
372  * Open owner state (share locks)
373  */
374
375 /* hash tables for lock and open owners */
376 #define OWNER_HASH_BITS              8
377 #define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
378 #define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)
379
380 static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
381 {
382         unsigned int ret;
383
384         ret = opaque_hashval(ownername->data, ownername->len);
385         return ret & OWNER_HASH_MASK;
386 }
387
388 /* hash table for nfs4_file */
389 #define FILE_HASH_BITS                   8
390 #define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)
391
392 static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
393 {
394         return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
395 }
396
397 static unsigned int file_hashval(struct knfsd_fh *fh)
398 {
399         return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
400 }
401
402 static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
403
404 static void
405 __nfs4_file_get_access(struct nfs4_file *fp, u32 access)
406 {
407         lockdep_assert_held(&fp->fi_lock);
408
409         if (access & NFS4_SHARE_ACCESS_WRITE)
410                 atomic_inc(&fp->fi_access[O_WRONLY]);
411         if (access & NFS4_SHARE_ACCESS_READ)
412                 atomic_inc(&fp->fi_access[O_RDONLY]);
413 }
414
/*
 * Validate @access against fp's currently-set deny modes and, on
 * success, take the corresponding access reference(s).  Caller must
 * hold fp->fi_lock.  Returns nfs_ok, nfserr_inval or
 * nfserr_share_denied.
 */
415 static __be32
416 nfs4_file_get_access(struct nfs4_file *fp, u32 access)
417 {
418         lockdep_assert_held(&fp->fi_lock);
419 
420         /* Does this access mode make sense? */
421         if (access & ~NFS4_SHARE_ACCESS_BOTH)
422                 return nfserr_inval;
423 
424         /* Does it conflict with a deny mode already set? */
425         if ((access & fp->fi_share_deny) != 0)
426                 return nfserr_share_denied;
427 
428         __nfs4_file_get_access(fp, access);
429         return nfs_ok;
430 }
431
432 static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
433 {
434         /* Common case is that there is no deny mode. */
435         if (deny) {
436                 /* Does this deny mode make sense? */
437                 if (deny & ~NFS4_SHARE_DENY_BOTH)
438                         return nfserr_inval;
439
440                 if ((deny & NFS4_SHARE_DENY_READ) &&
441                     atomic_read(&fp->fi_access[O_RDONLY]))
442                         return nfserr_share_denied;
443
444                 if ((deny & NFS4_SHARE_DENY_WRITE) &&
445                     atomic_read(&fp->fi_access[O_WRONLY]))
446                         return nfserr_share_denied;
447         }
448         return nfs_ok;
449 }
450
/*
 * Drop one reference on fp's O_RDONLY/O_WRONLY access count.  When that
 * count reaches zero, the matching struct file — and the O_RDWR file
 * too, once the opposite count is also zero — is detached under
 * fi_lock and released only after the lock is dropped.
 */
451 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
452 {
453         might_lock(&fp->fi_lock);
454 
455         if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
456                 struct file *f1 = NULL;
457                 struct file *f2 = NULL;
458 
                /* Detach file pointers while holding fi_lock... */
459                 swap(f1, fp->fi_fds[oflag]);
460                 if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
461                         swap(f2, fp->fi_fds[O_RDWR]);
462                 spin_unlock(&fp->fi_lock);
                /* ...and release them outside it. */
463                 if (f1)
464                         fput(f1);
465                 if (f2)
466                         fput(f2);
467         }
468 }
469
470 static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
471 {
472         WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
473
474         if (access & NFS4_SHARE_ACCESS_WRITE)
475                 __nfs4_file_put_access(fp, O_WRONLY);
476         if (access & NFS4_SHARE_ACCESS_READ)
477                 __nfs4_file_put_access(fp, O_RDONLY);
478 }
479
480 /*
481  * Allocate a new open/delegation state counter. This is needed for
482  * pNFS for proper return on close semantics.
483  *
484  * Note that we only allocate it for pNFS-enabled exports, otherwise
485  * all pointers to struct nfs4_clnt_odstate are always NULL.
486  */
487 static struct nfs4_clnt_odstate *
488 alloc_clnt_odstate(struct nfs4_client *clp)
489 {
490         struct nfs4_clnt_odstate *co;
491
492         co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
493         if (co) {
494                 co->co_client = clp;
495                 atomic_set(&co->co_odcount, 1);
496         }
497         return co;
498 }
499
500 static void
501 hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
502 {
503         struct nfs4_file *fp = co->co_file;
504
505         lockdep_assert_held(&fp->fi_lock);
506         list_add(&co->co_perfile, &fp->fi_clnt_odstate);
507 }
508
509 static inline void
510 get_clnt_odstate(struct nfs4_clnt_odstate *co)
511 {
512         if (co)
513                 atomic_inc(&co->co_odcount);
514 }
515
516 static void
517 put_clnt_odstate(struct nfs4_clnt_odstate *co)
518 {
519         struct nfs4_file *fp;
520
521         if (!co)
522                 return;
523
524         fp = co->co_file;
525         if (atomic_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
526                 list_del(&co->co_perfile);
527                 spin_unlock(&fp->fi_lock);
528
529                 nfsd4_return_all_file_layouts(co->co_client, fp);
530                 kmem_cache_free(odstate_slab, co);
531         }
532 }
533
/*
 * Return an odstate for @new's client on file @fp: reuse (and take a
 * reference on) an existing per-file entry for the same client if one
 * exists, otherwise hash @new itself.  Returns NULL only when @new is
 * NULL (non-pNFS case).
 */
534 static struct nfs4_clnt_odstate *
535 find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
536 {
537         struct nfs4_clnt_odstate *co;
538         struct nfs4_client *cl;
539 
540         if (!new)
541                 return NULL;
542 
543         cl = new->co_client;
544 
545         spin_lock(&fp->fi_lock);
        /* Reuse an existing odstate for this client if there is one. */
546         list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
547                 if (co->co_client == cl) {
548                         get_clnt_odstate(co);
549                         goto out;
550                 }
551         }
        /* None found: install @new under the same lock. */
552         co = new;
553         co->co_file = fp;
554         hash_clnt_odstate_locked(new);
555 out:
556         spin_unlock(&fp->fi_lock);
557         return co;
558 }
559
/*
 * Allocate a zeroed stateid from @slab, install it in the client's
 * stateid IDR (which supplies the "other" id), and initialize its
 * refcount to 1.  @sc_free is the destructor later invoked by
 * nfs4_put_stid().  Returns NULL on allocation or IDR failure.
 */
560 struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
561                                   void (*sc_free)(struct nfs4_stid *))
562 {
563         struct nfs4_stid *stid;
564         int new_id;
565 
566         stid = kmem_cache_zalloc(slab, GFP_KERNEL);
567         if (!stid)
568                 return NULL;
569 
        /*
         * Preload outside cl_lock so the GFP_NOWAIT allocation under the
         * lock can draw from the preloaded per-cpu pool.
         */
570         idr_preload(GFP_KERNEL);
571         spin_lock(&cl->cl_lock);
        /* idr_alloc_cyclic implements the "always increase" policy below. */
572         new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
573         spin_unlock(&cl->cl_lock);
574         idr_preload_end();
575         if (new_id < 0)
576                 goto out_free;
577 
578         stid->sc_free = sc_free;
579         stid->sc_client = cl;
580         stid->sc_stateid.si_opaque.so_id = new_id;
581         stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
582         /* Will be incremented before return to client: */
583         atomic_set(&stid->sc_count, 1);
584         spin_lock_init(&stid->sc_lock);
585 
586         /*
587          * It shouldn't be a problem to reuse an opaque stateid value.
588          * I don't think it is for 4.1.  But with 4.0 I worry that, for
589          * example, a stray write retransmission could be accepted by
590          * the server when it should have been rejected.  Therefore,
591          * adopt a trick from the sctp code to attempt to maximize the
592          * amount of time until an id is reused, by ensuring they always
593          * "increase" (mod INT_MAX):
594          */
595         return stid;
596 out_free:
597         kmem_cache_free(slab, stid);
598         return NULL;
599 }
600
601 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
602 {
603         struct nfs4_stid *stid;
604
605         stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
606         if (!stid)
607                 return NULL;
608
609         return openlockstateid(stid);
610 }
611
612 static void nfs4_free_deleg(struct nfs4_stid *stid)
613 {
614         kmem_cache_free(deleg_slab, stid);
615         atomic_long_dec(&num_delegations);
616 }
617
618 /*
619  * When we recall a delegation, we should be careful not to hand it
620  * out again straight away.
621  * To ensure this we keep a pair of bloom filters ('new' and 'old')
622  * in which the filehandles of recalled delegations are "stored".
623  * If a filehandle appear in either filter, a delegation is blocked.
624  * When a delegation is recalled, the filehandle is stored in the "new"
625  * filter.
626  * Every 30 seconds we swap the filters and clear the "new" one,
627  * unless both are empty of course.
628  *
629  * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
630  * low 3 bytes as hash-table indices.
631  *
632  * 'blocked_delegations_lock', which is always taken in block_delegations(),
633  * is used to manage concurrent access.  Testing does not need the lock
634  * except when swapping the two filters.
635  */
636 static DEFINE_SPINLOCK(blocked_delegations_lock);
637 static struct bloom_pair {
638         int     entries, old_entries;
639         time_t  swap_time;
640         int     new; /* index into 'set' */
641         DECLARE_BITMAP(set[2], 256);
642 } blocked_delegations;
643
/*
 * Is a new delegation for this filehandle currently blocked by either
 * bloom filter?  Also opportunistically retires the old filter when
 * more than 30 seconds have passed since the last swap.  The filter
 * reads are deliberately lockless (see the comment above bloom_pair).
 */
644 static int delegation_blocked(struct knfsd_fh *fh)
645 {
646         u32 hash;
647         struct bloom_pair *bd = &blocked_delegations;
648 
649         if (bd->entries == 0)
650                 return 0;
651         if (seconds_since_boot() - bd->swap_time > 30) {
652                 spin_lock(&blocked_delegations_lock);
                /* Re-check under the lock before swapping/clearing. */
653                 if (seconds_since_boot() - bd->swap_time > 30) {
654                         bd->entries -= bd->old_entries;
655                         bd->old_entries = bd->entries;
656                         memset(bd->set[bd->new], 0,
657                                sizeof(bd->set[0]));
658                         bd->new = 1-bd->new;
659                         bd->swap_time = seconds_since_boot();
660                 }
661                 spin_unlock(&blocked_delegations_lock);
662         }
663         hash = jhash(&fh->fh_base, fh->fh_size, 0);
        /* Blocked only if all three probe bits are set in a filter. */
664         if (test_bit(hash&255, bd->set[0]) &&
665             test_bit((hash>>8)&255, bd->set[0]) &&
666             test_bit((hash>>16)&255, bd->set[0]))
667                 return 1;
668 
669         if (test_bit(hash&255, bd->set[1]) &&
670             test_bit((hash>>8)&255, bd->set[1]) &&
671             test_bit((hash>>16)&255, bd->set[1]))
672                 return 1;
673 
674         return 0;
675 }
676
677 static void block_delegations(struct knfsd_fh *fh)
678 {
679         u32 hash;
680         struct bloom_pair *bd = &blocked_delegations;
681
682         hash = jhash(&fh->fh_base, fh->fh_size, 0);
683
684         spin_lock(&blocked_delegations_lock);
685         __set_bit(hash&255, bd->set[bd->new]);
686         __set_bit((hash>>8)&255, bd->set[bd->new]);
687         __set_bit((hash>>16)&255, bd->set[bd->new]);
688         if (bd->entries == 0)
689                 bd->swap_time = seconds_since_boot();
690         bd->entries += 1;
691         spin_unlock(&blocked_delegations_lock);
692 }
693
/*
 * Allocate and minimally initialize a read delegation stateid for @clp
 * on @current_fh, honouring the global max_delegations limit and the
 * recalled-delegation bloom filters.  Returns NULL when delegating is
 * currently not allowed or allocation fails.
 */
694 static struct nfs4_delegation *
695 alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
696                  struct nfs4_clnt_odstate *odstate)
697 {
698         struct nfs4_delegation *dp;
699         long n;
700 
701         dprintk("NFSD alloc_init_deleg\n");
        /* Count first; every failure path must undo this (out_dec). */
702         n = atomic_long_inc_return(&num_delegations);
703         if (n < 0 || n > max_delegations)
704                 goto out_dec;
705         if (delegation_blocked(&current_fh->fh_handle))
706                 goto out_dec;
707         dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
708         if (dp == NULL)
709                 goto out_dec;
710 
711         /*
712          * delegation seqid's are never incremented.  The 4.1 special
713          * meaning of seqid 0 isn't meaningful, really, but let's avoid
714          * 0 anyway just for consistency and use 1:
715          */
716         dp->dl_stid.sc_stateid.si_generation = 1;
717         INIT_LIST_HEAD(&dp->dl_perfile);
718         INIT_LIST_HEAD(&dp->dl_perclnt);
719         INIT_LIST_HEAD(&dp->dl_recall_lru);
720         dp->dl_clnt_odstate = odstate;
721         get_clnt_odstate(odstate);
722         dp->dl_type = NFS4_OPEN_DELEGATE_READ;
723         dp->dl_retries = 1;
724         nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
725                       &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
726         return dp;
727 out_dec:
728         atomic_long_dec(&num_delegations);
729         return NULL;
730 }
731
/*
 * Drop a reference on a stateid.  The final put removes it from the
 * client's stateid IDR and invokes its sc_free destructor; non-final
 * puts wake any NFSv4.0 CLOSE waiters parked on close_wq.
 */
732 void
733 nfs4_put_stid(struct nfs4_stid *s)
734 {
735         struct nfs4_file *fp = s->sc_file;
736         struct nfs4_client *clp = s->sc_client;
737 
738         might_lock(&clp->cl_lock);
739 
740         if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
                /* Not the last reference; nudge waiting CLOSEs. */
741                 wake_up_all(&close_wq);
742                 return;
743         }
744         idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
745         spin_unlock(&clp->cl_lock);
746         s->sc_free(s);
747         if (fp)
748                 put_nfs4_file(fp);
749 }
750
751 void
752 nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
753 {
754         stateid_t *src = &stid->sc_stateid;
755
756         spin_lock(&stid->sc_lock);
757         if (unlikely(++src->si_generation == 0))
758                 src->si_generation = 1;
759         memcpy(dst, src, sizeof(*dst));
760         spin_unlock(&stid->sc_lock);
761 }
762
/*
 * Drop one delegee reference on fp's delegation lease.  The last one
 * detaches fi_deleg_file under fi_lock, then releases the lease and the
 * file reference outside the lock.
 */
763 static void nfs4_put_deleg_lease(struct nfs4_file *fp)
764 {
765         struct file *filp = NULL;
766 
767         spin_lock(&fp->fi_lock);
768         if (fp->fi_deleg_file && --fp->fi_delegees == 0)
769                 swap(filp, fp->fi_deleg_file);
770         spin_unlock(&fp->fi_lock);
771 
772         if (filp) {
773                 vfs_setlease(filp, F_UNLCK, NULL, (void **)&fp);
774                 fput(filp);
775         }
776 }
777
778 void nfs4_unhash_stid(struct nfs4_stid *s)
779 {
780         s->sc_type = 0;
781 }
782
783 /**
784  * nfs4_get_existing_delegation - Discover if this delegation already exists
785  * @clp:     a pointer to the nfs4_client we're granting a delegation to
786  * @fp:      a pointer to the nfs4_file we're granting a delegation on
787  *
788  * Return:
789  *      On success: NULL if an existing delegation was not found.
790  *
791  *      On error: -EAGAIN if one was previously granted to this nfs4_client
792  *                 for this nfs4_file.
793  *
794  */
795
796 static int
797 nfs4_get_existing_delegation(struct nfs4_client *clp, struct nfs4_file *fp)
798 {
799         struct nfs4_delegation *searchdp = NULL;
800         struct nfs4_client *searchclp = NULL;
801
802         lockdep_assert_held(&state_lock);
803         lockdep_assert_held(&fp->fi_lock);
804
805         list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
806                 searchclp = searchdp->dl_stid.sc_client;
807                 if (clp == searchclp) {
808                         return -EAGAIN;
809                 }
810         }
811         return 0;
812 }
813
814 /**
815  * hash_delegation_locked - Add a delegation to the appropriate lists
816  * @dp:     a pointer to the nfs4_delegation we are adding.
817  * @fp:     a pointer to the nfs4_file we're granting a delegation on
818  *
819  * Return:
820  *      On success: NULL if the delegation was successfully hashed.
821  *
822  *      On error: -EAGAIN if one was previously granted to this
823  *                 nfs4_client for this nfs4_file. Delegation is not hashed.
824  *
825  */
826
827 static int
828 hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
829 {
830         int status;
831         struct nfs4_client *clp = dp->dl_stid.sc_client;
832 
833         lockdep_assert_held(&state_lock);
834         lockdep_assert_held(&fp->fi_lock);
835 
        /* Refuse a second delegation to the same client for this file. */
836         status = nfs4_get_existing_delegation(clp, fp);
837         if (status)
838                 return status;
839         ++fp->fi_delegees;
        /* The hashed lists hold their own reference on the stateid. */
840         atomic_inc(&dp->dl_stid.sc_count);
841         dp->dl_stid.sc_type = NFS4_DELEG_STID;
842         list_add(&dp->dl_perfile, &fp->fi_delegations);
843         list_add(&dp->dl_perclnt, &clp->cl_delegations);
844         return 0;
845 }
846
/*
 * Take @dp off all lists and mark it closed.  Returns false if it was
 * already unhashed (dl_perfile empty), so the caller knows whether it
 * now owns the unhash-side cleanup.  Caller holds state_lock.
 */
847 static bool
848 unhash_delegation_locked(struct nfs4_delegation *dp)
849 {
850         struct nfs4_file *fp = dp->dl_stid.sc_file;
851 
852         lockdep_assert_held(&state_lock);
853 
854         if (list_empty(&dp->dl_perfile))
855                 return false;
856 
857         dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
858         /* Ensure that deleg break won't try to requeue it */
859         ++dp->dl_time;
860         spin_lock(&fp->fi_lock);
861         list_del_init(&dp->dl_perclnt);
862         list_del_init(&dp->dl_recall_lru);
863         list_del_init(&dp->dl_perfile);
864         spin_unlock(&fp->fi_lock);
865         return true;
866 }
867
868 static void destroy_delegation(struct nfs4_delegation *dp)
869 {
870         bool unhashed;
871
872         spin_lock(&state_lock);
873         unhashed = unhash_delegation_locked(dp);
874         spin_unlock(&state_lock);
875         if (unhashed) {
876                 put_clnt_odstate(dp->dl_clnt_odstate);
877                 nfs4_put_deleg_lease(dp->dl_stid.sc_file);
878                 nfs4_put_stid(&dp->dl_stid);
879         }
880 }
881
/*
 * Final disposition of an already-unhashed delegation.  NFSv4.0 clients
 * get the stateid freed immediately; for 4.1+ it is kept on cl_revoked,
 * marked NFS4_REVOKED_DELEG_STID — presumably until the client disposes
 * of it, but the consumer of cl_revoked is outside this chunk.
 */
882 static void revoke_delegation(struct nfs4_delegation *dp)
883 {
884         struct nfs4_client *clp = dp->dl_stid.sc_client;
885 
886         WARN_ON(!list_empty(&dp->dl_recall_lru));
887 
888         put_clnt_odstate(dp->dl_clnt_odstate);
889         nfs4_put_deleg_lease(dp->dl_stid.sc_file);
890 
891         if (clp->cl_minorversion == 0)
892                 nfs4_put_stid(&dp->dl_stid);
893         else {
894                 dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
895                 spin_lock(&clp->cl_lock);
896                 list_add(&dp->dl_recall_lru, &clp->cl_revoked);
897                 spin_unlock(&clp->cl_lock);
898         }
899 }
900
901 /* 
902  * SETCLIENTID state 
903  */
904
905 static unsigned int clientid_hashval(u32 id)
906 {
907         return id & CLIENT_HASH_MASK;
908 }
909
910 static unsigned int clientstr_hashval(const char *name)
911 {
912         return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
913 }
914
915 /*
916  * We store the NONE, READ, WRITE, and BOTH bits separately in the
917  * st_{access,deny}_bmap field of the stateid, in order to track not
918  * only what share bits are currently in force, but also what
919  * combinations of share bits previous opens have used.  This allows us
920  * to enforce the recommendation of rfc 3530 14.2.19 that the server
921  * return an error if the client attempt to downgrade to a combination
922  * of share bits not explicable by closing some of its previous opens.
923  *
924  * XXX: This enforcement is actually incomplete, since we don't keep
925  * track of access/deny bit combinations; so, e.g., we allow:
926  *
927  *      OPEN allow read, deny write
928  *      OPEN allow both, deny none
929  *      DOWNGRADE allow read, deny none
930  *
931  * which we should reject.
932  */
/* Collapse an st_*_bmap bitmap back into an NFS4_SHARE_* mode mask. */
static unsigned int
bmap_to_share_mode(unsigned long bmap)
{
	unsigned int mode = 0;
	int bit;

	for (bit = 1; bit < 4; bit++) {
		if (test_bit(bit, &bmap))
			mode |= bit;
	}
	return mode;
}
944
945 /* set share access for a given stateid */
946 static inline void
947 set_access(u32 access, struct nfs4_ol_stateid *stp)
948 {
949         unsigned char mask = 1 << access;
950
951         WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
952         stp->st_access_bmap |= mask;
953 }
954
955 /* clear share access for a given stateid */
956 static inline void
957 clear_access(u32 access, struct nfs4_ol_stateid *stp)
958 {
959         unsigned char mask = 1 << access;
960
961         WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
962         stp->st_access_bmap &= ~mask;
963 }
964
965 /* test whether a given stateid has access */
966 static inline bool
967 test_access(u32 access, struct nfs4_ol_stateid *stp)
968 {
969         unsigned char mask = 1 << access;
970
971         return (bool)(stp->st_access_bmap & mask);
972 }
973
974 /* set share deny for a given stateid */
975 static inline void
976 set_deny(u32 deny, struct nfs4_ol_stateid *stp)
977 {
978         unsigned char mask = 1 << deny;
979
980         WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
981         stp->st_deny_bmap |= mask;
982 }
983
984 /* clear share deny for a given stateid */
985 static inline void
986 clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
987 {
988         unsigned char mask = 1 << deny;
989
990         WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
991         stp->st_deny_bmap &= ~mask;
992 }
993
994 /* test whether a given stateid is denying specific access */
995 static inline bool
996 test_deny(u32 deny, struct nfs4_ol_stateid *stp)
997 {
998         unsigned char mask = 1 << deny;
999
1000         return (bool)(stp->st_deny_bmap & mask);
1001 }
1002
1003 static int nfs4_access_to_omode(u32 access)
1004 {
1005         switch (access & NFS4_SHARE_ACCESS_BOTH) {
1006         case NFS4_SHARE_ACCESS_READ:
1007                 return O_RDONLY;
1008         case NFS4_SHARE_ACCESS_WRITE:
1009                 return O_WRONLY;
1010         case NFS4_SHARE_ACCESS_BOTH:
1011                 return O_RDWR;
1012         }
1013         WARN_ON_ONCE(1);
1014         return O_RDONLY;
1015 }
1016
1017 /*
1018  * A stateid that had a deny mode associated with it is being released
1019  * or downgraded. Recalculate the deny mode on the file.
1020  */
1021 static void
1022 recalculate_deny_mode(struct nfs4_file *fp)
1023 {
1024         struct nfs4_ol_stateid *stp;
1025
1026         spin_lock(&fp->fi_lock);
1027         fp->fi_share_deny = 0;
1028         list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
1029                 fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
1030         spin_unlock(&fp->fi_lock);
1031 }
1032
1033 static void
1034 reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
1035 {
1036         int i;
1037         bool change = false;
1038
1039         for (i = 1; i < 4; i++) {
1040                 if ((i & deny) != i) {
1041                         change = true;
1042                         clear_deny(i, stp);
1043                 }
1044         }
1045
1046         /* Recalculate per-file deny mode if there was a change */
1047         if (change)
1048                 recalculate_deny_mode(stp->st_stid.sc_file);
1049 }
1050
1051 /* release all access and file references for a given stateid */
1052 static void
1053 release_all_access(struct nfs4_ol_stateid *stp)
1054 {
1055         int i;
1056         struct nfs4_file *fp = stp->st_stid.sc_file;
1057
1058         if (fp && stp->st_deny_bmap != 0)
1059                 recalculate_deny_mode(fp);
1060
1061         for (i = 1; i < 4; i++) {
1062                 if (test_access(i, stp))
1063                         nfs4_file_put_access(stp->st_stid.sc_file, i);
1064                 clear_access(i, stp);
1065         }
1066 }
1067
/* Free a stateowner's copied owner name, then the owner via its ops. */
static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
	kfree(sop->so_owner.data);
	sop->so_ops->so_free(sop);
}
1073
/*
 * Drop a reference to a stateowner.  On the last put, unhash it under
 * the client's cl_lock and free it; atomic_dec_and_lock() only takes
 * cl_lock when the count actually reaches zero.
 */
static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
	struct nfs4_client *clp = sop->so_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
		return;
	sop->so_ops->so_unhash(sop);
	spin_unlock(&clp->cl_lock);
	nfs4_free_stateowner(sop);
}
1086
/*
 * Unhash an open/lock stateid from its file and stateowner lists.
 * Returns false if someone already unhashed it (st_perfile empty).
 * Caller must hold the owning client's cl_lock.
 */
static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_file *fp = stp->st_stid.sc_file;

	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

	if (list_empty(&stp->st_perfile))
		return false;

	/* The per-file list is protected by fi_lock, not cl_lock. */
	spin_lock(&fp->fi_lock);
	list_del_init(&stp->st_perfile);
	spin_unlock(&fp->fi_lock);
	list_del(&stp->st_perstateowner);
	return true;
}
1102
/* sc_free callback for open/lock stateids: release everything held. */
static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);

	put_clnt_odstate(stp->st_clnt_odstate);
	release_all_access(stp);
	/* st_stateowner can legitimately still be NULL here. */
	if (stp->st_stateowner)
		nfs4_put_stateowner(stp->st_stateowner);
	kmem_cache_free(stateid_slab, stid);
}
1113
/*
 * sc_free callback for lock stateids: close the file on behalf of the
 * lockowner (filp_close releases locks held by that fl_owner) before
 * the common open/lock stateid teardown.
 */
static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
	struct file *file;

	file = find_any_file(stp->st_stid.sc_file);
	if (file)
		filp_close(file, (fl_owner_t)lo);
	nfs4_free_ol_stateid(stid);
}
1125
1126 /*
1127  * Put the persistent reference to an already unhashed generic stateid, while
1128  * holding the cl_lock. If it's the last reference, then put it onto the
1129  * reaplist for later destruction.
1130  */
1131 static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
1132                                        struct list_head *reaplist)
1133 {
1134         struct nfs4_stid *s = &stp->st_stid;
1135         struct nfs4_client *clp = s->sc_client;
1136
1137         lockdep_assert_held(&clp->cl_lock);
1138
1139         WARN_ON_ONCE(!list_empty(&stp->st_locks));
1140
1141         if (!atomic_dec_and_test(&s->sc_count)) {
1142                 wake_up_all(&close_wq);
1143                 return;
1144         }
1145
1146         idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1147         list_add(&stp->st_locks, reaplist);
1148 }
1149
/* Unhash a lock stateid; caller holds the owning client's cl_lock. */
static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	list_del_init(&stp->st_locks);
	nfs4_unhash_stid(&stp->st_stid);
	return unhash_ol_stateid(stp);
}
1158
/*
 * Unhash a lock stateid and, if we were the ones to unhash it, drop
 * the hash table's reference to it (outside cl_lock).
 */
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_client *clp = stp->st_stid.sc_client;
	bool unhashed;

	spin_lock(&clp->cl_lock);
	unhashed = unhash_lock_stateid(stp);
	spin_unlock(&clp->cl_lock);
	if (unhashed)
		nfs4_put_stid(&stp->st_stid);
}
1170
/* Remove a lockowner from the owner-string hash; cl_lock held. */
static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&lo->lo_owner.so_strhash);
}
1179
1180 /*
1181  * Free a list of generic stateids that were collected earlier after being
1182  * fully unhashed.
1183  */
1184 static void
1185 free_ol_stateid_reaplist(struct list_head *reaplist)
1186 {
1187         struct nfs4_ol_stateid *stp;
1188         struct nfs4_file *fp;
1189
1190         might_sleep();
1191
1192         while (!list_empty(reaplist)) {
1193                 stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
1194                                        st_locks);
1195                 list_del(&stp->st_locks);
1196                 fp = stp->st_stid.sc_file;
1197                 stp->st_stid.sc_free(&stp->st_stid);
1198                 if (fp)
1199                         put_nfs4_file(fp);
1200         }
1201 }
1202
/*
 * Unhash every lock stateid hanging off an open stateid and move them
 * onto the caller's reaplist.  Caller holds the client's cl_lock.
 */
static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;

	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				struct nfs4_ol_stateid, st_locks);
		/* Still linked on st_locks, so it can't be unhashed yet. */
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, reaplist);
	}
}
1217
/*
 * Unhash an open stateid and all of its lock stateids; cl_lock held.
 * Returns whether the open stateid itself was still hashed.
 */
static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{
	bool unhashed;

	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	unhashed = unhash_ol_stateid(stp);
	release_open_stateid_locks(stp, reaplist);
	return unhashed;
}
1229
/* Fully tear down one open stateid: unhash under cl_lock, free after. */
static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	LIST_HEAD(reaplist);

	spin_lock(&stp->st_stid.sc_client->cl_lock);
	if (unhash_open_stateid(stp, &reaplist))
		put_ol_stateid_locked(stp, &reaplist);
	spin_unlock(&stp->st_stid.sc_client->cl_lock);
	/* Freeing may sleep, so it happens after cl_lock is dropped. */
	free_ol_stateid_reaplist(&reaplist);
}
1240
/* Remove an openowner from the hash and per-client lists; cl_lock held. */
static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
	struct nfs4_client *clp = oo->oo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
}
1250
/*
 * Detach and put the stateid this openowner cached from its last CLOSE
 * (presumably retained for seqid replay — see oo_close_lru users).
 */
static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	/* Drop the reference outside client_lock. */
	if (s)
		nfs4_put_stid(&s->st_stid);
}
1267
/* Tear down an openowner and every open stateid it still owns. */
static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		if (unhash_open_stateid(stp, &reaplist))
			put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	/* Free the collected stateids outside cl_lock (may sleep). */
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}
1289
1290 static inline int
1291 hash_sessionid(struct nfs4_sessionid *sessionid)
1292 {
1293         struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
1294
1295         return sid->sequence % SESSION_HASH_SIZE;
1296 }
1297
#ifdef CONFIG_SUNRPC_DEBUG
/* Debug helper: print a sessionid as four 32-bit words. */
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
/* No-op stub when SUNRPC debugging is compiled out. */
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif
1311
1312 /*
1313  * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1314  * won't be used for replay.
1315  */
1316 void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
1317 {
1318         struct nfs4_stateowner *so = cstate->replay_owner;
1319
1320         if (nfserr == nfserr_replay_me)
1321                 return;
1322
1323         if (!seqid_mutating_err(ntohl(nfserr))) {
1324                 nfsd4_cstate_clear_replay(cstate);
1325                 return;
1326         }
1327         if (!so)
1328                 return;
1329         if (so->so_is_open_owner)
1330                 release_last_closed_stateid(openowner(so));
1331         so->so_seqid++;
1332         return;
1333 }
1334
/*
 * Fill in a new sessionid: the clientid plus a global sequence number.
 * NOTE(review): current_sessionid++ is not locked here; presumably the
 * caller's locking serializes session creation — confirm.
 */
static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}
1346
1347 /*
1348  * The protocol defines ca_maxresponssize_cached to include the size of
1349  * the rpc header, but all we need to cache is the data starting after
1350  * the end of the initial SEQUENCE operation--the rest we regenerate
1351  * each time.  Therefore we can advertise a ca_maxresponssize_cached
1352  * value that is the number of bytes in our cache plus a few additional
1353  * bytes.  In order to stay on the safe side, and not promise more than
1354  * we can cache, those additional bytes must be the minimum possible: 24
1355  * bytes of rpc header (xid through accept state, with AUTH_NULL
1356  * verifier), 12 for the compound header (with zero-length tag), and 44
1357  * for the SEQUENCE op response:
1358  */
1359 #define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
1360
1361 static void
1362 free_session_slots(struct nfsd4_session *ses)
1363 {
1364         int i;
1365
1366         for (i = 0; i < ses->se_fchannel.maxreqs; i++)
1367                 kfree(ses->se_slots[i]);
1368 }
1369
1370 /*
1371  * We don't actually need to cache the rpc and session headers, so we
1372  * can allocate a little less for each slot:
1373  */
1374 static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1375 {
1376         u32 size;
1377
1378         if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1379                 size = 0;
1380         else
1381                 size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1382         return size + sizeof(struct nfsd4_slot);
1383 }
1384
1385 /*
1386  * XXX: If we run out of reserved DRC memory we could (up to a point)
1387  * re-negotiate active sessions and reduce their slot usage to make
1388  * room for new connections. For now we just fail the create session.
1389  */
1390 static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
1391 {
1392         u32 slotsize = slot_bytes(ca);
1393         u32 num = ca->maxreqs;
1394         unsigned long avail, total_avail;
1395
1396         spin_lock(&nfsd_drc_lock);
1397         total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
1398         avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
1399         /*
1400          * Never use more than a third of the remaining memory,
1401          * unless it's the only way to give this client a slot:
1402          */
1403         avail = clamp_t(unsigned long, avail, slotsize, total_avail/3);
1404         num = min_t(int, num, avail / slotsize);
1405         nfsd_drc_mem_used += num * slotsize;
1406         spin_unlock(&nfsd_drc_lock);
1407
1408         return num;
1409 }
1410
/* Return the DRC memory reserved by nfsd4_get_drc_mem() to the pool. */
static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
	int slotsize = slot_bytes(ca);

	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
	spin_unlock(&nfsd_drc_lock);
}
1419
/*
 * Allocate a session, its slot-pointer table, and one reply-cache slot
 * per request.  Returns NULL on allocation failure, undoing any partial
 * slot allocations on the way out.
 */
static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
					   struct nfsd4_channel_attrs *battrs)
{
	int numslots = fattrs->maxreqs;
	int slotsize = slot_bytes(fattrs);
	struct nfsd4_session *new;
	int mem, i;

	/* Session struct plus the maximal slot-pointer array must fit a page. */
	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}

	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));

	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}
1452
/* Drop a connection's transport reference and free the connection. */
static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}
1458
/*
 * Transport-teardown callback (see nfsd4_register_conn): unlink the
 * connection from its session if it is still linked, free it, and
 * re-probe the client's callback channel.
 */
static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	nfsd4_probe_callback(clp);
	spin_unlock(&clp->cl_lock);
}
1472
1473 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1474 {
1475         struct nfsd4_conn *conn;
1476
1477         conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1478         if (!conn)
1479                 return NULL;
1480         svc_xprt_get(rqstp->rq_xprt);
1481         conn->cn_xprt = rqstp->rq_xprt;
1482         conn->cn_flags = flags;
1483         INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1484         return conn;
1485 }
1486
/* Link a connection into its session's list; caller holds cl_lock. */
static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}
1492
/* Locking wrapper around __nfsd4_hash_conn(). */
static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}
1501
/*
 * Arrange for nfsd4_conn_lost() to run when the transport goes down.
 * Returns nonzero if the transport is already down (see the caller's
 * handling in nfsd4_init_conn()).
 */
static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}
1507
/* Hash a new connection into a session and hook up transport teardown. */
static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	int ret;

	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	/* We may have gained or lost a callback channel: */
	nfsd4_probe_callback_sync(ses->se_client);
}
1520
1521 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1522 {
1523         u32 dir = NFS4_CDFC4_FORE;
1524
1525         if (cses->flags & SESSION4_BACK_CHAN)
1526                 dir |= NFS4_CDFC4_BACK;
1527         return alloc_conn(rqstp, dir);
1528 }
1529
/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	/*
	 * cl_lock is dropped around each unregister/free and retaken to
	 * pick the next connection off the (possibly changed) list.
	 */
	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}
1549
/* Free a session's slots and the session itself; no unlinking done. */
static void __free_session(struct nfsd4_session *ses)
{
	free_session_slots(ses);
	kfree(ses);
}
1555
/* Fully destroy a session: connections, DRC reservation, then memory. */
static void free_session(struct nfsd4_session *ses)
{
	nfsd4_del_conns(ses);
	nfsd4_put_drc_mem(&ses->se_fchannel);
	__free_session(ses);
}
1562
/*
 * Initialize a freshly allocated session from the CREATE_SESSION args,
 * hash it by sessionid, and link it into the client's session list.
 * NOTE(review): the sessionid_hashtbl insertion appears to rely on the
 * caller holding client_lock — confirm against callers.
 */
static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
	int idx;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new->se_client = clp;
	gen_sessionid(new);

	INIT_LIST_HEAD(&new->se_conns);

	new->se_cb_seq_nr = 1;
	new->se_flags = cses->flags;
	new->se_cb_prog = cses->callback_prog;
	new->se_cb_sec = cses->cb_sec;
	atomic_set(&new->se_ref, 0);
	idx = hash_sessionid(&new->se_sessionid);
	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
	spin_lock(&clp->cl_lock);
	list_add(&new->se_perclnt, &clp->cl_sessions);
	spin_unlock(&clp->cl_lock);

	{
		struct sockaddr *sa = svc_addr(rqstp);
		/*
		 * This is a little silly; with sessions there's no real
		 * use for the callback address.  Use the peer address
		 * as a reasonable default for now, but consider fixing
		 * the rpc client not to require an address in the
		 * future:
		 */
		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
	}
}
1597
1598 /* caller must hold client_lock */
1599 static struct nfsd4_session *
1600 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
1601 {
1602         struct nfsd4_session *elem;
1603         int idx;
1604         struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1605
1606         lockdep_assert_held(&nn->client_lock);
1607
1608         dump_sessionid(__func__, sessionid);
1609         idx = hash_sessionid(sessionid);
1610         /* Search in the appropriate list */
1611         list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
1612                 if (!memcmp(elem->se_sessionid.data, sessionid->data,
1613                             NFS4_MAX_SESSIONID_LEN)) {
1614                         return elem;
1615                 }
1616         }
1617
1618         dprintk("%s: session not found\n", __func__);
1619         return NULL;
1620 }
1621
1622 static struct nfsd4_session *
1623 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
1624                 __be32 *ret)
1625 {
1626         struct nfsd4_session *session;
1627         __be32 status = nfserr_badsession;
1628
1629         session = __find_in_sessionid_hashtbl(sessionid, net);
1630         if (!session)
1631                 goto out;
1632         status = nfsd4_get_session_locked(session);
1633         if (status)
1634                 session = NULL;
1635 out:
1636         *ret = status;
1637         return session;
1638 }
1639
/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	/* Remove from the sessionid hash, then from the client's list. */
	list_del(&ses->se_hash);
	spin_lock(&ses->se_client->cl_lock);
	list_del(&ses->se_perclnt);
	spin_unlock(&ses->se_client->cl_lock);
}
1654
1655 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1656 static int
1657 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1658 {
1659         /*
1660          * We're assuming the clid was not given out from a boot
1661          * precisely 2^32 (about 136 years) before this one.  That seems
1662          * a safe assumption:
1663          */
1664         if (clid->cl_boot == (u32)nn->boot_time)
1665                 return 0;
1666         dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
1667                 clid->cl_boot, clid->cl_id, nn->boot_time);
1668         return 1;
1669 }
1670
1671 /* 
1672  * XXX Should we use a slab cache ?
1673  * This type of memory management is somewhat inefficient, but we use it
1674  * anyway since SETCLIENTID is not a common operation.
1675  */
1676 static struct nfs4_client *alloc_client(struct xdr_netobj name)
1677 {
1678         struct nfs4_client *clp;
1679         int i;
1680
1681         clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
1682         if (clp == NULL)
1683                 return NULL;
1684         clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
1685         if (clp->cl_name.data == NULL)
1686                 goto err_no_name;
1687         clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
1688                         OWNER_HASH_SIZE, GFP_KERNEL);
1689         if (!clp->cl_ownerstr_hashtbl)
1690                 goto err_no_hashtbl;
1691         for (i = 0; i < OWNER_HASH_SIZE; i++)
1692                 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
1693         clp->cl_name.len = name.len;
1694         INIT_LIST_HEAD(&clp->cl_sessions);
1695         idr_init(&clp->cl_stateids);
1696         atomic_set(&clp->cl_refcount, 0);
1697         clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1698         INIT_LIST_HEAD(&clp->cl_idhash);
1699         INIT_LIST_HEAD(&clp->cl_openowners);
1700         INIT_LIST_HEAD(&clp->cl_delegations);
1701         INIT_LIST_HEAD(&clp->cl_lru);
1702         INIT_LIST_HEAD(&clp->cl_revoked);
1703 #ifdef CONFIG_NFSD_PNFS
1704         INIT_LIST_HEAD(&clp->cl_lo_states);
1705 #endif
1706         spin_lock_init(&clp->cl_lock);
1707         rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1708         return clp;
1709 err_no_hashtbl:
1710         kfree(clp->cl_name.data);
1711 err_no_name:
1712         kfree(clp);
1713         return NULL;
1714 }
1715
/*
 * Final destruction of a client.  Any sessions still linked are torn
 * down here; a nonzero session refcount at this point is a bug (WARN).
 */
static void
free_client(struct nfs4_client *clp)
{
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				se_perclnt);
		list_del(&ses->se_perclnt);
		WARN_ON_ONCE(atomic_read(&ses->se_ref));
		free_session(ses);
	}
	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
	free_svc_cred(&clp->cl_cred);
	kfree(clp->cl_ownerstr_hashtbl);
	kfree(clp->cl_name.data);
	idr_destroy(&clp->cl_stateids);
	kfree(clp);
}
1734
/* must be called under the client_lock */
static void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_session *ses;

	lockdep_assert_held(&nn->client_lock);

	/* Mark the client as expired! */
	clp->cl_time = 0;
	/* Make it invisible */
	if (!list_empty(&clp->cl_idhash)) {
		list_del_init(&clp->cl_idhash);
		/* The name tree to erase from depends on confirmation state. */
		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
		else
			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	}
	list_del_init(&clp->cl_lru);
	/* Unhash all of the client's sessions as well. */
	spin_lock(&clp->cl_lock);
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}
1760
/* Locking wrapper around unhash_client_locked(). */
static void
unhash_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	unhash_client_locked(clp);
	spin_unlock(&nn->client_lock);
}
1770
/*
 * Expire the client only if nothing else holds a reference to it;
 * returns nfserr_jukebox if it is still in use.  Caller holds the
 * client_lock (see unhash_client_locked()).
 */
static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
	if (atomic_read(&clp->cl_refcount))
		return nfserr_jukebox;
	unhash_client_locked(clp);
	return nfs_ok;
}
1778
/*
 * Release everything a (already unhashed) client still owns:
 * delegations, revoked delegations, openowners (and through them all
 * stateids), layouts, and the callback machinery; then free it.
 */
static void
__destroy_client(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);
	/* Collect delegations under state_lock, put them afterwards. */
	spin_lock(&state_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		put_clnt_odstate(dp->dl_clnt_odstate);
		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_revoked)) {
		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		/* Hold the owner across release so it can't vanish under us. */
		nfs4_get_stateowner(&oo->oo_owner);
		release_openowner(oo);
	}
	nfsd4_return_all_client_layouts(clp);
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	free_client(clp);
}
1817
/* Unhash and destroy a client, keeping its stable recovery record. */
static void
destroy_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	__destroy_client(clp);
}
1824
/*
 * Like destroy_client(), but also removes the client's persistent
 * record (nfsd4_client_record_remove) so it won't be allowed to reclaim.
 */
static void expire_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	nfsd4_client_record_remove(clp);
	__destroy_client(clp);
}
1831
/* Copy a client verifier into the client record (fixed-size array). */
static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
			sizeof(target->cl_verifier.data));
}
1837
1838 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
1839 {
1840         target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; 
1841         target->cl_clientid.cl_id = source->cl_clientid.cl_id; 
1842 }
1843
/*
 * Duplicate an RPC credential into @target, taking its own references:
 * the principal string is kstrdup()ed, and the group_info and gss mech
 * refcounts are bumped.  Returns 0 on success, or -ENOMEM if the
 * principal string cannot be copied.
 */
static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
        if (source->cr_principal) {
                target->cr_principal =
                                kstrdup(source->cr_principal, GFP_KERNEL);
                if (target->cr_principal == NULL)
                        return -ENOMEM;
        } else
                target->cr_principal = NULL;
        target->cr_flavor = source->cr_flavor;
        target->cr_uid = source->cr_uid;
        target->cr_gid = source->cr_gid;
        target->cr_group_info = source->cr_group_info;
        get_group_info(target->cr_group_info);
        target->cr_gss_mech = source->cr_gss_mech;
        if (source->cr_gss_mech)
                gss_mech_get(source->cr_gss_mech);
        return 0;
}
1863
1864 static int
1865 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
1866 {
1867         if (o1->len < o2->len)
1868                 return -1;
1869         if (o1->len > o2->len)
1870                 return 1;
1871         return memcmp(o1->data, o2->data, o1->len);
1872 }
1873
1874 static int same_name(const char *n1, const char *n2)
1875 {
1876         return 0 == memcmp(n1, n2, HEXDIR_LEN);
1877 }
1878
1879 static int
1880 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
1881 {
1882         return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
1883 }
1884
1885 static int
1886 same_clid(clientid_t *cl1, clientid_t *cl2)
1887 {
1888         return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
1889 }
1890
1891 static bool groups_equal(struct group_info *g1, struct group_info *g2)
1892 {
1893         int i;
1894
1895         if (g1->ngroups != g2->ngroups)
1896                 return false;
1897         for (i=0; i<g1->ngroups; i++)
1898                 if (!gid_eq(GROUP_AT(g1, i), GROUP_AT(g2, i)))
1899                         return false;
1900         return true;
1901 }
1902
1903 /*
1904  * RFC 3530 language requires clid_inuse be returned when the
1905  * "principal" associated with a requests differs from that previously
1906  * used.  We use uid, gid's, and gss principal string as our best
1907  * approximation.  We also don't want to allow non-gss use of a client
1908  * established using gss: in theory cr_principal should catch that
1909  * change, but in practice cr_principal can be null even in the gss case
1910  * since gssd doesn't always pass down a principal string.
1911  */
1912 static bool is_gss_cred(struct svc_cred *cr)
1913 {
1914         /* Is cr_flavor one of the gss "pseudoflavors"?: */
1915         return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
1916 }
1917
1918
1919 static bool
1920 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
1921 {
1922         if ((is_gss_cred(cr1) != is_gss_cred(cr2))
1923                 || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
1924                 || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
1925                 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
1926                 return false;
1927         if (cr1->cr_principal == cr2->cr_principal)
1928                 return true;
1929         if (!cr1->cr_principal || !cr2->cr_principal)
1930                 return false;
1931         return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
1932 }
1933
/*
 * Was this request protected by GSS integrity or privacy?  Non-gss
 * requests (no cr_gss_mech) are never integrity protected.
 */
static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
{
        struct svc_cred *cr = &rqstp->rq_cred;
        u32 service;

        if (!cr->cr_gss_mech)
                return false;
        service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
        return service == RPC_GSS_SVC_INTEGRITY ||
               service == RPC_GSS_SVC_PRIVACY;
}
1945
/*
 * Enforce SP4_MACH_CRED: if the client requested machine-credential
 * protection at EXCHANGE_ID time (cl_mach_cred set), this request must
 * use the same gss mechanism and principal that established the client,
 * and must be integrity- or privacy-protected.  Clients without
 * cl_mach_cred always match.
 */
static bool mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{
        struct svc_cred *cr = &rqstp->rq_cred;

        if (!cl->cl_mach_cred)
                return true;
        if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
                return false;
        if (!svc_rqst_integrity_protected(rqstp))
                return false;
        if (!cr->cr_principal)
                return false;
        return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
}
1960
/*
 * Generate a fresh confirm verifier for @clp from the current time and
 * a per-net counter.
 */
static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
{
        __be32 verf[2];

        /*
         * This is opaque to client, so no need to byte-swap. Use
         * __force to keep sparse happy
         */
        verf[0] = (__force __be32)get_seconds();
        verf[1] = (__force __be32)nn->clverifier_counter++;
        memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}
1973
/*
 * Assign a new clientid: this server instance's boot time plus a
 * per-net counter, with a matching confirm verifier.
 */
static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
        clp->cl_clientid.cl_boot = nn->boot_time;
        clp->cl_clientid.cl_id = nn->clientid_counter++;
        gen_confirm(clp, nn);
}
1980
1981 static struct nfs4_stid *
1982 find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
1983 {
1984         struct nfs4_stid *ret;
1985
1986         ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
1987         if (!ret || !ret->sc_type)
1988                 return NULL;
1989         return ret;
1990 }
1991
1992 static struct nfs4_stid *
1993 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
1994 {
1995         struct nfs4_stid *s;
1996
1997         spin_lock(&cl->cl_lock);
1998         s = find_stateid_locked(cl, t);
1999         if (s != NULL) {
2000                 if (typemask & s->sc_type)
2001                         atomic_inc(&s->sc_count);
2002                 else
2003                         s = NULL;
2004         }
2005         spin_unlock(&cl->cl_lock);
2006         return s;
2007 }
2008
/*
 * Allocate and initialize an nfs4_client for @name, copying the
 * request's credential and source address into it.  Returns NULL on
 * allocation failure; the caller is responsible for hashing or
 * destroying the result.
 */
static struct nfs4_client *create_client(struct xdr_netobj name,
                struct svc_rqst *rqstp, nfs4_verifier *verf)
{
        struct nfs4_client *clp;
        struct sockaddr *sa = svc_addr(rqstp);
        int ret;
        struct net *net = SVC_NET(rqstp);

        clp = alloc_client(name);
        if (clp == NULL)
                return NULL;

        ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
        if (ret) {
                /* copy_cred failed (-ENOMEM); undo the allocation */
                free_client(clp);
                return NULL;
        }
        nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
        clp->cl_time = get_seconds();
        clear_bit(0, &clp->cl_cb_slot_busy);
        copy_verf(clp, verf);
        rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
        clp->cl_cb_session = NULL;
        clp->net = net;
        return clp;
}
2035
/*
 * Insert a client into an rbtree ordered by cl_name (compare_blob
 * ordering).  No duplicate check is done here; callers look the name up
 * first.
 */
static void
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;
        struct nfs4_client *clp;

        /* Standard rbtree descent to the insertion point */
        while (*new) {
                clp = rb_entry(*new, struct nfs4_client, cl_namenode);
                parent = *new;

                if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
                        new = &((*new)->rb_left);
                else
                        new = &((*new)->rb_right);
        }

        rb_link_node(&new_clp->cl_namenode, parent, new);
        rb_insert_color(&new_clp->cl_namenode, root);
}
2055
/*
 * Binary-search a name rbtree for the client whose cl_name equals
 * @name (compare_blob ordering).  Returns NULL when not found.
 */
static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
{
        int cmp;
        struct rb_node *node = root->rb_node;
        struct nfs4_client *clp;

        while (node) {
                clp = rb_entry(node, struct nfs4_client, cl_namenode);
                cmp = compare_blob(&clp->cl_name, name);
                if (cmp > 0)
                        node = node->rb_left;
                else if (cmp < 0)
                        node = node->rb_right;
                else
                        return clp;
        }
        return NULL;
}
2075
/*
 * Hash a client into the per-net unconfirmed name tree and id hash
 * table, clearing its CONFIRMED flag and renewing its lease.  Caller
 * must hold nn->client_lock (asserted below).
 */
static void
add_to_unconfirmed(struct nfs4_client *clp)
{
        unsigned int idhashval;
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);

        clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
        add_clp_to_name_tree(clp, &nn->unconf_name_tree);
        idhashval = clientid_hashval(clp->cl_clientid.cl_id);
        list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
        renew_client_locked(clp);
}
2090
/*
 * Move a client from the unconfirmed tables to the confirmed ones,
 * setting its CONFIRMED flag and renewing its lease.  Caller must hold
 * nn->client_lock (asserted below).
 */
static void
move_to_confirmed(struct nfs4_client *clp)
{
        unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
        struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

        lockdep_assert_held(&nn->client_lock);

        dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
        list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
        rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
        add_clp_to_name_tree(clp, &nn->conf_name_tree);
        set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
        renew_client_locked(clp);
}
2106
/*
 * Scan one id-hash chain for a client with a matching clientid.  The
 * @sessions flag must agree with whether the client uses sessions
 * (nonzero cl_minorversion): a v4.0 lookup must not find a v4.1+ client
 * and vice versa.  Renews the client's lease on a hit.
 */
static struct nfs4_client *
find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
{
        struct nfs4_client *clp;
        unsigned int idhashval = clientid_hashval(clid->cl_id);

        list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
                if (same_clid(&clp->cl_clientid, clid)) {
                        if ((bool)clp->cl_minorversion != sessions)
                                return NULL;
                        renew_client_locked(clp);
                        return clp;
                }
        }
        return NULL;
}
2123
/* Look up a clientid among confirmed clients; caller holds client_lock. */
static struct nfs4_client *
find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
        struct list_head *tbl = nn->conf_id_hashtbl;

        lockdep_assert_held(&nn->client_lock);
        return find_client_in_id_table(tbl, clid, sessions);
}
2132
/* Look up a clientid among unconfirmed clients; caller holds client_lock. */
static struct nfs4_client *
find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
        struct list_head *tbl = nn->unconf_id_hashtbl;

        lockdep_assert_held(&nn->client_lock);
        return find_client_in_id_table(tbl, clid, sessions);
}
2141
2142 static bool clp_used_exchangeid(struct nfs4_client *clp)
2143 {
2144         return clp->cl_exchange_flags != 0;
2145
2146
/* Look up a client name in the confirmed tree; caller holds client_lock. */
static struct nfs4_client *
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
        lockdep_assert_held(&nn->client_lock);
        return find_clp_in_name_tree(name, &nn->conf_name_tree);
}
2153
/* Look up a client name in the unconfirmed tree; caller holds client_lock. */
static struct nfs4_client *
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
        lockdep_assert_held(&nn->client_lock);
        return find_clp_in_name_tree(name, &nn->unconf_name_tree);
}
2160
2161 static void
2162 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
2163 {
2164         struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
2165         struct sockaddr *sa = svc_addr(rqstp);
2166         u32 scopeid = rpc_get_scope_id(sa);
2167         unsigned short expected_family;
2168
2169         /* Currently, we only support tcp and tcp6 for the callback channel */
2170         if (se->se_callback_netid_len == 3 &&
2171             !memcmp(se->se_callback_netid_val, "tcp", 3))
2172                 expected_family = AF_INET;
2173         else if (se->se_callback_netid_len == 4 &&
2174                  !memcmp(se->se_callback_netid_val, "tcp6", 4))
2175                 expected_family = AF_INET6;
2176         else
2177                 goto out_err;
2178
2179         conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
2180                                             se->se_callback_addr_len,
2181                                             (struct sockaddr *)&conn->cb_addr,
2182                                             sizeof(conn->cb_addr));
2183
2184         if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
2185                 goto out_err;
2186
2187         if (conn->cb_addr.ss_family == AF_INET6)
2188                 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
2189
2190         conn->cb_prog = se->se_callback_prog;
2191         conn->cb_ident = se->se_callback_ident;
2192         memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
2193         return;
2194 out_err:
2195         conn->cb_addr.ss_family = AF_UNSPEC;
2196         conn->cb_addrlen = 0;
2197         dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
2198                 "will not receive delegations\n",
2199                 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
2200
2201         return;
2202 }
2203
2204 /*
2205  * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
2206  */
2207 static void
2208 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
2209 {
2210         struct xdr_buf *buf = resp->xdr.buf;
2211         struct nfsd4_slot *slot = resp->cstate.slot;
2212         unsigned int base;
2213
2214         dprintk("--> %s slot %p\n", __func__, slot);
2215
2216         slot->sl_opcnt = resp->opcnt;
2217         slot->sl_status = resp->cstate.status;
2218
2219         slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
2220         if (nfsd4_not_cached(resp)) {
2221                 slot->sl_datalen = 0;
2222                 return;
2223         }
2224         base = resp->cstate.data_offset;
2225         slot->sl_datalen = buf->len - base;
2226         if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
2227                 WARN("%s: sessions DRC could not cache compound\n", __func__);
2228         return;
2229 }
2230
2231 /*
2232  * Encode the replay sequence operation from the slot values.
2233  * If cachethis is FALSE encode the uncached rep error on the next
2234  * operation which sets resp->p and increments resp->opcnt for
2235  * nfs4svc_encode_compoundres.
2236  *
2237  */
2238 static __be32
2239 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
2240                           struct nfsd4_compoundres *resp)
2241 {
2242         struct nfsd4_op *op;
2243         struct nfsd4_slot *slot = resp->cstate.slot;
2244
2245         /* Encode the replayed sequence operation */
2246         op = &args->ops[resp->opcnt - 1];
2247         nfsd4_encode_operation(resp, op);
2248
2249         /* Return nfserr_retry_uncached_rep in next operation. */
2250         if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
2251                 op = &args->ops[resp->opcnt++];
2252                 op->status = nfserr_retry_uncached_rep;
2253                 nfsd4_encode_operation(resp, op);
2254         }
2255         return op->status;
2256 }
2257
2258 /*
2259  * The sequence operation is not cached because we can use the slot and
2260  * session values.
2261  */
2262 static __be32
2263 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
2264                          struct nfsd4_sequence *seq)
2265 {
2266         struct nfsd4_slot *slot = resp->cstate.slot;
2267         struct xdr_stream *xdr = &resp->xdr;
2268         __be32 *p;
2269         __be32 status;
2270
2271         dprintk("--> %s slot %p\n", __func__, slot);
2272
2273         status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
2274         if (status)
2275                 return status;
2276
2277         p = xdr_reserve_space(xdr, slot->sl_datalen);
2278         if (!p) {
2279                 WARN_ON_ONCE(1);
2280                 return nfserr_serverfault;
2281         }
2282         xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
2283         xdr_commit_encode(xdr);
2284
2285         resp->opcnt = slot->sl_opcnt;
2286         return slot->sl_status;
2287 }
2288
2289 /*
2290  * Set the exchange_id flags returned by the server.
2291  */
2292 static void
2293 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
2294 {
2295 #ifdef CONFIG_NFSD_PNFS
2296         new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
2297 #else
2298         new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
2299 #endif
2300
2301         /* Referrals are supported, Migration is not. */
2302         new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
2303
2304         /* set the wire flags to return to client. */
2305         clid->flags = new->cl_exchange_flags;
2306 }
2307
/* Does any of the client's openowners still hold open stateids? */
static bool client_has_openowners(struct nfs4_client *clp)
{
        struct nfs4_openowner *oo;

        list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
                if (!list_empty(&oo->oo_owner.so_stateids))
                        return true;
        }
        return false;
}
2318
/*
 * Does the client still hold any state: open stateids, layouts (when
 * pNFS is built in), delegations, or sessions?
 */
static bool client_has_state(struct nfs4_client *clp)
{
        return client_has_openowners(clp)
#ifdef CONFIG_NFSD_PNFS
                || !list_empty(&clp->cl_lo_states)
#endif
                || !list_empty(&clp->cl_delegations)
                || !list_empty(&clp->cl_sessions);
}
2328
/*
 * EXCHANGE_ID operation (NFSv4.1+).  Establishes or updates a client
 * record; the numbered cases in the comments below refer to RFC 5661
 * section 18.35.4.
 */
__be32
nfsd4_exchange_id(struct svc_rqst *rqstp,
                  struct nfsd4_compound_state *cstate,
                  struct nfsd4_exchange_id *exid)
{
        struct nfs4_client *conf, *new;
        struct nfs4_client *unconf = NULL;
        __be32 status;
        char                    addr_str[INET6_ADDRSTRLEN];
        nfs4_verifier           verf = exid->verifier;
        struct sockaddr         *sa = svc_addr(rqstp);
        bool    update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
        struct nfsd_net         *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

        rpc_ntop(sa, addr_str, sizeof(addr_str));
        dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
                "ip_addr=%s flags %x, spa_how %d\n",
                __func__, rqstp, exid, exid->clname.len, exid->clname.data,
                addr_str, exid->flags, exid->spa_how);

        if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
                return nfserr_inval;

        switch (exid->spa_how) {
        case SP4_MACH_CRED:
                if (!svc_rqst_integrity_protected(rqstp))
                        return nfserr_inval;
                /* fallthrough: otherwise handled like SP4_NONE */
        case SP4_NONE:
                break;
        default:                                /* checked by xdr code */
                WARN_ON_ONCE(1);
                /* fallthrough */
        case SP4_SSV:
                return nfserr_encr_alg_unsupp;
        }

        /* Allocate the new client before taking the lock */
        new = create_client(exid->clname, rqstp, &verf);
        if (new == NULL)
                return nfserr_jukebox;

        /* Cases below refer to rfc 5661 section 18.35.4: */
        spin_lock(&nn->client_lock);
        conf = find_confirmed_client_by_name(&exid->clname, nn);
        if (conf) {
                bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
                bool verfs_match = same_verf(&verf, &conf->cl_verifier);

                if (update) {
                        if (!clp_used_exchangeid(conf)) { /* buggy client */
                                status = nfserr_inval;
                                goto out;
                        }
                        if (!mach_creds_match(conf, rqstp)) {
                                status = nfserr_wrong_cred;
                                goto out;
                        }
                        if (!creds_match) { /* case 9 */
                                status = nfserr_perm;
                                goto out;
                        }
                        if (!verfs_match) { /* case 8 */
                                status = nfserr_not_same;
                                goto out;
                        }
                        /* case 6 */
                        exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
                        goto out_copy;
                }
                if (!creds_match) { /* case 3 */
                        if (client_has_state(conf)) {
                                status = nfserr_clid_inuse;
                                goto out;
                        }
                        goto out_new;
                }
                if (verfs_match) { /* case 2 */
                        conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
                        goto out_copy;
                }
                /* case 5, client reboot */
                conf = NULL;
                goto out_new;
        }

        if (update) { /* case 7 */
                status = nfserr_noent;
                goto out;
        }

        unconf  = find_unconfirmed_client_by_name(&exid->clname, nn);
        if (unconf) /* case 4, possible retry or client restart */
                unhash_client_locked(unconf);

        /* case 1 (normal case) */
out_new:
        if (conf) {
                status = mark_client_expired_locked(conf);
                if (status)
                        goto out;
        }
        new->cl_minorversion = cstate->minorversion;
        new->cl_mach_cred = (exid->spa_how == SP4_MACH_CRED);

        gen_clid(new, nn);
        add_to_unconfirmed(new);
        /* After the swap, "conf" is the client we reply about and "new"
         * holds whatever record is left over to be expired below. */
        swap(new, conf);
out_copy:
        exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
        exid->clientid.cl_id = conf->cl_clientid.cl_id;

        exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
        nfsd4_set_ex_flags(conf, exid);

        dprintk("nfsd4_exchange_id seqid %d flags %x\n",
                conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
        status = nfs_ok;

out:
        spin_unlock(&nn->client_lock);
        /* Expire whichever records we no longer need, outside the lock */
        if (new)
                expire_client(new);
        if (unconf)
                expire_client(unconf);
        return status;
}
2453
2454 static __be32
2455 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
2456 {
2457         dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
2458                 slot_seqid);
2459
2460         /* The slot is in use, and no response has been sent. */
2461         if (slot_inuse) {
2462                 if (seqid == slot_seqid)
2463                         return nfserr_jukebox;
2464                 else
2465                         return nfserr_seq_misordered;
2466         }
2467         /* Note unsigned 32-bit arithmetic handles wraparound: */
2468         if (likely(seqid == slot_seqid + 1))
2469                 return nfs_ok;
2470         if (seqid == slot_seqid)
2471                 return nfserr_replay_cache;
2472         return nfserr_seq_misordered;
2473 }
2474
2475 /*
2476  * Cache the create session result into the create session single DRC
2477  * slot cache by saving the xdr structure. sl_seqid has been set.
2478  * Do this for solo or embedded create session operations.
2479  */
2480 static void
2481 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
2482                            struct nfsd4_clid_slot *slot, __be32 nfserr)
2483 {
2484         slot->sl_status = nfserr;
2485         memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
2486 }
2487
/*
 * Replay a cached CREATE_SESSION result: restore the saved structure
 * and return the saved status.
 */
static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
                            struct nfsd4_clid_slot *slot)
{
        memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
        return slot->sl_status;
}
2495
/*
 * Smallest legal fore-channel request/reply sizes, in bytes: just the
 * RPC header plus a minimal SEQUENCE operation (breakdown below).
 */
#define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
                        2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
                        1 +     /* MIN tag is length with zero, only length */ \
                        3 +     /* version, opcount, opcode */ \
                        XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
                                /* seqid, slotID, slotID, cache */ \
                        4 ) * sizeof(__be32))

#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
                        2 +     /* verifier: AUTH_NULL, length 0 */\
                        1 +     /* status */ \
                        1 +     /* MIN tag is length with zero, only length */ \
                        3 +     /* opcount, opcode, opstatus*/ \
                        XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
                                /* seqid, slotID, slotID, slotID, status */ \
                        5 ) * sizeof(__be32))
2512
/*
 * Clamp the client's proposed fore-channel attributes to server limits.
 * Returns nfserr_toosmall if a proposal is below our minimums, and
 * nfserr_jukebox if no DRC memory can be reserved for the session.
 */
static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
        u32 maxrpc = nn->nfsd_serv->sv_max_mesg;

        if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
                return nfserr_toosmall;
        if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
                return nfserr_toosmall;
        ca->headerpadsz = 0;
        ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
        ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
        ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
        ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
                        NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
        ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
        /*
         * Note decreasing slot size below client's request may make it
         * difficult for client to function correctly, whereas
         * decreasing the number of slots will (just?) affect
         * performance.  When short on memory we therefore prefer to
         * decrease number of slots instead of their size.  Clients that
         * request larger slots than they need will get poor results:
         */
        ca->maxreqs = nfsd4_get_drc_mem(ca);
        if (!ca->maxreqs)
                return nfserr_jukebox;

        return nfs_ok;
}
2542
/*
 * Worst-case backchannel CB_RECALL request/reply sizes in bytes, used
 * by check_backchannel_attrs() to validate the client's proposal.
 */
#define NFSD_CB_MAX_REQ_SZ      ((NFS4_enc_cb_recall_sz + \
                                 RPC_MAX_HEADER_WITH_AUTH) * sizeof(__be32))
#define NFSD_CB_MAX_RESP_SZ     ((NFS4_dec_cb_recall_sz + \
                                 RPC_MAX_REPHEADER_WITH_AUTH) * sizeof(__be32))
2547
/*
 * Validate the client's proposed back-channel attributes: request and
 * reply sizes must cover a worst-case CB_RECALL, and at least two ops
 * per compound are required.  Reply caching is disabled.
 */
static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
{
        ca->headerpadsz = 0;

        /*
         * These RPC_MAX_HEADER macros are overkill, especially since we
         * don't even do gss on the backchannel yet.  But this is still
         * less than 1k.  Tighten up this estimate in the unlikely event
         * it turns out to be a problem for some client:
         */
        if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
                return nfserr_toosmall;
        if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
                return nfserr_toosmall;
        ca->maxresp_cached = 0;
        if (ca->maxops < 2)
                return nfserr_toosmall;

        return nfs_ok;
}
2568
2569 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
2570 {
2571         switch (cbs->flavor) {
2572         case RPC_AUTH_NULL:
2573         case RPC_AUTH_UNIX:
2574                 return nfs_ok;
2575         default:
2576                 /*
2577                  * GSS case: the spec doesn't allow us to return this
2578                  * error.  But it also doesn't allow us not to support
2579                  * GSS.
2580                  * I'd rather this fail hard than return some error the
2581                  * client might think it can already handle:
2582                  */
2583                 return nfserr_encr_alg_unsupp;
2584         }
2585 }
2586
/*
 * CREATE_SESSION operation (NFSv4.1+).  Validates the proposed channel
 * attributes, then either replays a cached result (confirmed client,
 * same seqid), confirms an unconfirmed client and builds a new session,
 * or fails.  Cleanup on error unwinds the session, connection and DRC
 * memory reserved along the way.
 */
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
                     struct nfsd4_compound_state *cstate,
                     struct nfsd4_create_session *cr_ses)
{
        struct sockaddr *sa = svc_addr(rqstp);
        struct nfs4_client *conf, *unconf;
        struct nfs4_client *old = NULL;
        struct nfsd4_session *new;
        struct nfsd4_conn *conn;
        struct nfsd4_clid_slot *cs_slot = NULL;
        __be32 status = 0;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

        if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
                return nfserr_inval;
        status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
        if (status)
                return status;
        /* check_forechannel_attrs also reserves DRC memory on success */
        status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
        if (status)
                return status;
        status = check_backchannel_attrs(&cr_ses->back_channel);
        if (status)
                goto out_release_drc_mem;
        status = nfserr_jukebox;
        new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
        if (!new)
                goto out_release_drc_mem;
        conn = alloc_conn_from_crses(rqstp, cr_ses);
        if (!conn)
                goto out_free_session;

        spin_lock(&nn->client_lock);
        unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
        conf = find_confirmed_client(&cr_ses->clientid, true, nn);
        WARN_ON_ONCE(conf && unconf);

        if (conf) {
                /* Confirmed client: detect replays via the clid slot */
                status = nfserr_wrong_cred;
                if (!mach_creds_match(conf, rqstp))
                        goto out_free_conn;
                cs_slot = &conf->cl_cs_slot;
                status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
                if (status) {
                        if (status == nfserr_replay_cache)
                                status = nfsd4_replay_create_session(cr_ses, cs_slot);
                        goto out_free_conn;
                }
        } else if (unconf) {
                /* Unconfirmed client: CREATE_SESSION also confirms it */
                if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
                    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
                        status = nfserr_clid_inuse;
                        goto out_free_conn;
                }
                status = nfserr_wrong_cred;
                if (!mach_creds_match(unconf, rqstp))
                        goto out_free_conn;
                cs_slot = &unconf->cl_cs_slot;
                status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
                if (status) {
                        /* an unconfirmed replay returns misordered */
                        status = nfserr_seq_misordered;
                        goto out_free_conn;
                }
                /* An old confirmed client of the same name must expire */
                old = find_confirmed_client_by_name(&unconf->cl_name, nn);
                if (old) {
                        status = mark_client_expired_locked(old);
                        if (status) {
                                old = NULL;
                                goto out_free_conn;
                        }
                }
                move_to_confirmed(unconf);
                conf = unconf;
        } else {
                status = nfserr_stale_clientid;
                goto out_free_conn;
        }
        status = nfs_ok;
        /*
         * We do not support RDMA or persistent sessions
         */
        cr_ses->flags &= ~SESSION4_PERSIST;
        cr_ses->flags &= ~SESSION4_RDMA;

        init_session(rqstp, new, conf, cr_ses);
        nfsd4_get_session_locked(new);

        memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
               NFS4_MAX_SESSIONID_LEN);
        cs_slot->sl_seqid++;
        cr_ses->seqid = cs_slot->sl_seqid;

        /* cache solo and embedded create sessions under the client_lock */
        nfsd4_cache_create_session(cr_ses, cs_slot, status);
        spin_unlock(&nn->client_lock);
        /* init connection and backchannel */
        nfsd4_init_conn(rqstp, conn, new);
        nfsd4_put_session(new);
        if (old)
                expire_client(old);
        return status;
out_free_conn:
        spin_unlock(&nn->client_lock);
        free_conn(conn);
        if (old)
                expire_client(old);
out_free_session:
        __free_session(new);
out_release_drc_mem:
        nfsd4_put_drc_mem(&cr_ses->fore_channel);
        return status;
}
2701
2702 static __be32 nfsd4_map_bcts_dir(u32 *dir)
2703 {
2704         switch (*dir) {
2705         case NFS4_CDFC4_FORE:
2706         case NFS4_CDFC4_BACK:
2707                 return nfs_ok;
2708         case NFS4_CDFC4_FORE_OR_BOTH:
2709         case NFS4_CDFC4_BACK_OR_BOTH:
2710                 *dir = NFS4_CDFC4_BOTH;
2711                 return nfs_ok;
2712         };
2713         return nfserr_inval;
2714 }
2715
/*
 * NFSv4.1 BACKCHANNEL_CTL: update the callback RPC program number and
 * security parameters of the current session, then re-probe the
 * backchannel so the new settings take effect.
 */
__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
{
	struct nfsd4_session *session = cstate->session;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	__be32 status;

	/* reject unsupported/invalid callback security parameters up front */
	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
	if (status)
		return status;
	/* client_lock serializes updates of the session callback fields */
	spin_lock(&nn->client_lock);
	session->se_cb_prog = bc->bc_cb_program;
	session->se_cb_sec = bc->bc_cb_sec;
	spin_unlock(&nn->client_lock);

	/* kick off a test of the backchannel with the new parameters */
	nfsd4_probe_callback(session->se_client);

	return nfs_ok;
}
2734
/*
 * NFSv4.1 BIND_CONN_TO_SESSION: bind the connection this request
 * arrived on to the session named in @bcts, in the requested
 * direction(s).  Must be the only operation in its compound.
 */
__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_bind_conn_to_session *bcts)
{
	__be32 status;
	struct nfsd4_conn *conn;
	struct nfsd4_session *session;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	/* on success, find_in_sessionid_hashtbl returns a referenced session */
	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
	spin_unlock(&nn->client_lock);
	if (!session)
		goto out_no_session;
	status = nfserr_wrong_cred;
	if (!mach_creds_match(session->se_client, rqstp))
		goto out;
	/* map FORE_OR_BOTH/BACK_OR_BOTH to BOTH; reject unknown values */
	status = nfsd4_map_bcts_dir(&bcts->dir);
	if (status)
		goto out;
	conn = alloc_conn(rqstp, bcts->dir);
	status = nfserr_jukebox;
	if (!conn)
		goto out;
	nfsd4_init_conn(rqstp, conn, session);
	status = nfs_ok;
out:
	/* drop the reference taken by find_in_sessionid_hashtbl */
	nfsd4_put_session(session);
out_no_session:
	return status;
}
2769
2770 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
2771 {
2772         if (!session)
2773                 return 0;
2774         return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
2775 }
2776
/*
 * NFSv4.1 DESTROY_SESSION: tear down the session named in @sessionid.
 *
 * If the compound is running on the session being destroyed, it must be
 * the last operation in the compound, and ref_held_by_me accounts for
 * the reference the compound itself holds when checking whether the
 * session can be marked dead.
 */
__be32
nfsd4_destroy_session(struct svc_rqst *r,
		      struct nfsd4_compound_state *cstate,
		      struct nfsd4_destroy_session *sessionid)
{
	struct nfsd4_session *ses;
	__be32 status;
	int ref_held_by_me = 0;
	struct net *net = SVC_NET(r);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	status = nfserr_not_only_op;
	if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
		if (!nfsd4_last_compound_op(r))
			goto out;
		ref_held_by_me++;
	}
	dump_sessionid(__func__, &sessionid->sessionid);
	spin_lock(&nn->client_lock);
	/* takes a session reference on success */
	ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
	if (!ses)
		goto out_client_lock;
	status = nfserr_wrong_cred;
	if (!mach_creds_match(ses->se_client, r))
		goto out_put_session;
	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
	if (status)
		goto out_put_session;
	unhash_session(ses);
	/* drop the lock for the synchronous callback probe below */
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback_sync(ses->se_client);

	spin_lock(&nn->client_lock);
	status = nfs_ok;
out_put_session:
	/* releases the find_in_sessionid_hashtbl reference */
	nfsd4_put_session_locked(ses);
out_client_lock:
	spin_unlock(&nn->client_lock);
out:
	return status;
}
2819
2820 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
2821 {
2822         struct nfsd4_conn *c;
2823
2824         list_for_each_entry(c, &s->se_conns, cn_persession) {
2825                 if (c->cn_xprt == xpt) {
2826                         return c;
2827                 }
2828         }
2829         return NULL;
2830 }
2831
/*
 * Make sure the connection a SEQUENCE arrived on is bound to its
 * session, hashing @new into the session if it isn't already.
 *
 * Consumes @new in all cases: it is either hashed into the session's
 * connection list or freed here.  For clients that required machine
 * credential enforcement (cl_mach_cred), an unknown connection is
 * rejected with nfserr_conn_not_bound_to_session instead of being
 * implicitly bound.
 */
static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_conn *c;
	__be32 status = nfs_ok;
	int ret;

	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(new->cn_xprt, ses);
	if (c)
		goto out_free;	/* already bound; discard @new, return nfs_ok */
	status = nfserr_conn_not_bound_to_session;
	if (clp->cl_mach_cred)
		goto out_free;
	__nfsd4_hash_conn(new, ses);
	spin_unlock(&clp->cl_lock);
	ret = nfsd4_register_conn(new);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&new->cn_xpt_user);
	return nfs_ok;
out_free:
	spin_unlock(&clp->cl_lock);
	free_conn(new);
	return status;
}
2858
2859 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
2860 {
2861         struct nfsd4_compoundargs *args = rqstp->rq_argp;
2862
2863         return args->opcnt > session->se_fchannel.maxops;
2864 }
2865
2866 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
2867                                   struct nfsd4_session *session)
2868 {
2869         struct xdr_buf *xb = &rqstp->rq_arg;
2870
2871         return xb->len > session->se_fchannel.maxreq_sz;
2872 }
2873
/*
 * NFSv4.1 SEQUENCE: validate the session/slot/seqid triple, detect and
 * serve replays from the slot's reply cache, bind the connection to the
 * session, and record slot/session/client in cstate for the rest of
 * the compound.  On success (and on a replay) the session reference
 * taken here is dropped later by nfsd4_sequence_done().
 */
__be32
nfsd4_sequence(struct svc_rqst *rqstp,
	       struct nfsd4_compound_state *cstate,
	       struct nfsd4_sequence *seq)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct xdr_stream *xdr = &resp->xdr;
	struct nfsd4_session *session;
	struct nfs4_client *clp;
	struct nfsd4_slot *slot;
	struct nfsd4_conn *conn;
	__be32 status;
	int buflen;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	/* SEQUENCE must be the first operation of the compound */
	if (resp->opcnt != 1)
		return nfserr_sequence_pos;

	/*
	 * Will be either used or freed by nfsd4_sequence_check_conn
	 * below.
	 */
	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
	if (!conn)
		return nfserr_jukebox;

	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
	if (!session)
		goto out_no_session;
	clp = session->se_client;

	status = nfserr_too_many_ops;
	if (nfsd4_session_too_many_ops(rqstp, session))
		goto out_put_session;

	status = nfserr_req_too_big;
	if (nfsd4_request_too_big(rqstp, session))
		goto out_put_session;

	status = nfserr_badslot;
	if (seq->slotid >= session->se_fchannel.maxreqs)
		goto out_put_session;

	slot = session->se_slots[seq->slotid];
	dprintk("%s: slotid %d\n", __func__, seq->slotid);

	/* We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs which is used to encode
	 * sr_highest_slotid and the sr_target_slot id to maxslots */
	seq->maxslots = session->se_fchannel.maxreqs;

	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
					slot->sl_flags & NFSD4_SLOT_INUSE);
	if (status == nfserr_replay_cache) {
		/* a replay on a slot that never cached a reply is misordered */
		status = nfserr_seq_misordered;
		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
			goto out_put_session;
		cstate->slot = slot;
		cstate->session = session;
		cstate->clp = clp;
		/* Return the cached reply status and set cstate->status
		 * for nfsd4_proc_compound processing */
		status = nfsd4_replay_cache_entry(resp, seq);
		cstate->status = nfserr_replay_cache;
		goto out;
	}
	if (status)
		goto out_put_session;

	status = nfsd4_sequence_check_conn(conn, session);
	/* conn was consumed (hashed or freed) by the call above */
	conn = NULL;
	if (status)
		goto out_put_session;

	/* cap the reply at what we can cache, or at the channel maximum */
	buflen = (seq->cachethis) ?
			session->se_fchannel.maxresp_cached :
			session->se_fchannel.maxresp_sz;
	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
				    nfserr_rep_too_big;
	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
		goto out_put_session;
	svc_reserve(rqstp, buflen);

	status = nfs_ok;
	/* Success! bump slot seqid */
	slot->sl_seqid = seq->seqid;
	slot->sl_flags |= NFSD4_SLOT_INUSE;
	if (seq->cachethis)
		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
	else
		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;

	cstate->slot = slot;
	cstate->session = session;
	cstate->clp = clp;

out:
	/* report backchannel health and revoked state in sr_status_flags */
	switch (clp->cl_cb_state) {
	case NFSD4_CB_DOWN:
		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
		break;
	case NFSD4_CB_FAULT:
		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
		break;
	default:
		seq->status_flags = 0;
	}
	if (!list_empty(&clp->cl_revoked))
		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
out_no_session:
	if (conn)
		free_conn(conn);
	spin_unlock(&nn->client_lock);
	return status;
out_put_session:
	nfsd4_put_session_locked(session);
	goto out_no_session;
}
2994
/*
 * Per-compound teardown for state set up by nfsd4_sequence(): cache the
 * reply (unless the compound was itself served from the replay cache),
 * release the slot, and drop the session or client reference.
 */
void
nfsd4_sequence_done(struct nfsd4_compoundres *resp)
{
	struct nfsd4_compound_state *cs = &resp->cstate;

	if (nfsd4_has_session(cs)) {
		if (cs->status != nfserr_replay_cache) {
			nfsd4_store_cache_entry(resp);
			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
		}
		/* Drop session reference that was taken in nfsd4_sequence() */
		nfsd4_put_session(cs->session);
	} else if (cs->clp)
		put_client_renew(cs->clp);
}
3010
/*
 * NFSv4.1 DESTROY_CLIENTID: expire the confirmed or unconfirmed client
 * matching @dc, provided it holds no state and the request carries the
 * client's machine credential.
 */
__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
{
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *clp = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
	conf = find_confirmed_client(&dc->clientid, true, nn);
	/* a clientid can't be both confirmed and unconfirmed at once */
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		/* can't destroy a confirmed client that still has state */
		if (client_has_state(conf)) {
			status = nfserr_clientid_busy;
			goto out;
		}
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
		clp = conf;
	} else if (unconf)
		clp = unconf;
	else {
		status = nfserr_stale_clientid;
		goto out;
	}
	if (!mach_creds_match(clp, rqstp)) {
		/* clear clp so we don't expire it on the way out */
		clp = NULL;
		status = nfserr_wrong_cred;
		goto out;
	}
	unhash_client_locked(clp);
out:
	spin_unlock(&nn->client_lock);
	/* final teardown must happen outside the spinlock */
	if (clp)
		expire_client(clp);
	return status;
}
3051
/*
 * NFSv4.1 RECLAIM_COMPLETE: the client declares it has finished
 * reclaiming state after a server reboot; record that fact in stable
 * storage.  The per-filesystem (rca_one_fs) variant is accepted but
 * otherwise ignored.
 */
__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
{
	__be32 status = 0;

	if (rc->rca_one_fs) {
		if (!cstate->current_fh.fh_dentry)
			return nfserr_nofilehandle;
		/*
		 * We don't take advantage of the rca_one_fs case.
		 * That's OK, it's optional, we can safely ignore it.
		 */
		 return nfs_ok;
	}

	/* test_and_set also catches a duplicate RECLAIM_COMPLETE */
	status = nfserr_complete_already;
	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
			     &cstate->session->se_client->cl_flags))
		goto out;

	status = nfserr_stale_clientid;
	if (is_client_expired(cstate->session->se_client))
		/*
		 * The following error isn't really legal.
		 * But we only get here if the client just explicitly
		 * destroyed the client.  Surely it no longer cares what
		 * error it gets back on an operation for the dead
		 * client.
		 */
		goto out;

	status = nfs_ok;
	nfsd4_client_record_create(cstate->session->se_client);
out:
	return status;
}
3088
/*
 * NFSv4.0 SETCLIENTID: create an unconfirmed client record for the
 * given name/verifier pair and return a fresh clientid + confirm
 * verifier for the client to echo back in SETCLIENTID_CONFIRM.
 * Case numbering in the comments follows RFC 3530 section 14.2.33.
 */
__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_setclientid *setclid)
{
	struct xdr_netobj	clname = setclid->se_name;
	nfs4_verifier		clverifier = setclid->se_verf;
	struct nfs4_client	*conf, *new;
	struct nfs4_client	*unconf = NULL;
	__be32			status;
	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	/* allocate outside the lock; freed below if we don't hash it */
	new = create_client(clname, rqstp, &clverifier);
	if (new == NULL)
		return nfserr_jukebox;
	/* Cases below refer to rfc 3530 section 14.2.33: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&clname, nn);
	if (conf && client_has_state(conf)) {
		/* case 0: */
		status = nfserr_clid_inuse;
		if (clp_used_exchangeid(conf))
			goto out;
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			char addr_str[INET6_ADDRSTRLEN];
			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
				 sizeof(addr_str));
			dprintk("NFSD: setclientid: string in use by client "
				"at %s\n", addr_str);
			goto out;
		}
	}
	/* a retransmitted/replaced SETCLIENTID discards the old unconfirmed record */
	unconf = find_unconfirmed_client_by_name(&clname, nn);
	if (unconf)
		unhash_client_locked(unconf);
	if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
		/* case 1: probable callback update */
		copy_clid(new, conf);
		gen_confirm(new, nn);
	} else /* case 4 (new client) or cases 2, 3 (client reboot): */
		gen_clid(new, nn);
	new->cl_minorversion = 0;
	gen_callback(new, setclid, rqstp);
	add_to_unconfirmed(new);
	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
	/* ownership transferred to the unconfirmed table; don't free below */
	new = NULL;
	status = nfs_ok;
out:
	spin_unlock(&nn->client_lock);
	if (new)
		free_client(new);
	if (unconf)
		expire_client(unconf);
	return status;
}
3145
3146
/*
 * NFSv4.0 SETCLIENTID_CONFIRM: confirm a clientid previously handed out
 * by SETCLIENTID, moving the unconfirmed record to confirmed (or, for a
 * callback update, merging into the existing confirmed record).  Case
 * numbering in the comments follows RFC 3530 section 14.2.34.
 */
__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
			 struct nfsd4_compound_state *cstate,
			 struct nfsd4_setclientid_confirm *setclientid_confirm)
{
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
	clientid_t * clid = &setclientid_confirm->sc_clientid;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	spin_lock(&nn->client_lock);
	conf = find_confirmed_client(clid, false, nn);
	unconf = find_unconfirmed_client(clid, false, nn);
	/*
	 * We try hard to give out unique clientid's, so if we get an
	 * attempt to confirm the same clientid with a different cred,
	 * the client may be buggy; this should never happen.
	 *
	 * Nevertheless, RFC 7530 recommends INUSE for this case:
	 */
	status = nfserr_clid_inuse;
	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
		goto out;
	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
		goto out;
	/* cases below refer to rfc 3530 section 14.2.34: */
	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
		if (conf && !unconf) /* case 2: probable retransmit */
			status = nfs_ok;
		else /* case 4: client hasn't noticed we rebooted yet? */
			status = nfserr_stale_clientid;
		goto out;
	}
	status = nfs_ok;
	if (conf) { /* case 1: callback update */
		old = unconf;
		unhash_client_locked(old);
		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
	} else { /* case 3: normal case; new or rebooted client */
		/* a previously confirmed client of the same name is replaced */
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = nfserr_clid_inuse;
			if (client_has_state(old)
					&& !same_creds(&unconf->cl_cred,
							&old->cl_cred))
				goto out;
			status = mark_client_expired_locked(old);
			if (status) {
				/* don't expire a client we failed to mark */
				old = NULL;
				goto out;
			}
		}
		move_to_confirmed(unconf);
		conf = unconf;
	}
	/* hold a reference across the unlocked callback probe */
	get_client_locked(conf);
	spin_unlock(&nn->client_lock);
	nfsd4_probe_callback(conf);
	spin_lock(&nn->client_lock);
	put_client_renew_locked(conf);
out:
	spin_unlock(&nn->client_lock);
	/* final teardown of any displaced client, outside the spinlock */
	if (old)
		expire_client(old);
	return status;
}
3218
3219 static struct nfs4_file *nfsd4_alloc_file(void)
3220 {
3221         return kmem_cache_alloc(file_slab, GFP_KERNEL);
3222 }
3223
3224 /* OPEN Share state helper functions */
/*
 * Initialize a freshly allocated nfs4_file for filehandle @fh and hash
 * it into file_hashtbl[@hashval].  Caller must hold state_lock (the
 * lockdep assertion below enforces this).  The new file starts with one
 * reference held by the caller.
 */
static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
				struct nfs4_file *fp)
{
	lockdep_assert_held(&state_lock);

	atomic_set(&fp->fi_ref, 1);
	spin_lock_init(&fp->fi_lock);
	INIT_LIST_HEAD(&fp->fi_stateids);
	INIT_LIST_HEAD(&fp->fi_delegations);
	INIT_LIST_HEAD(&fp->fi_clnt_odstate);
	fh_copy_shallow(&fp->fi_fhandle, fh);
	fp->fi_deleg_file = NULL;
	fp->fi_had_conflict = false;
	fp->fi_share_deny = 0;
	/* slab memory is not zeroed; clear the fd and access arrays */
	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
	memset(fp->fi_access, 0, sizeof(fp->fi_access));
#ifdef CONFIG_NFSD_PNFS
	INIT_LIST_HEAD(&fp->fi_lo_states);
	atomic_set(&fp->fi_lo_recalls, 0);
#endif
	/* publish last, once the file is fully initialized */
	hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
}
3247
/* Destroy all NFSv4 state slab caches created by nfsd4_init_slabs(). */
void
nfsd4_free_slabs(void)
{
	kmem_cache_destroy(odstate_slab);
	kmem_cache_destroy(openowner_slab);
	kmem_cache_destroy(lockowner_slab);
	kmem_cache_destroy(file_slab);
	kmem_cache_destroy(stateid_slab);
	kmem_cache_destroy(deleg_slab);
}
3258
/*
 * Create the slab caches backing all NFSv4 state objects (open owners,
 * lock owners, files, stateids, delegations, open/deny state).
 * Returns 0 on success or -ENOMEM, unwinding any caches already
 * created via the goto ladder below.
 */
int
nfsd4_init_slabs(void)
{
	openowner_slab = kmem_cache_create("nfsd4_openowners",
			sizeof(struct nfs4_openowner), 0, 0, NULL);
	if (openowner_slab == NULL)
		goto out;
	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
			sizeof(struct nfs4_lockowner), 0, 0, NULL);
	if (lockowner_slab == NULL)
		goto out_free_openowner_slab;
	file_slab = kmem_cache_create("nfsd4_files",
			sizeof(struct nfs4_file), 0, 0, NULL);
	if (file_slab == NULL)
		goto out_free_lockowner_slab;
	stateid_slab = kmem_cache_create("nfsd4_stateids",
			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
	if (stateid_slab == NULL)
		goto out_free_file_slab;
	deleg_slab = kmem_cache_create("nfsd4_delegations",
			sizeof(struct nfs4_delegation), 0, 0, NULL);
	if (deleg_slab == NULL)
		goto out_free_stateid_slab;
	odstate_slab = kmem_cache_create("nfsd4_odstate",
			sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
	if (odstate_slab == NULL)
		goto out_free_deleg_slab;
	return 0;

out_free_deleg_slab:
	kmem_cache_destroy(deleg_slab);
out_free_stateid_slab:
	kmem_cache_destroy(stateid_slab);
out_free_file_slab:
	kmem_cache_destroy(file_slab);
out_free_lockowner_slab:
	kmem_cache_destroy(lockowner_slab);
out_free_openowner_slab:
	kmem_cache_destroy(openowner_slab);
out:
	dprintk("nfsd4: out of memory while initializing nfsv4\n");
	return -ENOMEM;
}
3302
/*
 * Initialize a stateowner's v4.0 seqid-replay cache: empty buffer
 * (pointing at the inline storage), serverfault status until a real
 * reply is cached, and the mutex that serializes replay handling.
 */
static void init_nfs4_replay(struct nfs4_replay *rp)
{
	rp->rp_status = nfserr_serverfault;
	rp->rp_buflen = 0;
	rp->rp_buf = rp->rp_ibuf;
	mutex_init(&rp->rp_mutex);
}
3310
/*
 * For sessionless (v4.0) compounds, take @so's replay mutex and stash a
 * stateowner reference in cstate->replay_owner; both are released by
 * nfsd4_cstate_clear_replay().  Compounds with a session use the slot
 * reply cache instead, so this is a no-op for them.
 */
static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
		struct nfs4_stateowner *so)
{
	if (!nfsd4_has_session(cstate)) {
		mutex_lock(&so->so_replay.rp_mutex);
		cstate->replay_owner = nfs4_get_stateowner(so);
	}
}
3319
3320 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
3321 {
3322         struct nfs4_stateowner *so = cstate->replay_owner;
3323
3324         if (so != NULL) {
3325                 cstate->replay_owner = NULL;
3326                 mutex_unlock(&so->so_replay.rp_mutex);
3327                 nfs4_put_stateowner(so);
3328         }
3329 }
3330
/*
 * Allocate a stateowner from @slab and initialize the generic
 * nfs4_stateowner fields, duplicating @owner's opaque name.  Returns
 * NULL on allocation failure (the partially allocated object is freed).
 * The returned owner starts with one reference; the caller fills in the
 * open/lock-specific fields.
 */
static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
{
	struct nfs4_stateowner *sop;

	sop = kmem_cache_alloc(slab, GFP_KERNEL);
	if (!sop)
		return NULL;

	sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
	if (!sop->so_owner.data) {
		/* undo the slab allocation if the name copy failed */
		kmem_cache_free(slab, sop);
		return NULL;
	}
	sop->so_owner.len = owner->len;

	INIT_LIST_HEAD(&sop->so_stateids);
	sop->so_client = clp;
	init_nfs4_replay(&sop->so_replay);
	atomic_set(&sop->so_count, 1);
	return sop;
}
3352
/*
 * Insert an open owner into the client's owner-string hash bucket and
 * per-client open owner list.  Caller must hold clp->cl_lock (enforced
 * by the lockdep assertion).
 */
static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
{
	lockdep_assert_held(&clp->cl_lock);

	list_add(&oo->oo_owner.so_strhash,
		 &clp->cl_ownerstr_hashtbl[strhashval]);
	list_add(&oo->oo_perclient, &clp->cl_openowners);
}
3361
/* so_unhash callback for open owners: downcast and unhash. */
static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
{
	struct nfs4_openowner *oo = openowner(so);

	unhash_openowner_locked(oo);
}
3366
3367 static void nfs4_free_openowner(struct nfs4_stateowner *so)
3368 {
3369         struct nfs4_openowner *oo = openowner(so);
3370
3371         kmem_cache_free(openowner_slab, oo);
3372 }
3373
/* Type-specific unhash/free operations for open owners. */
static const struct nfs4_stateowner_operations openowner_ops = {
	.so_unhash =	nfs4_unhash_openowner,
	.so_free =	nfs4_free_openowner,
};
3378
/*
 * Search @fp's stateid list for an OPEN stateid belonging to the open
 * owner in @open.  Caller must hold fp->fi_lock (enforced by lockdep).
 * On a hit, a reference to the stateid is taken (sc_count) which the
 * caller must put.
 */
static struct nfs4_ol_stateid *
nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_ol_stateid *local, *ret = NULL;
	struct nfs4_openowner *oo = open->op_openowner;

	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
		/* ignore lock owners */
		if (local->st_stateowner->so_is_open_owner == 0)
			continue;
		if (local->st_stateowner != &oo->oo_owner)
			continue;
		if (local->st_stid.sc_type == NFS4_OPEN_STID) {
			ret = local;
			/* take a reference for the caller */
			atomic_inc(&ret->st_stid.sc_count);
			break;
		}
	}
	return ret;
}
3401
3402 static __be32
3403 nfsd4_verify_open_stid(struct nfs4_stid *s)
3404 {
3405         __be32 ret = nfs_ok;
3406
3407         switch (s->sc_type) {
3408         default:
3409                 break;
3410         case NFS4_CLOSED_STID:
3411         case NFS4_CLOSED_DELEG_STID:
3412                 ret = nfserr_bad_stateid;
3413                 break;
3414         case NFS4_REVOKED_DELEG_STID:
3415                 ret = nfserr_deleg_revoked;
3416         }
3417         return ret;
3418 }
3419
/*
 * Lock the stateid st_mutex, and deal with races with CLOSE: if the
 * stateid was closed (or its delegation revoked) while we waited for
 * the mutex, drop the mutex again and return the error so the caller
 * can retry its lookup.
 */
static __be32
nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
{
	__be32 ret;

	mutex_lock(&stp->st_mutex);
	ret = nfsd4_verify_open_stid(&stp->st_stid);
	if (ret != nfs_ok)
		mutex_unlock(&stp->st_mutex);
	return ret;
}
3432
/*
 * Find the existing OPEN stateid for @open on @fp and return it with
 * st_mutex held, or NULL if there is none.  If the stateid we found was
 * closed before we could lock it, drop our reference and retry the
 * lookup.  On success the caller owns both the stateid reference and
 * the mutex.
 */
static struct nfs4_ol_stateid *
nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_ol_stateid *stp;
	for (;;) {
		spin_lock(&fp->fi_lock);
		stp = nfsd4_find_existing_open(fp, open);
		spin_unlock(&fp->fi_lock);
		if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
			break;
		/* lost a race with CLOSE; drop the stale stateid and retry */
		nfs4_put_stid(&stp->st_stid);
	}
	return stp;
}
3447
/*
 * Allocate and initialize an open owner for @open, then hash it under
 * the client.  If another thread raced us and hashed an equivalent
 * owner first, free ours and return the existing one instead.  Returns
 * NULL only on allocation failure.  Owners created under a session are
 * born confirmed (no OPEN_CONFIRM in v4.1+ compounds).
 */
static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
			   struct nfsd4_compound_state *cstate)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_openowner *oo, *ret;

	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
	if (!oo)
		return NULL;
	oo->oo_owner.so_ops = &openowner_ops;
	oo->oo_owner.so_is_open_owner = 1;
	oo->oo_owner.so_seqid = open->op_seqid;
	oo->oo_flags = 0;
	if (nfsd4_has_session(cstate))
		oo->oo_flags |= NFS4_OO_CONFIRMED;
	oo->oo_time = 0;
	oo->oo_last_closed_stid = NULL;
	INIT_LIST_HEAD(&oo->oo_close_lru);
	spin_lock(&clp->cl_lock);
	/* recheck under cl_lock in case a concurrent open hashed one first */
	ret = find_openstateowner_str_locked(strhashval, open, clp);
	if (ret == NULL) {
		hash_openowner(oo, clp, strhashval);
		ret = oo;
	} else
		nfs4_free_stateowner(&oo->oo_owner);

	spin_unlock(&clp->cl_lock);
	return ret;
}
3478
/*
 * Initialize and hash the preallocated open stateid (open->op_stp) for
 * @fp, or, if another thread beat us to it, return that thread's
 * stateid instead.  The returned stateid is referenced and has its
 * st_mutex held.  When we win the race, open->op_stp is consumed
 * (set to NULL); otherwise it remains for the caller to free.
 */
static struct nfs4_ol_stateid *
init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
{

	struct nfs4_openowner *oo = open->op_openowner;
	struct nfs4_ol_stateid *retstp = NULL;
	struct nfs4_ol_stateid *stp;

	stp = open->op_stp;
	/* We are moving these outside of the spinlocks to avoid the warnings */
	mutex_init(&stp->st_mutex);
	mutex_lock(&stp->st_mutex);

retry:
	/* lock order: cl_lock outside fi_lock */
	spin_lock(&oo->oo_owner.so_client->cl_lock);
	spin_lock(&fp->fi_lock);

	retstp = nfsd4_find_existing_open(fp, open);
	if (retstp)
		goto out_unlock;

	/* we won the race: consume the preallocated stateid */
	open->op_stp = NULL;
	atomic_inc(&stp->st_stid.sc_count);
	stp->st_stid.sc_type = NFS4_OPEN_STID;
	INIT_LIST_HEAD(&stp->st_locks);
	stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
	get_nfs4_file(fp);
	stp->st_stid.sc_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = 0;
	stp->st_openstp = NULL;
	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
	list_add(&stp->st_perfile, &fp->fi_stateids);

out_unlock:
	spin_unlock(&fp->fi_lock);
	spin_unlock(&oo->oo_owner.so_client->cl_lock);
	if (retstp) {
		/* Handle races with CLOSE */
		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
			nfs4_put_stid(&retstp->st_stid);
			goto retry;
		}
		/* To keep mutex tracking happy */
		mutex_unlock(&stp->st_mutex);
		stp = retstp;
	}
	return stp;
}
3528
3529 /*
3530  * In the 4.0 case we need to keep the owners around a little while to handle
3531  * CLOSE replay. We still do need to release any file access that is held by
3532  * them before returning however.
3533  */
static void
move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
{
	struct nfs4_ol_stateid *last;
	struct nfs4_openowner *oo = openowner(s->st_stateowner);
	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
						nfsd_net_id);

	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);

	/*
	 * We know that we hold one reference via nfsd4_close, and another
	 * "persistent" reference for the client. If the refcount is higher
	 * than 2, then there are still calls in progress that are using this
	 * stateid. We can't put the sc_file reference until they are finished.
	 * Wait for the refcount to drop to 2. Since it has been unhashed,
	 * there should be no danger of the refcount going back up again at
	 * this point.
	 */
	wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2);

	/* Drop file access and the file reference now; only the stateid
	 * shell is kept around for CLOSE replay detection. */
	release_all_access(s);
	if (s->st_stid.sc_file) {
		put_nfs4_file(s->st_stid.sc_file);
		s->st_stid.sc_file = NULL;
	}

	/* Swap in @s as the owner's last-closed stateid and requeue the
	 * owner on the close LRU; the previous holder (if any) is put
	 * after dropping the lock. */
	spin_lock(&nn->client_lock);
	last = oo->oo_last_closed_stid;
	oo->oo_last_closed_stid = s;
	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
	oo->oo_time = get_seconds();
	spin_unlock(&nn->client_lock);
	if (last)
		nfs4_put_stid(&last->st_stid);
}
3570
3571 /* search file_hashtbl[] for file */
/*
 * Hash-chain walk for @fh; returns a referenced nfs4_file or NULL.
 * Caller must be in an RCU read-side critical section or hold state_lock.
 * atomic_inc_not_zero skips entries already on their way to being freed.
 */
static struct nfs4_file *
find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
{
	struct nfs4_file *fp;

	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
		if (fh_match(&fp->fi_fhandle, fh)) {
			if (atomic_inc_not_zero(&fp->fi_ref))
				return fp;
		}
	}
	return NULL;
}
3585
/*
 * Look up the nfs4_file for @fh, taking a reference on it.
 * Returns NULL if no matching file is currently hashed.
 */
struct nfs4_file *
find_file(struct knfsd_fh *fh)
{
	struct nfs4_file *found;

	rcu_read_lock();
	found = find_file_locked(fh, file_hashval(fh));
	rcu_read_unlock();
	return found;
}
3597
/*
 * Return the hashed nfs4_file for @fh, or insert and return @new if
 * none exists.  Uses a lockless RCU lookup first, then rechecks under
 * state_lock before inserting (classic double-checked insert).  The
 * caller can tell whether @new was consumed by comparing the result
 * against it.
 */
static struct nfs4_file *
find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
{
	struct nfs4_file *fp;
	unsigned int hashval = file_hashval(fh);

	/* Fast path: lockless lookup */
	rcu_read_lock();
	fp = find_file_locked(fh, hashval);
	rcu_read_unlock();
	if (fp)
		return fp;

	/* Slow path: recheck under the lock, then insert @new */
	spin_lock(&state_lock);
	fp = find_file_locked(fh, hashval);
	if (likely(fp == NULL)) {
		nfsd4_init_file(fh, hashval, new);
		fp = new;
	}
	spin_unlock(&state_lock);

	return fp;
}
3620
3621 /*
3622  * Called to check deny when READ with all zero stateid or
3623  * WRITE with all zero or all one stateid
3624  */
3625 static __be32
3626 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
3627 {
3628         struct nfs4_file *fp;
3629         __be32 ret = nfs_ok;
3630
3631         fp = find_file(&current_fh->fh_handle);
3632         if (!fp)
3633                 return ret;
3634         /* Check for conflicting share reservations */
3635         spin_lock(&fp->fi_lock);
3636         if (fp->fi_share_deny & deny_type)
3637                 ret = nfserr_locked;
3638         spin_unlock(&fp->fi_lock);
3639         put_nfs4_file(fp);
3640         return ret;
3641 }
3642
/*
 * Prepare a delegation recall: block new delegations on this file and
 * queue the delegation on the per-net recall LRU so the laundromat can
 * time it out if the client never returns it.
 */
static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);
	struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
					  nfsd_net_id);

	block_delegations(&dp->dl_stid.sc_file->fi_fhandle);

	/*
	 * We can't do this in nfsd_break_deleg_cb because it is
	 * already holding inode->i_lock.
	 *
	 * If the dl_time != 0, then we know that it has already been
	 * queued for a lease break. Don't queue it again.
	 */
	spin_lock(&state_lock);
	if (dp->dl_time == 0) {
		dp->dl_time = get_seconds();
		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
	}
	spin_unlock(&state_lock);
}
3665
3666 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
3667                 struct rpc_task *task)
3668 {
3669         struct nfs4_delegation *dp = cb_to_delegation(cb);
3670
3671         if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID)
3672                 return 1;
3673
3674         switch (task->tk_status) {
3675         case 0:
3676                 return 1;
3677         case -EBADHANDLE:
3678         case -NFS4ERR_BAD_STATEID:
3679                 /*
3680                  * Race: client probably got cb_recall before open reply
3681                  * granting delegation.
3682                  */
3683                 if (dp->dl_retries--) {
3684                         rpc_delay(task, 2 * HZ);
3685                         return 0;
3686                 }
3687                 /*FALLTHRU*/
3688         default:
3689                 return -1;
3690         }
3691 }
3692
3693 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
3694 {
3695         struct nfs4_delegation *dp = cb_to_delegation(cb);
3696
3697         nfs4_put_stid(&dp->dl_stid);
3698 }
3699
/* Callback ops driving the CB_RECALL RPC for a delegation. */
static struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
	.prepare	= nfsd4_cb_recall_prepare,
	.done		= nfsd4_cb_recall_done,
	.release	= nfsd4_cb_recall_release,
};
3705
/* Queue a CB_RECALL for one delegation; takes a stateid reference that
 * is dropped in nfsd4_cb_recall_release(). */
static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
	/*
	 * We're assuming the state code never drops its reference
	 * without first removing the lease.  Since we're in this lease
	 * callback (and since the lease code is serialized by the kernel
	 * lock) we know the server hasn't removed the lease yet, we know
	 * it's safe to take a reference.
	 */
	atomic_inc(&dp->dl_stid.sc_count);
	nfsd4_run_cb(&dp->dl_recall);
}
3718
3719 /* Called from break_lease() with i_lock held. */
static bool
nfsd_break_deleg_cb(struct file_lock *fl)
{
	bool ret = false;
	struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
	struct nfs4_delegation *dp;

	/* Both of these indicate internal state corruption, not a
	 * condition a client can trigger; warn and bail. */
	if (!fp) {
		WARN(1, "(%p)->fl_owner NULL\n", fl);
		return ret;
	}
	if (fp->fi_had_conflict) {
		WARN(1, "duplicate break on %p\n", fp);
		return ret;
	}
	/*
	 * We don't want the locks code to timeout the lease for us;
	 * we'll remove it ourself if a delegation isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;

	spin_lock(&fp->fi_lock);
	fp->fi_had_conflict = true;
	/*
	 * If there are no delegations on the list, then return true
	 * so that the lease code will go ahead and delete it.
	 */
	if (list_empty(&fp->fi_delegations))
		ret = true;
	else
		list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
			nfsd_break_one_deleg(dp);
	spin_unlock(&fp->fi_lock);
	return ret;
}
3756
3757 static int
3758 nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
3759                      struct list_head *dispose)
3760 {
3761         if (arg & F_UNLCK)
3762                 return lease_modify(onlist, arg, dispose);
3763         else
3764                 return -EAGAIN;
3765 }
3766
/* Lease manager ops installed on delegation leases (FL_DELEG). */
static const struct lock_manager_operations nfsd_lease_mng_ops = {
	.lm_break = nfsd_break_deleg_cb,
	.lm_change = nfsd_change_deleg_cb,
};
3771
3772 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
3773 {
3774         if (nfsd4_has_session(cstate))
3775                 return nfs_ok;
3776         if (seqid == so->so_seqid - 1)
3777                 return nfserr_replay_me;
3778         if (seqid == so->so_seqid)
3779                 return nfs_ok;
3780         return nfserr_bad_seqid;
3781 }
3782
/*
 * Resolve @clid to a confirmed client and cache it in cstate->clp,
 * taking a reference that the compound-state teardown releases.  If a
 * client is already cached (v4.1+ SEQUENCE), just verify it matches.
 */
static __be32 lookup_clientid(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{
	struct nfs4_client *found;

	if (cstate->clp) {
		found = cstate->clp;
		if (!same_clid(&found->cl_clientid, clid))
			return nfserr_stale_clientid;
		return nfs_ok;
	}

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	/*
	 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
	 * cached already then we know this is for v4.0 and "sessions"
	 * will be false.
	 */
	WARN_ON_ONCE(cstate->session);
	spin_lock(&nn->client_lock);
	found = find_confirmed_client(clid, false, nn);
	if (!found) {
		spin_unlock(&nn->client_lock);
		return nfserr_expired;
	}
	/* Pin the client while cstate holds it */
	atomic_inc(&found->cl_refcount);
	spin_unlock(&nn->client_lock);

	/* Cache the nfs4_client in cstate! */
	cstate->clp = found;
	return nfs_ok;
}
3818
/*
 * First phase of OPEN processing: validate the clientid, find or create
 * the open-owner, and preallocate the objects (nfs4_file, open stateid,
 * optional pNFS odstate) that nfsd4_process_open2() may consume, so a
 * later allocation failure can't strand a half-done OPEN.
 */
__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
		    struct nfsd4_open *open, struct nfsd_net *nn)
{
	clientid_t *clientid = &open->op_clientid;
	struct nfs4_client *clp = NULL;
	unsigned int strhashval;
	struct nfs4_openowner *oo = NULL;
	__be32 status;

	if (STALE_CLIENTID(&open->op_clientid, nn))
		return nfserr_stale_clientid;
	/*
	 * In case we need it later, after we've already created the
	 * file and don't want to risk a further failure:
	 */
	open->op_file = nfsd4_alloc_file();
	if (open->op_file == NULL)
		return nfserr_jukebox;

	status = lookup_clientid(clientid, cstate, nn);
	if (status)
		return status;
	clp = cstate->clp;

	strhashval = ownerstr_hashval(&open->op_owner);
	oo = find_openstateowner_str(strhashval, open, clp);
	open->op_openowner = oo;
	if (!oo) {
		goto new_owner;
	}
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		/* Replace unconfirmed owners without checking for replay. */
		release_openowner(oo);
		open->op_openowner = NULL;
		goto new_owner;
	}
	/* Existing confirmed owner: enforce v4.0 seqid/replay rules */
	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
	if (status)
		return status;
	goto alloc_stateid;
new_owner:
	oo = alloc_init_open_stateowner(strhashval, open, cstate);
	if (oo == NULL)
		return nfserr_jukebox;
	open->op_openowner = oo;
alloc_stateid:
	open->op_stp = nfs4_alloc_open_stateid(clp);
	if (!open->op_stp)
		return nfserr_jukebox;

	/* pNFS exports on a session also need a client odstate */
	if (nfsd4_has_session(cstate) &&
	    (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
		open->op_odstate = alloc_clnt_odstate(clp);
		if (!open->op_odstate)
			return nfserr_jukebox;
	}

	return nfs_ok;
}
3879
3880 static inline __be32
3881 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
3882 {
3883         if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
3884                 return nfserr_openmode;
3885         else
3886                 return nfs_ok;
3887 }
3888
3889 static int share_access_to_flags(u32 share_access)
3890 {
3891         return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
3892 }
3893
3894 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
3895 {
3896         struct nfs4_stid *ret;
3897
3898         ret = find_stateid_by_type(cl, s,
3899                                 NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
3900         if (!ret)
3901                 return NULL;
3902         return delegstateid(ret);
3903 }
3904
3905 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
3906 {
3907         return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
3908                open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
3909 }
3910
/*
 * Validate the delegation stateid carried by a DELEGATE_CUR-style OPEN.
 * On success *dp holds a referenced delegation.  For claim types that
 * are not "deleg cur", any lookup failure is ignored and nfs_ok is
 * returned; for deleg-cur claims a failure is fatal, and success also
 * marks the open-owner confirmed.
 */
static __be32
nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
		struct nfs4_delegation **dp)
{
	int flags;
	__be32 status = nfserr_bad_stateid;
	struct nfs4_delegation *deleg;

	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
	if (deleg == NULL)
		goto out;
	if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
		nfs4_put_stid(&deleg->dl_stid);
		/* DELEG_REVOKED only exists in v4.1+; v4.0 keeps bad_stateid */
		if (cl->cl_minorversion)
			status = nfserr_deleg_revoked;
		goto out;
	}
	/* Delegation must cover the access mode being opened */
	flags = share_access_to_flags(open->op_share_access);
	status = nfs4_check_delegmode(deleg, flags);
	if (status) {
		nfs4_put_stid(&deleg->dl_stid);
		goto out;
	}
	*dp = deleg;
out:
	if (!nfsd4_is_deleg_cur(open))
		return nfs_ok;
	if (status)
		return status;
	/* A valid deleg-cur claim implicitly confirms the open-owner */
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	return nfs_ok;
}
3943
3944 static inline int nfs4_access_to_access(u32 nfs4_access)
3945 {
3946         int flags = 0;
3947
3948         if (nfs4_access & NFS4_SHARE_ACCESS_READ)
3949                 flags |= NFSD_MAY_READ;
3950         if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
3951                 flags |= NFSD_MAY_WRITE;
3952         return flags;
3953 }
3954
3955 static inline __be32
3956 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
3957                 struct nfsd4_open *open)
3958 {
3959         struct iattr iattr = {
3960                 .ia_valid = ATTR_SIZE,
3961                 .ia_size = 0,
3962         };
3963         if (!open->op_truncate)
3964                 return 0;
3965         if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
3966                 return nfserr_inval;
3967         return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
3968 }
3969
/*
 * Grant the access/deny modes of @open on @fp for stateid @stp, opening
 * the underlying file if no struct file of the needed open mode is
 * cached yet.  On failure the stateid's access/deny bitmaps and the
 * file's access counts are rolled back to their previous values.
 */
static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
		struct nfsd4_open *open)
{
	struct file *filp = NULL;
	__be32 status;
	int oflag = nfs4_access_to_omode(open->op_share_access);
	int access = nfs4_access_to_access(open->op_share_access);
	unsigned char old_access_bmap, old_deny_bmap;

	spin_lock(&fp->fi_lock);

	/*
	 * Are we trying to set a deny mode that would conflict with
	 * current access?
	 */
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* set access to the file */
	status = nfs4_file_get_access(fp, open->op_share_access);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* Set access bits in stateid */
	old_access_bmap = stp->st_access_bmap;
	set_access(open->op_share_access, stp);

	/* Set new deny mask */
	old_deny_bmap = stp->st_deny_bmap;
	set_deny(open->op_share_deny, stp);
	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);

	if (!fp->fi_fds[oflag]) {
		/* Must drop the spinlock across nfsd_open(), which sleeps;
		 * recheck fi_fds afterwards in case someone raced us. */
		spin_unlock(&fp->fi_lock);
		status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
		if (status)
			goto out_put_access;
		spin_lock(&fp->fi_lock);
		if (!fp->fi_fds[oflag]) {
			fp->fi_fds[oflag] = filp;
			filp = NULL;
		}
	}
	spin_unlock(&fp->fi_lock);
	/* We lost the race to install filp; drop our extra open */
	if (filp)
		fput(filp);

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status)
		goto out_put_access;
out:
	return status;
out_put_access:
	/* Roll back stateid bitmaps and file access counts */
	stp->st_access_bmap = old_access_bmap;
	nfs4_file_put_access(fp, open->op_share_access);
	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
	goto out;
}
4034
/*
 * OPEN upgrade on an existing stateid: if the requested access mode is
 * new, fall through to the full nfs4_get_vfs_file() path; otherwise
 * only the deny mode needs checking/recording.  A failed truncate rolls
 * the deny bitmap back to its previous state.
 */
static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
{
	__be32 status;
	unsigned char old_deny_bmap = stp->st_deny_bmap;

	if (!test_access(open->op_share_access, stp))
		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);

	/* test and set deny mode */
	spin_lock(&fp->fi_lock);
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status == nfs_ok) {
		set_deny(open->op_share_deny, stp);
		fp->fi_share_deny |=
				(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
	}
	spin_unlock(&fp->fi_lock);

	if (status != nfs_ok)
		return status;

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status != nfs_ok)
		reset_union_bmap_deny(old_deny_bmap, stp);
	return status;
}
4062
4063 /* Should we give out recallable state?: */
4064 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
4065 {
4066         if (clp->cl_cb_state == NFSD4_CB_UP)
4067                 return true;
4068         /*
4069          * In the sessions case, since we don't have to establish a
4070          * separate connection for callbacks, we assume it's OK
4071          * until we hear otherwise:
4072          */
4073         return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
4074 }
4075
4076 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
4077 {
4078         struct file_lock *fl;
4079
4080         fl = locks_alloc_lock();
4081         if (!fl)
4082                 return NULL;
4083         fl->fl_lmops = &nfsd_lease_mng_ops;
4084         fl->fl_flags = FL_DELEG;
4085         fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
4086         fl->fl_end = OFFSET_MAX;
4087         fl->fl_owner = (fl_owner_t)fp;
4088         fl->fl_pid = current->tgid;
4089         return fl;
4090 }
4091
4092 /**
4093  * nfs4_setlease - Obtain a delegation by requesting lease from vfs layer
4094  * @dp:   a pointer to the nfs4_delegation we're adding.
4095  *
4096  * Return:
4097  *      On success: Return code will be 0 on success.
4098  *
4099  *      On error: -EAGAIN if there was an existing delegation.
4100  *                 nonzero if there is an error in other cases.
4101  *
4102  */
4103
static int nfs4_setlease(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;
	struct file_lock *fl;
	struct file *filp;
	int status = 0;

	fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ);
	if (!fl)
		return -ENOMEM;
	filp = find_readable_file(fp);
	if (!filp) {
		/* We should always have a readable file here */
		WARN_ON_ONCE(1);
		locks_free_lock(fl);
		return -EBADF;
	}
	fl->fl_file = filp;
	/* vfs_setlease() may consume fl (setting it to NULL); only free
	 * it here if it was left in our hands. */
	status = vfs_setlease(filp, fl->fl_type, &fl, NULL);
	if (fl)
		locks_free_lock(fl);
	if (status)
		goto out_fput;
	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	/* Did the lease get broken before we took the lock? */
	status = -EAGAIN;
	if (fp->fi_had_conflict)
		goto out_unlock;
	/* Race breaker */
	if (fp->fi_deleg_file) {
		/* Someone else installed a deleg file; keep our filp
		 * reference only until out_fput below. */
		status = hash_delegation_locked(dp, fp);
		goto out_unlock;
	}
	/* Our filp reference is transferred to fi_deleg_file here */
	fp->fi_deleg_file = filp;
	fp->fi_delegees = 0;
	status = hash_delegation_locked(dp, fp);
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
	if (status) {
		/* Should never happen, this is a new fi_deleg_file  */
		WARN_ON_ONCE(1);
		goto out_fput;
	}
	return 0;
out_unlock:
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
out_fput:
	fput(filp);
	return status;
}
4156
/*
 * Create, hash and return a read delegation on @fp for client @clp.
 * Fails with -EAGAIN if a lease break has already happened or another
 * conflicting/existing delegation is found.  Returns a referenced
 * delegation on success, ERR_PTR otherwise.
 */
static struct nfs4_delegation *
nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
		    struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
{
	int status;
	struct nfs4_delegation *dp;

	if (fp->fi_had_conflict)
		return ERR_PTR(-EAGAIN);

	/* Cheap early check before allocating the delegation */
	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	status = nfs4_get_existing_delegation(clp, fp);
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);

	if (status)
		return ERR_PTR(status);

	dp = alloc_init_deleg(clp, fh, odstate);
	if (!dp)
		return ERR_PTR(-ENOMEM);

	get_nfs4_file(fp);
	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	dp->dl_stid.sc_file = fp;
	if (!fp->fi_deleg_file) {
		/* First delegation on this file: must take the VFS lease,
		 * which sleeps, so drop the locks first. */
		spin_unlock(&fp->fi_lock);
		spin_unlock(&state_lock);
		status = nfs4_setlease(dp);
		goto out;
	}
	/* Lease already held: just check for a break race and hash */
	if (fp->fi_had_conflict) {
		status = -EAGAIN;
		goto out_unlock;
	}
	status = hash_delegation_locked(dp, fp);
out_unlock:
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
out:
	if (status) {
		put_clnt_odstate(dp->dl_clnt_odstate);
		nfs4_put_stid(&dp->dl_stid);
		return ERR_PTR(status);
	}
	return dp;
}
4206
/*
 * Fill in the v4.1 "why no delegation" result for a client that asked
 * for one (op_deleg_want) but isn't getting one.  -EAGAIN means another
 * opener conflicted; anything else is reported as a resource limit,
 * except an explicit WANT_CANCEL.
 */
static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
{
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
	if (status == -EAGAIN)
		open->op_why_no_deleg = WND4_CONTENTION;
	else {
		open->op_why_no_deleg = WND4_RESOURCE;
		switch (open->op_deleg_want) {
		case NFS4_SHARE_WANT_READ_DELEG:
		case NFS4_SHARE_WANT_WRITE_DELEG:
		case NFS4_SHARE_WANT_ANY_DELEG:
			/* keep WND4_RESOURCE */
			break;
		case NFS4_SHARE_WANT_CANCEL:
			open->op_why_no_deleg = WND4_CANCELLED;
			break;
		case NFS4_SHARE_WANT_NO_DELEG:
			/* Caller shouldn't get here if nothing was wanted */
			WARN_ON_ONCE(1);
		}
	}
}
4227
4228 /*
4229  * Attempt to hand out a delegation.
4230  *
4231  * Note we don't support write delegations, and won't until the vfs has
4232  * proper support for them.
4233  */
static void
nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
			struct nfs4_ol_stateid *stp)
{
	struct nfs4_delegation *dp;
	struct nfs4_openowner *oo = openowner(stp->st_stateowner);
	struct nfs4_client *clp = stp->st_stid.sc_client;
	int cb_up;
	int status = 0;

	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
	open->op_recall = 0;
	switch (open->op_claim_type) {
		case NFS4_OPEN_CLAIM_PREVIOUS:
			if (!cb_up)
				open->op_recall = 1;
			/* Only read-delegation reclaims are grantable here;
			 * op_delegate_type is the client's claimed type. */
			if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
				goto out_no_deleg;
			break;
		case NFS4_OPEN_CLAIM_NULL:
		case NFS4_OPEN_CLAIM_FH:
			/*
			 * Let's not give out any delegations till everyone's
			 * had the chance to reclaim theirs, *and* until
			 * NLM locks have all been reclaimed:
			 */
			if (locks_in_grace(clp->net))
				goto out_no_deleg;
			if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
				goto out_no_deleg;
			/*
			 * Also, if the file was opened for write or
			 * create, there's a good chance the client's
			 * about to write to it, resulting in an
			 * immediate recall (since we don't support
			 * write delegations):
			 */
			if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
				goto out_no_deleg;
			if (open->op_create == NFS4_OPEN_CREATE)
				goto out_no_deleg;
			break;
		default:
			goto out_no_deleg;
	}
	dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
	if (IS_ERR(dp))
		goto out_no_deleg;

	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));

	dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
		STATEID_VAL(&dp->dl_stid.sc_stateid));
	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
	nfs4_put_stid(&dp->dl_stid);
	return;
out_no_deleg:
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
	/*
	 * NOTE(review): op_delegate_type was just set to DELEGATE_NONE on
	 * the line above, so this condition can never be true and the
	 * reclaim-refused warning/recall is unreachable.  Presumably the
	 * test was meant to run against the client's requested type before
	 * the overwrite — confirm intended semantics before changing.
	 */
	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
		open->op_recall = 1;
	}

	/* 4.1 client asking for a delegation? */
	if (open->op_deleg_want)
		nfsd4_open_deleg_none_ext(open, status);
	return;
}
4303
/*
 * Client asked to cross-grade an existing delegation; report why the
 * up/downgrade isn't supported via the NONE_EXT why_no_deleg field.
 *
 * NOTE(review): both branches test dl_type == NFS4_OPEN_DELEGATE_WRITE;
 * the "upgrade" branch looks like it was meant to test DELEGATE_READ
 * (wanting write while holding read).  Since this server only hands out
 * read delegations, the second branch appears unreachable — confirm
 * before changing.
 */
static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
					struct nfs4_delegation *dp)
{
	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
	}
	/* Otherwise the client must be confused wanting a delegation
	 * it already has, therefore we don't return
	 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
	 */
}
4321
/*
 * Second phase of OPEN processing: find or create the nfs4_file and open
 * stateid for the current filehandle, perform (or upgrade) the VFS open,
 * and optionally hand out a delegation.  On success the stateid to return
 * to the client is copied into open->op_stateid.
 *
 * Locking: the open stateid's st_mutex is taken by the lookup/init helpers
 * and released here on every path before returning.
 */
__be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
        struct nfsd4_compoundres *resp = rqstp->rq_resp;
        struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
        struct nfs4_file *fp = NULL;
        struct nfs4_ol_stateid *stp = NULL;
        struct nfs4_delegation *dp = NULL;
        __be32 status;
        bool new_stp = false;

        /*
         * Lookup file; if found, lookup stateid and check open request,
         * and check for delegations in the process of being recalled.
         * If not found, create the nfs4_file struct
         */
        fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
        if (fp != open->op_file) {
                /* Pre-existing file: a delegation may already cover it. */
                status = nfs4_check_deleg(cl, open, &dp);
                if (status)
                        goto out;
                stp = nfsd4_find_and_lock_existing_open(fp, open);
        } else {
                /* Our preallocated nfs4_file was installed; ownership moved. */
                open->op_file = NULL;
                status = nfserr_bad_stateid;
                if (nfsd4_is_deleg_cur(open))
                        goto out;
        }

        if (!stp) {
                /*
                 * No existing open stateid: initialize one.
                 * NOTE(review): new_stp is derived from op_stp being
                 * cleared, which suggests init_open_stateid() consumes
                 * open->op_stp when it installs a genuinely new stateid —
                 * confirm against its definition.
                 */
                stp = init_open_stateid(fp, open);
                if (!open->op_stp)
                        new_stp = true;
        }

        /*
         * OPEN the file, or upgrade an existing OPEN.
         * If truncate fails, the OPEN fails.
         *
         * stp is already locked.
         */
        if (!new_stp) {
                /* Stateid was found, this is an OPEN upgrade */
                status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
                if (status) {
                        mutex_unlock(&stp->st_mutex);
                        goto out;
                }
        } else {
                status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
                if (status) {
                        /* VFS open failed: tear down the brand-new stateid. */
                        stp->st_stid.sc_type = NFS4_CLOSED_STID;
                        release_open_stateid(stp);
                        mutex_unlock(&stp->st_mutex);
                        goto out;
                }

                /* Attach (or reuse) the per-client open/deny state. */
                stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
                                                        open->op_odstate);
                if (stp->st_clnt_odstate == open->op_odstate)
                        open->op_odstate = NULL;
        }

        /* Bump the generation and copy the stateid into the reply. */
        nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
        mutex_unlock(&stp->st_mutex);

        if (nfsd4_has_session(&resp->cstate)) {
                /* 4.1 client explicitly declined a delegation. */
                if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
                        open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
                        open->op_why_no_deleg = WND4_NOT_WANTED;
                        goto nodeleg;
                }
        }

        /*
         * Attempt to hand out a delegation. No error return, because the
         * OPEN succeeds even if we fail.
         */
        nfs4_open_delegation(current_fh, open, stp);
nodeleg:
        status = nfs_ok;

        dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
                STATEID_VAL(&stp->st_stid.sc_stateid));
out:
        /* 4.1 client trying to upgrade/downgrade delegation? */
        if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
            open->op_deleg_want)
                nfsd4_deleg_xgrade_none_ext(open, dp);

        if (fp)
                put_nfs4_file(fp);
        if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
                open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
        /*
         * To finish the open response, we just need to set the rflags.
         */
        open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
        if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
            !nfsd4_has_session(&resp->cstate))
                open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
        if (dp)
                nfs4_put_stid(&dp->dl_stid);
        if (stp)
                nfs4_put_stid(&stp->st_stid);

        return status;
}
4430
4431 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
4432                               struct nfsd4_open *open)
4433 {
4434         if (open->op_openowner) {
4435                 struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
4436
4437                 nfsd4_cstate_assign_replay(cstate, so);
4438                 nfs4_put_stateowner(so);
4439         }
4440         if (open->op_file)
4441                 kmem_cache_free(file_slab, open->op_file);
4442         if (open->op_stp)
4443                 nfs4_put_stid(&open->op_stp->st_stid);
4444         if (open->op_odstate)
4445                 kmem_cache_free(odstate_slab, open->op_odstate);
4446 }
4447
4448 __be32
4449 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4450             clientid_t *clid)
4451 {
4452         struct nfs4_client *clp;
4453         __be32 status;
4454         struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4455
4456         dprintk("process_renew(%08x/%08x): starting\n", 
4457                         clid->cl_boot, clid->cl_id);
4458         status = lookup_clientid(clid, cstate, nn);
4459         if (status)
4460                 goto out;
4461         clp = cstate->clp;
4462         status = nfserr_cb_path_down;
4463         if (!list_empty(&clp->cl_delegations)
4464                         && clp->cl_cb_state != NFSD4_CB_UP)
4465                 goto out;
4466         status = nfs_ok;
4467 out:
4468         return status;
4469 }
4470
/*
 * End the NFSv4 grace period for this net namespace: record completion
 * on stable storage, then stop accepting new reclaims.  Safe to call
 * repeatedly; only the first call after boot does anything.
 */
void
nfsd4_end_grace(struct nfsd_net *nn)
{
        /* do nothing if grace period already ended */
        if (nn->grace_ended)
                return;

        dprintk("NFSD: end of grace period\n");
        nn->grace_ended = true;
        /*
         * If the server goes down again right now, an NFSv4
         * client will still be allowed to reclaim after it comes back up,
         * even if it hasn't yet had a chance to reclaim state this time.
         *
         */
        nfsd4_record_grace_done(nn);
        /*
         * At this point, NFSv4 clients can still reclaim.  But if the
         * server crashes, any that have not yet reclaimed will be out
         * of luck on the next boot.
         *
         * (NFSv4.1+ clients are considered to have reclaimed once they
         * call RECLAIM_COMPLETE.  NFSv4.0 clients are considered to
         * have reclaimed after their first OPEN.)
         */
        locks_end_grace(&nn->nfsd4_manager);
        /*
         * At this point, and once lockd and/or any other containers
         * exit their grace period, further reclaims will fail and
         * regular locking can resume.
         */
}
4503
/*
 * Periodic state reaper.  Expires idle clients, revokes delegations
 * whose recall has timed out, and frees the last-closed stateids of
 * idle openowners.  Returns the number of seconds until the next pass
 * should run (at least NFSD_LAUNDROMAT_MINTIMEOUT).
 *
 * Each LRU list is scanned under its lock; expired entries are moved to
 * a private reaplist so the heavyweight teardown runs unlocked.
 */
static time_t
nfs4_laundromat(struct nfsd_net *nn)
{
        struct nfs4_client *clp;
        struct nfs4_openowner *oo;
        struct nfs4_delegation *dp;
        struct nfs4_ol_stateid *stp;
        struct list_head *pos, *next, reaplist;
        time_t cutoff = get_seconds() - nn->nfsd4_lease;
        time_t t, new_timeo = nn->nfsd4_lease;

        dprintk("NFSD: laundromat service - starting\n");
        nfsd4_end_grace(nn);
        INIT_LIST_HEAD(&reaplist);
        spin_lock(&nn->client_lock);
        list_for_each_safe(pos, next, &nn->client_lru) {
                clp = list_entry(pos, struct nfs4_client, cl_lru);
                if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
                        /* Not yet expired; schedule the next pass for when
                         * this (oldest remaining) entry will expire. */
                        t = clp->cl_time - cutoff;
                        new_timeo = min(new_timeo, t);
                        break;
                }
                if (mark_client_expired_locked(clp)) {
                        /* Still referenced; leave it for a later pass. */
                        dprintk("NFSD: client in use (clientid %08x)\n",
                                clp->cl_clientid.cl_id);
                        continue;
                }
                list_add(&clp->cl_lru, &reaplist);
        }
        spin_unlock(&nn->client_lock);
        /* Tear the expired clients down outside client_lock. */
        list_for_each_safe(pos, next, &reaplist) {
                clp = list_entry(pos, struct nfs4_client, cl_lru);
                dprintk("NFSD: purging unused client (clientid %08x)\n",
                        clp->cl_clientid.cl_id);
                list_del_init(&clp->cl_lru);
                expire_client(clp);
        }
        spin_lock(&state_lock);
        list_for_each_safe(pos, next, &nn->del_recall_lru) {
                dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
                if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
                        t = dp->dl_time - cutoff;
                        new_timeo = min(new_timeo, t);
                        break;
                }
                WARN_ON(!unhash_delegation_locked(dp));
                /* reaplist reuses dl_recall_lru now that dp is unhashed */
                list_add(&dp->dl_recall_lru, &reaplist);
        }
        spin_unlock(&state_lock);
        while (!list_empty(&reaplist)) {
                dp = list_first_entry(&reaplist, struct nfs4_delegation,
                                        dl_recall_lru);
                list_del_init(&dp->dl_recall_lru);
                revoke_delegation(dp);
        }

        spin_lock(&nn->client_lock);
        while (!list_empty(&nn->close_lru)) {
                oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
                                        oo_close_lru);
                if (time_after((unsigned long)oo->oo_time,
                               (unsigned long)cutoff)) {
                        t = oo->oo_time - cutoff;
                        new_timeo = min(new_timeo, t);
                        break;
                }
                list_del_init(&oo->oo_close_lru);
                stp = oo->oo_last_closed_stid;
                oo->oo_last_closed_stid = NULL;
                /* Drop the lock around the final put; it may free state. */
                spin_unlock(&nn->client_lock);
                nfs4_put_stid(&stp->st_stid);
                spin_lock(&nn->client_lock);
        }
        spin_unlock(&nn->client_lock);

        new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
        return new_timeo;
}
4582
4583 static struct workqueue_struct *laundry_wq;
4584 static void laundromat_main(struct work_struct *);
4585
4586 static void
4587 laundromat_main(struct work_struct *laundry)
4588 {
4589         time_t t;
4590         struct delayed_work *dwork = container_of(laundry, struct delayed_work,
4591                                                   work);
4592         struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
4593                                            laundromat_work);
4594
4595         t = nfs4_laundromat(nn);
4596         dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
4597         queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
4598 }
4599
4600 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
4601 {
4602         if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
4603                 return nfserr_bad_stateid;
4604         return nfs_ok;
4605 }
4606
4607 static inline int
4608 access_permit_read(struct nfs4_ol_stateid *stp)
4609 {
4610         return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
4611                 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
4612                 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
4613 }
4614
4615 static inline int
4616 access_permit_write(struct nfs4_ol_stateid *stp)
4617 {
4618         return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
4619                 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
4620 }
4621
4622 static
4623 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
4624 {
4625         __be32 status = nfserr_openmode;
4626
4627         /* For lock stateid's, we test the parent open, not the lock: */
4628         if (stp->st_openstp)
4629                 stp = stp->st_openstp;
4630         if ((flags & WR_STATE) && !access_permit_write(stp))
4631                 goto out;
4632         if ((flags & RD_STATE) && !access_permit_read(stp))
4633                 goto out;
4634         status = nfs_ok;
4635 out:
4636         return status;
4637 }
4638
4639 static inline __be32
4640 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
4641 {
4642         if (ONE_STATEID(stateid) && (flags & RD_STATE))
4643                 return nfs_ok;
4644         else if (opens_in_grace(net)) {
4645                 /* Answer in remaining cases depends on existence of
4646                  * conflicting state; so we must wait out the grace period. */
4647                 return nfserr_grace;
4648         } else if (flags & WR_STATE)
4649                 return nfs4_share_conflict(current_fh,
4650                                 NFS4_SHARE_DENY_WRITE);
4651         else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
4652                 return nfs4_share_conflict(current_fh,
4653                                 NFS4_SHARE_DENY_READ);
4654 }
4655
4656 /*
4657  * Allow READ/WRITE during grace period on recovered state only for files
4658  * that are not able to provide mandatory locking.
4659  */
4660 static inline int
4661 grace_disallows_io(struct net *net, struct inode *inode)
4662 {
4663         return opens_in_grace(net) && mandatory_lock(inode);
4664 }
4665
4666 /* Returns true iff a is later than b: */
4667 static bool stateid_generation_after(stateid_t *a, stateid_t *b)
4668 {
4669         return (s32)(a->si_generation - b->si_generation) > 0;
4670 }
4671
/*
 * Compare a client-supplied stateid generation against our current one.
 * Returns nfs_ok on a match, nfserr_bad_stateid for a generation from
 * the future, nfserr_old_stateid for one from the past.
 */
static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
{
        /*
         * When sessions are used the stateid generation number is ignored
         * when it is zero.
         */
        if (has_session && in->si_generation == 0)
                return nfs_ok;

        if (in->si_generation == ref->si_generation)
                return nfs_ok;

        /* If the client sends us a stateid from the future, it's buggy: */
        if (stateid_generation_after(in, ref))
                return nfserr_bad_stateid;
        /*
         * However, we could see a stateid from the past, even from a
         * non-buggy client.  For example, if the client sends a lock
         * while some IO is outstanding, the lock may bump si_generation
         * while the IO is still in flight.  The client could avoid that
         * situation by waiting for responses on all the IO requests,
         * but better performance may result in retrying IO that
         * receives an old_stateid error if requests are rarely
         * reordered in flight:
         */
        return nfserr_old_stateid;
}
4699
4700 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
4701 {
4702         if (ols->st_stateowner->so_is_open_owner &&
4703             !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
4704                 return nfserr_bad_stateid;
4705         return nfs_ok;
4706 }
4707
/*
 * TEST_STATEID helper: classify @stateid for client @cl without taking
 * a reference.  Returns nfs_ok, nfserr_bad_stateid, nfserr_deleg_revoked
 * or nfserr_old_stateid as appropriate.
 */
static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
        struct nfs4_stid *s;
        __be32 status = nfserr_bad_stateid;

        /* Special stateids are never valid arguments to TEST_STATEID. */
        if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
                CLOSE_STATEID(stateid))
                return status;
        /* Client debugging aid. */
        if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
                char addr_str[INET6_ADDRSTRLEN];
                rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
                                 sizeof(addr_str));
                pr_warn_ratelimited("NFSD: client %s testing state ID "
                                        "with incorrect client ID\n", addr_str);
                return status;
        }
        spin_lock(&cl->cl_lock);
        s = find_stateid_locked(cl, stateid);
        if (!s)
                goto out_unlock;
        status = check_stateid_generation(stateid, &s->sc_stateid, 1);
        if (status)
                goto out_unlock;
        switch (s->sc_type) {
        case NFS4_DELEG_STID:
                status = nfs_ok;
                break;
        case NFS4_REVOKED_DELEG_STID:
                status = nfserr_deleg_revoked;
                break;
        case NFS4_OPEN_STID:
        case NFS4_LOCK_STID:
                status = nfsd4_check_openowner_confirmed(openlockstateid(s));
                break;
        default:
                printk("unknown stateid type %x\n", s->sc_type);
                /* Fallthrough */
        case NFS4_CLOSED_STID:
        case NFS4_CLOSED_DELEG_STID:
                status = nfserr_bad_stateid;
        }
out_unlock:
        spin_unlock(&cl->cl_lock);
        return status;
}
4754
/*
 * Look up @stateid for the compound's client, restricted to the types in
 * @typemask.  On success stores a referenced nfs4_stid in *s (caller must
 * nfs4_put_stid() it).  Revoked delegations are reported as
 * nfserr_deleg_revoked (4.1+) or nfserr_bad_stateid (4.0) unless the
 * caller explicitly asked for them via NFS4_REVOKED_DELEG_STID.
 */
__be32
nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
                     stateid_t *stateid, unsigned char typemask,
                     struct nfs4_stid **s, struct nfsd_net *nn)
{
        __be32 status;
        bool return_revoked = false;

        /*
         *  only return revoked delegations if explicitly asked.
         *  otherwise we report revoked or bad_stateid status.
         */
        if (typemask & NFS4_REVOKED_DELEG_STID)
                return_revoked = true;
        else if (typemask & NFS4_DELEG_STID)
                typemask |= NFS4_REVOKED_DELEG_STID;

        if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
                CLOSE_STATEID(stateid))
                return nfserr_bad_stateid;
        status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
        if (status == nfserr_stale_clientid) {
                /* 4.1+ sessions never see stale-clientid errors. */
                if (cstate->session)
                        return nfserr_bad_stateid;
                return nfserr_stale_stateid;
        }
        if (status)
                return status;
        *s = find_stateid_by_type(cstate->clp, stateid, typemask);
        if (!*s)
                return nfserr_bad_stateid;
        if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
                nfs4_put_stid(*s);
                if (cstate->minorversion)
                        return nfserr_deleg_revoked;
                return nfserr_bad_stateid;
        }
        return nfs_ok;
}
4794
/*
 * Return a referenced struct file backing stateid @s suitable for the
 * access in @flags, or NULL if none is available.  The caller owns the
 * returned reference (see nfs4_check_file(), which fput()s on error).
 */
static struct file *
nfs4_find_file(struct nfs4_stid *s, int flags)
{
        if (!s)
                return NULL;

        switch (s->sc_type) {
        case NFS4_DELEG_STID:
                /* A delegation must always have its file pinned. */
                if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
                        return NULL;
                return get_file(s->sc_file->fi_deleg_file);
        case NFS4_OPEN_STID:
        case NFS4_LOCK_STID:
                if (flags & RD_STATE)
                        return find_readable_file(s->sc_file);
                else
                        return find_writeable_file(s->sc_file);
                break;
        }

        /* Any other stateid type has no file to hand out. */
        return NULL;
}
4817
4818 static __be32
4819 nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
4820 {
4821         __be32 status;
4822
4823         status = nfsd4_check_openowner_confirmed(ols);
4824         if (status)
4825                 return status;
4826         return nfs4_check_openmode(ols, flags);
4827 }
4828
/*
 * Produce an open struct file in *filpp for the access in @flags.
 * Prefer a file already held by stateid @s; otherwise open the current
 * filehandle ourselves and flag that via *tmp_file.  Permission is
 * checked with NFSD_MAY_OWNER_OVERRIDE in the stateid case.
 */
static __be32
nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
                struct file **filpp, bool *tmp_file, int flags)
{
        int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
        struct file *file;
        __be32 status;

        file = nfs4_find_file(s, flags);
        if (file) {
                status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
                                acc | NFSD_MAY_OWNER_OVERRIDE);
                if (status) {
                        /* Drop the reference nfs4_find_file() gave us. */
                        fput(file);
                        return status;
                }

                *filpp = file;
        } else {
                /* No file attached to the stateid: open one ourselves. */
                status = nfsd_open(rqstp, fhp, S_IFREG, acc, filpp);
                if (status)
                        return status;

                if (tmp_file)
                        *tmp_file = true;
        }

        return 0;
}
4858
4859 /*
4860  * Checks for stateid operations
4861  */
4862 __be32
4863 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
4864                 struct nfsd4_compound_state *cstate, stateid_t *stateid,
4865                 int flags, struct file **filpp, bool *tmp_file)
4866 {
4867         struct svc_fh *fhp = &cstate->current_fh;
4868         struct inode *ino = d_inode(fhp->fh_dentry);
4869         struct net *net = SVC_NET(rqstp);
4870         struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4871         struct nfs4_stid *s = NULL;
4872         __be32 status;
4873
4874         if (filpp)
4875                 *filpp = NULL;
4876         if (tmp_file)
4877                 *tmp_file = false;
4878
4879         if (grace_disallows_io(net, ino))
4880                 return nfserr_grace;
4881
4882         if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
4883                 status = check_special_stateids(net, fhp, stateid, flags);
4884                 goto done;
4885         }
4886
4887         status = nfsd4_lookup_stateid(cstate, stateid,
4888                                 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
4889                                 &s, nn);
4890         if (status)
4891                 return status;
4892         status = check_stateid_generation(stateid, &s->sc_stateid,
4893                         nfsd4_has_session(cstate));
4894         if (status)
4895                 goto out;
4896
4897         switch (s->sc_type) {
4898         case NFS4_DELEG_STID:
4899                 status = nfs4_check_delegmode(delegstateid(s), flags);
4900                 break;
4901         case NFS4_OPEN_STID:
4902         case NFS4_LOCK_STID:
4903                 status = nfs4_check_olstateid(fhp, openlockstateid(s), flags);
4904                 break;
4905         default:
4906                 status = nfserr_bad_stateid;
4907                 break;
4908         }
4909         if (status)
4910                 goto out;
4911         status = nfs4_check_fh(fhp, s);
4912
4913 done:
4914         if (!status && filpp)
4915                 status = nfs4_check_file(rqstp, fhp, s, filpp, tmp_file, flags);
4916 out:
4917         if (s)
4918                 nfs4_put_stid(s);
4919         return status;
4920 }
4921
4922 /*
4923  * Test if the stateid is valid
4924  */
4925 __be32
4926 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4927                    struct nfsd4_test_stateid *test_stateid)
4928 {
4929         struct nfsd4_test_stateid_id *stateid;
4930         struct nfs4_client *cl = cstate->session->se_client;
4931
4932         list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
4933                 stateid->ts_id_status =
4934                         nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
4935
4936         return nfs_ok;
4937 }
4938
/*
 * FREE_STATEID helper for lock stateids.  Consumes the caller's
 * reference on @s.  Fails with nfserr_locks_held if the lockowner
 * still holds locks on the file.
 */
static __be32
nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
{
        struct nfs4_ol_stateid *stp = openlockstateid(s);
        __be32 ret;

        /* Serialize against other users of this stateid. */
        mutex_lock(&stp->st_mutex);

        ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
        if (ret)
                goto out;

        ret = nfserr_locks_held;
        if (check_for_locks(stp->st_stid.sc_file,
                            lockowner(stp->st_stateowner)))
                goto out;

        release_lock_stateid(stp);
        ret = nfs_ok;

out:
        mutex_unlock(&stp->st_mutex);
        nfs4_put_stid(s);
        return ret;
}
4964
/*
 * FREE_STATEID (4.1+): release a stateid the client no longer needs.
 * Only lock stateids (with no locks held) and revoked delegations can
 * actually be freed; open stateids and live delegations are refused.
 */
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                   struct nfsd4_free_stateid *free_stateid)
{
        stateid_t *stateid = &free_stateid->fr_stateid;
        struct nfs4_stid *s;
        struct nfs4_delegation *dp;
        struct nfs4_client *cl = cstate->session->se_client;
        __be32 ret = nfserr_bad_stateid;

        spin_lock(&cl->cl_lock);
        s = find_stateid_locked(cl, stateid);
        if (!s)
                goto out_unlock;
        switch (s->sc_type) {
        case NFS4_DELEG_STID:
                ret = nfserr_locks_held;
                break;
        case NFS4_OPEN_STID:
                ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
                if (ret)
                        break;
                ret = nfserr_locks_held;
                break;
        case NFS4_LOCK_STID:
                /* Take a reference so we can drop cl_lock before the
                 * potentially blocking work in nfsd4_free_lock_stateid(). */
                atomic_inc(&s->sc_count);
                spin_unlock(&cl->cl_lock);
                ret = nfsd4_free_lock_stateid(stateid, s);
                goto out;
        case NFS4_REVOKED_DELEG_STID:
                dp = delegstateid(s);
                list_del_init(&dp->dl_recall_lru);
                spin_unlock(&cl->cl_lock);
                nfs4_put_stid(s);
                ret = nfs_ok;
                goto out;
        /* Default falls through and returns nfserr_bad_stateid */
        }
out_unlock:
        spin_unlock(&cl->cl_lock);
out:
        return ret;
}
5008
5009 static inline int
5010 setlkflg (int type)
5011 {
5012         return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
5013                 RD_STATE : WR_STATE;
5014 }
5015
/*
 * Common checks for seqid-mutating operations on @stp: owner seqid,
 * stateid generation and filehandle match.  On nfs_ok the stateid's
 * st_mutex is held and the caller must release it; on any error the
 * mutex has already been dropped (or was never taken).
 */
static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
{
        struct svc_fh *current_fh = &cstate->current_fh;
        struct nfs4_stateowner *sop = stp->st_stateowner;
        __be32 status;

        status = nfsd4_check_seqid(cstate, sop, seqid);
        if (status)
                return status;
        status = nfsd4_lock_ol_stateid(stp);
        if (status != nfs_ok)
                return status;
        status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
        if (status == nfs_ok)
                status = nfs4_check_fh(current_fh, &stp->st_stid);
        if (status != nfs_ok)
                mutex_unlock(&stp->st_mutex);
        return status;
}
5035
5036 /* 
5037  * Checks for sequence id mutating operations. 
5038  */
5039 static __be32
5040 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
5041                          stateid_t *stateid, char typemask,
5042                          struct nfs4_ol_stateid **stpp,
5043                          struct nfsd_net *nn)
5044 {
5045         __be32 status;
5046         struct nfs4_stid *s;
5047         struct nfs4_ol_stateid *stp = NULL;
5048
5049         dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
5050                 seqid, STATEID_VAL(stateid));
5051
5052         *stpp = NULL;
5053         status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
5054         if (status)
5055                 return status;
5056         stp = openlockstateid(s);
5057         nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
5058
5059         status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
5060         if (!status)
5061                 *stpp = stp;
5062         else
5063                 nfs4_put_stid(&stp->st_stid);
5064         return status;
5065 }
5066
/*
 * As nfs4_preprocess_seqid_op(), but restricted to open stateids whose
 * openowner has already been confirmed.  On nfs_ok, *stpp is referenced
 * and locked as above.
 */
static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
                                                 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
{
        __be32 status;
        struct nfs4_openowner *oo;
        struct nfs4_ol_stateid *stp;

        status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
                                                NFS4_OPEN_STID, &stp, nn);
        if (status)
                return status;
        oo = openowner(stp->st_stateowner);
        if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
                /* Unconfirmed owner: undo the lock/reference we took. */
                mutex_unlock(&stp->st_mutex);
                nfs4_put_stid(&stp->st_stid);
                return nfserr_bad_stateid;
        }
        *stpp = stp;
        return nfs_ok;
}
5087
/*
 * OPEN_CONFIRM (4.0 only): confirm a new openowner.  Marks the owner
 * confirmed, bumps the stateid and creates the client's stable-storage
 * record.  Confirming an already-confirmed owner is nfserr_bad_stateid.
 */
__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                   struct nfsd4_open_confirm *oc)
{
        __be32 status;
        struct nfs4_openowner *oo;
        struct nfs4_ol_stateid *stp;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

        dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
                        cstate->current_fh.fh_dentry);

        status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
        if (status)
                return status;

        status = nfs4_preprocess_seqid_op(cstate,
                                        oc->oc_seqid, &oc->oc_req_stateid,
                                        NFS4_OPEN_STID, &stp, nn);
        if (status)
                goto out;
        oo = openowner(stp->st_stateowner);
        status = nfserr_bad_stateid;
        if (oo->oo_flags & NFS4_OO_CONFIRMED) {
                mutex_unlock(&stp->st_mutex);
                goto put_stateid;
        }
        oo->oo_flags |= NFS4_OO_CONFIRMED;
        nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
        mutex_unlock(&stp->st_mutex);
        dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
                __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));

        nfsd4_client_record_create(oo->oo_owner.so_client);
        status = nfs_ok;
put_stateid:
        nfs4_put_stid(&stp->st_stid);
out:
        nfsd4_bump_seqid(cstate, status);
        return status;
}
5129
5130 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
5131 {
5132         if (!test_access(access, stp))
5133                 return;
5134         nfs4_file_put_access(stp->st_stid.sc_file, access);
5135         clear_access(access, stp);
5136 }
5137
/*
 * OPEN_DOWNGRADE helper: shed every access bit not covered by the
 * requested @to_access mode.  BOTH keeps everything; anything else
 * is a caller bug (WARN).
 */
static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
{
        switch (to_access) {
        case NFS4_SHARE_ACCESS_READ:
                nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
                nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
                break;
        case NFS4_SHARE_ACCESS_WRITE:
                nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
                nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
                break;
        case NFS4_SHARE_ACCESS_BOTH:
                /* Nothing to shed. */
                break;
        default:
                WARN_ON_ONCE(1);
        }
}
5155
/*
 * OPEN_DOWNGRADE: narrow the share access/deny bits of an open stateid.
 * The requested bits must be a subset of what the stateid already holds;
 * anything else gets NFS4ERR_INVAL.
 */
__be32
nfsd4_open_downgrade(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_open_downgrade *od)
{
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
			cstate->current_fh.fh_dentry);

	/* We don't yet support WANT bits: */
	if (od->od_deleg_want)
		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
			od->od_deleg_want);

	/* On success stp is returned with st_mutex held and a reference taken */
	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
					&od->od_stateid, &stp, nn);
	if (status)
		goto out;
	status = nfserr_inval;
	if (!test_access(od->od_share_access, stp)) {
		dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
			stp->st_access_bmap, od->od_share_access);
		goto put_stateid;
	}
	if (!test_deny(od->od_share_deny, stp)) {
		dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
			stp->st_deny_bmap, od->od_share_deny);
		goto put_stateid;
	}
	/* Drop access the client no longer wants, then narrow the deny bits */
	nfs4_stateid_downgrade(stp, od->od_share_access);
	reset_union_bmap_deny(od->od_share_deny, stp);
	nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
	status = nfs_ok;
put_stateid:
	mutex_unlock(&stp->st_mutex);
	nfs4_put_stid(&stp->st_stid);
out:
	/* seqid must advance on any seqid-mutating outcome (v4.0 replay) */
	nfsd4_bump_seqid(cstate, status);
	return status;
}
5199
/*
 * Unhash an open stateid being CLOSEd.  For NFSv4.1+ clients the
 * stateid can be released immediately; for v4.0 it must instead be
 * moved to the close LRU so a retransmitted CLOSE can still be matched
 * against it.  In both cases the reaplist is drained outside cl_lock.
 */
static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
	struct nfs4_client *clp = s->st_stid.sc_client;
	bool unhashed;
	LIST_HEAD(reaplist);

	spin_lock(&clp->cl_lock);
	unhashed = unhash_open_stateid(s, &reaplist);

	if (clp->cl_minorversion) {
		if (unhashed)
			put_ol_stateid_locked(s, &reaplist);
		spin_unlock(&clp->cl_lock);
		free_ol_stateid_reaplist(&reaplist);
	} else {
		spin_unlock(&clp->cl_lock);
		free_ol_stateid_reaplist(&reaplist);
		if (unhashed)
			move_to_close_lru(s, clp->net);
	}
}
5221
/*
 * CLOSE an open stateid.
 * nfs4_unlock_state() called after encode
 */
__be32
nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_close *close)
{
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: nfsd4_close on file %pd\n",
			cstate->current_fh.fh_dentry);

	/* NFS4_CLOSED_STID is accepted too so a replayed CLOSE is handled */
	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
					&close->cl_stateid,
					NFS4_OPEN_STID|NFS4_CLOSED_STID,
					&stp, nn);
	nfsd4_bump_seqid(cstate, status);
	if (status)
		goto out;

	stp->st_stid.sc_type = NFS4_CLOSED_STID;
	nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);

	nfsd4_close_open_stateid(stp);
	mutex_unlock(&stp->st_mutex);

	/* See RFC5661 section 18.2.4: return the special close stateid */
	if (stp->st_stid.sc_client->cl_minorversion)
		memcpy(&close->cl_stateid, &close_stateid,
				sizeof(close->cl_stateid));

	/* put reference from nfs4_preprocess_seqid_op */
	nfs4_put_stid(&stp->st_stid);
out:
	return status;
}
5261
/*
 * DELEGRETURN: the client hands a delegation stateid back to the server.
 */
__be32
nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_delegreturn *dr)
{
	struct nfs4_delegation *dp;
	stateid_t *stateid = &dr->dr_stateid;
	struct nfs4_stid *s;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		return status;

	/* takes a reference on s, dropped below via dl_stid */
	status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
	if (status)
		goto out;
	dp = delegstateid(s);
	status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status)
		goto put_stateid;

	destroy_delegation(dp);
put_stateid:
	nfs4_put_stid(&dp->dl_stid);
out:
	return status;
}
5289
5290 static inline u64
5291 end_offset(u64 start, u64 len)
5292 {
5293         u64 end;
5294
5295         end = start + len;
5296         return end >= start ? end: NFS4_MAX_UINT64;
5297 }
5298
5299 /* last octet in a range */
5300 static inline u64
5301 last_byte_offset(u64 start, u64 len)
5302 {
5303         u64 end;
5304
5305         WARN_ON_ONCE(!len);
5306         end = start + len;
5307         return end > start ? end - 1: NFS4_MAX_UINT64;
5308 }
5309
/*
 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
 * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
 * locking, this prevents us from being completely protocol-compliant.  The
 * real solution to this problem is to start using unsigned file offsets in
 * the VFS, but this is a very deep change!
 */
static inline void
nfs4_transform_lock_offset(struct file_lock *lock)
{
	/* Clamp offsets that went negative after the u64->loff_t cast */
	if (lock->fl_start < 0)
		lock->fl_start = OFFSET_MAX;
	if (lock->fl_end < 0)
		lock->fl_end = OFFSET_MAX;
}
5326
5327 static fl_owner_t
5328 nfsd4_fl_get_owner(fl_owner_t owner)
5329 {
5330         struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
5331
5332         nfs4_get_stateowner(&lo->lo_owner);
5333         return owner;
5334 }
5335
5336 static void
5337 nfsd4_fl_put_owner(fl_owner_t owner)
5338 {
5339         struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
5340
5341         if (lo)
5342                 nfs4_put_stateowner(&lo->lo_owner);
5343 }
5344
/*
 * Lock-manager ops for POSIX locks set on behalf of NFSv4 lockowners;
 * fl_lmops == &nfsd_posix_mng_ops also identifies our own locks in
 * conflict reporting (see nfs4_set_lock_denied).
 */
static const struct lock_manager_operations nfsd_posix_mng_ops  = {
	.lm_get_owner = nfsd4_fl_get_owner,
	.lm_put_owner = nfsd4_fl_put_owner,
};
5349
/*
 * Fill a LOCK/LOCKT "denied" reply from the conflicting file_lock.
 * When the conflicting lock belongs to one of our own NFSv4 lockowners
 * we can report the real owner string and clientid; otherwise (or when
 * the owner copy fails) report an anonymous owner with a zero clientid.
 */
static inline void
nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
{
	struct nfs4_lockowner *lo;

	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
		lo = (struct nfs4_lockowner *) fl->fl_owner;
		deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
					lo->lo_owner.so_owner.len, GFP_KERNEL);
		if (!deny->ld_owner.data)
			/* We just don't care that much */
			goto nevermind;
		deny->ld_owner.len = lo->lo_owner.so_owner.len;
		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
	} else {
nevermind:
		deny->ld_owner.len = 0;
		deny->ld_owner.data = NULL;
		deny->ld_clientid.cl_boot = 0;
		deny->ld_clientid.cl_id = 0;
	}
	deny->ld_start = fl->fl_start;
	/* length NFS4_MAX_UINT64 means "to end of file" */
	deny->ld_length = NFS4_MAX_UINT64;
	if (fl->fl_end != NFS4_MAX_UINT64)
		deny->ld_length = fl->fl_end - fl->fl_start + 1;
	deny->ld_type = NFS4_READ_LT;
	if (fl->fl_type != F_RDLCK)
		deny->ld_type = NFS4_WRITE_LT;
}
5379
/*
 * Find a lockowner of @clp matching @owner in the per-client owner-string
 * hash; openowners sharing the bucket are skipped.  Returns a referenced
 * lockowner or NULL.  Caller must hold clp->cl_lock.
 */
static struct nfs4_lockowner *
find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
{
	unsigned int strhashval = ownerstr_hashval(owner);
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
			    so_strhash) {
		if (so->so_is_open_owner)
			continue;
		if (same_owner_str(so, owner))
			return lockowner(nfs4_get_stateowner(so));
	}
	return NULL;
}
5397
5398 static struct nfs4_lockowner *
5399 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
5400 {
5401         struct nfs4_lockowner *lo;
5402
5403         spin_lock(&clp->cl_lock);
5404         lo = find_lockowner_str_locked(clp, owner);
5405         spin_unlock(&clp->cl_lock);
5406         return lo;
5407 }
5408
/* so_unhash callback for lockowners; caller holds cl_lock. */
static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
{
	unhash_lockowner_locked(lockowner(sop));
}
5413
5414 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
5415 {
5416         struct nfs4_lockowner *lo = lockowner(sop);
5417
5418         kmem_cache_free(lockowner_slab, lo);
5419 }
5420
/* Stateowner ops shared by all lockowners. */
static const struct nfs4_stateowner_operations lockowner_ops = {
	.so_unhash =	nfs4_unhash_lockowner,
	.so_free =	nfs4_free_lockowner,
};
5425
/*
 * Alloc a lock owner structure.
 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
 * occurred.
 *
 * strhashval = ownerstr_hashval
 *
 * Returns a referenced lockowner, or NULL on allocation failure.  If a
 * racing thread hashed the same owner first, the fresh allocation is
 * freed and the already-hashed (referenced) owner is returned instead.
 */
static struct nfs4_lockowner *
alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
			   struct nfs4_ol_stateid *open_stp,
			   struct nfsd4_lock *lock)
{
	struct nfs4_lockowner *lo, *ret;

	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
	if (!lo)
		return NULL;
	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
	lo->lo_owner.so_is_open_owner = 0;
	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
	lo->lo_owner.so_ops = &lockowner_ops;
	spin_lock(&clp->cl_lock);
	/* re-check under cl_lock in case another thread beat us to it */
	ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
	if (ret == NULL) {
		list_add(&lo->lo_owner.so_strhash,
			 &clp->cl_ownerstr_hashtbl[strhashval]);
		ret = lo;
	} else
		nfs4_free_stateowner(&lo->lo_owner);

	spin_unlock(&clp->cl_lock);
	return ret;
}
5459
/*
 * Initialize a freshly allocated lock stateid and hash it onto the
 * lockowner, the parent open stateid and the file.  Caller must hold
 * the client's cl_lock.  The extra sc_count reference is for the caller.
 */
static void
init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
		  struct nfs4_file *fp, struct inode *inode,
		  struct nfs4_ol_stateid *open_stp)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	atomic_inc(&stp->st_stid.sc_count);
	stp->st_stid.sc_type = NFS4_LOCK_STID;
	stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
	get_nfs4_file(fp);
	stp->st_stid.sc_file = fp;
	stp->st_access_bmap = 0;
	/* deny bits are inherited from the open stateid */
	stp->st_deny_bmap = open_stp->st_deny_bmap;
	stp->st_openstp = open_stp;
	mutex_init(&stp->st_mutex);
	list_add(&stp->st_locks, &open_stp->st_locks);
	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
	spin_lock(&fp->fi_lock);
	list_add(&stp->st_perfile, &fp->fi_stateids);
	spin_unlock(&fp->fi_lock);
}
5484
/*
 * Find an existing lock stateid of @lo for file @fp.  Returns it with
 * an extra sc_count reference, or NULL.  Caller must hold cl_lock.
 */
static struct nfs4_ol_stateid *
find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *lst;
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
		if (lst->st_stid.sc_file == fp) {
			atomic_inc(&lst->st_stid.sc_count);
			return lst;
		}
	}
	return NULL;
}
5501
/*
 * Find the lockowner's lock stateid for this file, or create one.
 * Allocation happens outside cl_lock, so the lookup is repeated under
 * the lock afterwards; a racing creation wins and our allocation is
 * dropped.  *new is set only when our allocation was installed.
 * Returns a referenced stateid, or NULL on allocation failure.
 */
static struct nfs4_ol_stateid *
find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
			    struct inode *inode, struct nfs4_ol_stateid *ost,
			    bool *new)
{
	struct nfs4_stid *ns = NULL;
	struct nfs4_ol_stateid *lst;
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *clp = oo->oo_owner.so_client;

	spin_lock(&clp->cl_lock);
	lst = find_lock_stateid(lo, fi);
	if (lst == NULL) {
		spin_unlock(&clp->cl_lock);
		ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
		if (ns == NULL)
			return NULL;

		spin_lock(&clp->cl_lock);
		/* re-check: someone may have created it while we allocated */
		lst = find_lock_stateid(lo, fi);
		if (likely(!lst)) {
			lst = openlockstateid(ns);
			init_lock_stateid(lst, lo, fi, inode, ost);
			ns = NULL;
			*new = true;
		}
	}
	spin_unlock(&clp->cl_lock);
	if (ns)
		nfs4_put_stid(ns);
	return lst;
}
5534
5535 static int
5536 check_lock_length(u64 offset, u64 length)
5537 {
5538         return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
5539                 (length > ~offset)));
5540 }
5541
/*
 * Ensure the lock stateid holds the given access bit, taking a file
 * access reference the first time.  Caller must hold fp->fi_lock.
 */
static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
{
	struct nfs4_file *fp = lock_stp->st_stid.sc_file;

	lockdep_assert_held(&fp->fi_lock);

	/* already holds this access — nothing to do */
	if (test_access(access, lock_stp))
		return;
	__nfs4_file_get_access(fp, access);
	set_access(access, lock_stp);
}
5553
/*
 * For a "new lockowner" LOCK request: find or create the lockowner and
 * its lock stateid for the current file.  On success *plst is a
 * referenced stateid with st_mutex held, and *new tells the caller
 * whether the stateid was freshly created (so it can be released on a
 * later error).
 */
static __be32
lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
			    struct nfs4_ol_stateid *ost,
			    struct nfsd4_lock *lock,
			    struct nfs4_ol_stateid **plst, bool *new)
{
	__be32 status;
	struct nfs4_file *fi = ost->st_stid.sc_file;
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *cl = oo->oo_owner.so_client;
	struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
	struct nfs4_lockowner *lo;
	struct nfs4_ol_stateid *lst;
	unsigned int strhashval;
	bool hashed;

	lo = find_lockowner_str(cl, &lock->lk_new_owner);
	if (!lo) {
		strhashval = ownerstr_hashval(&lock->lk_new_owner);
		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
		if (lo == NULL)
			return nfserr_jukebox;
	} else {
		/* with an existing lockowner, seqids must be the same */
		status = nfserr_bad_seqid;
		if (!cstate->minorversion &&
		    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
			goto out;
	}

retry:
	lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
	if (lst == NULL) {
		status = nfserr_jukebox;
		goto out;
	}

	mutex_lock(&lst->st_mutex);

	/* See if it's still hashed to avoid race with FREE_STATEID */
	spin_lock(&cl->cl_lock);
	hashed = !list_empty(&lst->st_perfile);
	spin_unlock(&cl->cl_lock);

	if (!hashed) {
		/* lost the race — drop this one and start over */
		mutex_unlock(&lst->st_mutex);
		nfs4_put_stid(&lst->st_stid);
		goto retry;
	}
	status = nfs_ok;
	*plst = lst;
out:
	nfs4_put_stateowner(&lo->lo_owner);
	return status;
}
5609
/*
 *  LOCK operation
 *
 *  Validates the open (new lockowner) or lock (existing lockowner)
 *  stateid, checks grace-period rules, then asks the VFS to set a
 *  POSIX lock.  On conflict the denied details are returned to the
 *  client.  On exit lock_stp (if obtained) holds st_mutex, which is
 *  dropped here along with its reference.
 */
__be32
nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	   struct nfsd4_lock *lock)
{
	struct nfs4_openowner *open_sop = NULL;
	struct nfs4_lockowner *lock_sop = NULL;
	struct nfs4_ol_stateid *lock_stp = NULL;
	struct nfs4_ol_stateid *open_stp = NULL;
	struct nfs4_file *fp;
	struct file *filp = NULL;
	struct file_lock *file_lock = NULL;
	struct file_lock *conflock = NULL;
	__be32 status = 0;
	int lkflg;
	int err;
	bool new = false;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
		(long long) lock->lk_offset,
		(long long) lock->lk_length);

	if (check_lock_length(lock->lk_offset, lock->lk_length))
		 return nfserr_inval;

	if ((status = fh_verify(rqstp, &cstate->current_fh,
				S_IFREG, NFSD_MAY_LOCK))) {
		dprintk("NFSD: nfsd4_lock: permission denied!\n");
		return status;
	}

	if (lock->lk_is_new) {
		if (nfsd4_has_session(cstate))
			/* See rfc 5661 18.10.3: given clientid is ignored: */
			memcpy(&lock->lk_new_clientid,
				&cstate->session->se_client->cl_clientid,
				sizeof(clientid_t));

		status = nfserr_stale_clientid;
		if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
			goto out;

		/* validate and update open stateid and open seqid */
		status = nfs4_preprocess_confirmed_seqid_op(cstate,
				        lock->lk_new_open_seqid,
		                        &lock->lk_new_open_stateid,
					&open_stp, nn);
		if (status)
			goto out;
		mutex_unlock(&open_stp->st_mutex);
		open_sop = openowner(open_stp->st_stateowner);
		status = nfserr_bad_stateid;
		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
						&lock->lk_new_clientid))
			goto out;
		/* returns lock_stp with st_mutex held on success */
		status = lookup_or_create_lock_state(cstate, open_stp, lock,
							&lock_stp, &new);
	} else {
		status = nfs4_preprocess_seqid_op(cstate,
				       lock->lk_old_lock_seqid,
				       &lock->lk_old_lock_stateid,
				       NFS4_LOCK_STID, &lock_stp, nn);
	}
	if (status)
		goto out;
	lock_sop = lockowner(lock_stp->st_stateowner);

	/* lock type must be compatible with the original OPEN mode */
	lkflg = setlkflg(lock->lk_type);
	status = nfs4_check_openmode(lock_stp, lkflg);
	if (status)
		goto out;

	/* only reclaims are allowed during grace; reclaims only during grace */
	status = nfserr_grace;
	if (locks_in_grace(net) && !lock->lk_reclaim)
		goto out;
	status = nfserr_no_grace;
	if (!locks_in_grace(net) && lock->lk_reclaim)
		goto out;

	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	fp = lock_stp->st_stid.sc_file;
	switch (lock->lk_type) {
		case NFS4_READ_LT:
		case NFS4_READW_LT:
			spin_lock(&fp->fi_lock);
			filp = find_readable_file_locked(fp);
			if (filp)
				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
			spin_unlock(&fp->fi_lock);
			file_lock->fl_type = F_RDLCK;
			break;
		case NFS4_WRITE_LT:
		case NFS4_WRITEW_LT:
			spin_lock(&fp->fi_lock);
			filp = find_writeable_file_locked(fp);
			if (filp)
				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
			spin_unlock(&fp->fi_lock);
			file_lock->fl_type = F_WRLCK;
			break;
		default:
			status = nfserr_inval;
		goto out;
	}
	if (!filp) {
		/* file wasn't opened with a compatible access mode */
		status = nfserr_openmode;
		goto out;
	}

	/* fl_owner reference is dropped via lm_put_owner when the lock dies */
	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = filp;
	file_lock->fl_flags = FL_POSIX;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = lock->lk_offset;
	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
	nfs4_transform_lock_offset(file_lock);

	conflock = locks_alloc_lock();
	if (!conflock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
	switch (-err) {
	case 0: /* success! */
		nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
		status = 0;
		break;
	case (EAGAIN):		/* conflock holds conflicting lock */
		status = nfserr_denied;
		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
		nfs4_set_lock_denied(conflock, &lock->lk_denied);
		break;
	case (EDEADLK):
		status = nfserr_deadlock;
		break;
	default:
		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
		status = nfserrno(err);
		break;
	}
out:
	if (filp)
		fput(filp);
	if (lock_stp) {
		/* Bump seqid manually if the 4.0 replay owner is openowner */
		if (cstate->replay_owner &&
		    cstate->replay_owner != &lock_sop->lo_owner &&
		    seqid_mutating_err(ntohl(status)))
			lock_sop->lo_owner.so_seqid++;

		mutex_unlock(&lock_stp->st_mutex);

		/*
		 * If this is a new, never-before-used stateid, and we are
		 * returning an error, then just go ahead and release it.
		 */
		if (status && new)
			release_lock_stateid(lock_stp);

		nfs4_put_stid(&lock_stp->st_stid);
	}
	if (open_stp)
		nfs4_put_stid(&open_stp->st_stid);
	nfsd4_bump_seqid(cstate, status);
	if (file_lock)
		locks_free_lock(file_lock);
	if (conflock)
		locks_free_lock(conflock);
	return status;
}
5794
/*
 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
 * so we do a temporary open here just to get an open file to pass to
 * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
 * inode operation.)
 */
static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
{
	struct file *file;
	__be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
	if (!err) {
		/* on conflict, *lock is overwritten with the conflicting lock */
		err = nfserrno(vfs_test_lock(file, lock));
		fput(file);
	}
	return err;
}
5811
/*
 * LOCKT operation
 *
 * Test whether a lock could be obtained, without taking it.  Uses a
 * throwaway file_lock and a temporary open (see nfsd_test_lock); on
 * conflict, the denied details are reported back to the client.
 */
__be32
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_lockt *lockt)
{
	struct file_lock *file_lock = NULL;
	struct nfs4_lockowner *lo = NULL;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	/* no lock testing while in grace */
	if (locks_in_grace(SVC_NET(rqstp)))
		return nfserr_grace;

	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
		 return nfserr_inval;

	if (!nfsd4_has_session(cstate)) {
		status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
		if (status)
			goto out;
	}

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		goto out;

	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	switch (lockt->lt_type) {
		case NFS4_READ_LT:
		case NFS4_READW_LT:
			file_lock->fl_type = F_RDLCK;
		break;
		case NFS4_WRITE_LT:
		case NFS4_WRITEW_LT:
			file_lock->fl_type = F_WRLCK;
		break;
		default:
			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
			status = nfserr_inval;
		goto out;
	}

	/* a known lockowner lets us ignore the client's own locks */
	lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
	if (lo)
		file_lock->fl_owner = (fl_owner_t)lo;
	file_lock->fl_pid = current->tgid;
	file_lock->fl_flags = FL_POSIX;

	file_lock->fl_start = lockt->lt_offset;
	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);

	nfs4_transform_lock_offset(file_lock);

	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
	if (status)
		goto out;

	/* F_UNLCK means no conflict; anything else is the conflicting lock */
	if (file_lock->fl_type != F_UNLCK) {
		status = nfserr_denied;
		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
	}
out:
	if (lo)
		nfs4_put_stateowner(&lo->lo_owner);
	if (file_lock)
		locks_free_lock(file_lock);
	return status;
}
5887
/*
 * LOCKU operation: release a byte range held under a lock stateid by
 * issuing an F_UNLCK request to the VFS.
 */
__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_locku *locku)
{
	struct nfs4_ol_stateid *stp;
	struct file *filp = NULL;
	struct file_lock *file_lock = NULL;
	__be32 status;
	int err;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
		(long long) locku->lu_offset,
		(long long) locku->lu_length);

	if (check_lock_length(locku->lu_offset, locku->lu_length))
		 return nfserr_inval;

	/* on success stp is returned with st_mutex held and a reference taken */
	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
					&locku->lu_stateid, NFS4_LOCK_STID,
					&stp, nn);
	if (status)
		goto out;
	filp = find_any_file(stp->st_stid.sc_file);
	if (!filp) {
		status = nfserr_lock_range;
		goto put_stateid;
	}
	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto fput;
	}

	file_lock->fl_type = F_UNLCK;
	/* fl_owner reference is dropped by lm_put_owner */
	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = filp;
	file_lock->fl_flags = FL_POSIX;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = locku->lu_offset;

	file_lock->fl_end = last_byte_offset(locku->lu_offset,
						locku->lu_length);
	nfs4_transform_lock_offset(file_lock);

	err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
	if (err) {
		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
		goto out_nfserr;
	}
	nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
fput:
	fput(filp);
put_stateid:
	mutex_unlock(&stp->st_mutex);
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	if (file_lock)
		locks_free_lock(file_lock);
	return status;

out_nfserr:
	status = nfserrno(err);
	goto fput;
}
5956
/*
 * returns
 * 	true:  locks held by lockowner
 * 	false: no locks held by lockowner
 *
 * Walks the inode's POSIX lock list looking for any lock whose fl_owner
 * is this lockowner.  Used by RELEASE_LOCKOWNER to refuse release while
 * locks remain.
 */
static bool
check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{
	struct file_lock *fl;
	int status = false;
	struct file *filp = find_any_file(fp);
	struct inode *inode;
	struct file_lock_context *flctx;

	if (!filp) {
		/* Any valid lock stateid should have some sort of access */
		WARN_ON_ONCE(1);
		return status;
	}

	inode = file_inode(filp);
	flctx = inode->i_flctx;

	/* lockless emptiness check first; take flc_lock only if needed */
	if (flctx && !list_empty_careful(&flctx->flc_posix)) {
		spin_lock(&flctx->flc_lock);
		list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
			if (fl->fl_owner == (fl_owner_t)lowner) {
				status = true;
				break;
			}
		}
		spin_unlock(&flctx->flc_lock);
	}
	fput(filp);
	return status;
}
5993
/*
 * RELEASE_LOCKOWNER: find the lockowner named by the client and, if it
 * holds no locks, unhash it and free all of its lock stateids.
 *
 * The whole lookup-check-unhash sequence runs under clp->cl_lock so a
 * concurrent LOCK cannot re-find the owner between the check and the
 * teardown.  The stateids themselves are only collected on @reaplist
 * under the lock; they are freed after it is dropped.
 */
__be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			struct nfsd4_release_lockowner *rlockowner)
{
	clientid_t *clid = &rlockowner->rl_clientid;
	struct nfs4_stateowner *sop;
	struct nfs4_lockowner *lo = NULL;
	struct nfs4_ol_stateid *stp;
	struct xdr_netobj *owner = &rlockowner->rl_owner;
	unsigned int hashval = ownerstr_hashval(owner);
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct nfs4_client *clp;
	LIST_HEAD (reaplist);

	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
		clid->cl_boot, clid->cl_id);

	/* Resolve the clientid; sets cstate->clp on success. */
	status = lookup_clientid(clid, cstate, nn);
	if (status)
		return status;

	clp = cstate->clp;
	/* Find the matching lock stateowner */
	spin_lock(&clp->cl_lock);
	list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {

		/* Open owners share the hash table; skip them. */
		if (sop->so_is_open_owner || !same_owner_str(sop, owner))
			continue;

		/* see if there are still any locks associated with it */
		lo = lockowner(sop);
		list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
			/*
			 * NOTE(review): check_for_locks() runs here with
			 * cl_lock (a spinlock) held — verify nothing in it
			 * can sleep.
			 */
			if (check_for_locks(stp->st_stid.sc_file, lo)) {
				/* RFC-mandated error when locks remain. */
				status = nfserr_locks_held;
				spin_unlock(&clp->cl_lock);
				return status;
			}
		}

		/* Pin the owner so it outlives the unlock below. */
		nfs4_get_stateowner(sop);
		break;
	}
	if (!lo) {
		/* No such lockowner: not an error for this operation. */
		spin_unlock(&clp->cl_lock);
		return status;
	}

	unhash_lockowner_locked(lo);
	while (!list_empty(&lo->lo_owner.so_stateids)) {
		stp = list_first_entry(&lo->lo_owner.so_stateids,
				       struct nfs4_ol_stateid,
				       st_perstateowner);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	/* Actual freeing happens outside the spinlock. */
	free_ol_stateid_reaplist(&reaplist);
	nfs4_put_stateowner(&lo->lo_owner);

	return status;
}
6058
6059 static inline struct nfs4_client_reclaim *
6060 alloc_reclaim(void)
6061 {
6062         return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
6063 }
6064
6065 bool
6066 nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
6067 {
6068         struct nfs4_client_reclaim *crp;
6069
6070         crp = nfsd4_find_reclaim_client(name, nn);
6071         return (crp && crp->cr_clp);
6072 }
6073
6074 /*
6075  * failure => all reset bets are off, nfserr_no_grace...
6076  */
6077 struct nfs4_client_reclaim *
6078 nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
6079 {
6080         unsigned int strhashval;
6081         struct nfs4_client_reclaim *crp;
6082
6083         dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
6084         crp = alloc_reclaim();
6085         if (crp) {
6086                 strhashval = clientstr_hashval(name);
6087                 INIT_LIST_HEAD(&crp->cr_strhash);
6088                 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
6089                 memcpy(crp->cr_recdir, name, HEXDIR_LEN);
6090                 crp->cr_clp = NULL;
6091                 nn->reclaim_str_hashtbl_size++;
6092         }
6093         return crp;
6094 }
6095
6096 void
6097 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
6098 {
6099         list_del(&crp->cr_strhash);
6100         kfree(crp);
6101         nn->reclaim_str_hashtbl_size--;
6102 }
6103
6104 void
6105 nfs4_release_reclaim(struct nfsd_net *nn)
6106 {
6107         struct nfs4_client_reclaim *crp = NULL;
6108         int i;
6109
6110         for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6111                 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
6112                         crp = list_entry(nn->reclaim_str_hashtbl[i].next,
6113                                         struct nfs4_client_reclaim, cr_strhash);
6114                         nfs4_remove_reclaim_record(crp, nn);
6115                 }
6116         }
6117         WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
6118 }
6119
6120 /*
6121  * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
6122 struct nfs4_client_reclaim *
6123 nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
6124 {
6125         unsigned int strhashval;
6126         struct nfs4_client_reclaim *crp = NULL;
6127
6128         dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir);
6129
6130         strhashval = clientstr_hashval(recdir);
6131         list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
6132                 if (same_name(crp->cr_recdir, recdir)) {
6133                         return crp;
6134                 }
6135         }
6136         return NULL;
6137 }
6138
6139 /*
6140 * Called from OPEN. Look for clientid in reclaim list.
6141 */
6142 __be32
6143 nfs4_check_open_reclaim(clientid_t *clid,
6144                 struct nfsd4_compound_state *cstate,
6145                 struct nfsd_net *nn)
6146 {
6147         __be32 status;
6148
6149         /* find clientid in conf_id_hashtbl */
6150         status = lookup_clientid(clid, cstate, nn);
6151         if (status)
6152                 return nfserr_reclaim_bad;
6153
6154         if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
6155                 return nfserr_no_grace;
6156
6157         if (nfsd4_client_record_check(cstate->clp))
6158                 return nfserr_reclaim_bad;
6159
6160         return nfs_ok;
6161 }
6162
6163 #ifdef CONFIG_NFSD_FAULT_INJECTION
/* Drop a reference taken on @clp by the fault-injection walkers. */
static inline void
put_client(struct nfs4_client *clp)
{
	atomic_dec(&clp->cl_refcount);
}
6169
6170 static struct nfs4_client *
6171 nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
6172 {
6173         struct nfs4_client *clp;
6174         struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6175                                           nfsd_net_id);
6176
6177         if (!nfsd_netns_ready(nn))
6178                 return NULL;
6179
6180         list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6181                 if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
6182                         return clp;
6183         }
6184         return NULL;
6185 }
6186
6187 u64
6188 nfsd_inject_print_clients(void)
6189 {
6190         struct nfs4_client *clp;
6191         u64 count = 0;
6192         struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6193                                           nfsd_net_id);
6194         char buf[INET6_ADDRSTRLEN];
6195
6196         if (!nfsd_netns_ready(nn))
6197                 return 0;
6198
6199         spin_lock(&nn->client_lock);
6200         list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6201                 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
6202                 pr_info("NFS Client: %s\n", buf);
6203                 ++count;
6204         }
6205         spin_unlock(&nn->client_lock);
6206
6207         return count;
6208 }
6209
/*
 * Fault injection: expire the client with socket address @addr.
 * Returns the number of clients expired (0 or 1).
 */
u64
nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
					  nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp) {
		/* Only expire if nothing else holds the client busy. */
		if (mark_client_expired_locked(clp) == nfs_ok)
			++count;
		else
			clp = NULL;
	}
	spin_unlock(&nn->client_lock);

	/*
	 * Tear the client down only after dropping client_lock —
	 * presumably expire_client() may block; keep this ordering.
	 */
	if (clp)
		expire_client(clp);

	return count;
}
6236
/*
 * Fault injection: expire up to @max clients (0 means all of them).
 * Returns the number actually expired.
 */
u64
nfsd_inject_forget_clients(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
		if (mark_client_expired_locked(clp) == nfs_ok) {
			/* Move off the LRU; destruction happens after unlock. */
			list_add(&clp->cl_lru, &reaplist);
			if (max != 0 && ++count >= max)
				break;
		}
	}
	spin_unlock(&nn->client_lock);

	/* Destroy collected clients outside the spinlock. */
	list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
		expire_client(clp);

	return count;
}
6264
6265 static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
6266                              const char *type)
6267 {
6268         char buf[INET6_ADDRSTRLEN];
6269         rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
6270         printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
6271 }
6272
6273 static void
6274 nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
6275                              struct list_head *collect)
6276 {
6277         struct nfs4_client *clp = lst->st_stid.sc_client;
6278         struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6279                                           nfsd_net_id);
6280
6281         if (!collect)
6282                 return;
6283
6284         lockdep_assert_held(&nn->client_lock);
6285         atomic_inc(&clp->cl_refcount);
6286         list_add(&lst->st_locks, collect);
6287 }
6288
/*
 * Walk every lock stateid of @clp: for each openowner, each of its open
 * stateids, and each lock stateid hanging off those.  If @func is given
 * it is applied to each lock stateid; stateids for which it returns true
 * are added to @collect (pinning the client).  Stops after @max visits
 * (0 means no limit) and returns the number visited.
 */
static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
				    struct list_head *collect,
				    bool (*func)(struct nfs4_ol_stateid *))
{
	struct nfs4_openowner *oop;
	struct nfs4_ol_stateid *stp, *st_next;
	struct nfs4_ol_stateid *lst, *lst_next;
	u64 count = 0;

	spin_lock(&clp->cl_lock);
	list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
		/* _safe variants: @func may unhash entries as we go. */
		list_for_each_entry_safe(stp, st_next,
				&oop->oo_owner.so_stateids, st_perstateowner) {
			list_for_each_entry_safe(lst, lst_next,
					&stp->st_locks, st_locks) {
				if (func) {
					if (func(lst))
						nfsd_inject_add_lock_to_list(lst,
									collect);
				}
				++count;
				/*
				 * Despite the fact that these functions deal
				 * with 64-bit integers for "count", we must
				 * ensure that it doesn't blow up the
				 * clp->cl_refcount. Throw a warning if we
				 * start to approach INT_MAX here.
				 */
				WARN_ON_ONCE(count == (INT_MAX / 2));
				if (count == max)
					goto out;
			}
		}
	}
out:
	spin_unlock(&clp->cl_lock);

	return count;
}
6328
/* Unhash up to @max lock stateids of @clp and gather them on @collect. */
static u64
nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
			  u64 max)
{
	return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
}
6335
6336 static u64
6337 nfsd_print_client_locks(struct nfs4_client *clp)
6338 {
6339         u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
6340         nfsd_print_count(clp, count, "locked files");
6341         return count;
6342 }
6343
6344 u64
6345 nfsd_inject_print_locks(void)
6346 {
6347         struct nfs4_client *clp;
6348         u64 count = 0;
6349         struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6350                                                 nfsd_net_id);
6351
6352         if (!nfsd_netns_ready(nn))
6353                 return 0;
6354
6355         spin_lock(&nn->client_lock);
6356         list_for_each_entry(clp, &nn->client_lru, cl_lru)
6357                 count += nfsd_print_client_locks(clp);
6358         spin_unlock(&nn->client_lock);
6359
6360         return count;
6361 }
6362
6363 static void
6364 nfsd_reap_locks(struct list_head *reaplist)
6365 {
6366         struct nfs4_client *clp;
6367         struct nfs4_ol_stateid *stp, *next;
6368
6369         list_for_each_entry_safe(stp, next, reaplist, st_locks) {
6370                 list_del_init(&stp->st_locks);
6371                 clp = stp->st_stid.sc_client;
6372                 nfs4_put_stid(&stp->st_stid);
6373                 put_client(clp);
6374         }
6375 }
6376
/*
 * Fault injection: release every lock stateid of the client at @addr.
 * Returns the number released.  (Note: local "count" is unsigned int
 * although the function returns u64 — harmless for realistic counts.)
 */
u64
nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
{
	unsigned int count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		/* max == 0: collect them all. */
		count = nfsd_collect_client_locks(clp, &reaplist, 0);
	spin_unlock(&nn->client_lock);
	/* Actual freeing happens outside the spinlock. */
	nfsd_reap_locks(&reaplist);
	return count;
}
6397
/*
 * Fault injection: release up to @max lock stateids across all clients
 * (0 means no limit).  Returns the number released.
 */
u64
nfsd_inject_forget_locks(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		/*
		 * When max == 0, "max - count" wraps to a huge u64, which
		 * nfsd_collect_client_locks treats as "no limit"; the
		 * "max != 0" guard below then skips the early break.
		 */
		count += nfsd_collect_client_locks(clp, &reaplist, max - count);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	/* Actual freeing happens outside the spinlock. */
	nfsd_reap_locks(&reaplist);
	return count;
}
6420
/*
 * Walk @clp's openowners, applying @func to each (if given) and gathering
 * them on @collect (pinning the client per entry).  Stops after @max
 * visits (0 means no limit) and returns the number visited.  Caller must
 * hold nn->client_lock.
 */
static u64
nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
			      struct list_head *collect,
			      void (*func)(struct nfs4_openowner *))
{
	struct nfs4_openowner *oop, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	u64 count = 0;

	lockdep_assert_held(&nn->client_lock);

	spin_lock(&clp->cl_lock);
	/* _safe variant: @func may unhash the owner as we go. */
	list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
		if (func) {
			func(oop);
			if (collect) {
				atomic_inc(&clp->cl_refcount);
				list_add(&oop->oo_perclient, collect);
			}
		}
		++count;
		/*
		 * Despite the fact that these functions deal with
		 * 64-bit integers for "count", we must ensure that
		 * it doesn't blow up the clp->cl_refcount. Throw a
		 * warning if we start to approach INT_MAX here.
		 */
		WARN_ON_ONCE(count == (INT_MAX / 2));
		if (count == max)
			break;
	}
	spin_unlock(&clp->cl_lock);

	return count;
}
6457
6458 static u64
6459 nfsd_print_client_openowners(struct nfs4_client *clp)
6460 {
6461         u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);
6462
6463         nfsd_print_count(clp, count, "openowners");
6464         return count;
6465 }
6466
/* Unhash up to @max openowners of @clp and gather them on @collect. */
static u64
nfsd_collect_client_openowners(struct nfs4_client *clp,
			       struct list_head *collect, u64 max)
{
	return nfsd_foreach_client_openowner(clp, max, collect,
						unhash_openowner_locked);
}
6474
6475 u64
6476 nfsd_inject_print_openowners(void)
6477 {
6478         struct nfs4_client *clp;
6479         u64 count = 0;
6480         struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6481                                                 nfsd_net_id);
6482
6483         if (!nfsd_netns_ready(nn))
6484                 return 0;
6485
6486         spin_lock(&nn->client_lock);
6487         list_for_each_entry(clp, &nn->client_lru, cl_lru)
6488                 count += nfsd_print_client_openowners(clp);
6489         spin_unlock(&nn->client_lock);
6490
6491         return count;
6492 }
6493
6494 static void
6495 nfsd_reap_openowners(struct list_head *reaplist)
6496 {
6497         struct nfs4_client *clp;
6498         struct nfs4_openowner *oop, *next;
6499
6500         list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
6501                 list_del_init(&oop->oo_perclient);
6502                 clp = oop->oo_owner.so_client;
6503                 release_openowner(oop);
6504                 put_client(clp);
6505         }
6506 }
6507
/*
 * Fault injection: release every openowner of the client at @addr.
 * Returns the number released.
 */
u64
nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
				     size_t addr_size)
{
	unsigned int count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		/* max == 0: collect them all. */
		count = nfsd_collect_client_openowners(clp, &reaplist, 0);
	spin_unlock(&nn->client_lock);
	/* Actual freeing happens outside the spinlock. */
	nfsd_reap_openowners(&reaplist);
	return count;
}
6529
/*
 * Fault injection: release up to @max openowners across all clients
 * (0 means no limit).  Returns the number released.
 */
u64
nfsd_inject_forget_openowners(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		/* max == 0: "max - count" wraps, effectively unlimited. */
		count += nfsd_collect_client_openowners(clp, &reaplist,
							max - count);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	/* Actual freeing happens outside the spinlock. */
	nfsd_reap_openowners(&reaplist);
	return count;
}
6553
/*
 * Walk @clp's delegations under state_lock.  If @victims is given, each
 * unbroken delegation is unhashed and moved there (pinning the client).
 * Stops after @max visits (0 means no limit) and returns the number
 * visited.  Caller must hold nn->client_lock.
 */
static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
				     struct list_head *victims)
{
	struct nfs4_delegation *dp, *next;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	u64 count = 0;

	lockdep_assert_held(&nn->client_lock);

	spin_lock(&state_lock);
	list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
		if (victims) {
			/*
			 * It's not safe to mess with delegations that have a
			 * non-zero dl_time. They might have already been broken
			 * and could be processed by the laundromat outside of
			 * the state_lock. Just leave them be.
			 */
			if (dp->dl_time != 0)
				continue;

			atomic_inc(&clp->cl_refcount);
			WARN_ON(!unhash_delegation_locked(dp));
			list_add(&dp->dl_recall_lru, victims);
		}
		++count;
		/*
		 * Despite the fact that these functions deal with
		 * 64-bit integers for "count", we must ensure that
		 * it doesn't blow up the clp->cl_refcount. Throw a
		 * warning if we start to approach INT_MAX here.
		 */
		WARN_ON_ONCE(count == (INT_MAX / 2));
		if (count == max)
			break;
	}
	spin_unlock(&state_lock);
	return count;
}
6594
6595 static u64
6596 nfsd_print_client_delegations(struct nfs4_client *clp)
6597 {
6598         u64 count = nfsd_find_all_delegations(clp, 0, NULL);
6599
6600         nfsd_print_count(clp, count, "delegations");
6601         return count;
6602 }
6603
6604 u64
6605 nfsd_inject_print_delegations(void)
6606 {
6607         struct nfs4_client *clp;
6608         u64 count = 0;
6609         struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6610                                                 nfsd_net_id);
6611
6612         if (!nfsd_netns_ready(nn))
6613                 return 0;
6614
6615         spin_lock(&nn->client_lock);
6616         list_for_each_entry(clp, &nn->client_lru, cl_lru)
6617                 count += nfsd_print_client_delegations(clp);
6618         spin_unlock(&nn->client_lock);
6619
6620         return count;
6621 }
6622
6623 static void
6624 nfsd_forget_delegations(struct list_head *reaplist)
6625 {
6626         struct nfs4_client *clp;
6627         struct nfs4_delegation *dp, *next;
6628
6629         list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
6630                 list_del_init(&dp->dl_recall_lru);
6631                 clp = dp->dl_stid.sc_client;
6632                 revoke_delegation(dp);
6633                 put_client(clp);
6634         }
6635 }
6636
/*
 * Fault injection: revoke every delegation held by the client at @addr.
 * Returns the number revoked.
 */
u64
nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
				      size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		/* max == 0: collect them all. */
		count = nfsd_find_all_delegations(clp, 0, &reaplist);
	spin_unlock(&nn->client_lock);

	/* Revocation happens outside the spinlock. */
	nfsd_forget_delegations(&reaplist);
	return count;
}
6659
/*
 * Fault injection: revoke up to @max delegations across all clients
 * (0 means no limit).  Returns the number revoked.
 */
u64
nfsd_inject_forget_delegations(u64 max)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		/* max == 0: "max - count" wraps, effectively unlimited. */
		count += nfsd_find_all_delegations(clp, max - count, &reaplist);
		if (max != 0 && count >= max)
			break;
	}
	spin_unlock(&nn->client_lock);
	/* Revocation happens outside the spinlock. */
	nfsd_forget_delegations(&reaplist);
	return count;
}
6682
/*
 * Issue a CB_RECALL for every delegation previously gathered on
 * @reaplist, dropping the client reference taken when it was collected.
 */
static void
nfsd_recall_delegations(struct list_head *reaplist)
{
	struct nfs4_client *clp;
	struct nfs4_delegation *dp, *next;

	list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
		list_del_init(&dp->dl_recall_lru);
		clp = dp->dl_stid.sc_client;
		/*
		 * We skipped all entries that had a zero dl_time before,
		 * so we can now reset the dl_time back to 0. If a delegation
		 * break comes in now, then it won't make any difference since
		 * we're recalling it either way.
		 */
		spin_lock(&state_lock);
		dp->dl_time = 0;
		spin_unlock(&state_lock);
		nfsd_break_one_deleg(dp);
		put_client(clp);
	}
}
6705
/*
 * Fault injection: recall every delegation held by the client at @addr.
 * Returns the number recalled.
 */
u64
nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
				      size_t addr_size)
{
	u64 count = 0;
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
						nfsd_net_id);
	LIST_HEAD(reaplist);

	if (!nfsd_netns_ready(nn))
		return count;

	spin_lock(&nn->client_lock);
	clp = nfsd_find_client(addr, addr_size);
	if (clp)
		/* max == 0: collect them all. */
		count = nfsd_find_all_delegations(clp, 0, &reaplist);
	spin_unlock(&nn->client_lock);

	/* Recalls happen outside the spinlock. */
	nfsd_recall_delegations(&reaplist);
	return count;
}
6728
6729 u64
6730 nfsd_inject_recall_delegations(u64 max)
6731 {
6732         u64 count = 0;
6733         struct nfs4_client *clp, *next;
6734         struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6735                                                 nfsd_net_id);
6736         LIST_HEAD(reaplist);
6737
6738         if (!nfsd_netns_ready(nn))
6739                 return count;
6740
6741         spin_lock(&nn->client_lock);
6742         list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
6743                 count += nfsd_find_all_delegations(clp, max - count, &reaplist);
6744                 if (max != 0 && ++count >= max)
6745                         break;
6746         }
6747         spin_unlock(&nn->client_lock);
6748         nfsd_recall_delegations(&reaplist);
6749         return count;
6750 }
6751 #endif /* CONFIG_NFSD_FAULT_INJECTION */
6752
/*
 * Since the lifetime of a delegation isn't limited to that of an open, a
 * client may quite reasonably hang on to a delegation as long as it has
 * the inode cached.  This becomes an obvious problem the first time a
 * client's inode cache approaches the size of the server's total memory.
 *
 * For now we avoid this problem by imposing a hard limit on the number
 * of delegations, which varies according to the server's memory size.
 */
static void
set_max_delegations(void)
{
	/*
	 * Allow at most 4 delegations per megabyte of RAM.  Quick
	 * estimates suggest that in the worst case (where every delegation
	 * is for a different inode), a delegation could take about 1.5K,
	 * giving a worst case usage of about 6% of memory.
	 *
	 * (20 - 2 - PAGE_SHIFT converts pages to MiB and multiplies by 4.)
	 */
	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}
6773
6774 static int nfs4_state_create_net(struct net *net)
6775 {
6776         struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6777         int i;
6778
6779         nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
6780                         CLIENT_HASH_SIZE, GFP_KERNEL);
6781         if (!nn->conf_id_hashtbl)
6782                 goto err;
6783         nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
6784                         CLIENT_HASH_SIZE, GFP_KERNEL);
6785         if (!nn->unconf_id_hashtbl)
6786                 goto err_unconf_id;
6787         nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
6788                         SESSION_HASH_SIZE, GFP_KERNEL);
6789         if (!nn->sessionid_hashtbl)
6790                 goto err_sessionid;
6791
6792         for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6793                 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
6794                 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
6795         }
6796         for (i = 0; i < SESSION_HASH_SIZE; i++)
6797                 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
6798         nn->conf_name_tree = RB_ROOT;
6799         nn->unconf_name_tree = RB_ROOT;
6800         nn->boot_time = get_seconds();
6801         nn->grace_ended = false;
6802         nn->nfsd4_manager.block_opens = true;
6803         INIT_LIST_HEAD(&nn->nfsd4_manager.list);
6804         INIT_LIST_HEAD(&nn->client_lru);
6805         INIT_LIST_HEAD(&nn->close_lru);
6806         INIT_LIST_HEAD(&nn->del_recall_lru);
6807         spin_lock_init(&nn->client_lock);
6808
6809         INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
6810         get_net(net);
6811
6812         return 0;
6813
6814 err_sessionid:
6815         kfree(nn->unconf_id_hashtbl);
6816 err_unconf_id:
6817         kfree(nn->conf_id_hashtbl);
6818 err:
6819         return -ENOMEM;
6820 }
6821
6822 static void
6823 nfs4_state_destroy_net(struct net *net)
6824 {
6825         int i;
6826         struct nfs4_client *clp = NULL;
6827         struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6828
6829         for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6830                 while (!list_empty(&nn->conf_id_hashtbl[i])) {
6831                         clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
6832                         destroy_client(clp);
6833                 }
6834         }
6835
6836         for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6837                 while (!list_empty(&nn->unconf_id_hashtbl[i])) {
6838                         clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
6839                         destroy_client(clp);
6840                 }
6841         }
6842
6843         kfree(nn->sessionid_hashtbl);
6844         kfree(nn->unconf_id_hashtbl);
6845         kfree(nn->conf_id_hashtbl);
6846         put_net(net);
6847 }
6848
6849 int
6850 nfs4_state_start_net(struct net *net)
6851 {
6852         struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6853         int ret;
6854
6855         ret = nfs4_state_create_net(net);
6856         if (ret)
6857                 return ret;
6858         locks_start_grace(net, &nn->nfsd4_manager);
6859         nfsd4_client_tracking_init(net);
6860         printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
6861                nn->nfsd4_grace, net);
6862         queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
6863         return 0;
6864 }
6865
6866 /* initialization to perform when the nfsd service is started: */
6867
6868 int
6869 nfs4_state_start(void)
6870 {
6871         int ret;
6872
6873         ret = set_callback_cred();
6874         if (ret)
6875                 return ret;
6876
6877         laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
6878         if (laundry_wq == NULL) {
6879                 ret = -ENOMEM;
6880                 goto out_cleanup_cred;
6881         }
6882         ret = nfsd4_create_callback_queue();
6883         if (ret)
6884                 goto out_free_laundry;
6885
6886         set_max_delegations();
6887         return 0;
6888
6889 out_free_laundry:
6890         destroy_workqueue(laundry_wq);
6891 out_cleanup_cred:
6892         cleanup_callback_cred();
6893         return ret;
6894 }
6895
6896 void
6897 nfs4_state_shutdown_net(struct net *net)
6898 {
6899         struct nfs4_delegation *dp = NULL;
6900         struct list_head *pos, *next, reaplist;
6901         struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6902
6903         cancel_delayed_work_sync(&nn->laundromat_work);
6904         locks_end_grace(&nn->nfsd4_manager);
6905
6906         INIT_LIST_HEAD(&reaplist);
6907         spin_lock(&state_lock);
6908         list_for_each_safe(pos, next, &nn->del_recall_lru) {
6909                 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
6910                 WARN_ON(!unhash_delegation_locked(dp));
6911                 list_add(&dp->dl_recall_lru, &reaplist);
6912         }
6913         spin_unlock(&state_lock);
6914         list_for_each_safe(pos, next, &reaplist) {
6915                 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
6916                 list_del_init(&dp->dl_recall_lru);
6917                 put_clnt_odstate(dp->dl_clnt_odstate);
6918                 nfs4_put_deleg_lease(dp->dl_stid.sc_file);
6919                 nfs4_put_stid(&dp->dl_stid);
6920         }
6921
6922         nfsd4_client_tracking_exit(net);
6923         nfs4_state_destroy_net(net);
6924 }
6925
/*
 * Module-wide (not per-net) shutdown: undo nfs4_state_start().
 * Order matters: the workqueue is destroyed before the callback queue
 * and credential it may use.
 */
void
nfs4_state_shutdown(void)
{
	destroy_workqueue(laundry_wq);
	nfsd4_destroy_callback_queue();
	cleanup_callback_cred();
}
6933
6934 static void
6935 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
6936 {
6937         if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
6938                 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
6939 }
6940
6941 static void
6942 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
6943 {
6944         if (cstate->minorversion) {
6945                 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
6946                 SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
6947         }
6948 }
6949
/* Forget any current stateid saved in this compound's state. */
void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
}
6955
6956 /*
6957  * functions to set current state id
6958  */
/* Save the OPEN_DOWNGRADE reply stateid as the compound's current stateid. */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
{
	put_stateid(cstate, &odp->od_stateid);
}
6964
/* Save the OPEN reply stateid as the compound's current stateid. */
void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
{
	put_stateid(cstate, &open->op_stateid);
}
6970
/* Save the CLOSE reply stateid as the compound's current stateid. */
void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
	put_stateid(cstate, &close->cl_stateid);
}
6976
/* Save the LOCK reply stateid as the compound's current stateid. */
void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
{
	put_stateid(cstate, &lock->lk_resp_stateid);
}
6982
6983 /*
6984  * functions to consume current state id
6985  */
6986
/* Substitute the saved current stateid into an OPEN_DOWNGRADE request. */
void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
{
	get_stateid(cstate, &odp->od_stateid);
}
6992
/* Substitute the saved current stateid into a DELEGRETURN request. */
void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
{
	get_stateid(cstate, &drp->dr_stateid);
}
6998
/* Substitute the saved current stateid into a FREE_STATEID request. */
void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
{
	get_stateid(cstate, &fsp->fr_stateid);
}
7004
/* Substitute the saved current stateid into a SETATTR request. */
void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
{
	get_stateid(cstate, &setattr->sa_stateid);
}
7010
/* Substitute the saved current stateid into a CLOSE request. */
void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
	get_stateid(cstate, &close->cl_stateid);
}
7016
/* Substitute the saved current stateid into a LOCKU request. */
void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
{
	get_stateid(cstate, &locku->lu_stateid);
}
7022
/* Substitute the saved current stateid into a READ request. */
void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
{
	get_stateid(cstate, &read->rd_stateid);
}
7028
/* Substitute the saved current stateid into a WRITE request. */
void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
{
	get_stateid(cstate, &write->wr_stateid);
}