GNU Linux-libre 6.1.86-gnu: fs/ceph/locks.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/file.h>
#include <linux/namei.h>
#include <linux/random.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/pagelist.h>

static u64 lock_secret;
static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
                                         struct ceph_mds_request *req);

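/*
 * Mix the file_lock owner pointer with the random lock_secret to produce
 * the 'owner' identifier that is sent to the MDS.
 */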
static inline u64 secure_addr(void *addr)
{
        u64 v = lock_secret ^ (u64)(unsigned long)addr;
        /*
         * Set the most significant bit, so that the MDS knows the 'owner'
         * field alone is sufficient to identify the owner of the lock
         * (older code used both 'owner' and 'pid').
         */
        v |= (1ULL << 63);
        return v;
}

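/* seed lock_secret once at initialization */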
void __init ceph_flock_init(void)
{
        get_random_bytes(&lock_secret, sizeof(lock_secret));
}

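/*
 * file_lock_operations callbacks: each copied ceph file_lock takes a
 * reference on i_filelock_ref; dropping the last reference clears any
 * pending CEPH_I_ERROR_FILELOCK state on the inode.
 */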
static void ceph_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
        struct inode *inode = file_inode(dst->fl_file);
        atomic_inc(&ceph_inode(inode)->i_filelock_ref);
}

static void ceph_fl_release_lock(struct file_lock *fl)
{
        struct inode *inode = file_inode(fl->fl_file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        if (atomic_dec_and_test(&ci->i_filelock_ref)) {
                /* clear error when all locks are released */
                spin_lock(&ci->i_ceph_lock);
                ci->i_ceph_flags &= ~CEPH_I_ERROR_FILELOCK;
                spin_unlock(&ci->i_ceph_lock);
        }
}

static const struct file_lock_operations ceph_fl_lock_ops = {
        .fl_copy_lock = ceph_fl_copy_lock,
        .fl_release_private = ceph_fl_release_lock,
};

/*
 * Implement fcntl and flock locking functions.
 */
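/*
 * Send a file lock request (set, get or unlock) for this inode to the auth
 * MDS and wait for the reply. For CEPH_MDS_OP_GETFILELOCK, the conflicting
 * lock (if any) is copied from the reply back into *fl.
 */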
static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
                             int cmd, u8 wait, struct file_lock *fl)
{
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
        struct ceph_mds_request *req;
        int err;
        u64 length = 0;
        u64 owner;

        if (operation == CEPH_MDS_OP_SETFILELOCK) {
                /*
                 * Increasing i_filelock_ref closes the race window between
                 * handling the request reply and adding the file_lock struct
                 * to the inode. Otherwise, the auth caps may get trimmed in
                 * that window. The caller will decrement the counter.
                 */
                fl->fl_ops = &ceph_fl_lock_ops;
                fl->fl_ops->fl_copy_lock(fl, NULL);
        }

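        /* only SETFILELOCK requests that take a lock may block at the MDS */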
        if (operation != CEPH_MDS_OP_SETFILELOCK || cmd == CEPH_LOCK_UNLOCK)
                wait = 0;

        req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);
        req->r_inode = inode;
        ihold(inode);
        req->r_num_caps = 1;

        /* mds requires start and length rather than start and end */
        if (LLONG_MAX == fl->fl_end)
                length = 0;
        else
                length = fl->fl_end - fl->fl_start + 1;

        owner = secure_addr(fl->fl_owner);

        dout("ceph_lock_message: rule: %d, op: %d, owner: %llx, pid: %llu, "
             "start: %llu, length: %llu, wait: %d, type: %d\n", (int)lock_type,
             (int)operation, owner, (u64)fl->fl_pid, fl->fl_start, length,
             wait, fl->fl_type);

        req->r_args.filelock_change.rule = lock_type;
        req->r_args.filelock_change.type = cmd;
        req->r_args.filelock_change.owner = cpu_to_le64(owner);
        req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid);
        req->r_args.filelock_change.start = cpu_to_le64(fl->fl_start);
        req->r_args.filelock_change.length = cpu_to_le64(length);
        req->r_args.filelock_change.wait = wait;

        err = ceph_mdsc_submit_request(mdsc, inode, req);
        if (!err)
                err = ceph_mdsc_wait_request(mdsc, req, wait ?
                                        ceph_lock_wait_for_completion : NULL);
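        /* for GETFILELOCK, copy the conflicting lock from the reply into *fl */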
        if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
                fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
                if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
                        fl->fl_type = F_RDLCK;
                else if (CEPH_LOCK_EXCL == req->r_reply_info.filelock_reply->type)
                        fl->fl_type = F_WRLCK;
                else
                        fl->fl_type = F_UNLCK;

                fl->fl_start = le64_to_cpu(req->r_reply_info.filelock_reply->start);
                length = le64_to_cpu(req->r_reply_info.filelock_reply->start) +
                         le64_to_cpu(req->r_reply_info.filelock_reply->length);
                if (length >= 1)
                        fl->fl_end = length - 1;
                else
                        fl->fl_end = 0;

        }
        ceph_mdsc_put_request(req);
        dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
             "length: %llu, wait: %d, type: %d, err code %d\n", (int)lock_type,
             (int)operation, (u64)fl->fl_pid, fl->fl_start,
             length, wait, fl->fl_type, err);
        return err;
}

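/*
 * Wait for a blocking SETFILELOCK request to finish. If the wait is
 * interrupted by a signal, tell the MDS to abort the pending lock attempt
 * with a matching *_INTR unlock request, then wait for the original
 * request to become safe before returning.
 */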
static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
                                         struct ceph_mds_request *req)
{
        struct ceph_mds_request *intr_req;
        struct inode *inode = req->r_inode;
        int err, lock_type;

        BUG_ON(req->r_op != CEPH_MDS_OP_SETFILELOCK);
        if (req->r_args.filelock_change.rule == CEPH_LOCK_FCNTL)
                lock_type = CEPH_LOCK_FCNTL_INTR;
        else if (req->r_args.filelock_change.rule == CEPH_LOCK_FLOCK)
                lock_type = CEPH_LOCK_FLOCK_INTR;
        else
                BUG_ON(1);
        BUG_ON(req->r_args.filelock_change.type == CEPH_LOCK_UNLOCK);

        err = wait_for_completion_interruptible(&req->r_completion);
        if (!err)
                return 0;

        dout("ceph_lock_wait_for_completion: request %llu was interrupted\n",
             req->r_tid);

        mutex_lock(&mdsc->mutex);
        if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
                err = 0;
        } else {
                /*
                 * ensure we aren't running concurrently with
                 * ceph_fill_trace or ceph_readdir_prepopulate, which
                 * rely on locks (dir mutex) held by our caller.
                 */
                mutex_lock(&req->r_fill_mutex);
                req->r_err = err;
                set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
                mutex_unlock(&req->r_fill_mutex);

                if (!req->r_session) {
                        /* haven't sent the request */
                        err = 0;
                }
        }
        mutex_unlock(&mdsc->mutex);
        if (!err)
                return 0;

        intr_req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETFILELOCK,
                                            USE_AUTH_MDS);
        if (IS_ERR(intr_req))
                return PTR_ERR(intr_req);

        intr_req->r_inode = inode;
        ihold(inode);
        intr_req->r_num_caps = 1;

        intr_req->r_args.filelock_change = req->r_args.filelock_change;
        intr_req->r_args.filelock_change.rule = lock_type;
        intr_req->r_args.filelock_change.type = CEPH_LOCK_UNLOCK;

        err = ceph_mdsc_do_request(mdsc, inode, intr_req);
        ceph_mdsc_put_request(intr_req);

        if (err && err != -ERESTARTSYS)
                return err;

        wait_for_completion_killable(&req->r_safe_completion);
        return 0;
}

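/*
 * Try to carry out the unlock locally first. With FL_EXISTS set, the VFS
 * returns -ENOENT when no matching lock is held, in which case there is
 * nothing for the MDS to release either. Returns <= 0 if the caller can
 * return immediately, or 1 if the unlock must also be sent to the MDS.
 */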
static int try_unlock_file(struct file *file, struct file_lock *fl)
{
        int err;
        unsigned int orig_flags = fl->fl_flags;
        fl->fl_flags |= FL_EXISTS;
        err = locks_lock_file_wait(file, fl);
        fl->fl_flags = orig_flags;
        if (err == -ENOENT) {
                if (!(orig_flags & FL_EXISTS))
                        err = 0;
                return err;
        }
        return 1;
}

/*
 * Attempt to set an fcntl lock.
 * For now, this simply goes out to the server (MDS); it may get smarter
 * in the future.
 */
int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
{
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        int err = 0;
        u16 op = CEPH_MDS_OP_SETFILELOCK;
        u8 wait = 0;
        u8 lock_cmd;

        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;

        if (ceph_inode_is_shutdown(inode))
                return -ESTALE;

        dout("ceph_lock, fl_owner: %p\n", fl->fl_owner);

        /* set wait bit as appropriate, then translate the command for Ceph */
        if (IS_GETLK(cmd))
                op = CEPH_MDS_OP_GETFILELOCK;
        else if (IS_SETLKW(cmd))
                wait = 1;

        spin_lock(&ci->i_ceph_lock);
        if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) {
                err = -EIO;
        }
        spin_unlock(&ci->i_ceph_lock);
        if (err < 0) {
                if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK == fl->fl_type)
                        posix_lock_file(file, fl, NULL);
                return err;
        }

        if (F_RDLCK == fl->fl_type)
                lock_cmd = CEPH_LOCK_SHARED;
        else if (F_WRLCK == fl->fl_type)
                lock_cmd = CEPH_LOCK_EXCL;
        else
                lock_cmd = CEPH_LOCK_UNLOCK;

        if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK == fl->fl_type) {
                err = try_unlock_file(file, fl);
                if (err <= 0)
                        return err;
        }

        err = ceph_lock_message(CEPH_LOCK_FCNTL, op, inode, lock_cmd, wait, fl);
        if (!err) {
                if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK != fl->fl_type) {
                        dout("mds locked, locking locally\n");
                        err = posix_lock_file(file, fl, NULL);
                        if (err) {
                                /* undo! This should only happen if
                                 * the kernel detects local
                                 * deadlock. */
                                ceph_lock_message(CEPH_LOCK_FCNTL, op, inode,
                                                  CEPH_LOCK_UNLOCK, 0, fl);
                                dout("got %d on posix_lock_file, undid lock\n",
                                     err);
                        }
                }
        }
        return err;
}

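/*
 * Attempt to set an flock lock. Follows the same pattern as ceph_lock():
 * talk to the MDS first, then update the local flock state.
 */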
int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
{
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        int err = 0;
        u8 wait = 0;
        u8 lock_cmd;

        if (!(fl->fl_flags & FL_FLOCK))
                return -ENOLCK;

        if (ceph_inode_is_shutdown(inode))
                return -ESTALE;

        dout("ceph_flock, fl_file: %p\n", fl->fl_file);

        spin_lock(&ci->i_ceph_lock);
        if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) {
                err = -EIO;
        }
        spin_unlock(&ci->i_ceph_lock);
        if (err < 0) {
                if (F_UNLCK == fl->fl_type)
                        locks_lock_file_wait(file, fl);
                return err;
        }

        if (IS_SETLKW(cmd))
                wait = 1;

        if (F_RDLCK == fl->fl_type)
                lock_cmd = CEPH_LOCK_SHARED;
        else if (F_WRLCK == fl->fl_type)
                lock_cmd = CEPH_LOCK_EXCL;
        else
                lock_cmd = CEPH_LOCK_UNLOCK;

        if (F_UNLCK == fl->fl_type) {
                err = try_unlock_file(file, fl);
                if (err <= 0)
                        return err;
        }

        err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK,
                                inode, lock_cmd, wait, fl);
        if (!err && F_UNLCK != fl->fl_type) {
                err = locks_lock_file_wait(file, fl);
                if (err) {
                        ceph_lock_message(CEPH_LOCK_FLOCK,
                                          CEPH_MDS_OP_SETFILELOCK,
                                          inode, CEPH_LOCK_UNLOCK, 0, fl);
                        dout("got %d on locks_lock_file_wait, undid lock\n", err);
                }
        }
        return err;
}

/*
 * Fills in the passed counter variables, so you can prepare pagelist metadata
 * before calling ceph_encode_locks_to_buffer().
 */
void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
{
        struct file_lock *lock;
        struct file_lock_context *ctx;

        *fcntl_count = 0;
        *flock_count = 0;

        ctx = inode->i_flctx;
        if (ctx) {
                spin_lock(&ctx->flc_lock);
                list_for_each_entry(lock, &ctx->flc_posix, fl_list)
                        ++(*fcntl_count);
                list_for_each_entry(lock, &ctx->flc_flock, fl_list)
                        ++(*flock_count);
                spin_unlock(&ctx->flc_lock);
        }
        dout("counted %d flock locks and %d fcntl locks\n",
             *flock_count, *fcntl_count);
}

/*
 * Given a pointer to a lock, convert it to a ceph filelock.
 */
static int lock_to_ceph_filelock(struct file_lock *lock,
                                 struct ceph_filelock *cephlock)
{
        int err = 0;
        cephlock->start = cpu_to_le64(lock->fl_start);
        cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
        cephlock->client = cpu_to_le64(0);
        cephlock->pid = cpu_to_le64((u64)lock->fl_pid);
        cephlock->owner = cpu_to_le64(secure_addr(lock->fl_owner));

        switch (lock->fl_type) {
        case F_RDLCK:
                cephlock->type = CEPH_LOCK_SHARED;
                break;
        case F_WRLCK:
                cephlock->type = CEPH_LOCK_EXCL;
                break;
        case F_UNLCK:
                cephlock->type = CEPH_LOCK_UNLOCK;
                break;
        default:
                dout("Have unknown lock type %d\n", lock->fl_type);
                err = -EINVAL;
        }

        return err;
}

/*
 * Encode the flock and fcntl locks for the given inode into the ceph_filelock
 * array. Must be called with inode->i_lock already held.
 * If we encounter more of a specific lock type than expected, return -ENOSPC.
 */
int ceph_encode_locks_to_buffer(struct inode *inode,
                                struct ceph_filelock *flocks,
                                int num_fcntl_locks, int num_flock_locks)
{
        struct file_lock *lock;
        struct file_lock_context *ctx = inode->i_flctx;
        int err = 0;
        int seen_fcntl = 0;
        int seen_flock = 0;
        int l = 0;

        dout("encoding %d flock and %d fcntl locks\n", num_flock_locks,
             num_fcntl_locks);

        if (!ctx)
                return 0;

        spin_lock(&ctx->flc_lock);
        list_for_each_entry(lock, &ctx->flc_posix, fl_list) {
                ++seen_fcntl;
                if (seen_fcntl > num_fcntl_locks) {
                        err = -ENOSPC;
                        goto fail;
                }
                err = lock_to_ceph_filelock(lock, &flocks[l]);
                if (err)
                        goto fail;
                ++l;
        }
        list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
                ++seen_flock;
                if (seen_flock > num_flock_locks) {
                        err = -ENOSPC;
                        goto fail;
                }
                err = lock_to_ceph_filelock(lock, &flocks[l]);
                if (err)
                        goto fail;
                ++l;
        }
fail:
        spin_unlock(&ctx->flc_lock);
        return err;
}

/*
 * Copy the encoded flock and fcntl locks into the pagelist.
 * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
 * sequential flock locks.
 * Returns zero on success.
 */
int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
                           struct ceph_pagelist *pagelist,
                           int num_fcntl_locks, int num_flock_locks)
{
        int err = 0;
        __le32 nlocks;

        nlocks = cpu_to_le32(num_fcntl_locks);
        err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
        if (err)
                goto out_fail;

        if (num_fcntl_locks > 0) {
                err = ceph_pagelist_append(pagelist, flocks,
                                           num_fcntl_locks * sizeof(*flocks));
                if (err)
                        goto out_fail;
        }

        nlocks = cpu_to_le32(num_flock_locks);
        err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
        if (err)
                goto out_fail;

        if (num_flock_locks > 0) {
                err = ceph_pagelist_append(pagelist, &flocks[num_fcntl_locks],
                                           num_flock_locks * sizeof(*flocks));
        }
out_fail:
        return err;
}