/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

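/* Note on the bookkeeping used throughout this file: for each dirty page,
 * page->private records the byte range that needs writing back, packed as
 * two offsets: "from" in the low-order bits (masked by AFS_PRIV_MAX) and
 * "to" in the high-order bits (shifted up by AFS_PRIV_SHIFT).  For example,
 * a write covering bytes 100-299 of a page is recorded as
 * (300 << AFS_PRIV_SHIFT) | 100.
 */
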
/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
		      GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%x:%u},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

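	/* If only part of the page is being written and the page isn't yet
	 * up to date, fill it from the server first so that the bytes around
	 * the write keep their server contents.
	 */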
	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

	/* page won't leak in error case: it eventually gets cleaned off LRU */
	*pagep = page;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = priv & AFS_PRIV_MAX;
		t = priv >> AFS_PRIV_SHIFT;
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
		if (from < f)
			f = from;
		if (to > t)
			t = to;
	} else {
		f = from;
		t = to;
	}

	priv = (unsigned long)t << AFS_PRIV_SHIFT;
	priv |= f;
	trace_afs_page_dirty(vnode, tracepoint_string("begin"),
			     page->index, priv);
	SetPagePrivate(page);
	set_page_private(page, priv);
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	ret = lock_page_killable(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}
	goto try_again;
}
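
/* The VFS drives afs_write_begin() and afs_write_end() as a pair from
 * generic_perform_write(): ->write_begin() hands back a locked, prepared
 * page, the caller copies the user data into it, and ->write_end() then
 * commits however much of the copy succeeded.
 */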

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%x:%u},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		write_seqlock(&vnode->cb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		write_sequnlock(&vnode->cb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}

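/* The two helpers below handle pages whose store to the server failed:
 * afs_kill_pages() discards the pages outright when the error is not
 * recoverable, while afs_redirty_pages() marks them dirty again so that a
 * later writeback pass can retry them.
 */
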
/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * Write a region of dirty pages back to the server.
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_fs_cursor fc;
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	_enter("%s{%x:%u.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	spin_lock(&vnode->wb_lock);
	p = vnode->wb_keys.next;

	/* Iterate through the list looking for a valid key to use. */
try_next_key:
	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0)
			goto found_key;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	afs_put_wb_key(wbk);
	_leave(" = %d [no keys]", ret);
	return ret;

found_key:
	refcount_inc(&wbk->usage);
	spin_unlock(&vnode->wb_lock);

	_debug("USE WB KEY %u", key_serial(wbk->key));

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, wbk->key)) {
		while (afs_select_fileserver(&fc)) {
			fc.cb_break = afs_calc_vnode_cb_break(vnode);
			afs_fs_store_data(&fc, mapping, first, last, offset, to);
		}

		afs_check_for_remote_deletion(&fc, fc.vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break);
		ret = afs_end_vnode_operation(&fc);
	}

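	/* A permission or key error may just mean that this particular key
	 * has expired or been revoked; in that case, step to the next key on
	 * the list and retry the store rather than failing the write
	 * outright.
	 */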
	switch (ret) {
	case 0:
		afs_stat_v(vnode, n_stores);
		atomic_long_add((last * PAGE_SIZE + to) -
				(first * PAGE_SIZE + offset),
				&afs_v2net(vnode)->n_store_bytes);
		break;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");
		spin_lock(&vnode->wb_lock);
		p = wbk->vnode_link.next;
		afs_put_wb_key(wbk);
		goto try_next_key;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = priv & AFS_PRIV_MAX;
	to = priv >> AFS_PRIV_SHIFT;
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

	if (start >= final_page ||
	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
		goto no_more;

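	/* Extend the run forward from the page after the primary one,
	 * looking pages up in batches of ARRAY_SIZE(pages) and stopping at
	 * 65536 pages, presumably to bound the size of any one store
	 * operation.
	 */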
	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
			if (to != PAGE_SIZE &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
				break;
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
			if (f != 0 &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

	ret = afs_store_data(mapping, first, last, offset, to);
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		/* Fall through */
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

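	/* Pull dirty pages one at a time: find_get_pages_range_tag() is
	 * asked for a batch of just one page and advances index past
	 * whatever it returns.
	 */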
	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/*
		 * at this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	pgoff_t start, end, next;
	int ret;

	_enter("");

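	/* In cyclic mode, resume from wherever the previous pass left off
	 * and wrap around to the start of the file; otherwise honour the
	 * exact range the caller asked for.
	 */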
	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}

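/* Once a StoreData RPC completes, the function below clears the dirty-region
 * tracking from page->private for every page in the stored range and ends
 * the writeback state that was set when the store was started.
 */
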
/*
 * completion of write to server
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;
	pgoff_t first = call->first, last = call->last;

	_enter("{%x:%u},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			priv = page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			set_page_private(pv.pages[loop], 0);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%x.%u},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%x:%u},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%x:%u}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
		return VM_FAULT_RETRY;

	if (lock_page_killable(vmf->page) < 0)
		return VM_FAULT_RETRY;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
	priv |= 0; /* From */
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
	SetPagePrivate(vmf->page);
	set_page_private(vmf->page, priv);
	file_update_time(file);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}

/*
 * Prune the keys cached for writeback.  Unused keys are moved off the list
 * under vnode->wb_lock and then released.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
		}

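		/* Store just the dirty region [f, t) of this one page.  Note
		 * that afs_store_data() takes the byte range as (offset, to),
		 * i.e. f first, then t.
		 */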
		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		ret = afs_store_data(mapping, page->index, page->index, f, t);
	}

	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);
	set_page_private(page, 0);
	ClearPagePrivate(page);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}
868 }