/*
 * dat.c - NILFS disk address translation.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Written by Koji Sato.
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "mdt.h"
#include "alloc.h"
#include "dat.h"

#define NILFS_CNO_MIN	((__u64)1)
#define NILFS_CNO_MAX	(~(__u64)0)

/**
 * struct nilfs_dat_info - on-memory private data of DAT file
 * @mi: on-memory private data of metadata file
 * @palloc_cache: persistent object allocator cache of DAT file
 * @shadow: shadow map of DAT file
 */
struct nilfs_dat_info {
	struct nilfs_mdt_info mi;
	struct nilfs_palloc_cache palloc_cache;
	struct nilfs_shadow_map shadow;
};

static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat)
{
	return (struct nilfs_dat_info *)NILFS_MDT(dat);
}
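
/*
 * Each DAT entry records the disk block number currently assigned to a
 * virtual block number (de_blocknr) together with the checkpoint numbers
 * that delimit the lifetime of that assignment (de_start and de_end).
 * The helpers below edit these fields through the persistent object
 * allocator (palloc) request interface.
 */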

static int nilfs_dat_prepare_entry(struct inode *dat,
				   struct nilfs_palloc_req *req, int create)
{
	return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
					    create, &req->pr_entry_bh);
}

static void nilfs_dat_commit_entry(struct inode *dat,
				   struct nilfs_palloc_req *req)
{
	mark_buffer_dirty(req->pr_entry_bh);
	nilfs_mdt_mark_dirty(dat);
	brelse(req->pr_entry_bh);
}

static void nilfs_dat_abort_entry(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	brelse(req->pr_entry_bh);
}
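
/*
 * Every externally visible change to a DAT entry is split into a prepare
 * step that may fail (reading or reserving the entry block), a commit step
 * that applies the change and marks the affected buffers dirty, and an
 * abort step that releases whatever the prepare step acquired.  The
 * operations below are built from the three helpers above following this
 * pattern.
 */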

int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_palloc_prepare_alloc_entry(dat, req);
	if (ret < 0)
		return ret;

	ret = nilfs_dat_prepare_entry(dat, req, 1);
	if (ret < 0)
		nilfs_palloc_abort_alloc_entry(dat, req);

	return ret;
}

void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr);

	nilfs_palloc_commit_alloc_entry(dat, req);
	nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
	nilfs_dat_abort_entry(dat, req);
	nilfs_palloc_abort_alloc_entry(dat, req);
}

static void nilfs_dat_commit_free(struct inode *dat,
				  struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
	entry->de_blocknr = cpu_to_le64(0);
	kunmap_atomic(kaddr);

	nilfs_dat_commit_entry(dat, req);
	nilfs_palloc_commit_free_entry(dat, req);
}

int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	WARN_ON(ret == -ENOENT);
	return ret;
}

void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
			    sector_t blocknr)
{
	struct nilfs_dat_entry *entry;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr);

	nilfs_dat_commit_entry(dat, req);
}

int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_dat_prepare_entry(dat, req, 0);
	if (ret < 0) {
		WARN_ON(ret == -ENOENT);
		return ret;
	}

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (blocknr == 0) {
		ret = nilfs_palloc_prepare_free_entry(dat, req);
		if (ret < 0) {
			nilfs_dat_abort_entry(dat, req);
			return ret;
		}
	}

	return 0;
}

void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
			  int dead)
{
	struct nilfs_dat_entry *entry;
	__u64 start, end;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	end = start = le64_to_cpu(entry->de_start);
	if (!dead) {
		end = nilfs_mdt_cno(dat);
		WARN_ON(start > end);
	}
	entry->de_end = cpu_to_le64(end);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (blocknr == 0)
		nilfs_dat_commit_free(dat, req);
	else
		nilfs_dat_commit_entry(dat, req);
}

void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
	struct nilfs_dat_entry *entry;
	__u64 start;
	sector_t blocknr;
	void *kaddr;

	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
					     req->pr_entry_bh, kaddr);
	start = le64_to_cpu(entry->de_start);
	blocknr = le64_to_cpu(entry->de_blocknr);
	kunmap_atomic(kaddr);

	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
		nilfs_palloc_abort_free_entry(dat, req);
	nilfs_dat_abort_entry(dat, req);
}
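
/*
 * nilfs_dat_commit_start() stamps an entry with the current checkpoint
 * number and the disk block number it is being written to; the *_end()
 * routines later close the entry's lifetime, either at the current
 * checkpoint or, when the caller marks it dead, at the checkpoint it was
 * started in.  An entry whose block was never written (de_blocknr == 0)
 * is instead handed back to the allocator.
 */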

int nilfs_dat_prepare_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq)
{
	int ret;

	ret = nilfs_dat_prepare_end(dat, oldreq);
	if (!ret) {
		ret = nilfs_dat_prepare_alloc(dat, newreq);
		if (ret < 0)
			nilfs_dat_abort_end(dat, oldreq);
	}
	return ret;
}

void nilfs_dat_commit_update(struct inode *dat,
			     struct nilfs_palloc_req *oldreq,
			     struct nilfs_palloc_req *newreq, int dead)
{
	nilfs_dat_commit_end(dat, oldreq, dead);
	nilfs_dat_commit_alloc(dat, newreq);
}

void nilfs_dat_abort_update(struct inode *dat,
			    struct nilfs_palloc_req *oldreq,
			    struct nilfs_palloc_req *newreq)
{
	nilfs_dat_abort_end(dat, oldreq);
	nilfs_dat_abort_alloc(dat, newreq);
}
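
/*
 * Illustrative sketch (the caller shown here is hypothetical, not part of
 * this file): relocating a virtual block number ends the old entry and
 * allocates a fresh one in a single prepare/commit pair, roughly
 *
 *	oldreq.pr_entry_nr = vblocknr;
 *	ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
 *	if (!ret) {
 *		...decide whether the old block is dead...
 *		nilfs_dat_commit_update(dat, &oldreq, &newreq, dead);
 *	}
 *
 * and nilfs_dat_abort_update() undoes both halves if a later step fails.
 */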

/**
 * nilfs_dat_mark_dirty - mark the DAT entry of a virtual block number dirty
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
	struct nilfs_palloc_req req;
	int ret;

	req.pr_entry_nr = vblocknr;
	ret = nilfs_dat_prepare_entry(dat, &req, 0);
	if (ret == 0)
		nilfs_dat_commit_entry(dat, &req);
	return ret;
}

/**
 * nilfs_dat_freev - free virtual block numbers
 * @dat: DAT file inode
 * @vblocknrs: array of virtual block numbers
 * @nitems: number of virtual block numbers
 *
 * Description: nilfs_dat_freev() frees the virtual block numbers specified
 * by @vblocknrs and @nitems.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - The virtual block numbers have not been allocated.
 */
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
	return nilfs_palloc_freev(dat, vblocknrs, nitems);
}

/**
 * nilfs_dat_move - change a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknr: block number
 *
 * Description: nilfs_dat_move() changes the block number associated with
 * @vblocknr to @blocknr.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	/*
	 * The given disk block number (blocknr) is not yet written to
	 * the device at this point.
	 *
	 * To prevent nilfs_dat_translate() from returning the
	 * uncommitted block number, this makes a copy of the entry
	 * buffer and redirects nilfs_dat_translate() to the copy.
	 */
	if (!buffer_nilfs_redirected(entry_bh)) {
		ret = nilfs_mdt_freeze_buffer(dat, entry_bh);
		if (ret) {
			brelse(entry_bh);
			return ret;
		}
	}

	kaddr = kmap_atomic(entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
		nilfs_msg(dat->i_sb, KERN_CRIT,
			  "%s: invalid vblocknr = %llu, [%llu, %llu)",
			  __func__, (unsigned long long)vblocknr,
			  (unsigned long long)le64_to_cpu(entry->de_start),
			  (unsigned long long)le64_to_cpu(entry->de_end));
		kunmap_atomic(kaddr);
		brelse(entry_bh);
		return -EINVAL;
	}
	WARN_ON(blocknr == 0);
	entry->de_blocknr = cpu_to_le64(blocknr);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(entry_bh);
	nilfs_mdt_mark_dirty(dat);

	brelse(entry_bh);

	return 0;
}

/**
 * nilfs_dat_translate - translate a virtual block number to a block number
 * @dat: DAT file inode
 * @vblocknr: virtual block number
 * @blocknrp: pointer to a block number
 *
 * Description: nilfs_dat_translate() maps the virtual block number @vblocknr
 * to the corresponding block number.
 *
 * Return Value: On success, 0 is returned and the block number associated
 * with @vblocknr is stored in the place pointed by @blocknrp. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - A block number associated with @vblocknr does not exist.
 */
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
	struct buffer_head *entry_bh, *bh;
	struct nilfs_dat_entry *entry;
	sector_t blocknr;
	void *kaddr;
	int ret;

	ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
	if (ret < 0)
		return ret;

	if (!nilfs_doing_gc() && buffer_nilfs_redirected(entry_bh)) {
		bh = nilfs_mdt_get_frozen_buffer(dat, entry_bh);
		if (bh) {
			WARN_ON(!buffer_uptodate(bh));
			brelse(entry_bh);
			entry_bh = bh;
		}
	}

	kaddr = kmap_atomic(entry_bh->b_page);
	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
	blocknr = le64_to_cpu(entry->de_blocknr);
	if (blocknr)
		*blocknrp = blocknr;
	else
		ret = -ENOENT;
	kunmap_atomic(kaddr);

	brelse(entry_bh);
	return ret;
}
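
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * reader maps a virtual block number to a device address before issuing
 * I/O, roughly
 *
 *	sector_t pbn;
 *
 *	ret = nilfs_dat_translate(dat, vblocknr, &pbn);
 *	if (!ret)
 *		... submit a read for block pbn ...
 */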

ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned int visz,
			    size_t nvi)
{
	struct buffer_head *entry_bh;
	struct nilfs_dat_entry *entry;
	struct nilfs_vinfo *vinfo = buf;
	__u64 first, last;
	void *kaddr;
	unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
	int i, j, n, ret;

	for (i = 0; i < nvi; i += n) {
		ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
						   0, &entry_bh);
		if (ret < 0)
			return ret;
		kaddr = kmap_atomic(entry_bh->b_page);
		/* last virtual block number in this block */
		first = vinfo->vi_vblocknr;
		do_div(first, entries_per_block);
		first *= entries_per_block;
		last = first + entries_per_block - 1;
		for (j = i, n = 0;
		     j < nvi && vinfo->vi_vblocknr >= first &&
			     vinfo->vi_vblocknr <= last;
		     j++, n++, vinfo = (void *)vinfo + visz) {
			entry = nilfs_palloc_block_get_entry(
				dat, vinfo->vi_vblocknr, entry_bh, kaddr);
			vinfo->vi_start = le64_to_cpu(entry->de_start);
			vinfo->vi_end = le64_to_cpu(entry->de_end);
			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
		}
		kunmap_atomic(kaddr);
		brelse(entry_bh);
	}

	return nvi;
}
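
/*
 * The loop in nilfs_dat_get_vinfo() above serves every request that falls
 * into the same DAT entry block from a single buffer lookup, so the
 * batching is most effective when the vinfo array is sorted by virtual
 * block number.
 */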

/**
 * nilfs_dat_read - read or get dat inode
 * @sb: super block instance
 * @entry_size: size of a dat entry
 * @raw_inode: on-disk dat inode
 * @inodep: buffer to store the inode
 */
int nilfs_dat_read(struct super_block *sb, size_t entry_size,
		   struct nilfs_inode *raw_inode, struct inode **inodep)
{
	static struct lock_class_key dat_lock_key;
	struct inode *dat;
	struct nilfs_dat_info *di;
	int err;

	if (entry_size > sb->s_blocksize) {
		nilfs_msg(sb, KERN_ERR, "too large DAT entry size: %zu bytes",
			  entry_size);
		return -EINVAL;
	} else if (entry_size < NILFS_MIN_DAT_ENTRY_SIZE) {
		nilfs_msg(sb, KERN_ERR, "too small DAT entry size: %zu bytes",
			  entry_size);
		return -EINVAL;
	}

	dat = nilfs_iget_locked(sb, NULL, NILFS_DAT_INO);
	if (unlikely(!dat))
		return -ENOMEM;
	if (!(dat->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(dat, NILFS_MDT_GFP, sizeof(*di));
	if (err)
		goto failed;

	err = nilfs_palloc_init_blockgroup(dat, entry_size);
	if (err)
		goto failed;

	di = NILFS_DAT_I(dat);
	lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
	nilfs_palloc_setup_cache(dat, &di->palloc_cache);
	nilfs_mdt_setup_shadow_map(dat, &di->shadow);

	err = nilfs_read_inode_common(dat, raw_inode);
	if (err)
		goto failed;

	unlock_new_inode(dat);
 out:
	*inodep = dat;
	return 0;
 failed:
	iget_failed(dat);
	return err;
}
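
/*
 * nilfs_dat_read() above is called from outside this file while the
 * filesystem metadata is being loaded: it creates or revalidates the DAT
 * inode, wires up its palloc cache and shadow map, and fills the in-core
 * inode from @raw_inode taken from the super root.
 */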