/*
 * direct.c - NILFS direct block pointer.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Koji Sato.
 */

#include <linux/errno.h>
#include <linux/string.h>
#include "nilfs.h"
#include "page.h"
#include "direct.h"
#include "alloc.h"
#include "dat.h"

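/*
 * A direct bmap keeps an array of NILFS_DIRECT_NBLOCKS block pointers
 * right after the nilfs_direct_node header embedded in the bmap union;
 * the helpers below access that little-endian on-disk pointer array.
 */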
static inline __le64 *nilfs_direct_dptrs(const struct nilfs_bmap *direct)
{
        return (__le64 *)
                ((struct nilfs_direct_node *)direct->b_u.u_data + 1);
}

static inline __u64
nilfs_direct_get_ptr(const struct nilfs_bmap *direct, __u64 key)
{
        return le64_to_cpu(*(nilfs_direct_dptrs(direct) + key));
}

static inline void nilfs_direct_set_ptr(struct nilfs_bmap *direct,
                                        __u64 key, __u64 ptr)
{
        *(nilfs_direct_dptrs(direct) + key) = cpu_to_le64(ptr);
}

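/*
 * nilfs_direct_lookup - resolve the block pointer stored at @key.
 * Only level 1 and keys up to NILFS_DIRECT_KEY_MAX are valid; returns
 * -ENOENT if the slot is empty.
 */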
static int nilfs_direct_lookup(const struct nilfs_bmap *direct,
                               __u64 key, int level, __u64 *ptrp)
{
        __u64 ptr;

        if (key > NILFS_DIRECT_KEY_MAX || level != 1)
                return -ENOENT;
        ptr = nilfs_direct_get_ptr(direct, key);
        if (ptr == NILFS_BMAP_INVALID_PTR)
                return -ENOENT;

        *ptrp = ptr;
        return 0;
}

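/*
 * nilfs_direct_lookup_contig - look up a run of contiguous blocks.
 * Starting at @key, count how many of the following pointers (up to
 * @maxblocks) map to consecutive disk blocks, translating virtual block
 * numbers through the DAT when the bmap uses them.  The first disk
 * block number is stored in @ptrp and the length of the run returned.
 */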
static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
                                      __u64 key, __u64 *ptrp,
                                      unsigned int maxblocks)
{
        struct inode *dat = NULL;
        __u64 ptr, ptr2;
        sector_t blocknr;
        int ret, cnt;

        if (key > NILFS_DIRECT_KEY_MAX)
                return -ENOENT;
        ptr = nilfs_direct_get_ptr(direct, key);
        if (ptr == NILFS_BMAP_INVALID_PTR)
                return -ENOENT;

        if (NILFS_BMAP_USE_VBN(direct)) {
                dat = nilfs_bmap_get_dat(direct);
                ret = nilfs_dat_translate(dat, ptr, &blocknr);
                if (ret < 0)
                        return ret;
                ptr = blocknr;
        }

        maxblocks = min_t(unsigned int, maxblocks,
                          NILFS_DIRECT_KEY_MAX - key + 1);
        for (cnt = 1; cnt < maxblocks &&
                     (ptr2 = nilfs_direct_get_ptr(direct, key + cnt)) !=
                     NILFS_BMAP_INVALID_PTR;
             cnt++) {
                if (dat) {
                        ret = nilfs_dat_translate(dat, ptr2, &blocknr);
                        if (ret < 0)
                                return ret;
                        ptr2 = blocknr;
                }
                if (ptr2 != ptr + cnt)
                        break;
        }
        *ptrp = ptr;
        return cnt;
}

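/*
 * nilfs_direct_find_target_v - pick a target virtual block number for a
 * new allocation, preferring the sequential-access hint and falling
 * back to the block-group heuristic.
 */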
static __u64
nilfs_direct_find_target_v(const struct nilfs_bmap *direct, __u64 key)
{
        __u64 ptr;

        ptr = nilfs_bmap_find_target_seq(direct, key);
        if (ptr != NILFS_BMAP_INVALID_PTR)
                /* sequential access */
                return ptr;

        /* block group */
        return nilfs_bmap_find_target_in_group(direct);
}

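/*
 * nilfs_direct_insert - insert the block referenced by @ptr at @key.
 * For bmaps using virtual block numbers, a new entry is allocated in
 * the DAT and the buffer is marked volatile until it is assigned a real
 * disk block at segment construction time.
 */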
static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
{
        union nilfs_bmap_ptr_req req;
        struct inode *dat = NULL;
        struct buffer_head *bh;
        int ret;

        if (key > NILFS_DIRECT_KEY_MAX)
                return -ENOENT;
        if (nilfs_direct_get_ptr(bmap, key) != NILFS_BMAP_INVALID_PTR)
                return -EEXIST;

        if (NILFS_BMAP_USE_VBN(bmap)) {
                req.bpr_ptr = nilfs_direct_find_target_v(bmap, key);
                dat = nilfs_bmap_get_dat(bmap);
        }
        ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat);
        if (!ret) {
                /* ptr must be a pointer to a buffer head. */
                bh = (struct buffer_head *)((unsigned long)ptr);
                set_buffer_nilfs_volatile(bh);

                nilfs_bmap_commit_alloc_ptr(bmap, &req, dat);
                nilfs_direct_set_ptr(bmap, key, req.bpr_ptr);

                if (!nilfs_bmap_dirty(bmap))
                        nilfs_bmap_set_dirty(bmap);

                if (NILFS_BMAP_USE_VBN(bmap))
                        nilfs_bmap_set_target_v(bmap, key, req.bpr_ptr);

                nilfs_inode_add_blocks(bmap->b_inode, 1);
        }
        return ret;
}

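/*
 * nilfs_direct_delete - remove the block pointer stored at @key and
 * release it through the prepare/commit end-pointer protocol.
 */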
static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key)
{
        union nilfs_bmap_ptr_req req;
        struct inode *dat;
        int ret;

        if (key > NILFS_DIRECT_KEY_MAX ||
            nilfs_direct_get_ptr(bmap, key) == NILFS_BMAP_INVALID_PTR)
                return -ENOENT;

        dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
        req.bpr_ptr = nilfs_direct_get_ptr(bmap, key);

        ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat);
        if (!ret) {
                nilfs_bmap_commit_end_ptr(bmap, &req, dat);
                nilfs_direct_set_ptr(bmap, key, NILFS_BMAP_INVALID_PTR);
                nilfs_inode_sub_blocks(bmap->b_inode, 1);
        }
        return ret;
}

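/*
 * nilfs_direct_seek_key - find the first used key at or after @start.
 */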
static int nilfs_direct_seek_key(const struct nilfs_bmap *direct, __u64 start,
                                 __u64 *keyp)
{
        __u64 key;

        for (key = start; key <= NILFS_DIRECT_KEY_MAX; key++) {
                if (nilfs_direct_get_ptr(direct, key) !=
                    NILFS_BMAP_INVALID_PTR) {
                        *keyp = key;
                        return 0;
                }
        }
        return -ENOENT;
}

static int nilfs_direct_last_key(const struct nilfs_bmap *direct, __u64 *keyp)
{
        __u64 key, lastkey;

        lastkey = NILFS_DIRECT_KEY_MAX + 1;
        for (key = NILFS_DIRECT_KEY_MIN; key <= NILFS_DIRECT_KEY_MAX; key++)
                if (nilfs_direct_get_ptr(direct, key) !=
                    NILFS_BMAP_INVALID_PTR)
                        lastkey = key;

        if (lastkey == NILFS_DIRECT_KEY_MAX + 1)
                return -ENOENT;

        *keyp = lastkey;
        return 0;
}

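/*
 * nilfs_direct_check_insert - a nonzero return tells the caller that
 * @key does not fit in a direct bmap, i.e. the bmap must be converted
 * to a B-tree before the insertion can proceed.
 */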
static int nilfs_direct_check_insert(const struct nilfs_bmap *bmap, __u64 key)
{
        return key > NILFS_DIRECT_KEY_MAX;
}

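/*
 * nilfs_direct_gather_data - collect the valid key/pointer pairs into
 * @keys and @ptrs (at most @nitems entries) and return their count.
 * Used when the direct bmap is converted into a B-tree.
 */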
static int nilfs_direct_gather_data(struct nilfs_bmap *direct,
                                    __u64 *keys, __u64 *ptrs, int nitems)
{
        __u64 key;
        __u64 ptr;
        int n;

        if (nitems > NILFS_DIRECT_NBLOCKS)
                nitems = NILFS_DIRECT_NBLOCKS;
        n = 0;
        for (key = 0; key < nitems; key++) {
                ptr = nilfs_direct_get_ptr(direct, key);
                if (ptr != NILFS_BMAP_INVALID_PTR) {
                        keys[n] = key;
                        ptrs[n] = ptr;
                        n++;
                }
        }
        return n;
}

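/*
 * nilfs_direct_delete_and_convert - delete @key from a bmap and turn it
 * back into a direct bmap populated with the @n remaining key/pointer
 * pairs given in @keys/@ptrs.  Called when a shrinking B-tree fits into
 * the direct mapping again.
 */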
int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap,
                                    __u64 key, __u64 *keys, __u64 *ptrs, int n)
{
        __le64 *dptrs;
        int ret, i, j;

        /* no need to allocate any resource for conversion */

        /* delete */
        ret = bmap->b_ops->bop_delete(bmap, key);
        if (ret < 0)
                return ret;

        /* free resources */
        if (bmap->b_ops->bop_clear != NULL)
                bmap->b_ops->bop_clear(bmap);

        /* convert */
        dptrs = nilfs_direct_dptrs(bmap);
        for (i = 0, j = 0; i < NILFS_DIRECT_NBLOCKS; i++) {
                if ((j < n) && (i == keys[j])) {
                        dptrs[i] = (i != key) ?
                                cpu_to_le64(ptrs[j]) :
                                NILFS_BMAP_INVALID_PTR;
                        j++;
                } else
                        dptrs[i] = NILFS_BMAP_INVALID_PTR;
        }

        nilfs_direct_init(bmap);
        return 0;
}

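/*
 * nilfs_direct_propagate - propagate the dirty state of buffer @bh to
 * the DAT.  A buffer not yet marked volatile has its block re-registered
 * in the DAT under a new virtual block number and is then marked
 * volatile; an already-volatile buffer only has its current DAT entry
 * marked dirty.
 */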
static int nilfs_direct_propagate(struct nilfs_bmap *bmap,
                                  struct buffer_head *bh)
{
        struct nilfs_palloc_req oldreq, newreq;
        struct inode *dat;
        __u64 key;
        __u64 ptr;
        int ret;

        if (!NILFS_BMAP_USE_VBN(bmap))
                return 0;

        dat = nilfs_bmap_get_dat(bmap);
        key = nilfs_bmap_data_get_key(bmap, bh);
        ptr = nilfs_direct_get_ptr(bmap, key);
        if (!buffer_nilfs_volatile(bh)) {
                oldreq.pr_entry_nr = ptr;
                newreq.pr_entry_nr = ptr;
                ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
                if (ret < 0)
                        return ret;
                nilfs_dat_commit_update(dat, &oldreq, &newreq,
                                        bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
                set_buffer_nilfs_volatile(bh);
                nilfs_direct_set_ptr(bmap, key, newreq.pr_entry_nr);
        } else
                ret = nilfs_dat_mark_dirty(dat, ptr);

        return ret;
}

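/*
 * nilfs_direct_assign_v/_p - record the disk block number assigned to a
 * buffer during segment construction.  The _v variant commits the start
 * of the block's lifetime in the DAT and fills in virtual-block binfo;
 * the _p variant stores @blocknr directly in the bmap.
 */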
static int nilfs_direct_assign_v(struct nilfs_bmap *direct,
                                 __u64 key, __u64 ptr,
                                 struct buffer_head **bh,
                                 sector_t blocknr,
                                 union nilfs_binfo *binfo)
{
        struct inode *dat = nilfs_bmap_get_dat(direct);
        union nilfs_bmap_ptr_req req;
        int ret;

        req.bpr_ptr = ptr;
        ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
        if (!ret) {
                nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
                binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr);
                binfo->bi_v.bi_blkoff = cpu_to_le64(key);
        }
        return ret;
}

static int nilfs_direct_assign_p(struct nilfs_bmap *direct,
                                 __u64 key, __u64 ptr,
                                 struct buffer_head **bh,
                                 sector_t blocknr,
                                 union nilfs_binfo *binfo)
{
        nilfs_direct_set_ptr(direct, key, blocknr);

        binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
        binfo->bi_dat.bi_level = 0;

        return 0;
}

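/*
 * nilfs_direct_assign - validate the key and pointer of buffer *@bh and
 * dispatch to the _v or _p variant depending on whether the bmap uses
 * virtual block numbers.
 */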
static int nilfs_direct_assign(struct nilfs_bmap *bmap,
                               struct buffer_head **bh,
                               sector_t blocknr,
                               union nilfs_binfo *binfo)
{
        __u64 key;
        __u64 ptr;

        key = nilfs_bmap_data_get_key(bmap, *bh);
        if (unlikely(key > NILFS_DIRECT_KEY_MAX)) {
                nilfs_msg(bmap->b_inode->i_sb, KERN_CRIT,
                          "%s (ino=%lu): invalid key: %llu", __func__,
                          bmap->b_inode->i_ino, (unsigned long long)key);
                return -EINVAL;
        }
        ptr = nilfs_direct_get_ptr(bmap, key);
        if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) {
                nilfs_msg(bmap->b_inode->i_sb, KERN_CRIT,
                          "%s (ino=%lu): invalid pointer: %llu", __func__,
                          bmap->b_inode->i_ino, (unsigned long long)ptr);
                return -EINVAL;
        }

        return NILFS_BMAP_USE_VBN(bmap) ?
                nilfs_direct_assign_v(bmap, key, ptr, bh, blocknr, binfo) :
                nilfs_direct_assign_p(bmap, key, ptr, bh, blocknr, binfo);
}

static const struct nilfs_bmap_operations nilfs_direct_ops = {
        .bop_lookup             =       nilfs_direct_lookup,
        .bop_lookup_contig      =       nilfs_direct_lookup_contig,
        .bop_insert             =       nilfs_direct_insert,
        .bop_delete             =       nilfs_direct_delete,
        .bop_clear              =       NULL,

        .bop_propagate          =       nilfs_direct_propagate,

        .bop_lookup_dirty_buffers       =       NULL,

        .bop_assign             =       nilfs_direct_assign,
        .bop_mark               =       NULL,

        .bop_seek_key           =       nilfs_direct_seek_key,
        .bop_last_key           =       nilfs_direct_last_key,

        .bop_check_insert       =       nilfs_direct_check_insert,
        .bop_check_delete       =       NULL,
        .bop_gather_data        =       nilfs_direct_gather_data,
};

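/*
 * nilfs_direct_init - attach the direct bmap operations to @bmap.
 */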
int nilfs_direct_init(struct nilfs_bmap *bmap)
{
        bmap->b_ops = &nilfs_direct_ops;
        return 0;
}