1 // SPDX-License-Identifier: GPL-2.0+
3 * direct.c - NILFS direct block pointer.
5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
7 * Written by Koji Sato.
10 #include <linux/errno.h>
/*
 * Return the array of little-endian 64-bit direct block pointers kept in
 * the bmap's embedded data area, located immediately past one
 * nilfs_direct_node header.
 * NOTE(review): this listing is elided (embedded line numbers are not
 * contiguous); the function body shown here is missing lines.
 */
17 static inline __le64 *nilfs_direct_dptrs(const struct nilfs_bmap *direct)
20 ((struct nilfs_direct_node *)direct->b_u.u_data + 1);
/*
 * Fetch the block pointer stored at slot @key, converting from on-disk
 * little-endian to CPU byte order.  No bounds check here; callers are
 * expected to validate @key against NILFS_DIRECT_KEY_MAX first.
 */
24 nilfs_direct_get_ptr(const struct nilfs_bmap *direct, __u64 key)
26 return le64_to_cpu(*(nilfs_direct_dptrs(direct) + key));
/*
 * Store @ptr into slot @key, converting to on-disk little-endian byte
 * order.  Counterpart of nilfs_direct_get_ptr(); no bounds check.
 */
29 static inline void nilfs_direct_set_ptr(struct nilfs_bmap *direct,
32 *(nilfs_direct_dptrs(direct) + key) = cpu_to_le64(ptr);
/*
 * nilfs_direct_lookup - look up the block pointer mapped to @key.
 * Rejects keys beyond NILFS_DIRECT_KEY_MAX and any @level other than 1
 * (a direct mapping has exactly one level), and fails when the slot
 * holds NILFS_BMAP_INVALID_PTR.  On success the pointer is presumably
 * stored through @ptrp — the success path lines are elided here.
 */
35 static int nilfs_direct_lookup(const struct nilfs_bmap *direct,
36 __u64 key, int level, __u64 *ptrp)
40 if (key > NILFS_DIRECT_KEY_MAX || level != 1)
42 ptr = nilfs_direct_get_ptr(direct, key);
43 if (ptr == NILFS_BMAP_INVALID_PTR)
/*
 * nilfs_direct_lookup_contig - look up a run of contiguous blocks
 * starting at @key, limited to @maxblocks and to the end of the direct
 * range.  When the bmap uses virtual block numbers
 * (NILFS_BMAP_USE_VBN), each pointer is translated through the DAT
 * inode; the scan extends while successive slots hold valid pointers
 * that translate/continue consecutively (ptr2 == ptr + cnt).
 * NOTE(review): listing is elided; error/return paths between the
 * visible lines are missing.
 */
50 static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
51 __u64 key, __u64 *ptrp,
52 unsigned int maxblocks)
54 struct inode *dat = NULL;
59 if (key > NILFS_DIRECT_KEY_MAX)
61 ptr = nilfs_direct_get_ptr(direct, key);
62 if (ptr == NILFS_BMAP_INVALID_PTR)
/* Virtual block numbers must be translated to disk block numbers. */
65 if (NILFS_BMAP_USE_VBN(direct)) {
66 dat = nilfs_bmap_get_dat(direct);
67 ret = nilfs_dat_translate(dat, ptr, &blocknr);
/* Never scan past the last direct-mapping slot. */
73 maxblocks = min_t(unsigned int, maxblocks,
74 NILFS_DIRECT_KEY_MAX - key + 1);
75 for (cnt = 1; cnt < maxblocks &&
76 (ptr2 = nilfs_direct_get_ptr(direct, key + cnt)) !=
77 NILFS_BMAP_INVALID_PTR;
80 ret = nilfs_dat_translate(dat, ptr2, &blocknr);
/* Stop extending the run at the first non-consecutive pointer. */
85 if (ptr2 != ptr + cnt)
93 ret = -EINVAL; /* Notify bmap layer of metadata corruption */
/*
 * Pick a target virtual block number to allocate near for @key:
 * prefer the pointer suggested by recent sequential access, otherwise
 * fall back to a pointer from the same group.
 */
98 nilfs_direct_find_target_v(const struct nilfs_bmap *direct, __u64 key)
102 ptr = nilfs_bmap_find_target_seq(direct, key);
103 if (ptr != NILFS_BMAP_INVALID_PTR)
104 /* sequential access */
/* No sequential hint available; fall back to block-group locality. */
108 return nilfs_bmap_find_target_in_group(direct);
/*
 * nilfs_direct_insert - insert block pointer @ptr at @key.
 * Fails if @key is out of the direct range or the slot is already
 * occupied.  For virtual-block-number bmaps, a DAT entry is allocated
 * via the prepare/commit pair around the actual slot update.  On
 * success the bmap is marked dirty and one block is accounted to the
 * inode.
 * NOTE(review): listing is elided; error handling between the visible
 * lines (e.g. after prepare_alloc_ptr) is missing from this view.
 */
111 static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
113 union nilfs_bmap_ptr_req req;
114 struct inode *dat = NULL;
115 struct buffer_head *bh;
118 if (key > NILFS_DIRECT_KEY_MAX)
/* Refuse to overwrite an existing mapping. */
120 if (nilfs_direct_get_ptr(bmap, key) != NILFS_BMAP_INVALID_PTR)
123 if (NILFS_BMAP_USE_VBN(bmap)) {
/* Seed the allocator with a locality hint for the new entry. */
124 req.bpr_ptr = nilfs_direct_find_target_v(bmap, key);
125 dat = nilfs_bmap_get_dat(bmap);
127 ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat);
129 /* ptr must be a pointer to a buffer head. */
130 bh = (struct buffer_head *)((unsigned long)ptr);
131 set_buffer_nilfs_volatile(bh);
133 nilfs_bmap_commit_alloc_ptr(bmap, &req, dat);
134 nilfs_direct_set_ptr(bmap, key, req.bpr_ptr);
136 if (!nilfs_bmap_dirty(bmap))
137 nilfs_bmap_set_dirty(bmap);
/* Remember the new pointer as the next sequential-allocation hint. */
139 if (NILFS_BMAP_USE_VBN(bmap))
140 nilfs_bmap_set_target_v(bmap, key, req.bpr_ptr);
142 nilfs_inode_add_blocks(bmap->b_inode, 1);
/*
 * nilfs_direct_delete - remove the block pointer mapped to @key.
 * Fails if @key is out of range or the slot is already empty.  The
 * pointer's lifetime is ended via the prepare/commit end_ptr pair
 * (with the DAT inode when virtual block numbers are in use), the slot
 * is invalidated, and one block is subtracted from the inode.
 * NOTE(review): listing is elided; the error path after
 * prepare_end_ptr is not visible here.
 */
147 static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key)
149 union nilfs_bmap_ptr_req req;
153 if (key > NILFS_DIRECT_KEY_MAX ||
154 nilfs_direct_get_ptr(bmap, key) == NILFS_BMAP_INVALID_PTR)
/* DAT is only involved for virtual-block-number mappings. */
157 dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
158 req.bpr_ptr = nilfs_direct_get_ptr(bmap, key);
160 ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat);
162 nilfs_bmap_commit_end_ptr(bmap, &req, dat);
163 nilfs_direct_set_ptr(bmap, key, NILFS_BMAP_INVALID_PTR);
164 nilfs_inode_sub_blocks(bmap->b_inode, 1);
/*
 * nilfs_direct_seek_key - find the first mapped key at or after @start.
 * Linear scan of the direct slots; the body reporting the found key is
 * elided from this view.
 */
169 static int nilfs_direct_seek_key(const struct nilfs_bmap *direct, __u64 start,
174 for (key = start; key <= NILFS_DIRECT_KEY_MAX; key++) {
175 if (nilfs_direct_get_ptr(direct, key) !=
176 NILFS_BMAP_INVALID_PTR) {
/*
 * nilfs_direct_last_key - find the largest mapped key.
 * Scans every slot; NILFS_DIRECT_KEY_MAX + 1 is used as the "none
 * found" sentinel since it can never be a valid key.  The store into
 * lastkey and the output through @keyp are elided from this view.
 */
184 static int nilfs_direct_last_key(const struct nilfs_bmap *direct, __u64 *keyp)
188 lastkey = NILFS_DIRECT_KEY_MAX + 1;
189 for (key = NILFS_DIRECT_KEY_MIN; key <= NILFS_DIRECT_KEY_MAX; key++)
190 if (nilfs_direct_get_ptr(direct, key) !=
191 NILFS_BMAP_INVALID_PTR)
/* Sentinel still present => no slot was mapped. */
194 if (lastkey == NILFS_DIRECT_KEY_MAX + 1)
/*
 * nilfs_direct_check_insert - return nonzero when @key cannot be held
 * by the direct mapping (i.e. it exceeds NILFS_DIRECT_KEY_MAX).
 */
202 static int nilfs_direct_check_insert(const struct nilfs_bmap *bmap, __u64 key)
204 return key > NILFS_DIRECT_KEY_MAX;
/*
 * nilfs_direct_gather_data - collect up to @nitems (key, ptr) pairs of
 * the mapped slots into @keys/@ptrs.  @nitems is clamped to
 * NILFS_DIRECT_NBLOCKS; the lines that append to the output arrays are
 * elided from this view.
 */
207 static int nilfs_direct_gather_data(struct nilfs_bmap *direct,
208 __u64 *keys, __u64 *ptrs, int nitems)
214 if (nitems > NILFS_DIRECT_NBLOCKS)
215 nitems = NILFS_DIRECT_NBLOCKS;
217 for (key = 0; key < nitems; key++) {
218 ptr = nilfs_direct_get_ptr(direct, key);
219 if (ptr != NILFS_BMAP_INVALID_PTR) {
/*
 * nilfs_direct_delete_and_convert - delete @key through the current
 * bmap operations, clear the old mapping, then rebuild the embedded
 * data area as a direct mapping from the @n (key, ptr) pairs given in
 * @keys/@ptrs (sorted by key, judging by the single-pass merge below).
 * Slot @key itself and all unsupplied slots are set to
 * NILFS_BMAP_INVALID_PTR; finally the bmap is re-initialized as a
 * direct mapping.
 */
228 int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap,
229 __u64 key, __u64 *keys, __u64 *ptrs, int n)
234 /* no need to allocate any resource for conversion */
237 ret = bmap->b_ops->bop_delete(bmap, key);
242 if (bmap->b_ops->bop_clear != NULL)
243 bmap->b_ops->bop_clear(bmap);
/* Re-populate the direct pointer array from the gathered pairs. */
246 dptrs = nilfs_direct_dptrs(bmap);
247 for (i = 0, j = 0; i < NILFS_DIRECT_NBLOCKS; i++) {
248 if ((j < n) && (i == keys[j])) {
/* The deleted key must not be resurrected. */
249 dptrs[i] = (i != key) ?
250 cpu_to_le64(ptrs[j]) :
251 NILFS_BMAP_INVALID_PTR;
254 dptrs[i] = NILFS_BMAP_INVALID_PTR;
257 nilfs_direct_init(bmap);
/*
 * nilfs_direct_propagate - reflect a dirtied data buffer into the DAT.
 * No-op for bmaps that do not use virtual block numbers.  A buffer not
 * yet marked nilfs-volatile gets its DAT entry updated (old entry
 * retired, new entry prepared/committed) and the slot repointed to the
 * new virtual block number; an already-volatile buffer only needs its
 * existing DAT entry marked dirty.
 * NOTE(review): listing is elided; the error check between prepare and
 * commit is not visible here.
 */
261 static int nilfs_direct_propagate(struct nilfs_bmap *bmap,
262 struct buffer_head *bh)
264 struct nilfs_palloc_req oldreq, newreq;
270 if (!NILFS_BMAP_USE_VBN(bmap))
273 dat = nilfs_bmap_get_dat(bmap);
274 key = nilfs_bmap_data_get_key(bmap, bh);
275 ptr = nilfs_direct_get_ptr(bmap, key);
276 if (!buffer_nilfs_volatile(bh)) {
277 oldreq.pr_entry_nr = ptr;
278 newreq.pr_entry_nr = ptr;
279 ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
282 nilfs_dat_commit_update(dat, &oldreq, &newreq,
283 bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
284 set_buffer_nilfs_volatile(bh);
285 nilfs_direct_set_ptr(bmap, key, newreq.pr_entry_nr);
/* Already volatile: just dirty the existing DAT entry. */
287 ret = nilfs_dat_mark_dirty(dat, ptr);
/*
 * nilfs_direct_assign_v - bind virtual block number @ptr to the real
 * disk block @blocknr in the DAT (prepare_start/commit_start pair) and
 * fill in the virtual-block binfo (vblocknr + file block offset) for
 * the segment writer.
 * NOTE(review): listing is elided; the error check after
 * prepare_start is not visible here.
 */
292 static int nilfs_direct_assign_v(struct nilfs_bmap *direct,
293 __u64 key, __u64 ptr,
294 struct buffer_head **bh,
296 union nilfs_binfo *binfo)
298 struct inode *dat = nilfs_bmap_get_dat(direct);
299 union nilfs_bmap_ptr_req req;
303 ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
305 nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
306 binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr);
307 binfo->bi_v.bi_blkoff = cpu_to_le64(key);
/*
 * nilfs_direct_assign_p - physical-pointer variant of block assignment:
 * store the real disk block number @blocknr directly into slot @key and
 * fill in the DAT-style binfo (block offset, level 0 = data block).
 */
312 static int nilfs_direct_assign_p(struct nilfs_bmap *direct,
313 __u64 key, __u64 ptr,
314 struct buffer_head **bh,
316 union nilfs_binfo *binfo)
318 nilfs_direct_set_ptr(direct, key, blocknr);
320 binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
321 binfo->bi_dat.bi_level = 0;
/*
 * nilfs_direct_assign - assign the on-disk location of the buffer *@bh
 * at segment-write time.  Derives the key from the buffer, sanity-checks
 * key and stored pointer (logging a critical message on corruption),
 * then dispatches to the virtual (assign_v) or physical (assign_p)
 * variant depending on the bmap's pointer type.
 */
326 static int nilfs_direct_assign(struct nilfs_bmap *bmap,
327 struct buffer_head **bh,
329 union nilfs_binfo *binfo)
334 key = nilfs_bmap_data_get_key(bmap, *bh);
/* Out-of-range key means the mapping metadata is corrupted. */
335 if (unlikely(key > NILFS_DIRECT_KEY_MAX)) {
336 nilfs_crit(bmap->b_inode->i_sb,
337 "%s (ino=%lu): invalid key: %llu",
339 bmap->b_inode->i_ino, (unsigned long long)key);
342 ptr = nilfs_direct_get_ptr(bmap, key);
/* A dirty buffer must have a valid pointer; otherwise corruption. */
343 if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) {
344 nilfs_crit(bmap->b_inode->i_sb,
345 "%s (ino=%lu): invalid pointer: %llu",
347 bmap->b_inode->i_ino, (unsigned long long)ptr);
351 return NILFS_BMAP_USE_VBN(bmap) ?
352 nilfs_direct_assign_v(bmap, key, ptr, bh, blocknr, binfo) :
353 nilfs_direct_assign_p(bmap, key, ptr, bh, blocknr, binfo);
/*
 * Operations vector for the direct block-pointer mapping.  Entries set
 * to NULL are operations a direct mapping does not need; some slots of
 * the original table are elided from this listing.
 */
356 static const struct nilfs_bmap_operations nilfs_direct_ops = {
357 .bop_lookup = nilfs_direct_lookup,
358 .bop_lookup_contig = nilfs_direct_lookup_contig,
359 .bop_insert = nilfs_direct_insert,
360 .bop_delete = nilfs_direct_delete,
363 .bop_propagate = nilfs_direct_propagate,
365 .bop_lookup_dirty_buffers = NULL,
367 .bop_assign = nilfs_direct_assign,
370 .bop_seek_key = nilfs_direct_seek_key,
371 .bop_last_key = nilfs_direct_last_key,
373 .bop_check_insert = nilfs_direct_check_insert,
374 .bop_check_delete = NULL,
375 .bop_gather_data = nilfs_direct_gather_data,
379 int nilfs_direct_init(struct nilfs_bmap *bmap)
381 bmap->b_ops = &nilfs_direct_ops;