/*
 * Block Translation Table
 * Copyright (c) 2014-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include "btt.h"
#include "nd.h"

enum log_ent_request {
	LOG_NEW_ENT = 0,
	LOG_OLD_ENT
};

static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets are 4K from the base of the device */
	offset += SZ_4K;
	return nvdimm_read_bytes(ndns, offset, buf, n);
}

static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets are 4K from the base of the device */
	offset += SZ_4K;
	return nvdimm_write_bytes(ndns, offset, buf, n);
}

static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
	int ret;

	ret = arena_write_bytes(arena, arena->info2off, super,
			sizeof(struct btt_sb));
	if (ret)
		return ret;

	return arena_write_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb));
}

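/*
 * Illustrative note (not from the original source): the backup info block
 * at info2off is deliberately written before the primary at infooff, so a
 * power failure mid-update can tear at most one of the two copies and
 * arena discovery can still validate against the other.
 */
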
static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
	return arena_read_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb));
}

/*
 * 'raw' version of btt_map write
 * Assumptions:
 *   mapping is in little-endian
 *   mapping contains 'E' and 'Z' flags as desired
 */
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping)
{
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	WARN_ON(lba >= arena->external_nlba);
	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE);
}

static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
			u32 z_flag, u32 e_flag)
{
	u32 ze;
	__le32 mapping_le;

	/*
	 * This 'mapping' is supposed to be just the LBA mapping, without
	 * any flags set, so strip the flag bits.
	 */
	mapping &= MAP_LBA_MASK;

	ze = (z_flag << 1) + e_flag;
	switch (ze) {
	case 0:
		/*
		 * We want to set neither of the Z or E flags, and
		 * in the actual layout, this means setting the bit
		 * positions of both to '1' to indicate a 'normal'
		 * map entry
		 */
		mapping |= MAP_ENT_NORMAL;
		break;
	case 1:
		mapping |= (1 << MAP_ERR_SHIFT);
		break;
	case 2:
		mapping |= (1 << MAP_TRIM_SHIFT);
		break;
	default:
		/*
		 * The case where Z and E are both sent in as '1' could be
		 * construed as a valid 'normal' case, but we decide not to,
		 * to avoid confusion
		 */
		WARN_ONCE(1, "Invalid use of Z and E flags\n");
		return -EIO;
	}

	mapping_le = cpu_to_le32(mapping);
	return __btt_map_write(arena, lba, mapping_le);
}

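/*
 * Worked example (illustrative only, assuming the btt.h definitions
 * MAP_TRIM_SHIFT = 31, MAP_ERR_SHIFT = 30, MAP_ENT_NORMAL = 0xC0000000):
 * a 'normal' mapping of a premap LBA to postmap block 5 is stored as
 *
 *	cpu_to_le32((5 & MAP_LBA_MASK) | MAP_ENT_NORMAL) == 0xc0000005
 *
 * i.e. both flag bits set denote a normal (written) entry, an all-zeroes
 * entry is the initial identity state, and exactly one of the top two
 * bits set marks an error or zeroed (trimmed) block.
 */
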
static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
			int *trim, int *error)
{
	int ret;
	__le32 in;
	u32 raw_mapping, postmap, ze, z_flag, e_flag;
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	WARN_ON(lba >= arena->external_nlba);

	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE);
	if (ret)
		return ret;

	raw_mapping = le32_to_cpu(in);

	z_flag = (raw_mapping & MAP_TRIM_MASK) >> MAP_TRIM_SHIFT;
	e_flag = (raw_mapping & MAP_ERR_MASK) >> MAP_ERR_SHIFT;
	ze = (z_flag << 1) + e_flag;
	postmap = raw_mapping & MAP_LBA_MASK;

	/* Reuse the {z,e}_flag variables for *trim and *error */
	z_flag = 0;
	e_flag = 0;

	switch (ze) {
	case 0:
		/* Initial state. Return postmap = premap */
		*mapping = lba;
		break;
	case 1:
		*mapping = postmap;
		e_flag = 1;
		break;
	case 2:
		*mapping = postmap;
		z_flag = 1;
		break;
	case 3:
		*mapping = postmap;
		break;
	default:
		return -EIO;
	}

	if (trim)
		*trim = z_flag;
	if (error)
		*error = e_flag;

	return ret;
}

static int btt_log_group_read(struct arena_info *arena, u32 lane,
			struct log_group *log)
{
	return arena_read_bytes(arena,
			arena->logoff + (lane * LOG_GRP_SIZE), log,
			LOG_GRP_SIZE);
}

static struct dentry *debugfs_root;

static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
				int idx)
{
	char dirname[32];
	struct dentry *d;

	/* If for some reason, parent bttN was not created, exit */
	if (!parent)
		return;

	snprintf(dirname, 32, "arena%d", idx);
	d = debugfs_create_dir(dirname, parent);
	if (IS_ERR_OR_NULL(d))
		return;
	a->debugfs_dir = d;
	debugfs_create_x64("size", S_IRUGO, d, &a->size);
	debugfs_create_x64("external_lba_start", S_IRUGO, d,
			&a->external_lba_start);
	debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
	debugfs_create_u32("internal_lbasize", S_IRUGO, d,
			&a->internal_lbasize);
	debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
	debugfs_create_u32("external_lbasize", S_IRUGO, d,
			&a->external_lbasize);
	debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
	debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
	debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
	debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
	debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
	debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
	debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
	debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
	debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
}

static void btt_debugfs_init(struct btt *btt)
{
	int i = 0;
	struct arena_info *arena;

	btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
			debugfs_root);
	if (IS_ERR_OR_NULL(btt->debugfs_dir))
		return;

	list_for_each_entry(arena, &btt->arena_list, list) {
		arena_debugfs_init(arena, btt->debugfs_dir, i);
		i++;
	}
}

static u32 log_seq(struct log_group *log, int log_idx)
{
	return le32_to_cpu(log->ent[log_idx].seq);
}

/*
 * This function accepts two log entries, and uses the
 * sequence number to find the 'older' entry.
 * It also updates the sequence number in this old entry to
 * make it the 'new' one if the mark_flag is set.
 * Finally, it returns which of the entries was the older one.
 *
 * TODO The logic feels a bit kludge-y. make it better..
 */
static int btt_log_get_old(struct arena_info *a, struct log_group *log)
{
	int idx0 = a->log_index[0];
	int idx1 = a->log_index[1];
	int old;

	/*
	 * the first ever time this is seen, the entry goes into [0]
	 * the next time, the following logic works out to put this
	 * (next) entry into [1]
	 */
	if (log_seq(log, idx0) == 0) {
		log->ent[idx0].seq = cpu_to_le32(1);
		return 0;
	}

	if (log_seq(log, idx0) == log_seq(log, idx1))
		return -EINVAL;
	if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
		return -EINVAL;

	if (log_seq(log, idx0) < log_seq(log, idx1)) {
		if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}

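/*
 * Example (illustrative, not from the original source): sequence numbers
 * cycle 1 -> 2 -> 3 -> 1, so the valid pairs are {0,X} (never written),
 * {1,2}, {2,3} and {3,1}. In the {3,1} case the numerically smaller
 * entry (1) is the *newer* one, which is why the distance-of-1 tests
 * above are needed instead of a plain "smaller is older" comparison.
 * Equal pairs, and pairs whose sum exceeds 5 (corrupted values outside
 * the 1..3 cycle), are rejected with -EINVAL.
 */
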
static struct device *to_dev(struct arena_info *arena)
{
	return &arena->nd_btt->dev;
}

/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
			struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_group log;

	ret = btt_log_group_read(arena, lane, &log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(arena, &log);
	if (old_ent < 0 || old_ent > 1) {
		dev_info(to_dev(arena),
				"log corruption (%d): lane %d seq [%d, %d]\n",
				old_ent, lane, log.ent[arena->log_index[0]].seq,
				log.ent[arena->log_index[1]].seq);
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);

	return ret_ent;
}

/*
 * This function commits a log entry to media
 * It does _not_ prepare the freelist entry for the next write
 * btt_flog_write is the wrapper for updating the freelist elements
 */
static int __btt_log_write(struct arena_info *arena, u32 lane,
			u32 sub, struct log_entry *ent)
{
	int ret;
	u32 group_slot = arena->log_index[sub];
	unsigned int log_half = LOG_ENT_SIZE / 2;
	void *src = ent;
	u64 ns_off;

	ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
		(group_slot * LOG_ENT_SIZE);
	/* split the 16B write into atomic, durable halves */
	ret = arena_write_bytes(arena, ns_off, src, log_half);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half);
}

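/*
 * Why the split write (illustrative note): with the 16-byte log_entry
 * laid out as { lba, old_map, new_map, seq }, the sequence number lands
 * in the second 8-byte half. Writing the halves separately means the
 * entry only becomes the 'newest' (per its seq field) once the second
 * half is durable; a power failure between the halves leaves the old
 * sequence number in place and recovery keeps using the previous entry.
 */
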
static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
			struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
	arena->freelist[lane].block = le32_to_cpu(ent->old_map);

	return ret;
}

/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
	}

 free:
	kfree(zerobuf);
	return ret;
}

/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initial reserved set of blocks as being free
 */
static int btt_log_init(struct arena_info *arena)
{
	int ret;
	u32 i;
	struct log_entry ent, zerolog;

	memset(&zerolog, 0, sizeof(zerolog));

	for (i = 0; i < arena->nfree; i++) {
		ent.lba = cpu_to_le32(i);
		ent.old_map = cpu_to_le32(arena->external_nlba + i);
		ent.new_map = cpu_to_le32(arena->external_nlba + i);
		ent.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &ent);
		if (ret)
			return ret;
		ret = __btt_log_write(arena, i, 1, &zerolog);
		if (ret)
			return ret;
	}

	return 0;
}

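/*
 * Example of the initial flog state (illustrative): with nfree lanes and
 * external_nlba = N, lane i starts out with lba = i and old_map ==
 * new_map == N + i, i.e. each lane owns one of the nfree reserved
 * internal blocks. btt_freelist_init() below recognises this
 * old_map == new_map pattern as an untouched lane that needs no map
 * recovery.
 */
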
static int btt_freelist_init(struct arena_info *arena)
{
	int new, ret;
	u32 i, map_entry;
	struct log_entry log_new;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
					GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = le32_to_cpu(log_new.old_map);

		/* This implies a newly created or untouched flog entry */
		if (log_new.old_map == log_new.new_map)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL);
		if (ret)
			return ret;
		if ((le32_to_cpu(log_new.new_map) != map_entry) &&
				(le32_to_cpu(log_new.old_map) == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0);
			if (ret)
				return ret;
		}
	}

	return 0;
}

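/*
 * Recovery walk-through (illustrative): suppose a crash occurred after
 * btt_flog_write() committed { lba = L, old_map = O, new_map = N } but
 * before the map update. On the next boot the map entry for L still
 * reads O, which matches old_map and differs from new_map, so the code
 * above replays the map write and the transaction completes. If the map
 * already reads N, the transaction had finished and nothing is done.
 */
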
static bool ent_is_padding(struct log_entry *ent)
{
	return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
			&& (ent->seq == 0);
}

/*
 * Detecting valid log indices: We read a log group (see the comments in btt.h
 * for a description of a 'log_group' and its 'slots'), and iterate over its
 * four slots. We expect that a padding slot will be all-zeroes, and use this
 * to detect a padding slot vs. an actual entry.
 *
 * If a log_group is in the initial state, i.e. hasn't been used since the
 * creation of this BTT layout, it will have three of the four slots with
 * zeroes. We skip over these log_groups for the detection of log_index. If
 * all log_groups are in the initial state (i.e. the BTT has never been
 * written to), it is safe to assume the 'new format' of log entries in slots
 * (0, 1).
 */
static int log_set_indices(struct arena_info *arena)
{
	bool idx_set = false, initial_state = true;
	int ret, log_index[2] = {-1, -1};
	u32 i, j, next_idx = 0;
	struct log_group log;
	u32 pad_count = 0;

	for (i = 0; i < arena->nfree; i++) {
		ret = btt_log_group_read(arena, i, &log);
		if (ret)
			return ret;

		for (j = 0; j < 4; j++) {
			if (!idx_set) {
				if (ent_is_padding(&log.ent[j])) {
					pad_count++;
					continue;
				} else {
					/* Skip if index has been recorded */
					if ((next_idx == 1) &&
							(j == log_index[0]))
						continue;
					/* valid entry, record index */
					log_index[next_idx] = j;
					next_idx++;
				}
				if (next_idx == 2) {
					/* two valid entries found */
					idx_set = true;
				} else if (next_idx > 2) {
					/* too many valid indices */
					return -ENXIO;
				}
			} else {
				/*
				 * once the indices have been set, just verify
				 * that all subsequent log groups are either in
				 * their initial state or follow the same
				 * indices.
				 */
				if (j == log_index[0]) {
					/* entry must be 'valid' */
					if (ent_is_padding(&log.ent[j]))
						return -ENXIO;
				} else if (j == log_index[1]) {
					;
					/*
					 * log_index[1] can be padding if the
					 * lane never got used and it is still
					 * in the initial state (three 'padding'
					 * entries)
					 */
				} else {
					/* entry must be invalid (padding) */
					if (!ent_is_padding(&log.ent[j]))
						return -ENXIO;
				}
			}
		}
		/*
		 * If any of the log_groups have more than one valid,
		 * non-padding entry, then we are no longer in the
		 * initial_state
		 */
		if (pad_count < 3)
			initial_state = false;
		pad_count = 0;
	}

	if (!initial_state && !idx_set)
		return -ENXIO;

	/*
	 * If all the entries in the log were in the initial state,
	 * assume new padding scheme
	 */
	if (initial_state)
		log_index[1] = 1;

	/*
	 * Only allow the known permutations of log/padding indices,
	 * i.e. (0, 1), and (0, 2)
	 */
	if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
		; /* known index possibilities */
	else {
		dev_err(to_dev(arena), "Found an unknown padding scheme\n");
		return -ENXIO;
	}

	arena->log_index[0] = log_index[0];
	arena->log_index[1] = log_index[1];
	dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
	dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
	return 0;
}

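/*
 * The two accepted layouts (illustrative, following the log_group
 * description in btt.h): older kernels padded each 16B log entry out to
 * 32B, giving a group of { ent, pad, ent, pad } and indices (0, 2); the
 * current format packs the two entries first, giving { ent, ent, pad,
 * pad } and indices (0, 1). Any other permutation is rejected above as
 * an unknown padding scheme.
 */
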
static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}

static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
				GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}

static struct arena_info *alloc_arena(struct btt *btt, size_t size,
				size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
			INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = 1;
	arena->version_minor = 1;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;

	/* Default log indices are (0,1) */
	arena->log_index[0] = 0;
	arena->log_index[1] = 1;
	return arena;
}

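/*
 * Sizing example (illustrative, assuming BTT_PG_SIZE = 4096, nfree = 256
 * and LOG_GRP_SIZE = 64 from btt.h): the log consumes 256 * 64 = 16KiB
 * regardless of arena size, and the remaining space is split by solving
 *
 *	internal_nlba = (available - 4K) / (internal_lbasize + MAP_ENT_SIZE)
 *
 * so that every internal block is paired with its 4-byte map entry,
 * while external_nlba holds back nfree blocks for in-flight writes.
 */
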
static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}

/*
 * This function reads an existing valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
				u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (le64_to_cpu(super->nextoff) > 0)
		? (le64_to_cpu(super->nextoff))
		: (arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}

static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	struct btt_sb *super;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	super = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena) {
			ret = -ENOMEM;
			goto out_super;
		}

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_info(to_dev(arena),
						"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		ret = log_set_indices(arena);
		if (ret) {
			dev_err(to_dev(arena),
				"Unable to deduce log/padding indices\n");
			goto out;
		}

		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

		if (arena->nextoff == 0)
			break;
	}

	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	kfree(super);
	return ret;

 out:
	kfree(arena);
	free_arenas(btt);
 out_super:
	kfree(super);
	return ret;
}

static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}

/*
 * This function completes arena initialization by writing
 * it to the raw media.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
	int ret;
	u64 sum;
	struct btt_sb *super;
	struct nd_btt *nd_btt = arena->nd_btt;
	const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
	memcpy(super->uuid, nd_btt->uuid, 16);
	memcpy(super->parent_uuid, parent_uuid, 16);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	sum = nd_sb_checksum((struct nd_gen_sb *) super);
	super->checksum = cpu_to_le64(sum);

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}

/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

 unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}

static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}

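/*
 * E.g. (illustrative): lbasize = 520 with a 512-byte sector_size leaves
 * 8 bytes of per-sector metadata, which btt_rw_integrity() below
 * shuttles between the bio integrity payload and the space following
 * each data sector on media.
 */
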
/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
				struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}

/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}

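/*
 * Striping note (illustrative): with MAP_ENT_SIZE = 4 and 64-byte cache
 * lines, runs of 16 consecutive map entries hash to the same lock index,
 * so two premap LBAs whose entries share a cache line always serialise
 * on one lock while unrelated entries spread across the nfree locks.
 */
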
static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}

static int btt_data_read(struct arena_info *arena, struct page *page,
			unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len);
	kunmap_atomic(mem);

	return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
			struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len);
	kunmap_atomic(mem);

	return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */
		cur_len = min(len, bv.bv_len);
		mem = kmap_atomic(bv.bv_page);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len);
		else
			ret = arena_read_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len);

		kunmap_atomic(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len);
	}

	return ret;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif

static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int off, sector_t sector,
			unsigned int len)
{
	int ret = 0;
	int t_flag, e_flag;
	struct arena_info *arena = NULL;
	u32 lane = 0, premap, postmap;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;

		cur_len = min(btt->sector_size, len);

		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag);
		if (ret)
			goto out_lane;

		/*
		 * We loop to make sure that the post map LBA didn't change
		 * from under us between writing the RTT and doing the actual
		 * read.
		 */
		while (1) {
			u32 new_map;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &t_flag,
						&e_flag);
			if (ret)
				goto out_rtt;

			if (postmap == new_map)
				break;

			postmap = new_map;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret)
			goto out_rtt;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_rtt:
	arena->rtt[lane] = RTT_INVALID;
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

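/*
 * RTT scenario (illustrative): a reader records (RTT_VALID | postmap) in
 * its lane and then re-reads the map. If a writer recycled that postmap
 * block in the meantime, the second map_read sees the new translation
 * and the reader simply chases it; conversely, btt_write_pg() below
 * spins while any RTT slot still advertises the block it wants to
 * overwrite, so data being read is never rewritten underneath a reader.
 */
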
static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
			sector_t sector, struct page *page, unsigned int off,
			unsigned int len)
{
	int ret = 0;
	struct arena_info *arena = NULL;
	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
	struct log_entry log;
	int sub;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;
		cur_len = min(btt->sector_size, len);

		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
			ret = -EIO;
			goto out_lane;
		}

		new_postmap = arena->freelist[lane].block;

		/* Wait if the new block is being read from */
		for (i = 0; i < arena->nfree; i++)
			while (arena->rtt[i] == (RTT_VALID | new_postmap))
				cpu_relax();

		if (new_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_lane;
		}

		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
		if (ret)
			goto out_lane;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
						WRITE);
			if (ret)
				goto out_lane;
		}

		lock_map(arena, premap);
		ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL);
		if (ret)
			goto out_map;
		if (old_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_map;
		}

		log.lba = cpu_to_le32(premap);
		log.old_map = cpu_to_le32(old_postmap);
		log.new_map = cpu_to_le32(new_postmap);
		log.seq = cpu_to_le32(arena->freelist[lane].seq);
		sub = arena->freelist[lane].sub;
		ret = btt_flog_write(arena, lane, sub, &log);
		if (ret)
			goto out_map;

		ret = btt_map_write(arena, premap, new_postmap, 0, 0);
		if (ret)
			goto out_map;

		unlock_map(arena, premap);
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_map:
	unlock_map(arena, premap);
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

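/*
 * Ordering summary (illustrative): each sector write goes data -> flog
 * -> map, all targeting a free block that no reader can hold (the RTT
 * scan above). A crash before the flog commit leaves the old map entry
 * intact; a crash after it is repaired by btt_freelist_init(), so the
 * write is atomic with respect to power failure at any point.
 */
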
static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int len, unsigned int off,
			bool is_write, sector_t sector)
{
	int ret;

	if (!is_write) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		ret = btt_write_pg(btt, bip, sector, page, off, len);
	}

	return ret;
}

static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct btt *btt = q->queuedata;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0;
	bool do_acct;

	/*
	 * bio_integrity_enabled also checks if the bio already has an
	 * integrity payload attached. If it does, we *don't* do a
	 * bio_integrity_prep here - the payload has been generated by
	 * another kernel subsystem, and we just pass it through.
	 */
	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio->bi_error = -EIO;
		goto out;
	}

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		BUG_ON(len > PAGE_SIZE);
		/* Make sure len is in multiples of sector size. */
		/* XXX is this right? */
		BUG_ON(len < btt->sector_size);
		BUG_ON(len % btt->sector_size);

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				op_is_write(bio_op(bio)), iter.bi_sector);
		if (err) {
			dev_info(&btt->nd_btt->dev,
					"io error in %s sector %lld, len %d,\n",
					(op_is_write(bio_op(bio))) ? "WRITE" :
					"READ",
					(unsigned long long) iter.bi_sector, len);
			bio->bi_error = err;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

 out:
	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int btt_rw_page(struct block_device *bdev, sector_t sector,
		struct page *page, bool is_write)
{
	struct btt *btt = bdev->bd_disk->private_data;
	int rc;

	rc = btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector);
	if (rc == 0)
		page_endio(page, is_write, 0);

	return rc;
}

static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}

static const struct block_device_operations btt_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		btt_rw_page,
	.getgeo =		btt_getgeo,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

static int btt_blk_init(struct btt *btt)
{
	struct nd_btt *nd_btt = btt->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* create a new disk and request queue for btt */
	btt->btt_queue = blk_alloc_queue(GFP_KERNEL);
	if (!btt->btt_queue)
		return -ENOMEM;

	btt->btt_disk = alloc_disk(0);
	if (!btt->btt_disk) {
		blk_cleanup_queue(btt->btt_queue);
		return -ENOMEM;
	}

	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
	btt->btt_disk->first_minor = 0;
	btt->btt_disk->fops = &btt_fops;
	btt->btt_disk->private_data = btt;
	btt->btt_disk->queue = btt->btt_queue;
	btt->btt_disk->flags = GENHD_FL_EXT_DEVT;

	blk_queue_make_request(btt->btt_queue, btt_make_request);
	blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
	blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
	blk_queue_bounce_limit(btt->btt_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
	btt->btt_queue->queuedata = btt;

	if (btt_meta_size(btt)) {
		int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));

		if (rc) {
			del_gendisk(btt->btt_disk);
			put_disk(btt->btt_disk);
			blk_cleanup_queue(btt->btt_queue);
			return rc;
		}
	}
	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
	btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
	revalidate_disk(btt->btt_disk);

	return 0;
}

static void btt_blk_cleanup(struct btt *btt)
{
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
	blk_cleanup_queue(btt->btt_queue);
}

/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt:	device with BTT geometry and backing device info
 * @rawsize:	raw size in bytes of the backing device
 * @lbasize:	lba size of the backing device
 * @uuid:	A uuid for the backing device - this is stored on media
 * @maxlane:	maximum number of parallel requests the device can handle
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Returns:
 * Pointer to a new struct btt on success, NULL on failure.
 */
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
		u32 lbasize, u8 *uuid, struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct device *dev = &nd_btt->dev;

	btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in arena_discover: %d\n", ret);
		return NULL;
	}

	if (btt->init_state != INIT_READY && nd_region->ro) {
		dev_info(dev, "%s is read-only, unable to init btt metadata\n",
				dev_name(&nd_region->dev));
		return NULL;
	} else if (btt->init_state != INIT_READY) {
		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
				btt->num_arenas, rawsize);

		ret = create_arenas(btt);
		if (ret) {
			dev_info(dev, "init: create_arenas: %d\n", ret);
			return NULL;
		}

		ret = btt_meta_init(btt);
		if (ret) {
			dev_err(dev, "init: error in meta_init: %d\n", ret);
			return NULL;
		}
	}

	ret = btt_blk_init(btt);
	if (ret) {
		dev_err(dev, "init: error in blk_init: %d\n", ret);
		return NULL;
	}

	btt_debugfs_init(btt);

	return btt;
}

/**
 * btt_fini - de-initialize a BTT
 * @btt:	the BTT handle that was generated by btt_init
 *
 * De-initialize a Block Translation Table on device removal
 *
 * Context:
 * Might sleep.
 */
static void btt_fini(struct btt *btt)
{
	if (btt) {
		btt_blk_cleanup(btt);
		free_arenas(btt);
		debugfs_remove_recursive(btt->debugfs_dir);
	}
}

int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct nd_region *nd_region;
	struct btt *btt;
	size_t rawsize;

	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
		dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
		return -ENODEV;
	}

	rawsize = nvdimm_namespace_capacity(ndns) - SZ_4K;
	if (rawsize < ARENA_MIN_SIZE) {
		dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
				dev_name(&ndns->dev), ARENA_MIN_SIZE + SZ_4K);
		return -ENXIO;
	}
	nd_region = to_nd_region(nd_btt->dev.parent);
	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
			nd_region);
	if (!btt)
		return -ENOMEM;
	nd_btt->btt = btt;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);

int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
{
	struct btt *btt = nd_btt->btt;

	btt_fini(btt);
	nd_btt->btt = NULL;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);

static int __init nd_btt_init(void)
{
	int rc = 0;

	debugfs_root = debugfs_create_dir("btt", NULL);
	if (IS_ERR_OR_NULL(debugfs_root))
		rc = -ENXIO;

	return rc;
}

static void __exit nd_btt_exit(void)
{
	debugfs_remove_recursive(debugfs_root);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);