/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/uuid.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"
static guid_t nvdimm_btt_guid;
static guid_t nvdimm_btt2_guid;
static guid_t nvdimm_pfn_guid;
static guid_t nvdimm_dax_guid;

static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";
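
/*
 * Each index block carries a two-bit cyclic sequence number (masked by
 * NSINDEX_SEQ_MASK): 0 is invalid and live values advance via
 * nd_inc_seq(), so best_seq() below can pick the newer of two otherwise
 * valid index blocks.
 */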
static u32 best_seq(u32 a, u32 b)
{
	a &= NSINDEX_SEQ_MASK;
	b &= NSINDEX_SEQ_MASK;

	if (a == 0 || b == 0)
		return max(a, b);
	else if (nd_inc_seq(a) == b)
		return b;
	else
		return a;
}

unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
	return ndd->nslabel_size;
}
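
/*
 * Each label slot consumes sizeof_namespace_label() bytes of label
 * storage plus roughly one byte of index overhead (its bit in the free
 * bitmap, rounded up), hence the "+ 1" in the slot count below.
 */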
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
	return ndd->nsarea.config_size / (sizeof_namespace_label(ndd) + 1);
}

size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
	u32 nslot, space, size;

	/*
	 * The minimum index space is 512 bytes, with that amount of
	 * index we can describe ~1400 labels, which is less than a byte
	 * of overhead per label.  Round up to a byte of overhead per
	 * label and determine the size of the index region.  Yes, this
	 * starts to waste space at larger config_sizes, but it's
	 * unlikely we'll ever see anything but 128K.
	 */
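	/*
	 * Illustrative arithmetic: with a 128KB config area and 128-byte
	 * labels, nvdimm_num_label_slots() yields nslot = 131072 / 129 =
	 * 1016 slots, so the free-slot bitmap costs DIV_ROUND_UP(1016, 8)
	 * = 127 bytes on top of the fixed nd_namespace_index fields.
	 */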
	nslot = nvdimm_num_label_slots(ndd);
	space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
	size = ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),

	dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
			ndd->nsarea.config_size, sizeof_namespace_label(ndd));

static int __nd_label_validate(struct nvdimm_drvdata *ndd)
	/*
	 * On media, the label format consists of two index blocks followed
	 * by an array of labels.  None of these structures are ever
	 * updated in place.  A sequence number tracks the current
	 * active index and the next one to write, while labels are
	 * written to free slots.
	 */
	struct nd_namespace_index *nsindex[] = {
		to_namespace_index(ndd, 0),
		to_namespace_index(ndd, 1),
	};
	const int num_index = ARRAY_SIZE(nsindex);
	struct device *dev = ndd->dev;
	bool valid[2] = { 0 };
	int i, num_valid = 0;

	for (i = 0; i < num_index; i++) {
		u8 sig[NSINDEX_SIG_LEN];
		u64 sum_save, sum, size;
		unsigned int version, labelsize;

		memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
		if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
			dev_dbg(dev, "%s: nsindex%d signature invalid\n",
		/* label sizes larger than 128 arrived with v1.2 */
		version = __le16_to_cpu(nsindex[i]->major) * 100
			+ __le16_to_cpu(nsindex[i]->minor);
		labelsize = 1 << (7 + nsindex[i]->labelsize);

		if (labelsize != sizeof_namespace_label(ndd)) {
			dev_dbg(dev, "%s: nsindex%d labelsize %d invalid\n",
					__func__, i, nsindex[i]->labelsize);
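
		/*
		 * The index checksum is computed with the checksum field
		 * itself zeroed, so zero it, run Fletcher-64 over the
		 * block, then restore the stored value before comparing.
		 */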
		sum_save = __le64_to_cpu(nsindex[i]->checksum);
		nsindex[i]->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
		nsindex[i]->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(dev, "%s: nsindex%d checksum invalid\n",

		seq = __le32_to_cpu(nsindex[i]->seq);
		if ((seq & NSINDEX_SEQ_MASK) == 0) {
			dev_dbg(dev, "%s: nsindex%d sequence: %#x invalid\n",

		/* sanity check the index against expected values */
		if (__le64_to_cpu(nsindex[i]->myoff)
				!= i * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "%s: nsindex%d myoff: %#llx invalid\n",
					__func__, i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->myoff));

		if (__le64_to_cpu(nsindex[i]->otheroff)
				!= (!i) * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "%s: nsindex%d otheroff: %#llx invalid\n",
					__func__, i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->otheroff));

		size = __le64_to_cpu(nsindex[i]->mysize);
		if (size > sizeof_namespace_index(ndd)
				|| size < sizeof(struct nd_namespace_index)) {
			dev_dbg(dev, "%s: nsindex%d mysize: %#llx invalid\n",

		nslot = __le32_to_cpu(nsindex[i]->nslot);
		if (nslot * sizeof_namespace_label(ndd)
				+ 2 * sizeof_namespace_index(ndd)
				> ndd->nsarea.config_size) {
			dev_dbg(dev, "%s: nsindex%d nslot: %u invalid, config_size: %#x\n",
					ndd->nsarea.config_size);

	for (i = 0; i < num_index; i++)

	/* can't have num_valid > 0 but valid[] = { false, false } */

	/* pick the best index... */
	seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
			__le32_to_cpu(nsindex[1]->seq));
	if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))

int nd_label_validate(struct nvdimm_drvdata *ndd)
	/*
	 * In order to probe for and validate namespace index blocks we
	 * need to know the size of the labels, and we can't trust the
	 * size of the labels until we validate the index blocks.
	 * Resolve this dependency loop by probing for known label
	 * sizes, but default to v1.2 256-byte namespace labels if
	 * discovery fails.
	 */
	int label_size[] = { 128, 256 };

	for (i = 0; i < ARRAY_SIZE(label_size); i++) {
		ndd->nslabel_size = label_size[i];
		rc = __nd_label_validate(ndd);

void nd_label_copy(struct nvdimm_drvdata *ndd, struct nd_namespace_index *dst,
		struct nd_namespace_index *src)
	memcpy(dst, src, sizeof_namespace_index(ndd));
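
/*
 * The config area is laid out as two namespace index blocks followed by
 * the label array, so nd_label_base() starts just past both index
 * blocks, and to_slot()/to_label() convert between a slot number and
 * the in-memory address of its label.
 */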
static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
	void *base = to_namespace_index(ndd, 0);

	return base + 2 * sizeof_namespace_index(ndd);
}

static int to_slot(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	unsigned long label, base;

	label = (unsigned long) nd_label;
	base = (unsigned long) nd_label_base(ndd);

	return (label - base) / sizeof_namespace_label(ndd);
}

static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
	unsigned long label, base;

	base = (unsigned long) nd_label_base(ndd);
	label = base + sizeof_namespace_label(ndd) * slot;

	return (struct nd_namespace_label *) label;
}

#define for_each_clear_bit_le(bit, addr, size) \
	for ((bit) = find_next_zero_bit_le((addr), (size), 0);  \
	     (bit) < (size);  \
	     (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))

/**
 * preamble_index - common variable initialization for nd_label_* routines
 * @ndd: dimm container for the relevant label set
 * @idx: namespace_index index
 * @nsindex_out: on return set to the currently active namespace index
 * @free: on return set to the free label bitmap in the index
 * @nslot: on return set to the number of slots in the label space
 */
static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
		struct nd_namespace_index **nsindex_out,
		unsigned long **free, u32 *nslot)
	struct nd_namespace_index *nsindex;

	nsindex = to_namespace_index(ndd, idx);

	*free = (unsigned long *) nsindex->free;
	*nslot = __le32_to_cpu(nsindex->nslot);
	*nsindex_out = nsindex;
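
/*
 * nd_label_gen_id() formats the canonical id used to name DPA resources
 * for a label set: "pmem-<uuid>" for interleave-set labels and
 * "blk-<uuid>" when NSLABEL_FLAG_LOCAL is set (see the "%s-%pUb" format
 * below).
 */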
char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
	if (!label_id || !uuid)
		return NULL;

	snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
			flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);

static bool preamble_current(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_current, nsindex,
			free, nslot);
}

static bool preamble_next(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_next, nsindex,
			free, nslot);
}
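
/*
 * Reads of the active label set go through the 'current' index; updates
 * are staged against the 'next' index and only take effect once
 * nd_label_write_index() commits that index with a newer sequence
 * number.
 */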
static bool slot_valid(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label, u32 slot)
	/* check that we are written where we expect to be written */
	if (slot != __le32_to_cpu(nd_label->slot))
		return false;

	/* check that DPA allocations are page aligned */
	if ((__le64_to_cpu(nd_label->dpa)
				| __le64_to_cpu(nd_label->rawsize)) % SZ_4K)
		return false;

	if (namespace_label_has(ndd, checksum)) {
		sum_save = __le64_to_cpu(nd_label->checksum);
		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(ndd->dev, "%s fail checksum. slot: %d expect: %#llx\n",
					__func__, slot, sum);
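
/*
 * nd_label_reserve_dpa() replays the active labels at DIMM probe time,
 * re-establishing the DPA resource reservations they describe so later
 * allocations cannot collide with namespaces already on media.
 */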
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
	struct nd_namespace_index *nsindex;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0; /* no label, nothing to reserve */

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;
		struct nd_region *nd_region = NULL;
		u8 label_uuid[NSLABEL_UUID_LEN];
		struct nd_label_id label_id;
		struct resource *res;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot))
			continue;

		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		flags = __le32_to_cpu(nd_label->flags);
		nd_label_gen_id(&label_id, label_uuid, flags);
		res = nvdimm_allocate_dpa(ndd, &label_id,
				__le64_to_cpu(nd_label->dpa),
				__le64_to_cpu(nd_label->rawsize));
		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");

int nd_label_active_count(struct nvdimm_drvdata *ndd)
	struct nd_namespace_index *nsindex;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot)) {
			u32 label_slot = __le32_to_cpu(nd_label->slot);
			u64 size = __le64_to_cpu(nd_label->rawsize);
			u64 dpa = __le64_to_cpu(nd_label->dpa);

			dev_dbg(ndd->dev,
				"%s: slot%d invalid slot: %d dpa: %llx size: %llx\n",
					__func__, slot, label_slot, dpa, size);

struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
	struct nd_namespace_index *nsindex;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return NULL;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);
		if (!slot_valid(ndd, nd_label, slot))
			continue;

		if (n-- == 0)
			return to_label(ndd, slot);
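
/*
 * nd_label_alloc_slot() hands out a free slot from the 'next' index by
 * clearing its bit in the free bitmap; UINT_MAX indicates that no slot
 * is available.  nd_label_free_slot() is the inverse operation.
 */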
u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
	struct nd_namespace_index *nsindex;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return UINT_MAX;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	slot = find_next_bit_le(free, nslot, 0);

	clear_bit_le(slot, free);

bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
	struct nd_namespace_index *nsindex;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return false;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	return !test_and_set_bit_le(slot, free);

u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
	struct nd_namespace_index *nsindex;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return nvdimm_num_label_slots(ndd);

	return bitmap_weight(free, nslot);
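
/*
 * nd_label_write_index() fills in a complete index block (signature,
 * offsets, slot count, version, and the free bitmap when initializing),
 * seals it with a Fletcher-64 checksum, writes it out through
 * nvdimm_set_config_data(), and then swaps ns_current/ns_next so the
 * freshly written index becomes the live 'current' index.
 */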
static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
		unsigned long flags)
	struct nd_namespace_index *nsindex;
	unsigned long offset;

	nsindex = to_namespace_index(ndd, index);
	if (flags & ND_NSINDEX_INIT)
		nslot = nvdimm_num_label_slots(ndd);
	else
		nslot = __le32_to_cpu(nsindex->nslot);

	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
	memset(&nsindex->flags, 0, 3);
	nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
	nsindex->seq = __cpu_to_le32(seq);
	offset = (unsigned long) nsindex
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->myoff = __cpu_to_le64(offset);
	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
	offset = (unsigned long) to_namespace_index(ndd,
			nd_label_next_nsindex(index))
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->otheroff = __cpu_to_le64(offset);
	offset = (unsigned long) nd_label_base(ndd)
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->labeloff = __cpu_to_le64(offset);
	nsindex->nslot = __cpu_to_le32(nslot);
	nsindex->major = __cpu_to_le16(1);
	if (sizeof_namespace_label(ndd) < 256)
		nsindex->minor = __cpu_to_le16(1);
	else
		nsindex->minor = __cpu_to_le16(2);
	nsindex->checksum = __cpu_to_le64(0);
	if (flags & ND_NSINDEX_INIT) {
		unsigned long *free = (unsigned long *) nsindex->free;
		u32 nfree = ALIGN(nslot, BITS_PER_LONG);

		memset(nsindex->free, 0xff, nfree / 8);
		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
			clear_bit_le(nslot + i, free);
	}
	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
	nsindex->checksum = __cpu_to_le64(checksum);
	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
			nsindex, sizeof_namespace_index(ndd));

	if (flags & ND_NSINDEX_INIT)
		return 0;

	/* copy the index we just wrote to the new 'next' */
	WARN_ON(index != ndd->ns_next);
	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
	WARN_ON(ndd->ns_current == ndd->ns_next);

static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	return (unsigned long) nd_label
		- (unsigned long) to_namespace_index(ndd, 0);
}
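
/*
 * v1.2 labels record the claim class (BTT, BTT2, PFN, DAX) as an
 * abstraction GUID; these helpers translate between that on-media GUID
 * and the enum nvdimm_claim_class used internally.
 */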
enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
{
	if (guid_equal(guid, &nvdimm_btt_guid))
		return NVDIMM_CCLASS_BTT;
	else if (guid_equal(guid, &nvdimm_btt2_guid))
		return NVDIMM_CCLASS_BTT2;
	else if (guid_equal(guid, &nvdimm_pfn_guid))
		return NVDIMM_CCLASS_PFN;
	else if (guid_equal(guid, &nvdimm_dax_guid))
		return NVDIMM_CCLASS_DAX;
	else if (guid_equal(guid, &guid_null))
		return NVDIMM_CCLASS_NONE;

	return NVDIMM_CCLASS_UNKNOWN;
}

static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
	if (claim_class == NVDIMM_CCLASS_BTT)
		return &nvdimm_btt_guid;
	else if (claim_class == NVDIMM_CCLASS_BTT2)
		return &nvdimm_btt2_guid;
	else if (claim_class == NVDIMM_CCLASS_PFN)
		return &nvdimm_pfn_guid;
	else if (claim_class == NVDIMM_CCLASS_DAX)
		return &nvdimm_dax_guid;
	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
		/*
		 * If we're modifying a namespace for which we don't
		 * know the claim_class, don't touch the existing guid.
		 */

static void reap_victim(struct nd_mapping *nd_mapping,
		struct nd_label_ent *victim)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	u32 slot = to_slot(ndd, victim->label);

	dev_dbg(ndd->dev, "free: %d\n", slot);
	nd_label_free_slot(ndd, slot);
	victim->label = NULL;
}
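
/*
 * __pmem_label_update() stages one mapping's label for a pmem namespace:
 * allocate a slot from the 'next' index, write the label (uuid, flags,
 * position, interleave-set cookie, dpa/rawsize, optional checksum) to
 * the DIMM, reap any superseded label for the same uuid, and commit by
 * writing the 'next' index with an incremented sequence number.
 */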
static int __pmem_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
		int pos, unsigned long flags)
	struct nd_namespace_common *ndns = &nspm->nsio.common;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	struct nd_label_ent *label_ent;
	struct nd_label_id label_id;
	struct resource *res;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	nd_label_gen_id(&label_id, nspm->uuid, 0);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			break;

	/* allocate and write the label to the staging (next) index */
	slot = nd_label_alloc_slot(ndd);
	if (slot == UINT_MAX)
		return -ENXIO;
	dev_dbg(ndd->dev, "%s: allocated: %d\n", __func__, slot);

	nd_label = to_label(ndd, slot);
	memset(nd_label, 0, sizeof_namespace_label(ndd));
	memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
	memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
	nd_label->flags = __cpu_to_le32(flags);
	nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
	nd_label->position = __cpu_to_le16(pos);
	nd_label->isetcookie = __cpu_to_le64(cookie);
	nd_label->rawsize = __cpu_to_le64(resource_size(res));
	nd_label->lbasize = __cpu_to_le64(nspm->lbasize);
	nd_label->dpa = __cpu_to_le64(res->start);
	nd_label->slot = __cpu_to_le32(slot);
	if (namespace_label_has(ndd, type_guid))
		guid_copy(&nd_label->type_guid, &nd_set->type_guid);
	if (namespace_label_has(ndd, abstraction_guid))
		guid_copy(&nd_label->abstraction_guid,
				to_abstraction_guid(ndns->claim_class,
					&nd_label->abstraction_guid));
	if (namespace_label_has(ndd, checksum)) {
		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum);
	}
	nd_dbg_dpa(nd_region, ndd, res, "%s\n", __func__);

	offset = nd_label_offset(ndd, nd_label);
	rc = nvdimm_set_config_data(ndd, offset, nd_label,
			sizeof_namespace_label(ndd));

	/* Garbage collect the previous label */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
		if (!label_ent->label)
			continue;
		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
				|| memcmp(nspm->uuid, label_ent->label->uuid,
					NSLABEL_UUID_LEN) == 0)
			reap_victim(nd_mapping, label_ent);
	}

	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);

	list_for_each_entry(label_ent, &nd_mapping->labels, list)
		if (!label_ent->label) {
			label_ent->label = nd_label;
			nd_label = NULL;
			break;
		}
	dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
			"failed to track label: %d\n",
			to_slot(ndd, nd_label));

	mutex_unlock(&nd_mapping->lock);

static bool is_old_resource(struct resource *res, struct resource **list, int n)
	if (res->flags & DPA_RESOURCE_ADJUSTED)
		return false;
	for (i = 0; i < n; i++)

static struct resource *to_resource(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
	struct resource *res;

	for_each_dpa_resource(ndd, res) {
		if (res->start != __le64_to_cpu(nd_label->dpa))
			continue;
		if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))

/*
 * 1/ Account all the labels that can be freed after this update
 * 2/ Allocate and write the label to the staging (next) index
 * 3/ Record the resources in the namespace device
 */
static int __blk_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
		int num_labels)
	int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nd_namespace_common *ndns = &nsblk->common;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	unsigned long *free, *victim_map = NULL;
	struct resource *res, **old_res_list;
	struct nd_label_id label_id;
	u8 uuid[NSLABEL_UUID_LEN];

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	old_res_list = nsblk->res;
	nfree = nd_label_nfree(ndd);
	old_num_resources = nsblk->num_resources;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);

	/*
	 * We need to loop over the old resources a few times, which seems a
	 * bit inefficient, but we need to know that we have the label
	 * space before we start mutating the tracking structures.
	 * Otherwise the recovery method of last resort for userspace is to
	 * disable and re-enable the parent region.
	 */
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!is_old_resource(res, old_res_list, old_num_resources))
			alloc++;
	}

	if (old_num_resources) {
		/* convert old local-label-map to dimm-slot victim-map */
		victim_map = kcalloc(BITS_TO_LONGS(nslot), sizeof(long),

	/* mark unused labels for garbage collection */
	for_each_clear_bit_le(slot, free, nslot) {
		nd_label = to_label(ndd, slot);
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		res = to_resource(ndd, nd_label);
		if (res && is_old_resource(res, old_res_list,
		slot = to_slot(ndd, nd_label);
		set_bit(slot, victim_map);
	}

	/* don't allow updates that consume the last label */
	if (nfree - alloc < 0 || nfree - alloc + victims < 1) {
		dev_info(&nsblk->common.dev, "insufficient label space\n");

	/* from here on we need to abort on error */

	/* assign all resources to the namespace before writing the labels */
	nsblk->num_resources = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) {

	/* release slots associated with any invalidated UUIDs */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list)
		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)) {
			reap_victim(nd_mapping, label_ent);
			list_move(&label_ent->list, &list);
		}
	mutex_unlock(&nd_mapping->lock);

	/*
	 * Find the resource associated with the first label in the set
	 * per the v1.2 namespace specification.
	 */
	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *min = nsblk->res[min_dpa_idx];

		if (res->start < min->start)
			min_dpa_idx = i;
	}

	for (i = 0; i < nsblk->num_resources; i++) {
		if (is_old_resource(res, old_res_list, old_num_resources))
			continue; /* carry-over */
		slot = nd_label_alloc_slot(ndd);
		if (slot == UINT_MAX)
		dev_dbg(ndd->dev, "%s: allocated: %d\n", __func__, slot);

		nd_label = to_label(ndd, slot);
		memset(nd_label, 0, sizeof_namespace_label(ndd));
		memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
		memcpy(nd_label->name, nsblk->alt_name,
		nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);

		/*
		 * Use the presence of the type_guid as a flag to
		 * determine isetcookie usage and nlabel + position
		 * policy for blk-aperture namespaces.
		 */
		if (namespace_label_has(ndd, type_guid)) {
			if (i == min_dpa_idx) {
				nd_label->nlabel = __cpu_to_le16(nsblk->num_resources);
				nd_label->position = __cpu_to_le16(0);
			} else {
				nd_label->nlabel = __cpu_to_le16(0xffff);
				nd_label->position = __cpu_to_le16(0xffff);
			}
			nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2);
		} else {
			nd_label->nlabel = __cpu_to_le16(0); /* N/A */
			nd_label->position = __cpu_to_le16(0); /* N/A */
			nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
		}

		nd_label->dpa = __cpu_to_le64(res->start);
		nd_label->rawsize = __cpu_to_le64(resource_size(res));
		nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
		nd_label->slot = __cpu_to_le32(slot);
		if (namespace_label_has(ndd, type_guid))
			guid_copy(&nd_label->type_guid, &nd_set->type_guid);
		if (namespace_label_has(ndd, abstraction_guid))
			guid_copy(&nd_label->abstraction_guid,
					to_abstraction_guid(ndns->claim_class,
						&nd_label->abstraction_guid));

		if (namespace_label_has(ndd, checksum)) {
			nd_label->checksum = __cpu_to_le64(0);
			sum = nd_fletcher64(nd_label,
					sizeof_namespace_label(ndd), 1);
			nd_label->checksum = __cpu_to_le64(sum);
		}

		offset = nd_label_offset(ndd, nd_label);
		rc = nvdimm_set_config_data(ndd, offset, nd_label,
				sizeof_namespace_label(ndd));

	/* free up now unused slots in the new index */
	for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
		dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
		nd_label_free_slot(ndd, slot);
	}

	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);

	/*
	 * Now that the on-dimm labels are up to date, fix up the tracking
	 * entries in nd_mapping->labels
	 */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		nd_label = label_ent->label;

		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;

		list_move(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);
	mutex_unlock(&nd_mapping->lock);

	if (nlabel + nsblk->num_resources > num_labels) {
		/*
		 * Bug, we can't end up with more resources than
		 * available labels
		 */

	mutex_lock(&nd_mapping->lock);
	label_ent = list_first_entry_or_null(&nd_mapping->labels,
			typeof(*label_ent), list);

	mutex_unlock(&nd_mapping->lock);

	for_each_clear_bit_le(slot, free, nslot) {
		nd_label = to_label(ndd, slot);
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		res = to_resource(ndd, nd_label);
		res->flags &= ~DPA_RESOURCE_ADJUSTED;
		dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot);
		list_for_each_entry_from(label_ent, &nd_mapping->labels, list) {
			if (label_ent->label)
				continue;
			label_ent->label = nd_label;
			nd_label = NULL;
			break;
		}
		if (nd_label)
			dev_WARN(&nsblk->common.dev,
					"failed to track label slot%d\n", slot);
	}
	mutex_unlock(&nd_mapping->lock);

	kfree(old_res_list);

	/*
	 * 1/ repair the allocated label bitmap in the index
	 * 2/ restore the resource list
	 */
	nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd));
	nsblk->res = old_res_list;
	nsblk->num_resources = old_num_resources;
	old_res_list = NULL;
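
/*
 * init_labels() makes sure nd_mapping->labels has at least num_labels
 * tracking entries (preserving existing ones for later garbage
 * collection) and, when the DIMM has no valid namespace index yet,
 * writes an initial pair of index blocks with ND_NSINDEX_INIT.
 */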
static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
	int i, old_num_labels = 0;
	struct nd_label_ent *label_ent;
	struct nd_namespace_index *nsindex;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list)
		old_num_labels++;
	mutex_unlock(&nd_mapping->lock);

	/*
	 * We need to preserve all the old labels for the mapping so
	 * they can be garbage collected after writing the new labels.
	 */
	for (i = old_num_labels; i < num_labels; i++) {
		label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
		mutex_lock(&nd_mapping->lock);
		list_add_tail(&label_ent->list, &nd_mapping->labels);
		mutex_unlock(&nd_mapping->lock);
	}

	if (ndd->ns_current == -1 || ndd->ns_next == -1)
		/* pass */;
	else
		return max(num_labels, old_num_labels);

	nsindex = to_namespace_index(ndd, 0);
	memset(nsindex, 0, ndd->nsarea.config_size);
	for (i = 0; i < 2; i++) {
		int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);

		if (rc)
			return rc;
	}
	ndd->ns_current = 0;

	return max(num_labels, old_num_labels);

static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	u8 label_uuid[NSLABEL_UUID_LEN];
	unsigned long *free;

	/* no index || no labels == nothing to delete */
	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return 0;

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;

		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
			continue;

		slot = to_slot(ndd, nd_label);
		nd_label_free_slot(ndd, slot);
		dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
		list_move_tail(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);

	nd_mapping_free_labels(nd_mapping);
	dev_dbg(ndd->dev, "%s: no more active labels\n", __func__);

	mutex_unlock(&nd_mapping->lock);

	return nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
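
/*
 * nd_pmem_namespace_label_update() writes/refreshes one label per DIMM
 * mapping in the region: a size of 0 deletes the namespace's labels;
 * otherwise the labels are first written with NSLABEL_FLAG_UPDATING set
 * and then rewritten with the flag cleared once every mapping has been
 * updated, per UEFI 2.7 expectations.
 */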
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		if (size == 0) {
			rc = del_labels(nd_mapping, nspm->uuid);

		for_each_dpa_resource(ndd, res)
			if (strncmp(res->name, "pmem", 4) == 0)
				count++;
		WARN_ON_ONCE(!count);

		rc = init_labels(nd_mapping, count);

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
				NSLABEL_FLAG_UPDATING);

	/* Clear the UPDATING flag per UEFI 2.7 expectations */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);

int nd_blk_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_blk *nsblk, resource_size_t size)
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct resource *res;

	if (size == 0)
		return del_labels(nd_mapping, nsblk->uuid);

	for_each_dpa_resource(to_ndd(nd_mapping), res)
		count++;

	count = init_labels(nd_mapping, count);

	return __blk_label_update(nd_region, nd_mapping, nsblk, count);

int __init nd_label_init(void)
{
	WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
	WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));

	return 0;
}