GNU Linux-libre 4.14.324-gnu1
[releases.git] / drivers / nvdimm / label.c
1 /*
2  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 #include <linux/device.h>
14 #include <linux/ndctl.h>
15 #include <linux/uuid.h>
16 #include <linux/slab.h>
17 #include <linux/io.h>
18 #include <linux/nd.h>
19 #include "nd-core.h"
20 #include "label.h"
21 #include "nd.h"
22
23 static guid_t nvdimm_btt_guid;
24 static guid_t nvdimm_btt2_guid;
25 static guid_t nvdimm_pfn_guid;
26 static guid_t nvdimm_dax_guid;
27
28 static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";
29
30 static u32 best_seq(u32 a, u32 b)
31 {
32         a &= NSINDEX_SEQ_MASK;
33         b &= NSINDEX_SEQ_MASK;
34
35         if (a == 0 || a == b)
36                 return b;
37         else if (b == 0)
38                 return a;
39         else if (nd_inc_seq(a) == b)
40                 return b;
41         else
42                 return a;
43 }
44
/* Return the per-DIMM label size discovered/assumed by nd_label_validate(). */
unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
        return ndd->nslabel_size;
}
49
/*
 * Number of label slots that fit in the config area, budgeting one
 * extra byte per label for index overhead (free bitmap plus index
 * header, see sizeof_namespace_index()).
 */
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
        return ndd->nsarea.config_size / (sizeof_namespace_label(ndd) + 1);
}
54
55 size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
56 {
57         u32 nslot, space, size;
58
59         /*
60          * The minimum index space is 512 bytes, with that amount of
61          * index we can describe ~1400 labels which is less than a byte
62          * of overhead per label.  Round up to a byte of overhead per
63          * label and determine the size of the index region.  Yes, this
64          * starts to waste space at larger config_sizes, but it's
65          * unlikely we'll ever see anything but 128K.
66          */
67         nslot = nvdimm_num_label_slots(ndd);
68         space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
69         size = ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
70                         NSINDEX_ALIGN) * 2;
71         if (size <= space)
72                 return size / 2;
73
74         dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
75                         ndd->nsarea.config_size, sizeof_namespace_label(ndd));
76         return 0;
77 }
78
/*
 * Validate the two on-media namespace index blocks against the label
 * size currently assumed in ndd->nslabel_size.  Returns the index (0
 * or 1) of the winning (current) block, or -1 if neither validates.
 */
static int __nd_label_validate(struct nvdimm_drvdata *ndd)
{
        /*
         * On media label format consists of two index blocks followed
         * by an array of labels.  None of these structures are ever
         * updated in place.  A sequence number tracks the current
         * active index and the next one to write, while labels are
         * written to free slots.
         *
         *     +------------+
         *     |            |
         *     |  nsindex0  |
         *     |            |
         *     +------------+
         *     |            |
         *     |  nsindex1  |
         *     |            |
         *     +------------+
         *     |   label0   |
         *     +------------+
         *     |   label1   |
         *     +------------+
         *     |            |
         *      ....nslot...
         *     |            |
         *     +------------+
         *     |   labelN   |
         *     +------------+
         */
        struct nd_namespace_index *nsindex[] = {
                to_namespace_index(ndd, 0),
                to_namespace_index(ndd, 1),
        };
        const int num_index = ARRAY_SIZE(nsindex);
        struct device *dev = ndd->dev;
        bool valid[2] = { 0 };
        int i, num_valid = 0;
        u32 seq;

        for (i = 0; i < num_index; i++) {
                u32 nslot;
                u8 sig[NSINDEX_SIG_LEN];
                u64 sum_save, sum, size;
                unsigned int version, labelsize;

                memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
                if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
                        dev_dbg(dev, "%s: nsindex%d signature invalid\n",
                                        __func__, i);
                        continue;
                }

                /* label sizes larger than 128 arrived with v1.2 */
                version = __le16_to_cpu(nsindex[i]->major) * 100
                        + __le16_to_cpu(nsindex[i]->minor);
                if (version >= 102)
                        labelsize = 1 << (7 + nsindex[i]->labelsize);
                else
                        labelsize = 128;

                /* reject an index that disagrees with the probed label size */
                if (labelsize != sizeof_namespace_label(ndd)) {
                        dev_dbg(dev, "%s: nsindex%d labelsize %d invalid\n",
                                        __func__, i, nsindex[i]->labelsize);
                        continue;
                }

                /* fletcher64 is computed with the checksum field zeroed */
                sum_save = __le64_to_cpu(nsindex[i]->checksum);
                nsindex[i]->checksum = __cpu_to_le64(0);
                sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
                nsindex[i]->checksum = __cpu_to_le64(sum_save);
                if (sum != sum_save) {
                        dev_dbg(dev, "%s: nsindex%d checksum invalid\n",
                                        __func__, i);
                        continue;
                }

                /* a zero sequence number marks an uninitialized index */
                seq = __le32_to_cpu(nsindex[i]->seq);
                if ((seq & NSINDEX_SEQ_MASK) == 0) {
                        dev_dbg(dev, "%s: nsindex%d sequence: %#x invalid\n",
                                        __func__, i, seq);
                        continue;
                }

                /* sanity check the index against expected values */
                if (__le64_to_cpu(nsindex[i]->myoff)
                                != i * sizeof_namespace_index(ndd)) {
                        dev_dbg(dev, "%s: nsindex%d myoff: %#llx invalid\n",
                                        __func__, i, (unsigned long long)
                                        __le64_to_cpu(nsindex[i]->myoff));
                        continue;
                }
                if (__le64_to_cpu(nsindex[i]->otheroff)
                                != (!i) * sizeof_namespace_index(ndd)) {
                        dev_dbg(dev, "%s: nsindex%d otheroff: %#llx invalid\n",
                                        __func__, i, (unsigned long long)
                                        __le64_to_cpu(nsindex[i]->otheroff));
                        continue;
                }

                size = __le64_to_cpu(nsindex[i]->mysize);
                if (size > sizeof_namespace_index(ndd)
                                || size < sizeof(struct nd_namespace_index)) {
                        dev_dbg(dev, "%s: nsindex%d mysize: %#llx invalid\n",
                                        __func__, i, size);
                        continue;
                }

                /* the advertised slot count must fit in the config area */
                nslot = __le32_to_cpu(nsindex[i]->nslot);
                if (nslot * sizeof_namespace_label(ndd)
                                + 2 * sizeof_namespace_index(ndd)
                                > ndd->nsarea.config_size) {
                        dev_dbg(dev, "%s: nsindex%d nslot: %u invalid, config_size: %#x\n",
                                        __func__, i, nslot,
                                        ndd->nsarea.config_size);
                        continue;
                }
                valid[i] = true;
                num_valid++;
        }

        switch (num_valid) {
        case 0:
                break;
        case 1:
                for (i = 0; i < num_index; i++)
                        if (valid[i])
                                return i;
                /* can't have num_valid > 0 but valid[] = { false, false } */
                WARN_ON(1);
                break;
        default:
                /* pick the best index... */
                seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
                                __le32_to_cpu(nsindex[1]->seq));
                if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
                        return 1;
                else
                        return 0;
                break;
        }

        return -1;
}
222
223 int nd_label_validate(struct nvdimm_drvdata *ndd)
224 {
225         /*
226          * In order to probe for and validate namespace index blocks we
227          * need to know the size of the labels, and we can't trust the
228          * size of the labels until we validate the index blocks.
229          * Resolve this dependency loop by probing for known label
230          * sizes, but default to v1.2 256-byte namespace labels if
231          * discovery fails.
232          */
233         int label_size[] = { 128, 256 };
234         int i, rc;
235
236         for (i = 0; i < ARRAY_SIZE(label_size); i++) {
237                 ndd->nslabel_size = label_size[i];
238                 rc = __nd_label_validate(ndd);
239                 if (rc >= 0)
240                         return rc;
241         }
242
243         return -1;
244 }
245
/* Copy one full index block from @src to @dst; no-op if either is NULL. */
void nd_label_copy(struct nvdimm_drvdata *ndd, struct nd_namespace_index *dst,
                struct nd_namespace_index *src)
{
        if (!dst || !src)
                return;

        memcpy(dst, src, sizeof_namespace_index(ndd));
}
256
/* The label array begins immediately after the two index blocks. */
static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
        char *index0 = (char *) to_namespace_index(ndd, 0);

        return (struct nd_namespace_label *)
                (index0 + 2 * sizeof_namespace_index(ndd));
}
263
/* Convert a label pointer back into its slot number in the label array. */
static int to_slot(struct nvdimm_drvdata *ndd,
                struct nd_namespace_label *nd_label)
{
        unsigned long offset = (unsigned long) nd_label
                - (unsigned long) nd_label_base(ndd);

        return offset / sizeof_namespace_label(ndd);
}
274
/* Address of slot @slot in the label array. */
static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
        unsigned long addr = (unsigned long) nd_label_base(ndd)
                + sizeof_namespace_label(ndd) * slot;

        return (struct nd_namespace_label *) addr;
}
284
/*
 * Iterate @bit over every clear bit in the little-endian bitmap @addr
 * within [0, @size).  Clear bits in the index free-map denote slots
 * that hold active labels.
 */
#define for_each_clear_bit_le(bit, addr, size) \
        for ((bit) = find_next_zero_bit_le((addr), (size), 0);  \
             (bit) < (size);                                    \
             (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))
289
290 /**
291  * preamble_index - common variable initialization for nd_label_* routines
292  * @ndd: dimm container for the relevant label set
293  * @idx: namespace_index index
294  * @nsindex_out: on return set to the currently active namespace index
295  * @free: on return set to the free label bitmap in the index
296  * @nslot: on return set to the number of slots in the label space
297  */
298 static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
299                 struct nd_namespace_index **nsindex_out,
300                 unsigned long **free, u32 *nslot)
301 {
302         struct nd_namespace_index *nsindex;
303
304         nsindex = to_namespace_index(ndd, idx);
305         if (nsindex == NULL)
306                 return false;
307
308         *free = (unsigned long *) nsindex->free;
309         *nslot = __le32_to_cpu(nsindex->nslot);
310         *nsindex_out = nsindex;
311
312         return true;
313 }
314
315 char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
316 {
317         if (!label_id || !uuid)
318                 return NULL;
319         snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
320                         flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
321         return label_id->id;
322 }
323
/* preamble_index() against the active (current) index block. */
static bool preamble_current(struct nvdimm_drvdata *ndd,
                struct nd_namespace_index **nsindex,
                unsigned long **free, u32 *nslot)
{
        return preamble_index(ndd, ndd->ns_current, nsindex,
                        free, nslot);
}
331
/* preamble_index() against the staging (next-to-write) index block. */
static bool preamble_next(struct nvdimm_drvdata *ndd,
                struct nd_namespace_index **nsindex,
                unsigned long **free, u32 *nslot)
{
        return preamble_index(ndd, ndd->ns_next, nsindex,
                        free, nslot);
}
339
/*
 * Sanity check a label read from slot @slot: self-referencing slot
 * number, page-aligned DPA extent, and (for label formats that carry
 * one) a matching fletcher64 checksum.
 */
static bool slot_valid(struct nvdimm_drvdata *ndd,
                struct nd_namespace_label *nd_label, u32 slot)
{
        /* check that we are written where we expect to be written */
        if (slot != __le32_to_cpu(nd_label->slot))
                return false;

        /* check that DPA allocations are page aligned */
        if ((__le64_to_cpu(nd_label->dpa)
                                | __le64_to_cpu(nd_label->rawsize)) % SZ_4K)
                return false;

        /* check checksum */
        if (namespace_label_has(ndd, checksum)) {
                u64 sum, sum_save;

                /* checksum is computed with the field itself zeroed */
                sum_save = __le64_to_cpu(nd_label->checksum);
                nd_label->checksum = __cpu_to_le64(0);
                sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
                nd_label->checksum = __cpu_to_le64(sum_save);
                if (sum != sum_save) {
                        dev_dbg(ndd->dev, "%s fail checksum. slot: %d expect: %#llx\n",
                                __func__, slot, sum);
                        return false;
                }
        }

        return true;
}
369
/*
 * Walk the active labels in the current index and reserve each label's
 * DPA extent in the dimm's resource tree.  Returns 0 on success (or
 * when there is no valid index), -EBUSY if a label's extent cannot be
 * reserved (e.g. it overlaps an existing allocation).
 */
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
        struct nd_namespace_index *nsindex;
        unsigned long *free;
        u32 nslot, slot;

        if (!preamble_current(ndd, &nsindex, &free, &nslot))
                return 0; /* no label, nothing to reserve */

        for_each_clear_bit_le(slot, free, nslot) {
                struct nd_namespace_label *nd_label;
                struct nd_region *nd_region = NULL;
                u8 label_uuid[NSLABEL_UUID_LEN];
                struct nd_label_id label_id;
                struct resource *res;
                u32 flags;

                nd_label = to_label(ndd, slot);

                /* skip corrupt/stale slots */
                if (!slot_valid(ndd, nd_label, slot))
                        continue;

                memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
                flags = __le32_to_cpu(nd_label->flags);
                nd_label_gen_id(&label_id, label_uuid, flags);
                res = nvdimm_allocate_dpa(ndd, &label_id,
                                __le64_to_cpu(nd_label->dpa),
                                __le64_to_cpu(nd_label->rawsize));
                nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
                if (!res)
                        return -EBUSY;
        }

        return 0;
}
405
/*
 * Count the valid labels referenced by the current index, logging (at
 * debug level) any in-use slot whose label fails validation.
 */
int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
        struct nd_namespace_index *nsindex;
        unsigned long *free;
        u32 nslot, slot;
        int count = 0;

        if (!preamble_current(ndd, &nsindex, &free, &nslot))
                return 0;

        for_each_clear_bit_le(slot, free, nslot) {
                struct nd_namespace_label *nd_label;

                nd_label = to_label(ndd, slot);

                if (!slot_valid(ndd, nd_label, slot)) {
                        u32 label_slot = __le32_to_cpu(nd_label->slot);
                        u64 size = __le64_to_cpu(nd_label->rawsize);
                        u64 dpa = __le64_to_cpu(nd_label->dpa);

                        dev_dbg(ndd->dev,
                                "%s: slot%d invalid slot: %d dpa: %llx size: %llx\n",
                                        __func__, slot, label_slot, dpa, size);
                        continue;
                }
                count++;
        }
        return count;
}
435
436 struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
437 {
438         struct nd_namespace_index *nsindex;
439         unsigned long *free;
440         u32 nslot, slot;
441
442         if (!preamble_current(ndd, &nsindex, &free, &nslot))
443                 return NULL;
444
445         for_each_clear_bit_le(slot, free, nslot) {
446                 struct nd_namespace_label *nd_label;
447
448                 nd_label = to_label(ndd, slot);
449                 if (!slot_valid(ndd, nd_label, slot))
450                         continue;
451
452                 if (n-- == 0)
453                         return to_label(ndd, slot);
454         }
455
456         return NULL;
457 }
458
459 u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
460 {
461         struct nd_namespace_index *nsindex;
462         unsigned long *free;
463         u32 nslot, slot;
464
465         if (!preamble_next(ndd, &nsindex, &free, &nslot))
466                 return UINT_MAX;
467
468         WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
469
470         slot = find_next_bit_le(free, nslot, 0);
471         if (slot == nslot)
472                 return UINT_MAX;
473
474         clear_bit_le(slot, free);
475
476         return slot;
477 }
478
479 bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
480 {
481         struct nd_namespace_index *nsindex;
482         unsigned long *free;
483         u32 nslot;
484
485         if (!preamble_next(ndd, &nsindex, &free, &nslot))
486                 return false;
487
488         WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
489
490         if (slot < nslot)
491                 return !test_and_set_bit_le(slot, free);
492         return false;
493 }
494
/*
 * Number of free label slots in the staging index; if no index has
 * been initialized yet, every slot is implicitly free.  Caller must
 * hold the nvdimm bus lock.
 */
u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
{
        struct nd_namespace_index *nsindex;
        unsigned long *free;
        u32 nslot;

        WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

        if (!preamble_next(ndd, &nsindex, &free, &nslot))
                return nvdimm_num_label_slots(ndd);

        return bitmap_weight(free, nslot);
}
508
/*
 * nd_label_write_index - regenerate and persist one namespace index block
 * @ndd: dimm container for the label area
 * @index: which index block (0 or 1) to rebuild
 * @seq: sequence number to stamp into the block
 * @flags: ND_NSINDEX_INIT to (re)initialize the free-slot bitmap
 *
 * Returns 0 on success or the negative error from
 * nvdimm_set_config_data().  On a non-init write, the in-memory
 * current/next bookkeeping is rotated so this block becomes current.
 */
static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
                unsigned long flags)
{
        struct nd_namespace_index *nsindex;
        unsigned long offset;
        u64 checksum;
        u32 nslot;
        int rc;

        nsindex = to_namespace_index(ndd, index);
        if (flags & ND_NSINDEX_INIT)
                nslot = nvdimm_num_label_slots(ndd);
        else
                nslot = __le32_to_cpu(nsindex->nslot);

        memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
        /* clear the three flags bytes following the signature */
        memset(&nsindex->flags, 0, 3);
        /* 128-byte labels encode as 0, 256-byte as 1 (see __nd_label_validate()) */
        nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
        nsindex->seq = __cpu_to_le32(seq);
        offset = (unsigned long) nsindex
                - (unsigned long) to_namespace_index(ndd, 0);
        nsindex->myoff = __cpu_to_le64(offset);
        nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
        offset = (unsigned long) to_namespace_index(ndd,
                        nd_label_next_nsindex(index))
                - (unsigned long) to_namespace_index(ndd, 0);
        nsindex->otheroff = __cpu_to_le64(offset);
        offset = (unsigned long) nd_label_base(ndd)
                - (unsigned long) to_namespace_index(ndd, 0);
        nsindex->labeloff = __cpu_to_le64(offset);
        nsindex->nslot = __cpu_to_le32(nslot);
        nsindex->major = __cpu_to_le16(1);
        /* v1.1 for legacy 128-byte labels, v1.2 otherwise */
        if (sizeof_namespace_label(ndd) < 256)
                nsindex->minor = __cpu_to_le16(1);
        else
                nsindex->minor = __cpu_to_le16(2);
        nsindex->checksum = __cpu_to_le64(0);
        if (flags & ND_NSINDEX_INIT) {
                unsigned long *free = (unsigned long *) nsindex->free;
                u32 nfree = ALIGN(nslot, BITS_PER_LONG);
                int last_bits, i;

                /* mark every slot free, then clear the pad bits past nslot */
                memset(nsindex->free, 0xff, nfree / 8);
                for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
                        clear_bit_le(nslot + i, free);
        }
        /* checksum is computed with the checksum field zeroed (above) */
        checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
        nsindex->checksum = __cpu_to_le64(checksum);
        rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
                        nsindex, sizeof_namespace_index(ndd));
        if (rc < 0)
                return rc;

        if (flags & ND_NSINDEX_INIT)
                return 0;

        /* copy the index we just wrote to the new 'next' */
        WARN_ON(index != ndd->ns_next);
        nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
        ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
        ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
        WARN_ON(ndd->ns_current == ndd->ns_next);

        return 0;
}
574
/* Byte offset of @nd_label from the start of the label/config area. */
static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
                struct nd_namespace_label *nd_label)
{
        unsigned long base = (unsigned long) to_namespace_index(ndd, 0);

        return (unsigned long) nd_label - base;
}
581
582 enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
583 {
584         if (guid_equal(guid, &nvdimm_btt_guid))
585                 return NVDIMM_CCLASS_BTT;
586         else if (guid_equal(guid, &nvdimm_btt2_guid))
587                 return NVDIMM_CCLASS_BTT2;
588         else if (guid_equal(guid, &nvdimm_pfn_guid))
589                 return NVDIMM_CCLASS_PFN;
590         else if (guid_equal(guid, &nvdimm_dax_guid))
591                 return NVDIMM_CCLASS_DAX;
592         else if (guid_equal(guid, &guid_null))
593                 return NVDIMM_CCLASS_NONE;
594
595         return NVDIMM_CCLASS_UNKNOWN;
596 }
597
598 static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
599         guid_t *target)
600 {
601         if (claim_class == NVDIMM_CCLASS_BTT)
602                 return &nvdimm_btt_guid;
603         else if (claim_class == NVDIMM_CCLASS_BTT2)
604                 return &nvdimm_btt2_guid;
605         else if (claim_class == NVDIMM_CCLASS_PFN)
606                 return &nvdimm_pfn_guid;
607         else if (claim_class == NVDIMM_CCLASS_DAX)
608                 return &nvdimm_dax_guid;
609         else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
610                 /*
611                  * If we're modifying a namespace for which we don't
612                  * know the claim_class, don't touch the existing guid.
613                  */
614                 return target;
615         } else
616                 return &guid_null;
617 }
618
/*
 * Release the on-media slot backing @victim and drop the in-memory
 * label reference.  The caller in this file invokes this while holding
 * nd_mapping->lock.
 */
static void reap_victim(struct nd_mapping *nd_mapping,
                struct nd_label_ent *victim)
{
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        u32 slot = to_slot(ndd, victim->label);

        dev_dbg(ndd->dev, "free: %d\n", slot);
        nd_label_free_slot(ndd, slot);
        victim->label = NULL;
}
629
/*
 * __pmem_label_update - write an updated pmem namespace label for one mapping
 * @nd_region: parent region supplying the interleave-set cookie
 * @nd_mapping: dimm mapping whose label area is updated
 * @nspm: pmem namespace being recorded
 * @pos: this mapping's position within the interleave set
 * @flags: label flags to record (e.g. read-only)
 *
 * Allocates a slot in the staging index, writes the new label, garbage
 * collects the superseded label(s) for the same uuid, then commits the
 * staging index.  Returns 0 on success or a negative errno.
 */
static int __pmem_label_update(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
                int pos, unsigned long flags)
{
        struct nd_namespace_common *ndns = &nspm->nsio.common;
        struct nd_interleave_set *nd_set = nd_region->nd_set;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct nd_namespace_label *nd_label;
        struct nd_namespace_index *nsindex;
        struct nd_label_ent *label_ent;
        struct nd_label_id label_id;
        struct resource *res;
        unsigned long *free;
        u32 nslot, slot;
        size_t offset;
        u64 cookie;
        int rc;

        if (!preamble_next(ndd, &nsindex, &free, &nslot))
                return -ENXIO;

        cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
        nd_label_gen_id(&label_id, nspm->uuid, 0);
        /* locate this namespace's dpa allocation on the dimm */
        for_each_dpa_resource(ndd, res)
                if (strcmp(res->name, label_id.id) == 0)
                        break;

        if (!res) {
                WARN_ON_ONCE(1);
                return -ENXIO;
        }

        /* allocate and write the label to the staging (next) index */
        slot = nd_label_alloc_slot(ndd);
        if (slot == UINT_MAX)
                return -ENXIO;
        dev_dbg(ndd->dev, "%s: allocated: %d\n", __func__, slot);

        nd_label = to_label(ndd, slot);
        memset(nd_label, 0, sizeof_namespace_label(ndd));
        memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
        if (nspm->alt_name)
                memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
        nd_label->flags = __cpu_to_le32(flags);
        nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
        nd_label->position = __cpu_to_le16(pos);
        nd_label->isetcookie = __cpu_to_le64(cookie);
        nd_label->rawsize = __cpu_to_le64(resource_size(res));
        nd_label->lbasize = __cpu_to_le64(nspm->lbasize);
        nd_label->dpa = __cpu_to_le64(res->start);
        nd_label->slot = __cpu_to_le32(slot);
        /* v1.2-only fields, present depending on label size */
        if (namespace_label_has(ndd, type_guid))
                guid_copy(&nd_label->type_guid, &nd_set->type_guid);
        if (namespace_label_has(ndd, abstraction_guid))
                guid_copy(&nd_label->abstraction_guid,
                                to_abstraction_guid(ndns->claim_class,
                                        &nd_label->abstraction_guid));
        if (namespace_label_has(ndd, checksum)) {
                u64 sum;

                /* checksum is computed with the field itself zeroed */
                nd_label->checksum = __cpu_to_le64(0);
                sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
                nd_label->checksum = __cpu_to_le64(sum);
        }
        nd_dbg_dpa(nd_region, ndd, res, "%s\n", __func__);

        /* update label */
        offset = nd_label_offset(ndd, nd_label);
        rc = nvdimm_set_config_data(ndd, offset, nd_label,
                        sizeof_namespace_label(ndd));
        if (rc < 0)
                return rc;

        /* Garbage collect the previous label */
        mutex_lock(&nd_mapping->lock);
        list_for_each_entry(label_ent, &nd_mapping->labels, list) {
                if (!label_ent->label)
                        continue;
                /* reap labels flagged for removal or superseded by this uuid */
                if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
                                || memcmp(nspm->uuid, label_ent->label->uuid,
                                        NSLABEL_UUID_LEN) == 0)
                        reap_victim(nd_mapping, label_ent);
        }

        /* update index */
        rc = nd_label_write_index(ndd, ndd->ns_next,
                        nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
        if (rc == 0) {
                /* record the new label in the first empty tracking slot */
                list_for_each_entry(label_ent, &nd_mapping->labels, list)
                        if (!label_ent->label) {
                                label_ent->label = nd_label;
                                nd_label = NULL;
                                break;
                        }
                dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
                                "failed to track label: %d\n",
                                to_slot(ndd, nd_label));
                if (nd_label)
                        rc = -ENXIO;
        }
        mutex_unlock(&nd_mapping->lock);

        return rc;
}
734
735 static bool is_old_resource(struct resource *res, struct resource **list, int n)
736 {
737         int i;
738
739         if (res->flags & DPA_RESOURCE_ADJUSTED)
740                 return false;
741         for (i = 0; i < n; i++)
742                 if (res == list[i])
743                         return true;
744         return false;
745 }
746
/*
 * Find the dpa resource whose start and size exactly match the extent
 * recorded in @nd_label, or NULL when no current allocation corresponds
 * to the label.
 */
static struct resource *to_resource(struct nvdimm_drvdata *ndd,
                struct nd_namespace_label *nd_label)
{
        struct resource *res;

        for_each_dpa_resource(ndd, res) {
                if (res->start != __le64_to_cpu(nd_label->dpa))
                        continue;
                if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))
                        continue;
                return res;
        }

        return NULL;
}
762
763 /*
764  * 1/ Account all the labels that can be freed after this update
765  * 2/ Allocate and write the label to the staging (next) index
766  * 3/ Record the resources in the namespace device
767  */
768 static int __blk_label_update(struct nd_region *nd_region,
769                 struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
770                 int num_labels)
771 {
772         int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
773         struct nd_interleave_set *nd_set = nd_region->nd_set;
774         struct nd_namespace_common *ndns = &nsblk->common;
775         struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
776         struct nd_namespace_label *nd_label;
777         struct nd_label_ent *label_ent, *e;
778         struct nd_namespace_index *nsindex;
779         unsigned long *free, *victim_map = NULL;
780         struct resource *res, **old_res_list;
781         struct nd_label_id label_id;
782         u8 uuid[NSLABEL_UUID_LEN];
783         int min_dpa_idx = 0;
784         LIST_HEAD(list);
785         u32 nslot, slot;
786
787         if (!preamble_next(ndd, &nsindex, &free, &nslot))
788                 return -ENXIO;
789
790         old_res_list = nsblk->res;
791         nfree = nd_label_nfree(ndd);
792         old_num_resources = nsblk->num_resources;
793         nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
794
795         /*
796          * We need to loop over the old resources a few times, which seems a
797          * bit inefficient, but we need to know that we have the label
798          * space before we start mutating the tracking structures.
799          * Otherwise the recovery method of last resort for userspace is
800          * disable and re-enable the parent region.
801          */
802         alloc = 0;
803         for_each_dpa_resource(ndd, res) {
804                 if (strcmp(res->name, label_id.id) != 0)
805                         continue;
806                 if (!is_old_resource(res, old_res_list, old_num_resources))
807                         alloc++;
808         }
809
810         victims = 0;
811         if (old_num_resources) {
812                 /* convert old local-label-map to dimm-slot victim-map */
813                 victim_map = kcalloc(BITS_TO_LONGS(nslot), sizeof(long),
814                                 GFP_KERNEL);
815                 if (!victim_map)
816                         return -ENOMEM;
817
818                 /* mark unused labels for garbage collection */
819                 for_each_clear_bit_le(slot, free, nslot) {
820                         nd_label = to_label(ndd, slot);
821                         memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
822                         if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
823                                 continue;
824                         res = to_resource(ndd, nd_label);
825                         if (res && is_old_resource(res, old_res_list,
826                                                 old_num_resources))
827                                 continue;
828                         slot = to_slot(ndd, nd_label);
829                         set_bit(slot, victim_map);
830                         victims++;
831                 }
832         }
833
834         /* don't allow updates that consume the last label */
835         if (nfree - alloc < 0 || nfree - alloc + victims < 1) {
836                 dev_info(&nsblk->common.dev, "insufficient label space\n");
837                 kfree(victim_map);
838                 return -ENOSPC;
839         }
840         /* from here on we need to abort on error */
841
842
843         /* assign all resources to the namespace before writing the labels */
844         nsblk->res = NULL;
845         nsblk->num_resources = 0;
846         for_each_dpa_resource(ndd, res) {
847                 if (strcmp(res->name, label_id.id) != 0)
848                         continue;
849                 if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) {
850                         rc = -ENOMEM;
851                         goto abort;
852                 }
853         }
854
855         /* release slots associated with any invalidated UUIDs */
856         mutex_lock(&nd_mapping->lock);
857         list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list)
858                 if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)) {
859                         reap_victim(nd_mapping, label_ent);
860                         list_move(&label_ent->list, &list);
861                 }
862         mutex_unlock(&nd_mapping->lock);
863
864         /*
865          * Find the resource associated with the first label in the set
866          * per the v1.2 namespace specification.
867          */
868         for (i = 0; i < nsblk->num_resources; i++) {
869                 struct resource *min = nsblk->res[min_dpa_idx];
870
871                 res = nsblk->res[i];
872                 if (res->start < min->start)
873                         min_dpa_idx = i;
874         }
875
876         for (i = 0; i < nsblk->num_resources; i++) {
877                 size_t offset;
878
879                 res = nsblk->res[i];
880                 if (is_old_resource(res, old_res_list, old_num_resources))
881                         continue; /* carry-over */
882                 slot = nd_label_alloc_slot(ndd);
883                 if (slot == UINT_MAX)
884                         goto abort;
885                 dev_dbg(ndd->dev, "%s: allocated: %d\n", __func__, slot);
886
887                 nd_label = to_label(ndd, slot);
888                 memset(nd_label, 0, sizeof_namespace_label(ndd));
889                 memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
890                 if (nsblk->alt_name)
891                         memcpy(nd_label->name, nsblk->alt_name,
892                                         NSLABEL_NAME_LEN);
893                 nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);
894
895                 /*
896                  * Use the presence of the type_guid as a flag to
897                  * determine isetcookie usage and nlabel + position
898                  * policy for blk-aperture namespaces.
899                  */
900                 if (namespace_label_has(ndd, type_guid)) {
901                         if (i == min_dpa_idx) {
902                                 nd_label->nlabel = __cpu_to_le16(nsblk->num_resources);
903                                 nd_label->position = __cpu_to_le16(0);
904                         } else {
905                                 nd_label->nlabel = __cpu_to_le16(0xffff);
906                                 nd_label->position = __cpu_to_le16(0xffff);
907                         }
908                         nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2);
909                 } else {
910                         nd_label->nlabel = __cpu_to_le16(0); /* N/A */
911                         nd_label->position = __cpu_to_le16(0); /* N/A */
912                         nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
913                 }
914
915                 nd_label->dpa = __cpu_to_le64(res->start);
916                 nd_label->rawsize = __cpu_to_le64(resource_size(res));
917                 nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
918                 nd_label->slot = __cpu_to_le32(slot);
919                 if (namespace_label_has(ndd, type_guid))
920                         guid_copy(&nd_label->type_guid, &nd_set->type_guid);
921                 if (namespace_label_has(ndd, abstraction_guid))
922                         guid_copy(&nd_label->abstraction_guid,
923                                         to_abstraction_guid(ndns->claim_class,
924                                                 &nd_label->abstraction_guid));
925
926                 if (namespace_label_has(ndd, checksum)) {
927                         u64 sum;
928
929                         nd_label->checksum = __cpu_to_le64(0);
930                         sum = nd_fletcher64(nd_label,
931                                         sizeof_namespace_label(ndd), 1);
932                         nd_label->checksum = __cpu_to_le64(sum);
933                 }
934
935                 /* update label */
936                 offset = nd_label_offset(ndd, nd_label);
937                 rc = nvdimm_set_config_data(ndd, offset, nd_label,
938                                 sizeof_namespace_label(ndd));
939                 if (rc < 0)
940                         goto abort;
941         }
942
943         /* free up now unused slots in the new index */
944         for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
945                 dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
946                 nd_label_free_slot(ndd, slot);
947         }
948
949         /* update index */
950         rc = nd_label_write_index(ndd, ndd->ns_next,
951                         nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
952         if (rc)
953                 goto abort;
954
955         /*
956          * Now that the on-dimm labels are up to date, fix up the tracking
957          * entries in nd_mapping->labels
958          */
959         nlabel = 0;
960         mutex_lock(&nd_mapping->lock);
961         list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
962                 nd_label = label_ent->label;
963                 if (!nd_label)
964                         continue;
965                 nlabel++;
966                 memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
967                 if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
968                         continue;
969                 nlabel--;
970                 list_move(&label_ent->list, &list);
971                 label_ent->label = NULL;
972         }
973         list_splice_tail_init(&list, &nd_mapping->labels);
974         mutex_unlock(&nd_mapping->lock);
975
976         if (nlabel + nsblk->num_resources > num_labels) {
977                 /*
978                  * Bug, we can't end up with more resources than
979                  * available labels
980                  */
981                 WARN_ON_ONCE(1);
982                 rc = -ENXIO;
983                 goto out;
984         }
985
986         mutex_lock(&nd_mapping->lock);
987         label_ent = list_first_entry_or_null(&nd_mapping->labels,
988                         typeof(*label_ent), list);
989         if (!label_ent) {
990                 WARN_ON(1);
991                 mutex_unlock(&nd_mapping->lock);
992                 rc = -ENXIO;
993                 goto out;
994         }
995         for_each_clear_bit_le(slot, free, nslot) {
996                 nd_label = to_label(ndd, slot);
997                 memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
998                 if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
999                         continue;
1000                 res = to_resource(ndd, nd_label);
1001                 res->flags &= ~DPA_RESOURCE_ADJUSTED;
1002                 dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot);
1003                 list_for_each_entry_from(label_ent, &nd_mapping->labels, list) {
1004                         if (label_ent->label)
1005                                 continue;
1006                         label_ent->label = nd_label;
1007                         nd_label = NULL;
1008                         break;
1009                 }
1010                 if (nd_label)
1011                         dev_WARN(&nsblk->common.dev,
1012                                         "failed to track label slot%d\n", slot);
1013         }
1014         mutex_unlock(&nd_mapping->lock);
1015
1016  out:
1017         kfree(old_res_list);
1018         kfree(victim_map);
1019         return rc;
1020
1021  abort:
1022         /*
1023          * 1/ repair the allocated label bitmap in the index
1024          * 2/ restore the resource list
1025          */
1026         nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd));
1027         kfree(nsblk->res);
1028         nsblk->res = old_res_list;
1029         nsblk->num_resources = old_num_resources;
1030         old_res_list = NULL;
1031         goto out;
1032 }
1033
1034 static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
1035 {
1036         int i, old_num_labels = 0;
1037         struct nd_label_ent *label_ent;
1038         struct nd_namespace_index *nsindex;
1039         struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1040
1041         mutex_lock(&nd_mapping->lock);
1042         list_for_each_entry(label_ent, &nd_mapping->labels, list)
1043                 old_num_labels++;
1044         mutex_unlock(&nd_mapping->lock);
1045
1046         /*
1047          * We need to preserve all the old labels for the mapping so
1048          * they can be garbage collected after writing the new labels.
1049          */
1050         for (i = old_num_labels; i < num_labels; i++) {
1051                 label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
1052                 if (!label_ent)
1053                         return -ENOMEM;
1054                 mutex_lock(&nd_mapping->lock);
1055                 list_add_tail(&label_ent->list, &nd_mapping->labels);
1056                 mutex_unlock(&nd_mapping->lock);
1057         }
1058
1059         if (ndd->ns_current == -1 || ndd->ns_next == -1)
1060                 /* pass */;
1061         else
1062                 return max(num_labels, old_num_labels);
1063
1064         nsindex = to_namespace_index(ndd, 0);
1065         memset(nsindex, 0, ndd->nsarea.config_size);
1066         for (i = 0; i < 2; i++) {
1067                 int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);
1068
1069                 if (rc)
1070                         return rc;
1071         }
1072         ndd->ns_next = 1;
1073         ndd->ns_current = 0;
1074
1075         return max(num_labels, old_num_labels);
1076 }
1077
1078 static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
1079 {
1080         struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1081         struct nd_label_ent *label_ent, *e;
1082         struct nd_namespace_index *nsindex;
1083         u8 label_uuid[NSLABEL_UUID_LEN];
1084         unsigned long *free;
1085         LIST_HEAD(list);
1086         u32 nslot, slot;
1087         int active = 0;
1088
1089         if (!uuid)
1090                 return 0;
1091
1092         /* no index || no labels == nothing to delete */
1093         if (!preamble_next(ndd, &nsindex, &free, &nslot))
1094                 return 0;
1095
1096         mutex_lock(&nd_mapping->lock);
1097         list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
1098                 struct nd_namespace_label *nd_label = label_ent->label;
1099
1100                 if (!nd_label)
1101                         continue;
1102                 active++;
1103                 memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
1104                 if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
1105                         continue;
1106                 active--;
1107                 slot = to_slot(ndd, nd_label);
1108                 nd_label_free_slot(ndd, slot);
1109                 dev_dbg(ndd->dev, "%s: free: %d\n", __func__, slot);
1110                 list_move_tail(&label_ent->list, &list);
1111                 label_ent->label = NULL;
1112         }
1113         list_splice_tail_init(&list, &nd_mapping->labels);
1114
1115         if (active == 0) {
1116                 nd_mapping_free_labels(nd_mapping);
1117                 dev_dbg(ndd->dev, "%s: no more active labels\n", __func__);
1118         }
1119         mutex_unlock(&nd_mapping->lock);
1120
1121         return nd_label_write_index(ndd, ndd->ns_next,
1122                         nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
1123 }
1124
1125 int nd_pmem_namespace_label_update(struct nd_region *nd_region,
1126                 struct nd_namespace_pmem *nspm, resource_size_t size)
1127 {
1128         int i, rc;
1129
1130         for (i = 0; i < nd_region->ndr_mappings; i++) {
1131                 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1132                 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1133                 struct resource *res;
1134                 int count = 0;
1135
1136                 if (size == 0) {
1137                         rc = del_labels(nd_mapping, nspm->uuid);
1138                         if (rc)
1139                                 return rc;
1140                         continue;
1141                 }
1142
1143                 for_each_dpa_resource(ndd, res)
1144                         if (strncmp(res->name, "pmem", 4) == 0)
1145                                 count++;
1146                 WARN_ON_ONCE(!count);
1147
1148                 rc = init_labels(nd_mapping, count);
1149                 if (rc < 0)
1150                         return rc;
1151
1152                 rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
1153                                 NSLABEL_FLAG_UPDATING);
1154                 if (rc)
1155                         return rc;
1156         }
1157
1158         if (size == 0)
1159                 return 0;
1160
1161         /* Clear the UPDATING flag per UEFI 2.7 expectations */
1162         for (i = 0; i < nd_region->ndr_mappings; i++) {
1163                 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1164
1165                 rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
1166                 if (rc)
1167                         return rc;
1168         }
1169
1170         return 0;
1171 }
1172
1173 int nd_blk_namespace_label_update(struct nd_region *nd_region,
1174                 struct nd_namespace_blk *nsblk, resource_size_t size)
1175 {
1176         struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1177         struct resource *res;
1178         int count = 0;
1179
1180         if (size == 0)
1181                 return del_labels(nd_mapping, nsblk->uuid);
1182
1183         for_each_dpa_resource(to_ndd(nd_mapping), res)
1184                 count++;
1185
1186         count = init_labels(nd_mapping, count);
1187         if (count < 0)
1188                 return count;
1189
1190         return __blk_label_update(nd_region, nd_mapping, nsblk, count);
1191 }
1192
1193 int __init nd_label_init(void)
1194 {
1195         WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
1196         WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
1197         WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
1198         WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));
1199
1200         return 0;
1201 }