drivers/md/persistent-data/dm-btree-spine.c (GNU Linux-libre 5.19-rc6-gnu)
/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-btree-internal.h"
#include "dm-transaction-manager.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "btree spine"

/*----------------------------------------------------------------*/

#define BTREE_CSUM_XOR 121107

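/*
 * Fill in the node header immediately before the block is written:
 * record the block's on-disk location and checksum everything from the
 * flags field to the end of the block (the csum field itself is
 * excluded).
 */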
static void node_prepare_for_write(struct dm_block_validator *v,
                                   struct dm_block *b,
                                   size_t block_size)
{
        struct btree_node *n = dm_block_data(b);
        struct node_header *h = &n->header;

        h->blocknr = cpu_to_le64(dm_block_location(b));
        h->csum = cpu_to_le32(dm_bm_checksum(&h->flags,
                                             block_size - sizeof(__le32),
                                             BTREE_CSUM_XOR));
}

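/*
 * Validate a node as it is read back in: the recorded block number must
 * match the location the block was read from, the checksum must be
 * correct, the entry counts must fit within the block, and the node
 * must be flagged as either an internal or a leaf node.
 */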
static int node_check(struct dm_block_validator *v,
                      struct dm_block *b,
                      size_t block_size)
{
        struct btree_node *n = dm_block_data(b);
        struct node_header *h = &n->header;
        size_t value_size;
        __le32 csum_disk;
        uint32_t flags, nr_entries, max_entries;

        if (dm_block_location(b) != le64_to_cpu(h->blocknr)) {
                DMERR_LIMIT("node_check failed: blocknr %llu != wanted %llu",
                            le64_to_cpu(h->blocknr), dm_block_location(b));
                return -ENOTBLK;
        }

        csum_disk = cpu_to_le32(dm_bm_checksum(&h->flags,
                                               block_size - sizeof(__le32),
                                               BTREE_CSUM_XOR));
        if (csum_disk != h->csum) {
                DMERR_LIMIT("node_check failed: csum %u != wanted %u",
                            le32_to_cpu(csum_disk), le32_to_cpu(h->csum));
                return -EILSEQ;
        }

        nr_entries = le32_to_cpu(h->nr_entries);
        max_entries = le32_to_cpu(h->max_entries);
        value_size = le32_to_cpu(h->value_size);

        if (sizeof(struct node_header) +
            (sizeof(__le64) + value_size) * max_entries > block_size) {
                DMERR_LIMIT("node_check failed: max_entries too large");
                return -EILSEQ;
        }

        if (nr_entries > max_entries) {
                DMERR_LIMIT("node_check failed: too many entries");
                return -EILSEQ;
        }

        /*
         * The node must be either INTERNAL or LEAF.
         */
        flags = le32_to_cpu(h->flags);
        if (!(flags & INTERNAL_NODE) && !(flags & LEAF_NODE)) {
                DMERR_LIMIT("node_check failed: node is neither INTERNAL or LEAF");
                return -EILSEQ;
        }

        return 0;
}

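/*
 * Validator used for every btree node that is read, shadowed or newly
 * allocated by the helpers below.
 */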
struct dm_block_validator btree_node_validator = {
        .name = "btree_node",
        .prepare_for_write = node_prepare_for_write,
        .check = node_check
};

/*----------------------------------------------------------------*/

int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
                 struct dm_block **result)
{
        return dm_tm_read_lock(info->tm, b, &btree_node_validator, result);
}

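/*
 * Shadow (copy-on-write) the block at @orig.  If the transaction
 * manager had to make a fresh copy it reports this via 'inc', in which
 * case the reference counts of the node's children are bumped through
 * the supplied value type.
 */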
static int bn_shadow(struct dm_btree_info *info, dm_block_t orig,
                     struct dm_btree_value_type *vt,
                     struct dm_block **result)
{
        int r, inc;

        r = dm_tm_shadow_block(info->tm, orig, &btree_node_validator,
                               result, &inc);
        if (!r && inc)
                inc_children(info->tm, dm_block_data(*result), vt);

        return r;
}

int new_block(struct dm_btree_info *info, struct dm_block **result)
{
        return dm_tm_new_block(info->tm, &btree_node_validator, result);
}

void unlock_block(struct dm_btree_info *info, struct dm_block *b)
{
        dm_tm_unlock(info->tm, b);
}

/*----------------------------------------------------------------*/

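/*
 * A read-only spine keeps at most two nodes, the current node and its
 * parent, read-locked while walking down the tree.  Each ro_step()
 * locks the next child and, once two nodes are held, drops the
 * grandparent.  A walk might look roughly like this (sketch only;
 * leaf() and next_child() stand in for the caller's own logic, and the
 * real callers are the lookup paths in dm-btree.c):
 *
 *        struct ro_spine spine;
 *        int r;
 *
 *        init_ro_spine(&spine, info);
 *        r = ro_step(&spine, root);
 *        while (!r && !leaf(ro_node(&spine)))
 *                r = ro_step(&spine, next_child(ro_node(&spine)));
 *        exit_ro_spine(&spine);
 */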
void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info)
{
        s->info = info;
        s->count = 0;
        s->nodes[0] = NULL;
        s->nodes[1] = NULL;
}

void exit_ro_spine(struct ro_spine *s)
{
        int i;

        for (i = 0; i < s->count; i++) {
                unlock_block(s->info, s->nodes[i]);
        }
}

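/*
 * Step down to @new_child.  At most two nodes are kept locked, so when
 * a parent and child are already held the older of the two is unlocked
 * before the new child is read-locked.
 */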
int ro_step(struct ro_spine *s, dm_block_t new_child)
{
        int r;

        if (s->count == 2) {
                unlock_block(s->info, s->nodes[0]);
                s->nodes[0] = s->nodes[1];
                s->count--;
        }

        r = bn_read_lock(s->info, new_child, s->nodes + s->count);
        if (!r)
                s->count++;

        return r;
}

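/*
 * Drop the most recently locked node from the spine and unlock it.
 */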
void ro_pop(struct ro_spine *s)
{
        BUG_ON(!s->count);
        --s->count;
        unlock_block(s->info, s->nodes[s->count]);
}

struct btree_node *ro_node(struct ro_spine *s)
{
        struct dm_block *block;

        BUG_ON(!s->count);
        block = s->nodes[s->count - 1];

        return dm_block_data(block);
}

/*----------------------------------------------------------------*/

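/*
 * The shadow spine mirrors the read-only spine but is used on the write
 * path: each step shadows (copy-on-writes) a block rather than merely
 * read-locking it, and the location of the first block shadowed is
 * remembered as the new root of the walk (see shadow_root()).
 */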
void init_shadow_spine(struct shadow_spine *s, struct dm_btree_info *info)
{
        s->info = info;
        s->count = 0;
}

void exit_shadow_spine(struct shadow_spine *s)
{
        int i;

        for (i = 0; i < s->count; i++) {
                unlock_block(s->info, s->nodes[i]);
        }
}

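/*
 * Shadow block @b and push it onto the spine.  If this is the first
 * step, the shadow's location becomes the new root.  As with ro_step(),
 * only the current node and its parent remain locked.
 */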
int shadow_step(struct shadow_spine *s, dm_block_t b,
                struct dm_btree_value_type *vt)
{
        int r;

        if (s->count == 2) {
                unlock_block(s->info, s->nodes[0]);
                s->nodes[0] = s->nodes[1];
                s->count--;
        }

        r = bn_shadow(s->info, b, vt, s->nodes + s->count);
        if (!r) {
                if (!s->count)
                        s->root = dm_block_location(s->nodes[0]);

                s->count++;
        }

        return r;
}

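/*
 * Accessors for the most recently shadowed node and, when one is held,
 * its parent.
 */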
struct dm_block *shadow_current(struct shadow_spine *s)
{
        BUG_ON(!s->count);

        return s->nodes[s->count - 1];
}

struct dm_block *shadow_parent(struct shadow_spine *s)
{
        BUG_ON(s->count != 2);

        return s->count == 2 ? s->nodes[0] : NULL;
}

int shadow_has_parent(struct shadow_spine *s)
{
        return s->count >= 2;
}

dm_block_t shadow_root(struct shadow_spine *s)
{
        return s->root;
}

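/*
 * Value type operations for btree values that are __le64 block
 * addresses.  Reference count changes are batched into runs of
 * consecutive blocks via dm_tm_with_runs() before being handed to the
 * transaction manager; le64_equal() copies the values out with memcpy()
 * rather than dereferencing the pointers directly.
 */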
static void le64_inc(void *context, const void *value_le, unsigned count)
{
        dm_tm_with_runs(context, value_le, count, dm_tm_inc_range);
}

static void le64_dec(void *context, const void *value_le, unsigned count)
{
        dm_tm_with_runs(context, value_le, count, dm_tm_dec_range);
}

static int le64_equal(void *context, const void *value1_le, const void *value2_le)
{
        __le64 v1_le, v2_le;

        memcpy(&v1_le, value1_le, sizeof(v1_le));
        memcpy(&v2_le, value2_le, sizeof(v2_le));
        return v1_le == v2_le;
}

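/*
 * Set up a value type describing __le64 block pointers whose reference
 * counts are managed by @tm, for use wherever a btree's values are
 * themselves block addresses (for example the higher levels of a
 * multi-level btree).
 */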
void init_le64_type(struct dm_transaction_manager *tm,
                    struct dm_btree_value_type *vt)
{
        vt->context = tm;
        vt->size = sizeof(__le64);
        vt->inc = le64_inc;
        vt->dec = le64_dec;
        vt->equal = le64_equal;
}