/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __XEN_BLKIF__BACKEND__COMMON_H__
#define __XEN_BLKIF__BACKEND__COMMON_H__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/rbtree.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/hypervisor.h>
#include <xen/grant_table.h>
#include <xen/page.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

extern unsigned int xen_blkif_max_ring_order;
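/*
 * xen_blkif_max_ring_order bounds the order of the shared I/O ring: an
 * order-N ring spans 1 << N pages.  The backend advertises this limit to
 * the frontend through the "max-ring-page-order" xenstore key (the module
 * parameter itself lives in xen-blkback/blkback.c).
 */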
/*
 * The maximum number of segments allowed in an indirect request.  This
 * value is also advertised to the frontend.
 */
#define MAX_INDIRECT_SEGMENTS 256

/*
 * Xen uses 4K pages.  The guest may use a different page size (4K or 64K).
 * Number of Xen pages per segment:
 */
#define XEN_PAGES_PER_SEGMENT   (PAGE_SIZE / XEN_PAGE_SIZE)

#define XEN_PAGES_PER_INDIRECT_FRAME \
        (XEN_PAGE_SIZE/sizeof(struct blkif_request_segment))
#define SEGS_PER_INDIRECT_FRAME \
        (XEN_PAGES_PER_INDIRECT_FRAME / XEN_PAGES_PER_SEGMENT)

#define MAX_INDIRECT_PAGES \
        ((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) DIV_ROUND_UP(_segs, XEN_PAGES_PER_INDIRECT_FRAME)
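
/*
 * Worked example (illustrative, assuming sizeof(struct
 * blkif_request_segment) == 8): with 4K pages on both sides,
 * XEN_PAGES_PER_SEGMENT == 1 and an indirect frame holds 4096 / 8 == 512
 * segment entries, so MAX_INDIRECT_SEGMENTS == 256 fits in a single
 * indirect page.  With 64K guest pages, XEN_PAGES_PER_SEGMENT == 16, each
 * frame carries 512 / 16 == 32 segments, and 256 segments need 8 pages.
 */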

/* Not a real protocol.  Used to generate ring structs which contain only
 * the elements common to all protocols.  This gives us a
 * compiler-checkable way to use common struct elements, so we can
 * avoid using switch(protocol) in a number of places.  */
struct blkif_common_request {
        char dummy;
};

/* i386 protocol version */

struct blkif_x86_32_request_rw {
        uint8_t        nr_segments;  /* number of segments                   */
        blkif_vdev_t   handle;       /* only for read/write requests         */
        uint64_t       id;           /* private guest value, echoed in resp  */
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_32_request_discard {
        uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
        blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
        uint64_t       id;           /* private guest value, echoed in resp  */
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        uint64_t       nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_32_request_other {
        uint8_t        _pad1;
        blkif_vdev_t   _pad2;
        uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_32_request_indirect {
        uint8_t        indirect_op;
        uint16_t       nr_segments;
        uint64_t       id;
        blkif_sector_t sector_number;
        blkif_vdev_t   handle;
        uint16_t       _pad1;
        grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
        /*
         * The maximum number of indirect segments (and pages) that will
         * be used is determined by MAX_INDIRECT_SEGMENTS; this value
         * is also exported to the guest (via the xenstore
         * feature-max-indirect-segments entry), so the frontend knows how
         * many indirect segments the backend supports.
         */
        uint64_t       _pad2;        /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_32_request {
        uint8_t        operation;    /* BLKIF_OP_???                         */
        union {
                struct blkif_x86_32_request_rw rw;
                struct blkif_x86_32_request_discard discard;
                struct blkif_x86_32_request_other other;
                struct blkif_x86_32_request_indirect indirect;
        } u;
} __attribute__((__packed__));

/* x86_64 protocol version */

struct blkif_x86_64_request_rw {
        uint8_t        nr_segments;  /* number of segments                   */
        blkif_vdev_t   handle;       /* only for read/write requests         */
        uint32_t       _pad1;        /* offsetof(blkif_request..,u.rw.id)==8 */
        uint64_t       id;
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_64_request_discard {
        uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero         */
        blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
        uint32_t       _pad2;        /* offsetof(blkif_..,u.discard.id)==8   */
        uint64_t       id;
        blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
        uint64_t       nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_64_request_other {
        uint8_t        _pad1;
        blkif_vdev_t   _pad2;
        uint32_t       _pad3;        /* offsetof(blkif_..,u.other.id)==8     */
        uint64_t       id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_x86_64_request_indirect {
        uint8_t        indirect_op;
        uint16_t       nr_segments;
        uint32_t       _pad1;        /* offsetof(blkif_..,u.indirect.id)==8  */
        uint64_t       id;
        blkif_sector_t sector_number;
        blkif_vdev_t   handle;
        uint16_t       _pad2;
        grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
        /*
         * The maximum number of indirect segments (and pages) that will
         * be used is determined by MAX_INDIRECT_SEGMENTS; this value
         * is also exported to the guest (via the xenstore
         * feature-max-indirect-segments entry), so the frontend knows how
         * many indirect segments the backend supports.
         */
        uint32_t       _pad3;        /* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_64_request {
        uint8_t        operation;    /* BLKIF_OP_???                         */
        union {
                struct blkif_x86_64_request_rw rw;
                struct blkif_x86_64_request_discard discard;
                struct blkif_x86_64_request_other other;
                struct blkif_x86_64_request_indirect indirect;
        } u;
} __attribute__((__packed__));

DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
                  struct blkif_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
                  struct blkif_response __packed);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
                  struct blkif_response);
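
/*
 * DEFINE_RING_TYPES(name, req_t, rsp_t) comes from
 * xen/interface/io/ring.h: for each protocol it generates the shared
 * ring layout (name_sring) plus the producer/consumer bookkeeping types
 * (name_front_ring, name_back_ring) used in the union below.
 */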

union blkif_back_rings {
        struct blkif_back_ring        native;
        struct blkif_common_back_ring common;
        struct blkif_x86_32_back_ring x86_32;
        struct blkif_x86_64_back_ring x86_64;
};

enum blkif_protocol {
        BLKIF_PROTOCOL_NATIVE = 1,
        BLKIF_PROTOCOL_X86_32 = 2,
        BLKIF_PROTOCOL_X86_64 = 3,
};

/*
 * Default protocol if the frontend doesn't specify one.
 */
#ifdef CONFIG_X86
#  define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_X86_32
#else
#  define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_NATIVE
#endif
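
/*
 * Hypothetical helper (a sketch, not part of this driver; the real
 * negotiation lives in xen-blkback/xenbus.c): map the ABI string a
 * frontend writes to its "protocol" xenstore node onto the enum above.
 * The XEN_IO_PROTO_ABI_* strings come from xen/interface/io/protocols.h.
 */
static inline enum blkif_protocol blkif_protocol_from_abi(const char *abi)
{
        if (strcmp(abi, XEN_IO_PROTO_ABI_NATIVE) == 0)
                return BLKIF_PROTOCOL_NATIVE;
        if (strcmp(abi, XEN_IO_PROTO_ABI_X86_32) == 0)
                return BLKIF_PROTOCOL_X86_32;
        if (strcmp(abi, XEN_IO_PROTO_ABI_X86_64) == 0)
                return BLKIF_PROTOCOL_X86_64;
        /* Unknown ABI: fall back to the arch default (the real code errors out). */
        return BLKIF_PROTOCOL_DEFAULT;
}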

struct xen_vbd {
        /* What the domain refers to this vbd as. */
        blkif_vdev_t            handle;
        /* Non-zero -> read-only */
        unsigned char           readonly;
        /* VDISK_xxx */
        unsigned char           type;
        /* phys device that this vbd maps to. */
        u32                     pdevice;
        struct block_device     *bdev;
        /* Cached size parameter. */
        sector_t                size;
        unsigned int            flush_support:1;
        unsigned int            discard_secure:1;
        unsigned int            feature_gnt_persistent:1;
        unsigned int            overflow_max_grants:1;
};

struct backend_info;

/* Number of available flags */
#define PERSISTENT_GNT_FLAGS_SIZE       2
/* This persistent grant is currently in use */
#define PERSISTENT_GNT_ACTIVE           0
/*
 * This persistent grant has been used; the flag is set when we clear
 * PERSISTENT_GNT_ACTIVE, so that we know the grant was used recently.
 */
#define PERSISTENT_GNT_WAS_ACTIVE       1
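
/*
 * Lifecycle sketch (an assumption drawn from the flag names; the real
 * state machine is in xen-blkback/blkback.c).  "gnt" is a hypothetical
 * struct persistent_gnt pointer (the struct is defined below):
 *
 *      set_bit(PERSISTENT_GNT_ACTIVE, gnt->flags);      grant mapped for I/O
 *      clear_bit(PERSISTENT_GNT_ACTIVE, gnt->flags);    I/O completed
 *      set_bit(PERSISTENT_GNT_WAS_ACTIVE, gnt->flags);  remember recent use,
 *        so the LRU purge can skip grants that were active since its last scan.
 */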

/* Number of requests that we can fit in a single ring page */
#define XEN_BLKIF_REQS_PER_PAGE         32

struct persistent_gnt {
        struct page *page;
        grant_ref_t gnt;
        grant_handle_t handle;
        DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
        struct rb_node node;
        struct list_head remove_node;
};

struct xen_blkif {
        /* Unique identifier for this interface. */
        domid_t                 domid;
        unsigned int            handle;
        /* Physical parameters of the comms window. */
        unsigned int            irq;
        /* Comms information. */
        enum blkif_protocol     blk_protocol;
        union blkif_back_rings  blk_rings;
        void                    *blk_ring;
        /* The VBD attached to this interface. */
        struct xen_vbd          vbd;
        /* Back pointer to the backend_info. */
        struct backend_info     *be;
        /* Private fields. */
        spinlock_t              blk_ring_lock;
        atomic_t                refcnt;

        wait_queue_head_t       wq;
        /* for barrier (drain) requests */
        struct completion       drain_complete;
        atomic_t                drain;
        atomic_t                inflight;
        /* One thread per blkif. */
        struct task_struct      *xenblkd;
        unsigned int            waiting_reqs;

        /* tree to store persistent grants */
        struct rb_root          persistent_gnts;
        unsigned int            persistent_gnt_c;
        atomic_t                persistent_gnt_in_use;
        unsigned long           next_lru;

        /* used by the kworker that offloads work from the persistent purge */
        struct list_head        persistent_purge_list;
        struct work_struct      persistent_purge_work;

        /* buffer of free pages to map grant refs */
        spinlock_t              free_pages_lock;
        int                     free_pages_num;
        struct list_head        free_pages;

        /* List of all 'pending_req' available */
        struct list_head        pending_free;
        /* And its spinlock. */
        spinlock_t              pending_free_lock;
        wait_queue_head_t       pending_free_wq;

        /* statistics */
        unsigned long           st_print;
        unsigned long long      st_rd_req;
        unsigned long long      st_wr_req;
        unsigned long long      st_oo_req;
        unsigned long long      st_f_req;
        unsigned long long      st_ds_req;
        unsigned long long      st_rd_sect;
        unsigned long long      st_wr_sect;

        struct work_struct      free_work;
        /* Thread shutdown wait queue. */
        wait_queue_head_t       shutdown_wq;
        unsigned int            nr_ring_pages;
};

struct seg_buf {
        unsigned long offset;
        unsigned int nsec;
};

struct grant_page {
        struct page             *page;
        struct persistent_gnt   *persistent_gnt;
        grant_handle_t          handle;
        grant_ref_t             gref;
};

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements
 * pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
        struct xen_blkif        *blkif;
        u64                     id;
        int                     nr_segs;
        atomic_t                pendcnt;
        unsigned short          operation;
        int                     status;
        struct list_head        free_list;
        struct grant_page       *segments[MAX_INDIRECT_SEGMENTS];
        /* Indirect descriptors */
        struct grant_page       *indirect_pages[MAX_INDIRECT_PAGES];
        struct seg_buf          seg[MAX_INDIRECT_SEGMENTS];
        struct bio              *biolist[MAX_INDIRECT_SEGMENTS];
        struct gnttab_unmap_grant_ref unmap[MAX_INDIRECT_SEGMENTS];
        struct page                   *unmap_pages[MAX_INDIRECT_SEGMENTS];
        struct gntab_unmap_queue_data gnttab_unmap_data;
};


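/*
 * Size of the vbd in 512-byte sectors: the partition length when the
 * underlying block device is a partition, otherwise the capacity of the
 * whole disk.
 */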
#define vbd_sz(_v)      ((_v)->bdev->bd_part ? \
                         (_v)->bdev->bd_part->nr_sects : \
                          get_capacity((_v)->bdev->bd_disk))

#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b)                               \
        do {                                            \
                if (atomic_dec_and_test(&(_b)->refcnt)) \
                        schedule_work(&(_b)->free_work);\
        } while (0)
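
/*
 * Typical usage (a sketch; the actual callers are in xen-blkback/blkback.c
 * and xenbus.c): hold a reference for as long as requests may still reach
 * the interface, e.g.
 *
 *      xen_blkif_get(blkif);          take a ref before starting work
 *      ...                            submit and complete I/O
 *      xen_blkif_put(blkif);          the final put schedules free_work,
 *                                     so teardown runs in process context
 */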

struct phys_req {
        unsigned short          dev;
        blkif_sector_t          nr_sects;
        struct block_device     *bdev;
        blkif_sector_t          sector_number;
};

int xen_blkif_interface_init(void);

int xen_blkif_xenbus_init(void);

irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
int xen_blkif_purge_persistent(void *arg);
void xen_blkbk_free_caches(struct xen_blkif *blkif);

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
                              struct backend_info *be, int state);

int xen_blkbk_barrier(struct xenbus_transaction xbt,
                      struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
void xen_blkbk_unmap_purged_grants(struct work_struct *work);

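/*
 * The helpers below copy a request from the shared ring into local
 * memory before it is parsed.  dst->operation is fetched with
 * READ_ONCE() and barrier() separates the copy of the header from the
 * use of nr_segments, so a frontend that rewrites the shared entry
 * mid-flight cannot bypass the bounds checks (cf. XSA-155).
 */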
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
                                        struct blkif_x86_32_request *src)
{
        int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;

        dst->operation = READ_ONCE(src->operation);
        switch (dst->operation) {
        case BLKIF_OP_READ:
        case BLKIF_OP_WRITE:
        case BLKIF_OP_WRITE_BARRIER:
        case BLKIF_OP_FLUSH_DISKCACHE:
                dst->u.rw.nr_segments = src->u.rw.nr_segments;
                dst->u.rw.handle = src->u.rw.handle;
                dst->u.rw.id = src->u.rw.id;
                dst->u.rw.sector_number = src->u.rw.sector_number;
                barrier();
                if (n > dst->u.rw.nr_segments)
                        n = dst->u.rw.nr_segments;
                for (i = 0; i < n; i++)
                        dst->u.rw.seg[i] = src->u.rw.seg[i];
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
                dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
        case BLKIF_OP_INDIRECT:
                dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
                dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
                dst->u.indirect.handle = src->u.indirect.handle;
                dst->u.indirect.id = src->u.indirect.id;
                dst->u.indirect.sector_number = src->u.indirect.sector_number;
                barrier();
                j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
                for (i = 0; i < j; i++)
                        dst->u.indirect.indirect_grefs[i] =
                                src->u.indirect.indirect_grefs[i];
                break;
        default:
                /*
                 * Don't know how to translate this op. Only get the
                 * ID so failure can be reported to the frontend.
                 */
                dst->u.other.id = src->u.other.id;
                break;
        }
}

static inline void blkif_get_x86_64_req(struct blkif_request *dst,
                                        struct blkif_x86_64_request *src)
{
        int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;

        dst->operation = READ_ONCE(src->operation);
        switch (dst->operation) {
        case BLKIF_OP_READ:
        case BLKIF_OP_WRITE:
        case BLKIF_OP_WRITE_BARRIER:
        case BLKIF_OP_FLUSH_DISKCACHE:
                dst->u.rw.nr_segments = src->u.rw.nr_segments;
                dst->u.rw.handle = src->u.rw.handle;
                dst->u.rw.id = src->u.rw.id;
                dst->u.rw.sector_number = src->u.rw.sector_number;
                barrier();
                if (n > dst->u.rw.nr_segments)
                        n = dst->u.rw.nr_segments;
                for (i = 0; i < n; i++)
                        dst->u.rw.seg[i] = src->u.rw.seg[i];
                break;
        case BLKIF_OP_DISCARD:
                dst->u.discard.flag = src->u.discard.flag;
                dst->u.discard.id = src->u.discard.id;
                dst->u.discard.sector_number = src->u.discard.sector_number;
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
        case BLKIF_OP_INDIRECT:
                dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
                dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
                dst->u.indirect.handle = src->u.indirect.handle;
                dst->u.indirect.id = src->u.indirect.id;
                dst->u.indirect.sector_number = src->u.indirect.sector_number;
                barrier();
                j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
                for (i = 0; i < j; i++)
                        dst->u.indirect.indirect_grefs[i] =
                                src->u.indirect.indirect_grefs[i];
                break;
        default:
                /*
                 * Don't know how to translate this op. Only get the
                 * ID so failure can be reported to the frontend.
                 */
                dst->u.other.id = src->u.other.id;
                break;
        }
}
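
/*
 * Usage sketch (assumed; in this driver the consumer is the request loop
 * in xen-blkback/blkback.c): pick the translation that matches the
 * negotiated protocol when pulling an entry off the shared ring, e.g.
 *
 *      switch (blkif->blk_protocol) {
 *      case BLKIF_PROTOCOL_NATIVE:
 *              memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
 *                     sizeof(req));
 *              break;
 *      case BLKIF_PROTOCOL_X86_32:
 *              blkif_get_x86_32_req(&req,
 *                              RING_GET_REQUEST(&blk_rings->x86_32, rc));
 *              break;
 *      case BLKIF_PROTOCOL_X86_64:
 *              blkif_get_x86_64_req(&req,
 *                              RING_GET_REQUEST(&blk_rings->x86_64, rc));
 *              break;
 *      }
 */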

#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */