/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen-blkback: " fmt

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>
#include "common.h"

/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this to a low value will reduce the memory used in each backend,
 * but can incur a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but it
 * can be set to a lower value that might degrade performance on some
 * intensive I/O workloads.
 */

static int xen_blkif_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in each block backend buffer");
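
/*
 * As a usage note: with the 0644 mode above, the pool size can be set at
 * module load time (e.g. "modprobe xen-blkback max_buffer_pages=2048", a
 * value chosen purely for illustration) or adjusted later through
 * /sys/module/xen_blkback/parameters/max_buffer_pages.
 */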

/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */

static int xen_blkif_max_pgrants = 1056;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
                 "Maximum number of grants to map persistently");

/*
 * Maximum order of pages to be used for the shared ring between the
 * frontend and the backend; 4KB page granularity is used.
 */
unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
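
/*
 * Note that max_ring_page_order is registered read-only (S_IRUGO): unlike
 * the two tunables above it can only be set when the module is loaded.
 */
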
/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive executions
 * of the purge mechanism is set in milliseconds.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. This is the percentage of grants to be removed at each
 * LRU execution.
 */
#define LRU_PERCENT_CLEAN 5

/* Run-time switchable: /sys/module/xen_blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to gnttab_free_pages */
#define NUM_BATCH_FREE_PAGES 10

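/*
 * Grab a page from the per-backend pool of unused free pages, falling back
 * to gnttab_alloc_pages() when the pool is empty. Callers recycle pages
 * with put_free_pages(), and shrink_free_pagepool() trims the pool back
 * down to the max_buffer_pages limit.
 */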
static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
{
        unsigned long flags;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        if (list_empty(&blkif->free_pages)) {
                BUG_ON(blkif->free_pages_num != 0);
                spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
                return gnttab_alloc_pages(1, page);
        }
        BUG_ON(blkif->free_pages_num == 0);
        page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
        list_del(&page[0]->lru);
        blkif->free_pages_num--;
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);

        return 0;
}

static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
                                  int num)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        for (i = 0; i < num; i++)
                list_add(&page[i]->lru, &blkif->free_pages);
        blkif->free_pages_num += num;
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
}

static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
{
        /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
        struct page *page[NUM_BATCH_FREE_PAGES];
        unsigned int num_pages = 0;
        unsigned long flags;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        while (blkif->free_pages_num > num) {
                BUG_ON(list_empty(&blkif->free_pages));
                page[num_pages] = list_first_entry(&blkif->free_pages,
                                                   struct page, lru);
                list_del(&page[num_pages]->lru);
                blkif->free_pages_num--;
                if (++num_pages == NUM_BATCH_FREE_PAGES) {
                        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
                        gnttab_free_pages(num_pages, page);
                        spin_lock_irqsave(&blkif->free_pages_lock, flags);
                        num_pages = 0;
                }
        }
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
        if (num_pages != 0)
                gnttab_free_pages(num_pages, page);
}

#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif *blkif, unsigned int *eoi_flags);
static int dispatch_rw_block_io(struct xen_blkif *blkif,
                                struct blkif_request *req,
                                struct pending_req *pending_req);
static void make_response(struct xen_blkif *blkif, u64 id,
                          unsigned short op, int st);

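/*
 * Iterate over a red-black tree of grants in a way that is safe against
 * removal of the current node: 'n' caches the successor before the loop
 * body runs, so the body may rb_erase() and free 'pos' (as
 * free_persistent_gnts() and purge_persistent_gnt() below do).
 */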
#define foreach_grant_safe(pos, n, rbtree, node) \
        for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
             &(pos)->node != NULL; \
             (pos) = container_of(n, typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)

/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called concurrently.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif *blkif,
                               struct persistent_gnt *persistent_gnt)
{
        struct rb_node **new = NULL, *parent = NULL;
        struct persistent_gnt *this;

        if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
                if (!blkif->vbd.overflow_max_grants)
                        blkif->vbd.overflow_max_grants = 1;
                return -EBUSY;
        }
        /* Figure out where to put new node */
        new = &blkif->persistent_gnts.rb_node;
        while (*new) {
                this = container_of(*new, struct persistent_gnt, node);

                parent = *new;
                if (persistent_gnt->gnt < this->gnt)
                        new = &((*new)->rb_left);
                else if (persistent_gnt->gnt > this->gnt)
                        new = &((*new)->rb_right);
                else {
                        pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
                        return -EINVAL;
                }
        }

        bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
        set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
        /* Add new node and rebalance tree. */
        rb_link_node(&(persistent_gnt->node), parent, new);
        rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
        blkif->persistent_gnt_c++;
        atomic_inc(&blkif->persistent_gnt_in_use);
        return 0;
}

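/*
 * Look up a grant reference in the red-black tree. On success the grant
 * is marked PERSISTENT_GNT_ACTIVE and counted as in use, and the caller
 * must release it again with put_persistent_gnt(). Returns NULL if the
 * gref is not in the tree or is already active.
 */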
static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
                                                 grant_ref_t gref)
{
        struct persistent_gnt *data;
        struct rb_node *node = NULL;

        node = blkif->persistent_gnts.rb_node;
        while (node) {
                data = container_of(node, struct persistent_gnt, node);

                if (gref < data->gnt)
                        node = node->rb_left;
                else if (gref > data->gnt)
                        node = node->rb_right;
                else {
                        if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
                                pr_alert_ratelimited("requesting a grant already in use\n");
                                return NULL;
                        }
                        set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
                        atomic_inc(&blkif->persistent_gnt_in_use);
                        return data;
                }
        }
        return NULL;
}

static void put_persistent_gnt(struct xen_blkif *blkif,
                               struct persistent_gnt *persistent_gnt)
{
        if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
                pr_alert_ratelimited("freeing a grant already unused\n");
        set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
        clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
        atomic_dec(&blkif->persistent_gnt_in_use);
}

static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
                                 unsigned int num)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        int segs_to_unmap = 0;
        struct gntab_unmap_queue_data unmap_data;

        unmap_data.pages = pages;
        unmap_data.unmap_ops = unmap;
        unmap_data.kunmap_ops = NULL;

        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                        BLKBACK_INVALID_HANDLE);
                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                        (unsigned long) pfn_to_kaddr(page_to_pfn(
                                persistent_gnt->page)),
                        GNTMAP_host_map,
                        persistent_gnt->handle);

                pages[segs_to_unmap] = persistent_gnt->page;

                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
                        !rb_next(&persistent_gnt->node)) {

                        unmap_data.count = segs_to_unmap;
                        BUG_ON(gnttab_unmap_refs_sync(&unmap_data));

                        put_free_pages(blkif, pages, segs_to_unmap);
                        segs_to_unmap = 0;
                }

                rb_erase(&persistent_gnt->node, root);
                kfree(persistent_gnt);
                num--;
        }
        BUG_ON(num != 0);
}

void xen_blkbk_unmap_purged_grants(struct work_struct *work)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        int segs_to_unmap = 0;
        struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);
        struct gntab_unmap_queue_data unmap_data;

        unmap_data.pages = pages;
        unmap_data.unmap_ops = unmap;
        unmap_data.kunmap_ops = NULL;

        while (!list_empty(&blkif->persistent_purge_list)) {
                persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
                                                  struct persistent_gnt,
                                                  remove_node);
                list_del(&persistent_gnt->remove_node);

                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                        vaddr(persistent_gnt->page),
                        GNTMAP_host_map,
                        persistent_gnt->handle);

                pages[segs_to_unmap] = persistent_gnt->page;

                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
                        unmap_data.count = segs_to_unmap;
                        BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
                        put_free_pages(blkif, pages, segs_to_unmap);
                        segs_to_unmap = 0;
                }
                kfree(persistent_gnt);
        }
        if (segs_to_unmap > 0) {
                unmap_data.count = segs_to_unmap;
                BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
                put_free_pages(blkif, pages, segs_to_unmap);
        }
}

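/*
 * Trim the tree of persistent grants with a rough LRU policy: grants that
 * have not been used since the previous purge are removed first, and only
 * if that is not enough are recently used (WAS_ACTIVE) grants taken as
 * well. Victims are moved to persistent_purge_list and unmapped
 * asynchronously by xen_blkbk_unmap_purged_grants() via
 * persistent_purge_work.
 */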
static void purge_persistent_gnt(struct xen_blkif *blkif)
{
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        unsigned int num_clean, total;
        bool scan_used = false, clean_used = false;
        struct rb_root *root;

        if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
            (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
            !blkif->vbd.overflow_max_grants)) {
                return;
        }

        if (work_busy(&blkif->persistent_purge_work)) {
                pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
                return;
        }

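        /*
         * Target count: LRU_PERCENT_CLEAN percent of the limit, plus
         * however far persistent_gnt_c currently exceeds
         * xen_blkif_max_pgrants, capped at the number of grants that
         * actually exist. With the default limit of 1056 and the tree
         * exactly full, for example, this is (1056 / 100) * 5 = 50 grants.
         */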
        num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
        num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
        num_clean = min(blkif->persistent_gnt_c, num_clean);
        if ((num_clean == 0) ||
            (num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use))))
                return;

        /*
         * At this point, we can be sure that there will be no calls
         * to get_persistent_gnt (because we are executing this code from
         * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
         * which means that the number of currently used grants will go down,
         * but never up, so we will always be able to remove the requested
         * number of grants.
         */

        total = num_clean;

        pr_debug("Going to purge %u persistent grants\n", num_clean);

        BUG_ON(!list_empty(&blkif->persistent_purge_list));
        root = &blkif->persistent_gnts;
purge_list:
        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                        BLKBACK_INVALID_HANDLE);

                if (clean_used) {
                        clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
                        continue;
                }

                if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
                        continue;
                if (!scan_used &&
                    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
                        continue;

                rb_erase(&persistent_gnt->node, root);
                list_add(&persistent_gnt->remove_node,
                         &blkif->persistent_purge_list);
                if (--num_clean == 0)
                        goto finished;
        }
        /*
         * If we get here it means we also need to start cleaning
         * grants that were used since the last purge in order to reach
         * the requested number.
         */
        if (!scan_used && !clean_used) {
                pr_debug("Still missing %u purged frames\n", num_clean);
                scan_used = true;
                goto purge_list;
        }
finished:
        if (!clean_used) {
                pr_debug("Finished scanning for grants to clean, removing used flag\n");
                clean_used = true;
                goto purge_list;
        }

        blkif->persistent_gnt_c -= (total - num_clean);
        blkif->vbd.overflow_max_grants = 0;

        /* We can defer this work */
        schedule_work(&blkif->persistent_purge_work);
        pr_debug("Purged %u/%u\n", (total - num_clean), total);
}

/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(struct xen_blkif *blkif)
{
        struct pending_req *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&blkif->pending_free_lock, flags);
        if (!list_empty(&blkif->pending_free)) {
                req = list_entry(blkif->pending_free.next, struct pending_req,
                                 free_list);
                list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
        return req;
}

/*
 * Return the 'pending_req' structure back to the free pool. We also
 * wake up the thread if it was waiting for a free request.
 */
static void free_req(struct xen_blkif *blkif, struct pending_req *req)
{
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&blkif->pending_free_lock, flags);
        was_empty = list_empty(&blkif->pending_free);
        list_add(&req->free_list, &blkif->pending_free);
        spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
        if (was_empty)
                wake_up(&blkif->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
                             int operation)
{
        struct xen_vbd *vbd = &blkif->vbd;
        int rc = -EACCES;

        if ((operation != READ) && vbd->readonly)
                goto out;

        if (likely(req->nr_sects)) {
                blkif_sector_t end = req->sector_number + req->nr_sects;

                if (unlikely(end < req->sector_number))
                        goto out;
                if (unlikely(end > vbd_sz(vbd)))
                        goto out;
        }

        req->dev  = vbd->pdevice;
        req->bdev = vbd->bdev;
        rc = 0;

 out:
        return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
        struct xen_vbd *vbd = &blkif->vbd;
        struct xenbus_transaction xbt;
        int err;
        struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
        unsigned long long new_size = vbd_sz(vbd);

        pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
                blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
        pr_info("VBD Resize: new size %llu\n", new_size);
        vbd->size = new_size;
again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                pr_warn("Error starting transaction\n");
                return;
        }
        err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
                            (unsigned long long)vbd_sz(vbd));
        if (err) {
                pr_warn("Error writing new size\n");
                goto abort;
        }
        /*
         * Write the current state; we will use this to synchronize
         * the front-end. If the current state is "connected" the
         * front-end will get the new size information online.
         */
        err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
        if (err) {
                pr_warn("Error writing the state\n");
                goto abort;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
                goto again;
        if (err)
                pr_warn("Error ending transaction\n");
        return;
abort:
        xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif *blkif)
{
        blkif->waiting_reqs = 1;
        wake_up(&blkif->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
        blkif_notify_work(dev_id);
        return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif *blkif)
{
        pr_info("(%s): oo %3llu  |  rd %4llu  |  wr %4llu  |  f %4llu"
                 "  |  ds %4llu | pg: %4u/%4d\n",
                 current->comm, blkif->st_oo_req,
                 blkif->st_rd_req, blkif->st_wr_req,
                 blkif->st_f_req, blkif->st_ds_req,
                 blkif->persistent_gnt_c,
                 xen_blkif_max_pgrants);
        blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
        blkif->st_rd_req = 0;
        blkif->st_wr_req = 0;
        blkif->st_oo_req = 0;
        blkif->st_ds_req = 0;
}

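/*
 * Main loop of the per-backend kernel thread: wait (with a LRU_INTERVAL
 * timeout) for the frontend to kick us and for a free pending_req, consume
 * ring requests via do_block_io_op(), signal an event-channel EOI once no
 * work is left, and periodically purge persistent grants, shrink the
 * free-page pool and optionally print statistics.
 */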
int xen_blkif_schedule(void *arg)
{
        struct xen_blkif *blkif = arg;
        struct xen_vbd *vbd = &blkif->vbd;
        unsigned long timeout;
        int ret;
        bool do_eoi;
        unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

        while (!kthread_should_stop()) {
                if (try_to_freeze())
                        continue;
                if (unlikely(vbd->size != vbd_sz(vbd)))
                        xen_vbd_resize(blkif);

                timeout = msecs_to_jiffies(LRU_INTERVAL);

                timeout = wait_event_interruptible_timeout(
                        blkif->wq,
                        blkif->waiting_reqs || kthread_should_stop(),
                        timeout);
                if (timeout == 0)
                        goto purge_gnt_list;
                timeout = wait_event_interruptible_timeout(
                        blkif->pending_free_wq,
                        !list_empty(&blkif->pending_free) ||
                        kthread_should_stop(),
                        timeout);
                if (timeout == 0)
                        goto purge_gnt_list;

                do_eoi = blkif->waiting_reqs;

                blkif->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */

                ret = do_block_io_op(blkif, &eoi_flags);
                if (ret > 0)
                        blkif->waiting_reqs = 1;
                if (ret == -EACCES)
                        wait_event_interruptible(blkif->shutdown_wq,
                                                 kthread_should_stop());

                if (do_eoi && !blkif->waiting_reqs) {
                        xen_irq_lateeoi(blkif->irq, eoi_flags);
                        eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
                }

purge_gnt_list:
                if (blkif->vbd.feature_gnt_persistent &&
                    time_after(jiffies, blkif->next_lru)) {
                        purge_persistent_gnt(blkif);
                        blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
                }

                /* Shrink if we have more than xen_blkif_max_buffer_pages */
                shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);

                if (log_stats && time_after(jiffies, blkif->st_print))
                        print_stats(blkif);
        }

        /* Drain pending purge work */
        flush_work(&blkif->persistent_purge_work);

        if (log_stats)
                print_stats(blkif);

        blkif->xenblkd = NULL;

        return 0;
}

/*
 * Remove persistent grants and empty the pool of free pages
 */
void xen_blkbk_free_caches(struct xen_blkif *blkif)
{
        /* Free all persistent grant pages */
        if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
                free_persistent_gnts(blkif, &blkif->persistent_gnts,
                        blkif->persistent_gnt_c);

        BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
        blkif->persistent_gnt_c = 0;

        /* Since we are shutting down remove all pages from the buffer */
        shrink_free_pagepool(blkif, 0 /* All */);
}

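/*
 * Prepare the unmap operations for a request's pages: persistent grants
 * are simply released back to the tree (they stay mapped), while
 * non-persistent grants get a gnttab unmap op and have their handle
 * invalidated. Returns the number of ops placed in unmap_ops/unmap_pages.
 */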
static unsigned int xen_blkbk_unmap_prepare(
        struct xen_blkif *blkif,
        struct grant_page **pages,
        unsigned int num,
        struct gnttab_unmap_grant_ref *unmap_ops,
        struct page **unmap_pages)
{
        unsigned int i, invcount = 0;

        for (i = 0; i < num; i++) {
                if (pages[i]->persistent_gnt != NULL) {
                        put_persistent_gnt(blkif, pages[i]->persistent_gnt);
                        continue;
                }
                if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
                        continue;
                unmap_pages[invcount] = pages[i]->page;
                gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
                                    GNTMAP_host_map, pages[i]->handle);
                pages[i]->handle = BLKBACK_INVALID_HANDLE;
                invcount++;
        }

        return invcount;
}

static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
{
        struct pending_req *pending_req = (struct pending_req *)(data->data);
        struct xen_blkif *blkif = pending_req->blkif;

        /*
         * BUG_ON used to reproduce existing behaviour, but is this the
         * best way to deal with this?
         */
        BUG_ON(result);

        put_free_pages(blkif, data->pages, data->count);
        make_response(blkif, pending_req->id,
                      pending_req->operation, pending_req->status);
        free_req(blkif, pending_req);
        /*
         * Make sure the request is freed before releasing blkif,
         * or there could be a race between free_req and the
         * cleanup done in xen_blkif_free during shutdown.
         *
         * NB: The fact that we might try to wake up pending_free_wq
         * before drain_complete (in case there's a drain going on)
         * is not a problem with our current implementation
         * because we can be sure there's no thread waiting on
         * pending_free_wq if there's a drain going on, but it has
         * to be taken into account if the current model is changed.
         */
        if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
                complete(&blkif->drain_complete);
        }
        xen_blkif_put(blkif);
}

static void xen_blkbk_unmap_and_respond(struct pending_req *req)
{
        struct gntab_unmap_queue_data *work = &req->gnttab_unmap_data;
        struct xen_blkif *blkif = req->blkif;
        struct grant_page **pages = req->segments;
        unsigned int invcount;

        invcount = xen_blkbk_unmap_prepare(blkif, pages, req->nr_segs,
                                           req->unmap, req->unmap_pages);

        work->data = req;
        work->done = xen_blkbk_unmap_and_respond_callback;
        work->unmap_ops = req->unmap;
        work->kunmap_ops = NULL;
        work->pages = req->unmap_pages;
        work->count = invcount;

        gnttab_unmap_refs_async(&req->gnttab_unmap_data);
}

/*
 * Unmap the grant references.
 *
 * This could accumulate ops up to the batch size to reduce the number
 * of hypercalls, but since this is only used in error paths there's
 * no real need.
 */
static void xen_blkbk_unmap(struct xen_blkif *blkif,
                            struct grant_page *pages[],
                            int num)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int invcount = 0;
        int ret;

        while (num) {
                unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);

                invcount = xen_blkbk_unmap_prepare(blkif, pages, batch,
                                                   unmap, unmap_pages);
                if (invcount) {
                        ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
                        BUG_ON(ret);
                        put_free_pages(blkif, unmap_pages, invcount);
                }
                pages += batch;
                num -= batch;
        }
}

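/*
 * Map the grant references of a request, in batches of up to
 * BLKIF_MAX_SEGMENTS_PER_REQUEST grants per hypercall. With persistent
 * grants enabled, already-mapped grants are reused from the red-black tree
 * and newly mapped ones are added to it while there is room; otherwise the
 * grant is used for this request only.
 */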
static int xen_blkbk_map(struct xen_blkif *blkif,
                         struct grant_page *pages[],
                         int num, bool ro)
{
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt = NULL;
        phys_addr_t addr = 0;
        int i, seg_idx, new_map_idx;
        int segs_to_map = 0;
        int ret = 0;
        int last_map = 0, map_until = 0;
        int use_persistent_gnts;

        use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

        /*
         * Fill out preq.nr_sects with the proper number of sectors, and set
         * up map[..] with the PFN of the page in our domain and the
         * corresponding grant reference for each page.
         */
again:
        for (i = map_until; i < num; i++) {
                uint32_t flags;

                if (use_persistent_gnts)
                        persistent_gnt = get_persistent_gnt(
                                blkif,
                                pages[i]->gref);

                if (persistent_gnt) {
                        /*
                         * We are using persistent grants and
                         * the grant is already mapped
                         */
                        pages[i]->page = persistent_gnt->page;
                        pages[i]->persistent_gnt = persistent_gnt;
                } else {
                        if (get_free_page(blkif, &pages[i]->page)) {
                                put_free_pages(blkif, pages_to_gnt, segs_to_map);
                                ret = -ENOMEM;
                                goto out;
                        }
                        addr = vaddr(pages[i]->page);
                        pages_to_gnt[segs_to_map] = pages[i]->page;
                        pages[i]->persistent_gnt = NULL;
                        flags = GNTMAP_host_map;
                        if (!use_persistent_gnts && ro)
                                flags |= GNTMAP_readonly;
                        gnttab_set_map_op(&map[segs_to_map++], addr,
                                          flags, pages[i]->gref,
                                          blkif->domid);
                }
                map_until = i + 1;
                if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
                        break;
        }

        if (segs_to_map)
                ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);

        /*
         * Now swizzle the MFN in our domain with the MFN from the other domain
         * so that when we access vaddr(pending_req,i) it has the contents of
         * the page from the other domain.
         */
        for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
                if (!pages[seg_idx]->persistent_gnt) {
                        /* This is a newly mapped grant */
                        BUG_ON(new_map_idx >= segs_to_map);
                        if (unlikely(map[new_map_idx].status != 0)) {
                                pr_debug("invalid buffer -- could not remap it\n");
                                put_free_pages(blkif, &pages[seg_idx]->page, 1);
                                pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
                                ret |= !ret;
                                goto next;
                        }
                        pages[seg_idx]->handle = map[new_map_idx].handle;
                } else {
                        continue;
                }
                if (use_persistent_gnts &&
                    blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
                        /*
                         * We are using persistent grants, the grant is
                         * not mapped but we might have room for it.
                         */
                        persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
                                                 GFP_KERNEL);
                        if (!persistent_gnt) {
                                /*
                                 * If we don't have enough memory to
                                 * allocate the persistent_gnt struct
                                 * map this grant non-persistently
                                 */
                                goto next;
                        }
                        persistent_gnt->gnt = map[new_map_idx].ref;
                        persistent_gnt->handle = map[new_map_idx].handle;
                        persistent_gnt->page = pages[seg_idx]->page;
                        if (add_persistent_gnt(blkif,
                                               persistent_gnt)) {
                                kfree(persistent_gnt);
                                persistent_gnt = NULL;
                                goto next;
                        }
                        pages[seg_idx]->persistent_gnt = persistent_gnt;
                        pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
                                 persistent_gnt->gnt, blkif->persistent_gnt_c,
                                 xen_blkif_max_pgrants);
                        goto next;
                }
                if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
                        blkif->vbd.overflow_max_grants = 1;
                        pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
                                 blkif->domid, blkif->vbd.handle);
                }
                /*
                 * We could not map this grant persistently, so use it as
                 * a non-persistent grant.
                 */
next:
                new_map_idx++;
        }
        segs_to_map = 0;
        last_map = map_until;
        if (!ret && map_until != num)
                goto again;

out:
        for (i = last_map; i < num; i++) {
                /* Don't zap current batch's valid persistent grants. */
                if (i >= map_until)
                        pages[i]->persistent_gnt = NULL;
                pages[i]->handle = BLKBACK_INVALID_HANDLE;
        }

        return ret;
}

static int xen_blkbk_map_seg(struct pending_req *pending_req)
{
        int rc;

        rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
                           pending_req->nr_segs,
                           (pending_req->operation != BLKIF_OP_READ));

        return rc;
}

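/*
 * For an indirect request, map the indirect descriptor pages themselves
 * (read-only) and walk the blkif_request_segment arrays they contain to
 * fill in the per-segment grefs, offsets and sector counts. The descriptor
 * pages are unmapped again before returning.
 */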
static int xen_blkbk_parse_indirect(struct blkif_request *req,
                                    struct pending_req *pending_req,
                                    struct seg_buf seg[],
                                    struct phys_req *preq)
{
        struct grant_page **pages = pending_req->indirect_pages;
        struct xen_blkif *blkif = pending_req->blkif;
        int indirect_grefs, rc, n, nseg, i;
        struct blkif_request_segment *segments = NULL;

        nseg = pending_req->nr_segs;
        indirect_grefs = INDIRECT_PAGES(nseg);
        BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

        for (i = 0; i < indirect_grefs; i++)
                pages[i]->gref = req->u.indirect.indirect_grefs[i];

        rc = xen_blkbk_map(blkif, pages, indirect_grefs, true);
        if (rc)
                goto unmap;

        for (n = 0, i = 0; n < nseg; n++) {
                uint8_t first_sect, last_sect;

                if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
                        /* Map indirect segments */
                        if (segments)
                                kunmap_atomic(segments);
                        segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
                }
                i = n % SEGS_PER_INDIRECT_FRAME;

                pending_req->segments[n]->gref = segments[i].gref;

                first_sect = READ_ONCE(segments[i].first_sect);
                last_sect = READ_ONCE(segments[i].last_sect);
                if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
                        rc = -EINVAL;
                        goto unmap;
                }

                seg[n].nsec = last_sect - first_sect + 1;
                seg[n].offset = first_sect << 9;
                preq->nr_sects += seg[n].nsec;
        }

unmap:
        if (segments)
                kunmap_atomic(segments);
        xen_blkbk_unmap(blkif, pages, indirect_grefs);
        return rc;
}

static int dispatch_discard_io(struct xen_blkif *blkif,
                                struct blkif_request *req)
{
        int err = 0;
        int status = BLKIF_RSP_OKAY;
        struct block_device *bdev = blkif->vbd.bdev;
        unsigned long secure;
        struct phys_req preq;

        xen_blkif_get(blkif);

        preq.sector_number = req->u.discard.sector_number;
        preq.nr_sects      = req->u.discard.nr_sectors;

        err = xen_vbd_translate(&preq, blkif, WRITE);
        if (err) {
                pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
                        preq.sector_number,
                        preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
                goto fail_response;
        }
        blkif->st_ds_req++;

        secure = (blkif->vbd.discard_secure &&
                 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
                 BLKDEV_DISCARD_SECURE : 0;

        err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
                                   req->u.discard.nr_sectors,
                                   GFP_KERNEL, secure);
fail_response:
        if (err == -EOPNOTSUPP) {
                pr_debug("discard op failed, not supported\n");
                status = BLKIF_RSP_EOPNOTSUPP;
        } else if (err)
                status = BLKIF_RSP_ERROR;

        make_response(blkif, req->u.discard.id, req->operation, status);
        xen_blkif_put(blkif);
        return err;
}

static int dispatch_other_io(struct xen_blkif *blkif,
                             struct blkif_request *req,
                             struct pending_req *pending_req)
{
        free_req(blkif, pending_req);
        make_response(blkif, req->u.other.id, req->operation,
                      BLKIF_RSP_EOPNOTSUPP);
        return -EIO;
}

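/*
 * Block until all in-flight requests have completed. This is used to give
 * BLKIF_OP_WRITE_BARRIER its ordering semantics: the barrier write is only
 * submitted once all previously issued I/O has finished.
 */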
static void xen_blk_drain_io(struct xen_blkif *blkif)
{
        atomic_set(&blkif->drain, 1);
        do {
                if (atomic_read(&blkif->inflight) == 0)
                        break;
                wait_for_completion_interruptible_timeout(
                                &blkif->drain_complete, HZ);

                if (!atomic_read(&blkif->drain))
                        break;
        } while (!kthread_should_stop());
        atomic_set(&blkif->drain, 0);
}

/*
 * Completion callback on the bios. Called as bio->bi_end_io().
 */

static void __end_block_io_op(struct pending_req *pending_req, int error)
{
        /* An error fails the entire request. */
        if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
            (error == -EOPNOTSUPP)) {
                pr_debug("flush diskcache op failed, not supported\n");
                xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
                    (error == -EOPNOTSUPP)) {
                pr_debug("write barrier op failed, not supported\n");
                xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if (error) {
                pr_debug("Buffer not up-to-date at end of operation,"
                         " error=%d\n", error);
                pending_req->status = BLKIF_RSP_ERROR;
        }

        /*
         * If all of the bios have completed it is time to unmap
         * the grant references associated with 'request' and provide
         * the proper response on the ring.
         */
        if (atomic_dec_and_test(&pending_req->pendcnt))
                xen_blkbk_unmap_and_respond(pending_req);
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio)
{
        __end_block_io_op(bio->bi_private, bio->bi_error);
        bio_put(bio);
}

/*
 * Function to copy the 'struct blkif_request' from the ring buffer (which
 * has the sectors we want, the number of them, grant references, etc.) and
 * transmute it to the block API to hand it over to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif *blkif, unsigned int *eoi_flags)
{
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        struct blkif_request req;
        struct pending_req *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;

        rc = blk_rings->common.req_cons;
        rp = blk_rings->common.sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */

        if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
                rc = blk_rings->common.rsp_prod_pvt;
                pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
                        rp, rc, rp - rc, blkif->vbd.pdevice);
                return -EACCES;
        }
        while (rc != rp) {

                if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
                        break;

                /* We've seen a request, so clear spurious eoi flag. */
                *eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;

                if (kthread_should_stop()) {
                        more_to_do = 1;
                        break;
                }

                pending_req = alloc_req(blkif);
                if (pending_req == NULL) {
                        blkif->st_oo_req++;
                        more_to_do = 1;
                        break;
                }

                switch (blkif->blk_protocol) {
                case BLKIF_PROTOCOL_NATIVE:
                        memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
                        break;
                case BLKIF_PROTOCOL_X86_32:
                        blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
                        break;
                case BLKIF_PROTOCOL_X86_64:
                        blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
                        break;
                default:
                        BUG();
                }
                blk_rings->common.req_cons = ++rc; /* before make_response() */

                /* Apply all sanity checks to /private copy/ of request. */
                barrier();

                switch (req.operation) {
                case BLKIF_OP_READ:
                case BLKIF_OP_WRITE:
                case BLKIF_OP_WRITE_BARRIER:
                case BLKIF_OP_FLUSH_DISKCACHE:
                case BLKIF_OP_INDIRECT:
                        if (dispatch_rw_block_io(blkif, &req, pending_req))
                                goto done;
                        break;
                case BLKIF_OP_DISCARD:
                        free_req(blkif, pending_req);
                        if (dispatch_discard_io(blkif, &req))
                                goto done;
                        break;
                default:
                        if (dispatch_other_io(blkif, &req, pending_req))
                                goto done;
                        break;
                }

                /* Yield point for this unbounded loop. */
                cond_resched();
        }
done:
        return more_to_do;
}

static int
do_block_io_op(struct xen_blkif *blkif, unsigned int *eoi_flags)
{
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        int more_to_do;

        do {
                more_to_do = __do_block_io_op(blkif, eoi_flags);
                if (more_to_do)
                        break;

                RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
        } while (more_to_do);

        return more_to_do;
}

/*
 * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
 * and a call to 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif *blkif,
                                struct blkif_request *req,
                                struct pending_req *pending_req)
{
        struct phys_req preq;
        struct seg_buf *seg = pending_req->seg;
        unsigned int nseg;
        struct bio *bio = NULL;
        struct bio **biolist = pending_req->biolist;
        int i, nbio = 0;
        int operation;
        struct blk_plug plug;
        bool drain = false;
        struct grant_page **pages = pending_req->segments;
        unsigned short req_operation;

        req_operation = req->operation == BLKIF_OP_INDIRECT ?
                        req->u.indirect.indirect_op : req->operation;

        if ((req->operation == BLKIF_OP_INDIRECT) &&
            (req_operation != BLKIF_OP_READ) &&
            (req_operation != BLKIF_OP_WRITE)) {
                pr_debug("Invalid indirect operation (%u)\n", req_operation);
                goto fail_response;
        }

        switch (req_operation) {
        case BLKIF_OP_READ:
                blkif->st_rd_req++;
                operation = READ;
                break;
        case BLKIF_OP_WRITE:
                blkif->st_wr_req++;
                operation = WRITE_ODIRECT;
                break;
        case BLKIF_OP_WRITE_BARRIER:
                drain = true;
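                /* fall through: a barrier is implemented as drain + flush */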
        case BLKIF_OP_FLUSH_DISKCACHE:
                blkif->st_f_req++;
                operation = WRITE_FLUSH;
                break;
        default:
                operation = 0; /* make gcc happy */
                goto fail_response;
                break;
        }

        /* Check that the number of segments is sane. */
        nseg = req->operation == BLKIF_OP_INDIRECT ?
               req->u.indirect.nr_segments : req->u.rw.nr_segments;

        if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
            unlikely((req->operation != BLKIF_OP_INDIRECT) &&
                     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
            unlikely((req->operation == BLKIF_OP_INDIRECT) &&
                     (nseg > MAX_INDIRECT_SEGMENTS))) {
                pr_debug("Bad number of segments in request (%d)\n", nseg);
                /* Haven't submitted any bios yet. */
                goto fail_response;
        }

        preq.nr_sects      = 0;

        pending_req->blkif     = blkif;
        pending_req->id        = req->u.rw.id;
        pending_req->operation = req_operation;
        pending_req->status    = BLKIF_RSP_OKAY;
        pending_req->nr_segs   = nseg;

        if (req->operation != BLKIF_OP_INDIRECT) {
                preq.dev               = req->u.rw.handle;
                preq.sector_number     = req->u.rw.sector_number;
                for (i = 0; i < nseg; i++) {
                        pages[i]->gref = req->u.rw.seg[i].gref;
                        seg[i].nsec = req->u.rw.seg[i].last_sect -
                                req->u.rw.seg[i].first_sect + 1;
                        seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
                        if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
                            (req->u.rw.seg[i].last_sect <
                             req->u.rw.seg[i].first_sect))
                                goto fail_response;
                        preq.nr_sects += seg[i].nsec;
                }
        } else {
                preq.dev               = req->u.indirect.handle;
                preq.sector_number     = req->u.indirect.sector_number;
                if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
                        goto fail_response;
        }

        if (xen_vbd_translate(&preq, blkif, operation) != 0) {
                pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
                         operation == READ ? "read" : "write",
                         preq.sector_number,
                         preq.sector_number + preq.nr_sects,
                         blkif->vbd.pdevice);
                goto fail_response;
        }

        /*
         * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
         * is set there.
         */
        for (i = 0; i < nseg; i++) {
                if (((int)preq.sector_number|(int)seg[i].nsec) &
                    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
                        pr_debug("Misaligned I/O request from domain %d\n",
                                 blkif->domid);
                        goto fail_response;
                }
        }

        /*
         * Wait on all outstanding I/Os and once they have completed
         * issue the WRITE_FLUSH.
         */
        if (drain)
                xen_blk_drain_io(pending_req->blkif);

        /*
         * If we have failed at this point, we need to undo the M2P override,
         * set gnttab_set_unmap_op on all of the grant references and perform
         * the hypercall to unmap the grants - that is all done in
         * xen_blkbk_unmap.
         */
        if (xen_blkbk_map_seg(pending_req))
                goto fail_flush;

        /*
         * The corresponding xen_blkif_put is done in __end_block_io_op, once
         * all bios for this request have completed.
         */
        xen_blkif_get(blkif);
        atomic_inc(&blkif->inflight);

        for (i = 0; i < nseg; i++) {
                while ((bio == NULL) ||
                       (bio_add_page(bio,
                                     pages[i]->page,
                                     seg[i].nsec << 9,
                                     seg[i].offset) == 0)) {

                        int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
                        bio = bio_alloc(GFP_KERNEL, nr_iovecs);
                        if (unlikely(bio == NULL))
                                goto fail_put_bio;

                        biolist[nbio++] = bio;
                        bio->bi_bdev    = preq.bdev;
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
                        bio->bi_iter.bi_sector  = preq.sector_number;
                }

                preq.sector_number += seg[i].nsec;
        }

        /* This will be hit if the operation was a flush. */
        if (!bio) {
                BUG_ON(operation != WRITE_FLUSH);

                bio = bio_alloc(GFP_KERNEL, 0);
                if (unlikely(bio == NULL))
                        goto fail_put_bio;

                biolist[nbio++] = bio;
                bio->bi_bdev    = preq.bdev;
                bio->bi_private = pending_req;
                bio->bi_end_io  = end_block_io_op;
        }

        atomic_set(&pending_req->pendcnt, nbio);
        blk_start_plug(&plug);

        for (i = 0; i < nbio; i++)
                submit_bio(operation, biolist[i]);

        /* Let the I/Os go... */
        blk_finish_plug(&plug);

        if (operation == READ)
                blkif->st_rd_sect += preq.nr_sects;
        else if (operation & WRITE)
                blkif->st_wr_sect += preq.nr_sects;

        return 0;

 fail_flush:
        xen_blkbk_unmap(blkif, pending_req->segments,
                        pending_req->nr_segs);
 fail_response:
        /* Haven't submitted any bios yet. */
        make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
        free_req(blkif, pending_req);
        msleep(1); /* back off a bit */
        return -EIO;

 fail_put_bio:
        for (i = 0; i < nbio; i++)
                bio_put(biolist[i]);
        atomic_set(&pending_req->pendcnt, 1);
        __end_block_io_op(pending_req, -EINVAL);
        msleep(1); /* back off a bit */
        return -EIO;
}

/*
 * Put a response on the ring about how the operation fared.
 */
static void make_response(struct xen_blkif *blkif, u64 id,
                          unsigned short op, int st)
{
        struct blkif_response *resp;
        unsigned long     flags;
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        int notify;

        spin_lock_irqsave(&blkif->blk_ring_lock, flags);
        /* Place on the response ring for the relevant domain. */
        switch (blkif->blk_protocol) {
        case BLKIF_PROTOCOL_NATIVE:
                resp = RING_GET_RESPONSE(&blk_rings->native,
                                         blk_rings->native.rsp_prod_pvt);
                break;
        case BLKIF_PROTOCOL_X86_32:
                resp = RING_GET_RESPONSE(&blk_rings->x86_32,
                                         blk_rings->x86_32.rsp_prod_pvt);
                break;
        case BLKIF_PROTOCOL_X86_64:
                resp = RING_GET_RESPONSE(&blk_rings->x86_64,
                                         blk_rings->x86_64.rsp_prod_pvt);
                break;
        default:
                BUG();
        }

        resp->id        = id;
        resp->operation = op;
        resp->status    = st;

        blk_rings->common.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
        if (notify)
                notify_remote_via_irq(blkif->irq);
}

static int __init xen_blkif_init(void)
{
        int rc = 0;

        if (!xen_domain())
                return -ENODEV;

        if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
                pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
                        xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
                xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
        }

        rc = xen_blkif_interface_init();
        if (rc)
                goto failed_init;

        rc = xen_blkif_xenbus_init();
        if (rc)
                goto failed_init;

 failed_init:
        return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");