GNU Linux-libre 4.19.245-gnu1
[releases.git] / drivers / staging / vc04_services / interface / vchiq_arm / vchiq_2835_arm.c
1 /**
2  * Copyright (c) 2010-2012 Broadcom. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions, and the following disclaimer,
9  *    without modification.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. The names of the above-listed copyright holders may not be used
14  *    to endorse or promote products derived from this software without
15  *    specific prior written permission.
16  *
17  * ALTERNATIVELY, this software may be distributed under the terms of the
18  * GNU General Public License ("GPL") version 2, as published by the Free
19  * Software Foundation.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
22  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
23  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
25  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
26  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
28  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/errno.h>
37 #include <linux/interrupt.h>
38 #include <linux/pagemap.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/io.h>
41 #include <linux/platform_device.h>
42 #include <linux/uaccess.h>
43 #include <linux/mm.h>
44 #include <linux/of.h>
45 #include <soc/bcm2835/raspberrypi-firmware.h>
46
47 #define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
48
49 #include "vchiq_arm.h"
50 #include "vchiq_connected.h"
51 #include "vchiq_killable.h"
52 #include "vchiq_pagelist.h"
53
54 #define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
55
56 #define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
57 #define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX  1
58
59 #define BELL0   0x00
60 #define BELL2   0x08
61
/* Per-VCHIQ-state platform data for the 2835. */
struct vchiq_2835_state {
	/* Non-zero once vchiq_arm_init_state() has succeeded */
	int inited;
	VCHIQ_ARM_STATE_T arm_state;
};
66
/* Bookkeeping for one DMA-mapped bulk-transfer pagelist.  All of the
 * arrays below live inside the single coherent allocation pointed to by
 * @pagelist, so cleanup_pagelistinfo() frees everything with one call.
 */
struct vchiq_pagelist_info {
	PAGELIST_T *pagelist;			/* shared-with-VPU page list */
	size_t pagelist_buffer_size;		/* size of the coherent buffer */
	dma_addr_t dma_addr;			/* bus address of @pagelist */
	enum dma_data_direction dma_dir;	/* DMA_TO_DEVICE or DMA_FROM_DEVICE */
	unsigned int num_pages;			/* entries in @pages/@scatterlist */
	unsigned int pages_need_release;	/* true if pages came from get_user_pages */
	struct page **pages;			/* backing pages of the user buffer */
	struct scatterlist *scatterlist;	/* sg table used for DMA mapping */
	unsigned int scatterlist_mapped;	/* true while dma_map_sg() is live */
};
78
/* Doorbell register block mapped in vchiq_platform_init() */
static void __iomem *g_regs;
/* This value is the size of the L2 cache lines as understood by the
 * VPU firmware, which determines the required alignment of the
 * offsets/sizes in pagelists.
 *
 * Modern VPU firmware looks for a DT "cache-line-size" property in
 * the VCHIQ node and will overwrite it with the actual L2 cache size,
 * which the kernel must then respect.  That property was rejected
 * upstream, so we have to use the VPU firmware's compatibility value
 * of 32.
 */
static unsigned int g_cache_line_size = 32;
/* Size of one fragment: 2 * g_cache_line_size (head + tail fragment) */
static unsigned int g_fragments_size;
/* Base of the fragment pool (follows the slots in the coherent buffer) */
static char *g_fragments_base;
/* Head of the singly-linked free-fragment list */
static char *g_free_fragments;
/* Counts available fragments; bulk reads block here when pool is empty */
static struct semaphore g_free_fragments_sema;
/* Device used for all DMA mapping/allocation after probe */
static struct device *g_dev;

/* Serialises manipulation of the g_free_fragments list */
static DEFINE_SEMAPHORE(g_free_fragments_mutex);

static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id);

static struct vchiq_pagelist_info *
create_pagelist(char __user *buf, size_t count, unsigned short type);

static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
	      int actual);
108
109 int vchiq_platform_init(struct platform_device *pdev, VCHIQ_STATE_T *state)
110 {
111         struct device *dev = &pdev->dev;
112         struct rpi_firmware *fw = platform_get_drvdata(pdev);
113         VCHIQ_SLOT_ZERO_T *vchiq_slot_zero;
114         struct resource *res;
115         void *slot_mem;
116         dma_addr_t slot_phys;
117         u32 channelbase;
118         int slot_mem_size, frag_mem_size;
119         int err, irq, i;
120
121         /*
122          * VCHI messages between the CPU and firmware use
123          * 32-bit bus addresses.
124          */
125         err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
126
127         if (err < 0)
128                 return err;
129
130         g_fragments_size = 2 * g_cache_line_size;
131
132         /* Allocate space for the channels in coherent memory */
133         slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
134         frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);
135
136         slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
137                                        &slot_phys, GFP_KERNEL);
138         if (!slot_mem) {
139                 dev_err(dev, "could not allocate DMA memory\n");
140                 return -ENOMEM;
141         }
142
143         WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);
144
145         vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
146         if (!vchiq_slot_zero)
147                 return -EINVAL;
148
149         vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
150                 (int)slot_phys + slot_mem_size;
151         vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
152                 MAX_FRAGMENTS;
153
154         g_fragments_base = (char *)slot_mem + slot_mem_size;
155
156         g_free_fragments = g_fragments_base;
157         for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
158                 *(char **)&g_fragments_base[i*g_fragments_size] =
159                         &g_fragments_base[(i + 1)*g_fragments_size];
160         }
161         *(char **)&g_fragments_base[i * g_fragments_size] = NULL;
162         sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
163
164         if (vchiq_init_state(state, vchiq_slot_zero, 0) != VCHIQ_SUCCESS)
165                 return -EINVAL;
166
167         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
168         g_regs = devm_ioremap_resource(&pdev->dev, res);
169         if (IS_ERR(g_regs))
170                 return PTR_ERR(g_regs);
171
172         irq = platform_get_irq(pdev, 0);
173         if (irq <= 0) {
174                 dev_err(dev, "failed to get IRQ\n");
175                 return irq;
176         }
177
178         err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
179                                "VCHIQ doorbell", state);
180         if (err) {
181                 dev_err(dev, "failed to register irq=%d\n", irq);
182                 return err;
183         }
184
185         /* Send the base address of the slots to VideoCore */
186         channelbase = slot_phys;
187         err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
188                                     &channelbase, sizeof(channelbase));
189         if (err || channelbase) {
190                 dev_err(dev, "failed to set channelbase\n");
191                 return err ? : -ENXIO;
192         }
193
194         g_dev = dev;
195         vchiq_log_info(vchiq_arm_log_level,
196                 "vchiq_init - done (slots %pK, phys %pad)",
197                 vchiq_slot_zero, &slot_phys);
198
199         vchiq_call_connected_callbacks();
200
201         return 0;
202 }
203
204 VCHIQ_STATUS_T
205 vchiq_platform_init_state(VCHIQ_STATE_T *state)
206 {
207         VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
208         struct vchiq_2835_state *platform_state;
209
210         state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
211         if (!state->platform_state)
212                 return VCHIQ_ERROR;
213
214         platform_state = (struct vchiq_2835_state *)state->platform_state;
215
216         platform_state->inited = 1;
217         status = vchiq_arm_init_state(state, &platform_state->arm_state);
218
219         if (status != VCHIQ_SUCCESS)
220                 platform_state->inited = 0;
221
222         return status;
223 }
224
225 VCHIQ_ARM_STATE_T*
226 vchiq_platform_get_arm_state(VCHIQ_STATE_T *state)
227 {
228         struct vchiq_2835_state *platform_state;
229
230         platform_state   = (struct vchiq_2835_state *)state->platform_state;
231
232         WARN_ON_ONCE(!platform_state->inited);
233
234         return &platform_state->arm_state;
235 }
236
/* Signal a remote (VPU-side) event, ringing the doorbell if the VPU has
 * armed it.  The barrier ordering here is critical: slot data must be
 * visible before 'fired', and 'fired' before the doorbell write.
 */
void
remote_event_signal(REMOTE_EVENT_T *event)
{
	/* Make prior writes to shared memory visible before 'fired' */
	wmb();

	event->fired = 1;

	dsb(sy);         /* data barrier operation */

	/* Only ring the doorbell if the VPU is actually waiting on it */
	if (event->armed)
		writel(0, g_regs + BELL2); /* trigger vc interrupt */
}
249
250 VCHIQ_STATUS_T
251 vchiq_prepare_bulk_data(VCHIQ_BULK_T *bulk, VCHI_MEM_HANDLE_T memhandle,
252         void *offset, int size, int dir)
253 {
254         struct vchiq_pagelist_info *pagelistinfo;
255
256         WARN_ON(memhandle != VCHI_MEM_HANDLE_INVALID);
257
258         pagelistinfo = create_pagelist((char __user *)offset, size,
259                                        (dir == VCHIQ_BULK_RECEIVE)
260                                        ? PAGELIST_READ
261                                        : PAGELIST_WRITE);
262
263         if (!pagelistinfo)
264                 return VCHIQ_ERROR;
265
266         bulk->handle = memhandle;
267         bulk->data = (void *)(unsigned long)pagelistinfo->dma_addr;
268
269         /*
270          * Store the pagelistinfo address in remote_data,
271          * which isn't used by the slave.
272          */
273         bulk->remote_data = pagelistinfo;
274
275         return VCHIQ_SUCCESS;
276 }
277
278 void
279 vchiq_complete_bulk(VCHIQ_BULK_T *bulk)
280 {
281         if (bulk && bulk->remote_data && bulk->actual)
282                 free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
283                               bulk->actual);
284 }
285
286 void
287 vchiq_transfer_bulk(VCHIQ_BULK_T *bulk)
288 {
289         /*
290          * This should only be called on the master (VideoCore) side, but
291          * provide an implementation to avoid the need for ifdefery.
292          */
293         BUG();
294 }
295
/* Emit a one-line platform identification into the debug dump. */
void
vchiq_dump_platform_state(void *dump_context)
{
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf), "  Platform: 2835 (VC master)");
	/* +1 so the terminating NUL is included in the dump */
	vchiq_dump(dump_context, buf, len + 1);
}
306
307 VCHIQ_STATUS_T
308 vchiq_platform_suspend(VCHIQ_STATE_T *state)
309 {
310         return VCHIQ_ERROR;
311 }
312
313 VCHIQ_STATUS_T
314 vchiq_platform_resume(VCHIQ_STATE_T *state)
315 {
316         return VCHIQ_SUCCESS;
317 }
318
/* No platform-specific action is required on pause. */
void
vchiq_platform_paused(VCHIQ_STATE_T *state)
{
}
323
/* No platform-specific action is required on resume. */
void
vchiq_platform_resumed(VCHIQ_STATE_T *state)
{
}
328
329 int
330 vchiq_platform_videocore_wanted(VCHIQ_STATE_T *state)
331 {
332         return 1; // autosuspend not supported - videocore always wanted
333 }
334
/* The suspend timer is never used on this platform. */
int
vchiq_platform_use_suspend_timer(void)
{
	return 0;
}
/* Nothing dynamic to report: the suspend timer is never active here. */
void
vchiq_dump_platform_use_state(VCHIQ_STATE_T *state)
{
	vchiq_log_info(vchiq_arm_log_level, "Suspend timer not in use");
}
/* Suspend-timer timeout hook; unused since the timer is never armed. */
void
vchiq_platform_handle_timeout(VCHIQ_STATE_T *state)
{
	(void)state;
}
350 /*
351  * Local functions
352  */
353
354 static irqreturn_t
355 vchiq_doorbell_irq(int irq, void *dev_id)
356 {
357         VCHIQ_STATE_T *state = dev_id;
358         irqreturn_t ret = IRQ_NONE;
359         unsigned int status;
360
361         /* Read (and clear) the doorbell */
362         status = readl(g_regs + BELL0);
363
364         if (status & 0x4) {  /* Was the doorbell rung? */
365                 remote_event_pollall(state);
366                 ret = IRQ_HANDLED;
367         }
368
369         return ret;
370 }
371
/* Release everything held by a pagelistinfo: the DMA mapping (if live),
 * the page references (if taken via get_user_pages_fast) and finally
 * the coherent buffer that contains the pagelistinfo itself.  The order
 * matters: unmap before releasing pages, and the dma_free_coherent()
 * must be last because it frees pagelistinfo's own storage.
 */
static void
cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
{
	if (pagelistinfo->scatterlist_mapped) {
		dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
			     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	}

	if (pagelistinfo->pages_need_release) {
		unsigned int i;

		for (i = 0; i < pagelistinfo->num_pages; i++)
			put_page(pagelistinfo->pages[i]);
	}

	/* Frees pagelistinfo itself - do not touch it afterwards */
	dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
			  pagelistinfo->pagelist, pagelistinfo->dma_addr);
}
390
391 /* There is a potential problem with partial cache lines (pages?)
392  * at the ends of the block when reading. If the CPU accessed anything in
393  * the same line (page?) then it may have pulled old data into the cache,
394  * obscuring the new data underneath. We can solve this by transferring the
395  * partial cache lines separately, and allowing the ARM to copy into the
396  * cached area.
397  */
398
/* Build and DMA-map a PAGELIST_T describing the physical pages behind a
 * user (or vmalloc) buffer for a bulk transfer.
 *
 * buf/count: the buffer to describe.
 * type:      PAGELIST_READ (device-to-memory) or PAGELIST_WRITE.
 *
 * Returns the bookkeeping structure (released by free_pagelist() or
 * cleanup_pagelistinfo()), or NULL on any failure.  The pagelist, page
 * pointers, scatterlist and the info struct all live in one coherent
 * allocation.
 */
static struct vchiq_pagelist_info *
create_pagelist(char __user *buf, size_t count, unsigned short type)
{
	PAGELIST_T *pagelist;
	struct vchiq_pagelist_info *pagelistinfo;
	struct page **pages;
	u32 *addrs;
	unsigned int num_pages, offset, i, k;
	int actual_pages;
	size_t pagelist_size;
	struct scatterlist *scatterlist, *sg;
	int dma_buffers;
	dma_addr_t dma_addr;

	if (count >= INT_MAX - PAGE_SIZE)
		return NULL;

	offset = ((unsigned int)(unsigned long)buf & (PAGE_SIZE - 1));
	num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);

	/* Reject page counts whose bookkeeping would overflow size_t */
	if (num_pages > (SIZE_MAX - sizeof(PAGELIST_T) -
			 sizeof(struct vchiq_pagelist_info)) /
			(sizeof(u32) + sizeof(pages[0]) +
			 sizeof(struct scatterlist)))
		return NULL;

	pagelist_size = sizeof(PAGELIST_T) +
			(num_pages * sizeof(u32)) +
			(num_pages * sizeof(pages[0]) +
			(num_pages * sizeof(struct scatterlist))) +
			sizeof(struct vchiq_pagelist_info);

	/* Allocate enough storage to hold the page pointers and the page
	 * list
	 */
	pagelist = dma_zalloc_coherent(g_dev,
				       pagelist_size,
				       &dma_addr,
				       GFP_KERNEL);

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);

	if (!pagelist)
		return NULL;

	/* Carve the single allocation into its four regions */
	addrs		= pagelist->addrs;
	pages		= (struct page **)(addrs + num_pages);
	scatterlist	= (struct scatterlist *)(pages + num_pages);
	pagelistinfo	= (struct vchiq_pagelist_info *)
			  (scatterlist + num_pages);

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Populate the fields of the pagelistinfo structure */
	pagelistinfo->pagelist = pagelist;
	pagelistinfo->pagelist_buffer_size = pagelist_size;
	pagelistinfo->dma_addr = dma_addr;
	pagelistinfo->dma_dir =  (type == PAGELIST_WRITE) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE;
	pagelistinfo->num_pages = num_pages;
	pagelistinfo->pages_need_release = 0;
	pagelistinfo->pages = pages;
	pagelistinfo->scatterlist = scatterlist;
	pagelistinfo->scatterlist_mapped = 0;

	if (is_vmalloc_addr(buf)) {
		/* Kernel vmalloc buffer: translate each virtual page */
		unsigned long length = count;
		unsigned int off = offset;

		for (actual_pages = 0; actual_pages < num_pages;
		     actual_pages++) {
			struct page *pg = vmalloc_to_page(buf + (actual_pages *
								 PAGE_SIZE));
			size_t bytes = PAGE_SIZE - off;

			if (!pg) {
				cleanup_pagelistinfo(pagelistinfo);
				return NULL;
			}

			if (bytes > length)
				bytes = length;
			pages[actual_pages] = pg;
			length -= bytes;
			off = 0;
		}
		/* do not try and release vmalloc pages */
	} else {
		/* Userspace buffer: pin the pages */
		actual_pages = get_user_pages_fast(
					  (unsigned long)buf & PAGE_MASK,
					  num_pages,
					  type == PAGELIST_READ,
					  pages);

		if (actual_pages != num_pages) {
			vchiq_log_info(vchiq_arm_log_level,
				       "%s - only %d/%d pages locked",
				       __func__, actual_pages, num_pages);

			/* This is probably due to the process being killed */
			while (actual_pages > 0) {
				actual_pages--;
				put_page(pages[actual_pages]);
			}
			cleanup_pagelistinfo(pagelistinfo);
			return NULL;
		}
		 /* release user pages */
		pagelistinfo->pages_need_release = 1;
	}

	/*
	 * Initialize the scatterlist so that the magic cookie
	 *  is filled if debugging is enabled
	 */
	sg_init_table(scatterlist, num_pages);
	/* Now set the pages for each scatterlist */
	for (i = 0; i < num_pages; i++) {
		unsigned int len = PAGE_SIZE - offset;

		if (len > count)
			len = count;
		sg_set_page(scatterlist + i, pages[i], len, offset);
		offset = 0;
		count -= len;
	}

	dma_buffers = dma_map_sg(g_dev,
				 scatterlist,
				 num_pages,
				 pagelistinfo->dma_dir);

	if (dma_buffers == 0) {
		cleanup_pagelistinfo(pagelistinfo);
		return NULL;
	}

	pagelistinfo->scatterlist_mapped = 1;

	/* Combine adjacent blocks for performance */
	k = 0;
	for_each_sg(scatterlist, sg, dma_buffers, i) {
		u32 len = sg_dma_len(sg);
		u32 addr = sg_dma_address(sg);

		/* Note: addrs is the address + page_count - 1
		 * The firmware expects blocks after the first to be page-
		 * aligned and a multiple of the page size
		 */
		WARN_ON(len == 0);
		WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
		WARN_ON(i && (addr & ~PAGE_MASK));
		if (k > 0 &&
		    ((addrs[k - 1] & PAGE_MASK) +
		     (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT))
		    == (addr & PAGE_MASK))
			/* Contiguous with the previous entry: extend it */
			addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
		else
			addrs[k++] = (addr & PAGE_MASK) |
				(((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
	}

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
		((pagelist->offset & (g_cache_line_size - 1)) ||
		((pagelist->offset + pagelist->length) &
		(g_cache_line_size - 1)))) {
		char *fragments;

		/* May block until a fragment pair is available */
		if (down_interruptible(&g_free_fragments_sema) != 0) {
			cleanup_pagelistinfo(pagelistinfo);
			return NULL;
		}

		WARN_ON(g_free_fragments == NULL);

		down(&g_free_fragments_mutex);
		fragments = g_free_fragments;
		WARN_ON(fragments == NULL);
		g_free_fragments = *(char **) g_free_fragments;
		up(&g_free_fragments_mutex);
		/* Encode the fragment index into the pagelist type so the
		 * VPU (and free_pagelist) can find the fragment buffer.
		 */
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
			(fragments - g_fragments_base) / g_fragments_size;
	}

	return pagelistinfo;
}
588
/* Complete and release a bulk-transfer pagelist.
 *
 * @actual is the number of bytes actually transferred.  For reads that
 * used fragments, the head/tail partial cache lines are copied from the
 * fragment buffer back into the user pages, and the fragment pair is
 * returned to the free pool.  Pages written by the device are marked
 * dirty before being released.
 */
static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
	      int actual)
{
	PAGELIST_T *pagelist   = pagelistinfo->pagelist;
	struct page **pages    = pagelistinfo->pages;
	unsigned int num_pages = pagelistinfo->num_pages;

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK, %d",
			__func__, pagelistinfo->pagelist, actual);

	/*
	 * NOTE: dma_unmap_sg must be called before the
	 * cpu can touch any of the data/pages.
	 */
	dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
		     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	pagelistinfo->scatterlist_mapped = 0;

	/* Deal with any partial cache lines (fragments) */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
		/* The fragment index was encoded into the type by
		 * create_pagelist(); recover the fragment buffer address.
		 */
		char *fragments = g_fragments_base +
			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
			g_fragments_size;
		int head_bytes, tail_bytes;

		head_bytes = (g_cache_line_size - pagelist->offset) &
			(g_cache_line_size - 1);
		tail_bytes = (pagelist->offset + actual) &
			(g_cache_line_size - 1);

		/* Copy the head fragment into the first page */
		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;

			memcpy((char *)kmap(pages[0]) +
				pagelist->offset,
				fragments,
				head_bytes);
			kunmap(pages[0]);
		}
		/* Copy the tail fragment into the last page */
		if ((actual >= 0) && (head_bytes < actual) &&
			(tail_bytes != 0)) {
			memcpy((char *)kmap(pages[num_pages - 1]) +
				((pagelist->offset + actual) &
				(PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
				fragments + g_cache_line_size,
				tail_bytes);
			kunmap(pages[num_pages - 1]);
		}

		/* Return the fragment pair to the free list */
		down(&g_free_fragments_mutex);
		*(char **)fragments = g_free_fragments;
		g_free_fragments = fragments;
		up(&g_free_fragments_mutex);
		up(&g_free_fragments_sema);
	}

	/* Need to mark all the pages dirty. */
	if (pagelist->type != PAGELIST_WRITE &&
	    pagelistinfo->pages_need_release) {
		unsigned int i;

		for (i = 0; i < num_pages; i++)
			set_page_dirty(pages[i]);
	}

	cleanup_pagelistinfo(pagelistinfo);
}