/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25 #include "huge_gem_object.h"
27 static void huge_free_pages(struct drm_i915_gem_object *obj,
28 struct sg_table *pages)
30 unsigned long nreal = obj->scratch / PAGE_SIZE;
31 struct scatterlist *sg;
33 for (sg = pages->sgl; sg && nreal--; sg = __sg_next(sg))
34 __free_page(sg_page(sg));
40 static struct sg_table *
41 huge_get_pages(struct drm_i915_gem_object *obj)
43 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
44 const unsigned long nreal = obj->scratch / PAGE_SIZE;
45 const unsigned long npages = obj->base.size / PAGE_SIZE;
46 struct scatterlist *sg, *src, *end;
47 struct sg_table *pages;
50 pages = kmalloc(sizeof(*pages), GFP);
52 return ERR_PTR(-ENOMEM);
54 if (sg_alloc_table(pages, npages, GFP)) {
56 return ERR_PTR(-ENOMEM);
60 for (n = 0; n < nreal; n++) {
63 page = alloc_page(GFP | __GFP_HIGHMEM);
69 sg_set_page(sg, page, PAGE_SIZE, 0);
73 for (end = sg, src = pages->sgl; sg; sg = __sg_next(sg)) {
74 sg_set_page(sg, sg_page(src), PAGE_SIZE, 0);
81 if (i915_gem_gtt_prepare_pages(obj, pages))
87 huge_free_pages(obj, pages);
88 return ERR_PTR(-ENOMEM);
92 static void huge_put_pages(struct drm_i915_gem_object *obj,
93 struct sg_table *pages)
95 i915_gem_gtt_finish_pages(obj, pages);
96 huge_free_pages(obj, pages);
98 obj->mm.dirty = false;
101 static const struct drm_i915_gem_object_ops huge_ops = {
102 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
103 I915_GEM_OBJECT_IS_SHRINKABLE,
104 .get_pages = huge_get_pages,
105 .put_pages = huge_put_pages,
108 struct drm_i915_gem_object *
109 huge_gem_object(struct drm_i915_private *i915,
110 phys_addr_t phys_size,
113 struct drm_i915_gem_object *obj;
114 unsigned int cache_level;
116 GEM_BUG_ON(!phys_size || phys_size > dma_size);
117 GEM_BUG_ON(!IS_ALIGNED(phys_size, PAGE_SIZE));
118 GEM_BUG_ON(!IS_ALIGNED(dma_size, I915_GTT_PAGE_SIZE));
120 if (overflows_type(dma_size, obj->base.size))
121 return ERR_PTR(-E2BIG);
123 obj = i915_gem_object_alloc(i915);
125 return ERR_PTR(-ENOMEM);
127 drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
128 i915_gem_object_init(obj, &huge_ops);
130 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
131 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
132 cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
133 i915_gem_object_set_cache_coherency(obj, cache_level);
134 obj->scratch = phys_size;