// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "intel_memory_region.h"
#include "i915_gem_region.h"
#include "i915_drv.h"
#include "i915_trace.h"

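/*
 * Release the buddy blocks backing @obj back to its memory region and free
 * the scatterlist that described them. The object is left without backing
 * pages and is marked clean.
 */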
void
i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
                                struct sg_table *pages)
{
        __intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks);

        obj->mm.dirty = false;
        sg_free_table(pages);
        kfree(pages);
}

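/*
 * Allocate backing storage for @obj from its memory region's buddy
 * allocator and build a scatterlist describing the allocated blocks,
 * merging physically contiguous blocks into single entries as we go.
 */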
int
i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
{
        struct intel_memory_region *mem = obj->mm.region;
        struct list_head *blocks = &obj->mm.blocks;
        resource_size_t size = obj->base.size;
        resource_size_t prev_end;
        struct i915_buddy_block *block;
        unsigned int flags;
        struct sg_table *st;
        struct scatterlist *sg;
        unsigned int sg_page_sizes;
        int ret;

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
                return -ENOMEM;

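        /*
         * Worst case we need one sg entry per minimum-sized chunk; any
         * excess is trimmed once the blocks have been coalesced below.
         */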
        if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) {
                kfree(st);
                return -ENOMEM;
        }

        flags = I915_ALLOC_MIN_PAGE_SIZE;
        if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
                flags |= I915_ALLOC_CONTIGUOUS;

        ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
        if (ret)
                goto err_free_sg;

        GEM_BUG_ON(list_empty(blocks));

        sg = st->sgl;
        st->nents = 0;
        sg_page_sizes = 0;
        prev_end = (resource_size_t)-1;

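        /*
         * Walk the allocated blocks, extending the current sg entry while
         * blocks remain physically contiguous and the entry's length does
         * not overflow; otherwise start a new entry.
         */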
        list_for_each_entry(block, blocks, link) {
                u64 block_size, offset;

                block_size = min_t(u64, size,
                                   i915_buddy_block_size(&mem->mm, block));
                offset = i915_buddy_block_offset(block);

                GEM_BUG_ON(overflows_type(block_size, sg->length));

                if (offset != prev_end ||
                    add_overflows_t(typeof(sg->length), sg->length, block_size)) {
                        if (st->nents) {
                                sg_page_sizes |= sg->length;
                                sg = __sg_next(sg);
                        }

                        sg_dma_address(sg) = mem->region.start + offset;
                        sg_dma_len(sg) = block_size;

                        sg->length = block_size;

                        st->nents++;
                } else {
                        sg->length += block_size;
                        sg_dma_len(sg) += block_size;
                }

                prev_end = offset + block_size;
        }

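        /*
         * Record the final entry's page size, terminate the list and give
         * back the unused tail of the worst-case table allocated above.
         */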
        sg_page_sizes |= sg->length;
        sg_mark_end(sg);
        i915_sg_trim(st);

        __i915_gem_object_set_pages(obj, st, sg_page_sizes);

        return 0;

err_free_sg:
        sg_free_table(st);
        kfree(st);
        return ret;
}

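/*
 * Associate @obj with @mem, taking a reference on the region and placing
 * the object on the region's object list (or on the purgeable list for
 * volatile objects). Objects no larger than the region's minimum page size
 * are forced to be contiguous.
 */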
void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
                                        struct intel_memory_region *mem,
                                        unsigned long flags)
{
        INIT_LIST_HEAD(&obj->mm.blocks);
        obj->mm.region = intel_memory_region_get(mem);

        obj->flags |= flags;
        if (obj->base.size <= mem->min_page_size)
                obj->flags |= I915_BO_ALLOC_CONTIGUOUS;

        mutex_lock(&mem->objects.lock);

        if (obj->flags & I915_BO_ALLOC_VOLATILE)
                list_add(&obj->mm.region_link, &mem->objects.purgeable);
        else
                list_add(&obj->mm.region_link, &mem->objects.list);

        mutex_unlock(&mem->objects.lock);
}

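/*
 * Undo i915_gem_object_init_memory_region(): unlink @obj from its region's
 * object list and drop the reference taken at init time.
 */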
void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
{
        struct intel_memory_region *mem = obj->mm.region;

        mutex_lock(&mem->objects.lock);
        list_del(&obj->mm.region_link);
        mutex_unlock(&mem->objects.lock);

        intel_memory_region_put(mem);
}

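/*
 * Create a GEM object of at least @size bytes backed by @mem, rounding the
 * size up to the region's minimum page size and rejecting sizes whose page
 * count does not fit the assumptions documented below. Returns the new
 * object, or an ERR_PTR() on failure.
 */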
struct drm_i915_gem_object *
i915_gem_object_create_region(struct intel_memory_region *mem,
                              resource_size_t size,
                              unsigned int flags)
{
        struct drm_i915_gem_object *obj;

        /*
         * NB: Our use of resource_size_t for the size stems from using struct
         * resource for the mem->region. We might need to revisit this in the
         * future.
         */

        GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);

        if (!mem)
                return ERR_PTR(-ENODEV);

        size = round_up(size, mem->min_page_size);

        GEM_BUG_ON(!size);
        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));

        /*
         * XXX: There is a prevalence of the assumption that we fit the
         * object's page count inside a 32bit _signed_ variable. Let's document
         * this and catch if we ever need to fix it. In the meantime, if you do
         * spot such a local variable, please consider fixing!
         */

        if (size >> PAGE_SHIFT > INT_MAX)
                return ERR_PTR(-E2BIG);

        if (overflows_type(size, obj->base.size))
                return ERR_PTR(-E2BIG);

        obj = mem->ops->create_object(mem, size, flags);
        if (!IS_ERR(obj))
                trace_i915_gem_object_create(obj);

        return obj;
}
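
/*
 * Example usage (an illustrative sketch, not taken from this file): a
 * hypothetical caller allocating a 2MiB contiguous object from a memory
 * region @mem it obtained elsewhere:
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_create_region(mem, SZ_2M,
 *					    I915_BO_ALLOC_CONTIGUOUS);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */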