// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2019 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"

/*
 * Different methods for tracking dirty:
 * VMW_BO_DIRTY_PAGETABLE - Scan the pagetable for hardware dirty bits
 * VMW_BO_DIRTY_MKWRITE - Write-protect page table entries and record write-
 * accesses in the VM mkwrite() callback
 */
enum vmw_bo_dirty_method {
	VMW_BO_DIRTY_PAGETABLE,
	VMW_BO_DIRTY_MKWRITE,
};

/*
 * No dirtied pages at scan trigger a transition to the _MKWRITE method,
 * similarly a certain percentage of dirty pages trigger a transition to
 * the _PAGETABLE method. How many triggers should we wait for before
 * changing method?
 */
#define VMW_DIRTY_NUM_CHANGE_TRIGGERS 2

/* Percentage to trigger a transition to the _PAGETABLE method */
#define VMW_DIRTY_PERCENTAGE 10
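
/*
 * For illustration (hypothetical numbers, not from the original source):
 * on a 512-page bo, a mkwrite scan that finds more than 51 freshly
 * written pages (> 10%) counts as one trigger towards _PAGETABLE, while
 * a pagetable scan that finds no dirty bits at all counts as one trigger
 * towards _MKWRITE. The method only changes after more than
 * VMW_DIRTY_NUM_CHANGE_TRIGGERS (2) consecutive triggers, so a single
 * unusual scan cannot flip-flop the tracking method.
 */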

/**
 * struct vmw_bo_dirty - Dirty information for buffer objects
 * @start: First currently dirty bit
 * @end: Last currently dirty bit + 1
 * @method: The currently used dirty method
 * @change_count: Number of consecutive method change triggers
 * @ref_count: Reference count for this structure
 * @bitmap_size: The size of the bitmap in bits. Typically equal to the
 * number of pages in the bo.
 * @bitmap: A bitmap where each bit represents a page. A set bit means a
 * dirty page.
 */
struct vmw_bo_dirty {
	unsigned long start;
	unsigned long end;
	enum vmw_bo_dirty_method method;
	unsigned int change_count;
	unsigned int ref_count;
	unsigned long bitmap_size;
	unsigned long bitmap[];
};
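
/*
 * Size illustration (hypothetical bo): the struct is allocated with the
 * bitmap trailing it, sized as sizeof(struct vmw_bo_dirty) +
 * BITS_TO_LONGS(num_pages) * sizeof(long). A 1 GiB bo with 4 KiB pages
 * has 262144 pages; BITS_TO_LONGS(262144) is 4096 longs on a 64-bit
 * build, so the bitmap itself adds 32 KiB.
 */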

/**
 * vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits
 * @vbo: The buffer object to scan
 *
 * Scans the pagetable for dirty bits. Clear those bits and modify the
 * dirty structure with the results. This function may change the
 * dirty-tracking method.
 */
static void vmw_bo_dirty_scan_pagetable(struct vmw_buffer_object *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node);
	struct address_space *mapping = vbo->base.bdev->dev_mapping;
	pgoff_t num_marked;

	num_marked = clean_record_shared_mapping_range
		(mapping,
		 offset, dirty->bitmap_size,
		 offset, &dirty->bitmap[0],
		 &dirty->start, &dirty->end);
	if (num_marked == 0)
		dirty->change_count++;
	else
		dirty->change_count = 0;

	if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
		dirty->change_count = 0;
		dirty->method = VMW_BO_DIRTY_MKWRITE;
		wp_shared_mapping_range(mapping,
					offset, dirty->bitmap_size);
		clean_record_shared_mapping_range(mapping,
						  offset, dirty->bitmap_size,
						  offset, &dirty->bitmap[0],
						  &dirty->start, &dirty->end);
	}
}
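
/*
 * Background note: wp_shared_mapping_range() and
 * clean_record_shared_mapping_range() are the helpers from
 * mm/mapping_dirty_helpers.c. The clean-and-record variant walks the
 * given number of ptes of the address space, cleans their dirty bits,
 * sets the corresponding bits (relative to the passed bitmap_pgoff) in
 * the bitmap and updates *start / *end to enclose the bits it set; both
 * helpers return the number of ptes they actually modified.
 */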

/**
 * vmw_bo_dirty_scan_mkwrite - Reset the mkwrite dirty-tracking method
 * @vbo: The buffer object to scan
 *
 * Write-protect pages written to so that consecutive write accesses will
 * trigger a call to mkwrite.
 *
 * This function may change the dirty-tracking method.
 */
static void vmw_bo_dirty_scan_mkwrite(struct vmw_buffer_object *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
	struct address_space *mapping = vbo->base.bdev->dev_mapping;
	pgoff_t num_marked;

	if (dirty->end <= dirty->start)
		return;

	num_marked = wp_shared_mapping_range(vbo->base.bdev->dev_mapping,
					     dirty->start + offset,
					     dirty->end - dirty->start);

	if (100UL * num_marked / dirty->bitmap_size >
	    VMW_DIRTY_PERCENTAGE) {
		dirty->change_count++;
	} else {
		dirty->change_count = 0;
	}

	if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
		pgoff_t start = 0;
		pgoff_t end = dirty->bitmap_size;

		dirty->method = VMW_BO_DIRTY_PAGETABLE;
		clean_record_shared_mapping_range(mapping, offset, end, offset,
						  &dirty->bitmap[0],
						  &start, &end);
		bitmap_clear(&dirty->bitmap[0], 0, dirty->bitmap_size);
		if (dirty->start < dirty->end)
			bitmap_set(&dirty->bitmap[0], dirty->start,
				   dirty->end - dirty->start);
		dirty->change_count = 0;
	}
}

/**
 * vmw_bo_dirty_scan - Scan for dirty pages and add them to the dirty
 * tracking structure
 * @vbo: The buffer object to scan
 *
 * This function may change the dirty tracking method.
 */
void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;

	if (dirty->method == VMW_BO_DIRTY_PAGETABLE)
		vmw_bo_dirty_scan_pagetable(vbo);
	else
		vmw_bo_dirty_scan_mkwrite(vbo);
}

/**
 * vmw_bo_dirty_pre_unmap - write-protect and pick up dirty pages before
 * an unmap_mapping_range operation.
 * @vbo: The buffer object,
 * @start: First page of the range within the buffer object.
 * @end: Last page of the range within the buffer object + 1.
 *
 * If we're using the _PAGETABLE scan method, we may leak dirty pages
 * when calling unmap_mapping_range(). This function makes sure we pick
 * up all dirty pages.
 */
static void vmw_bo_dirty_pre_unmap(struct vmw_buffer_object *vbo,
				   pgoff_t start, pgoff_t end)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
	struct address_space *mapping = vbo->base.bdev->dev_mapping;

	if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end)
		return;

	wp_shared_mapping_range(mapping, start + offset, end - start);
	clean_record_shared_mapping_range(mapping, start + offset,
					  end - start, offset,
					  &dirty->bitmap[0], &dirty->start,
					  &dirty->end);
}
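
/*
 * Why this is needed: with the _PAGETABLE method the ptes remain
 * write-enabled and the only record of a write is the hardware dirty bit
 * in the pte itself. unmap_mapping_range() would zap those ptes and lose
 * the bits, so they are harvested into the bitmap first. With the
 * _MKWRITE method every write has already been recorded in mkwrite(),
 * and there is nothing to pick up.
 */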

/**
 * vmw_bo_dirty_unmap - Clear all ptes pointing to a range within a bo
 * @vbo: The buffer object,
 * @start: First page of the range within the buffer object.
 * @end: Last page of the range within the buffer object + 1.
 *
 * This is similar to ttm_bo_unmap_virtual() except it takes a subrange.
 */
void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
			pgoff_t start, pgoff_t end)
{
	unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
	struct address_space *mapping = vbo->base.bdev->dev_mapping;

	vmw_bo_dirty_pre_unmap(vbo, start, end);
	unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT,
				   (loff_t) (end - start) << PAGE_SHIFT);
}
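
/*
 * Offset arithmetic, illustrated with made-up numbers: if
 * drm_vma_node_start() puts the bo at page offset 0x100000 in the device
 * address space, then vmw_bo_dirty_unmap(vbo, 2, 4) unmaps the byte
 * range starting at (0x100000 + 2) << PAGE_SHIFT with length
 * 2 << PAGE_SHIFT, i.e. exactly pages 2 and 3 of the bo.
 */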

/**
 * vmw_bo_dirty_add - Add a dirty-tracking user to a buffer object
 * @vbo: The buffer object
 *
 * This function registers a dirty-tracking user to a buffer object.
 * A user can be for example a resource or a vma in a special user-space
 * mapping.
 *
 * Return: Zero on success, -ENOMEM on memory allocation failure.
 */
int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;
	pgoff_t num_pages = vbo->base.resource->num_pages;
	size_t size;
	int ret;

	if (dirty) {
		dirty->ref_count++;
		return 0;
	}

	size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long);
	dirty = kvzalloc(size, GFP_KERNEL);
	if (!dirty) {
		ret = -ENOMEM;
		goto out_no_dirty;
	}

	dirty->bitmap_size = num_pages;
	dirty->start = dirty->bitmap_size;
	dirty->end = 0;
	dirty->ref_count = 1;
	if (num_pages < PAGE_SIZE / sizeof(pte_t)) {
		dirty->method = VMW_BO_DIRTY_PAGETABLE;
	} else {
		struct address_space *mapping = vbo->base.bdev->dev_mapping;
		pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node);

		dirty->method = VMW_BO_DIRTY_MKWRITE;

		/* Write-protect and then pick up already dirty bits */
		wp_shared_mapping_range(mapping, offset, num_pages);
		clean_record_shared_mapping_range(mapping, offset, num_pages,
						  offset,
						  &dirty->bitmap[0],
						  &dirty->start, &dirty->end);
	}

	vbo->dirty = dirty;

	return 0;

out_no_dirty:
	return ret;
}
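
/*
 * The initial-method threshold above means a bo whose ptes fit within a
 * single page-table page starts out with pagetable scanning. Assuming a
 * 64-bit configuration with 4 KiB pages and 8-byte ptes,
 * PAGE_SIZE / sizeof(pte_t) is 512, so bos under 2 MiB begin in
 * _PAGETABLE mode and larger ones in _MKWRITE mode.
 */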

/**
 * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object
 * @vbo: The buffer object
 *
 * This function releases a dirty-tracking user from a buffer object.
 * If the reference count reaches zero, then the dirty-tracking object is
 * freed and the pointer to it cleared.
 */
void vmw_bo_dirty_release(struct vmw_buffer_object *vbo)
{
	struct vmw_bo_dirty *dirty = vbo->dirty;

	if (dirty && --dirty->ref_count == 0) {
		kvfree(dirty);
		vbo->dirty = NULL;
	}
}

/**
 * vmw_bo_dirty_transfer_to_res - Pick up a resource's dirty region from
 * its backing mob.
 * @res: The resource
 *
 * This function will pick up all dirty ranges affecting the resource from
 * its backup mob, and call vmw_resource_dirty_update() once for each
 * range. The transferred ranges will be cleared from the backing mob's
 * dirty tracking.
 */
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
{
	struct vmw_buffer_object *vbo = res->backup;
	struct vmw_bo_dirty *dirty = vbo->dirty;
	pgoff_t start, cur, end;
	unsigned long res_start = res->backup_offset;
	unsigned long res_end = res->backup_offset + res->backup_size;

	WARN_ON_ONCE(res_start & ~PAGE_MASK);
	res_start >>= PAGE_SHIFT;
	res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);

	if (res_start >= dirty->end || res_end <= dirty->start)
		return;

	cur = max(res_start, dirty->start);
	res_end = max(res_end, dirty->end);
	while (cur < res_end) {
		unsigned long num;

		start = find_next_bit(&dirty->bitmap[0], res_end, cur);
		if (start >= res_end)
			break;

		end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1);
		cur = end + 1;
		num = end - start;
		bitmap_clear(&dirty->bitmap[0], start, num);
		vmw_resource_dirty_update(res, start, end);
	}

	if (res_start <= dirty->start && res_end > dirty->start)
		dirty->start = res_end;
	if (res_start < dirty->end && res_end >= dirty->end)
		dirty->end = res_start;
}
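
/*
 * Walk-through with a made-up bitmap: if the resource covers pages 0..7
 * and the bitmap holds 0b00111100, find_next_bit() returns start = 2 and
 * find_next_zero_bit() returns end = 6, so bits 2..5 are cleared and
 * reported to vmw_resource_dirty_update() as one range; the walk then
 * resumes at cur = 7 and terminates.
 */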

/**
 * vmw_bo_dirty_clear_res - Clear a resource's dirty region from
 * its backing mob.
 * @res: The resource
 *
 * This function will clear all dirty ranges affecting the resource from
 * its backup mob's dirty tracking.
 */
void vmw_bo_dirty_clear_res(struct vmw_resource *res)
{
	unsigned long res_start = res->backup_offset;
	unsigned long res_end = res->backup_offset + res->backup_size;
	struct vmw_buffer_object *vbo = res->backup;
	struct vmw_bo_dirty *dirty = vbo->dirty;

	res_start >>= PAGE_SHIFT;
	res_end = DIV_ROUND_UP(res_end, PAGE_SIZE);

	if (res_start >= dirty->end || res_end <= dirty->start)
		return;

	res_start = max(res_start, dirty->start);
	res_end = min(res_end, dirty->end);
	bitmap_clear(&dirty->bitmap[0], res_start, res_end - res_start);

	if (res_start <= dirty->start && res_end > dirty->start)
		dirty->start = res_end;
	if (res_start < dirty->end && res_end >= dirty->end)
		dirty->end = res_start;
}

vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	vm_fault_t ret;
	unsigned long page_offset;
	unsigned int save_flags;
	struct vmw_buffer_object *vbo =
		container_of(bo, typeof(*vbo), base);

	/*
	 * mkwrite() doesn't handle the VM_FAULT_RETRY return value correctly.
	 * So make sure the TTM helpers are aware.
	 */
	save_flags = vmf->flags;
	vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;
	ret = ttm_bo_vm_reserve(bo, vmf);
	vmf->flags = save_flags;
	if (ret)
		return ret;

	page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
	if (unlikely(page_offset >= bo->resource->num_pages)) {
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE &&
	    !test_bit(page_offset, &vbo->dirty->bitmap[0])) {
		struct vmw_bo_dirty *dirty = vbo->dirty;

		__set_bit(page_offset, &dirty->bitmap[0]);
		dirty->start = min(dirty->start, page_offset);
		dirty->end = max(dirty->end, page_offset + 1);
	}

out_unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}

vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct vmw_buffer_object *vbo =
		container_of(bo, struct vmw_buffer_object, base);
	pgoff_t num_prefault;
	pgprot_t prot;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	num_prefault = (vma->vm_flags & VM_RAND_READ) ? 1 :
		TTM_BO_VM_NUM_PREFAULT;

	if (vbo->dirty) {
		pgoff_t allowed_prefault;
		unsigned long page_offset;

		page_offset = vmf->pgoff -
			drm_vma_node_start(&bo->base.vma_node);
		if (page_offset >= bo->resource->num_pages ||
		    vmw_resources_clean(vbo, page_offset,
					page_offset + PAGE_SIZE,
					&allowed_prefault)) {
			ret = VM_FAULT_SIGBUS;
			goto out_unlock;
		}

		num_prefault = min(num_prefault, allowed_prefault);
	}

	/*
	 * If we don't track dirty using the MKWRITE method, make sure
	 * the page protection is write-enabled so we don't get
	 * a lot of unnecessary write faults.
	 */
	if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)
		prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
	else
		prot = vm_get_page_prot(vma->vm_flags);

	ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

out_unlock:
	dma_resv_unlock(bo->base.resv);

	return ret;
}
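
/*
 * A note on the protection choice in vmw_bo_vm_fault(): masking out
 * VM_SHARED makes vm_get_page_prot() hand back the write-protected,
 * COW-style protection, so with _MKWRITE tracking the first write to
 * each page is forced through vmw_bo_vm_mkwrite() and lands in the
 * dirty bitmap. With pagetable tracking that extra fault is unnecessary,
 * hence the fully write-enabled protection.
 */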