// SPDX-License-Identifier: GPL-2.0-only
/* binder_alloc_selftest.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2017 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm_types.h>
#include <linux/err.h>
#include "binder_alloc.h"

#define BUFFER_NUM 5
#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)

static bool binder_selftest_run = true;
static int binder_selftest_failures;
static DEFINE_MUTEX(binder_selftest_lock);
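
/*
 * The selftest is intended to run once per boot: binder_selftest_run is
 * cleared after the first complete pass, binder_selftest_failures counts
 * failed checks within that pass, and binder_selftest_lock serializes the
 * test against multiple binder procs reaching it concurrently.
 */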

/**
 * enum buf_end_align_type - Page alignment of a buffer
 * end with regard to the end of the previous buffer.
 *
 * In the pictures below, buf2 refers to the buffer we
 * are aligning. buf1 refers to previous buffer by addr.
 * Symbol [ means the start of a buffer, ] means the end
 * of a buffer, and | means page boundaries.
 */
enum buf_end_align_type {
	/**
	 * @SAME_PAGE_UNALIGNED: The end of this buffer is on
	 * the same page as the end of the previous buffer and
	 * is not page aligned. Examples:
	 * buf1 ][ buf2 ][ ...
	 * buf1 ]|[ buf2 ][ ...
	 */
	SAME_PAGE_UNALIGNED = 0,
	/**
	 * @SAME_PAGE_ALIGNED: When the end of the previous buffer
	 * is not page aligned, the end of this buffer is on the
	 * same page as the end of the previous buffer and is page
	 * aligned. When the previous buffer is page aligned, the
	 * end of this buffer is aligned to the next page boundary.
	 * Examples:
	 * buf1 ][ buf2 ]| ...
	 * buf1 ]|[ buf2 ]| ...
	 */
	SAME_PAGE_ALIGNED,
	/**
	 * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
	 * the page next to the end of the previous buffer and
	 * is not page aligned. Examples:
	 * buf1 ][ buf2 | buf2 ][ ...
	 * buf1 ]|[ buf2 | buf2 ][ ...
	 */
	NEXT_PAGE_UNALIGNED,
	/**
	 * @NEXT_PAGE_ALIGNED: The end of this buffer is on
	 * the page next to the end of the previous buffer and
	 * is page aligned. Examples:
	 * buf1 ][ buf2 | buf2 ]| ...
	 * buf1 ]|[ buf2 | buf2 ]| ...
	 */
	NEXT_PAGE_ALIGNED,
	/**
	 * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
	 * the page that follows the page after the end of the
	 * previous buffer and is not page aligned. Examples:
	 * buf1 ][ buf2 | buf2 | buf2 ][ ...
	 * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
	 */
	NEXT_NEXT_UNALIGNED,
	/**
	 * @LOOP_END: The number of enum values in &buf_end_align_type.
	 * It is used for controlling loop termination.
	 */
	LOOP_END,
};
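
/*
 * Illustrative example (assuming PAGE_SIZE == 4096, so BUFFER_MIN_SIZE ==
 * 512): if buf1 ends at offset 512, then a buf2 ending at 1024 is
 * SAME_PAGE_UNALIGNED, at 4096 is SAME_PAGE_ALIGNED, at 4608 is
 * NEXT_PAGE_UNALIGNED, at 8192 is NEXT_PAGE_ALIGNED, and at 8704 is
 * NEXT_NEXT_UNALIGNED.
 */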

static void pr_err_size_seq(size_t *sizes, int *seq)
{
	int i;

	pr_err("alloc sizes: ");
	for (i = 0; i < BUFFER_NUM; i++)
		pr_cont("[%zu]", sizes[i]);
	pr_cont("\n");
	pr_err("free seq: ");
	for (i = 0; i < BUFFER_NUM; i++)
		pr_cont("[%d]", seq[i]);
	pr_cont("\n");
}

static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 size_t size)
{
	unsigned long page_addr;
	unsigned long end;
	int page_index;

	end = PAGE_ALIGN(buffer->user_data + size);
	page_addr = buffer->user_data;
	for (; page_addr < end; page_addr += PAGE_SIZE) {
		page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
		if (!alloc->pages[page_index].page_ptr ||
		    !list_empty(&alloc->pages[page_index].lru)) {
			pr_err("expect alloc but is %s at page index %d\n",
			       alloc->pages[page_index].page_ptr ?
			       "lru" : "free", page_index);
			return false;
		}
	}
	return true;
}

static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
				      struct binder_buffer *buffers[],
				      size_t *sizes, int *seq)
{
	int i;

	for (i = 0; i < BUFFER_NUM; i++) {
		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
		if (IS_ERR(buffers[i]) ||
		    !check_buffer_pages_allocated(alloc, buffers[i],
						  sizes[i])) {
			pr_err_size_seq(sizes, seq);
			binder_selftest_failures++;
		}
	}
}
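
/*
 * The trailing zeros request no offsets or extra-buffers space and a
 * synchronous buffer, so sizes[i] is the full data size being placed; a
 * failed allocation or a page-state mismatch counts as one failure per
 * buffer.
 */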

static void binder_selftest_free_buf(struct binder_alloc *alloc,
				     struct binder_buffer *buffers[],
				     size_t *sizes, int *seq, size_t end)
{
	int i;

	for (i = 0; i < BUFFER_NUM; i++)
		binder_alloc_free_buf(alloc, buffers[seq[i]]);

	for (i = 0; i < end / PAGE_SIZE; i++) {
		/*
		 * Error message on a free page can be false positive
		 * if binder shrinker ran during binder_alloc_free_buf
		 * calls above.
		 */
		if (list_empty(&alloc->pages[i].lru)) {
			pr_err_size_seq(sizes, seq);
			pr_err("expect lru but is %s at page index %d\n",
			       alloc->pages[i].page_ptr ? "alloc" : "free", i);
			binder_selftest_failures++;
		}
	}
}
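
/*
 * Note that only pages below "end" are checked here: the caller passes the
 * last buffer's end offset for the front-packed run and the whole
 * buffer_size for the back-packed run, i.e. exactly the range the freed
 * buffers covered.
 */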

static void binder_selftest_free_page(struct binder_alloc *alloc)
{
	int i;
	unsigned long count;

	while ((count = list_lru_count(&binder_freelist))) {
		list_lru_walk(&binder_freelist, binder_alloc_free_page,
			      NULL, count);
	}

	for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
		if (alloc->pages[i].page_ptr) {
			pr_err("expect free but is %s at page index %d\n",
			       list_empty(&alloc->pages[i].lru) ?
			       "alloc" : "lru", i);
			binder_selftest_failures++;
		}
	}
}

static void binder_selftest_alloc_free(struct binder_alloc *alloc,
				       size_t *sizes, int *seq, size_t end)
{
	struct binder_buffer *buffers[BUFFER_NUM];

	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);

	/* Allocate from lru. */
	binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
	if (list_lru_count(&binder_freelist))
		pr_err("lru list should be empty but is not\n");

	binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
	binder_selftest_free_page(alloc);
}
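
/*
 * One call of the above therefore exercises the full life cycle for a given
 * size/free-order combination: fresh page allocation, release onto the
 * binder_freelist lru, reallocation that must reuse those lru pages (the
 * freelist is expected to be empty afterwards), and finally reclaim of every
 * page through binder_alloc_free_page, the same callback the binder shrinker
 * uses.
 */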

static bool is_dup(int *seq, int index, int val)
{
	int i;

	for (i = 0; i < index; i++) {
		if (seq[i] == val)
			return true;
	}
	return false;
}

/* Generate BUFFER_NUM factorial free orders. */
static void binder_selftest_free_seq(struct binder_alloc *alloc,
				     size_t *sizes, int *seq,
				     int index, size_t end)
{
	int i;

	if (index == BUFFER_NUM) {
		binder_selftest_alloc_free(alloc, sizes, seq, end);
		return;
	}
	for (i = 0; i < BUFFER_NUM; i++) {
		if (is_dup(seq, index, i))
			continue;
		seq[index] = i;
		binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
	}
}

static void binder_selftest_alloc_size(struct binder_alloc *alloc,
				       size_t *end_offset)
{
	int i;
	int seq[BUFFER_NUM] = {0};
	size_t front_sizes[BUFFER_NUM];
	size_t back_sizes[BUFFER_NUM];
	size_t last_offset, offset = 0;

	for (i = 0; i < BUFFER_NUM; i++) {
		last_offset = offset;
		offset = end_offset[i];
		front_sizes[i] = offset - last_offset;
		back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
	}
	/*
	 * Buffers share the first or last few pages.
	 * Only BUFFER_NUM - 1 buffer sizes are adjustable since
	 * we need one giant buffer before getting to the last page.
	 */
	back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
	binder_selftest_free_seq(alloc, front_sizes, seq, 0,
				 end_offset[BUFFER_NUM - 1]);
	binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
}
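
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): for
 * end_offset[] == {512, 1024, 4096, 4608, 8192}, front_sizes[] is
 * {512, 512, 3072, 512, 3584} and back_sizes[] is the mirror image
 * {3584, 512, 3072, 512, 512}, after which back_sizes[0] is widened by the
 * remaining buffer_size so the mirrored run ends exactly at the end of the
 * mapping. With BUFFER_NUM == 5, each run is then freed in all 5! == 120
 * possible orders.
 */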

static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
					 size_t *end_offset, int index)
{
	int align;
	size_t end, prev;

	if (index == BUFFER_NUM) {
		binder_selftest_alloc_size(alloc, end_offset);
		return;
	}
	prev = index == 0 ? 0 : end_offset[index - 1];
	end = prev;

	BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);

	for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
		if (align % 2)
			end = ALIGN(end, PAGE_SIZE);
		else
			end += BUFFER_MIN_SIZE;
		end_offset[index] = end;
		binder_selftest_alloc_offset(alloc, end_offset, index + 1);
	}
}
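
/*
 * Within one recursion level "end" advances cumulatively, so relative to the
 * previous buffer's end the five iterations produce: +BUFFER_MIN_SIZE, ALIGN
 * to the page boundary, +BUFFER_MIN_SIZE, ALIGN again, and +BUFFER_MIN_SIZE,
 * which is one end offset per buf_end_align_type case. The BUILD_BUG_ON
 * above guarantees that BUFFER_NUM unaligned steps of BUFFER_MIN_SIZE can
 * never add up to a whole page, so a "same page" case cannot silently spill
 * onto the next page.
 */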

/**
 * binder_selftest_alloc() - Test alloc and free of buffer pages.
 * @alloc: Pointer to alloc struct.
 *
 * Allocate BUFFER_NUM buffers to cover all page alignment cases,
 * then free them in all orders possible. Check that pages are
 * correctly allocated, put onto lru when buffers are freed, and
 * are freed when binder_alloc_free_page is called.
 */
void binder_selftest_alloc(struct binder_alloc *alloc)
{
	size_t end_offset[BUFFER_NUM];

	if (!binder_selftest_run)
		return;
	mutex_lock(&binder_selftest_lock);
	if (!binder_selftest_run || !alloc->vma)
		goto done;
	pr_info("STARTED\n");
	binder_selftest_alloc_offset(alloc, end_offset, 0);
	binder_selftest_run = false;
	if (binder_selftest_failures > 0)
		pr_info("%d tests FAILED\n", binder_selftest_failures);
	else
		pr_info("PASSED\n");

done:
	mutex_unlock(&binder_selftest_lock);
}
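
/*
 * Context: this file is built only when CONFIG_ANDROID_BINDER_IPC_SELFTEST
 * is enabled, and the driver is expected to call binder_selftest_alloc() on
 * a proc whose buffer area has already been mmapped; if no mapping exists
 * (!alloc->vma) the test bails out without clearing binder_selftest_run and
 * will retry on a later caller.
 */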