1 // SPDX-License-Identifier: GPL-2.0-or-later
3 #include <linux/init.h>
4 #include <linux/log2.h>
5 #include <kunit/test.h>
7 #include <asm/guest-state-buffer.h>
/*
 * Allocate a guest state buffer and sanity-check its initial geometry:
 * header allocated, capacity rounded up to a power of two, and length
 * covering only the leading element-count word.
 */
static void test_creating_buffer(struct kunit *test)
	struct kvmppc_gs_buff *gsb;

	/* NOTE(review): 'size' is declared on a line elided from this view */
	gsb = kvmppc_gsb_new(size, 0, 0, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);

	/* the header must be allocated alongside the buffer */
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb->hdr);

	/* capacity is rounded up to the next power of two of the request */
	KUNIT_EXPECT_EQ(test, gsb->capacity, roundup_pow_of_two(size));
	/* an empty buffer holds only the __be32 element count */
	KUNIT_EXPECT_EQ(test, gsb->len, sizeof(__be32));
/*
 * Exercise element insertion: one raw element via the low-level
 * __kvmppc_gse_put(), then the typed wrappers (u64, vector128), and
 * verify identity, length and payload of each element by iterating the
 * buffer.  Finally reset the buffer and check it is empty again.
 */
static void test_adding_element(struct kunit *test)
	const struct kvmppc_gs_elem *head, *curr;
	struct kvmppc_gs_buff *gsb;

	gsb = kvmppc_gsb_new(size, 0, 0, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);

	/* Single elements, direct use of __kvmppc_gse_put() */
	rc = __kvmppc_gse_put(gsb, KVMPPC_GSID_GPR(0), 8, &data);
	KUNIT_EXPECT_GE(test, rc, 0);

	/* the first element sits right after the buffer header */
	head = kvmppc_gsb_data(gsb);
	KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(head), KVMPPC_GSID_GPR(0));
	KUNIT_EXPECT_EQ(test, kvmppc_gse_len(head), 8);

	/* the payload must round-trip unchanged */
	memcpy(&data, kvmppc_gse_data(head), 8);
	KUNIT_EXPECT_EQ(test, data, 0xdeadbeef);

	/* Multiple elements, simple wrapper */
	rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(1), 0xcafef00d);
	KUNIT_EXPECT_GE(test, rc, 0);

	rc = kvmppc_gse_put_vector128(gsb, KVMPPC_GSID_VSRS(0), &u.v);
	KUNIT_EXPECT_GE(test, rc, 0);

	/*
	 * Walk every element checking iden/len/value per insertion order.
	 * NOTE(review): the per-element dispatch scaffolding appears elided
	 * from this view.
	 */
	kvmppc_gsb_for_each_elem(i, curr, gsb, rem) {
		KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(curr),
		KUNIT_EXPECT_EQ(test, kvmppc_gse_len(curr), 8);
		KUNIT_EXPECT_EQ(test, kvmppc_gse_get_be64(curr),
		KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(curr),
		KUNIT_EXPECT_EQ(test, kvmppc_gse_len(curr), 8);
		KUNIT_EXPECT_EQ(test, kvmppc_gse_get_u64(curr),
		KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(curr),
		KUNIT_EXPECT_EQ(test, kvmppc_gse_len(curr), 16);
		/* vector payload reads back as the two doublewords written */
		kvmppc_gse_get_vector128(curr, &u.v);
		KUNIT_EXPECT_EQ(test, u.dw[0], 0x1);
		KUNIT_EXPECT_EQ(test, u.dw[1], 0x2);
	/* three elements were inserted in total */
	KUNIT_EXPECT_EQ(test, i, 3);

	/* reset drops every element but keeps the header */
	kvmppc_gsb_reset(gsb);
	KUNIT_EXPECT_EQ(test, kvmppc_gsb_nelems(gsb), 0);
	KUNIT_EXPECT_EQ(test, kvmppc_gsb_len(gsb),
			sizeof(struct kvmppc_gs_header));
/*
 * Serialise a single u64 element into a buffer, parse the buffer back
 * and look the element up by identity, checking the value round-trips.
 */
static void test_gs_parsing(struct kunit *test)
	struct kvmppc_gs_elem *gse;
	struct kvmppc_gs_parser gsp = { 0 };
	struct kvmppc_gs_buff *gsb;
	size_t size = 0x1000;

	gsb = kvmppc_gsb_new(size, 0, 0, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);

	tmp1 = 0xdeadbeefull;
	kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(0), tmp1);

	KUNIT_EXPECT_GE(test, kvmppc_gse_parse(&gsp, gsb), 0);

	/* the parser indexes parsed elements by identity */
	gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_GPR(0));
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gse);

	tmp2 = kvmppc_gse_get_u64(gse);
	KUNIT_EXPECT_EQ(test, tmp2, 0xdeadbeefull);

	kvmppc_gsb_free(gsb);
/*
 * Set/test/clear every guest state identity across each contiguous
 * identity range, mirroring each set into gsbm1.  Afterwards copy gsbm1
 * into gsbm2 via the bitmap iterator and check both the iteration count
 * and the raw bitmap contents agree.
 */
static void test_gs_bitmap(struct kunit *test)
	struct kvmppc_gs_bitmap gsbm = { 0 };
	struct kvmppc_gs_bitmap gsbm1 = { 0 };
	struct kvmppc_gs_bitmap gsbm2 = { 0 };

	/* identities in the HOST_STATE_SIZE..PROCESS_TABLE range */
	for (u16 iden = KVMPPC_GSID_HOST_STATE_SIZE;
	     iden <= KVMPPC_GSID_PROCESS_TABLE; iden++) {
		kvmppc_gsbm_set(&gsbm, iden);
		kvmppc_gsbm_set(&gsbm1, iden);
		KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
		kvmppc_gsbm_clear(&gsbm, iden);
		KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));

	/* identities in the RUN_INPUT..VPA range */
	for (u16 iden = KVMPPC_GSID_RUN_INPUT; iden <= KVMPPC_GSID_VPA;
		kvmppc_gsbm_set(&gsbm, iden);
		kvmppc_gsbm_set(&gsbm1, iden);
		KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
		kvmppc_gsbm_clear(&gsbm, iden);
		KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));

	/* identities in the GPR(0)..CTRL range */
	for (u16 iden = KVMPPC_GSID_GPR(0); iden <= KVMPPC_GSID_CTRL; iden++) {
		kvmppc_gsbm_set(&gsbm, iden);
		kvmppc_gsbm_set(&gsbm1, iden);
		KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
		kvmppc_gsbm_clear(&gsbm, iden);
		KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));

	/* identities in the CR..PSPB range */
	for (u16 iden = KVMPPC_GSID_CR; iden <= KVMPPC_GSID_PSPB; iden++) {
		kvmppc_gsbm_set(&gsbm, iden);
		kvmppc_gsbm_set(&gsbm1, iden);
		KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
		kvmppc_gsbm_clear(&gsbm, iden);
		KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));

	/* the 64 vector-scalar register identities */
	for (u16 iden = KVMPPC_GSID_VSRS(0); iden <= KVMPPC_GSID_VSRS(63);
		kvmppc_gsbm_set(&gsbm, iden);
		kvmppc_gsbm_set(&gsbm1, iden);
		KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
		kvmppc_gsbm_clear(&gsbm, iden);
		KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));

	/* identities in the HDAR..ASDR range */
	for (u16 iden = KVMPPC_GSID_HDAR; iden <= KVMPPC_GSID_ASDR; iden++) {
		kvmppc_gsbm_set(&gsbm, iden);
		kvmppc_gsbm_set(&gsbm1, iden);
		KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
		kvmppc_gsbm_clear(&gsbm, iden);
		KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));

	/* copy gsbm1 into gsbm2 through the iterator */
	kvmppc_gsbm_for_each(&gsbm1, iden)
		kvmppc_gsbm_set(&gsbm2, iden);

	/* iteration count and raw bitmap contents must both match */
	KUNIT_EXPECT_EQ(test, i, j);
	KUNIT_EXPECT_MEMEQ(test, &gsbm1, &gsbm2, sizeof(gsbm1));
/*
 * Backing data for the test1 message ops; one field per identity the
 * message carries.  NOTE(review): fields 'a' (u64, GPR(0)) and
 * 'b' (u32, CR) appear elided from this view — test1_fill_info() and
 * test1_refresh_info() access them with those widths.
 */
struct kvmppc_gs_msg_test1_data {
	struct kvmppc_gs_part_table c;	/* KVMPPC_GSID_PARTITION_TABLE */
	struct kvmppc_gs_proc_table d;	/* KVMPPC_GSID_PROCESS_TABLE */
	struct kvmppc_gs_buff_info e;	/* KVMPPC_GSID_RUN_INPUT */
/*
 * ->get_size() op: sum the total serialised size of every identity this
 * message can carry.
 */
static size_t test1_get_size(struct kvmppc_gs_msg *gsm)
	KVMPPC_GSID_PARTITION_TABLE,
	KVMPPC_GSID_PROCESS_TABLE,
	KVMPPC_GSID_RUN_INPUT,

	/* element overhead included via kvmppc_gse_total_size() */
	for (int i = 0; i < ARRAY_SIZE(ids); i++)
		size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i]));
225 static int test1_fill_info(struct kvmppc_gs_buff *gsb,
226 struct kvmppc_gs_msg *gsm)
228 struct kvmppc_gs_msg_test1_data *data = gsm->data;
230 if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_GPR(0)))
231 kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(0), data->a);
233 if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_CR))
234 kvmppc_gse_put_u32(gsb, KVMPPC_GSID_CR, data->b);
236 if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_PARTITION_TABLE))
237 kvmppc_gse_put_part_table(gsb, KVMPPC_GSID_PARTITION_TABLE,
240 if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_PROCESS_TABLE))
241 kvmppc_gse_put_proc_table(gsb, KVMPPC_GSID_PARTITION_TABLE,
244 if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_INPUT))
245 kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_INPUT, data->e);
/*
 * ->refresh_info() op: parse the buffer and pull the scalar values back
 * into the test1 backing data (u64 from GPR(0), u32 from CR).
 */
static int test1_refresh_info(struct kvmppc_gs_msg *gsm,
			      struct kvmppc_gs_buff *gsb)
	struct kvmppc_gs_parser gsp = { 0 };
	struct kvmppc_gs_msg_test1_data *data = gsm->data;
	struct kvmppc_gs_elem *gse;

	rc = kvmppc_gse_parse(&gsp, gsb);

	gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_GPR(0));
	data->a = kvmppc_gse_get_u64(gse);

	gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_CR);
	data->b = kvmppc_gse_get_u32(gse);
/* Message ops handed to kvmppc_gsm_new() by test_gs_msg() below */
static struct kvmppc_gs_msg_ops gs_msg_test1_ops = {
	.get_size = test1_get_size,
	.fill_info = test1_fill_info,
	.refresh_info = test1_refresh_info,
/*
 * End-to-end message test: build a message with the test1 ops, include
 * the five identities, fill a buffer from the data, zero the data, and
 * refresh it back out of the buffer.
 */
static void test_gs_msg(struct kunit *test)
	struct kvmppc_gs_msg_test1_data test1_data = {
	struct kvmppc_gs_msg *gsm;
	struct kvmppc_gs_buff *gsb;

	gsm = kvmppc_gsm_new(&gs_msg_test1_ops, &test1_data, GSM_SEND,
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsm);

	/* buffer sized from the message's ->get_size() op */
	gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), 0, 0, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);

	kvmppc_gsm_include(gsm, KVMPPC_GSID_PARTITION_TABLE);
	kvmppc_gsm_include(gsm, KVMPPC_GSID_PROCESS_TABLE);
	kvmppc_gsm_include(gsm, KVMPPC_GSID_RUN_INPUT);
	kvmppc_gsm_include(gsm, KVMPPC_GSID_GPR(0));
	kvmppc_gsm_include(gsm, KVMPPC_GSID_CR);

	kvmppc_gsm_fill_info(gsm, gsb);

	/* wipe the source so refresh must really come from the buffer */
	memset(&test1_data, 0, sizeof(test1_data));

	kvmppc_gsm_refresh_info(gsm, gsb);
	KUNIT_EXPECT_EQ(test, test1_data.a, 0xdeadbeef);
	KUNIT_EXPECT_EQ(test, test1_data.b, 0x1);

	/*
	 * NOTE(review): no kvmppc_gsb_free(gsb) in the visible lines, unlike
	 * the other tests — confirm it isn't elided, else gsb leaks here.
	 */
	kvmppc_gsm_free(gsm);
/* Test cases registered with the guest_state_buffer suite below */
static struct kunit_case guest_state_buffer_testcases[] = {
	KUNIT_CASE(test_creating_buffer),
	KUNIT_CASE(test_adding_element),
	KUNIT_CASE(test_gs_bitmap),
	KUNIT_CASE(test_gs_parsing),
	KUNIT_CASE(test_gs_msg),
/* KUnit suite definition and registration */
static struct kunit_suite guest_state_buffer_test_suite = {
	.name = "guest_state_buffer_test",
	.test_cases = guest_state_buffer_testcases,

kunit_test_suites(&guest_state_buffer_test_suite);
328 MODULE_LICENSE("GPL");