Commit 21f5a80

Yonghong Song authored and Alexei Starovoitov committed
selftests/bpf: Cope with 512 bytes limit with bpf_global_percpu_ma
In the previous patch, the maximum data size for bpf_global_percpu_ma
is 512 bytes. This breaks selftest test_bpf_ma. The test is adjusted
in two aspects:
  - Since the maximum allowed data size for bpf_global_percpu_ma is
    512, remove all tests beyond that, namely sizes 1024, 2048 and 4096.
  - Previously the percpu data size was bucket_size - 8 in order to
    avoid a percpu allocation spilling into the next bucket. This patch
    removes that data size adjustment thanks to Patch 1. Also, a better
    way to generate the BTF type is used than adding a member to the
    value struct.

Acked-by: Hou Tao <[email protected]>
Signed-off-by: Yonghong Song <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
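
The "better way to generate the BTF type" mentioned above replaces the unused struct member in the map value with a dummy global pointer declaration: declaring a pointer to the struct is enough to force BTF emission for it (see the reference to commit 5d8d6634ccc in the hunk below), while the map value keeps only the kptr field. A minimal sketch of the idea unrolled for one size, outside the selftest macros; the 16-byte case and the header set are illustrative assumptions, not part of the patch:

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

struct bin_data_16 {
	char data[16 - sizeof(void *)];
};

/* Dummy global pointer: forces BTF emission for struct bin_data_16
 * without bloating the map value with an unused embedded struct.
 */
struct bin_data_16 *__bin_data_16;

struct map_value_16 {
	struct bin_data_16 __kptr * data;
};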
1 parent 5c1a376 · commit 21f5a80

2 files changed: +46, -40 lines

tools/testing/selftests/bpf/prog_tests/test_bpf_ma.c

Lines changed: 13 additions & 7 deletions
@@ -14,7 +14,8 @@ static void do_bpf_ma_test(const char *name)
 	struct test_bpf_ma *skel;
 	struct bpf_program *prog;
 	struct btf *btf;
-	int i, err;
+	int i, err, id;
+	char tname[32];
 
 	skel = test_bpf_ma__open();
 	if (!ASSERT_OK_PTR(skel, "open"))
@@ -25,16 +26,21 @@ static void do_bpf_ma_test(const char *name)
 		goto out;
 
 	for (i = 0; i < ARRAY_SIZE(skel->rodata->data_sizes); i++) {
-		char name[32];
-		int id;
-
-		snprintf(name, sizeof(name), "bin_data_%u", skel->rodata->data_sizes[i]);
-		id = btf__find_by_name_kind(btf, name, BTF_KIND_STRUCT);
-		if (!ASSERT_GT(id, 0, "bin_data"))
+		snprintf(tname, sizeof(tname), "bin_data_%u", skel->rodata->data_sizes[i]);
+		id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
+		if (!ASSERT_GT(id, 0, tname))
 			goto out;
 		skel->rodata->data_btf_ids[i] = id;
 	}
 
+	for (i = 0; i < ARRAY_SIZE(skel->rodata->percpu_data_sizes); i++) {
+		snprintf(tname, sizeof(tname), "percpu_bin_data_%u", skel->rodata->percpu_data_sizes[i]);
+		id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
+		if (!ASSERT_GT(id, 0, tname))
+			goto out;
+		skel->rodata->percpu_data_btf_ids[i] = id;
+	}
+
 	prog = bpf_object__find_program_by_name(skel->obj, name);
 	if (!ASSERT_OK_PTR(prog, "invalid prog name"))
 		goto out;
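
The lookup loops added above run against the object's own BTF before the skeleton is loaded. A minimal, hedged sketch of that pattern as a standalone helper; the function name and error handling are illustrative, not from the patch:

#include <stdio.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>

/* Resolve the BTF id of a named struct in a not-yet-loaded BPF object,
 * the same way do_bpf_ma_test() fills data_btf_ids[] and
 * percpu_data_btf_ids[] before load.
 */
static int find_struct_btf_id(struct bpf_object *obj, const char *tname)
{
	struct btf *btf = bpf_object__btf(obj);
	int id;

	if (!btf)
		return -1;

	id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
	if (id <= 0)
		fprintf(stderr, "struct %s not found in object BTF\n", tname);
	return id;
}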

tools/testing/selftests/bpf/progs/test_bpf_ma.c

Lines changed: 33 additions & 33 deletions
@@ -20,17 +20,20 @@ char _license[] SEC("license") = "GPL";
 const unsigned int data_sizes[] = {16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
 const volatile unsigned int data_btf_ids[ARRAY_SIZE(data_sizes)] = {};
 
+const unsigned int percpu_data_sizes[] = {8, 16, 32, 64, 96, 128, 192, 256, 512};
+const volatile unsigned int percpu_data_btf_ids[ARRAY_SIZE(data_sizes)] = {};
+
 int err = 0;
 u32 pid = 0;
 
 #define DEFINE_ARRAY_WITH_KPTR(_size) \
 	struct bin_data_##_size { \
 		char data[_size - sizeof(void *)]; \
 	}; \
+	/* See Commit 5d8d6634ccc, force btf generation for type bin_data_##_size */ \
+	struct bin_data_##_size *__bin_data_##_size; \
 	struct map_value_##_size { \
 		struct bin_data_##_size __kptr * data; \
-		/* To emit BTF info for bin_data_xx */ \
-		struct bin_data_##_size not_used; \
 	}; \
 	struct { \
 		__uint(type, BPF_MAP_TYPE_ARRAY); \
@@ -40,8 +43,12 @@ u32 pid = 0;
 	} array_##_size SEC(".maps")
 
 #define DEFINE_ARRAY_WITH_PERCPU_KPTR(_size) \
+	struct percpu_bin_data_##_size { \
+		char data[_size]; \
+	}; \
+	struct percpu_bin_data_##_size *__percpu_bin_data_##_size; \
 	struct map_value_percpu_##_size { \
-		struct bin_data_##_size __percpu_kptr * data; \
+		struct percpu_bin_data_##_size __percpu_kptr * data; \
 	}; \
 	struct { \
 		__uint(type, BPF_MAP_TYPE_ARRAY); \
@@ -114,7 +121,7 @@ static __always_inline void batch_percpu_alloc(struct bpf_map *map, unsigned int
 			return;
 		}
 		/* per-cpu allocator may not be able to refill in time */
-		new = bpf_percpu_obj_new_impl(data_btf_ids[idx], NULL);
+		new = bpf_percpu_obj_new_impl(percpu_data_btf_ids[idx], NULL);
 		if (!new)
 			continue;
 
@@ -179,7 +186,7 @@ DEFINE_ARRAY_WITH_KPTR(1024);
 DEFINE_ARRAY_WITH_KPTR(2048);
 DEFINE_ARRAY_WITH_KPTR(4096);
 
-/* per-cpu kptr doesn't support bin_data_8 which is a zero-sized array */
+DEFINE_ARRAY_WITH_PERCPU_KPTR(8);
 DEFINE_ARRAY_WITH_PERCPU_KPTR(16);
 DEFINE_ARRAY_WITH_PERCPU_KPTR(32);
 DEFINE_ARRAY_WITH_PERCPU_KPTR(64);
@@ -188,9 +195,6 @@ DEFINE_ARRAY_WITH_PERCPU_KPTR(128);
 DEFINE_ARRAY_WITH_PERCPU_KPTR(192);
 DEFINE_ARRAY_WITH_PERCPU_KPTR(256);
 DEFINE_ARRAY_WITH_PERCPU_KPTR(512);
-DEFINE_ARRAY_WITH_PERCPU_KPTR(1024);
-DEFINE_ARRAY_WITH_PERCPU_KPTR(2048);
-DEFINE_ARRAY_WITH_PERCPU_KPTR(4096);
 
 SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
 int test_batch_alloc_free(void *ctx)
@@ -246,20 +250,18 @@ int test_batch_percpu_alloc_free(void *ctx)
 	if ((u32)bpf_get_current_pid_tgid() != pid)
 		return 0;
 
-	/* Alloc 128 16-bytes per-cpu objects in batch to trigger refilling,
-	 * then free 128 16-bytes per-cpu objects in batch to trigger freeing.
+	/* Alloc 128 8-bytes per-cpu objects in batch to trigger refilling,
+	 * then free 128 8-bytes per-cpu objects in batch to trigger freeing.
 	 */
-	CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 0);
-	CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 1);
-	CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 2);
-	CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 3);
-	CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 4);
-	CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 5);
-	CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 6);
-	CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 7);
-	CALL_BATCH_PERCPU_ALLOC_FREE(1024, 32, 8);
-	CALL_BATCH_PERCPU_ALLOC_FREE(2048, 16, 9);
-	CALL_BATCH_PERCPU_ALLOC_FREE(4096, 8, 10);
+	CALL_BATCH_PERCPU_ALLOC_FREE(8, 128, 0);
+	CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 1);
+	CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 2);
+	CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 3);
+	CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 4);
+	CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 5);
+	CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6);
+	CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7);
+	CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8);
 
 	return 0;
 }
@@ -270,20 +272,18 @@ int test_percpu_free_through_map_free(void *ctx)
 	if ((u32)bpf_get_current_pid_tgid() != pid)
 		return 0;
 
-	/* Alloc 128 16-bytes per-cpu objects in batch to trigger refilling,
+	/* Alloc 128 8-bytes per-cpu objects in batch to trigger refilling,
 	 * then free these object through map free.
 	 */
-	CALL_BATCH_PERCPU_ALLOC(16, 128, 0);
-	CALL_BATCH_PERCPU_ALLOC(32, 128, 1);
-	CALL_BATCH_PERCPU_ALLOC(64, 128, 2);
-	CALL_BATCH_PERCPU_ALLOC(96, 128, 3);
-	CALL_BATCH_PERCPU_ALLOC(128, 128, 4);
-	CALL_BATCH_PERCPU_ALLOC(192, 128, 5);
-	CALL_BATCH_PERCPU_ALLOC(256, 128, 6);
-	CALL_BATCH_PERCPU_ALLOC(512, 64, 7);
-	CALL_BATCH_PERCPU_ALLOC(1024, 32, 8);
-	CALL_BATCH_PERCPU_ALLOC(2048, 16, 9);
-	CALL_BATCH_PERCPU_ALLOC(4096, 8, 10);
+	CALL_BATCH_PERCPU_ALLOC(8, 128, 0);
+	CALL_BATCH_PERCPU_ALLOC(16, 128, 1);
+	CALL_BATCH_PERCPU_ALLOC(32, 128, 2);
+	CALL_BATCH_PERCPU_ALLOC(64, 128, 3);
+	CALL_BATCH_PERCPU_ALLOC(96, 128, 4);
+	CALL_BATCH_PERCPU_ALLOC(128, 128, 5);
+	CALL_BATCH_PERCPU_ALLOC(192, 128, 6);
+	CALL_BATCH_PERCPU_ALLOC(256, 128, 7);
+	CALL_BATCH_PERCPU_ALLOC(512, 64, 8);
 
 	return 0;
 }
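
To make the macro change concrete, here is roughly what DEFINE_ARRAY_WITH_PERCPU_KPTR(8) expands to after this patch. The map attributes not visible in the hunks above (key/value types, max_entries, and the array_percpu_8 name) are sketched as assumptions:

struct percpu_bin_data_8 {
	char data[8];
};
/* dummy declaration so BTF for struct percpu_bin_data_8 is emitted */
struct percpu_bin_data_8 *__percpu_bin_data_8;

struct map_value_percpu_8 {
	struct percpu_bin_data_8 __percpu_kptr * data;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, int);				/* assumed */
	__type(value, struct map_value_percpu_8);	/* assumed */
	__uint(max_entries, 128);			/* assumed */
} array_percpu_8 SEC(".maps");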
