Skip to content

Commit c75ce08

Browse files
CI benchmarking suite take 2 (#636)
* create simple benchmark suite * more hierarchy in benchmark * create AirspeedVelocity github action for benchmarks * fix imports in benchmark * fix default branch name in CI * benchmarks: more hierarchy * timing + benchmark * benchmarks: include gcbench in runs * benchmarks: remove unused code * benchmarks: modularize * benchmarks: give more time to GC benchmark * benchmarks: simplify naming scheme * benchmarks: avoid issue of `tune`ing away from `evals=1` * use getptr a lot less * ci: update to new ASV * ci: switch to job summary version * ci: bench on PR sha * ci: run on PR to any branch, not just main * ci: run tuned benchmarks * undo changes from merge --------- Co-authored-by: Eric Hanson <[email protected]> Co-authored-by: Christopher Doris <github.com/cjdoris>
1 parent b649be2 commit c75ce08

File tree

4 files changed

+93
-0
lines changed

4 files changed

+93
-0
lines changed

.github/workflows/benchmark_pr.yml

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
name: Benchmark PR
on:
  pull_request:

jobs:
  bench:
    runs-on: ubuntu-latest
    steps:
      - uses: MilesCranmer/AirspeedVelocity.jl@action-v1
        with:
          julia-version: "1"
          tune: "true"
          # Post to "summary" tab of workflow run:
          job-summary: "true"
          # Run benchmark using PR's version of the script:
          bench-on: ${{ github.event.pull_request.head.sha }}

benchmark/Project.toml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
[deps]
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"

benchmark/benchmarks.jl

Lines changed: 62 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
using BenchmarkTools
2+
using PythonCall
3+
using PythonCall: pydel!, pyimport, pydict, pystr, pyrange
4+
5+
# Top-level benchmark suite; populated throughout this file and consumed by
# the AirspeedVelocity/BenchmarkTools harness, which expects the name SUITE.
const SUITE = BenchmarkGroup()
6+
7+
"""
    test_pydict_init()

Fill a fresh Python dict with 1000 entries keyed by stringified indices,
each value being the index plus a random float, relying entirely on GC to
reclaim the intermediate Python objects. Returns the dict.
"""
function test_pydict_init()
    rnd = pyimport("random").random
    d = pydict()
    for k in pyrange(1000)
        d[pystr(k)] = k + rnd()
    end
    return d
end
15+
16+
# Baseline dict-fill benchmark: no manual cleanup of temporaries.
SUITE["basic"]["julia"]["pydict"]["init"] = @benchmarkable test_pydict_init()
17+
18+
"""
    test_pydict_pydel()

Same workload as `test_pydict_init`, but eagerly frees every temporary
Python object with `pydel!` once the dict holds the entry, so the two
benchmarks contrast manual release against GC-driven release. Returns the dict.
"""
function test_pydict_pydel()
    rnd = pyimport("random").random
    d = pydict()
    for i in pyrange(1000)
        key = pystr(i)
        noise = rnd()
        val = i + noise
        d[key] = val
        # Release each temporary as soon as the dict owns the entry.
        pydel!(key)
        pydel!(noise)
        pydel!(val)
        pydel!(i)
    end
    return d
end
33+
34+
# Dict-fill benchmark with explicit pydel! of every temporary.
SUITE["basic"]["julia"]["pydict"]["pydel"] = @benchmarkable test_pydict_pydel()
35+
36+
"""
    test_atpy(::Val{use_pydel})

`@py`-DSL counterpart of the dict-fill benchmarks above. Generated at
compile time from the type parameter: when `use_pydel` is `true`, the loop
body additionally frees the index object via `pydel!` each iteration.
Returns the Python dict built by the `@py` block.
"""
@generated function test_atpy(::Val{use_pydel}) where {use_pydel}
    # Choose the per-iteration cleanup expression up front, then splice it
    # into the @py block; `:(nothing)` is a no-op placeholder.
    cleanup = use_pydel ? :(@jl PythonCall.pydel!(i)) : :(nothing)
    quote
        @py begin
            import random: random
            x = {}
            for i in range(1000)
                x[str(i)] = i + random()
                $cleanup
            end
            x
        end
    end
end
49+
50+
# @py-DSL dict-fill benchmark, no manual cleanup (mirrors the "julia" variant).
SUITE["basic"]["@py"]["pydict"]["init"] = @benchmarkable test_atpy(Val(false))
51+
# @py-DSL dict-fill benchmark with per-iteration pydel! cleanup.
SUITE["basic"]["@py"]["pydict"]["pydel"] = @benchmarkable test_atpy(Val(true))
52+
53+
54+
# Bring in the GC stress helper and register a full-collection benchmark.
include("gcbench.jl")
using .GCBench: append_lots

# Time a full GC sweep. setup first runs a full collection to start from a
# clean slate, then allocates garbage via append_lots; evals=1 keeps each
# measurement to a single sweep, and seconds=30 gives the slow GC runs room.
SUITE["gc"]["full"] = @benchmarkable(
    GC.gc(true),
    setup=(GC.gc(true); append_lots(size=159)),
    seconds=30,
    evals=1,
)

benchmark/gcbench.jl

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
"""
Helpers that allocate large amounts of Python garbage, used by the GC
benchmarks in `benchmarks.jl`.
"""
module GCBench

using PythonCall

"""
    append_lots(; iters=100 * 1024, size=1596)

Build and return a Python list containing `iters` Python lists, each
wrapping `size` random `Float64`s — a quick way to create lots of
collectable Python objects for GC stress testing.
"""
function append_lots(; iters=100 * 1024, size=1596)
    out = pylist()
    for _ in 1:iters
        out.append(pylist(rand(size)))
    end
    return out
end

end # module GCBench

0 commit comments

Comments
 (0)