benchmarking: updated benchmarking suite and prepared for taking the benchmarks

Author: Daniel
Date: 2025-05-15 16:25:32 +02:00
parent 3d80ae95e4
commit d7e18f183d
6 changed files with 57 additions and 48 deletions

@@ -50,37 +50,30 @@ expr_reps = 100 # 100 parameter optimisation steps (local search; sequentially;
# Add /usr/local/cuda/bin in .bashrc to PATH to access ncu and nsys (do the tests on FH PCs)
# University setup at 10.20.1.7 and 10.20.1.13
-compareWithCPU = false
+compareWithCPU = true
suite = BenchmarkGroup()
suite["CPU"] = BenchmarkGroup(["CPUInterpreter"])
suite["GPUI"] = BenchmarkGroup(["GPUInterpreter"])
suite["GPUT"] = BenchmarkGroup(["GPUTranspiler"])
if compareWithCPU
suite["CPU"]["nikuradse_1"] = @benchmarkable interpret_cpu(exprs, X, parameters; repetitions=expr_reps)
suite["CPU"]["nikuradse_1_parallel"] = @benchmarkable interpret_cpu(exprs, X, parameters; repetitions=expr_reps, parallel=true)
end
# cacheInterpreter = Dict{Expr, PostfixType}()
suite["GPUI"]["nikuradse_1"] = @benchmarkable interpret_gpu(exprs, X_t, parameters; repetitions=expr_reps)
# cacheTranspilerFront = Dict{Expr, PostfixType}()
# cacheTranspilerRes = Dict{Expr, CuFunction}()
suite["GPUT"]["nikuradse_1"] = @benchmarkable evaluate_gpu(exprs, X_t, parameters; repetitions=expr_reps) # Takes forever. Needs more investigation
suite["GPUT"]["nikuradse_1"] = @benchmarkable evaluate_gpu(exprs, X_t, parameters; repetitions=expr_reps)
-tune!(suite)
-BenchmarkTools.save("params.json", params(suite))
-throw("finished tuning")
+# tune!(suite)
+# BenchmarkTools.save("params.json", params(suite))
loadparams!(suite, BenchmarkTools.load("params.json")[1], :samples, :evals, :gctrial, :time_tolerance, :evals_set, :gcsample, :seconds, :overhead, :memory_tolerance)
-results = run(suite, verbose=true, seconds=3600) # 1 hour because of CPU. lets see if more is needed
+results = run(suite, verbose=true, seconds=28800) # 8 hour timeout
+resultsCPU = BenchmarkTools.load("./results-fh-new/cpu.json")[1]
if compareWithCPU
medianCPU = median(results["CPU"])
stdCPU = std(results["CPU"])
medianCPU = median(resultsCPU["CPU"])
stdCPU = std(resultsCPU["CPU"])
medianInterpreter = median(results["GPUI"])
stdInterpreter = std(results["GPUI"])