benchmarking: added interpreter results

Daniel
2025-05-17 16:07:03 +02:00
parent 47dcc29b33
commit 88ee8d20bd
3 changed files with 3 additions and 3 deletions


@@ -69,7 +69,7 @@ suite["GPUT"]["nikuradse_1"] = @benchmarkable evaluate_gpu(exprs, X_t, parameter
 loadparams!(suite, BenchmarkTools.load("params.json")[1], :samples, :evals, :gctrial, :time_tolerance, :evals_set, :gcsample, :seconds, :overhead, :memory_tolerance)
 results = run(suite, verbose=true, seconds=28800) # 8 hour timeout
-resultsCPU = BenchmarkTools.load("./results-fh-new/cpu.json")[1]
+resultsCPU = BenchmarkTools.load("$BENCHMARKS_RESULTS_PATH/cpu.json")[1]
 if compareWithCPU
 	medianCPU = median(resultsCPU["CPU"])
@@ -104,7 +104,7 @@ if compareWithCPU
 	println(gpuiVsGPUT_median)
 	println(gpuiVsGPUT_std)
-	BenchmarkTools.save("$BENCHMARKS_RESULTS_PATH/0_initial.json", results)
+	BenchmarkTools.save("$BENCHMARKS_RESULTS_PATH/0-initial.json", results)
 else
-	resultsOld = BenchmarkTools.load("$BENCHMARKS_RESULTS_PATH/3-tuned-blocksize_I128_T96.json")[1]
+	# resultsOld = BenchmarkTools.load("$BENCHMARKS_RESULTS_PATH/3-tuned-blocksize_I128_T96.json")[1]
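For context, the change above resolves every result file against a single BENCHMARKS_RESULTS_PATH root instead of the hard-coded ./results-fh-new directory. The following is a minimal sketch of that save/load pattern with BenchmarkTools; the environment-variable lookup and fallback path are illustrative assumptions, not code from this repository.

using BenchmarkTools
using Statistics

# Assumption: the results root is taken from the environment, falling back to the old hard-coded path.
const BENCHMARKS_RESULTS_PATH = get(ENV, "BENCHMARKS_RESULTS_PATH", "./results-fh-new")
mkpath(BENCHMARKS_RESULTS_PATH)

# Tiny stand-in suite; the real suite benchmarks the CPU/GPU expression interpreters.
suite = BenchmarkGroup()
suite["CPU"] = @benchmarkable sum(rand(1000))

results = run(suite, verbose=true)

# Round-trip through JSON: BenchmarkTools.load returns a vector of stored objects, hence the [1].
BenchmarkTools.save("$BENCHMARKS_RESULTS_PATH/0-initial.json", results)
resultsCPU = BenchmarkTools.load("$BENCHMARKS_RESULTS_PATH/0-initial.json")[1]
println(median(resultsCPU["CPU"]))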