3 Commits

Author SHA1 Message Date
7c3616a264 benchmarking: added results for transpiler 2025-05-19 09:06:36 +02:00
e03f20006f benchmarking: added results for interpreter after first performance improvement 2025-05-18 10:11:33 +02:00
a5518dd63e benchmarking: moved frontend calls and sending postfixExprs+vars outside to drastically reduce amount of calculations 2025-05-17 18:32:04 +02:00
9 changed files with 274 additions and 256 deletions

View File

@@ -9,6 +9,7 @@ include("Code.jl")
 include("CpuInterpreter.jl")
 end
 using CUDA
+using ..ExpressionProcessing
 export interpret_gpu,interpret_cpu
@@ -22,36 +23,45 @@ export evaluate_gpu
 #
 # Evaluate Expressions on the GPU
-function interpret_gpu(exprs::Vector{Expr}, X::Matrix{Float32}, p::Vector{Vector{Float32}}; repetitions=1)::Matrix{Float32}
-	@assert axes(exprs) == axes(p)
-	ncols = size(X, 2)
+function interpret_gpu(expressions::Vector{Expr}, X::Matrix{Float32}, p::Vector{Vector{Float32}}; repetitions=1)::Matrix{Float32}
+	@assert axes(expressions) == axes(p)
+	variableCols = size(X, 2)
+	variableRows = size(X, 1)
-	results = Matrix{Float32}(undef, ncols, length(exprs))
-	# TODO: create CuArray for variables here already, as they never change
-	#	could/should be done even before calling this, but I guess it would be diminishing returns
-	# TODO: test how this would impact performance, if it gets faster, adapt implementation section
-	# TODO: create CuArray for expressions here already. They also do not change over the course of parameter optimisation and therefore a lot of unnecessary calls to expr_to_postfix can be save (even though a cache is used, this should still be faster)
+	variables = CuArray(X)
+	exprs = Vector{ExpressionProcessing.PostfixType}(undef, length(expressions))
+	@inbounds Threads.@threads for i in eachindex(expressions)
+		exprs[i] = ExpressionProcessing.expr_to_postfix(expressions[i])
+	end
+	cudaExprs = Utils.create_cuda_array(exprs, ExpressionProcessing.ExpressionElement(EMPTY, 0)) # column corresponds to data for one expression;
+	exprsLength = length(exprs)
+	exprsInnerLength = Utils.get_max_inner_length(exprs)
+	results = Matrix{Float32}(undef, variableCols, length(exprs))
 	for i in 1:repetitions # Simulate parameter tuning -> local search (X remains the same, p gets changed in small steps and must be performed sequentially, which it is with this impl)
-		results = Interpreter.interpret(exprs, X, p)
+		results = Interpreter.interpret(cudaExprs, exprsLength, exprsInnerLength, variables, variableCols, variableRows, p)
 	end
 	return results
 end
 # Convert Expressions to PTX Code and execute that instead
-function evaluate_gpu(exprs::Vector{Expr}, X::Matrix{Float32}, p::Vector{Vector{Float32}}; repetitions=1)::Matrix{Float32}
-	@assert axes(exprs) == axes(p)
-	ncols = size(X, 2)
+function evaluate_gpu(expressions::Vector{Expr}, X::Matrix{Float32}, p::Vector{Vector{Float32}}; repetitions=1)::Matrix{Float32}
+	@assert axes(expressions) == axes(p)
+	variableCols = size(X, 2)
+	variableRows = size(X, 1)
-	results = Matrix{Float32}(undef, ncols, length(exprs))
-	# TODO: create CuArray for variables here already, as they never change
-	#	could/should be done even before calling this, but I guess it would be diminishing returns
-	# TODO: test how this would impact performance, if it gets faster, adapt implementation section
-	# TODO: create CuArray for expressions here already. They also do not change over the course of parameter optimisation and therefore a lot of unnecessary calls to expr_to_postfix can be save (even though a cache is used, this should still be faster)
+	variables = CuArray(X)
+	exprs = Vector{ExpressionProcessing.PostfixType}(undef, length(expressions))
+	@inbounds Threads.@threads for i in eachindex(expressions)
+		exprs[i] = ExpressionProcessing.expr_to_postfix(expressions[i])
+	end
+	results = Matrix{Float32}(undef, variableCols, length(exprs))
 	for i in 1:repetitions # Simulate parameter tuning -> local search (X remains the same, p gets changed in small steps and must be performed sequentially, which it is with this impl)
-		results = Transpiler.evaluate(exprs, X, p)
+		results = Transpiler.evaluate(exprs, variables, variableCols, variableRows, p)
 	end
 	return results
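
For reference, a minimal usage sketch of the refactored entry points (the expression list, X, and p below are illustrative, not taken from the repository; shapes follow the code above: one row of X per variable x1..xn, one column per variable set, one parameter vector per expression):

    # Sketch only: module setup is assumed; interpret_gpu is the export shown above.
    exprs = [:(x1 + 5 * p1), :(exp(x2) - p1)]   # two expressions
    X = rand(Float32, 2, 1000)                  # 2 variables, 1000 variable sets
    p = [rand(Float32, 1), rand(Float32, 1)]    # one parameter vector per expression

    # Frontend conversion and the CuArray transfer of X now happen once per call,
    # outside the repetition loop that simulates parameter tuning.
    results = interpret_gpu(exprs, X, p; repetitions=10)   # 1000×2 Matrix{Float32}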

View File

@@ -22,7 +22,6 @@ const PostfixType = Vector{ExpressionElement}
 "
 Converts a julia expression to its postfix notation.
 NOTE: All 64-Bit values will be converted to 32-Bit. Be aware of the lost precision.
-NOTE: This function is not thread save, especially cache access is not thread save
 "
 function expr_to_postfix(expression::Expr)::PostfixType
 	expr = expression
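
For orientation, expr_to_postfix flattens an infix Expr into postfix order; the concrete ExpressionElement encoding is defined earlier in this file, so the following is a conceptual sketch only:

    postfix = expr_to_postfix(:((x1 + 5) * p1))
    # conceptual evaluation order: x1, 5.0f0, +, p1, *
    # the Int64 literal 5 is narrowed to Float32, as the NOTE above warns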

View File

@@ -8,31 +8,25 @@ export interpret
 "Interprets the given expressions with the values provided.
 # Arguments
-- expressions::Vector{ExpressionProcessing.PostfixType} : The expressions to execute in postfix form
-- variables::Matrix{Float32} : The variables to use. Each column is mapped to the variables x1..xn
+- cudaExprs::CuArray{ExpressionProcessing.PostfixType} : The expressions to execute in postfix form and already sent to the GPU. The type information in the signature is missing, because creating a CuArray{ExpressionProcessing.PostfixType} results in a more verbose type definition
+- cudaVars::CuArray{Float32} : The variables to use. Each column is mapped to the variables x1..xn. The type information is missing due to the same reasons as cudaExprs
 - parameters::Vector{Vector{Float32}} : The parameters to use. Each Vector contains the values for the parameters p1..pn. The number of parameters can be different for every expression
-- kwparam ```frontendCache```: The cache that stores the (partial) results of the frontend
 "
-function interpret(expressions::Vector{Expr}, variables::Matrix{Float32}, parameters::Vector{Vector{Float32}})::Matrix{Float32}
-	exprs = Vector{ExpressionProcessing.PostfixType}(undef, length(expressions))
-	@inbounds for i in eachindex(expressions)
-		exprs[i] = ExpressionProcessing.expr_to_postfix(expressions[i])
-	end
+function interpret(cudaExprs, numExprs::Integer, exprsInnerLength::Integer,
+	cudaVars, variableColumns::Integer, variableRows::Integer, parameters::Vector{Vector{Float32}})::Matrix{Float32}
-	variableCols = size(variables, 2) # number of variable sets to use for each expression
-	cudaVars = CuArray(variables)
 	cudaParams = Utils.create_cuda_array(parameters, NaN32) # column corresponds to data for one expression
-	cudaExprs = Utils.create_cuda_array(exprs, ExpressionElement(EMPTY, 0)) # column corresponds to data for one expression;
 	# put into seperate cuArray, as this is static and would be inefficient to send seperatly to each kernel
-	cudaStepsize = CuArray([Utils.get_max_inner_length(exprs), Utils.get_max_inner_length(parameters), size(variables, 1)]) # max num of values per expression; max nam of parameters per expression; number of variables per expression
+	cudaStepsize = CuArray([exprsInnerLength, Utils.get_max_inner_length(parameters), variableRows]) # max num of values per expression; max nam of parameters per expression; number of variables per expression
 	# each expression has nr. of variable sets (nr. of columns of the variables) results and there are n expressions
-	cudaResults = CuArray{Float32}(undef, variableCols, length(exprs))
+	cudaResults = CuArray{Float32}(undef, variableColumns, numExprs)
 	# Start kernel for each expression to ensure that no warp is working on different expressions
-	@inbounds Threads.@threads for i in eachindex(exprs)
-		numThreads = min(variableCols, 256)
-		numBlocks = cld(variableCols, numThreads)
+	@inbounds Threads.@threads for i in 1:numExprs # multithreaded to speedup dispatching (seems to have improved performance)
+		numThreads = min(variableColumns, 256)
+		numBlocks = cld(variableColumns, numThreads)
 		@cuda threads=numThreads blocks=numBlocks fastmath=true interpret_expression(cudaExprs, cudaVars, cudaParams, cudaResults, cudaStepsize, i)
 	end
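
The launch configuration above always covers every variable set: numThreads is capped at 256 and cld rounds the block count up, so numThreads * numBlocks >= variableColumns. A worked check:

    variableColumns = 1000
    numThreads = min(variableColumns, 256)        # 256
    numBlocks  = cld(variableColumns, numThreads) # cld(1000, 256) = 4
    # 256 * 4 = 1024 >= 1000; the surplus threads must return early in the kernel
    @assert numThreads * numBlocks >= variableColumns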

View File

@@ -12,10 +12,7 @@ const Operand = Union{Float32, String} # Operand is either fixed value or regist
-- kwparam ```frontendCache```: The cache that stores the (partial) results of the frontend, to speedup the pre-processing
+- kwparam ```frontendCache```: The cache that stores the result of the transpilation. Useful for parameter optimisation, as the same expression gets executed multiple times
 "
-function evaluate(expressions::Vector{Expr}, variables::Matrix{Float32}, parameters::Vector{Vector{Float32}})::Matrix{Float32}
-	varRows = size(variables, 1)
-	variableCols = size(variables, 2)
-	# kernels = Vector{CuFunction}(undef, length(expressions))
+function evaluate(expressions::Vector{ExpressionProcessing.PostfixType}, cudaVars::CuArray{Float32}, variableColumns::Integer, variableRows::Integer, parameters::Vector{Vector{Float32}})::Matrix{Float32}
 	# TODO: test this again with multiple threads. The first time I tried, I was using only one thread
 	# Test this parallel version again when doing performance tests. With the simple "functionality" tests this took 0.03 seconds while sequential took "0.00009" seconds
@@ -35,7 +32,7 @@ function evaluate(expressions::Vector{Expr}, variables::Matrix{Float32}, paramet
 	# formattedExpr = ExpressionProcessing.expr_to_postfix(expressions[i])
-	# kernel = transpile(formattedExpr, varRows, Utils.get_max_inner_length(parameters), variableCols, i-1) # i-1 because julia is 1-based but PTX needs 0-based indexing
+	# kernel = transpile(formattedExpr, varRows, Utils.get_max_inner_length(parameters), variableColumns, i-1) # i-1 because julia is 1-based but PTX needs 0-based indexing
 	# linker = CuLink()
 	# add_data!(linker, "ExpressionProcessing", kernel)
@@ -48,14 +45,13 @@ function evaluate(expressions::Vector{Expr}, variables::Matrix{Float32}, paramet
 	# 	@lock cacheLock transpilerCache[expressions[i]] = kernels[i]
 	# end
-	cudaVars = CuArray(variables) # maybe put in shared memory (see PerformanceTests.jl for more info)
 	cudaParams = Utils.create_cuda_array(parameters, NaN32) # maybe make constant (see PerformanceTests.jl for more info)
 	# each expression has nr. of variable sets (nr. of columns of the variables) results and there are n expressions
-	cudaResults = CuArray{Float32}(undef, variableCols, length(expressions))
+	cudaResults = CuArray{Float32}(undef, variableColumns, length(expressions))
-	threads = min(variableCols, 256)
-	blocks = cld(variableCols, threads)
+	threads = min(variableColumns, 256)
+	blocks = cld(variableColumns, threads)
 	kernelName = "evaluate_gpu"
 	# TODO: Implement batching as a middleground between "transpile everything and then run" and "tranpile one run one" even though cudacall is async
@@ -65,8 +61,8 @@ function evaluate(expressions::Vector{Expr}, variables::Matrix{Float32}, paramet
 	# 	continue
 	# end
-		formattedExpr = ExpressionProcessing.expr_to_postfix(expressions[i])
-		kernel = transpile(formattedExpr, varRows, Utils.get_max_inner_length(parameters), variableCols, i-1, kernelName) # i-1 because julia is 1-based but PTX needs 0-based indexing
+		# formattedExpr = ExpressionProcessing.expr_to_postfix(expressions[i])
+		kernel = transpile(expressions[i], variableRows, Utils.get_max_inner_length(parameters), variableColumns, i-1, kernelName) # i-1 because julia is 1-based but PTX needs 0-based indexing
 		linker = CuLink()
 		add_data!(linker, kernelName, kernel)
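
The view cuts off after add_data!; with CUDA.jl, finishing the link and launching presumably looks like the following sketch (standard CuLink workflow; the kernel's argument tuple is an assumption here, since its real signature is defined by the transpiler):

    image = complete(linker)             # finish linking the PTX
    mod   = CuModule(image)              # load the linked image as a module
    func  = CuFunction(mod, kernelName)  # look up "evaluate_gpu"
    # argument types below are assumed for illustration only
    cudacall(func, Tuple{CuPtr{Float32}, CuPtr{Float32}, CuPtr{Float32}},
             cudaVars, cudaParams, cudaResults; threads=threads, blocks=blocks)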

View File

@@ -21,8 +21,16 @@ parameters[2][1] = 5.0
 parameters[2][2] = 0.0
 function testHelper(expression::Expr, variables::Matrix{Float32}, parameters::Vector{Vector{Float32}}, expectedResult)
-	exprs = Vector([expression])
-	result = Interpreter.interpret(exprs, variables, parameters)
+	exprs = [ExpressionProcessing.expr_to_postfix(expression)]
+	cudaExprs = Utils.create_cuda_array(exprs, ExpressionProcessing.ExpressionElement(EMPTY, 0))
+	exprsLength = length(exprs)
+	exprsInnerLength = Utils.get_max_inner_length(exprs)
+	X = CuArray(variables)
+	variableCols = size(variables, 2)
+	variableRows = size(variables, 1)
+	result = Interpreter.interpret(cudaExprs, exprsLength, exprsInnerLength, X, variableCols, variableRows, parameters)
 	expectedResult32 = convert(Float32, expectedResult)
 	@test isequal(result[1,1], expectedResult32)
@@ -127,8 +135,16 @@ end
 	expr1 = :((x1 + 5) * p1 - 3 / abs(x2) + (2^4) - log(8))
 	expr2 = :(1 + 5 * x1 - 10^2 + (p1 - p2) / 9 + exp(x2))
-	exprs = Vector([expr1, expr2])
-	result = Interpreter.interpret(exprs, var, param)
+	exprs = [ExpressionProcessing.expr_to_postfix(expr1), ExpressionProcessing.expr_to_postfix(expr2)]
+	cudaExprs = Utils.create_cuda_array(exprs, ExpressionProcessing.ExpressionElement(EMPTY, 0))
+	exprsLength = length(exprs)
+	exprsInnerLength = Utils.get_max_inner_length(exprs)
+	X = CuArray(var)
+	variableCols = size(var, 2)
+	variableRows = size(var, 1)
+	result = Interpreter.interpret(cudaExprs, exprsLength, exprsInnerLength, X, variableCols, variableRows, param)
 	# var set 1
 	@test isapprox(result[1,1], 37.32, atol=0.01) # expr1

View File

@@ -10,6 +10,7 @@ using .ExpressionProcessing
 include("parser.jl") # to parse expressions from a file
 # ATTENTAION: Evaluation information at the very bottom
 const BENCHMARKS_RESULTS_PATH = "./results-fh-new"
+# Number of expressions can get really big (into millions)
@@ -68,7 +69,7 @@ suite["GPUT"]["nikuradse_1"] = @benchmarkable evaluate_gpu(exprs, X_t, parameter
 loadparams!(suite, BenchmarkTools.load("params.json")[1], :samples, :evals, :gctrial, :time_tolerance, :evals_set, :gcsample, :seconds, :overhead, :memory_tolerance)
-results = run(suite, verbose=true, seconds=28800) # 8 hour timeout
+results = run(suite, verbose=true, seconds=43200) # 12 hour timeout
 resultsCPU = BenchmarkTools.load("$BENCHMARKS_RESULTS_PATH/cpu.json")[1]
 if compareWithCPU
@@ -104,7 +105,7 @@ if compareWithCPU
 	println(gpuiVsGPUT_median)
 	println(gpuiVsGPUT_std)
-	BenchmarkTools.save("$BENCHMARKS_RESULTS_PATH/0-initial.json", results)
+	BenchmarkTools.save("$BENCHMARKS_RESULTS_PATH/1-fronted-and-data-transfer-to-ExpressionExecutor.json", results)
 else
-	resultsOld = BenchmarkTools.load("$BENCHMARKS_RESULTS_PATH/3-tuned-blocksize_I128_T96.json")[1]
+	# resultsOld = BenchmarkTools.load("$BENCHMARKS_RESULTS_PATH/3-tuned-blocksize_I128_T96.json")[1]
@@ -139,3 +140,8 @@ else
 	println(oldVsGPUT_std)
 end
+# Initial implementation:
+# - Interpreter: no cache; 256 blocksize; exprs pre-processed and sent to GPU on every call; vars sent on every call; frontend + dispatch are multithreaded
+# - Transpiler: no cahce; 256 blocksize; exprs pre-processed and transpiled on every call; vars sent on every call; frontend + transpilation + dispatch are multithreaded
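
To quantify this run against the saved baseline, the two JSON files named in the script can be compared with standard BenchmarkTools tooling (the times stored in the files are nanoseconds; judge reports the relative change of the medians):

    using BenchmarkTools

    BENCHMARKS_RESULTS_PATH = "./results-fh-new"
    baseline = BenchmarkTools.load("$BENCHMARKS_RESULTS_PATH/0-initial.json")[1]
    current  = BenchmarkTools.load("$BENCHMARKS_RESULTS_PATH/1-fronted-and-data-transfer-to-ExpressionExecutor.json")[1]

    # per-group comparison, e.g. the GPU interpreter benchmark
    j = judge(median(current["GPUI"]["nikuradse_1"]), median(baseline["GPUI"]["nikuradse_1"]))
    println(j)   # :improvement / :invariant / :regression plus the time ratio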

View File

@@ -41,19 +41,15 @@ parameters[2][1] = 5.0
 parameters[2][2] = 0.0
 parameters[3][1] = 16.0
-@testset "TEMP" begin
-	return
-	exprs = [:(x1 + p1)]
-	vars = Matrix{Float32}(undef, 1, 1)
-	params = Vector{Vector{Float32}}(undef, 1)
-	vars[1, 1] = 1
-	params[1] = [1]
-	Transpiler.evaluate(exprs, vars, params)
-end
 @testset "Test transpiler evaluation" begin
-	results = Transpiler.evaluate(expressions, variables, parameters)
+	variableCols = size(variables, 2)
+	variableRows = size(variables, 1)
+	X = CuArray(variables)
+	exprs = [ExpressionProcessing.expr_to_postfix(expressions[1]), ExpressionProcessing.expr_to_postfix(expressions[2]), ExpressionProcessing.expr_to_postfix(expressions[3])]
+	results = Transpiler.evaluate(exprs, X, variableCols, variableRows, parameters)
 	# dump(expressions[3]; maxdepth=10)
 	# Expr 1:

View File

@@ -1,194 +1 @@
[
{
"Julia": "1.11.5",
"BenchmarkTools": {
"major": 1,
"minor": 6,
"patch": 0,
"prerelease": [],
"build": []
}
},
[
[
"BenchmarkGroup",
{
"data": {
"GPUT": [
"BenchmarkGroup",
{
"data": {
"nikuradse_1": [
"Trial",
{
"allocs": 10537236713,
"gctimes": [
6.422630609021e12
],
"memory": 99746249534032,
"params": [
"Parameters",
{
"gctrial": true,
"time_tolerance": 0.05,
"evals_set": false,
"samples": 50,
"evals": 1,
"gcsample": false,
"seconds": 28800.0,
"overhead": 0.0,
"memory_tolerance": 0.01
}
],
"times": [
5.4294504010681e13
]
}
]
},
"tags": [
"GPUTranspiler"
]
}
],
"GPUI": [
"BenchmarkGroup",
{
"data": {
"nikuradse_1": [
"Trial",
{
"allocs": 1825331206,
"gctimes": [
1.8938185191e10,
1.7792800779e10,
1.8160529276e10,
1.7946505031e10,
1.77973843e10,
1.7616008261e10,
1.7620413248e10,
1.768910028e10,
1.772636066e10,
1.7706216778e10,
1.8173891003e10,
1.7667273912e10,
1.7526904901e10,
1.749445276e10,
1.7567194654e10,
1.7649119926e10,
1.7639951452e10,
1.7533807088e10,
1.7517726514e10,
1.7626783198e10,
1.7511788769e10,
1.7492068732e10,
1.7553945009e10,
1.7478083952e10,
1.7437663283e10,
1.7472329594e10,
1.7519969261e10,
1.7519953931e10,
1.7526082936e10,
1.751558218e10,
1.7402059945e10,
1.7250338348e10,
1.7250474046e10,
1.7291033872e10,
1.7551432788e10,
1.7850397239e10,
1.7847877387e10,
1.7447038841e10,
1.754309134e10,
1.7566433958e10,
1.7503437877e10,
1.7647987775e10,
1.7401002748e10,
1.7385713445e10,
1.7385171642e10,
1.7348026466e10,
1.7438744763e10,
1.7309013112e10,
1.7577725655e10,
1.7432755306e10
],
"memory": 115414870368,
"params": [
"Parameters",
{
"gctrial": true,
"time_tolerance": 0.05,
"evals_set": false,
"samples": 50,
"evals": 1,
"gcsample": false,
"seconds": 28800.0,
"overhead": 0.0,
"memory_tolerance": 0.01
}
],
"times": [
5.31951749725e11,
5.31404501757e11,
5.33657147801e11,
5.31489160462e11,
5.30386250505e11,
5.30026023598e11,
5.29887080071e11,
5.34175638749e11,
5.32476620162e11,
5.32276123554e11,
5.43002738488e11,
5.30251592144e11,
5.30190125835e11,
5.28451973319e11,
5.30828202555e11,
5.29236820908e11,
5.3205118374e11,
5.30259980405e11,
5.29369982343e11,
5.29968522607e11,
5.29094509442e11,
5.3023736481e11,
5.3026832017e11,
5.30138026522e11,
5.30291814111e11,
5.28886430445e11,
5.30786719418e11,
5.31872294453e11,
5.29735616869e11,
5.32322531477e11,
5.32945923244e11,
5.28063077052e11,
5.26379810748e11,
5.2904720469e11,
5.33989526381e11,
5.37245240551e11,
5.37790009675e11,
5.30206196299e11,
5.30276314709e11,
5.30385782035e11,
5.29114269928e11,
5.31785585619e11,
5.28768646361e11,
5.27012226469e11,
5.26681637262e11,
5.28646301524e11,
5.27917175176e11,
5.28633753225e11,
5.29807712794e11,
5.27063144055e11
]
}
]
},
"tags": [
"GPUInterpreter"
]
}
]
},
"tags": []
}
]
]
]
[{"Julia":"1.11.5","BenchmarkTools":{"major":1,"minor":6,"patch":0,"prerelease":[],"build":[]}},[["BenchmarkGroup",{"data":{"GPUT":["BenchmarkGroup",{"data":{},"tags":["GPUTranspiler"]}],"GPUI":["BenchmarkGroup",{"data":{"nikuradse_1":["Trial",{"allocs":1825331206,"gctimes":[1.8938185191e10,1.7792800779e10,1.8160529276e10,1.7946505031e10,1.77973843e10,1.7616008261e10,1.7620413248e10,1.768910028e10,1.772636066e10,1.7706216778e10,1.8173891003e10,1.7667273912e10,1.7526904901e10,1.749445276e10,1.7567194654e10,1.7649119926e10,1.7639951452e10,1.7533807088e10,1.7517726514e10,1.7626783198e10,1.7511788769e10,1.7492068732e10,1.7553945009e10,1.7478083952e10,1.7437663283e10,1.7472329594e10,1.7519969261e10,1.7519953931e10,1.7526082936e10,1.751558218e10,1.7402059945e10,1.7250338348e10,1.7250474046e10,1.7291033872e10,1.7551432788e10,1.7850397239e10,1.7847877387e10,1.7447038841e10,1.754309134e10,1.7566433958e10,1.7503437877e10,1.7647987775e10,1.7401002748e10,1.7385713445e10,1.7385171642e10,1.7348026466e10,1.7438744763e10,1.7309013112e10,1.7577725655e10,1.7432755306e10],"memory":115414870368,"params":["Parameters",{"gctrial":true,"time_tolerance":0.05,"evals_set":false,"samples":50,"evals":1,"gcsample":false,"seconds":28800.0,"overhead":0.0,"memory_tolerance":0.01}],"times":[5.31951749725e11,5.31404501757e11,5.33657147801e11,5.31489160462e11,5.30386250505e11,5.30026023598e11,5.29887080071e11,5.34175638749e11,5.32476620162e11,5.32276123554e11,5.43002738488e11,5.30251592144e11,5.30190125835e11,5.28451973319e11,5.30828202555e11,5.29236820908e11,5.3205118374e11,5.30259980405e11,5.29369982343e11,5.29968522607e11,5.29094509442e11,5.3023736481e11,5.3026832017e11,5.30138026522e11,5.30291814111e11,5.28886430445e11,5.30786719418e11,5.31872294453e11,5.29735616869e11,5.32322531477e11,5.32945923244e11,5.28063077052e11,5.26379810748e11,5.2904720469e11,5.33989526381e11,5.37245240551e11,5.37790009675e11,5.30206196299e11,5.30276314709e11,5.30385782035e11,5.29114269928e11,5.31785585619e11,5.28768646361e11,5.27012226469e11,5.26681637262e11,5.28646301524e11,5.27917175176e11,5.28633753225e11,5.29807712794e11,5.27063144055e11]}]},"tags":["GPUInterpreter"]}]},"tags":[]}]]]

View File

@@ -0,0 +1,194 @@
[
{
"Julia": "1.11.5",
"BenchmarkTools": {
"major": 1,
"minor": 6,
"patch": 0,
"prerelease": [],
"build": []
}
},
[
[
"BenchmarkGroup",
{
"data": {
"GPUT": [
"BenchmarkGroup",
{
"data": {
"nikuradse_1": [
"Trial",
{
"allocs": 9578295211,
"gctimes": [
5.773640884485e12
],
"memory": 99694581250168,
"params": [
"Parameters",
{
"gctrial": true,
"time_tolerance": 0.05,
"evals_set": false,
"samples": 50,
"evals": 1,
"gcsample": false,
"seconds": 43200.0,
"overhead": 0.0,
"memory_tolerance": 0.01
}
],
"times": [
5.1630263257036e13
]
}
]
},
"tags": [
"GPUTranspiler"
]
}
],
"GPUI": [
"BenchmarkGroup",
{
"data": {
"nikuradse_1": [
"Trial",
{
"allocs": 768768117,
"gctimes": [
1.1975019005e10,
7.985238732e9,
1.4256539541e10,
8.877686056e9,
1.4680883881e10,
7.692335492e9,
9.536354709e9,
1.3536376614e10,
1.4238839111e10,
1.9925752838e10,
9.025028453e9,
1.5572506957e10,
1.952938358e10,
1.1815896105e10,
1.3613672963e10,
1.155423324e10,
1.4004956257e10,
8.806173097e9,
8.174429914e9,
1.3263383027e10,
1.0794204698e10,
1.5559450665e10,
1.1655933294e10,
1.0337481053e10,
1.736781041e10,
1.7557373752e10,
1.0408159512e10,
1.9575876788e10,
1.1552463317e10,
1.226612493e10,
1.39046431e10,
1.4741246638e10,
1.3349550404e10,
1.1029748223e10,
1.2336413042e10,
1.8974104972e10,
1.62980404e10,
1.7060266354e10,
1.4275735627e10,
1.1090002413e10,
9.354486934e9,
1.0120009791e10,
1.2904978229e10,
1.9392024576e10,
1.4288312066e10,
9.172039439e9,
1.1963691856e10,
1.7642492412e10,
1.4929130699e10,
1.5905152758e10
],
"memory": 54082719144,
"params": [
"Parameters",
{
"gctrial": true,
"time_tolerance": 0.05,
"evals_set": false,
"samples": 50,
"evals": 1,
"gcsample": false,
"seconds": 43200.0,
"overhead": 0.0,
"memory_tolerance": 0.01
}
],
"times": [
5.14174363969e11,
5.18689077274e11,
5.1025535864e11,
5.10803229124e11,
5.23299818383e11,
5.16455770592e11,
5.02350694438e11,
5.0439224751e11,
5.04269366358e11,
5.06595858959e11,
5.11724089224e11,
5.1262595436e11,
5.03168612131e11,
5.21219083737e11,
5.00099394667e11,
5.11001185335e11,
5.08254610458e11,
5.15228010681e11,
5.1538764885e11,
5.00595179658e11,
5.09523742228e11,
5.09818545112e11,
5.14655215639e11,
5.14933349609e11,
5.0169600001e11,
5.12605187963e11,
5.08668518972e11,
4.99756633692e11,
5.04657100071e11,
4.96300433311e11,
5.02859857609e11,
5.00544153225e11,
5.01888246474e11,
5.10711561485e11,
5.1255887708e11,
5.03690773615e11,
4.98071106526e11,
5.14512763271e11,
5.06840174712e11,
5.18008421655e11,
5.1741870342e11,
5.01369775936e11,
5.08726698998e11,
5.04550273414e11,
5.06774233833e11,
5.16671635611e11,
5.09574401096e11,
5.03123609086e11,
5.11987873937e11,
5.03337347704e11
]
}
]
},
"tags": [
"GPUInterpreter"
]
}
]
},
"tags": []
}
]
]
]