benchmarking: changes to not fill up all of the RAM

Daniel 2025-05-10 15:10:26 +02:00
parent 2ba1fef5ba
commit 5b31fbb270
3 changed files with 30 additions and 20 deletions

@@ -27,6 +27,13 @@ NOTE: This function is not thread safe, especially cache access is not thread safe
function expr_to_postfix(expression::Expr)::PostfixType
expr = expression
if expression.head === :->
if typeof(expression.args[2]) == Float64
println()
println("Expression: $expression")
println("Expr: $expr")
println()
dump(expression; maxdepth=10)
end
# if the expression equals (x, p) -> (...) then the below statement extracts the expression to evaluate
if expression.args[2].head == :block # expressions that are not generated with the parser (./test/parser.jl) contain this extra "block" node, which needs to be skipped
expr = expression.args[2].args[2]
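For context, a minimal sketch (not part of this diff) of the AST shape of a quoted lambda like the one the comment above describes, which is why the body is reached via args[2].args[2]:

ex = :((x, p) -> x[1] + p[1])
ex.head             # :->
ex.args[2].head     # :block, the extra node mentioned in the comment
ex.args[2].args[1]  # a LineNumberNode inserted by the parser
ex.args[2].args[2]  # :(x[1] + p[1]), the expression that actually gets evaluated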
@@ -41,6 +48,8 @@ function expr_to_postfix(expression::Expr)::PostfixType
postfix = PostfixType()
# Special handling in the case where the expression is an array access
# This can happen if the token is a variable/parameter of the form x[n]/p[n]
if expr.head == :ref
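Likewise, a short sketch (not part of the diff) of why the :ref branch catches variable/parameter accesses of the form x[n] or p[n]:

ex = :(x[1])
ex.head   # :ref, the head Julia uses for indexing expressions
ex.args   # Any[:x, 1], the symbol being indexed and the index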

@@ -15,7 +15,7 @@ const Operand = Union{Float32, String} # Operand is either a fixed value or a register
function evaluate(expressions::Vector{Expr}, variables::Matrix{Float32}, parameters::Vector{Vector{Float32}})::Matrix{Float32}
varRows = size(variables, 1)
variableCols = size(variables, 2)
kernels = Vector{CuFunction}(undef, length(expressions))
# kernels = Vector{CuFunction}(undef, length(expressions))
# TODO: test this again with multiple threads. The first time I tried, I was using only one thread
# Test this parallel version again when doing performance tests. With the simple "functionality" tests, this took 0.03 seconds while the sequential version took 0.00009 seconds
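As a purely hypothetical illustration of the parallel variant mentioned in the TODO above (not part of this commit; run_expression is a made-up stand-in for the per-expression transpile-compile-launch work done in the loop further down):

# assumes Julia was started with several threads, e.g. julia -t 8
Threads.@threads for i in eachindex(expressions)
    run_expression(expressions[i])  # hypothetical helper: transpile, compile and launch one kernel
end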
@@ -48,6 +48,16 @@ function evaluate(expressions::Vector{Expr}, variables::Matrix{Float32}, paramet
# @lock cacheLock transpilerCache[expressions[i]] = kernels[i]
# end
cudaVars = CuArray(variables) # maybe put in shared memory (see PerformanceTests.jl for more info)
cudaParams = Utils.create_cuda_array(parameters, NaN32) # maybe make constant (see PerformanceTests.jl for more info)
# each expression has nr. of variable sets (nr. of columns of the variables) results and there are n expressions
cudaResults = CuArray{Float32}(undef, variableCols, length(expressions))
threads = min(variableCols, 256)
blocks = cld(variableCols, threads)
# TODO: Implement batching as a middle ground between "transpile everything, then run" and "transpile one, run one", even though cudacall is async
@inbounds for i in eachindex(expressions)
# if haskey(resultCache, expressions[i])
# kernels[i] = resultCache[expressions[i]]
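A small worked example (not part of the diff) of the launch configuration computed above, assuming 1000 variable sets:

variableCols = 1000
threads = min(variableCols, 256)      # capped at 256 threads per block
blocks  = cld(variableCols, threads)  # cld(1000, 256) == 4 blocks, so 4 * 256 >= 1000 columns are covered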
@@ -64,7 +74,11 @@ function evaluate(expressions::Vector{Expr}, variables::Matrix{Float32}, paramet
image = complete(linker)
mod = CuModule(image)
kernels[i] = CuFunction(mod, "ExpressionProcessing")
compiledKernel = CuFunction(mod, "ExpressionProcessing")
cudacall(compiledKernel, (CuPtr{Float32},CuPtr{Float32},CuPtr{Float32}), cudaVars, cudaParams, cudaResults; threads=threads, blocks=blocks)
# kernels[i] = CuFunction(mod, "ExpressionProcessing")
# resultCache[expressions[i]] = kernels[i]
# catch
# dump(expressions[i]; maxdepth=10)
@@ -78,20 +92,9 @@ function evaluate(expressions::Vector{Expr}, variables::Matrix{Float32}, paramet
end
cudaVars = CuArray(variables) # maybe put in shared memory (see PerformanceTests.jl for more info)
cudaParams = Utils.create_cuda_array(parameters, NaN32) # maybe make constant (see PerformanceTests.jl for more info)
# each expression has nr. of variable sets (nr. of columns of the variables) results and there are n expressions
cudaResults = CuArray{Float32}(undef, variableCols, length(expressions))
# execute each kernel (also try doing this with Threads.@threads. Since we can have multiple grids, this might improve performance)
for kernel in kernels
# config = launch_configuration(kernels[i])
threads = min(variableCols, 256)
blocks = cld(variableCols, threads)
cudacall(kernel, (CuPtr{Float32},CuPtr{Float32},CuPtr{Float32}), cudaVars, cudaParams, cudaResults; threads=threads, blocks=blocks)
end
# for kernel in kernels
# cudacall(kernel, (CuPtr{Float32},CuPtr{Float32},CuPtr{Float32}), cudaVars, cudaParams, cudaResults; threads=threads, blocks=blocks)
# end
return cudaResults
end
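The net effect of the two hunks above, sketched here (not the literal diff; linker is prepared earlier in the loop as in the surrounding code): each kernel is launched right after it is compiled instead of being collected in a Vector{CuFunction} and launched in a second loop, so the compiled modules can be freed per iteration rather than all being held in memory at once.

@inbounds for i in eachindex(expressions)
    image = complete(linker)  # link the transpiled PTX for expression i
    mod = CuModule(image)
    compiledKernel = CuFunction(mod, "ExpressionProcessing")
    cudacall(compiledKernel, (CuPtr{Float32}, CuPtr{Float32}, CuPtr{Float32}),
             cudaVars, cudaParams, cudaResults; threads=threads, blocks=blocks)
    # compiledKernel and mod become unreachable after each iteration, so they no longer accumulate in RAM
end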

@@ -63,15 +63,13 @@ if compareWithCPU
end
# cacheInterpreter = Dict{Expr, PostfixType}()
suite["GPUI"]["nikuradse_1"] = @benchmarkable interpret_gpu(exprs, X_t, parameters; repetitions=expr_reps)
# suite["GPUI"]["nikuradse_1"] = @benchmarkable interpret_gpu(exprs, X_t, parameters; repetitions=expr_reps)
# cacheTranspilerFront = Dict{Expr, PostfixType}()
# cacheTranspilerRes = Dict{Expr, CuFunction}()
suite["GPUT"]["nikuradse_1"] = @benchmarkable evaluate_gpu(exprs, X_t, parameters; repetitions=expr_reps)
for i in 1:1
tune!(suite)
end
BenchmarkTools.save("params.json", params(suite))
throw("finished tuning")
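Since the run above only saves the tuned parameters and then aborts, a later benchmark run would typically reload them via the standard BenchmarkTools API, roughly like this (a sketch, not part of this commit):

using BenchmarkTools
loadparams!(suite, BenchmarkTools.load("params.json")[1], :evals, :samples)
results = run(suite; verbose=true)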