benchmarking: moved frontend calls and the sending of postfixExprs+vars outside to drastically reduce the amount of calculations

Author: Daniel
Date: 2025-05-17 18:32:04 +02:00
Parent: 88ee8d20bd
Commit: a5518dd63e

8 changed files with 79 additions and 63 deletions
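The gist of the change: converting each expression to postfix (the frontend call) and transferring the postfix expressions and the variable matrix to the GPU are no longer done inside interpret; the caller performs them once up front and only passes the device arrays plus a few precomputed sizes. A rough sketch of that one-time setup, pieced together from the removed lines in the diff below (the surrounding benchmark harness is not part of this excerpt and is assumed):

# one-time frontend + transfer work, done outside the benchmarked/repeated call
exprs = Vector{ExpressionProcessing.PostfixType}(undef, length(expressions))
@inbounds for i in eachindex(expressions)
    exprs[i] = ExpressionProcessing.expr_to_postfix(expressions[i])
end
cudaExprs = Utils.create_cuda_array(exprs, ExpressionElement(EMPTY, 0)) # one column per expression
cudaVars = CuArray(variables) # one column per variable set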


@@ -8,31 +8,25 @@ export interpret
"Interprets the given expressions with the values provided.
# Arguments
- expressions::Vector{ExpressionProcessing.PostfixType} : The expressions to execute in postfix form
- variables::Matrix{Float32} : The variables to use. Each column is mapped to the variables x1..xn
- cudaExprs::CuArray{ExpressionProcessing.PostfixType} : The expressions to execute in postfix form, already sent to the GPU. The type annotation is omitted from the signature because the concrete type of a created CuArray{ExpressionProcessing.PostfixType} is considerably more verbose
- cudaVars::CuArray{Float32} : The variables to use. Each column is mapped to the variables x1..xn. The type annotation is omitted for the same reason as for cudaExprs
- parameters::Vector{Vector{Float32}} : The parameters to use. Each Vector contains the values for the parameters p1..pn. The number of parameters can be different for every expression
- kwparam ```frontendCache```: The cache that stores the (partial) results of the frontend
"
function interpret(expressions::Vector{Expr}, variables::Matrix{Float32}, parameters::Vector{Vector{Float32}})::Matrix{Float32}
exprs = Vector{ExpressionProcessing.PostfixType}(undef, length(expressions))
@inbounds for i in eachindex(expressions)
exprs[i] = ExpressionProcessing.expr_to_postfix(expressions[i])
end
function interpret(cudaExprs, numExprs::Integer, exprsInnerLength::Integer,
cudaVars, variableColumns::Integer, variableRows::Integer, parameters::Vector{Vector{Float32}})::Matrix{Float32}
variableCols = size(variables, 2) # number of variable sets to use for each expression
cudaVars = CuArray(variables)
cudaParams = Utils.create_cuda_array(parameters, NaN32) # column corresponds to data for one expression
cudaExprs = Utils.create_cuda_array(exprs, ExpressionElement(EMPTY, 0)) # column corresponds to data for one expression;
# put into a separate CuArray, as this is static and would be inefficient to send separately to each kernel
cudaStepsize = CuArray([Utils.get_max_inner_length(exprs), Utils.get_max_inner_length(parameters), size(variables, 1)]) # max num of values per expression; max num of parameters per expression; number of variables per expression
cudaStepsize = CuArray([exprsInnerLength, Utils.get_max_inner_length(parameters), variableRows]) # max num of values per expression; max num of parameters per expression; number of variables per expression
# each expression has nr. of variable sets (nr. of columns of the variables) results and there are n expressions
cudaResults = CuArray{Float32}(undef, variableCols, length(exprs))
cudaResults = CuArray{Float32}(undef, variableColumns, numExprs)
# Start kernel for each expression to ensure that no warp is working on different expressions
@inbounds Threads.@threads for i in eachindex(exprs)
numThreads = min(variableCols, 256)
numBlocks = cld(variableCols, numThreads)
@inbounds Threads.@threads for i in 1:numExprs # multithreaded to speed up dispatching (seems to have improved performance)
numThreads = min(variableColumns, 256)
numBlocks = cld(variableColumns, numThreads)
@cuda threads=numThreads blocks=numBlocks fastmath=true interpret_expression(cudaExprs, cudaVars, cudaParams, cudaResults, cudaStepsize, i)
end
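With the postfix expressions and variables already resident on the GPU, each call to the refactored interpret only allocates the parameter and result buffers and launches the kernels. A minimal sketch of the new call site, assuming the one-time setup shown above; the local variable names are illustrative and the counts mirror the new signature:

numExprs = length(exprs)
exprsInnerLength = Utils.get_max_inner_length(exprs)
variableColumns = size(variables, 2) # number of variable sets
variableRows = size(variables, 1) # number of variables per set

# only this call needs to be repeated (e.g. per benchmark sample); cudaExprs and cudaVars are reused as-is
results = interpret(cudaExprs, numExprs, exprsInnerLength,
    cudaVars, variableColumns, variableRows, parameters)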