evaluation: continued with interpreter benchmarking and performance tuning

2025-05-29 15:13:17 +02:00
parent 99a222341d
commit 381a4819c9
13 changed files with 398 additions and 320 deletions


@@ -56,18 +56,19 @@ function evaluate_gpu(expressions::Vector{Expr}, X::Matrix{Float32}, p::Vector{V
 largestParameterSetSize = Utils.get_max_inner_length(p) # parameters get transformed into matrix. Will be nr. of rows in parameter matrix
-ptxKernels = Vector{String}(undef, length(expressions))
+compiledKernels = Vector{CuFunction}(undef, length(expressions))
 kernelName = "evaluate_gpu"
 @inbounds Threads.@threads for i in eachindex(expressions)
 ex = ExpressionProcessing.expr_to_postfix(expressions[i])
-ptxKernels[i] = Transpiler.transpile(ex, variableSetSize, largestParameterSetSize, numVariableSets, i-1, kernelName) # i-1 because julia is 1-based but PTX needs 0-based indexing
+ptxKernel = Transpiler.transpile(ex, variableSetSize, largestParameterSetSize, numVariableSets, i-1, kernelName) # i-1 because julia is 1-based but PTX needs 0-based indexing
+compiledKernels[i] = Transpiler.compile_kernel(ptxKernel, kernelName)
 end
 results = Matrix{Float32}(undef, numVariableSets, length(expressions))
 for i in 1:repetitions # Simulate parameter tuning -> local search (X remains the same, p gets changed in small steps and must be performed sequentially, which it is with this impl)
 # evaluate
 # results = Transpiler.evaluate(exprs, variables, numVariableSets, variableSetSize, p)
-results = Transpiler.evaluate(ptxKernels, variables, numVariableSets, p, kernelName)
+results = Transpiler.evaluate(compiledKernels, variables, numVariableSets, p, kernelName)
 end
 return results
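
The point of this change is the usual compile-once, launch-many pattern: inside the parameter-tuning loop only p changes, so transpilation and PTX JIT compilation are loop-invariant and can be hoisted out of the loop. A minimal, self-contained sketch of the same idea in plain CUDA.jl (illustrative only; the kernel, sizes and names are assumptions and not this repository's Transpiler API):

using CUDA

# Toy kernel: scale a vector by a factor. Stands in for a transpiled expression kernel.
function scale_kernel(out, x, factor)
    i = (blockIdx().x - 1) * blockDim().x + threadIdx().x
    if i <= length(out)
        @inbounds out[i] = x[i] * factor
    end
    return nothing
end

x = CUDA.rand(Float32, 1024)
out = CUDA.zeros(Float32, 1024)
compiled = @cuda launch=false scale_kernel(out, x, 1.0f0)   # compile once, without launching
threads = 256
blocks = cld(length(out), threads)
for factor in (0.5f0, 1.0f0, 2.0f0)                          # stands in for the changing parameters p
    compiled(out, x, factor; threads=threads, blocks=blocks) # reuse the compiled kernel in every iteration
end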


@@ -25,7 +25,7 @@ function evaluate(expressions::Vector{ExpressionProcessing.PostfixType}, cudaVar
 kernelName = "evaluate_gpu"
 @inbounds Threads.@threads for i in eachindex(expressions)
 kernel = transpile(expressions[i], variableRows, Utils.get_max_inner_length(parameters), variableColumns, i-1, kernelName) # i-1 because julia is 1-based but PTX needs 0-based indexing
-compiledKernel = CompileKernel(kernel, kernelName)
+compiledKernel = compile_kernel(kernel, kernelName)
 cudacall(compiledKernel, (CuPtr{Float32},CuPtr{Float32},CuPtr{Float32}), cudaVars, cudaParams, cudaResults; threads=threads, blocks=blocks)
 end
@@ -37,7 +37,7 @@ end
 "
 A simplified version of the evaluate function. It takes a list of already transpiled kernels to be executed. This should yield better performance when the same expressions are evaluated multiple times, e.g. for parameter optimisation.
 "
-function evaluate(kernels::Vector{String}, cudaVars::CuArray{Float32}, nrOfVariableSets::Integer, parameters::Vector{Vector{Float32}}, kernelName::String)::Matrix{Float32}
+function evaluate(kernels::Vector{CuFunction}, cudaVars::CuArray{Float32}, nrOfVariableSets::Integer, parameters::Vector{Vector{Float32}}, kernelName::String)::Matrix{Float32}
 cudaParams = Utils.create_cuda_array(parameters, NaN32) # maybe make constant (see PerformanceTests.jl for more info)
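
From the caller's side, the new signature moves the compile step up front: PTX is compiled once and only the CuFunction handles are passed in. A hedged usage sketch (variable names are illustrative; the calls follow the call sites shown in the first file above):

kernelName = "evaluate_gpu"
# Compile every expression once ...
compiledKernels = [Transpiler.compile_kernel(
                       Transpiler.transpile(ex, variableSetSize, largestParameterSetSize, numVariableSets, i - 1, kernelName),
                       kernelName)
                   for (i, ex) in enumerate(postfixExpressions)]
# ... then re-evaluate as often as needed while only the parameters p change.
results = Transpiler.evaluate(compiledKernels, cudaVars, numVariableSets, p, kernelName)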
@@ -48,14 +48,13 @@ function evaluate(kernels::Vector{String}, cudaVars::CuArray{Float32}, nrOfVaria
 blocks = cld(nrOfVariableSets, threads)
 @inbounds Threads.@threads for i in eachindex(kernels)
-compiledKernel = CompileKernel(kernels[i], kernelName)
-cudacall(compiledKernel, (CuPtr{Float32},CuPtr{Float32},CuPtr{Float32}), cudaVars, cudaParams, cudaResults; threads=threads, blocks=blocks)
+cudacall(kernels[i], (CuPtr{Float32},CuPtr{Float32},CuPtr{Float32}), cudaVars, cudaParams, cudaResults; threads=threads, blocks=blocks)
 end
 return cudaResults
 end
-function CompileKernel(ptxKernel::String, kernelName::String)::CuFunction
+function compile_kernel(ptxKernel::String, kernelName::String)::CuFunction
 linker = CuLink()
 add_data!(linker, kernelName, ptxKernel)
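
The diff cuts off inside compile_kernel, so for orientation only: with CUDA.jl a PTX string is typically turned into a callable CuFunction by completing the link, loading the resulting image as a module, and looking the entry point up by name. A sketch under that assumption (hypothetical helper name; not necessarily the rest of the author's function):

using CUDA

function compile_kernel_sketch(ptxKernel::String, kernelName::String)::CuFunction
    linker = CuLink()
    add_data!(linker, kernelName, ptxKernel)   # register the PTX source under the kernel's name
    image = complete(linker)                   # JIT-link into a loadable binary image
    mod = CuModule(image)                      # load the image as a CUDA module
    return CuFunction(mod, kernelName)         # fetch the entry point, ready for cudacall
end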