benchmarking: fixed bugs introduced by modification of transpiler
@@ -61,14 +61,13 @@ function evaluate_gpu(expressions::Vector{Expr}, X::Matrix{Float32}, p::Vector{V
     @inbounds Threads.@threads for i in eachindex(expressions)
         ex = ExpressionProcessing.expr_to_postfix(expressions[i])
         ptxKernels[i] = Transpiler.transpile(ex, variableSetSize, largestParameterSetSize, numVariableSets, i-1, kernelName) # i-1 because julia is 1-based but PTX needs 0-based indexing
-        # compiledKernels[i] = Transpiler.CompileKernel(ptxKernel, kernelName)
     end
 
-    results = Matrix{Float32}(undef, numVariableSets, length(exprs))
+    results = Matrix{Float32}(undef, numVariableSets, length(expressions))
     for i in 1:repetitions # Simulate parameter tuning -> local search (X remains the same, p gets changed in small steps and must be performed sequentially, which it is with this impl)
         # evaluate
         # results = Transpiler.evaluate(exprs, variables, numVariableSets, variableSetSize, p)
-        results = Transpiler.evaluate(ptxKernels, variables, variableSetSize, p, kernelName)
+        results = Transpiler.evaluate(ptxKernels, variables, numVariableSets, p, kernelName)
     end
 
     return results
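The two fixes in this hunk come down to naming and shape: the result matrix needs one column per entry of `expressions` (there is no `exprs` variable in this function), and the simplified evaluate expects the number of variable sets, not the size of a single set. A minimal sketch of that distinction; the concrete sizes are assumptions, only the variable names come from the diff:

# Hypothetical sizes, for illustration only.
variableSetSize = 5      # variables per set (rows of X)
numVariableSets = 1000   # variable sets to evaluate (columns of X)
numExpressions  = 8      # one transpiled PTX kernel per expression

# One result per (variable set, expression) pair:
results = Matrix{Float32}(undef, numVariableSets, numExpressions)
@assert size(results) == (1000, 8)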
@@ -33,20 +33,20 @@ function evaluate(expressions::Vector{ExpressionProcessing.PostfixType}, cudaVar
 end
 
 "
-A simplified version of the evaluate function. It takes a list of already compiled kernels to be executed. This should yield better performance, where the same expressions should be evaluated multiple times i.e. for parameter optimisation.
+A simplified version of the evaluate function. It takes a list of already transpiled kernels to be executed. This should yield better performance, where the same expressions should be evaluated multiple times i.e. for parameter optimisation.
 "
 function evaluate(kernels::Vector{String}, cudaVars::CuArray{Float32}, nrOfVariableSets::Integer, parameters::Vector{Vector{Float32}}, kernelName::String)::Matrix{Float32}
 
     cudaParams = Utils.create_cuda_array(parameters, NaN32) # maybe make constant (see PerformanceTests.jl for more info)
 
     # each expression has nr. of variable sets (nr. of columns of the variables) results and there are n expressions
-    cudaResults = CuArray{Float32}(undef, nrOfVariableSets, length(expressions))
+    cudaResults = CuArray{Float32}(undef, nrOfVariableSets, length(kernels))
 
     threads = min(nrOfVariableSets, 256)
     blocks = cld(nrOfVariableSets, threads)
 
     @inbounds Threads.@threads for i in eachindex(kernels)
-        compiledKernel = CompileKernel(kernel[i], kernelName)
+        compiledKernel = CompileKernel(kernels[i], kernelName)
         cudacall(compiledKernel, (CuPtr{Float32},CuPtr{Float32},CuPtr{Float32}), cudaVars, cudaParams, cudaResults; threads=threads, blocks=blocks)
     end
 
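The simplified evaluate takes PTX source strings that were transpiled up front, compiles each one, and launches it with cudacall, so repeated evaluations skip re-transpilation. A hedged usage sketch follows; the sizes, the data, and the kernelName value are assumptions, only the evaluate signature comes from the diff above:

using CUDA

# `ptxKernels` is assumed to have been produced by the Transpiler.transpile loop
# in the first hunk (one PTX string per expression).
nrOfVariableSets = 1000
cudaVars   = CuArray(rand(Float32, 5, nrOfVariableSets))        # variables already resident on the GPU
parameters = [rand(Float32, 3) for _ in eachindex(ptxKernels)]  # one parameter vector per kernel
kernelName = "ExpressionKernel"                                  # assumed to match the name used at transpile time

results = Transpiler.evaluate(ptxKernels, cudaVars, nrOfVariableSets, parameters, kernelName)
# Expected shape: one row per variable set, one column per kernel, matching cudaResults above.
@assert size(results) == (nrOfVariableSets, length(ptxKernels))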