diff --git a/package/src/ExpressionExecutorCuda.jl b/package/src/ExpressionExecutorCuda.jl index 81cd408..939cd0c 100644 --- a/package/src/ExpressionExecutorCuda.jl +++ b/package/src/ExpressionExecutorCuda.jl @@ -56,19 +56,19 @@ function evaluate_gpu(expressions::Vector{Expr}, X::Matrix{Float32}, p::Vector{V largestParameterSetSize = Utils.get_max_inner_length(p) # parameters get transformed into matrix. Will be nr. of rows in parameter matrix - compiledKernels = Vector{CuFunction}(undef, length(expressions)) + ptxKernels = Vector{String}(undef, length(expressions)) kernelName = "evaluate_gpu" @inbounds Threads.@threads for i in eachindex(expressions) ex = ExpressionProcessing.expr_to_postfix(expressions[i]) - ptxKernel = Transpiler.transpile(ex, variableSetSize, largestParameterSetSize, numVariableSets, i-1, kernelName) # i-1 because julia is 1-based but PTX needs 0-based indexing - compiledKernels[i] = Transpiler.CompileKernel(ptxKernel, kernelName) + ptxKernels[i] = Transpiler.transpile(ex, variableSetSize, largestParameterSetSize, numVariableSets, i-1, kernelName) # i-1 because julia is 1-based but PTX needs 0-based indexing + # compiledKernels[i] = Transpiler.CompileKernel(ptxKernel, kernelName) end results = Matrix{Float32}(undef, numVariableSets, length(exprs)) for i in 1:repetitions # Simulate parameter tuning -> local search (X remains the same, p gets changed in small steps and must be performed sequentially, which it is with this impl) # evaluate # results = Transpiler.evaluate(exprs, variables, numVariableSets, variableSetSize, p) - results = Transpiler.evaluate(compiledKernels, variables, variableSetSize, p) + results = Transpiler.evaluate(ptxKernels, variables, variableSetSize, p, kernelName) end return results diff --git a/package/src/Transpiler.jl b/package/src/Transpiler.jl index 33afd7e..101b698 100644 --- a/package/src/Transpiler.jl +++ b/package/src/Transpiler.jl @@ -35,7 +35,7 @@ end " A simplified version of the evaluate function. 
It takes a list of already compiled kernels to be executed. This should yield better performance, where the same expressions should be evaluated multiple times i.e. for parameter optimisation. " -function evaluate(kernels::Vector{CuFunction}, cudaVars::CuArray{Float32}, nrOfVariableSets::Integer, parameters::Vector{Vector{Float32}})::Matrix{Float32} +function evaluate(kernels::Vector{String}, cudaVars::CuArray{Float32}, nrOfVariableSets::Integer, parameters::Vector{Vector{Float32}}, kernelName::String)::Matrix{Float32} cudaParams = Utils.create_cuda_array(parameters, NaN32) # maybe make constant (see PerformanceTests.jl for more info) @@ -46,7 +46,8 @@ function evaluate(kernels::Vector{CuFunction}, cudaVars::CuArray{Float32}, nrOfV blocks = cld(nrOfVariableSets, threads) @inbounds Threads.@threads for i in eachindex(kernels) - cudacall(kernels[i], (CuPtr{Float32},CuPtr{Float32},CuPtr{Float32}), cudaVars, cudaParams, cudaResults; threads=threads, blocks=blocks) + compiledKernel = CompileKernel(kernels[i], kernelName) + cudacall(compiledKernel, (CuPtr{Float32},CuPtr{Float32},CuPtr{Float32}), cudaVars, cudaParams, cudaResults; threads=threads, blocks=blocks) end return cudaResults