diff --git a/package/src/ExpressionProcessing.jl b/package/src/ExpressionProcessing.jl
index d5f4f7e..9d386d4 100644
--- a/package/src/ExpressionProcessing.jl
+++ b/package/src/ExpressionProcessing.jl
@@ -27,6 +27,13 @@ NOTE: This function is not thread safe, especially cache access is not thread sa
 function expr_to_postfix(expression::Expr)::PostfixType
     expr = expression
     if expression.head === :->
+        if typeof(expression.args[2]) == Float64 # debug output: the lambda body is a bare Float64 literal
+            println()
+            println("Expression: $expression")
+            println("Expr: $expr")
+            println()
+            dump(expression; maxdepth=10)
+        end
         # if the expression equals (x, p) -> (...) then the below statement extracts the expression to evaluate
         if expression.args[2].head == :block # expressions that are not generated with the parser (./test/parser.jl) contain this extra "block" node, which needs to be skipped
             expr = expression.args[2].args[2]
@@ -41,6 +48,8 @@ function expr_to_postfix(expression::Expr)::PostfixType
 
     postfix = PostfixType()
 
+
+
     # Special handling in the case where the expression is an array access
     # This can happen if the token is a variable/parameter of the form x[n]/p[n]
     if expr.head == :ref
diff --git a/package/src/Transpiler.jl b/package/src/Transpiler.jl
index a5b2cda..fe40619 100644
--- a/package/src/Transpiler.jl
+++ b/package/src/Transpiler.jl
@@ -15,7 +15,7 @@ const Operand = Union{Float32, String} # Operand is either a fixed value or regist
 function evaluate(expressions::Vector{Expr}, variables::Matrix{Float32}, parameters::Vector{Vector{Float32}})::Matrix{Float32}
     varRows = size(variables, 1)
     variableCols = size(variables, 2)
-    kernels = Vector{CuFunction}(undef, length(expressions))
+    # kernels = Vector{CuFunction}(undef, length(expressions))
 
     # TODO: test this again with multiple threads. The first time I tried, I was using only one thread
     # Test this parallel version again when doing performance tests. With the simple "functionality" tests this took 0.03 seconds while sequential took "0.00009" seconds
@@ -48,6 +48,16 @@ function evaluate(expressions::Vector{Expr}, variables::Matrix{Float32}, paramet
     #     @lock cacheLock transpilerCache[expressions[i]] = kernels[i]
     # end
 
+    cudaVars = CuArray(variables) # maybe put in shared memory (see PerformanceTests.jl for more info)
+    cudaParams = Utils.create_cuda_array(parameters, NaN32) # maybe make constant (see PerformanceTests.jl for more info)
+
+    # each expression produces one result per variable set (one per column of the variables matrix), and there are n expressions
+    cudaResults = CuArray{Float32}(undef, variableCols, length(expressions))
+
+    threads = min(variableCols, 256)
+    blocks = cld(variableCols, threads)
+
+    # TODO: Implement batching as a middle ground between "transpile everything, then run everything" and "transpile one, run one", even though cudacall is async
     @inbounds for i in eachindex(expressions)
         # if haskey(resultCache, expressions[i])
         #     kernels[i] = resultCache[expressions[i]]
@@ -64,7 +74,11 @@ function evaluate(expressions::Vector{Expr}, variables::Matrix{Float32}, paramet
         image = complete(linker)
 
         mod = CuModule(image)
-        kernels[i] = CuFunction(mod, "ExpressionProcessing")
+
+        compiledKernel = CuFunction(mod, "ExpressionProcessing")
+        cudacall(compiledKernel, (CuPtr{Float32}, CuPtr{Float32}, CuPtr{Float32}), cudaVars, cudaParams, cudaResults; threads=threads, blocks=blocks)
+
+        # kernels[i] = CuFunction(mod, "ExpressionProcessing")
         # resultCache[expressions[i]] = kernels[i]
         # catch
         #     dump(expressions[i]; maxdepth=10)
@@ -78,20 +92,9 @@ function evaluate(expressions::Vector{Expr}, variables::Matrix{Float32}, paramet
 
     end
 
-    cudaVars = CuArray(variables) # maybe put in shared memory (see PerformanceTests.jl for more info)
-    cudaParams = Utils.create_cuda_array(parameters, NaN32) # maybe make constant (see PerformanceTests.jl for more info)
-
-    # each expression has nr. of variable sets (nr. of columns of the variables) results and there are n expressions
-    cudaResults = CuArray{Float32}(undef, variableCols, length(expressions))
-
-    # execute each kernel (also try doing this with Threads.@threads. Since we can have multiple grids, this might improve performance)
-    for kernel in kernels
-        # config = launch_configuration(kernels[i])
-        threads = min(variableCols, 256)
-        blocks = cld(variableCols, threads)
-
-        cudacall(kernel, (CuPtr{Float32},CuPtr{Float32},CuPtr{Float32}), cudaVars, cudaParams, cudaResults; threads=threads, blocks=blocks)
-    end
+    # for kernel in kernels
+    #     cudacall(kernel, (CuPtr{Float32},CuPtr{Float32},CuPtr{Float32}), cudaVars, cudaParams, cudaResults; threads=threads, blocks=blocks)
+    # end
 
     return cudaResults
 end
diff --git a/package/test/PerformanceTests.jl b/package/test/PerformanceTests.jl
index 28dab05..edfb6c8 100644
--- a/package/test/PerformanceTests.jl
+++ b/package/test/PerformanceTests.jl
@@ -63,15 +63,13 @@ if compareWithCPU
 end
 
 # cacheInterpreter = Dict{Expr, PostfixType}()
-suite["GPUI"]["nikuradse_1"] = @benchmarkable interpret_gpu(exprs, X_t, parameters; repetitions=expr_reps)
+# suite["GPUI"]["nikuradse_1"] = @benchmarkable interpret_gpu(exprs, X_t, parameters; repetitions=expr_reps)
 
 # cacheTranspilerFront = Dict{Expr, PostfixType}()
 # cacheTranspilerRes = Dict{Expr, CuFunction}()
 suite["GPUT"]["nikuradse_1"] = @benchmarkable evaluate_gpu(exprs, X_t, parameters; repetitions=expr_reps)
 
-for i in 1:1
-    tune!(suite)
-end
+tune!(suite)
 BenchmarkTools.save("params.json", params(suite))
 
 throw("finished tuning")
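
For reference, the batching mentioned in the TODO in Transpiler.jl could look roughly like the sketch below. This is an illustration only, not the repository's implementation: compile_kernel is a hypothetical helper standing in for the transpile/link/CuModule/CuFunction steps shown in the hunk above, and batchSize is an invented tuning parameter. The point of the middle ground is that the asynchronous launches of one batch can overlap with the host-side transpilation of the next, without compiling and holding every kernel before the first launch.

# Sketch of the batching TODO; `compile_kernel` is hypothetical and stands in
# for the transpile -> complete(linker) -> CuModule -> CuFunction steps.
using CUDA

function evaluate_batched(expressions::Vector{Expr}, cudaVars::CuArray{Float32},
                          cudaParams::CuArray{Float32}, cudaResults::CuArray{Float32};
                          threads::Int, blocks::Int, batchSize::Int=16)
    for lo in 1:batchSize:length(expressions)
        hi = min(lo + batchSize - 1, length(expressions))

        # transpile one batch of expressions on the host...
        kernels = [compile_kernel(expressions[i]) for i in lo:hi]

        # ...then launch them all; since cudacall is asynchronous, the GPU works
        # through this batch while the host already transpiles the next one
        for kernel in kernels
            cudacall(kernel, (CuPtr{Float32}, CuPtr{Float32}, CuPtr{Float32}),
                     cudaVars, cudaParams, cudaResults; threads=threads, blocks=blocks)
        end
    end
    return cudaResults
end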