benchmarking: minor improvements to interpreter

Some checks are pending:
CI / Julia 1.10 - ubuntu-latest - x64 (push) — waiting to run
CI / Julia 1.6 - ubuntu-latest - x64 (push) — waiting to run
CI / Julia pre - ubuntu-latest - x64 (push) — waiting to run

This commit is contained in:
@ -24,17 +24,17 @@ function interpret(cudaExprs, numExprs::Integer, exprsInnerLength::Integer,
|
||||
cudaResults = CuArray{Float32}(undef, variableColumns, numExprs)
|
||||
|
||||
# Start kernel for each expression to ensure that no warp is working on different expressions
|
||||
@inbounds Threads.@threads for i in 1:numExprs # multithreaded to speedup dispatching (seems to have improved performance)
|
||||
numThreads = min(variableColumns, 121)
|
||||
numBlocks = cld(variableColumns, numThreads)
|
||||
numThreads = min(variableColumns, 121)
|
||||
numBlocks = cld(variableColumns, numThreads)
|
||||
|
||||
@inbounds Threads.@threads for i in 1:numExprs # multithreaded to speedup dispatching (seems to have improved performance)
|
||||
@cuda threads=numThreads blocks=numBlocks fastmath=true interpret_expression(cudaExprs, cudaVars, cudaParams, cudaResults, cudaStepsize, i)
|
||||
end
|
||||
|
||||
return cudaResults
|
||||
end
|
||||
|
||||
# Depth of the fixed-size per-thread stack used to hold operand values and
# intermediate results while interpreting one expression.
# NOTE(review): the diff shows this being reduced from 25 to 10; 10 is the
# post-change value. Confirm that the deepest expression nesting encountered
# in practice never exceeds 10, since there is no visible overflow check here.
const MAX_STACK_SIZE = 10
function interpret_expression(expressions::CuDeviceArray{ExpressionElement}, variables::CuDeviceArray{Float32}, parameters::CuDeviceArray{Float32}, results::CuDeviceArray{Float32}, stepsize::CuDeviceArray{Int}, exprIndex::Int)
|
||||
varSetIndex = (blockIdx().x - 1) * blockDim().x + threadIdx().x # ctaid.x * ntid.x + tid.x (1-based)
|
||||
@inbounds variableCols = length(variables) / stepsize[3] # number of variable sets
|
||||
@ -92,7 +92,6 @@ function interpret_expression(expressions::CuDeviceArray{ExpressionElement}, var
|
||||
elseif opcode == SQRT
|
||||
operationStack[operationStackTop] = sqrt(operationStack[operationStackTop])
|
||||
elseif opcode == INV
|
||||
# operationStack[operationStackTop] = 1f0 / operationStack[operationStackTop]
|
||||
operationStack[operationStackTop] = inv(operationStack[operationStackTop])
|
||||
end
|
||||
else
|
||||
|
@ -29,10 +29,10 @@ expr_reps = 1
|
||||
|
||||
|
||||
# Profile the interpreter-based GPU evaluation path. This commit's diff shows
# the call switching from commented-out to live, i.e. interpreter profiling is
# enabled in the post-change state (matching the commit message, which targets
# the interpreter). The transpiler block below mirrors this one so the two
# backends can be profiled independently.
@testset "Interpreter Tuning" begin
    CUDA.@profile interpret_gpu(exprs, X, parameters; repetitions=expr_reps)
end
|
||||
|
||||
# Profile the transpiler-based GPU evaluation path. In this commit's diff the
# call moves from live to commented-out, i.e. transpiler profiling is disabled
# in the post-change state — presumably to keep profiling runs focused on the
# interpreter. NOTE(review): old/new column order in the scraped diff is
# ambiguous; confirm against the repository which side is current.
@testset "Transpiler Tuning" begin
    # CUDA.@profile evaluate_gpu(exprs, X, parameters; repetitions=expr_reps)
end
Reference in New Issue
Block a user