benchmarking: improved performance with @inbounds; still slower in most cases
Some checks failed
CI / Julia 1.10 - ubuntu-latest - x64 - push: cancelled
CI / Julia 1.6 - ubuntu-latest - x64 - push: cancelled
CI / Julia pre - ubuntu-latest - x64 - push: cancelled

Daniel 2025-04-01 21:48:59 +02:00
parent d9c83caad9
commit 2b9c394f1b
4 changed files with 36 additions and 20 deletions
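
Background for the change: @inbounds tells Julia to elide array bounds checks inside the annotated loop or expression, which can speed up tight loops but makes out-of-bounds access undefined behavior. A minimal toy sketch (not code from this repo):

# Toy illustration of @inbounds (not code from this commit):
function sum_inbounds(xs::Vector{Float32})
    s = 0.0f0
    @inbounds for i in eachindex(xs)
        s += xs[i]  # bounds checks elided; eachindex guarantees valid indices here
    end
    return s
end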

View File

@@ -15,7 +15,7 @@ export interpret
 function interpret(expressions::Vector{Expr}, variables::Matrix{Float32}, parameters::Vector{Vector{Float32}})::Matrix{Float32}
 	exprs = Vector{ExpressionProcessing.PostfixType}(undef, length(expressions))
-	for i in eachindex(expressions)
+	@inbounds for i in eachindex(expressions)
 		exprs[i] = ExpressionProcessing.expr_to_postfix(expressions[i])
 	end
@@ -30,7 +30,7 @@ function interpret(expressions::Vector{Expr}, variables::Matrix{Float32}, parame
 	cudaResults = CuArray{Float32}(undef, variableCols, length(exprs))
 	# Start kernel for each expression to ensure that no warp is working on different expressions
-	for i in eachindex(exprs)
+	@inbounds for i in eachindex(exprs)
 		kernel = @cuda launch=false interpret_expression(cudaExprs, cudaVars, cudaParams, cudaResults, cudaStepsize, i)
 		# config = launch_configuration(kernel.fun)
 		threads = min(variableCols, 256)
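
The launch geometry above assigns one thread per variable set, capped at 256 threads per block, with cld (ceiling division) supplying enough blocks to cover every set. A quick check with assumed numbers:

# Assumed example values; min/cld mirror the launch code above.
variableCols = 1000
threads = min(variableCols, 256)      # -> 256 threads per block
blocks  = cld(variableCols, threads)  # -> cld(1000, 256) == 4 blocks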
@@ -61,11 +61,11 @@ function interpret_expression(expressions::CuDeviceArray{ExpressionElement}, var
 	firstVariableIndex = ((varSetIndex-1) * stepsize[3]) # Exclusive
-	for i in firstExprIndex:lastExprIndex
-		if expressions[i].Type == EMPTY
+	@inbounds for expr in expressions
+		if expr.Type == EMPTY
 			break
-		elseif expressions[i].Type == INDEX
-			val = expressions[i].Value
+		elseif expr.Type == INDEX
+			val = expr.Value
 			operationStackTop += 1
 			if val > 0
@@ -74,11 +74,11 @@ function interpret_expression(expressions::CuDeviceArray{ExpressionElement}, var
 				val = abs(val)
 				operationStack[operationStackTop] = parameters[firstParamIndex + val]
 			end
-		elseif expressions[i].Type == FLOAT32
+		elseif expr.Type == FLOAT32
 			operationStackTop += 1
-			operationStack[operationStackTop] = reinterpret(Float32, expressions[i].Value)
-		elseif expressions[i].Type == OPERATOR
-			type = reinterpret(Operator, expressions[i].Value)
+			operationStack[operationStackTop] = reinterpret(Float32, expr.Value)
+		elseif expr.Type == OPERATOR
+			type = reinterpret(Operator, expr.Value)
 			if type == ADD
 				operationStackTop -= 1
 				operationStack[operationStackTop] = operationStack[operationStackTop] + operationStack[operationStackTop + 1]
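
The branches above implement a postfix (RPN) stack machine: literals and indexed values are pushed, and each operator pops its operands and pushes the result. A host-side sketch of the same idea, illustrative only (the kernel uses a fixed-size stack and tagged values instead of dynamic dispatch):

# Minimal CPU sketch of postfix evaluation (not the kernel code):
function eval_postfix(tokens)
    stack = Float32[]
    for t in tokens
        if t isa Float32
            push!(stack, t)              # FLOAT32 case: push the literal
        elseif t === :+                  # OPERATOR ADD: pop two operands, push the sum
            b = pop!(stack); a = pop!(stack)
            push!(stack, a + b)
        end
    end
    return stack[end]                    # the result is the top of the stack
end

eval_postfix(Any[1.0f0, 2.0f0, :+])  # == 3.0f0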
@@ -108,10 +108,11 @@ function interpret_expression(expressions::CuDeviceArray{ExpressionElement}, var
 			break
 		end
 	end
 	# "(exprIndex - 1) * variableCols" -> calculates the column in which to insert the result (expression = column)
 	# "+ varSetIndex" -> to get the row inside the column at which to insert the result of the variable set (variable set = row)
 	resultIndex = convert(Int, (exprIndex - 1) * variableCols + varSetIndex) # Inclusive
-	results[resultIndex] = operationStack[operationStackTop]
+	@inbounds results[resultIndex] = operationStack[operationStackTop]
 	return
 end
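
The two comments above describe the column-major result layout: one column per expression, one row per variable set. Plugging assumed values into the same formula makes the index arithmetic concrete:

# Assumed example values; the formula matches resultIndex above.
exprIndex    = 3    # third expression   -> third column
varSetIndex  = 5    # fifth variable set -> fifth row in that column
variableCols = 100  # rows per column (number of variable sets)
resultIndex  = (exprIndex - 1) * variableCols + varSetIndex  # == 205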

View File

@@ -14,10 +14,25 @@ function evaluate(expressions::Vector{Expr}, variables::Matrix{Float32}, paramet
 	variableCols = size(variables, 2)
 	kernels = Vector{CuFunction}(undef, length(expressions))
 	# TODO: test this again with multiple threads. The first time I tried, I was using only one thread
 	# Test this parallel version again when doing performance tests. With the simple "functionality" tests this took 0.03 seconds while sequential took "0.00009" seconds
 	# Threads.@threads for i in eachindex(expressions)
-	# TODO: Use cache
-	# 	kernel = transpile(expressions[i], varRows, Utils.get_max_inner_length(parameters))
+	# 	cacheLock = ReentrantLock()
+	# 	cacheHit = false
+	# 	lock(cacheLock) do
+	# 		if haskey(cache, expressions[i])
+	# 			kernels[i] = cache[expressions[i]]
+	# 			cacheHit = true
+	# 		end
+	# 	end
+	# 	if cacheHit
+	# 		continue
+	# 	end
+	# 	formattedExpr = ExpressionProcessing.expr_to_postfix(expressions[i])
+	# 	kernel = transpile(formattedExpr, varRows, Utils.get_max_inner_length(parameters), variableCols, i-1) # i-1 because julia is 1-based but PTX needs 0-based indexing
 	# 	linker = CuLink()
 	# 	add_data!(linker, "ExpressionProcessing", kernel)
@@ -26,9 +41,11 @@ function evaluate(expressions::Vector{Expr}, variables::Matrix{Float32}, paramet
 	# 	mod = CuModule(image)
 	# 	kernels[i] = CuFunction(mod, "ExpressionProcessing")
+	# 	@lock cacheLock cache[expressions[i]] = kernels[i]
 	# end
-	for i in eachindex(expressions)
+	@inbounds for i in eachindex(expressions)
 		if haskey(cache, expressions[i])
 			kernels[i] = cache[expressions[i]]
 			continue
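
The commented-out threaded path sketches a lock-guarded kernel cache. As a standalone illustration of that pattern (names are hypothetical, not the repo's API):

# Hypothetical standalone version of the lock-guarded cache pattern.
const cache     = Dict{Expr,Any}()
const cacheLock = ReentrantLock()

function get_or_compile!(expr::Expr, compile)
    lock(cacheLock) do
        get!(cache, expr) do
            compile(expr)  # runs only on a cache miss, while the lock is held
        end
    end
end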
@@ -54,7 +71,7 @@ function evaluate(expressions::Vector{Expr}, variables::Matrix{Float32}, paramet
 	cudaResults = CuArray{Float32}(undef, variableCols, length(expressions))
 	# execute each kernel (also try doing this with Threads.@threads. Since we can have multiple grids, this might improve performance)
-	for i in eachindex(kernels)
+	@inbounds for i in eachindex(kernels)
 		# config = launch_configuration(kernels[i])
 		threads = min(variableCols, 256)
 		blocks = cld(variableCols, threads)

View File

@@ -105,10 +105,6 @@ loadparams!(suite, BenchmarkTools.load("params.json")[1], :samples, :evals, :gct
 results = run(suite, verbose=true, seconds=180)
-BenchmarkTools.save("$BENCHMARKS_RESULTS_PATH/256_blocksize.json", results)
 if compareWithCPU
 	medianCPU = median(results["CPU"])
 	stdCPU = std(results["CPU"])
@@ -142,8 +138,9 @@ if compareWithCPU
 	println(gpuiVsGPUT_median)
 	println(gpuiVsGPUT_std)
+	BenchmarkTools.save("$BENCHMARKS_RESULTS_PATH/using_inbounds.json", results)
 else
-	resultsOld = BenchmarkTools.load("$BENCHMARKS_RESULTS_PATH/initial_results.json")[1]
+	resultsOld = BenchmarkTools.load("$BENCHMARKS_RESULTS_PATH/256_blocksize.json")[1]
 	medianGPUI_old = median(resultsOld["GPUI"])
 	stdGPUI_old = std(resultsOld["GPUI"])
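
With this commit the else branch compares against the previous run (256_blocksize.json) instead of the initial results. For such comparisons, BenchmarkTools also provides judge; a sketch reusing the script's variables:

using BenchmarkTools  # sketch: compare a fresh run against the saved baseline

resultsOld = BenchmarkTools.load("$BENCHMARKS_RESULTS_PATH/256_blocksize.json")[1]
# judge classifies the change as :improvement, :invariant, or :regression
println(judge(median(results["GPUI"]), median(resultsOld["GPUI"])))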

File diff suppressed because one or more lines are too long