benchmarking: improved performance with @inbounds. still slower in most cases

2025-04-01 21:48:59 +02:00
parent d9c83caad9
commit 2b9c394f1b
4 changed files with 36 additions and 20 deletions
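
The diff below wraps the kernel-compilation loop and the kernel-launch loop in `@inbounds`, which tells Julia to skip array bounds checks inside the annotated block. A minimal sketch of the pattern, not taken from this repository (the function `sum_squares` and its input are made up for illustration):

```julia
# Hypothetical example: @inbounds elides bounds checks inside the loop.
# This is only safe because eachindex(xs) yields valid indices for xs by construction.
function sum_squares(xs::Vector{Float32})
    acc = 0.0f0
    @inbounds for i in eachindex(xs)
        acc += xs[i] * xs[i]
    end
    return acc
end

sum_squares(Float32[1.0, 2.0, 3.0])  # returns 14.0f0
```

Because `eachindex` already produces provably in-bounds indices, the checks can often be optimized away even without the annotation, so the gain from `@inbounds` on loops like these tends to be modest.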


@@ -14,10 +14,25 @@ function evaluate(expressions::Vector{Expr}, variables::Matrix{Float32}, paramet
variableCols = size(variables, 2)
kernels = Vector{CuFunction}(undef, length(expressions))
# TODO: test this again with multiple threads. The first time I tried, I was using only one thread
# Test this parallel version again when doing performance tests. With the simple "functionality" tests this took 0.03 seconds while sequential took "0.00009" seconds
# Threads.@threads for i in eachindex(expressions)
# TODO: Use cache
# kernel = transpile(expressions[i], varRows, Utils.get_max_inner_length(parameters))
# cacheLock = ReentrantLock()
# cacheHit = false
# lock(cacheLock) do
# if haskey(cache, expressions[i])
# kernels[i] = cache[expressions[i]]
# cacheHit = true
# end
# end
# if cacheHit
# continue
# end
# formattedExpr = ExpressionProcessing.expr_to_postfix(expressions[i])
# kernel = transpile(formattedExpr, varRows, Utils.get_max_inner_length(parameters), variableCols, i-1) # i-1 because julia is 1-based but PTX needs 0-based indexing
# linker = CuLink()
# add_data!(linker, "ExpressionProcessing", kernel)
@@ -26,9 +41,11 @@ function evaluate(expressions::Vector{Expr}, variables::Matrix{Float32}, paramet
# mod = CuModule(image)
# kernels[i] = CuFunction(mod, "ExpressionProcessing")
# @lock cacheLock cache[expressions[i]] = kernels[i]
# end
-	for i in eachindex(expressions)
+	@inbounds for i in eachindex(expressions)
if haskey(cache, expressions[i])
kernels[i] = cache[expressions[i]]
continue
@@ -54,7 +71,7 @@ function evaluate(expressions::Vector{Expr}, variables::Matrix{Float32}, paramet
cudaResults = CuArray{Float32}(undef, variableCols, length(expressions))
# execute each kernel (also try doing this with Threads.@threads. Since we can have multiple grids, this might improve performance)
-	for i in eachindex(kernels)
+	@inbounds for i in eachindex(kernels)
# config = launch_configuration(kernels[i])
threads = min(variableCols, 256)
blocks = cld(variableCols, threads)
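
For reference, the launch-size arithmetic in the last hunk caps the block size at 256 threads and uses ceiling division so that every variable column is covered by some thread. A small self-contained check of that arithmetic (the value of `variableCols` is illustrative, not from the benchmark):

```julia
# Illustrative only: reproduce the threads/blocks computation from the launch loop above.
variableCols = 1000                    # example value, not from the benchmark
threads = min(variableCols, 256)       # at most 256 threads per block
blocks  = cld(variableCols, threads)   # ceiling division: enough blocks to cover all columns
@assert blocks * threads >= variableCols
# variableCols = 1000 gives threads = 256, blocks = 4 (4 * 256 = 1024 >= 1000)
```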