benchmarking: updated blocksize to 256 with moderate improvements

Daniel 2025-03-30 13:56:25 +02:00
parent 1dc0c1898d
commit d9c83caad9
5 changed files with 129 additions and 100 deletions


@@ -32,8 +32,8 @@ function interpret(expressions::Vector{Expr}, variables::Matrix{Float32}, parame
     # Start kernel for each expression to ensure that no warp is working on different expressions
     for i in eachindex(exprs)
        kernel = @cuda launch=false interpret_expression(cudaExprs, cudaVars, cudaParams, cudaResults, cudaStepsize, i)
-       config = launch_configuration(kernel.fun)
-       threads = min(variableCols, config.threads)
+       # config = launch_configuration(kernel.fun)
+       threads = min(variableCols, 256)
        blocks = cld(variableCols, threads)
        kernel(cudaExprs, cudaVars, cudaParams, cudaResults, cudaStepsize, i; threads, blocks)
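Commentary (not part of the commit): if hardcoding the block size ever needs to be revisited, a hedged sketch of keeping the occupancy query while capping it at the benchmarked 256 could look like the following (it reuses kernel, variableCols and cld from the hunk above; the cap itself is only the value benchmarked here, not a verified optimum):

    # Sketch only: cap the occupancy suggestion instead of replacing it outright.
    config = launch_configuration(kernel.fun)         # CUDA.jl occupancy suggestion for this kernel
    threads = min(variableCols, config.threads, 256)  # never exceed 256 threads per block
    blocks = cld(variableCols, threads)               # enough blocks to cover every variable set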
@@ -46,7 +46,6 @@ end
 const MAX_STACK_SIZE = 25 # The depth of the stack to store the values and intermediate results
 function interpret_expression(expressions::CuDeviceArray{ExpressionElement}, variables::CuDeviceArray{Float32}, parameters::CuDeviceArray{Float32}, results::CuDeviceArray{Float32}, stepsize::CuDeviceArray{Int}, exprIndex::Int)
    varSetIndex = (blockIdx().x - 1) * blockDim().x + threadIdx().x # ctaid.x * ntid.x + tid.x (1-based)
-   # stride = gridDim().x * blockDim().x # nctaid.x * ntid.x
    variableCols = length(variables) / stepsize[3]
    if varSetIndex > variableCols
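Commentary (not part of the commit): a quick, hypothetical sanity check of the 1-based indexing above, assuming the new fixed block size of 256:

    # Hypothetical host-side replay of the kernel's index computation (blockDim = 256):
    blockDimX, blockIdxX, threadIdxX = 256, 3, 10
    varSetIndex = (blockIdxX - 1) * blockDimX + threadIdxX
    @assert varSetIndex == 522   # thread 10 of block 3 handles variable set 522

Threads whose varSetIndex exceeds variableCols simply return, which is how the surplus threads of the last block are masked out.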
@@ -60,61 +59,59 @@ function interpret_expression(expressions::CuDeviceArray{ExpressionElement}, var
    operationStack = MVector{MAX_STACK_SIZE, Float32}(undef) # Try to get this to function with variable size too, to allow better memory usage
    operationStackTop = 0 # stores index of the last defined/valid value
-   # for varSetIndex in index:stride
    firstVariableIndex = ((varSetIndex-1) * stepsize[3]) # Exclusive

    for i in firstExprIndex:lastExprIndex
        if expressions[i].Type == EMPTY
            break
        elseif expressions[i].Type == INDEX
            val = expressions[i].Value
            operationStackTop += 1

            if val > 0
                operationStack[operationStackTop] = variables[firstVariableIndex + val]
            else
                val = abs(val)
                operationStack[operationStackTop] = parameters[firstParamIndex + val]
            end
        elseif expressions[i].Type == FLOAT32
            operationStackTop += 1
            operationStack[operationStackTop] = reinterpret(Float32, expressions[i].Value)
        elseif expressions[i].Type == OPERATOR
            type = reinterpret(Operator, expressions[i].Value)
            if type == ADD
                operationStackTop -= 1
                operationStack[operationStackTop] = operationStack[operationStackTop] + operationStack[operationStackTop + 1]
            elseif type == SUBTRACT
                operationStackTop -= 1
                operationStack[operationStackTop] = operationStack[operationStackTop] - operationStack[operationStackTop + 1]
            elseif type == MULTIPLY
                operationStackTop -= 1
                operationStack[operationStackTop] = operationStack[operationStackTop] * operationStack[operationStackTop + 1]
            elseif type == DIVIDE
                operationStackTop -= 1
                operationStack[operationStackTop] = operationStack[operationStackTop] / operationStack[operationStackTop + 1]
            elseif type == POWER
                operationStackTop -= 1
                operationStack[operationStackTop] = operationStack[operationStackTop] ^ operationStack[operationStackTop + 1]
            elseif type == ABS
                operationStack[operationStackTop] = abs(operationStack[operationStackTop])
            elseif type == LOG
                operationStack[operationStackTop] = log(operationStack[operationStackTop])
            elseif type == EXP
                operationStack[operationStackTop] = exp(operationStack[operationStackTop])
            elseif type == SQRT
                operationStack[operationStackTop] = sqrt(operationStack[operationStackTop])
            end
        else
            operationStack[operationStackTop] = NaN
            break
        end
    end

    # "(exprIndex - 1) * variableCols" -> calculates the column in which to insert the result (expression = column)
    # "+ varSetIndex" -> to get the row inside the column at which to insert the result of the variable set (variable set = row)
    resultIndex = convert(Int, (exprIndex - 1) * variableCols + varSetIndex) # Inclusive
    results[resultIndex] = operationStack[operationStackTop]
-   # end
    return
end
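Commentary (not part of the commit): the two comments above describe a column-major result layout, one column per expression and one row per variable set. A hypothetical host-side check of that flat indexing (all sizes below are made up, not taken from the diff):

    variableCols, numExprs = 1000, 4                   # assumed sizes
    exprIndex, varSetIndex = 3, 42
    resultIndex = (exprIndex - 1) * variableCols + varSetIndex
    results = zeros(Float32, variableCols * numExprs)
    results[resultIndex] = 1.0f0
    resultsMatrix = reshape(results, variableCols, numExprs)
    @assert resultsMatrix[varSetIndex, exprIndex] == 1.0f0  # row = variable set, column = expression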


@@ -55,8 +55,8 @@ function evaluate(expressions::Vector{Expr}, variables::Matrix{Float32}, paramet
    # execute each kernel (also try doing this with Threads.@threads. Since we can have multiple grids, this might improve performance)
    for i in eachindex(kernels)
-       config = launch_configuration(kernels[i])
-       threads = min(variableCols, config.threads)
+       # config = launch_configuration(kernels[i])
+       threads = min(variableCols, 256)
        blocks = cld(variableCols, threads)
        cudacall(kernels[i], (CuPtr{Float32},CuPtr{Float32},CuPtr{Float32}), cudaVars, cudaParams, cudaResults; threads=threads, blocks=blocks)
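Commentary (not part of the commit): to make the launch geometry of both changed call sites concrete, here is a sketch with hypothetical numbers (1000 variable sets is an assumption, not a value from the benchmark):

    # Example launch geometry with the fixed block size:
    variableCols = 1000
    threads = min(variableCols, 256)     # 256
    blocks = cld(variableCols, threads)  # cld(1000, 256) == 4
    # 4 blocks * 256 threads == 1024 launched threads; the 24 surplus threads fail the
    # varSetIndex > variableCols bounds check in the kernel and return immediately.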


@@ -64,6 +64,7 @@ end
    # https://cuda.juliagpu.org/v2.6/lib/driver/#Memory-Management
 end
+compareWithCPU = true
 suite = BenchmarkGroup()
@@ -74,12 +75,14 @@ varsets_small = 100
 varsets_medium = 1000
 varsets_large = 10000
-X_small = randn(Float32, varsets_small, 5)
-suite["CPU"]["small varset"] = @benchmarkable interpret_cpu(exprsCPU, X_small, p; repetitions=expr_reps)
-X_medium = randn(Float32, varsets_medium, 5)
-suite["CPU"]["medium varset"] = @benchmarkable interpret_cpu(exprsCPU, X_medium, p; repetitions=expr_reps)
-X_large = randn(Float32, varsets_large, 5)
-suite["CPU"]["large varset"] = @benchmarkable interpret_cpu(exprsCPU, X_large, p; repetitions=expr_reps)
+if compareWithCPU
+    X_small = randn(Float32, varsets_small, 5)
+    suite["CPU"]["small varset"] = @benchmarkable interpret_cpu(exprsCPU, X_small, p; repetitions=expr_reps)
+    X_medium = randn(Float32, varsets_medium, 5)
+    suite["CPU"]["medium varset"] = @benchmarkable interpret_cpu(exprsCPU, X_medium, p; repetitions=expr_reps)
+    X_large = randn(Float32, varsets_large, 5)
+    suite["CPU"]["large varset"] = @benchmarkable interpret_cpu(exprsCPU, X_large, p; repetitions=expr_reps)
+end
 X_small_GPU = randn(Float32, 5, varsets_small)
 suite["GPUI"]["small varset"] = @benchmarkable interpret_gpu(exprsGPU, X_small_GPU, p; repetitions=expr_reps)
@@ -102,45 +105,73 @@ loadparams!(suite, BenchmarkTools.load("params.json")[1], :samples, :evals, :gct
 results = run(suite, verbose=true, seconds=180)
 # BenchmarkTools.save("$BENCHMARKS_RESULTS_PATH/initial_results.json", results)
 # initial_results = BenchmarkTools.load("$BENCHMARKS_RESULTS_PATHinitial_results.json")
-medianCPU = median(results["CPU"])
-minimumCPU = minimum(results["CPU"])
-stdCPU = std(results["CPU"])
-medianInterpreter = median(results["GPUI"])
-minimumInterpreter = minimum(results["GPUI"])
-stdInterpreter = std(results["GPUI"])
-medianTranspiler = median(results["GPUT"])
-minimumTranspiler = minimum(results["GPUT"])
-stdTranspiler = std(results["GPUT"])
-cpuVsGPUI_median = judge(medianInterpreter, medianCPU) # is interpreter better than cpu?
-cpuVsGPUT_median = judge(medianTranspiler, medianCPU) # is transpiler better than cpu?
-gpuiVsGPUT_median = judge(medianTranspiler, medianInterpreter) # is transpiler better than interpreter?
-cpuVsGPUI_minimum = judge(minimumInterpreter, minimumCPU) # is interpreter better than cpu?
-cpuVsGPUT_minimum = judge(minimumTranspiler, minimumCPU) # is transpiler better than cpu?
-gpuiVsGPUT_minimum = judge(minimumTranspiler, minimumInterpreter) # is transpiler better than interpreter?
-cpuVsGPUI_std = judge(stdInterpreter, stdCPU) # is interpreter better than cpu?
-cpuVsGPUT_std = judge(stdTranspiler, stdCPU) # is transpiler better than cpu?
-gpuiVsGPUT_std = judge(stdTranspiler, stdInterpreter) # is transpiler better than interpreter?
-println("Is the interpreter better than the CPU implementation:")
-println(cpuVsGPUI_median)
-println(cpuVsGPUI_minimum)
-println(cpuVsGPUI_std)
-println("Is the transpiler better than the CPU implementation:")
-println(cpuVsGPUT_median)
-println(cpuVsGPUT_minimum)
-println(cpuVsGPUT_std)
-println("Is the transpiler better than the interpreter:")
-println(gpuiVsGPUT_median)
-println(gpuiVsGPUT_minimum)
-println(gpuiVsGPUT_std)
+BenchmarkTools.save("$BENCHMARKS_RESULTS_PATH/256_blocksize.json", results)
+if compareWithCPU
+    medianCPU = median(results["CPU"])
+    stdCPU = std(results["CPU"])
+    medianInterpreter = median(results["GPUI"])
+    stdInterpreter = std(results["GPUI"])
+    medianTranspiler = median(results["GPUT"])
+    stdTranspiler = std(results["GPUT"])
+    cpuVsGPUI_median = judge(medianInterpreter, medianCPU) # is interpreter better than cpu?
+    cpuVsGPUT_median = judge(medianTranspiler, medianCPU) # is transpiler better than cpu?
+    gpuiVsGPUT_median = judge(medianTranspiler, medianInterpreter) # is transpiler better than interpreter?
+    cpuVsGPUI_std = judge(stdInterpreter, stdCPU) # is interpreter better than cpu?
+    cpuVsGPUT_std = judge(stdTranspiler, stdCPU) # is transpiler better than cpu?
+    gpuiVsGPUT_std = judge(stdTranspiler, stdInterpreter) # is transpiler better than interpreter?
+    println()
+    println("Is the interpreter better than the CPU implementation:")
+    println(cpuVsGPUI_median)
+    println(cpuVsGPUI_std)
+    println()
+    println("Is the transpiler better than the CPU implementation:")
+    println(cpuVsGPUT_median)
+    println(cpuVsGPUT_std)
+    println()
+    println("Is the transpiler better than the interpreter:")
+    println(gpuiVsGPUT_median)
+    println(gpuiVsGPUT_std)
+else
+    resultsOld = BenchmarkTools.load("$BENCHMARKS_RESULTS_PATH/initial_results.json")[1]
+    medianGPUI_old = median(resultsOld["GPUI"])
+    stdGPUI_old = std(resultsOld["GPUI"])
+    medianGPUT_old = median(resultsOld["GPUT"])
+    stdGPUT_old = std(resultsOld["GPUT"])
+    medianInterpreter = median(results["GPUI"])
+    stdInterpreter = std(results["GPUI"])
+    medianTranspiler = median(results["GPUT"])
+    stdTranspiler = std(results["GPUT"])
+    oldVsGPUI_median = judge(medianInterpreter, medianGPUI_old) # is interpreter better than old?
+    oldVsGPUI_std = judge(stdInterpreter, stdGPUI_old) # is interpreter better than old?
+    oldVsGPUT_median = judge(medianTranspiler, medianGPUT_old) # is transpiler better than old?
+    oldVsGPUT_std = judge(stdTranspiler, stdGPUT_old) # is transpiler better than old?
+    println()
+    println("Is the interpreter better than the old implementation:")
+    println(oldVsGPUI_median)
+    println(oldVsGPUI_std)
+    println()
+    println("Is the transpiler better than the old implementation:")
+    println(oldVsGPUT_median)
+    println(oldVsGPUT_std)
+end
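Commentary (not part of the commit): a minimal, self-contained sketch of the judge/median pattern used above, with throwaway benchmarks (the names g, fast and slow are assumptions, not part of the repository):

    using BenchmarkTools

    g = BenchmarkGroup()
    g["fast"] = @benchmarkable sum($(rand(Float32, 1_000)))
    g["slow"] = @benchmarkable sum($(rand(Float32, 100_000)))
    r = run(g)

    # judge(new, old) compares two estimates; :improvement means `new` is faster than
    # `old` beyond the default tolerance, :regression means it is slower.
    verdict = judge(median(r["slow"]), median(r["fast"]))
    println(verdict)

Here "slow" would typically be judged a regression relative to "fast", mirroring how the script judges the GPU interpreter and transpiler against the CPU baseline or against the stored initial results.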

File diff suppressed because one or more lines are too long