benchmarking: fixed bugs introduced by modification of transpiler
Some checks are pending
CI / Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }} (x64, ubuntu-latest, 1.6) (push) Waiting to run
CI / Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }} (x64, ubuntu-latest, pre) (push) Waiting to run
CI / Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }} (x64, ubuntu-latest, 1.10) (push) Waiting to run

This commit is contained in:
Daniel
2025-05-19 12:29:05 +02:00
parent e29199d865
commit a9ffd5da63
2 changed files with 5 additions and 6 deletions

View File

@@ -61,14 +61,13 @@ function evaluate_gpu(expressions::Vector{Expr}, X::Matrix{Float32}, p::Vector{V
@inbounds Threads.@threads for i in eachindex(expressions)
ex = ExpressionProcessing.expr_to_postfix(expressions[i])
ptxKernels[i] = Transpiler.transpile(ex, variableSetSize, largestParameterSetSize, numVariableSets, i-1, kernelName) # i-1 because julia is 1-based but PTX needs 0-based indexing
# compiledKernels[i] = Transpiler.CompileKernel(ptxKernel, kernelName)
end
results = Matrix{Float32}(undef, numVariableSets, length(exprs))
results = Matrix{Float32}(undef, numVariableSets, length(expressions))
for i in 1:repetitions # Simulate parameter tuning -> local search (X remains the same, p gets changed in small steps and must be performed sequentially, which it is with this impl)
# evaluate
# results = Transpiler.evaluate(exprs, variables, numVariableSets, variableSetSize, p)
results = Transpiler.evaluate(ptxKernels, variables, variableSetSize, p, kernelName)
results = Transpiler.evaluate(ptxKernels, variables, numVariableSets, p, kernelName)
end
return results