implementation: started writing interpreter section; finished CPU-side part; also found an error in the interpreter
@@ -26,7 +26,10 @@ function interpret_gpu(exprs::Vector{Expr}, X::Matrix{Float32}, p::Vector{Vector
    ncols = size(X, 2)

    results = Matrix{Float32}(undef, ncols, length(exprs))
    # TODO: create CuArray for variables here already, as they never change
    # TODO: create CuArray for variables here already, as they never change
    # could/should be done even before calling this, but I guess it would be diminishing returns
    # TODO: test how this would impact performance; if it gets faster, adapt the implementation section
    # TODO: create CuArray for expressions here already. They also do not change over the course of parameter optimisation and therefore a lot of unnecessary calls to expr_to_postfix can be saved (even though a cache is used, this should still be faster)

    for i in 1:repetitions # Simulate parameter tuning -> local search (X remains the same, p gets changed in small steps and must be performed sequentially, which it is with this impl)
        results = Interpreter.interpret(exprs, X, p)
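A minimal sketch of what the first TODO could look like: the device upload of X is hoisted out of the tuning loop, since X never changes during local search. The interpret overload that accepts a pre-built CuArray is hypothetical and does not exist yet.

using CUDA

cudaVars = CuArray(X)  # upload the variable matrix once; X stays constant during parameter tuning

for i in 1:repetitions
    # hypothetical overload that reuses the already-uploaded variables
    # instead of re-creating the CuArray inside every call
    results = Interpreter.interpret(exprs, cudaVars, p)
end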
@@ -41,7 +44,10 @@ function evaluate_gpu(exprs::Vector{Expr}, X::Matrix{Float32}, p::Vector{Vector{
    ncols = size(X, 2)

    results = Matrix{Float32}(undef, ncols, length(exprs))
    # TODO: create CuArray for variables here already, as they never change
    # TODO: create CuArray for variables here already, as they never change
    # could/should be done even before calling this, but I guess it would be diminishing returns
    # TODO: test how this would impact performance; if it gets faster, adapt the implementation section
    # TODO: create CuArray for expressions here already. They also do not change over the course of parameter optimisation and therefore a lot of unnecessary calls to expr_to_postfix can be saved (even though a cache is used, this should still be faster)

    for i in 1:repetitions # Simulate parameter tuning -> local search (X remains the same, p gets changed in small steps and must be performed sequentially, which it is with this impl)
        results = Transpiler.evaluate(exprs, X, p)
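The same idea applies to the expression side mentioned in the TODO above: converting the expressions to postfix and building the expression buffer once would avoid repeated expr_to_postfix calls per iteration. A rough sketch only; the expr_to_postfix signature is an assumption, and a Transpiler.evaluate variant that accepts the prepared buffer would still have to be added.

postfixExprs = [expr_to_postfix(e) for e in exprs]                              # assumed signature: Expr -> Vector{ExpressionElement}
cudaExprs = Utils.create_cuda_array(postfixExprs, ExpressionElement(EMPTY, 0))  # built once, reused in every iteration
# the loop body would then call a variant of Transpiler.evaluate that takes cudaExprs (hypothetical)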
@@ -23,7 +23,7 @@ function interpret(expressions::Vector{Expr}, variables::Matrix{Float32}, parame
    variableCols = size(variables, 2) # number of variable sets to use for each expression
    cudaVars = CuArray(variables)
    cudaParams = Utils.create_cuda_array(parameters, NaN32) # column corresponds to data for one expression
    cudaExprs = Utils.create_cuda_array(exprs, ExpressionElement(EMPTY, 0)) # column corresponds to data for one expression
    cudaExprs = Utils.create_cuda_array(exprs, ExpressionElement(EMPTY, 0)) # column corresponds to data for one expression; TODO: replace this 0 with 'undef' if possible
    # put into separate CuArray, as this is static and would be inefficient to send separately to every kernel
    cudaStepsize = CuArray([Utils.get_max_inner_length(parameters), size(variables, 1)]) # max num of values per expression; max num of parameters per expression; number of variables per expression

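The comments above describe the buffer layout: one column per expression, padded with the given fill value (NaN32 for parameters, ExpressionElement(EMPTY, 0) for expressions). An illustrative sketch of such a packing helper follows; pack_padded is a made-up name and the real Utils.create_cuda_array may be implemented differently.

using CUDA

# Sketch of a padded-column layout, one column per expression.
function pack_padded(data::Vector{Vector{T}}, fill_value::T) where T
    rows = maximum(length, data)                # longest inner vector defines the column height
    buf = fill(fill_value, rows, length(data))  # pad shorter columns with fill_value
    for (col, vec) in enumerate(data)
        buf[1:length(vec), col] = vec
    end
    return CuArray(buf)                         # single host-to-device transfer
end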
@@ -32,6 +32,7 @@ function interpret(expressions::Vector{Expr}, variables::Matrix{Float32}, parame

    # Start kernel for each expression to ensure that no warp is working on different expressions
    @inbounds for i in eachindex(exprs)
        # TODO: Currently only the first expression gets evaluated. Either use a view on "cudaExprs" to determine the correct expression or extend cudaStepsize to include this information (this information was removed in a previous commit)
        kernel = @cuda launch=false fastmath=true interpret_expression(cudaExprs, cudaVars, cudaParams, cudaResults, cudaStepsize, i)
        # config = launch_configuration(kernel.fun)
        threads = min(variableCols, 128)
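One way to address the TODO above is the view-based option it mentions: hand each launch a view of the i-th expression column, since CUDA.jl can pass contiguous views of a CuArray to kernels. A sketch only, assuming interpret_expression is adapted to read from a single column; the blocks calculation follows the usual cover-all-rows pattern and is not taken from the existing code.

exprView = @view cudaExprs[:, i]  # contiguous column view selecting the i-th expression
kernel = @cuda launch=false fastmath=true interpret_expression(exprView, cudaVars, cudaParams, cudaResults, cudaStepsize, i)
threads = min(variableCols, 128)
blocks = cld(variableCols, threads)  # enough blocks to cover every variable set
kernel(exprView, cudaVars, cudaParams, cudaResults, cudaStepsize, i; threads=threads, blocks=blocks)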