Added operators to the interpreter; storing the result is still missing.
Some checks failed
CI / Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }} (x64, ubuntu-latest, 1.10) (push) Has been cancelled
CI / Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }} (x64, ubuntu-latest, 1.6) (push) Has been cancelled
CI / Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }} (x64, ubuntu-latest, pre) (push) Has been cancelled

This commit is contained in:
2024-08-08 12:06:42 +02:00
parent f1f8c3b2a4
commit 62d10845e9
5 changed files with 195 additions and 79 deletions

View File

@ -12,80 +12,94 @@ export interpret
- parameters::Vector{Vector{Float64}} : The parameters to use. Each Vector contains the values for the parameters p1..pn. The number of parameters can be different for every expression
"
function interpret(expressions::Vector{ExpressionProcessing.PostfixType}, variables::Matrix{Float64}, parameters::Vector{Vector{Float64}})
    variableCols = size(variables, 2) # number of variable sets to evaluate each expression with

    # Send the input data to the GPU. Parameters and expressions are padded into
    # rectangular arrays; one column corresponds to the data for one expression.
    cudaVars = CuArray(variables)
    cudaParams = create_cuda_array(parameters, NaN64)
    cudaExprs = create_cuda_array(expressions, ExpressionElement(EMPTY, 0))

    # Put the static per-launch sizes into a separate CuArray, as sending them
    # separately to every kernel launch would be inefficient.
    # [1] = max number of values per expression; [2] = max number of parameters
    # per expression; [3] = number of variables per variable set.
    cudaStepsize = CuArray([get_max_inner_length(expressions), get_max_inner_length(parameters), size(variables, 1)])

    # Each expression produces one result per variable set (per column of `variables`).
    cudaResults = CuArray{Float64}(undef, length(expressions), variableCols)

    # Launch one kernel per expression to ensure that no warp works on different expressions.
    for i in eachindex(expressions)
        kernel = @cuda launch=false interpret_expression(cudaExprs, cudaVars, cudaParams, cudaResults, cudaStepsize, i)
        config = launch_configuration(kernel.fun)
        threads = min(variableCols, config.threads)
        blocks = cld(variableCols, threads)

        kernel(cudaExprs, cudaVars, cudaParams, cudaResults, cudaStepsize, i; threads, blocks)
    end

    # Return the device-side result matrix so callers can fetch the computed values
    # (the original allocated it but discarded it; backward-compatible addition).
    return cudaResults
end
const MAX_STACK_SIZE = 25 # max number of values (constants, variables, parameters) an expression can push

# GPU kernel: interprets the postfix expression with index `exprIndex`.
# - expressions: all postfix expressions flattened; stepsize[1] slots per expression, padded with EMPTY
# - variables:   all variable sets flattened; stepsize[3] values per set
# - parameters:  all parameter vectors flattened; stepsize[2] slots per expression
# - results:     output matrix with one value per (expression, variable set)
# - stepsize:    [max values per expression, max parameters per expression, variables per set]
function interpret_expression(expressions::CuDeviceArray{ExpressionElement}, variables::CuDeviceArray{Float64}, parameters::CuDeviceArray{Float64}, results::CuDeviceArray{Float64}, stepsize::CuDeviceArray{Int}, exprIndex::Int)
    firstExprIndex = ((exprIndex - 1) * stepsize[1]) + 1 # Inclusive
    lastExprIndex = firstExprIndex + stepsize[1] - 1 # Inclusive
    firstParamIndex = (exprIndex - 1) * stepsize[2] # Exclusive
    # lastParamIndex = firstParamIndex + stepsize[2] - 1 # Inclusive (probably not needed)

    # Shrink lastExprIndex to the real end of this expression (skip EMPTY padding).
    for i in reverse(firstExprIndex:lastExprIndex)
        if expressions[i].Type != EMPTY
            lastExprIndex = i
            break
        end
    end

    # Integer division: `/` (as originally written) yields a Float64 and would break
    # the index arithmetic below.
    variableCols = div(length(variables), stepsize[3])
    # TODO: firstVariableIndex/firstResultsIndex must incorporate the block index + stride so
    # every thread evaluates its own variable set; the current form is for testing only.
    firstVariableIndex = (exprIndex - 1) * stepsize[3] # Exclusive
    firstResultsIndex = ((exprIndex - 1) * variableCols) + 1 # Inclusive

    operationStack = MVector{MAX_STACK_SIZE, Float64}(undef) # TODO: support variable size for better memory usage
    operationStackTop = 0 # index of the last defined/valid stack value

    # TODO: look into index + stride so one thread can handle multiple variable sets
    # NOTE(review): this early return guards the still-untested evaluation below
    # (storing results is missing); remove it once the kernel writes into `results`.
    return

    for i in firstExprIndex:lastExprIndex
        if expressions[i].Type == EMPTY
            break
        elseif expressions[i].Type == INDEX
            # Positive value: 1-based index into this expression's variables.
            # Negative value: 1-based index (negated) into this expression's parameters.
            val = expressions[i].Value
            operationStackTop += 1
            if val > 0
                operationStack[operationStackTop] = variables[firstVariableIndex + val]
            else
                val = abs(val)
                operationStack[operationStackTop] = parameters[firstParamIndex + val]
            end
        elseif expressions[i].Type == FLOAT64
            # Constants are stored as raw bits in `.Value`; reinterpret back to Float64.
            operationStackTop += 1
            operationStack[operationStackTop] = reinterpret(Float64, expressions[i].Value)
        elseif expressions[i].Type == OPERATOR
            # TODO: maybe put this in a separate function
            # NOTE(review): `expressions[i].Type` is OPERATOR inside this branch, so comparing
            # it against ADD/SUBTRACT/... can never match — the operator code is presumably
            # encoded in `.Value`; confirm against ExpressionProcessing and decode from there.
            type = expressions[i].Type
            if type == ADD
                # Binary operators pop two values and push one: shrink the stack by one
                # and overwrite the new top with the combined result.
                operationStackTop -= 1
                operationStack[operationStackTop] = operationStack[operationStackTop] + operationStack[operationStackTop + 1]
            elseif type == SUBTRACT
                operationStackTop -= 1
                operationStack[operationStackTop] = operationStack[operationStackTop] - operationStack[operationStackTop + 1]
            elseif type == MULTIPLY
                operationStackTop -= 1
                operationStack[operationStackTop] = operationStack[operationStackTop] * operationStack[operationStackTop + 1]
            elseif type == DIVIDE
                operationStackTop -= 1
                operationStack[operationStackTop] = operationStack[operationStackTop] / operationStack[operationStackTop + 1]
            elseif type == POWER
                operationStackTop -= 1
                operationStack[operationStackTop] = operationStack[operationStackTop] ^ operationStack[operationStackTop + 1]
            elseif type == ABS
                # Unary operators replace the top of the stack in place.
                operationStack[operationStackTop] = abs(operationStack[operationStackTop])
            elseif type == LOG
                operationStack[operationStackTop] = log(operationStack[operationStackTop])
            elseif type == EXP
                operationStack[operationStackTop] = exp(operationStack[operationStackTop])
            elseif type == SQRT
                operationStack[operationStackTop] = sqrt(operationStack[operationStackTop])
            end
        else
            # Unknown element type: should not happen. Mark the result as invalid and stop.
            # Guard against writing at index 0 when the very first element is malformed.
            if operationStackTop >= 1
                operationStack[operationStackTop] = NaN
            end
            break
        end
    end

    # TODO: store the computed value in the output matrix once per-variable-set indexing works
    # results[firstResultsIndex] = operationStack[operationStackTop]
    return
end
@ -131,7 +145,6 @@ end
# @deprecate InterpretExplicit!(op::Operator, x, y) interpret_expression(expression, variables, parameters, exprIndex::Int)
# Kernel
function InterpretExplicit!(op::Operator, x, y)
index = (blockIdx().x - 1) * blockDim().x + threadIdx().x

View File

@ -39,4 +39,8 @@ end
result = Interpreter.convert_to_matrix(parameters, NaN64)
@test isequal(result, reference)
end
end
# TODO: Add several tests for the mathematical expressions
# One test for each operator. A second test if the operation order matters
# And some more complicated expressions, with some only having variables, some only having parameters and some having both