master-thesis/package/src/Transpiler.jl
module Transpiler
using CUDA
using ..ExpressionProcessing
# Number of threads per block/SM + max number of registers
# https://docs.nvidia.com/cuda/cuda-c-programming-guide/#features-and-technical-specifications
# Need to assume a max of 2048 threads per Streaming Multiprocessor (SM)
# One SM can have 64*1024 32-bit registers at max
# One thread can at max use 255 registers
# Meaning one has access to at most 32 registers in the worst case. Using 64-bit values, this number gets halved (see: https://docs.nvidia.com/cuda/cuda-c-programming-guide/#multiprocessor-level (almost at the end of the linked section))
# Maybe helpful for future performance tuning: https://docs.nvidia.com/cuda/cuda-c-programming-guide/#maximum-number-of-registers-per-thread
# https://docs.nvidia.com/cuda/cuda-c-programming-guide/#multiprocessor-level
# This states that using fewer registers allows more threads to reside on a single SM, which improves performance.
# So I could use more registers at the expense of performance. Depending on how much this would simplify my algorithm, I might do this and leave further optimisation to future work.
# Since the generated expressions should have between 10 and 50 symbols, I think allowing a max. of 128 32-bit registers should make for an easy algorithm. If during testing the result is slow, maybe try reducing the number of registers and perform more intelligent allocation/assignment.
# With 128 registers per thread, one SM can hold at most 512 resident threads (16 warps) in the worst case (see the arithmetic below).
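# Register budget arithmetic for reference:
#   65536 registers per SM / 2048 threads per SM     = 32 registers per thread (absolute worst case)
#   65536 registers per SM / 128 registers per thread = 512 resident threads = 16 warps per SM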
#
# Make a "function execute(...)" that takes the data and the transpiled code. Pass the data to the kernel and start executing
# Note: Maybe make an additional function that transpiles and executes the code. This would then be the function the user calls (a sketch follows below)
#
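# A minimal sketch of the planned execute(...) described above, assuming CUDA.jl's
# CuModule/CuFunction/cudacall for loading and launching the transpiled PTX, a kernel entry
# named "ExpressionProcessing" (as used in transpile below) and a parameter layout of
# (nrOfVarSets, nrOfVarsPerSet, pointer to the variables). The layout/types must ultimately
# match whatever get_kernel_signature ends up emitting; this is an untested illustration.
function execute(ptxKernel::String, variables::CuArray{Float32}, nrOfVarSets::Integer, nrOfVarsPerSet::Integer)
    ptxModule = CuModule(ptxKernel)                        # load the transpiled PTX on the device
    kernel = CuFunction(ptxModule, "ExpressionProcessing") # entry point name used by transpile
    threadsPerBlock = 256
    blocks = cld(nrOfVarSets, threadsPerBlock)             # one thread per variable set (see guard clause)
    cudacall(kernel, Tuple{Int32, Int32, CuPtr{Float32}},
        Int32(nrOfVarSets), Int32(nrOfVarsPerSet), variables;
        threads = threadsPerBlock, blocks = blocks)
end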
const Operand = Union{Float32, String} # an operand is either a fixed value or a register
# To increase performance, it would probably be best for all helper functions to return their IOBuffer and not a String
# seekstart(buf1); write(buf2, buf1)
function transpile(expression::ExpressionProcessing.PostfixType)::String
    exitJumpLocationMarker = "\$L__BB0_2"
    ptxBuffer = IOBuffer()

    # TODO: Suboptimal solution
    signature, paramLoading = get_kernel_signature("ExpressionProcessing", [Int32, Int32, Float32]) # nrOfVarSets, nrOfVarsPerSet, Vars
    guardClause = get_guard_clause(exitJumpLocationMarker, "%parameter0") # %parameter0 holds the number of variable sets (the first kernel parameter)

    println(ptxBuffer, get_cuda_header())
    println(ptxBuffer, signature)
    println(ptxBuffer, "{")

    # must run before get_register_definitions() below so the registers it allocates get declared
    calc_code = generate_calculation_code(expression, "%parameter2")
    println(ptxBuffer, get_register_definitions())
    println(ptxBuffer, paramLoading)
    println(ptxBuffer, guardClause)
    println(ptxBuffer, calc_code)

    # exit jump location
    print(ptxBuffer, exitJumpLocationMarker); println(ptxBuffer, ": ret;")
    println(ptxBuffer, "}")

    generatedCode = String(take!(ptxBuffer))
    println(generatedCode) # debug output
    return generatedCode
end
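# The assembled PTX roughly has the following shape (illustrative):
#
#   .version 7.1
#   .target sm_61
#   .address_size 64
#
#   <kernel signature>
#   {
#   <register definitions>
#   <parameter loading>
#   <guard clause>
#   <calculation code>
#   $L__BB0_2: ret;
#   }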
# TODO: Make version, target and address_size configurable (.address_size is the width, in bits, of the addresses used in the generated code: 32 or 64)
function get_cuda_header()::String
    return "
.version 7.1
.target sm_61
.address_size 64
"
end
function get_kernel_signature(kernelName::String, parameters::Vector{DataType})::Tuple{String, String}
    signatureBuffer = IOBuffer()
    paramLoadingBuffer = IOBuffer()
    print(signatureBuffer, ".visible .entry ")
    print(signatureBuffer, kernelName)
    println(signatureBuffer, "(")

    for i in eachindex(parameters)
        # Note: the parameter types are currently hard-coded as .u32 (see the TODO in transpile)
        print(signatureBuffer, " .param .u32", " ", "param_", i)

        parameterRegister = get_next_free_register("r")
        println(paramLoadingBuffer, "ld.param.u32 $parameterRegister, [param_$i];")
        println(paramLoadingBuffer, "cvta.to.global.u32 $(get_next_free_register("parameter")), $parameterRegister;")
        if i != lastindex(parameters)
            println(signatureBuffer, ",")
        end
    end
    print(signatureBuffer, ")")

    return (String(take!(signatureBuffer)), String(take!(paramLoadingBuffer)))
end
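# For get_kernel_signature("ExpressionProcessing", [Int32, Int32, Float32]) and fresh register
# counters, the two returned strings look roughly like this (illustrative):
#
#   .visible .entry ExpressionProcessing(
#    .param .u32 param_1,
#    .param .u32 param_2,
#    .param .u32 param_3)
#
#   ld.param.u32 %r0, [param_1];
#   cvta.to.global.u32 %parameter0, %r0;
#   ld.param.u32 %r1, [param_2];
#   cvta.to.global.u32 %parameter1, %r1;
#   ld.param.u32 %r2, [param_3];
#   cvta.to.global.u32 %parameter2, %r2;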
"
Constructs the PTX code used for handling the case where too many threads are started.
- param ```nrOfVarSetsRegister```: The register which holds the total amount of variable sets for the kernel
"
function get_guard_clause(exitJumpLocation::String, nrOfVarSetsRegister::String)::String
    guardBuffer = IOBuffer()

    threadsPerCTA = get_next_free_register("r")
    blockId = get_next_free_register("r")
    threadIdInBlock = get_next_free_register("r")

    # load the thread/block identification special registers into the registers defined above
    println(guardBuffer, "mov.u32 $threadsPerCTA, %ntid.x;")
    println(guardBuffer, "mov.u32 $blockId, %ctaid.x;")
    println(guardBuffer, "mov.u32 $threadIdInBlock, %tid.x;")

    globalThreadId = get_next_free_register("r") # the index of the variable set this thread processes
    breakCondition = get_next_free_register("p")
    nrOfVarSets = get_next_free_register("i")
    println(guardBuffer, "ld.global.u32 $nrOfVarSets, $nrOfVarSetsRegister;")

    println(guardBuffer, "mad.lo.s32 $globalThreadId, $threadsPerCTA, $blockId, $threadIdInBlock;")
    println(guardBuffer, "setp.ge.s32 $breakCondition, $globalThreadId, $nrOfVarSets;") # guard condition: globalThreadId >= nrOfVarSets

    # branch to the exit label if breakCondition is true
    print(guardBuffer, "@$breakCondition bra $exitJumpLocation;")

    return String(take!(guardBuffer))
end
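# With fresh register counters and nrOfVarSetsRegister = "%parameter0", the emitted guard looks
# roughly like this (illustrative; in transpile the %r registers start at a higher index because
# get_kernel_signature already allocated some):
#
#   mov.u32 %r0, %ntid.x;
#   mov.u32 %r1, %ctaid.x;
#   mov.u32 %r2, %tid.x;
#   ld.global.u32 %i0, %parameter0;
#   mad.lo.s32 %r3, %r0, %r1, %r2;
#   setp.ge.s32 %p0, %r3, %i0;
#   @%p0 bra $L__BB0_2;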
function generate_calculation_code(expression::ExpressionProcessing.PostfixType, variablesRegister::String)::String
    codeBuffer = IOBuffer()
    operands = Vector{Operand}()

    for i in eachindex(expression)
        token = expression[i]

        if token.Type == FLOAT32
            push!(operands, reinterpret(Float32, token.Value))
        elseif token.Type == OPERATOR
            operator = reinterpret(Operator, token.Value)
            right = nothing
            if is_binary_operator(operator)
                right = pop!(operands)
                left = pop!(operands)
            else
                left = pop!(operands)
            end
            operation, resultRegister = get_operation(operator, left, right)

            println(codeBuffer, operation)
            push!(operands, resultRegister)
        elseif token.Type == INDEX
            # TODO
            # address: variablesRegister + startIndex + index * sizeof(Float32)
            # startIndex: should be computable from the global thread id and the number of variables per set
            if token.Value > 0 # variables
                var = get_next_free_register("f")
                # TODO: investigate how to best load a variable from global into local memory, especially when it is used multiple times (probably some kind of symbol table)
                push!(operands, "[$variablesRegister+$(token.Value * sizeof(Float32))]") # missing: startIndex
            end
        end
    end

    return String(take!(codeBuffer))
end
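# Illustration of the operand stack above, for the postfix input "x1 x2 *" (INDEX 1, INDEX 2, MULTIPLY)
# with variablesRegister = "%parameter2":
#   INDEX 1   -> push "[%parameter2+4]"
#   INDEX 2   -> push "[%parameter2+8]"
#   MULTIPLY  -> pop both, emit "mul.f32 %fN, <left>, <right>;", push "%fN"
# (the indexed operands still need to be loaded into registers first, see the TODO above)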
function type_to_ptx_type(type::DataType)::String
    if type == Int64
        return ".s64"
    elseif type == Int32
        return ".s32"
    elseif type == Float32
        return ".f32"
    else
        return ".b64"
    end
end
function get_operation(operator::Operator, left::Operand, right::Union{Operand, Nothing} = nothing)::Tuple{String, String}
    resultRegister = get_next_free_register("f")
    resultCode = ""

    if is_binary_operator(operator) && isnothing(right)
        throw(ArgumentError("Given operator '$operator' is a binary operator. However, only one operand has been given."))
    end

    if operator == ADD
        resultCode = "add.f32 $resultRegister, $left, $right;"
    elseif operator == SUBTRACT
        resultCode = "sub.f32 $resultRegister, $left, $right;"
    elseif operator == MULTIPLY
        resultCode = "mul.f32 $resultRegister, $left, $right;"
    elseif operator == DIVIDE
        resultCode = "div.approx.f32 $resultRegister, $left, $right;"
    elseif operator == POWER
        # x^y == 2^(y*log2(x)) as generated by nvcc for "pow(x, y)"
        resultCode = "
lg2.approx.f32 $resultRegister, $left;
mul.f32 $resultRegister, $right, $resultRegister;
ex2.approx.f32 $resultRegister, $resultRegister;"
    elseif operator == ABS
        resultCode = "abs.f32 $resultRegister, $left;"
    elseif operator == LOG
        # log(x) == log2(x) * ln(2) as generated by nvcc for "log(x)"
        resultCode = "
lg2.approx.f32 $resultRegister, $left;
mul.f32 $resultRegister, $resultRegister, 0.693147182;"
    elseif operator == EXP
        # e^x == 2^(x/ln(2)) as generated by nvcc for "exp(x)"
        resultCode = "
mul.f32 $resultRegister, $left, 1.44269502;
ex2.approx.f32 $resultRegister, $resultRegister;"
    elseif operator == SQRT
        resultCode = "sqrt.approx.f32 $resultRegister, $left;"
    else
        throw(ArgumentError("Operator conversion to PTX not implemented for '$operator'"))
    end

    return (resultCode, resultRegister)
end
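# Example: assuming the next free "f" register is %f3, get_operation(ADD, "%f1", "%f2")
# returns ("add.f32 %f3, %f1, %f2;", "%f3").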
let registers = Dict() # stores the number of registers already allocated per register name/prefix
    global get_next_free_register
    global get_register_definitions

    # By convention these names correspond to the following types:
    # - p -> pred
    # - f -> float32
    # - r -> 32 bit
    # - var -> float32
    # - param -> float32 !! although they might get inserted as fixed numbers and not be sent to the GPU?
    # - parameter -> u32 (global address of a kernel parameter)
    # - i -> u32
    function get_next_free_register(name::String)::String
        if haskey(registers, name)
            registers[name] += 1
        else
            registers[name] = 1
        end

        return string("%", name, registers[name] - 1)
    end

    function get_register_definitions()::String
        registersBuffer = IOBuffer()

        for definition in registers
            regType = ""
            if definition.first == "p"
                regType = ".pred"
            elseif definition.first == "f"
                regType = ".f32"
            elseif definition.first == "var"
                regType = ".f32"
            elseif definition.first == "param"
                regType = ".f32"
            elseif definition.first == "r"
                regType = ".b32"
            elseif definition.first == "parameter"
                regType = ".u32"
            elseif definition.first == "i"
                regType = ".u32"
            else
                throw(ArgumentError("Unknown register name used. Name '$(definition.first)' cannot be mapped to a PTX type."))
            end
            println(registersBuffer, ".reg $regType %$(definition.first)<$(definition.second)>;")
        end

        return String(take!(registersBuffer))
    end
end
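# Example of the register helpers (illustrative), starting from an empty register dict:
#   get_next_free_register("f") -> "%f0"
#   get_next_free_register("f") -> "%f1"
#   get_register_definitions()  -> ".reg .f32 %f<2>;\n"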
end