Through the new `-foffload-via-llvm` flag, CUDA kernels can now be lowered to the LLVM/Offload API. On the Clang side, this is simply done by using the OpenMP offload toolchain and emitting calls to `llvm*` functions to orchestrate the kernel launch rather than `cuda*` functions. These `llvm*` functions are implemented on top of the existing LLVM/Offload API. As we are about to redefine the Offload API, this will help us in the design process as a second offload language. We do not support any CUDA APIs yet, however, we could: https://www.osti.gov/servlets/purl/1892137 For proper host execution we need to resurrect/rebase https://tianshilei.me/wp-content/uploads/2021/12/llpp-2021.pdf (which was designed for debugging). ``` ❯❯❯ cat test.cu extern "C" { void *llvm_omp_target_alloc_shared(size_t Size, int DeviceNum); void llvm_omp_target_free_shared(void *DevicePtr, int DeviceNum); } __global__ void square(int *A) { *A = 42; } int main(int argc, char **argv) { int DevNo = 0; int *Ptr = reinterpret_cast<int *>(llvm_omp_target_alloc_shared(4, DevNo)); *Ptr = 7; printf("Ptr %p, *Ptr %i\n", Ptr, *Ptr); square<<<1, 1>>>(Ptr); printf("Ptr %p, *Ptr %i\n", Ptr, *Ptr); llvm_omp_target_free_shared(Ptr, DevNo); } ❯❯❯ clang++ test.cu -O3 -o test123 -foffload-via-llvm --offload-arch=native ❯❯❯ llvm-objdump --offloading test123 test123: file format elf64-x86-64 OFFLOADING IMAGE [0]: kind elf arch gfx90a triple amdgcn-amd-amdhsa producer openmp ❯❯❯ LIBOMPTARGET_INFO=16 ./test123 Ptr 0x155448ac8000, *Ptr 7 Ptr 0x155448ac8000, *Ptr 42 ```
73 lines
3.1 KiB
C
73 lines
3.1 KiB
C
/* Minimal declarations for CUDA support. Testing purposes only. */

#include <stddef.h>

/* When compiling in CUDA/HIP mode, map the CUDA/HIP function and variable
   qualifiers onto the corresponding Clang attributes.  Otherwise (plain host
   C++ compilation) every qualifier expands to nothing so the same sources
   still parse as ordinary C++. */
#if __HIP__ || __CUDA__

#define __constant__ __attribute__((constant))
#define __device__ __attribute__((device))
#define __global__ __attribute__((global))
#define __host__ __attribute__((host))
#define __shared__ __attribute__((shared))
#if __HIP__
// __managed__ is only mapped to an attribute for HIP in this branch; a CUDA
// compilation taking this path would leave __managed__ undefined here.
#define __managed__ __attribute__((managed))
#endif
#define __launch_bounds__(...) __attribute__((launch_bounds(__VA_ARGS__)))

#else

// Host-only fallback: all qualifiers (including __managed__) expand to nothing.
#define __constant__
#define __device__
#define __global__
#define __host__
#define __shared__
#define __managed__
#define __launch_bounds__(...)

#endif
|
/// Grid/block dimension triple used by the <<<...>>> kernel-launch syntax.
/// Trailing dimensions that are not specified default to 1.
struct dim3 {
  unsigned x, y, z;
  __host__ __device__ dim3(unsigned dimX, unsigned dimY = 1, unsigned dimZ = 1)
      : x(dimX), y(dimY), z(dimZ) {}
};
|
|
|
|
/* Kernel-launch runtime interface, in three mutually exclusive flavors:
   HIP, LLVM/Offload (-foffload-via-llvm), or the CUDA default. */
// NOTE(review): the guard here tests HIP_PLATFORM (no underscores) while the
// qualifier block above tests __CUDA__ — presumably intentional for the tests
// that include this header, but worth confirming.
#if __HIP__ || HIP_PLATFORM

typedef struct hipStream *hipStream_t;
typedef enum hipError {} hipError_t;
// NOTE(review): unlike cudaConfigureCall below, this one is not extern "C" —
// confirm whether C++ linkage is intended.
int hipConfigureCall(dim3 gridSize, dim3 blockSize, size_t sharedSize = 0,
                     hipStream_t stream = 0);
// Recorded by the compiler ahead of a <<<...>>> launch to stash the launch
// configuration for the subsequent hipLaunchKernel call.
extern "C" hipError_t __hipPushCallConfiguration(dim3 gridSize, dim3 blockSize,
                                                 size_t sharedSize = 0,
                                                 hipStream_t stream = 0);
#ifndef __HIP_API_PER_THREAD_DEFAULT_STREAM__
extern "C" hipError_t hipLaunchKernel(const void *func, dim3 gridDim,
                                      dim3 blockDim, void **args,
                                      size_t sharedMem,
                                      hipStream_t stream);
#else
// Per-thread-default-stream variant ("_spt" suffix).
extern "C" hipError_t hipLaunchKernel_spt(const void *func, dim3 gridDim,
                                          dim3 blockDim, void **args,
                                          size_t sharedMem,
                                          hipStream_t stream);
#endif // __HIP_API_PER_THREAD_DEFAULT_STREAM__

#elif __OFFLOAD_VIA_LLVM__

// -foffload-via-llvm: kernel launches are lowered to these llvm* entry points
// (implemented on top of the LLVM/Offload API) instead of cuda*/hip* calls.
extern "C" unsigned __llvmPushCallConfiguration(dim3 gridDim, dim3 blockDim,
                                                size_t sharedMem = 0, void *stream = 0);
extern "C" unsigned llvmLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim,
                                     void **args, size_t sharedMem = 0, void *stream = 0);

#else

// Default: CUDA flavor of the launch interface.
typedef struct cudaStream *cudaStream_t;
typedef enum cudaError {} cudaError_t;
extern "C" int cudaConfigureCall(dim3 gridSize, dim3 blockSize,
                                 size_t sharedSize = 0,
                                 cudaStream_t stream = 0);
// Recorded by the compiler ahead of a <<<...>>> launch to stash the launch
// configuration for the subsequent cudaLaunchKernel call.
extern "C" int __cudaPushCallConfiguration(dim3 gridSize, dim3 blockSize,
                                           size_t sharedSize = 0,
                                           cudaStream_t stream = 0);
extern "C" cudaError_t cudaLaunchKernel(const void *func, dim3 gridDim,
                                        dim3 blockDim, void **args,
                                        size_t sharedMem, cudaStream_t stream);
// Per-thread-default-stream variant ("_ptsz" suffix).
extern "C" cudaError_t cudaLaunchKernel_ptsz(const void *func, dim3 gridDim,
                                             dim3 blockDim, void **args,
                                             size_t sharedMem, cudaStream_t stream);

#endif

// Device-side printf declaration so test kernels can print (debugging only).
extern "C" __device__ int printf(const char*, ...);