tide-GPR 0.0.9__py3-none-manylinux_2_28_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tide/csrc/common_gpu.h ADDED
@@ -0,0 +1,56 @@
1
#ifndef COMMON_GPU_H
#define COMMON_GPU_H

#include <cuda_runtime.h>

#include <cstdio>   // fprintf / stderr, used by the error helpers in this header
#include <cstdlib>  // exit / EXIT_FAILURE, used by the error helpers in this header

// TIDE_DTYPE_FLOAT selects the single-precision code path (1 = float,
// 0 = double) for the vectorized-load optimization.  It is normally defined
// by the build system (CMakeLists.txt); the fallback below only fires when
// the build system did not provide it.
//
// NOTE(review): the previous fallback compared `TIDE_DTYPE == float` in the
// preprocessor.  The preprocessor replaces unrecognized identifiers with 0,
// so that test read `0 == 0` and was true even when TIDE_DTYPE was `double`
// -- the fallback always produced TIDE_DTYPE_FLOAT == 1.  Type tokens cannot
// be compared in the preprocessor, so we keep the effective behavior
// (default to single precision when TIDE_DTYPE is defined) but make it
// explicit; rely on the build system to set TIDE_DTYPE_FLOAT for double.
#ifndef TIDE_DTYPE_FLOAT
#ifdef TIDE_DTYPE
#define TIDE_DTYPE_FLOAT 1
#endif
#endif
17
+
18
// Check the most recent kernel launch for errors and abort on failure.
//
// Wrapped in do { } while (0) so the macro expands to a single statement
// (safe inside an un-braced if/else, where a bare { } block would break the
// else binding) and so the local variable cannot shadow-leak into the
// caller's scope.  Note: cudaGetLastError() only surfaces launch-time
// errors (bad configuration); asynchronous execution errors appear at the
// next synchronizing call such as cudaDeviceSynchronize().
#define CHECK_KERNEL_ERROR                                                  \
    do {                                                                    \
        cudaError_t tide_kernel_err_ = cudaGetLastError();                  \
        if (tide_kernel_err_ != cudaSuccess) {                              \
            fprintf(stderr, "CUDA kernel error: %s at %s:%d\n",             \
                    cudaGetErrorString(tide_kernel_err_),                   \
                    __FILE__, __LINE__);                                    \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)
28
+
29
// Report a CUDA runtime error (file and line included) on stderr and, when
// `abort` is true (the default), terminate the process with the error code.
// A cudaSuccess code is a no-op.  Typically invoked through a wrapper macro
// that supplies __FILE__ / __LINE__.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) {
    if (code == cudaSuccess) {
        return;  // nothing to report
    }
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) {
        exit(code);
    }
}
36
+
37
// Custom atomicAdd for double precision (needed for older compute capabilities)
// This is a software emulation using atomicCAS
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 600
// Pre-SM60 (or the host-side compilation pass, where __CUDA_ARCH__ is
// undefined): emulate a double-precision atomic add with a compare-and-swap
// retry loop, the pattern documented in the CUDA C++ Programming Guide.
// Returns the value stored at *address before the add, matching the native
// atomicAdd contract.
__device__ __forceinline__ double atomicAddDouble(double* address, double val) {
    // Reinterpret the double's bit pattern as a 64-bit integer: atomicCAS
    // only operates on integer words of this size.
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        // Attempt to swap in (assumed + val); atomicCAS returns the word it
        // actually observed, which becomes the new `old`.
        old = atomicCAS(address_as_ull, assumed,
            __double_as_longlong(val + __longlong_as_double(assumed)));
        // If another thread updated *address between our read and the CAS,
        // old != assumed and we retry with the freshly observed value.
        // (Integer compare also terminates correctly for NaN payloads,
        // where a floating-point != would loop forever.)
    } while (assumed != old);
    return __longlong_as_double(old);
}
#else
// SM60+ has hardware double-precision atomicAdd; forward to it directly.
__device__ __forceinline__ double atomicAddDouble(double* address, double val) {
    return atomicAdd(address, val);
}
#endif
55
+
56
+ #endif